from __future__ import absolute_import

import os
import re
import json
import base64
import inspect
import requests
import mimetypes

from StringIO import StringIO
from pytz import utc
from datetime import datetime, timedelta
from random import randint
from contextlib import contextmanager

from django.conf import settings

# Do not import from sentry here!  Bad things will happen

optional_group_matcher = re.compile(r'\(\?\:(.+)\)')
named_group_matcher = re.compile(r'\(\?P<(\w+)>[^\)]+\)')
non_named_group_matcher = re.compile(r'\(.*?\)')
camel_re = re.compile(r'([A-Z]+)([a-z])')

API_PREFIX = '/api/0/'

scenarios = {}


def simplify_regex(pattern):
    """Clean up urlpattern regexes into something somewhat readable by
    Mere Humans: turns something like
    "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
    into "{sport_slug}/athletes/{athlete_slug}/"
    """
    pattern = optional_group_matcher.sub(lambda m: '[%s]' % m.group(1), pattern)

    # handle named groups first
    pattern = named_group_matcher.sub(lambda m: '{%s}' % m.group(1), pattern)

    # handle non-named groups
    pattern = non_named_group_matcher.sub("{var}", pattern)

    # clean up any outstanding regex-y characters.
    pattern = pattern.replace('^', '').replace('$', '') \
        .replace('?', '').replace('//', '/').replace('\\', '')
    if not pattern.startswith('/'):
        pattern = '/' + pattern
    return pattern


def get_internal_endpoint_from_pattern(pattern):
    from sentry.api.base import Endpoint
    if not hasattr(pattern, 'callback'):
        return
    if hasattr(pattern.callback, 'cls'):
        cls = pattern.callback.cls
        if issubclass(cls, Endpoint):
            return cls
    elif hasattr(pattern.callback, 'cls_instance'):
        inst = pattern.callback.cls_instance
        if isinstance(inst, Endpoint):
            return inst.__class__


def extract_documentation(func):
    doc = inspect.getdoc(func)
    if doc is not None:
        return doc.decode('utf-8')


def get_endpoint_path(internal_endpoint):
    return '%s.%s' % (
        internal_endpoint.__module__,
        internal_endpoint.__name__,
    )


def extract_title_and_text(doc):
    title = None
    iterable = iter((doc or u'').splitlines())
    clean_end = False

    for line in iterable:
        line = line.strip()
        if title is None:
            if not line:
                continue
            title = line
        elif line[0] * len(line) == line:
            clean_end = True
            break
        else:
            break

    lines = []
    if clean_end:
        for line in iterable:
            if line.strip():
                lines.append(line)
                break
        lines.extend(iterable)

    return title, lines


def camelcase_to_dashes(string):
    def handler(match):
        camel, regular = match.groups()
        if len(camel) != 1:
            camel = camel[:-1].lower() + '-' + camel[-1].lower()
        else:
            camel = camel.lower()
        return '-' + camel + regular.lower()
    return camel_re.sub(handler, string).lstrip('-')


def extract_endpoint_info(pattern, internal_endpoint):
    path = simplify_regex(pattern.regex.pattern)
    from sentry.constants import HTTP_METHODS
    for method_name in HTTP_METHODS:
        if method_name in ('HEAD', 'OPTIONS'):
            continue
        method = getattr(internal_endpoint, method_name.lower(), None)
        if method is None:
            continue
        doc = extract_documentation(method)
        if doc is None:
            continue
        section = getattr(internal_endpoint, 'doc_section', None)
        if section is None:
            continue
        endpoint_name = method.__name__.title() + internal_endpoint.__name__
        if endpoint_name.endswith('Endpoint'):
            endpoint_name = endpoint_name[:-8]
        endpoint_name = camelcase_to_dashes(endpoint_name)
        title, text = extract_title_and_text(doc)
        yield dict(
            path=API_PREFIX + path.lstrip('/'),
            method=method_name,
            title=title,
            text=text,
            scenarios=getattr(method, 'api_scenarios', None) or [],
            section=section.name.lower(),
            internal_path='%s:%s' % (
                get_endpoint_path(internal_endpoint),
                method.__name__
            ),
            endpoint_name=endpoint_name,
        )


def iter_endpoints():
    from sentry.api.urls import urlpatterns
    for pattern in urlpatterns:
        internal_endpoint = get_internal_endpoint_from_pattern(pattern)
        if internal_endpoint is None:
            continue
        for endpoint in extract_endpoint_info(pattern, internal_endpoint):
            yield endpoint


def scenario(ident):
    def decorator(f):
        if ident in scenarios:
            raise RuntimeError('Scenario duplicate: %s' % ident)
        scenarios[ident] = f
        f.api_scenario_ident = ident
        return f
    return decorator


def attach_scenarios(scenarios):
    def decorator(f):
        f.api_scenarios = [x.api_scenario_ident for x in scenarios]
        return f
    return decorator


def iter_scenarios():
    # Make sure everything is imported.
    for endpoint in iter_endpoints():
        pass
    return iter(sorted(scenarios.items()))


def get_sections():
    from sentry.api.base import DocSection
    return dict((x.name.lower(), x.value) for x in DocSection)


def create_sample_time_series(event):
    from sentry.app import tsdb
    group = event.group

    now = datetime.utcnow().replace(tzinfo=utc)

    for _ in xrange(60):
        count = randint(1, 10)
        tsdb.incr_multi((
            (tsdb.models.project, group.project.id),
            (tsdb.models.group, group.id),
        ), now, count)
        tsdb.incr_multi((
            (tsdb.models.organization_total_received,
             group.project.organization_id),
            (tsdb.models.project_total_received, group.project.id),
        ), now, int(count * 1.1))
        tsdb.incr_multi((
            (tsdb.models.organization_total_rejected,
             group.project.organization_id),
            (tsdb.models.project_total_rejected, group.project.id),
        ), now, int(count * 0.1))
        now = now - timedelta(seconds=1)

    for _ in xrange(24 * 30):
        count = randint(100, 1000)
        tsdb.incr_multi((
            (tsdb.models.project, group.project.id),
            (tsdb.models.group, group.id),
        ), now, count)
        tsdb.incr_multi((
            (tsdb.models.organization_total_received,
             group.project.organization_id),
            (tsdb.models.project_total_received, group.project.id),
        ), now, int(count * 1.1))
        tsdb.incr_multi((
            (tsdb.models.organization_total_rejected,
             group.project.organization_id),
            (tsdb.models.project_total_rejected, group.project.id),
        ), now, int(count * 0.1))
        now = now - timedelta(hours=1)


class MockUtils(object):
    def create_user(self, mail):
        from sentry.models import User
        user, _ = User.objects.get_or_create(
            username=mail,
            defaults={
                'email': mail,
            }
        )
        user.set_password('dummy')
        user.save()
        return user

    def create_org(self, name, owner):
        from sentry.models import Organization, OrganizationMember
        org, _ = Organization.objects.get_or_create(
            name=name,
            defaults={
                'owner': owner,
            },
        )

        dummy_member, _ = OrganizationMember.objects.get_or_create(
            user=owner,
            organization=org,
            defaults={
                'has_global_access': False,
            }
        )

        if dummy_member.has_global_access:
            dummy_member.update(has_global_access=False)

        return org

    def create_api_key(self, org, label='Default'):
        from sentry.models import ApiKey
        return ApiKey.objects.get_or_create(
            organization=org,
            label=label,
            scopes=(1 << len(ApiKey.scopes.keys())) - 1,
        )[0]

    def create_client_key(self, project, label='Default'):
        from sentry.models import ProjectKey
        return ProjectKey.objects.get_or_create(
            project=project,
            label=label
        )[0]

    def create_team(self, name, org):
        from sentry.models import Team
        return Team.objects.get_or_create(
            name=name,
            defaults={
                'organization': org,
            },
        )[0]

    def create_project(self, name, team, org):
        from sentry.models import Project
        return Project.objects.get_or_create(
            team=team,
            name=name,
            defaults={
                'organization': org,
            }
        )[0]

    def create_release(self, project, user, version=None):
        from sentry.models import Release, Activity
        if version is None:
            version = os.urandom(20).encode('hex')
        release = Release.objects.get_or_create(
            version=version,
            project=project,
        )[0]
        Activity.objects.create(
            type=Activity.RELEASE,
            project=project,
            ident=version,
            user=user,
            data={'version': version},
        )
        return release

    def create_release_file(self, project, release, path,
                            content_type=None, contents=None):
        from sentry.models import File, ReleaseFile
        if content_type is None:
            content_type = mimetypes.guess_type(path)[0] or 'text/plain'
            if content_type.startswith('text/'):
                content_type += '; encoding=utf-8'
        f = File(name=path.rsplit('/', 1)[-1], type='release.file', headers={
            'Content-Type': content_type
        })
        f.putfile(StringIO(contents or ''))
        return ReleaseFile.objects.create(
            project=project,
            release=release,
            file=f,
            name=path
        )

    def create_event(self, project, release, platform='python', raw=True):
        from sentry.utils.samples import create_sample_event
        event = create_sample_event(
            project=project,
            platform=platform,
            release=release.version,
            raw=raw
        )
        create_sample_time_series(event)
        return event


class Runner(object):
    """The runner is a special object that holds state for the automatic
    running of example scenarios.  It gets created by api-docs/generator.py
    which does the majority of the heavy lifting.  It mainly exists here
    so that the scenarios can be run separately if needed.
    """

    def __init__(self, ident, func, api_key, org, me, teams=None):
        self.ident = ident
        self.func = func
        self.requests = []

        self.utils = MockUtils()

        self.api_key = api_key
        self.org = org
        self.me = me
        self.teams = teams

    @property
    def default_team(self):
        return self.teams[0]['team']

    @property
    def default_project(self):
        return self.teams[0]['projects'][0]['project']

    @property
    def default_release(self):
        return self.teams[0]['projects'][0]['release']

    @property
    def default_event(self):
        return self.teams[0]['projects'][0]['events'][0]

    @contextmanager
    def isolated_project(self, project_name):
        project = self.utils.create_project(project_name,
                                            team=self.default_team,
                                            org=self.org)
        release = self.utils.create_release(project=project, user=self.me)
        self.utils.create_event(project=project, release=release,
                                platform='python')
        self.utils.create_event(project=project, release=release,
                                platform='java')
        try:
            yield project
        finally:
            project.delete()

    @contextmanager
    def isolated_org(self, org_name):
        org = self.utils.create_org(org_name, owner=self.me)
        try:
            yield org
        finally:
            org.delete()

    def request(self, method, path, headers=None, data=None, api_key=None,
                format='json'):
        if api_key is None:
            api_key = self.api_key
        path = '/api/0/' + path.lstrip('/')
        headers = dict(headers or {})

        request_is_json = True
        body = None
        files = None
        was_multipart = False
        if data is not None:
            if format == 'json':
                body = json.dumps(data, sort_keys=True)
                headers['Content-Type'] = 'application/json'
            elif format == 'multipart':
                files = {}
                for key, value in data.items():
                    if hasattr(value, 'read') or isinstance(value, tuple):
                        files[key] = value
                        del data[key]
                        was_multipart = True
                body = data

        req_headers = dict(headers)
        req_headers['Host'] = 'app.getsentry.com'
        req_headers['Authorization'] = 'Basic %s' % base64.b64encode('%s:' % (
            api_key.key.encode('utf-8')))

        url = 'http://127.0.0.1:%s%s' % (
            settings.SENTRY_APIDOCS_WEB_PORT,
            path,
        )

        response = requests.request(method=method, url=url, files=files,
                                    headers=req_headers, data=body)
        response_headers = dict(response.headers)

        # Don't want those
        response_headers.pop('server', None)
        response_headers.pop('date', None)

        if response.headers.get('Content-Type') == 'application/json':
            response_data = response.json()
            is_json = True
        else:
            response_data = response.text
            is_json = False

        if was_multipart:
            headers['Content-Type'] = response.request.headers['content-type']
            data = response.request.body
            request_is_json = False

        rv = {
            'request': {
                'method': method,
                'path': path,
                'headers': headers,
                'data': data,
                'is_json': request_is_json,
            },
            'response': {
                'headers': response_headers,
                'status': response.status_code,
                'reason': response.reason,
                'data': response_data,
                'is_json': is_json,
            }
        }

        self.requests.append(rv)
        return rv

    def to_json(self):
        doc = extract_documentation(self.func)
        title, text = extract_title_and_text(doc)
        return {
            'ident': self.ident,
            'requests': self.requests,
            'title': title,
            'text': text,
        }
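# --- Usage sketch (editor's addition) ----------------------------------------
# How the pieces above are meant to be combined: a @scenario function drives
# requests through the Runner, and @attach_scenarios links it to an endpoint
# method so iter_endpoints()/iter_scenarios() can pick both up.  The endpoint
# and scenario names below are hypothetical; only the decorators, the Runner
# API, and the Endpoint/DocSection imports already used in this module are
# assumed.
#
#     @scenario('list-example-things')
#     def list_example_things_scenario(runner):
#         runner.request(
#             method='GET',
#             path='/organizations/%s/example-things/' % runner.org.slug,
#         )
#
#     class ExampleThingIndexEndpoint(Endpoint):
#         doc_section = DocSection.ORGANIZATIONS
#
#         @attach_scenarios([list_example_things_scenario])
#         def get(self, request, organization):
#             """
#             List an Organization's Example Things
#             `````````````````````````````````````
#             """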
from pyhamtools.consts import LookupConventions as const


def freq_to_band(freq):
    """converts a Frequency [kHz] into the band and mode according to the IARU bandplan

    Args:
        frequency (float): Frequency in kHz

    Returns:
        dict: Dictionary containing the band (int) and mode (str)

    Raises:
        KeyError: Wrong frequency or out of band

    Example:
        The following example converts the frequency *14005.3 kHz* into band and mode.

        >>> from pyhamtools.utils import freq_to_band
        >>> print freq_to_band(14005.3)
        {
            'band': 20,
            'mode': CW
        }

    Note:
        Modes are:
            - CW
            - USB
            - LSB
            - DIGITAL

    """
    band = None
    mode = None
    if ((freq >= 135) and (freq <= 138)):
        band = 2190
        mode = const.CW
    elif ((freq >= 1800) and (freq <= 2000)):
        band = 160
        if ((freq >= 1800) and (freq < 1838)):
            mode = const.CW
        elif ((freq >= 1838) and (freq < 1840)):
            mode = const.DIGITAL
        elif (freq == 1840):
            mode = const.DIGITAL  #FT8
        elif ((freq > 1840) and (freq < 2000)):
            mode = const.LSB
    elif ((freq >= 3500) and (freq <= 4000)):
        band = 80
        if ((freq >= 3500) and (freq < 3573)):
            mode = const.CW
        elif (freq == 3573):
            mode = const.DIGITAL  #FT8
        elif ((freq > 3573) and (freq < 3580)):
            mode = const.CW
        elif ((freq >= 3580) and (freq < 3600)):
            mode = const.DIGITAL
        elif ((freq >= 3600) and (freq < 4000)):
            mode = const.LSB
    elif ((freq >= 5000) and (freq <= 5500)):
        band = 60
    elif ((freq >= 7000) and (freq <= 7300)):
        band = 40
        if ((freq >= 7000) and (freq < 7040)):
            mode = const.CW
        elif ((freq >= 7040) and (freq < 7050)):
            mode = const.DIGITAL
        elif ((freq >= 7050) and (freq < 7074)):
            mode = const.LSB
        elif (freq == 7074):
            mode = const.DIGITAL  #FT8
        elif ((freq > 7074) and (freq < 7300)):
            mode = const.LSB
    elif ((freq >= 10100) and (freq <= 10150)):
        band = 30
        if ((freq >= 10100) and (freq < 10136)):
            mode = const.CW
        elif (freq == 10136):
            mode = const.DIGITAL  #FT8
        elif ((freq > 10136) and (freq < 10140)):
            mode = const.CW
        elif ((freq >= 10140) and (freq < 10150)):
            mode = const.DIGITAL
    elif ((freq >= 14000) and (freq <= 14350)):
        band = 20
        if ((freq >= 14000) and (freq < 14070)):
            mode = const.CW
        elif ((freq >= 14070) and (freq < 14074)):
            mode = const.DIGITAL
        elif (freq == 14074):
            mode = const.DIGITAL  #FT8
        elif ((freq > 14074) and (freq < 14099)):
            mode = const.DIGITAL
        elif ((freq >= 14100) and (freq < 14350)):
            mode = const.USB
    elif ((freq >= 18068) and (freq <= 18268)):
        band = 17
        if ((freq >= 18068) and (freq < 18095)):
            mode = const.CW
        elif ((freq >= 18095) and (freq < 18100)):
            mode = const.DIGITAL
        elif (freq == 18100):
            mode = const.DIGITAL  #FT8
        elif ((freq > 18100) and (freq < 18110)):
            mode = const.DIGITAL
        elif ((freq >= 18110) and (freq < 18268)):
            mode = const.USB
    elif ((freq >= 21000) and (freq <= 21450)):
        band = 15
        if ((freq >= 21000) and (freq < 21070)):
            mode = const.CW
        elif ((freq >= 21070) and (freq < 21074)):
            mode = const.DIGITAL
        elif (freq == 21074):
            mode = const.DIGITAL  #FT8
        elif ((freq > 21074) and (freq < 21150)):
            mode = const.DIGITAL
        elif ((freq >= 21150) and (freq < 21450)):
            mode = const.USB
    elif ((freq >= 24890) and (freq <= 24990)):
        band = 12
        if ((freq >= 24890) and (freq < 24915)):
            mode = const.CW
        elif (freq == 24915):
            mode = const.DIGITAL  #FT8
        elif ((freq > 24915) and (freq < 24930)):
            mode = const.DIGITAL
        elif ((freq >= 24930) and (freq < 24990)):
            mode = const.USB
    elif ((freq >= 28000) and (freq <= 29700)):
        band = 10
        if ((freq >= 28000) and (freq < 28070)):
            mode = const.CW
        elif ((freq >= 28070) and (freq < 28074)):
            mode = const.DIGITAL
        elif (freq == 28074):
            mode = const.DIGITAL  #FT8
        elif ((freq > 28074) and (freq < 28190)):
            mode = const.DIGITAL
        elif ((freq >= 28300) and (freq < 29700)):
            mode = const.USB
    elif ((freq >= 50000) and (freq <= 54000)):
        band = 6
        if ((freq >= 50000) and (freq < 50100)):
            mode = const.CW
        elif ((freq >= 50100) and (freq < 50313)):
            mode = const.USB
        elif (freq == 50313):
            mode = const.DIGITAL  #FT8
        elif ((freq > 50313) and (freq < 50500)):
            mode = const.USB
        elif ((freq >= 50500) and (freq < 51000)):
            mode = const.DIGITAL
    elif ((freq >= 70000) and (freq <= 71000)):
        band = 4
        mode = None
    elif ((freq >= 144000) and (freq <= 148000)):
        band = 2
        if ((freq >= 144000) and (freq < 144150)):
            mode = const.CW
        elif ((freq >= 144150) and (freq < 144174)):
            mode = const.USB
        elif (freq >= 144174) and (freq < 144175):
            mode = const.DIGITAL  #FT8
        elif ((freq > 144175) and (freq < 144400)):
            mode = const.USB
        elif ((freq >= 144400) and (freq < 148000)):
            mode = None
    elif ((freq >= 220000) and (freq <= 226000)):
        band = 1.25  #1.25m
        mode = None
    elif ((freq >= 420000) and (freq <= 470000)):
        band = 0.7  #70cm
        mode = None
    elif ((freq >= 902000) and (freq <= 928000)):
        band = 0.33  #33cm US
        mode = None
    elif ((freq >= 1200000) and (freq <= 1300000)):
        band = 0.23  #23cm
        mode = None
    elif ((freq >= 2390000) and (freq <= 2450000)):
        band = 0.13  #13cm
        mode = None
    elif ((freq >= 3300000) and (freq <= 3500000)):
        band = 0.09  #9cm
        mode = None
    elif ((freq >= 5650000) and (freq <= 5850000)):
        band = 0.053  #5.3cm
        mode = None
    elif ((freq >= 10000000) and (freq <= 10500000)):
        band = 0.03  #3cm
        mode = None
    elif ((freq >= 24000000) and (freq <= 24050000)):
        band = 0.0125  #1,25cm
        mode = None
    elif ((freq >= 47000000) and (freq <= 47200000)):
        band = 0.0063  #6,3mm
        mode = None
    else:
        raise KeyError

    return {"band": band, "mode": mode}
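# --- Usage sketch (editor's addition) ----------------------------------------
# freq_to_band() raises a bare KeyError for frequencies outside the covered
# ranges, so callers usually guard the lookup.  The frequencies below are
# illustrative values in kHz.
if __name__ == '__main__':
    for khz in (14005.3, 7074.0, 123.0):
        try:
            result = freq_to_band(khz)
            print("%.1f kHz -> band %s, mode %s" % (khz, result["band"], result["mode"]))
        except KeyError:
            print("%.1f kHz is outside the covered band plan" % khz)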
#################################################
#                                               #
# This class takes in a text file input of the  #
# tax defaulters list, and parses it to extract #
# individual defaulter information such as      #
# name, address and fine amount.                #
#                                               #
# Author:  Shane Brennan                        #
# Date:    20171216                             #
# Version: 0.1                                  #
#################################################

import io
import re
import os
import csv
import sys
import json
import time
import logging
import datetime
import collections
from sets import Set
from Defaulter import Defaulter
from optparse import OptionParser
from logging.handlers import RotatingFileHandler


class ProcessDefaulters:

    def __init__(self, configDict=None):
        self.keywordList = {}
        self.keywordSet = Set()
        self.logger = None
        self.defaultersList = []

        if configDict is not None:
            self.setupLogging(configDict, False)
            self.run(configDict, False)

    def setupLogging(self, configDict, flaskApp=False):
        """ Sets up the rotating file logger for the parser. This records
            logs to a specified backup location, in this case ./logs/parser.log.
            The log file location is stored in a JSON configuration script
            passed to the CreateFeatures at runtime.

            configDict - A dict object containing configuration settings
            flaskApp   - A register to flag whether the class is being run
                         as a Flask Application
        """
        if not flaskApp:
            self.logger = logging.getLogger(__name__)
            handler = RotatingFileHandler(configDict['log-filename'],
                                          maxBytes=500000,
                                          backupCount=3)
            format = "%(asctime)s %(levelname)-8s %(message)s"
            handler.setFormatter(logging.Formatter(format))
            handler.setLevel(logging.INFO)
            self.logger.addHandler(handler)
            self.logger.setLevel(logging.INFO)
            self.logger.info('Starting ProcessDefaulters script..')
        else:
            self.logger = logging.getLogger('App')

    def run(self, configDict, flaskApp=False):
        """ Called to run the processing of the defaulters, instantiate
            the defaulters list and execute the summary analysis.

            configDict - A dict object containing configuration settings
            flaskApp   - A register to flag whether the class is being run
                         as a Flask Application
        """
        # Retrieve the data folder
        dir = configDict['data-folder']

        # Parse the folder for the files
        for root, dirs, files in os.walk(dir):
            path = root.split(os.sep)
            for filename in files:
                if filename[-4:] == '.txt':
                    if self.logger is not None:
                        self.logger.info('Reading input data file {0}'.format(filename))
                    self.processFile(root+os.sep+filename)

        # Log the output
        if self.logger is not None:
            self.logger.info('Processed {0} defaulters'.format(len(self.defaultersList)))

    def processFile(self, inputFilename):
        """ The processFile() function iterates over each section.
            Basically this operates in three passes, first to assign
            the types to each line in the input file, second to adjust
            the lines which contain the charges, and lastly to process
            each defaulter line.

            inputFilename - The name and path of the file to be processed
        """
        reader = self.unicodeReader(open(inputFilename, 'rU'))

        emptyRegex = re.compile('^\s*$')
        pageRegex = re.compile('^\s+[0-9]+$')
        headerRegex = re.compile('^(Name){1}\s+(Address)')
        notesRegex = re.compile('^\s*[a-zA-Z]{1}[a-z]+')

        lines = []
        lineTypes = []
        index = 0

        # The first pass to figure out who's a
        # defaulter and who's not.
        for line in reader:
            lines.append(line)
            if emptyRegex.match(line):
                lineTypes.append('EMPTY')
            elif pageRegex.match(line):
                lineTypes.append('PAGE')
            elif headerRegex.match(line):
                lineTypes.append('HEADER')
            elif notesRegex.match(line):
                lineTypes.append('NOTES')
            else:
                lineTypes.append('DEFAULTER')
            index += 1

        # Find and set the lines with the charges
        lastDefaulterIndex = 0
        for index, type in enumerate(lineTypes):
            if type == 'DEFAULTER':
                lastDefaulterIndex = index
            elif type == 'HEADER':
                lineTypes[lastDefaulterIndex] = 'CHARGE'

        currentCharge = 'UNKNOWN'
        for index, type in enumerate(lineTypes):
            line = lines[index]
            if type == 'CHARGE':
                currentCharge = line
            elif type == 'DEFAULTER' and lineTypes[index-1] != 'DEFAULTER':
                defaulter = Defaulter(line.rstrip())
                defaulter.addCharge(currentCharge)
                self.defaultersList.append(defaulter)
            elif type == 'DEFAULTER' and lineTypes[index-1] == 'DEFAULTER':
                self.defaultersList[-1].update(line.rstrip())

        if self.logger is not None:
            self.logger.info('Added {0} defaulters'.format(len(self.defaultersList)))

    def unicodeReader(self, fileReader):
        """ A more robust file reader that can handle nonsense
            characters, null bytes and the like.
        """
        while True:
            try:
                yield next(fileReader)
            except csv.Error:
                pass
                continue
        return

    def getNumDefaulters(self):
        return len(self.defaultersList)

    def getDefaulter(self, name):
        tokens = name.upper().replace(',', ' ').split(' ')
        for defaulter in self.defaultersList:
            result = True
            for token in tokens:
                if token not in defaulter.getName():
                    result = False
            if result:
                return defaulter
        return None


def getConfig(filename):
    """ This function reads in the JSON configuration file, found in
        the /conf folder in the repository, which contains all the setup
        details for the system.
    """
    try:
        with open(filename) as jsonFile:
            config = json.load(jsonFile)
            return config
    except Exception, err:
        print 'Error reading in configuration file {0}'.format(filename)
        print 'Description - {0}'.format(str(err))
        print 'Exiting ...'
        exit(1)


def main(argv):
    parser = OptionParser(usage="Usage: ProcessDefaulters <config-json>")
    (options, filename) = parser.parse_args()

    if len(filename) == 1:
        if os.path.exists(filename[0]):
            configDict = getConfig(filename[0])
            processor = ProcessDefaulters(configDict)
        else:
            parser.print_help()
            print '\nYou need to provide a config file as input.'
            exit(1)
    else:
        parser.print_help()
        print '\nYou need to provide a config file as input.'
        exit(1)


if __name__ == "__main__":
    sys.exit(main(sys.argv))
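# --- Example configuration (editor's addition; paths are illustrative) -------
# getConfig() loads a JSON file, and setupLogging()/run() read exactly two
# keys from it, so a minimal configuration looks something like:
#
#     {
#         "log-filename": "logs/parser.log",
#         "data-folder": "data"
#     }
#
# The script is then invoked with that file as its single argument:
#
#     python ProcessDefaulters.py conf/config.json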
# Copyright 2014: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt from rally.plugins.task.runners import rps from rally.task import runner from tests.unit import fakes from tests.unit import test RUNNERS_BASE = "rally.task.runner." RUNNERS = "rally.plugins.task.runners." @ddt.ddt class RPSScenarioRunnerTestCase(test.TestCase): def setUp(self): super(RPSScenarioRunnerTestCase, self).setUp() self.task = mock.MagicMock() @ddt.data( { "config": { "rps": { "start": 1, "end": 3, "step": 1, }, "times": 6 } }, { "config": { "rps": { "start": 1, "end": 10, "step": 1, }, "times": 55 } }, { "config": { "rps": { "start": 1, "end": 2, "step": 1, }, "times": 1 } }, { "config": { "rps": { "start": 2, "end": 1, "step": 1, }, "times": 2 }, "valid": False }, { "config": { "rps": { "start": 2, "end": 1, "step": 3, }, "times": 2 }, "valid": False }, { "config": { "times": 1, "rps": 100, "max_concurrency": 50, "max_cpu_count": 8, "timeout": 1 } }, { "config": { "rps": 0.000001 }, "valid": False }, { "config": { "rps": { "start": 1, "end": 10, "step": 1, }, "times": 55 } }, { "config": { "rps": 0, "times": 55 }, "valid": False }, { "config": { "rps": 2, "times": 55, "foo": "bar" }, "valid": False }, ) @ddt.unpack def test_validate(self, config, valid=True): results = runner.ScenarioRunner.validate("rps", None, None, config) if valid: self.assertEqual([], results) else: self.assertGreater(len(results), 0) @mock.patch(RUNNERS + "rps.LOG") @mock.patch(RUNNERS + "rps.time") @mock.patch(RUNNERS + "rps.threading.Thread") @mock.patch(RUNNERS + "rps.multiprocessing.Queue") @mock.patch(RUNNERS + "rps.runner") def test__worker_process(self, mock_runner, mock_queue, mock_thread, mock_time, mock_log): def time_side(): time_side.last += 0.03 time_side.count += 1 return time_side.last time_side.last = 0 time_side.count = 0 mock_time.time = time_side mock_thread_instance = mock.MagicMock( is_alive=mock.MagicMock(return_value=False)) mock_thread.return_value = mock_thread_instance mock_event = mock.MagicMock( is_set=mock.MagicMock(return_value=False)) mock_event_queue = mock.MagicMock() times = 4 max_concurrent = 3 fake_ram_int = iter(range(10)) context = {"users": [{"tenant_id": "t1", "credential": "c1", "id": "uuid1"}]} info = {"processes_to_start": 1, "processes_counter": 1} mock_runs_per_second = mock.MagicMock(return_value=10) rps._worker_process(mock_queue, fake_ram_int, 1, times, max_concurrent, context, "Dummy", "dummy", (), mock_event_queue, mock_event, mock_runs_per_second, 10, 1, info) self.assertEqual(times, mock_log.debug.call_count) self.assertEqual(times + 1, mock_thread.call_count) self.assertEqual(times + 1, mock_thread_instance.start.call_count) self.assertEqual(times + 1, mock_thread_instance.join.call_count) # NOTE(rvasilets): `times` + 1 here because `times` the number of # scenario repetition and one more need on "initialization" stage # of the thread stuff. 
self.assertEqual(1, mock_time.sleep.call_count) self.assertEqual(2, mock_thread_instance.is_alive.call_count) self.assertEqual(times * 4 - 1, mock_time.time.count) self.assertEqual(times, mock_runner._get_scenario_context.call_count) for i in range(times): scenario_context = mock_runner._get_scenario_context(i, context) call = mock.call( args=(mock_queue, "Dummy", "dummy", scenario_context, (), mock_event_queue), target=mock_runner._worker_thread, ) self.assertIn(call, mock_thread.mock_calls) @mock.patch(RUNNERS + "rps.runner._run_scenario_once") def test__worker_thread(self, mock__run_scenario_once): mock_queue = mock.MagicMock() mock_event_queue = mock.MagicMock() args = ("fake_cls", "fake_method_name", "fake_context_obj", {}, mock_event_queue) runner._worker_thread(mock_queue, *args) self.assertEqual(1, mock_queue.put.call_count) expected_calls = [mock.call(*args)] self.assertEqual(expected_calls, mock__run_scenario_once.mock_calls) @ddt.data( { "config": { "times": 20, "rps": 20, "timeout": 5, "max_concurrency": 15 } }, { "config": { "type": "rps", "rps": { "start": 20, "end": 40, "step": 10, }, "times": 40 } }, { "config": { "type": "rps", "rps": { "start": 10, "end": 20, "step": 2, }, "times": 40 } }, { "config": { "type": "rps", "rps": { "start": 10, "end": 15, "step": 1, }, "times": 20 } }, ) @ddt.unpack @mock.patch(RUNNERS + "rps.time.sleep") def test__run_scenario(self, mock_sleep, config): runner_obj = rps.RPSScenarioRunner(self.task, config) runner_obj._run_scenario(fakes.FakeScenario, "do_it", fakes.FakeContext({}).context, {}) self.assertEqual(config["times"], len(runner_obj.result_queue)) for result_batch in runner_obj.result_queue: for result in result_batch: self.assertIsNotNone(result) @mock.patch(RUNNERS + "rps.time.sleep") def test__run_scenario_exception(self, mock_sleep): config = {"times": 4, "rps": 10} runner_obj = rps.RPSScenarioRunner(self.task, config) runner_obj._run_scenario(fakes.FakeScenario, "something_went_wrong", fakes.FakeContext({}).context, {}) self.assertEqual(config["times"], len(runner_obj.result_queue)) for result_batch in runner_obj.result_queue: for result in result_batch: self.assertIsNotNone(result) @mock.patch(RUNNERS + "rps.time.sleep") def test__run_scenario_aborted(self, mock_sleep): config = {"times": 20, "rps": 20, "timeout": 5} runner_obj = rps.RPSScenarioRunner(self.task, config) runner_obj.abort() runner_obj._run_scenario(fakes.FakeScenario, "do_it", {}, {}) self.assertEqual(0, len(runner_obj.result_queue)) for result in runner_obj.result_queue: self.assertIsNotNone(result) @mock.patch(RUNNERS + "constant.multiprocessing.Queue") @mock.patch(RUNNERS + "rps.multiprocessing.cpu_count") @mock.patch(RUNNERS + "rps.RPSScenarioRunner._log_debug_info") @mock.patch(RUNNERS + "rps.RPSScenarioRunner._create_process_pool") @mock.patch(RUNNERS + "rps.RPSScenarioRunner._join_processes") def test_that_cpu_count_is_adjusted_properly( self, mock__join_processes, mock__create_process_pool, mock__log_debug_info, mock_cpu_count, mock_queue): samples = [ { "input": {"times": 20, "rps": 20, "max_concurrency": 10, "max_cpu_count": 1}, "real_cpu": 2, "expected": { # max_cpu_used equals to min(max_cpu_count, real_cpu) "max_cpu_used": 1, # processes_to_start equals to # min(max_cpu_used, times, max_concurrency)) "processes_to_start": 1, "rps_per_worker": 20, "times_per_worker": 20, "times_overhead": 0, "concurrency_per_worker": 10, "concurrency_overhead": 0 } }, { "input": {"times": 20, "rps": 9, "max_concurrency": 5, "max_cpu_count": 3}, "real_cpu": 4, 
"expected": { "max_cpu_used": 3, "processes_to_start": 3, "rps_per_worker": 3, "times_per_worker": 6, "times_overhead": 2, "concurrency_per_worker": 1, "concurrency_overhead": 2 } }, { "input": {"times": 10, "rps": 20, "max_concurrency": 12, "max_cpu_count": 20}, "real_cpu": 20, "expected": { "max_cpu_used": 20, "processes_to_start": 10, "rps_per_worker": 2, "times_per_worker": 1, "times_overhead": 0, "concurrency_per_worker": 1, "concurrency_overhead": 2 } }, { "input": {"times": 20, "rps": 20, "max_concurrency": 10, "max_cpu_count": 20}, "real_cpu": 20, "expected": { "max_cpu_used": 20, "processes_to_start": 10, "rps_per_worker": 2, "times_per_worker": 2, "times_overhead": 0, "concurrency_per_worker": 1, "concurrency_overhead": 0 } } ] for sample in samples: mock__log_debug_info.reset_mock() mock_cpu_count.reset_mock() mock__create_process_pool.reset_mock() mock__join_processes.reset_mock() mock_queue.reset_mock() mock_cpu_count.return_value = sample["real_cpu"] runner_obj = rps.RPSScenarioRunner(self.task, sample["input"]) runner_obj._run_scenario(fakes.FakeScenario, "do_it", {}, {}) mock_cpu_count.assert_called_once_with() mock__log_debug_info.assert_called_once_with( times=sample["input"]["times"], timeout=0, max_cpu_used=sample["expected"]["max_cpu_used"], processes_to_start=sample["expected"]["processes_to_start"], times_per_worker=sample["expected"]["times_per_worker"], times_overhead=sample["expected"]["times_overhead"], concurrency_per_worker=( sample["expected"]["concurrency_per_worker"]), concurrency_overhead=( sample["expected"]["concurrency_overhead"])) args, kwargs = mock__create_process_pool.call_args self.assertIn(sample["expected"]["processes_to_start"], args) self.assertIn(rps._worker_process, args) mock__join_processes.assert_called_once_with( mock__create_process_pool.return_value, mock_queue.return_value, mock_queue.return_value) def test_abort(self): config = {"times": 4, "rps": 10} runner_obj = rps.RPSScenarioRunner(self.task, config) self.assertFalse(runner_obj.aborted.is_set()) runner_obj.abort() self.assertTrue(runner_obj.aborted.is_set())
from rx.disposable import Disposable, CompositeDisposable, SingleAssignmentDisposable, SerialDisposable
from rx.internal import Struct
from rx.observable import Producer
import rx.linq.sink
from collections import deque
from threading import RLock


class Buffer(Producer):
    def __init__(self, source, count=0, skip=0, timeSpan=0, timeShift=0, scheduler=None):
        if skip == 0:
            skip = count

        self.source = source
        self.count = count          # length of each buffer
        self.skip = skip            # number of elements to skip between creation of buffers
        self.timeShift = timeShift
        self.timeSpan = timeSpan
        self.scheduler = scheduler

    def run(self, observer, cancel, setSink):
        if self.scheduler is None:
            sink = self.SinkWithCount(self, observer, cancel)
            setSink(sink)
            return sink.run()
        elif self.count > 0:
            sink = self.SinkWithCountAndTimeSpan(self, observer, cancel)
            setSink(sink)
            return sink.run()
        else:
            if self.timeSpan == self.timeShift:
                sink = self.SinkWithTimeSpan(self, observer, cancel)
                setSink(sink)
                return sink.run()
            else:
                sink = self.SinkWithTimerAndTimeSpan(self, observer, cancel)
                setSink(sink)
                return sink.run()

    class SinkWithCount(rx.linq.sink.Sink):
        def __init__(self, parent, observer, cancel):
            super(Buffer.SinkWithCount, self).__init__(observer, cancel)
            self.parent = parent

        def run(self):
            self.queue = deque()
            self.n = 0

            self.createWindow()

            return self.parent.source.subscribeSafe(self)

        def createWindow(self):
            s = []
            self.queue.append(s)

        def onNext(self, value):
            for s in self.queue:
                s.append(value)

            c = self.n - self.parent.count + 1

            if c >= 0 and c % self.parent.skip == 0:
                s = self.queue.popleft()

                if len(s) > 0:
                    self.observer.onNext(s)

            self.n += 1

            if self.n % self.parent.skip == 0:
                self.createWindow()

        def onError(self, exception):
            while len(self.queue) > 0:
                self.queue.popleft().clear()

            self.observer.onError(exception)
            self.dispose()

        def onCompleted(self):
            while len(self.queue) > 0:
                s = self.queue.popleft()

                if len(s) > 0:
                    self.observer.onNext(s)

            self.observer.onCompleted()
            self.dispose()

    class SinkWithTimeSpan(rx.linq.sink.Sink):
        def __init__(self, parent, observer, cancel):
            super(Buffer.SinkWithTimeSpan, self).__init__(observer, cancel)
            self.parent = parent

        def run(self):
            self.gate = RLock()
            self.list = []

            d = self.parent.scheduler.schedulePeriodic(self.parent.timeSpan, self.tick)
            s = self.parent.source.subscribeSafe(self)

            return CompositeDisposable(d, s)

        def tick(self):
            with self.gate:
                self.observer.onNext(self.list)
                self.list = []

        def onNext(self, value):
            with self.gate:
                self.list.append(value)

        def onError(self, exception):
            with self.gate:
                self.list.clear()
                self.observer.onError(exception)
                self.dispose()

        def onCompleted(self):
            with self.gate:
                self.observer.onNext(self.list)
                self.observer.onCompleted()
                self.dispose()

    class SinkWithTimerAndTimeSpan(rx.linq.sink.Sink):
        def __init__(self, parent, observer, cancel):
            super(Buffer.SinkWithTimerAndTimeSpan, self).__init__(observer, cancel)
            self.parent = parent

        def run(self):
            self.totalTime = 0
            self.nextShift = self.parent.timeShift
            self.nextSpan = self.parent.timeSpan

            self.queue = deque()
            self.gate = RLock()

            self.timerDisposable = SerialDisposable()

            self.createWindow()
            self.createTimer()

            subscription = self.parent.source.subscribeSafe(self)

            return CompositeDisposable(self.timerDisposable, subscription)

        def createWindow(self):
            s = []
            self.queue.append(s)

        def createTimer(self):
            m = SingleAssignmentDisposable()
            self.timerDisposable.disposable = m

            # decide whether the next timer boundary closes a buffer (span),
            # opens a new one (shift), or does both at once
            isSpan = False
            isShift = False

            if self.nextSpan == self.nextShift:
                isSpan = True
                isShift = True
            elif self.nextSpan < self.nextShift:
                isSpan = True
            else:
                isShift = True

            newTotalTime = self.nextSpan if isSpan else self.nextShift
            ts = newTotalTime - self.totalTime
            self.totalTime = newTotalTime

            if isSpan:
                self.nextSpan += self.parent.timeShift
            if isShift:
                self.nextShift += self.parent.timeShift

            m.disposable = self.parent.scheduler.scheduleWithRelativeAndState(
                Struct(isSpan=isSpan, isShift=isShift),
                ts,
                self.tick
            )

        def tick(self, scheduler, state):
            with self.gate:
                if state.isSpan:
                    s = self.queue.popleft()
                    self.observer.onNext(s)

                if state.isShift:
                    self.createWindow()

            self.createTimer()

            return Disposable.empty()

        def onNext(self, value):
            with self.gate:
                for s in self.queue:
                    s.append(value)

        def onError(self, exception):
            with self.gate:
                while len(self.queue) > 0:
                    self.queue.popleft().clear()

                self.observer.onError(exception)
                self.dispose()

        def onCompleted(self):
            with self.gate:
                while len(self.queue) > 0:
                    s = self.queue.popleft()
                    self.observer.onNext(s)

                self.observer.onCompleted()
                self.dispose()

    class SinkWithCountAndTimeSpan(rx.linq.sink.Sink):
        def __init__(self, parent, observer, cancel):
            super(Buffer.SinkWithCountAndTimeSpan, self).__init__(observer, cancel)
            self.parent = parent

        def run(self):
            self.gate = RLock()
            self.list = []
            self.n = 0
            self.windowId = 0

            self.timerDisposable = SerialDisposable()
            self.createTimer(0)

            subscription = self.parent.source.subscribeSafe(self)

            return CompositeDisposable(self.timerDisposable, subscription)

        def createTimer(self, wId):
            m = SingleAssignmentDisposable()
            self.timerDisposable.disposable = m

            m.disposable = self.parent.scheduler.scheduleWithRelativeAndState(
                wId,
                self.parent.timeSpan,
                self.tick
            )

        def tick(self, scheduler, wId):
            d = Disposable.empty()
            newId = 0

            with self.gate:
                # ignore ticks from timers belonging to an already-flushed window
                if wId != self.windowId:
                    return d

                self.n = 0
                self.windowId += 1
                newId = self.windowId

                res = self.list
                self.list = []
                self.observer.onNext(res)

            self.createTimer(newId)

            return d

        def onNext(self, value):
            newWindow = False
            newId = 0

            with self.gate:
                self.list.append(value)
                self.n += 1

                if self.n == self.parent.count:
                    newWindow = True
                    self.windowId += 1
                    newId = self.windowId

                    res = self.list
                    self.list = []
                    self.n = 0
                    self.observer.onNext(res)

            if newWindow:
                self.createTimer(newId)

        def onError(self, exception):
            with self.gate:
                self.list.clear()
                self.observer.onError(exception)
                self.dispose()

        def onCompleted(self):
            with self.gate:
                self.observer.onNext(self.list)
                self.observer.onCompleted()
                self.dispose()
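# --- Usage sketch (editor's addition) ----------------------------------------
# run() picks a sink from the constructor arguments: with no scheduler the
# count/skip sink is used, with a scheduler and count > 0 the count-plus-time
# sink, and otherwise one of the purely time-based sinks.  A sliding buffer of
# three items advancing one item at a time would therefore be built as below;
# some_source is a placeholder, and subscribe() is assumed to come from this
# port's Producer/Observable base class.
#
#     buffers = Buffer(some_source, count=3, skip=1)
#     buffers.subscribe(print)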
#!/usr/bin/python # -*- coding: utf-8 -*- # # FILE: FriendsFollowers.py # # Object to request friends and followers of the specified user. This request # requires cursoring. This really requires throttling because of the number of # friends/followers are very large. Therefore, throttling this on by default. # # Copyright by Author. All rights reserved. Not for reuse without # express permissions. # import sys, time, json, logging from sochi.twitter.Login import Login from sochi.twitter.TwitterBase import TwitterBase from sochi.twitter.auth_settings import * class FriendsFollowers(TwitterBase): def __init__(self, name="FriendsFollowers", logger=None, args=(), kwargs={}): TwitterBase.__init__(self, name=name, logger=logger, args=args, kwargs=kwargs) self.friends_url = "https://api.twitter.com/1.1/friends/ids.json" self.followers_url = "https://api.twitter.com/1.1/followers/ids.json" self.cursor_forward = True self.next_cursor = None self.prev_cursor = None self.set_request_type_as_friends() ## # Sets the domain to the friend search # def set_request_type_as_friends(self): if( not self.querying ): self.clear_request_params() self.set_request_domain(self.friends_url) self.set_rate_limit_resource("friends","ids") self._set_cursor() # should *almost always* throttle these friends/followers queries self.set_throttling(tr=True) ## # Sets the domain to the friend search # def set_request_type_as_followers(self): if( not self.querying ): self.clear_request_params() self.set_request_domain(self.followers_url) self.set_rate_limit_resource("followers","ids") self._set_cursor() # should *almost always* throttle these friends/followers queries self.set_throttling(tr=True) ## # Set the user (username/screen name) whose friends/followers will # be returned # def set_username(self, un=None): if( not self.querying ): # if setting the username, then unset the user_id self.set_request_param(kw="screen_name",val=un) self.set_request_param(kw="user_id",val=None) self._set_cursor() ## # Set the user (username/screen name) whose friends/followers will # be returned # def set_screen_name(self, sc=None): self.set_username(un=sc) ## # Set the user (user_id) whose friends/followers will be returned # def set_user_id(self, uid=None): if( not self.querying ): # if setting the user_id, then unset the screen_name self.set_request_param(kw="user_id",val=str(uid)) self.set_request_param(kw="screen_name",val=None) self._set_cursor() ## # Set the count, the number of ids to be returned, current default # for twitter is 5000 ids per request # def set_count(self, c=5000): if( not self.querying ): self.set_request_param(kw="count",val=str(c)) ## # Sets the cursor for the current request # def _set_cursor(self, cursor="-1"): if( cursor ): self.set_request_param(kw="cursor",val=str(cursor)) else: self.set_request_param(kw="cursor",val=None) ## # # def make_request(self): # this code is not reentrant, don't make the request twice if( self.querying ): return self.querying = True self.warning_or_error = False self.last_warning_message = {} try: self.next_cursor = -1 self.prev_cursor = -1 if( self.cursor_forward ): self._set_cursor(cursor=self.next_cursor) cursor_end = self.next_cursor else: self._set_cursor(cursor=self.prev_cursor) cursor_end = self.prev_cursor while( cursor_end ): self.set_request(domain=self.get_request_domain(), method="GET", params=self.get_request_params()) request_results = self._make_request(request=self._request_data) if( request_results or request_results.text ): try: js = request_results.json() #print "IN 
make_request() cursor=%d"%(next_cursor) #print json.dumps(js, sort_keys=True, indent=4) self.put_message(m=js) if( "error" in js ): self.next_cursor = 0 self.prev_cursor = 0 else: if( "next_cursor" in js ): self.next_cursor = js['next_cursor'] else: self.next_cursor = 0 if( "previous_cursor" in js ): self.prev_cursor = js['previous_cursor'] else: self.prev_cursor = 0 if( self.cursor_forward ): self._set_cursor(cursor=self.next_cursor) cursor_end = self.next_cursor else: self._set_cursor(cursor=self.prev_cursor) cursor_end = self.prev_cursor except ValueError, e: mesg = "JSON ValueError: "+str(e) self.logger.info(mesg) js = None cursor_end = 0 else: cursor_end = 0 self.querying = False except: self.querying = False raise return def parse_params(argv): auth = None user = None uname = None uid = None count = 0 followers = True logging = False json = False limits = False pc = 1 while( pc < len(argv) ): param = argv[pc] if( param == "-auth"): pc += 1 auth = argv[pc] if( param == "-user"): pc += 1 user = argv[pc] if( param == "-n"): pc += 1 uname = argv[pc] if( param == "-name"): pc += 1 uname = argv[pc] if( param == "-id"): pc += 1 uid = argv[pc] if( param == "-uid"): pc += 1 uid = argv[pc] if( param == "-count"): pc += 1 count = int(argv[pc]) if( param == "-friends"): followers = False if( param == "-followers"): followers = True if( param == "-log"): logging = True if( param == "-json"): json = True if( param == "-limits"): limits = True pc += 1 return {'auth':auth, 'user':user, 'followers':followers, 'uid':uid, 'uname':uname, 'count':count, 'logging':logging, 'json':json, 'limits':limits } #python FriendsFollowers.py -auth INFX547Test01 -user infxtweets -friends -name aplusk #python FriendsFollowers.py -auth INFX547Test01 -user infxtweets -friends -name apluskTV #python FriendsFollowers.py -auth INFX547Test01 -user infxtweets -friends -name NatGeo #python FriendsFollowers.py -auth INFX547Test01 -user infxtweets -friends -name timoreilly #python FriendsFollowers.py -auth INFX547Test01 -user infxtweets -followers -name dwmcphd -count 5 #python FriendsFollowers.py -auth INFX547Test01 -user infxtweets -followers -name timoreilly def usage(argv): print "USAGE: python %s -auth <appname> -user <auth_user> [-friends | -followers] -n <username> | -id <userid> [-count <count_per_request>] [-json]"%(argv[0]) sys.exit(0) def main(argv): if len(argv) < 6: usage(argv) p = parse_params(argv) print p twit = FriendsFollowers() twit.set_user_agent(agent="random") twit.set_throttling(True) if( p['logging'] ): log_fname = twit.get_preferred_logname() fmt='[%(asctime)s][%(module)s:%(funcName)s():%(lineno)d] %(levelname)s:%(message)s' logging.basicConfig(filename=log_fname,format=fmt,level=logging.INFO) log = logging.getLogger("twit_tools") if( p['followers'] ): print "Requesting FOLLOWERS" twit.set_request_type_as_followers() else: print "Requesting FRIENDS" twit.set_request_type_as_friends() lg = None if( not p['auth'] and not p['user'] ): print "Must have authenticating User and Application!" 
usage(argv) return if( p['auth'] ): app = p['auth'] app_keys = TWITTER_APP_OAUTH_PAIR(app=p['auth']) app_token_fname = TWITTER_APP_TOKEN_FNAME(app=p['auth']) lg = Login( name="FriendsFollowersLoginObj", app_name=p['auth'], app_user=p['user'], token_fname=app_token_fname) #lg.set_debug(True) ## Key and secret for specified application lg.set_consumer_key(consumer_key=app_keys['consumer_key']) lg.set_consumer_secret(consumer_secret=app_keys['consumer_secret']) lg.login() twit.set_auth_obj(obj=lg) if( p['count']>0 ): print "Requesting %d IDs per request"%(p['count']) twit.set_count(p['count']) if( p['uname'] ): print "Requesting user:",p['uname'] twit.set_username(p['uname']) elif( p['uid'] ): print "Requesting UID:",p['uid'] twit.set_user_id(long(p['uid'])) else: print "Must supply a username or user id" return twit.start_thread() twit.start_request() # The request is being made by an asynchronous thread, we need # to wait until that thread is done before we can see the result. # # This convenience routine must be called by a different thread. # In our case here, we're in the "__main__" thread which can make # this call and safely wait until the twit thread is done. twit.wait_request() if( twit.messages()==0 ): print "No results from query." m = None count = 0 total = 0 while( twit.messages()>0 or twit.query_in_process() ): m = twit.get_message() if( m ): count += 1 #print m if( p['limits'] ): print "Limits:",twit.get_rate_limit(),twit._throttling() if( ("errors" in m) and m['errors'] ): error = m['errors'][0] print "\tError %d: %s"%(error['code'],error['message']) else: id_list = m['ids'] total = total + len(id_list) if( p['json'] ): print json.dumps(m, sort_keys=True, indent=4) else: print "Messages: %d"%(count) print id_list print "IDs: %d Total IDs: %d"%(len(id_list),total) if( twit.had_warning() ): print "WARNING:",twit.get_last_warning() if( twit.had_error() ): print "ERROR:",twit.get_last_error() twit.terminate_thread() return if __name__ == '__main__': main(sys.argv)
# Copyright (c) 2013 eBay Inc. # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit Tests for qos specs internal API.""" import time from unittest import mock from oslo_db import exception as db_exc from oslo_utils import timeutils from cinder import context from cinder import db from cinder import exception from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import test from cinder.tests.unit import utils as test_utils from cinder.volume import qos_specs from cinder.volume import volume_types def fake_db_qos_specs_create(context, values): if values['name'] == 'DupQoSName': raise exception.QoSSpecsExists(specs_id=values['name']) elif values['name'] == 'FailQoSName': raise db_exc.DBError() pass def fake_db_get_vol_type(vol_type_number=1): return {'name': 'type-' + str(vol_type_number), 'id': fake.QOS_SPEC_ID, 'updated_at': None, 'created_at': None, 'deleted_at': None, 'description': 'desc', 'deleted': False, 'is_public': True, 'projects': [], 'qos_specs_id': fake.QOS_SPEC_ID, 'extra_specs': None} class QoSSpecsTestCase(test.TestCase): """Test cases for qos specs code.""" def setUp(self): super(QoSSpecsTestCase, self).setUp() self.ctxt = context.get_admin_context() def _create_qos_specs(self, name, consumer='back-end', values=None): """Create a transfer object.""" if values is None: values = {'key1': 'value1', 'key2': 'value2'} specs = {'name': name, 'consumer': consumer, 'specs': values} return db.qos_specs_create(self.ctxt, specs)['id'] def test_create(self): input = {'key1': 'value1', 'key2': 'value2', 'key3': 'value3'} ref = qos_specs.create(self.ctxt, 'FakeName', input) specs_obj = qos_specs.get_qos_specs(self.ctxt, ref['id']) specs_obj_dic = {'consumer': specs_obj['consumer'], 'id': specs_obj['id'], 'name': specs_obj['name'], 'specs': specs_obj['specs']} expected = {'consumer': 'back-end', 'id': ref['id'], 'name': 'FakeName', 'specs': input} self.assertDictEqual(expected, specs_obj_dic) # qos specs must have unique name self.assertRaises(exception.QoSSpecsExists, qos_specs.create, self.ctxt, 'FakeName', input) # consumer must be one of: front-end, back-end, both input['consumer'] = 'fake' self.assertRaises(exception.InvalidQoSSpecs, qos_specs.create, self.ctxt, 'QoSName', input) del input['consumer'] self.mock_object(db, 'qos_specs_create', fake_db_qos_specs_create) # able to catch DBError self.assertRaises(exception.QoSSpecsCreateFailed, qos_specs.create, self.ctxt, 'FailQoSName', input) def test_update(self): def fake_db_update(context, specs_id, values): raise db_exc.DBError() qos = {'consumer': 'back-end', 'specs': {'key1': 'value1'}} # qos specs must exists self.assertRaises(exception.QoSSpecsNotFound, qos_specs.update, self.ctxt, 'fake_id', qos['specs']) specs_id = self._create_qos_specs('Name', qos['consumer'], qos['specs']) qos_specs.update(self.ctxt, specs_id, {'key1': 'newvalue1', 'key2': 'value2'}) specs = qos_specs.get_qos_specs(self.ctxt, specs_id) self.assertEqual('newvalue1', specs['specs']['key1']) 
self.assertEqual('value2', specs['specs']['key2']) # consumer must be one of: front-end, back-end, both self.assertRaises(exception.InvalidQoSSpecs, qos_specs.update, self.ctxt, specs_id, {'consumer': 'not-real'}) self.mock_object(db, 'qos_specs_update', fake_db_update) self.assertRaises(exception.QoSSpecsUpdateFailed, qos_specs.update, self.ctxt, specs_id, {'key': 'new_key'}) def test_delete(self): qos_id = self._create_qos_specs('my_qos') def fake_db_associations_get(context, id): vol_types = [] if id == qos_id: vol_types = [fake_db_get_vol_type(id)] return vol_types def fake_db_delete(context, id): return {'deleted': True, 'deleted_at': timeutils.utcnow()} def fake_disassociate_all(context, id): pass self.mock_object(db, 'qos_specs_associations_get', fake_db_associations_get) self.mock_object(qos_specs, 'disassociate_all', fake_disassociate_all) self.mock_object(db, 'qos_specs_delete', fake_db_delete) self.assertRaises(exception.InvalidQoSSpecs, qos_specs.delete, self.ctxt, None) self.assertRaises(exception.QoSSpecsNotFound, qos_specs.delete, self.ctxt, 'NotFound') self.assertRaises(exception.QoSSpecsInUse, qos_specs.delete, self.ctxt, qos_id) # able to delete in-use qos specs if force=True qos_specs.delete(self.ctxt, qos_id, force=True) # Can delete without forcing when no volume types qos_id_with_no_vol_types = self._create_qos_specs('no_vol_types') qos_specs.delete(self.ctxt, qos_id_with_no_vol_types, force=False) def test_delete_keys(self): def fake_db_qos_delete_key(context, id, key): if key == 'NotFound': raise exception.QoSSpecsKeyNotFound(specs_id=id, specs_key=key) else: pass value = {'foo': 'Foo', 'bar': 'Bar', 'zoo': 'tiger'} name = 'QoSName' consumer = 'front-end' specs_id = self._create_qos_specs(name, consumer, value) qos_specs.delete_keys(self.ctxt, specs_id, ['foo', 'bar']) del value['foo'] del value['bar'] expected = {'name': name, 'id': specs_id, 'consumer': consumer, 'specs': value} specs = qos_specs.get_qos_specs(self.ctxt, specs_id) specs_dic = {'consumer': specs['consumer'], 'id': specs['id'], 'name': specs['name'], 'specs': specs['specs']} self.assertDictEqual(expected, specs_dic) self.mock_object(db, 'qos_specs_item_delete', fake_db_qos_delete_key) self.assertRaises(exception.InvalidQoSSpecs, qos_specs.delete_keys, self.ctxt, None, []) self.assertRaises(exception.QoSSpecsNotFound, qos_specs.delete_keys, self.ctxt, 'NotFound', []) self.assertRaises(exception.QoSSpecsKeyNotFound, qos_specs.delete_keys, self.ctxt, specs_id, ['NotFound']) self.assertRaises(exception.QoSSpecsKeyNotFound, qos_specs.delete_keys, self.ctxt, specs_id, ['foo', 'bar', 'NotFound']) @mock.patch.object(db, 'qos_specs_associations_get') def test_get_associations(self, mock_qos_specs_associations_get): vol_types = [fake_db_get_vol_type(x) for x in range(2)] mock_qos_specs_associations_get.return_value = vol_types specs_id = self._create_qos_specs('new_spec') res = qos_specs.get_associations(self.ctxt, specs_id) for vol_type in vol_types: expected_type = { 'association_type': 'volume_type', 'id': vol_type['id'], 'name': vol_type['name'] } self.assertIn(expected_type, res) e = exception.QoSSpecsNotFound(specs_id='Trouble') mock_qos_specs_associations_get.side_effect = e self.assertRaises(exception.CinderException, qos_specs.get_associations, self.ctxt, 'Trouble') def test_associate_qos_with_type(self): def fake_qos_specs_get(context, id): if id == 'NotFound': raise exception.QoSSpecsNotFound(specs_id=id) else: pass def fake_db_associate(context, id, type_id): if id == 'Trouble': raise 
db_exc.DBError() elif type_id == 'NotFound': raise exception.VolumeTypeNotFound(volume_type_id=type_id) pass def fake_vol_type_qos_get(type_id): if type_id == 'Invalid': return {'qos_specs': {'id': 'Invalid'}} else: return {'qos_specs': None} type_ref = volume_types.create(self.ctxt, 'TypeName') specs_id = self._create_qos_specs('QoSName') qos_specs.associate_qos_with_type(self.ctxt, specs_id, type_ref['id']) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(1, len(res)) self.assertEqual('TypeName', res[0]['name']) self.assertEqual(type_ref['id'], res[0]['id']) self.mock_object(db, 'qos_specs_associate', fake_db_associate) self.mock_object(qos_specs, 'get_qos_specs', fake_qos_specs_get) self.mock_object(volume_types, 'get_volume_type_qos_specs', fake_vol_type_qos_get) self.assertRaises(exception.VolumeTypeNotFound, qos_specs.associate_qos_with_type, self.ctxt, 'specs-id', 'NotFound') self.assertRaises(exception.QoSSpecsAssociateFailed, qos_specs.associate_qos_with_type, self.ctxt, 'Trouble', 'id') self.assertRaises(exception.QoSSpecsNotFound, qos_specs.associate_qos_with_type, self.ctxt, 'NotFound', 'id') self.assertRaises(exception.InvalidVolumeType, qos_specs.associate_qos_with_type, self.ctxt, 'specs-id', 'Invalid') def test_disassociate_qos_specs(self): def fake_db_disassociate(context, id, type_id): raise db_exc.DBError() type_ref = volume_types.create(self.ctxt, 'TypeName') specs_id = self._create_qos_specs('QoSName') qos_specs.associate_qos_with_type(self.ctxt, specs_id, type_ref['id']) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(1, len(res)) qos_specs.disassociate_qos_specs(self.ctxt, specs_id, type_ref['id']) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(0, len(res)) self.assertRaises(exception.VolumeTypeNotFound, qos_specs.disassociate_qos_specs, self.ctxt, specs_id, 'NotFound') # Verify we can disassociate specs from volume_type even if they are # not associated with no error qos_specs.disassociate_qos_specs(self.ctxt, specs_id, type_ref['id']) qos_specs.associate_qos_with_type(self.ctxt, specs_id, type_ref['id']) self.mock_object(db, 'qos_specs_disassociate', fake_db_disassociate) self.assertRaises(exception.QoSSpecsDisassociateFailed, qos_specs.disassociate_qos_specs, self.ctxt, specs_id, type_ref['id']) def test_disassociate_all(self): def fake_db_disassociate_all(context, id): if id == 'Trouble': raise db_exc.DBError() pass def fake_qos_specs_get(context, id): if id == 'NotFound': raise exception.QoSSpecsNotFound(specs_id=id) else: pass type1_ref = volume_types.create(self.ctxt, 'TypeName1') type2_ref = volume_types.create(self.ctxt, 'TypeName2') specs_id = self._create_qos_specs('QoSName') qos_specs.associate_qos_with_type(self.ctxt, specs_id, type1_ref['id']) qos_specs.associate_qos_with_type(self.ctxt, specs_id, type2_ref['id']) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(2, len(res)) qos_specs.disassociate_all(self.ctxt, specs_id) res = qos_specs.get_associations(self.ctxt, specs_id) self.assertEqual(0, len(res)) self.mock_object(db, 'qos_specs_disassociate_all', fake_db_disassociate_all) self.mock_object(qos_specs, 'get_qos_specs', fake_qos_specs_get) self.assertRaises(exception.QoSSpecsDisassociateFailed, qos_specs.disassociate_all, self.ctxt, 'Trouble') def test_get_all_specs(self): qos_specs_list = [{'name': 'Specs1', 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'consumer': 'both', 'specs': {'key1': 'value1', 'key2': 'value2', 'key3': 
'value3'}}, {'name': 'Specs2', 'created_at': None, 'updated_at': None, 'deleted_at': None, 'deleted': None, 'consumer': 'both', 'specs': {'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'key4': 'value4'}}] for index, qos_specs_dict in enumerate(qos_specs_list): qos_specs_id = self._create_qos_specs( qos_specs_dict['name'], qos_specs_dict['consumer'], qos_specs_dict['specs']) qos_specs_dict['id'] = qos_specs_id specs = db.qos_specs_get(self.ctxt, qos_specs_id) qos_specs_list[index]['created_at'] = test_utils.time_format( specs['created_at']) res = qos_specs.get_all_specs(self.ctxt) self.assertEqual(len(qos_specs_list), len(res)) qos_res_simple_dict = [] # Need to make list of dictionaries instead of VOs for assertIn to work for qos in res: qos_res_simple_dict.append( qos.obj_to_primitive()['versioned_object.data']) for qos_spec in qos_specs_list: self.assertIn(qos_spec, qos_res_simple_dict) def test_get_qos_specs(self): one_time_value = str(int(time.time())) specs = {'key1': one_time_value, 'key2': 'value2', 'key3': 'value3'} qos_id = self._create_qos_specs('Specs1', 'both', specs) specs = qos_specs.get_qos_specs(self.ctxt, qos_id) self.assertEqual(one_time_value, specs['specs']['key1']) self.assertRaises(exception.InvalidQoSSpecs, qos_specs.get_qos_specs, self.ctxt, None)
import re import os import csv import xml.etree.ElementTree as ET import logging import glob import json from datetime import datetime from collections import OrderedDict from bs4 import BeautifulSoup # html parser from io import open class RunParser(object): """Parses an Illumina run folder. It generates data for statusdb notable attributes : :RunInfoParser runinfo: see RunInfo :RunParametersParser runparameters: see RunParametersParser :SampleSheetParser samplesheet: see SampleSheetParser :LaneBarcodeParser lanebarcodes: see LaneBarcodeParser """ def __init__(self, path): if os.path.exists(path): self.log = logging.getLogger(__name__) self.path = path self.parse() self.create_db_obj() else: raise os.error(" flowcell cannot be found at {0}".format(path)) def parse(self, demultiplexingDir='Demultiplexing'): """Tries to parse as many files as possible from a run folder""" pattern = r'(\d{6})_([ST-]*\w+\d+)_\d+_([AB]?)([A-Z0-9\-]+)' m = re.match(pattern, os.path.basename(os.path.abspath(self.path))) instrument = m.group(2) # NextSeq2000 has a different FC ID pattern that ID contains the first position letter if "VH" in instrument: fc_name = m.group(3) + m.group(4) else: fc_name = m.group(4) # For MiSeq we parse the samplesheet "run_folder/SampleSheet_copy.csv" if "M0" in instrument: ss_path = os.path.join(self.path, 'SampleSheet_copy.csv') else: ss_path = os.path.join(self.path, 'SampleSheet.csv') rinfo_path = os.path.join(self.path, 'RunInfo.xml') rpar_path = os.path.join(self.path, 'runParameters.xml') cycle_times_log = os.path.join(self.path, 'Logs', "CycleTimes.txt") # These three are generate post-demultiplexing and could thus # potentially be replaced by reading from stats.json lb_path = os.path.join(self.path, demultiplexingDir, 'Reports', 'html', fc_name, 'all', 'all', 'all', 'laneBarcode.html') ln_path = os.path.join(self.path, demultiplexingDir, 'Reports', 'html', fc_name, 'all', 'all', 'all', 'lane.html') undeterminedStatsFolder = os.path.join(self.path, demultiplexingDir, "Stats") json_path = os.path.join(self.path, demultiplexingDir, "Stats", "Stats.json") try: self.runinfo = RunInfoParser(rinfo_path) except OSError as e: self.log.info(str(e)) self.runinfo = None try: self.runparameters = RunParametersParser(rpar_path) except OSError as e: self.log.info(str(e)) self.runparameters = None try: self.samplesheet = SampleSheetParser(ss_path) except OSError as e: self.log.info(str(e)) self.samplesheet = None try: self.lanebarcodes = LaneBarcodeParser(lb_path) except OSError as e: self.log.info(str(e)) self.lanebarcodes = None try: self.lanes = LaneBarcodeParser(ln_path) except OSError as e: self.log.info(str(e)) self.lanes = None try: self.undet = DemuxSummaryParser(undeterminedStatsFolder) except OSError as e: self.log.info(str(e)) self.undet = None try: self.time_cycles = CycleTimesParser(cycle_times_log) except OSError as e: self.log.info(str(e)) self.time_cycles = None try: self.json_stats = StatsParser(json_path) except OSError as e: self.log.info(str(e)) self.json_stats = None def create_db_obj(self): self.obj = {} bits = os.path.basename(os.path.abspath(self.path)).split('_') name = "{0}_{1}".format(bits[0], bits[-1]) self.obj['name'] = name if self.runinfo: self.obj['RunInfo'] = self.runinfo.data if self.runinfo.recipe: self.obj['run_setup'] = self.runinfo.recipe if self.runparameters: self.obj.update(self.runparameters.data) if self.runparameters.recipe: self.obj['run_setup'] = self.runparameters.recipe if self.samplesheet: self.obj['samplesheet_csv'] = self.samplesheet.data 
if self.lanebarcodes: self.obj['illumina'] = {} self.obj['illumina']['Demultiplex_Stats'] = {} self.obj['illumina']['Demultiplex_Stats']['Barcode_lane_statistics'] = \ self.lanebarcodes.sample_data self.obj['illumina']['Demultiplex_Stats']['Flowcell_stats'] = \ self.lanebarcodes.flowcell_data if self.lanes: self.obj['illumina']['Demultiplex_Stats']['Lanes_stats'] = \ self.lanes.sample_data if self.undet: self.obj['Undetermined'] = self.undet.result if self.time_cycles: for cycle in self.time_cycles.cycles: for k, v in cycle.items(): cycle[k] = str(v) self.obj['time cycles'] = self.time_cycles.cycles if self.json_stats: self.obj['Json_Stats'] = self.json_stats.data class DemuxSummaryParser(object): def __init__(self, path): if os.path.exists(path): self.path = path self.result = {} self.TOTAL = {} self.parse() else: raise os.error("DemuxSummary folder {0} cannot be found".format(path)) def parse(self): # will only save the 50 more frequent indexes pattern = re.compile('DemuxSummaryF1L([0-9]).txt') for file in glob.glob(os.path.join(self.path, 'DemuxSummaryF1L?.txt')): lane_nb = pattern.search(file).group(1) self.result[lane_nb] = OrderedDict() self.TOTAL[lane_nb] = 0 with open(file, newline='') as f: undeterminePart = False for line in f: if not undeterminePart: if "### Columns:" in line: undeterminePart = True else: # it means I am readng the index_Sequence Hit_Count components = line.rstrip().split('\t') if len(self.result[lane_nb].keys()) < 50: self.result[lane_nb][components[0]] = int(components[1]) self.TOTAL[lane_nb] += int(components[1]) class LaneBarcodeParser(object): def __init__(self, path): if os.path.exists(path): self.path = path self.parse() else: raise os.error(" laneBarcode.html cannot be found at {0}".format(path)) def parse(self): self.sample_data = [] self.flowcell_data = {} with open(self.path, newline='') as htmlfile: bsoup = BeautifulSoup(htmlfile, 'html.parser') flowcell_table = bsoup.find_all('table')[1] lane_table = bsoup.find_all('table')[2] keys = [] values = [] for th in flowcell_table.find_all('th'): keys.append(th.text) for td in flowcell_table.find_all('td'): values.append(td.text) self.flowcell_data = dict(zip(keys, values)) keys = [] rows = lane_table.find_all('tr') for row in rows[0:]: if len(row.find_all('th')): # this is the header row for th in row.find_all('th'): key = th.text.replace( '<br/>', ' ').replace( '&gt;', '>') keys.append(key) elif len(row.find_all('td')): values = [] for td in row.find_all('td'): values.append(td.text.replace('NaN', '0') if td.text else '0') d = dict(zip(keys, values)) self.sample_data.append(d) class SampleSheetParser(object): """Parses Samplesheets, with their fake csv format. Should be instancied with the samplesheet path as an argument. 
    .header : a dict containing the info located under the [Header] section
    .settings : a dict containing the data from the [Settings] section
    .reads : a list of the values in the [Reads] section
    .data : a list of the values under the [Data] section
    .datafields : a list of field names for the data section"""
    def __init__(self, path):
        self.log = logging.getLogger(__name__)
        if os.path.exists(path):
            self.parse(path)
        else:
            raise os.error("sample sheet cannot be found at {0}".format(path))

    def parse(self, path):
        header = {}
        reads = []
        settings = []
        csvlines = []
        data = []
        flag = 'data'  # in case of a HiSeq samplesheet only the data section is present
        separator = ","
        with open(path, newline='') as csvfile:
            # Ignore empty lines (for instance the Illumina Experiment Manager
            # generates sample sheets with empty lines)
            lines = [nonempty_line for nonempty_line in
                     (line.rstrip() for line in csvfile) if nonempty_line]
            # Now parse the file
            for line in lines:
                if '[Header]' in line:
                    flag = 'HEADER'
                elif '[Reads]' in line:
                    flag = 'READS'
                elif '[Settings]' in line:
                    flag = 'SETTINGS'
                elif '[Data]' in line:
                    flag = 'data'
                else:
                    tokens = line.split(separator)
                    if flag == 'HEADER':
                        if len(tokens) < 2:
                            self.log.error("file {} does not have a correct "
                                           "format.".format(path))
                            raise RuntimeError("Could not parse the samplesheet, "
                                               "the file does not seem to have "
                                               "a correct format.")
                        header[tokens[0]] = tokens[1]
                    elif flag == 'READS':
                        reads.append(tokens[0])
                    elif flag == 'SETTINGS':
                        settings.append(tokens[0])
                    elif flag == 'data':
                        csvlines.append(line)

            reader = csv.DictReader(csvlines)
            for row in reader:
                linedict = {}
                for field in reader.fieldnames:
                    linedict[field] = row[field]
                data.append(linedict)

        self.datafields = reader.fieldnames
        self.dfield_sid = self._get_pattern_datafield(r'sample_?id')
        self.dfield_snm = self._get_pattern_datafield(r'sample_?name')
        self.dfield_proj = self._get_pattern_datafield(r'.*?project')
        self.data = data
        self.settings = settings
        self.header = header
        self.reads = reads

    def _get_pattern_datafield(self, pattern):
        for fld in self.datafields:
            if re.search(pattern, fld, re.IGNORECASE):
                return fld
        return ''


class RunInfoParser(object):
    """Parses RunInfo.xml. Should be instantiated with the file path as an argument.

    .data : a list of hand-picked values :
        -Run ID
        -Run Number
        -Instrument
        -Flowcell name
        -Run Date
        -Reads metadata
        -Flowcell layout
    """
    def __init__(self, path):
        self.data = {}
        self.recipe = None
        self.path = path
        if os.path.exists(path):
            self.parse()
        else:
            raise os.error("run info cannot be found at {0}".format(path))

    def parse(self):
        data = {}
        tree = ET.parse(self.path)
        root = tree.getroot()
        run = root.find('Run')
        data['Id'] = run.get('Id')
        data['Number'] = run.get('Number')
        data['Instrument'] = run.find('Instrument').text
        data['Flowcell'] = run.find('Flowcell').text
        # Change NovaSeq date format from
        # 10/17/2017 10:59:16 AM to 171017 (yymmdd)
        if len(run.find('Date').text) > 6:
            try:
                data['Date'] = datetime.strptime(
                    run.find('Date').text.split(" ")[0],
                    "%m/%d/%Y").strftime("%y%m%d")
            except ValueError:
                data['Date'] = datetime.strptime(
                    run.find('Date').text,
                    "%Y-%m-%dT%H:%M:%SZ").strftime("%y%m%d")
        else:
            data['Date'] = run.find('Date').text
        data['Reads'] = []
        for read in run.find('Reads').findall('Read'):
            data['Reads'].append(read.attrib)
        layout = run.find('FlowcellLayout')
        data['FlowcellLayout'] = layout.attrib
        self.data = data
        self.recipe = make_run_recipe(self.data.get('Reads', {}))

    def get_read_configuration(self):
        """Return a list of dicts containing the read configuration."""
        readConfig = []
        try:
            readConfig = self.data['Reads']
            return sorted(readConfig, key=lambda r: int(r.get("Number", 0)))
        except KeyError:
            raise RuntimeError('Reads section not present in RunInfo. '
                               'Check the FC folder.')


class RunParametersParser(object):
    """Parses a runParameters.xml file.

    This is a much more general xml parser, it will build a dict from the
    xml data. Attributes might be replaced if children nodes have the same
    tag as the attributes. This does not happen in the current xml file,
    but if you're planning to reuse this, it may be of interest.
    """
    def __init__(self, path):
        self.data = {}
        self.recipe = None
        self.path = path
        if os.path.exists(path):
            self.parse()
        else:
            raise os.error("RunParameters file cannot be found at {0}".format(path))

    def parse(self):
        tree = ET.parse(self.path)
        root = tree.getroot()
        self.data = xml_to_dict(root)
        self.recipe = make_run_recipe(self.data.get(
            'Setup', {}).get(
            'Reads', {}).get(
            'Read', {}))


def make_run_recipe(reads):
    """Based on either runParameters or RunInfo, gathers the information as
    to how many reads are made and their length, e.g.
2x150""" nb_reads = 0 nb_indexed_reads = 0 numCycles = 0 for read in reads: nb_reads += 1 if read['IsIndexedRead'] == 'Y': nb_indexed_reads += 1 else: if numCycles and numCycles != read['NumCycles']: logging.warn("NumCycles in not coherent") else: numCycles = read['NumCycles'] if reads: return "{0}x{1}".format(nb_reads-nb_indexed_reads, numCycles) return None def xml_to_dict(root): current = None children = list(root) if children: current = {} duplicates = {} for child in children: if len(root.findall(child.tag)) > 1: if child.tag not in duplicates: duplicates[child.tag] = [] lower = xml_to_dict(child) duplicates[child.tag].extend(list(lower.values())) current.update(duplicates) else: lower = xml_to_dict(child) current.update(lower) if root.attrib: if current: if [x in current for x in root.attrib]: current.update(root.attrib) else: current.update({'attribs': root.attribs}) else: current = root.attrib if root.text and root.text.strip() != "": if current: if 'text' not in current: current['text'] = root.text else: # you're really pushing here, pal current['xml_text'] = root.text else: current = root.text return {root.tag: current} class CycleTimesParser(object): def __init__(self, path): if os.path.exists(path): self.path = path self.cycles = [] self.parse() else: raise os.error("file {0} cannot be found".format(path)) def parse(self): """ parse CycleTimes.txt and return ordered list of cycles CycleTimes.txt contains records: <date> <time> <barcode> <cycle> <info> one cycle contains a few records (defined by <cycle>) parser goes over records and saves the first record of each cycle as start time and the last record of each cycle as end time """ data = [] date_format = '%m/%d/%Y-%H:%M:%S.%f' with open(self.path, 'r') as file: cycle_times = file.readlines() # if file is empty, return if not cycle_times: return # first line is header, don't read it for cycle_line in cycle_times[1:]: # split line into strings cycle_list = cycle_line.split() cycle_time_obj = {} # parse datetime cycle_time_obj['datetime'] = datetime.strptime( "{date}-{time}".format( date=cycle_list[0], time=cycle_list[1]), date_format) # parse cycle number cycle_time_obj['cycle'] = int(cycle_list[3]) # add object in the list data.append(cycle_time_obj) # take the first record as current cycle current_cycle = { 'cycle_number': data[0]['cycle'], 'start': data[0]['datetime'], 'end': data[0]['datetime'] } # compare each record with current cycle (except the first one) for record in data[1:]: # if we are at the same cycle if record['cycle'] == current_cycle['cycle_number']: # override end of cycle with current record current_cycle['end'] = record['datetime'] # if a new cycle starts else: # save previous cycle self.cycles.append(current_cycle) # initialize new current_cycle current_cycle = { 'cycle_number': record['cycle'], 'start': record['datetime'], 'end': record['datetime'] } # the last records is not saved inside the loop if current_cycle not in self.cycles: self.cycles.append(current_cycle) class StatsParser(object): def __init__(self, path): if os.path.exists(path): self.path = path self.cycles = [] self.data = None self.parse() else: raise os.error("file {0} cannot be found".format(path)) def parse(self): with open(self.path) as data: self.data = json.load(data)
#!/usr/bin/python import cwiid import sys import time from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor import atexit # create a default object, no changes to I2C address or frequency mh = Adafruit_MotorHAT(addr=0x60) mesg = False led = 0 rpt_mode = 0 rumble = 0 erect_state = 0 # Thing is down (1 is up) erect_change = 0.0 erect_thres = 1.0 turbo = False # Motor state 0 is stopped, 50 is normal, 100 is with trigger down left_motor_state = 0 right_motor_state = 0 def main(): #Connect to address given on command-line, if present print 'Put Wiimote in discoverable mode now (press 1+2)...' global wiimote global rpt_mode global connected connected = False print("Trying Connection") print ("Press 1+2") while not connected: try: wiimote = cwiid.Wiimote() print("Connected!") connected = True except: print("Trying Again, please press 1+2") time.sleep(1) wiimote.mesg_callback = callback print("For Thing we enable ACC and Button") rpt_mode ^= cwiid.RPT_ACC rpt_mode ^= cwiid.RPT_BTN # Enable the messages in callback wiimote.enable(cwiid.FLAG_MESG_IFC); # print(dir(cwiid)) # sys.exit(0) wiimote.rpt_mode = rpt_mode exit = 0 while not exit: c = sys.stdin.read(1) exit = handle_input(wiimote, c) wiimote.close() def handle_buttons(buttons): global left_motor_state global right_motor_state global turbo new_left_state = 0 new_right_state = 0 if (buttons & cwiid.BTN_B): print("Setting turbo True") turbo = True else: print("Setting turbo False") turbo = False if (buttons & cwiid.BTN_UP): if turbo == True: print("Fast Forward!") new_left_state += 100 new_right_state += 100 else: print("Slow forward") new_left_state += 50 new_right_state += 50 elif (buttons & cwiid.BTN_DOWN): if turbo == True: new_left_state += -100 new_right_state += -100 else: new_left_state += -50 new_right_state += -50 elif (buttons & cwiid.BTN_LEFT): if turbo == True: new_right_state += 50 new_left_state += -50 else: new_right_state += 25 new_left_state += -25 elif (buttons & cwiid.BTN_RIGHT): if turbo == True: new_right_state += -50 new_left_state += 50 else: new_right_state += -25 new_left_state += 25 else: # No directions are pressed, if the state of either motor is > 0 then we clear new_right_state = 0 new_left_state = 0 if new_right_state > 100: new_right_state = 100 elif new_right_state < -100: new_right_state = -100 if new_left_state > 100: new_left_state = 100 elif new_left_state < -100: new_left_state = -100 if left_motor_state != new_left_state or right_motor_state != new_right_state: left_motor_state = new_left_state right_motor_state = new_right_state setMotors() if (buttons & cwiid.BTN_1): print("Button 1") elif (buttons & cwiid.BTN_2): print("Button 2") elif (buttons & cwiid.BTN_PLUS): print("Plus") elif (buttons & cwiid.BTN_MINUS): print("Minus") elif (buttons & cwiid.BTN_A): print("A") elif (buttons & cwiid.BTN_HOME): print("Home") #BTN_1', 'BTN_2', 'BTN_A', 'BTN_B', 'BTN_DOWN', 'BTN_HOME', 'BTN_LEFT', 'BTN_MINUS', 'BTN_PLUS', 'BTN_RIGHT', 'BTN_UP', def speedTranslator(speed): sp = 0 di = Adafruit_MotorHAT.FORWARD if speed == 0: pass elif abs(speed) == 25: sp = 64 elif abs(speed) == 50: sp = 128 elif abs(speed) == 75: sp = 192 elif abs(speed) == 100: sp = 255 if speed < 0: di = Adafruit_MotorHAT.BACKWARD return sp, di def setMotors(): global left_motor_state global right_motor_state ls, ld = speedTranslator(left_motor_state) rs, rd = speedTranslator(right_motor_state) print("Changing Speed!-----------------------") print("\tSetting the left motor to speed: %s - Direction: %s" % (ls, ld)) print("\tSetting the 
right motor to speed: %s - Direction: %s" % (rs, rd)) print("") lm = mh.getMotor(1) rm = mh.getMotor(2) # set the speed to start, from 0 (off) to 255 (max speed) lm.setSpeed(ls) lm.run(ld) rm.setSpeed(rs) rm.run(rd) def callback(mesg_list, time): global erect_state global erect_change global erect_thres for mesg in mesg_list: if mesg[0] == cwiid.MESG_BTN: handle_buttons(mesg[1]) # print("Time: %s" % time) # print 'Button Report: %.4X' % mesg[1] elif mesg[0] == cwiid.MESG_ACC: x = mesg[1][cwiid.X] y = mesg[1][cwiid.Y] z = mesg[1][cwiid.Z] thing_state = 0 # Down thing_state = 1 # Up if y > 115: thing_state = 0 elif y < 110 : thing_state = 1 if thing_state != erect_state: if time - erect_change > erect_thres: print("Time: %s" % time) if thing_state == 1: print("Putting Thing up") else: print("Putting Thing Down") erect_state = thing_state erect_change = time # print 'Acc Report: x=%d, y=%d, z=%d' % \ # (mesg[1][cwiid.X], mesg[1][cwiid.Y], mesg[1][cwiid.Z]) else: print 'Unknown Report' # recommended for auto-disabling motors on shutdown! def turnOffMotors(): mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE) mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE) # mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE) # mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE) if __name__ == "__main__": main()
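
# --- Editor's note (sketch): expected speedTranslator() mapping ------------
# speedTranslator() maps the coarse -100..100 motor states used above onto
# the Motor HAT's 0-255 PWM range plus a direction constant. A quick
# self-check, callable by hand (it is not wired into main(), and the module
# needs the HAT present at import time because mh is created at the top):
def _check_speed_mapping():
    assert speedTranslator(0) == (0, Adafruit_MotorHAT.FORWARD)
    assert speedTranslator(50) == (128, Adafruit_MotorHAT.FORWARD)
    assert speedTranslator(100) == (255, Adafruit_MotorHAT.FORWARD)
    assert speedTranslator(-25) == (64, Adafruit_MotorHAT.BACKWARD)
    assert speedTranslator(-100) == (255, Adafruit_MotorHAT.BACKWARD)
    print("speed mapping OK")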
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import mock import six import testtools from stackalytics.processor import record_processor from stackalytics.processor import runtime_storage from stackalytics.processor import utils RELEASES = [ { 'release_name': 'prehistory', 'end_date': utils.date_to_timestamp('2011-Apr-21') }, { 'release_name': 'Diablo', 'end_date': utils.date_to_timestamp('2011-Sep-08') }, { 'release_name': 'Zoo', 'end_date': utils.date_to_timestamp('2035-Sep-08') }, ] REPOS = [ { "branches": ["master"], "module": "stackalytics", "project_type": "stackforge", "uri": "git://github.com/stackforge/stackalytics.git" } ] class TestRecordProcessor(testtools.TestCase): def setUp(self): super(TestRecordProcessor, self).setUp() self.read_json_from_uri_patch = mock.patch( 'stackalytics.processor.utils.read_json_from_uri') self.read_launchpad = self.read_json_from_uri_patch.start() self.lp_profile_by_launchpad_id_patch = mock.patch( 'stackalytics.processor.launchpad_utils.' 'lp_profile_by_launchpad_id') self.lp_profile_by_launchpad_id = ( self.lp_profile_by_launchpad_id_patch.start()) self.lp_profile_by_launchpad_id.return_value = None self.lp_profile_by_email_patch = mock.patch( 'stackalytics.processor.launchpad_utils.lp_profile_by_email') self.lp_profile_by_email = ( self.lp_profile_by_email_patch.start()) self.lp_profile_by_email.return_value = None def tearDown(self): super(TestRecordProcessor, self).tearDown() self.read_json_from_uri_patch.stop() self.lp_profile_by_launchpad_id_patch.stop() self.lp_profile_by_email_patch.stop() # get_company_by_email def test_get_company_by_email_mapped(self): record_processor_inst = self.make_record_processor( companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}] ) email = '[email protected]' res = record_processor_inst._get_company_by_email(email) self.assertEqual('IBM', res) def test_get_company_by_email_with_long_suffix_mapped(self): record_processor_inst = self.make_record_processor( companies=[{'company_name': 'NEC', 'domains': ['nec.co.jp']}] ) email = '[email protected]' res = record_processor_inst._get_company_by_email(email) self.assertEqual('NEC', res) def test_get_company_by_email_with_long_suffix_mapped_2(self): record_processor_inst = self.make_record_processor( companies=[{'company_name': 'NEC', 'domains': ['nec.co.jp', 'nec.com']}] ) email = '[email protected]' res = record_processor_inst._get_company_by_email(email) self.assertEqual('NEC', res) def test_get_company_by_email_not_mapped(self): record_processor_inst = self.make_record_processor() email = '[email protected]' res = record_processor_inst._get_company_by_email(email) self.assertEqual(None, res) # get_lp_info def test_get_lp_info_invalid_email(self): self.read_launchpad.return_value = None record_processor_inst = self.make_record_processor(users=[]) self.assertEqual((None, None), record_processor_inst._get_lp_info('error.root')) # commit processing def test_process_commit_existing_user(self): record_processor_inst = 
self.make_record_processor( users=[ { 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]', '[email protected]'], 'companies': [ {'company_name': '*independent', 'end_date': 1234567890}, {'company_name': 'NEC', 'end_date': 0}, ] } ]) processed_commit = list(record_processor_inst.process( generate_commits(author_email='[email protected]', author_name='John Doe')))[0] expected_commit = { 'launchpad_id': 'john_doe', 'author_email': '[email protected]', 'author_name': 'John Doe', 'company_name': 'NEC', } self.assertRecordsMatch(expected_commit, processed_commit) def test_process_commit_existing_user_old_job(self): record_processor_inst = self.make_record_processor( users=[ { 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]', '[email protected]'], 'companies': [ {'company_name': '*independent', 'end_date': 1234567890}, {'company_name': 'NEC', 'end_date': 0}, ] } ]) processed_commit = list(record_processor_inst.process( generate_commits(author_email='[email protected]', author_name='John Doe', date=1000000000)))[0] expected_commit = { 'launchpad_id': 'john_doe', 'author_email': '[email protected]', 'author_name': 'John Doe', 'company_name': '*independent', } self.assertRecordsMatch(expected_commit, processed_commit) def test_process_commit_existing_user_new_email_known_company(self): # User is known to LP, his email is new to us, and maps to other # company. Should return other company instead of those mentioned # in user profile record_processor_inst = self.make_record_processor( users=[ {'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]'], 'companies': [{'company_name': 'NEC', 'end_date': 0}]} ], companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}], lp_info={'[email protected]': {'name': 'john_doe', 'display_name': 'John Doe'}}) processed_commit = list(record_processor_inst.process( generate_commits(author_email='[email protected]', author_name='John Doe')))[0] expected_commit = { 'launchpad_id': 'john_doe', 'author_email': '[email protected]', 'author_name': 'John Doe', 'company_name': 'IBM', } self.assertRecordsMatch(expected_commit, processed_commit) self.assertIn('[email protected]', utils.load_user( record_processor_inst.runtime_storage_inst, 'john_doe')['emails']) def test_process_commit_existing_user_new_email_unknown_company(self): # User is known to LP, but his email is new to us. 
Should match # the user and return company from user profile record_processor_inst = self.make_record_processor( users=[ {'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]'], 'companies': [{'company_name': 'NEC', 'end_date': 0}]} ], companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}], lp_info={'[email protected]': {'name': 'john_doe', 'display_name': 'John Doe'}}) processed_commit = list(record_processor_inst.process( generate_commits(author_email='[email protected]', author_name='John Doe')))[0] expected_commit = { 'launchpad_id': 'john_doe', 'author_email': '[email protected]', 'author_name': 'John Doe', 'company_name': 'NEC', } self.assertRecordsMatch(expected_commit, processed_commit) self.assertIn('[email protected]', utils.load_user( record_processor_inst.runtime_storage_inst, 'john_doe')['emails']) def test_process_commit_existing_user_new_email_known_company_update(self): record_processor_inst = self.make_record_processor( users=[ {'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]'], 'companies': [{'company_name': '*independent', 'end_date': 0}]} ], companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}], lp_info={'[email protected]': {'name': 'john_doe', 'display_name': 'John Doe'}}) processed_commit = list(record_processor_inst.process( generate_commits(author_email='[email protected]', author_name='John Doe')))[0] expected_commit = { 'launchpad_id': 'john_doe', 'author_email': '[email protected]', 'author_name': 'John Doe', 'company_name': 'IBM', } self.assertRecordsMatch(expected_commit, processed_commit) user = utils.load_user( record_processor_inst.runtime_storage_inst, 'john_doe') self.assertIn('[email protected]', user['emails']) self.assertEqual('IBM', user['companies'][0]['company_name'], message='User affiliation should be updated') def test_process_commit_new_user(self): # User is known to LP, but new to us # Should add new user and set company depending on email record_processor_inst = self.make_record_processor( companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}], lp_info={'[email protected]': {'name': 'john_doe', 'display_name': 'John Doe'}}) processed_commit = list(record_processor_inst.process( generate_commits(author_email='[email protected]', author_name='John Doe')))[0] expected_commit = { 'launchpad_id': 'john_doe', 'author_email': '[email protected]', 'author_name': 'John Doe', 'company_name': 'IBM', } self.assertRecordsMatch(expected_commit, processed_commit) user = utils.load_user( record_processor_inst.runtime_storage_inst, 'john_doe') self.assertIn('[email protected]', user['emails']) self.assertEqual('IBM', user['companies'][0]['company_name']) def test_process_commit_new_user_unknown_to_lb(self): # User is new to us and not known to LP # Should set user name and empty LPid record_processor_inst = self.make_record_processor( companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}]) processed_commit = list(record_processor_inst.process( generate_commits(author_email='[email protected]', author_name='John Doe')))[0] expected_commit = { 'launchpad_id': None, 'author_email': '[email protected]', 'author_name': 'John Doe', 'company_name': 'IBM', } self.assertRecordsMatch(expected_commit, processed_commit) user = utils.load_user( record_processor_inst.runtime_storage_inst, '[email protected]') self.assertIn('[email protected]', user['emails']) self.assertEqual('IBM', user['companies'][0]['company_name']) self.assertEqual(None, 
user['launchpad_id']) # process records complex scenarios def test_process_blueprint_one_draft_spawned_lp_doesnt_know_user(self): # In: blueprint record # LP doesn't know user # Out: blueprint-draft record # new user profile created record_processor_inst = self.make_record_processor() processed_records = list(record_processor_inst.process([ {'record_type': 'bp', 'id': 'mod:blueprint', 'self_link': 'http://launchpad.net/blueprint', 'owner': 'john_doe', 'date_created': 1234567890} ])) self.assertRecordsMatch( {'record_type': 'bpd', 'launchpad_id': 'john_doe', 'author_name': 'john_doe', 'company_name': '*independent'}, processed_records[0]) user = utils.load_user( record_processor_inst.runtime_storage_inst, 'john_doe') self.assertEqual({ 'seq': 1, 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'john_doe', 'emails': [], 'companies': [{'company_name': '*independent', 'end_date': 0}] }, user) def test_process_blueprint_one_draft_spawned_lp_knows_user(self): # In: blueprint record # LP knows user # Out: blueprint-draft record # new user profile created, name is taken from LP profile record_processor_inst = self.make_record_processor( lp_user_name={ 'john_doe': {'name': 'john_doe', 'display_name': 'John Doe'}}) processed_records = list(record_processor_inst.process([ {'record_type': 'bp', 'id': 'mod:blueprint', 'self_link': 'http://launchpad.net/blueprint', 'owner': 'john_doe', 'date_created': 1234567890} ])) self.assertRecordsMatch( {'record_type': 'bpd', 'launchpad_id': 'john_doe', 'author_name': 'John Doe', 'company_name': '*independent'}, processed_records[0]) user = utils.load_user( record_processor_inst.runtime_storage_inst, 'john_doe') self.assertEqual({ 'seq': 1, 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': [], 'companies': [{'company_name': '*independent', 'end_date': 0}] }, user) def test_process_blueprint_then_review(self): record_processor_inst = self.make_record_processor( lp_user_name={ 'john_doe': {'name': 'john_doe', 'display_name': 'John Doe'}}) processed_records = list(record_processor_inst.process([ {'record_type': 'bp', 'id': 'mod:blueprint', 'self_link': 'http://launchpad.net/blueprint', 'owner': 'john_doe', 'date_created': 1234567890}, {'record_type': 'review', 'id': 'I1045730e47e9e6ad31fcdfbaefdad77e2f3b2c3e', 'subject': 'Fix AttributeError in Keypair._add_details()', 'owner': {'name': 'John Doe', 'email': '[email protected]', 'username': 'john_doe'}, 'createdOn': 1379404951, 'module': 'nova', 'branch': 'master'} ])) self.assertRecordsMatch( {'record_type': 'bpd', 'launchpad_id': 'john_doe', 'author_name': 'John Doe', 'company_name': '*independent'}, processed_records[0]) self.assertRecordsMatch( {'record_type': 'review', 'launchpad_id': 'john_doe', 'author_name': 'John Doe', 'author_email': '[email protected]', 'company_name': '*independent'}, processed_records[1]) user = {'seq': 1, 'core': [], 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]'], 'companies': [{'company_name': '*independent', 'end_date': 0}]} self.assertEqual(user, utils.load_user( record_processor_inst.runtime_storage_inst, 'john_doe')) self.assertEqual(user, utils.load_user( record_processor_inst.runtime_storage_inst, '[email protected]')) def test_process_blueprint_then_commit(self): record_processor_inst = self.make_record_processor( lp_user_name={ 'john_doe': {'name': 'john_doe', 'display_name': 'John Doe'}}, lp_info={'[email protected]': {'name': 'john_doe', 'display_name': 'John Doe'}}) 
processed_records = list(record_processor_inst.process([ {'record_type': 'bp', 'id': 'mod:blueprint', 'self_link': 'http://launchpad.net/blueprint', 'owner': 'john_doe', 'date_created': 1234567890}, {'record_type': 'commit', 'commit_id': 'de7e8f297c193fb310f22815334a54b9c76a0be1', 'author_name': 'John Doe', 'author_email': '[email protected]', 'date': 1234567890, 'lines_added': 25, 'lines_deleted': 9, 'release_name': 'havana'} ])) self.assertRecordsMatch( {'record_type': 'bpd', 'launchpad_id': 'john_doe', 'author_name': 'John Doe', 'company_name': '*independent'}, processed_records[0]) self.assertRecordsMatch( {'record_type': 'commit', 'launchpad_id': 'john_doe', 'author_name': 'John Doe', 'author_email': '[email protected]', 'company_name': '*independent'}, processed_records[1]) user = {'seq': 1, 'core': [], 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]'], 'companies': [{'company_name': '*independent', 'end_date': 0}]} self.assertEqual(user, utils.load_user( record_processor_inst.runtime_storage_inst, 'john_doe')) self.assertEqual(user, utils.load_user( record_processor_inst.runtime_storage_inst, '[email protected]')) def test_process_review_then_blueprint(self): record_processor_inst = self.make_record_processor( lp_user_name={ 'john_doe': {'name': 'john_doe', 'display_name': 'John Doe'}}) processed_records = list(record_processor_inst.process([ {'record_type': 'review', 'id': 'I1045730e47e9e6ad31fcdfbaefdad77e2f3b2c3e', 'subject': 'Fix AttributeError in Keypair._add_details()', 'owner': {'name': 'John Doe', 'email': '[email protected]', 'username': 'john_doe'}, 'createdOn': 1379404951, 'module': 'nova', 'branch': 'master'}, {'record_type': 'bp', 'id': 'mod:blueprint', 'self_link': 'http://launchpad.net/blueprint', 'owner': 'john_doe', 'date_created': 1234567890} ])) self.assertRecordsMatch( {'record_type': 'review', 'launchpad_id': 'john_doe', 'author_name': 'John Doe', 'author_email': '[email protected]', 'company_name': '*independent'}, processed_records[0]) self.assertRecordsMatch( {'record_type': 'bpd', 'launchpad_id': 'john_doe', 'author_name': 'John Doe', 'company_name': '*independent'}, processed_records[1]) user = {'seq': 1, 'core': [], 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]'], 'companies': [{'company_name': '*independent', 'end_date': 0}]} self.assertEqual(user, utils.load_user( record_processor_inst.runtime_storage_inst, 'john_doe')) self.assertEqual(user, utils.load_user( record_processor_inst.runtime_storage_inst, '[email protected]')) def test_process_email_then_review(self): # it is expected that the user profile will contain both email and # LP id record_processor_inst = self.make_record_processor() list(record_processor_inst.process([ {'record_type': 'email', 'message_id': '<message-id>', 'author_email': '[email protected]', 'subject': 'hello, world!', 'body': 'lorem ipsum', 'date': 1234567890}, {'record_type': 'review', 'id': 'I1045730e47e9e6ad31fcdfbaefdad77e2f3b2c3e', 'subject': 'Fix AttributeError in Keypair._add_details()', 'owner': {'name': 'John Doe', 'email': '[email protected]', 'username': 'john_doe'}, 'createdOn': 1379404951, 'module': 'nova', 'branch': 'master'} ])) user = {'seq': 1, 'core': [], 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]'], 'companies': [{'company_name': '*independent', 'end_date': 0}]} self.assertEqual(user, utils.load_user( 
record_processor_inst.runtime_storage_inst, '[email protected]')) self.assertEqual(user, utils.load_user( record_processor_inst.runtime_storage_inst, 'john_doe')) def test_process_commit_then_review_with_different_email(self): record_processor_inst = self.make_record_processor( lp_info={'[email protected]': {'name': 'john_doe', 'display_name': 'John Doe'}}, companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}]) list(record_processor_inst.process([ {'record_type': 'commit', 'commit_id': 'de7e8f297c193fb310f22815334a54b9c76a0be1', 'author_name': 'John Doe', 'author_email': '[email protected]', 'date': 1234567890, 'lines_added': 25, 'lines_deleted': 9, 'release_name': 'havana'}, {'record_type': 'review', 'id': 'I1045730e47e9e6ad31fcdfbaefdad77e2f3b2c3e', 'subject': 'Fix AttributeError in Keypair._add_details()', 'owner': {'name': 'Bill Smith', 'email': '[email protected]', 'username': 'bsmith'}, 'createdOn': 1379404951, 'module': 'nova', 'branch': 'master', 'patchSets': [ {'number': '1', 'revision': '4d8984e92910c37b7d101c1ae8c8283a2e6f4a76', 'ref': 'refs/changes/16/58516/1', 'uploader': {'name': 'Bill Smith', 'email': '[email protected]', 'username': 'bsmith'}, 'createdOn': 1385470730, 'approvals': [ {'type': 'CRVW', 'description': 'Code Review', 'value': '1', 'grantedOn': 1385478464, 'by': {'name': 'John Doe', 'email': '[email protected]', 'username': 'john_doe'}}]}]} ])) user = {'seq': 1, 'core': [], 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]', '[email protected]'], 'companies': [{'company_name': 'IBM', 'end_date': 0}]} self.assertEqual(user, utils.load_user( record_processor_inst.runtime_storage_inst, 'john_doe')) self.assertEqual(user, utils.load_user( record_processor_inst.runtime_storage_inst, '[email protected]')) self.assertEqual(user, utils.load_user( record_processor_inst.runtime_storage_inst, '[email protected]')) def test_merge_users(self): record_processor_inst = self.make_record_processor( lp_user_name={ 'john_doe': {'name': 'john_doe', 'display_name': 'John Doe'} }, companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}], ) runtime_storage_inst = record_processor_inst.runtime_storage_inst runtime_storage_inst.set_records(record_processor_inst.process([ {'record_type': 'bp', 'id': 'mod:blueprint', 'self_link': 'http://launchpad.net/blueprint', 'owner': 'john_doe', 'date_created': 1234567890}, {'record_type': 'email', 'message_id': '<message-id>', 'author_email': '[email protected]', 'author_name': 'John Doe', 'subject': 'hello, world!', 'body': 'lorem ipsum', 'date': 1234567890}, {'record_type': 'review', 'id': 'I1045730e47e9e6ad31fcdfbaefdad77e2f3b2c3e', 'subject': 'Fix AttributeError in Keypair._add_details()', 'owner': {'name': 'John Doe', 'email': '[email protected]', 'username': 'john_doe'}, 'createdOn': 1379404951, 'module': 'nova', 'branch': 'master'} ])) record_processor_inst.update() user = {'seq': 2, 'core': [], 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]'], 'companies': [{'company_name': 'IBM', 'end_date': 0}]} runtime_storage_inst = record_processor_inst.runtime_storage_inst self.assertEqual(2, runtime_storage_inst.get_by_key('user:count')) self.assertEqual(None, utils.load_user(runtime_storage_inst, 1)) self.assertEqual(user, utils.load_user(runtime_storage_inst, 2)) self.assertEqual(user, utils.load_user(runtime_storage_inst, 'john_doe')) self.assertEqual(user, utils.load_user(runtime_storage_inst, '[email protected]')) # all 
records should have the same user_id and company name for record in runtime_storage_inst.get_all_records(): self.assertEqual('john_doe', record['user_id'], message='Record %s' % record['primary_key']) self.assertEqual('IBM', record['company_name'], message='Record %s' % record['primary_key']) def test_core_user_guess(self): record_processor_inst = self.make_record_processor( lp_user_name={ 'john_doe': {'name': 'john_doe', 'display_name': 'John Doe'} }, companies=[{'company_name': 'IBM', 'domains': ['ibm.com']}], ) runtime_storage_inst = record_processor_inst.runtime_storage_inst runtime_storage_inst.set_records(record_processor_inst.process([ {'record_type': 'review', 'id': 'I1045730e47e9e6ad31fcdfbaefdad77e2f3b2c3e', 'subject': 'Fix AttributeError in Keypair._add_details()', 'owner': {'name': 'John Doe', 'email': '[email protected]', 'username': 'john_doe'}, 'createdOn': 1379404951, 'module': 'nova', 'branch': 'master', 'patchSets': [ {'number': '1', 'revision': '4d8984e92910c37b7d101c1ae8c8283a2e6f4a76', 'ref': 'refs/changes/16/58516/1', 'uploader': { 'name': 'Bill Smith', 'email': '[email protected]', 'username': 'bsmith'}, 'createdOn': 1385470730, 'approvals': [ {'type': 'CRVW', 'description': 'Code Review', 'value': '2', 'grantedOn': 1385478464, 'by': { 'name': 'John Doe', 'email': '[email protected]', 'username': 'john_doe'}}, {'type': 'CRVW', 'description': 'Code Review', 'value': '-1', 'grantedOn': 1385478465, 'by': { 'name': 'Homer Simpson', 'email': '[email protected]', 'username': 'homer'}} ] }]} ])) record_processor_inst.update() user_1 = {'seq': 1, 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]'], 'core': [('nova', 'master')], 'companies': [{'company_name': 'IBM', 'end_date': 0}]} user_2 = {'seq': 2, 'user_id': 'homer', 'launchpad_id': 'homer', 'user_name': 'Homer Simpson', 'emails': ['[email protected]'], 'core': [], 'companies': [{'company_name': '*independent', 'end_date': 0}]} runtime_storage_inst = record_processor_inst.runtime_storage_inst self.assertEqual(user_1, utils.load_user(runtime_storage_inst, 'john_doe')) self.assertEqual(user_2, utils.load_user(runtime_storage_inst, 'homer')) def test_process_commit_with_coauthors(self): record_processor_inst = self.make_record_processor( lp_info={'[email protected]': {'name': 'jimi', 'display_name': 'Jimi Hendrix'}, '[email protected]': {'name': 'tupac', 'display_name': 'Tupac Shakur'}, '[email protected]': {'name': 'bob', 'display_name': 'Bob Dylan'}}) processed_commits = list(record_processor_inst.process([ {'record_type': 'commit', 'commit_id': 'de7e8f297c193fb310f22815334a54b9c76a0be1', 'author_name': 'Jimi Hendrix', 'author_email': '[email protected]', 'date': 1234567890, 'lines_added': 25, 'lines_deleted': 9, 'release_name': 'havana', 'coauthor': [{'author_name': 'Tupac Shakur', 'author_email': '[email protected]'}, {'author_name': 'Bob Dylan', 'author_email': '[email protected]'}]}])) self.assertEqual(3, len(processed_commits)) self.assertRecordsMatch({ 'launchpad_id': 'tupac', 'author_email': '[email protected]', 'author_name': 'Tupac Shakur', }, processed_commits[0]) self.assertRecordsMatch({ 'launchpad_id': 'jimi', 'author_email': '[email protected]', 'author_name': 'Jimi Hendrix', }, processed_commits[2]) self.assertEqual('tupac', processed_commits[0]['coauthor'][0]['user_id']) self.assertEqual('bob', processed_commits[0]['coauthor'][1]['user_id']) self.assertEqual('jimi', processed_commits[0]['coauthor'][2]['user_id']) # record post-processing def 
test_blueprint_mention_count(self): record_processor_inst = self.make_record_processor() runtime_storage_inst = record_processor_inst.runtime_storage_inst runtime_storage_inst.set_records(record_processor_inst.process([ {'record_type': 'bp', 'id': 'mod:blueprint', 'self_link': 'http://launchpad.net/blueprint', 'owner': 'john_doe', 'date_created': 1234567890}, {'record_type': 'bp', 'id': 'mod:ignored', 'self_link': 'http://launchpad.net/ignored', 'owner': 'john_doe', 'date_created': 1234567890}, {'record_type': 'email', 'message_id': '<message-id>', 'author_email': '[email protected]', 'author_name': 'John Doe', 'subject': 'hello, world!', 'body': 'lorem ipsum', 'date': 1234567890, 'blueprint_id': ['mod:blueprint']}, {'record_type': 'email', 'message_id': '<another-message-id>', 'author_email': '[email protected]', 'author_name': 'John Doe', 'subject': 'hello, world!', 'body': 'lorem ipsum', 'date': 1234567895, 'blueprint_id': ['mod:blueprint', 'mod:invalid']}, ])) record_processor_inst.update() bp1 = runtime_storage_inst.get_by_primary_key('bpd:mod:blueprint') self.assertEqual(2, bp1['mention_count']) self.assertEqual(1234567895, bp1['mention_date']) bp2 = runtime_storage_inst.get_by_primary_key('bpd:mod:ignored') self.assertEqual(0, bp2['mention_count']) self.assertEqual(0, bp2['mention_date']) email = runtime_storage_inst.get_by_primary_key('<another-message-id>') self.assertTrue('mod:blueprint' in email['blueprint_id']) self.assertFalse('mod:invalid' in email['blueprint_id']) def test_review_number(self): record_processor_inst = self.make_record_processor() runtime_storage_inst = record_processor_inst.runtime_storage_inst runtime_storage_inst.set_records(record_processor_inst.process([ {'record_type': 'review', 'id': 'I111', 'subject': 'Fix AttributeError in Keypair._add_details()', 'owner': {'name': 'John Doe', 'email': '[email protected]', 'username': 'john_doe'}, 'createdOn': 10, 'module': 'nova', 'branch': 'master'}, {'record_type': 'review', 'id': 'I222', 'subject': 'Fix AttributeError in Keypair._add_details()', 'owner': {'name': 'John Doe', 'email': '[email protected]', 'username': 'john_doe'}, 'createdOn': 5, 'module': 'glance', 'branch': 'master'}, ])) record_processor_inst.update() review1 = runtime_storage_inst.get_by_primary_key('I111') self.assertEqual(2, review1['review_number']) review2 = runtime_storage_inst.get_by_primary_key('I222') self.assertEqual(1, review2['review_number']) def test_mark_disagreement(self): record_processor_inst = self.make_record_processor( users=[ {'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]'], 'core': [('nova', 'master')], 'companies': [{'company_name': 'IBM', 'end_date': 0}]} ], ) runtime_storage_inst = record_processor_inst.runtime_storage_inst runtime_storage_inst.set_records(record_processor_inst.process([ {'record_type': 'review', 'id': 'I1045730e47e9e6ad31fcdfbaefdad77e2f3b2c3e', 'subject': 'Fix AttributeError in Keypair._add_details()', 'owner': {'name': 'John Doe', 'email': '[email protected]', 'username': 'john_doe'}, 'createdOn': 1379404951, 'module': 'nova', 'branch': 'master', 'patchSets': [ {'number': '1', 'revision': '4d8984e92910c37b7d101c1ae8c8283a2e6f4a76', 'ref': 'refs/changes/16/58516/1', 'uploader': { 'name': 'Bill Smith', 'email': '[email protected]', 'username': 'bsmith'}, 'createdOn': 1385470730, 'approvals': [ {'type': 'CRVW', 'description': 'Code Review', 'value': '1', 'grantedOn': 1385478465, 'by': { 'name': 'Homer Simpson', 'email': '[email protected]', 
'username': 'homer'}}, {'type': 'CRVW', 'description': 'Code Review', 'value': '-2', 'grantedOn': 1385478466, 'by': { 'name': 'John Doe', 'email': '[email protected]', 'username': 'john_doe'}} ] }]} ])) record_processor_inst.update() marks = list([r for r in runtime_storage_inst.get_all_records() if r['record_type'] == 'mark']) homer_mark = next(itertools.ifilter( lambda x: x['date'] == 1385478465, marks), None) self.assertTrue(homer_mark['x']) # disagreement def test_commit_merge_date(self): record_processor_inst = self.make_record_processor() runtime_storage_inst = record_processor_inst.runtime_storage_inst runtime_storage_inst.set_records(record_processor_inst.process([ {'record_type': 'commit', 'commit_id': 'de7e8f2', 'change_id': ['I104573'], 'author_name': 'John Doe', 'author_email': '[email protected]', 'date': 1234567890, 'lines_added': 25, 'lines_deleted': 9, 'release_name': 'havana'}, {'record_type': 'review', 'id': 'I104573', 'subject': 'Fix AttributeError in Keypair._add_details()', 'owner': {'name': 'John Doe', 'email': '[email protected]', 'username': 'john_doe'}, 'createdOn': 1385478465, 'lastUpdated': 1385490000, 'status': 'MERGED', 'module': 'nova', 'branch': 'master'}, ])) record_processor_inst.update() commit = runtime_storage_inst.get_by_primary_key('de7e8f2') self.assertEqual(1385490000, commit['date']) # update records def _generate_record_commit(self): yield {'commit_id': u'0afdc64bfd041b03943ceda7849c4443940b6053', 'lines_added': 9, 'module': u'stackalytics', 'record_type': 'commit', 'message': u'Closes bug 1212953\n\nChange-Id: ' u'I33f0f37b6460dc494abf2520dc109c9893ace9e6\n', 'subject': u'Fixed affiliation of Edgar and Sumit', 'loc': 10, 'user_id': u'john_doe', 'primary_key': u'0afdc64bfd041b03943ceda7849c4443940b6053', 'author_email': u'[email protected]', 'company_name': u'SuperCompany', 'record_id': 6, 'lines_deleted': 1, 'week': 2275, 'blueprint_id': None, 'bug_id': u'1212953', 'files_changed': 1, 'author_name': u'John Doe', 'date': 1376737923, 'launchpad_id': u'john_doe', 'branches': set([u'master']), 'change_id': u'I33f0f37b6460dc494abf2520dc109c9893ace9e6', 'release': u'havana'} # mail processing def test_process_mail(self): record_processor_inst = self.make_record_processor( users=[ { 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]', '[email protected]'], 'companies': [ {'company_name': 'NEC', 'end_date': 0}, ] } ], repos=[{"module": "stackalytics"}] ) processed_commit = list(record_processor_inst.process( generate_emails( author_email='[email protected]', author_name='John Doe', subject='[openstack-dev] [Stackalytics] Configuration files') ))[0] expected_commit = { 'launchpad_id': 'john_doe', 'author_email': '[email protected]', 'author_name': 'John Doe', 'company_name': 'NEC', 'module': 'stackalytics', } self.assertRecordsMatch(expected_commit, processed_commit) def test_process_mail_guessed(self): record_processor_inst = self.make_record_processor( users=[ { 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]', '[email protected]'], 'companies': [ {'company_name': 'NEC', 'end_date': 0}, ] } ], repos=[{'module': 'nova'}, {'module': 'neutron'}] ) processed_commit = list(record_processor_inst.process( generate_emails( author_email='[email protected]', author_name='John Doe', subject='[openstack-dev] [Neutron] [Nova] Integration issue') ))[0] expected_commit = { 'launchpad_id': 'john_doe', 'author_email': '[email protected]', 'author_name': 'John 
Doe', 'company_name': 'NEC', 'module': 'neutron', } self.assertRecordsMatch(expected_commit, processed_commit) def test_process_mail_guessed_module_in_body_override(self): record_processor_inst = self.make_record_processor( users=[ { 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]', '[email protected]'], 'companies': [ {'company_name': 'NEC', 'end_date': 0}, ] } ], repos=[{'module': 'nova'}, {'module': 'neutron'}] ) processed_commit = list(record_processor_inst.process( generate_emails( author_email='[email protected]', author_name='John Doe', module='nova', subject='[openstack-dev] [neutron] Comments/questions on the') ))[0] expected_commit = { 'launchpad_id': 'john_doe', 'author_email': '[email protected]', 'author_name': 'John Doe', 'company_name': 'NEC', 'module': 'neutron', } self.assertRecordsMatch(expected_commit, processed_commit) def test_process_mail_guessed_module_in_body(self): record_processor_inst = self.make_record_processor( users=[ { 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]', '[email protected]'], 'companies': [ {'company_name': 'NEC', 'end_date': 0}, ] } ], repos=[{'module': 'nova'}, {'module': 'neutron'}] ) processed_commit = list(record_processor_inst.process( generate_emails( author_email='[email protected]', author_name='John Doe', module='nova', subject='[openstack-dev] Comments/questions on the') ))[0] expected_commit = { 'launchpad_id': 'john_doe', 'author_email': '[email protected]', 'author_name': 'John Doe', 'company_name': 'NEC', 'module': 'nova', } self.assertRecordsMatch(expected_commit, processed_commit) def test_process_mail_unmatched(self): record_processor_inst = self.make_record_processor( users=[ { 'user_id': 'john_doe', 'launchpad_id': 'john_doe', 'user_name': 'John Doe', 'emails': ['[email protected]', '[email protected]'], 'companies': [ {'company_name': 'NEC', 'end_date': 0}, ] } ], repos=[{'module': 'nova'}, {'module': 'neutron'}] ) processed_commit = list(record_processor_inst.process( generate_emails( author_email='[email protected]', author_name='John Doe', subject='[openstack-dev] Comments/questions on the') ))[0] expected_commit = { 'launchpad_id': 'john_doe', 'author_email': '[email protected]', 'author_name': 'John Doe', 'company_name': 'NEC', 'module': 'unknown', } self.assertRecordsMatch(expected_commit, processed_commit) def test_get_modules(self): record_processor_inst = self.make_record_processor() with mock.patch('stackalytics.processor.utils.load_repos') as patch: patch.return_value = [{'module': 'nova'}, {'module': 'python-novaclient'}, {'module': 'neutron'}] modules = record_processor_inst._get_modules() self.assertEqual(set(['nova', 'neutron']), set(modules)) def assertRecordsMatch(self, expected, actual): for key, value in six.iteritems(expected): self.assertEqual(value, actual[key], 'Values for key %s do not match' % key) # Helpers def make_record_processor(self, users=None, companies=None, releases=None, repos=None, lp_info=None, lp_user_name=None): rp = record_processor.RecordProcessor(make_runtime_storage( users=users, companies=companies, releases=releases, repos=repos)) if lp_info is not None: self.lp_profile_by_email.side_effect = ( lambda x: lp_info.get(x)) if lp_user_name is not None: self.lp_profile_by_launchpad_id.side_effect = ( lambda x: lp_user_name.get(x)) return rp def generate_commits(author_name='John Doe', author_email='[email protected]', date=1999999999): yield { 'record_type': 'commit', 
'commit_id': 'de7e8f297c193fb310f22815334a54b9c76a0be1', 'author_name': author_name, 'author_email': author_email, 'date': date, 'lines_added': 25, 'lines_deleted': 9, 'release_name': 'havana', } def generate_emails(author_name='John Doe', author_email='[email protected]', date=1999999999, subject='[openstack-dev]', module=None): yield { 'record_type': 'email', 'message_id': 'de7e8f297c193fb310f22815334a54b9c76a0be1', 'author_name': author_name, 'author_email': author_email, 'date': date, 'subject': subject, 'module': module, 'body': 'lorem ipsum', } def make_runtime_storage(users=None, companies=None, releases=None, repos=None): runtime_storage_cache = {} runtime_storage_record_keys = set([]) def get_by_key(key): if key == 'companies': return _make_companies(companies or [ {"company_name": "*independent", "domains": [""]}, ]) elif key == 'users': return _make_users(users or []) elif key == 'releases': return releases or RELEASES elif key == 'repos': return repos or REPOS else: return runtime_storage_cache.get(key) def set_by_key(key, value): runtime_storage_cache[key] = value def delete_by_key(key): del runtime_storage_cache[key] def inc_user_count(): count = runtime_storage_cache.get('user:count') or 0 count += 1 runtime_storage_cache['user:count'] = count return count def get_all_users(): for n in xrange(0, (runtime_storage_cache.get('user:count') or 0) + 1): u = runtime_storage_cache.get('user:%s' % n) if u: yield u def set_records(records_iterator): for record in records_iterator: runtime_storage_cache[record['primary_key']] = record runtime_storage_record_keys.add(record['primary_key']) def get_all_records(): return [runtime_storage_cache[key] for key in runtime_storage_record_keys] def get_by_primary_key(primary_key): return runtime_storage_cache.get(primary_key) rs = mock.Mock(runtime_storage.RuntimeStorage) rs.get_by_key = mock.Mock(side_effect=get_by_key) rs.set_by_key = mock.Mock(side_effect=set_by_key) rs.delete_by_key = mock.Mock(side_effect=delete_by_key) rs.inc_user_count = mock.Mock(side_effect=inc_user_count) rs.get_all_users = mock.Mock(side_effect=get_all_users) rs.set_records = mock.Mock(side_effect=set_records) rs.get_all_records = mock.Mock(side_effect=get_all_records) rs.get_by_primary_key = mock.Mock(side_effect=get_by_primary_key) if users: for user in users: set_by_key('user:%s' % user['user_id'], user) if user.get('launchpad_id'): set_by_key('user:%s' % user['launchpad_id'], user) for email in user.get('emails') or []: set_by_key('user:%s' % email, user) return rs def _make_users(users): users_index = {} for user in users: if 'user_id' in user: users_index[user['user_id']] = user if 'launchpad_id' in user: users_index[user['launchpad_id']] = user for email in user['emails']: users_index[email] = user return users_index def _make_companies(companies): domains_index = {} for company in companies: for domain in company['domains']: domains_index[domain] = company['company_name'] return domains_index
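
# --- Editor's note (sketch): what _make_companies() builds -----------------
# Company matching in the tests above ultimately reads from the flat
# domain -> company_name index produced by _make_companies(), e.g.:
#
#     _make_companies([
#         {'company_name': 'IBM', 'domains': ['ibm.com']},
#         {'company_name': 'NEC', 'domains': ['nec.co.jp', 'nec.com']},
#     ])
#     # => {'ibm.com': 'IBM', 'nec.co.jp': 'NEC', 'nec.com': 'NEC'}
#
# which is the mapping _get_company_by_email() consults when matching an
# email's domain suffixes (see the long-suffix tests near the top of this
# file).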
#! /usr/bin/env python import mock import errno import socket import ssl from collections import deque from nsq import connection from nsq import constants from nsq import exceptions from nsq import response from nsq import util from nsq import json from common import MockedSocketTest, HttpClientIntegrationTest class TestConnection(MockedSocketTest): '''Tests about our connection''' def test_alive(self): self.assertTrue(self.connection.alive()) def test_close(self): '''Should mark the connection as closed''' self.connection.close() self.assertFalse(self.connection.alive()) def test_blocking(self): '''Sets blocking on the socket''' self.connection.setblocking(0) self.socket.setblocking.assert_called_with(0) def test_pending(self): '''Appends to pending''' self.connection.nop() self.assertEqual( list(self.connection.pending()), [constants.NOP + constants.NL]) def test_flush_partial(self): '''Keeps its place when flushing out partial messages''' # We'll tell the connection it has only sent one byte when flushing with mock.patch.object(self.socket, 'send'): self.socket.send.return_value = 1 self.connection.nop() self.connection.flush() # We expect all but the first byte to remain message = constants.NOP + constants.NL self.assertEqual(list(self.connection.pending()), [message[1:]]) def test_flush_full(self): '''Pops off messages it has flushed completely''' # We'll tell the connection it has only sent one byte when flushing self.connection.nop() self.connection.flush() # The nop message was sent, so we expect it to be popped self.assertEqual(list(self.connection.pending()), []) def test_flush_count(self): '''Returns how many bytes were sent''' message = constants.NOP + constants.NL # Ensure this doesn't invoke our normal flush self.connection.nop() self.assertEqual(self.connection.flush(), len(message)) def test_flush_empty(self): '''Returns 0 if there are no pending messages''' self.assertEqual(self.connection.flush(), 0) def test_flush_multiple(self): '''Flushes as many messages as possible''' pending = deque([b'hello'] * 5) with mock.patch.object(self.connection, '_pending', pending): self.connection.flush() self.assertEqual(len(self.connection.pending()), 0) def test_flush_would_block(self): '''Honors EAGAIN / EWOULDBLOCK''' pending = deque([b'1', b'2', b'3']) with mock.patch.object(self.connection, '_socket') as mock_socket: with mock.patch.object(self.connection, '_pending', pending): mock_socket.send.side_effect = socket.error(errno.EAGAIN) self.assertEqual(self.connection.flush(), 0) def test_flush_would_block_ssl_write(self): '''Honors ssl.SSL_ERROR_WANT_WRITE''' pending = deque([b'1', b'2', b'3']) with mock.patch.object(self.connection, '_socket') as mock_socket: with mock.patch.object(self.connection, '_pending', pending): mock_socket.send.side_effect = ssl.SSLError( ssl.SSL_ERROR_WANT_WRITE) self.assertEqual(self.connection.flush(), 0) def test_flush_would_block_ssl_read(self): '''Honors ssl.SSL_ERROR_WANT_READ''' pending = deque([b'1', b'2', b'3']) with mock.patch.object(self.connection, '_socket') as mock_socket: with mock.patch.object(self.connection, '_pending', pending): mock_socket.send.side_effect = ssl.SSLError( ssl.SSL_ERROR_WANT_READ) self.assertEqual(self.connection.flush(), 0) def test_flush_would_block_ssl_write_buffer(self): '''ssl.SSL_ERROR_WANT_WRITE usesthe same buffer on next send''' pending = deque([b'1', b'2', b'3']) with mock.patch.object(self.connection, '_pending', pending): with mock.patch.object(self.connection, '_socket') as mock_socket: 
mock_socket.send.side_effect = ssl.SSLError( ssl.SSL_ERROR_WANT_WRITE) self.assertFalse(self.connection._out_buffer) self.connection.flush() self.assertEqual(self.connection._out_buffer, b'123') # With some more pending items, make sure we still only get '123' sent pending = deque([b'4', b'5', b'6']) with mock.patch.object(self.connection, '_pending', pending): with mock.patch.object(self.connection, '_socket') as mock_socket: mock_socket.send.return_value = 3 # The first flush should see the existing buffer self.connection.flush() mock_socket.send.assert_called_with(b'123') # The second flush should see the pending requests self.connection.flush() mock_socket.send.assert_called_with(b'456') def test_flush_socket_error(self): '''Re-raises socket non-EAGAIN errors''' pending = deque([b'1', b'2', b'3']) with mock.patch.object(self.connection, '_socket') as mock_socket: with mock.patch.object(self.connection, '_pending', pending): mock_socket.send.side_effect = socket.error('foo') self.assertRaises(socket.error, self.connection.flush) def test_eager_flush(self): '''Sending on a non-blocking connection does not eagerly flush''' with mock.patch.object(self.connection, 'flush') as mock_flush: self.connection.send(b'foo') mock_flush.assert_not_called() def test_close_flush(self): '''Closing the connection flushes all remaining messages''' def fake_flush(): self.connection._pending = False self.connection._fake_flush_called = True with mock.patch.object(self.connection, 'flush', fake_flush): self.connection.send(b'foo') self.connection.close() self.assertTrue(self.connection._fake_flush_called) def test_magic(self): '''Sends the NSQ magic bytes''' self.assertTrue(self.socket.read().startswith(constants.MAGIC_V2)) def test_identify(self): '''The connection sends the identify commands''' expected = b''.join([ constants.MAGIC_V2, constants.IDENTIFY, constants.NL, util.pack(json.dumps(self.connection._identify_options).encode())]) self.assertEqual(self.socket.read(), expected) def test_read_timeout(self): '''Returns no results after a socket timeout''' with mock.patch.object(self.connection, '_socket') as mock_socket: mock_socket.recv.side_effect = socket.timeout self.assertEqual(self.connection.read(), []) def test_read_socket_error(self): '''Re-raises socket non-errno socket errors''' with mock.patch.object(self.connection, '_socket') as mock_socket: mock_socket.recv.side_effect = socket.error('foo') self.assertRaises(socket.error, self.connection.read) def test_read_would_block(self): '''Returns no results if it would block''' with mock.patch.object(self.connection, '_socket') as mock_socket: mock_socket.recv.side_effect = socket.error(errno.EAGAIN) self.assertEqual(self.connection.read(), []) def test_read_would_block_ssl_write(self): '''Returns no results if it would block on an SSL socket''' with mock.patch.object(self.connection, '_socket') as mock_socket: mock_socket.recv.side_effect = ssl.SSLError(ssl.SSL_ERROR_WANT_WRITE) self.assertEqual(self.connection.read(), []) def test_read_would_block_ssl_read(self): '''Returns no results if it would block on an SSL socket''' with mock.patch.object(self.connection, '_socket') as mock_socket: mock_socket.recv.side_effect = ssl.SSLError(ssl.SSL_ERROR_WANT_READ) self.assertEqual(self.connection.read(), []) def test_read_partial(self): '''Returns nothing if it has only read partial results''' self.socket.write(b'f') self.assertEqual(self.connection.read(), []) def test_read_size_partial(self): '''Returns no responses if the size is complete but the content is only 
partial''' self.socket.write(response.Response.pack(b'hello')[:-1]) self.assertEqual(self.connection.read(), []) def test_read_whole(self): '''Returns a single message if it has read a complete one''' self.socket.write(response.Response.pack(b'hello')) expected = response.Response( self.connection, constants.FRAME_TYPE_RESPONSE, b'hello') self.assertEqual(self.connection.read(), [expected]) def test_read_multiple(self): '''Returns multiple responses if available''' self.socket.write(response.Response.pack(b'hello') * 10) expected = response.Response( self.connection, constants.FRAME_TYPE_RESPONSE, b'hello') self.assertEqual(self.connection.read(), [expected] * 10) def test_fileno(self): '''Returns the connection's file descriptor appropriately''' self.assertEqual( self.connection.fileno(), self.socket.fileno()) def test_fileno_closed(self): '''Raises an exception if the connection's closed''' with mock.patch.object(self.connection, '_socket', None): self.assertRaises(exceptions.ConnectionClosedException, self.connection.fileno) def test_str_alive(self): '''Sane str representation for an alive connection''' with mock.patch.object(self.connection, 'alive', return_value=True): with mock.patch.object( self.connection, 'fileno', return_value=7): with mock.patch.object(self.connection, 'host', 'host'): with mock.patch.object(self.connection, 'port', 'port'): self.assertEqual(str(self.connection), '<Connection host:port (alive on FD 7)>') def test_str_dead(self): '''Sane str representation for an alive connection''' with mock.patch.object(self.connection, 'alive', return_value=False): with mock.patch.object( self.connection, 'fileno', return_value=7): with mock.patch.object(self.connection, 'host', 'host'): with mock.patch.object(self.connection, 'port', 'port'): self.assertEqual(str(self.connection), '<Connection host:port (dead on FD 7)>') def test_send_no_message(self): '''Appropriately sends packed data without message''' self.socket.read() self.connection.nop() self.connection.flush() expected = constants.NOP + constants.NL self.assertEqual(self.socket.read(), expected) def test_send_message(self): '''Appropriately sends packed data with message''' self.socket.read() self.connection.identify({}) self.connection.flush() expected = b''.join( (constants.IDENTIFY, constants.NL, util.pack(b'{}'))) self.assertEqual(self.socket.read(), expected) def assertSent(self, expected, function, *args, **kwargs): '''Assert that the connection sends the expected payload''' self.socket.read() function(*args, **kwargs) self.connection.flush() self.assertEqual(self.socket.read(), expected) def test_auth(self): '''Appropriately send auth''' expected = b''.join((constants.AUTH, constants.NL, util.pack(b'hello'))) self.assertSent(expected, self.connection.auth, b'hello') def test_sub(self): '''Appropriately sends sub''' expected = b''.join((constants.SUB, b' foo bar', constants.NL)) self.assertSent(expected, self.connection.sub, b'foo', b'bar') def test_pub(self): '''Appropriately sends pub''' expected = b''.join( (constants.PUB, b' foo', constants.NL, util.pack(b'hello'))) self.assertSent(expected, self.connection.pub, b'foo', b'hello') def test_mpub(self): '''Appropriately sends mpub''' expected = b''.join(( constants.MPUB, b' foo', constants.NL, util.pack([b'hello', b'howdy']))) self.assertSent(expected, self.connection.mpub, b'foo', b'hello', b'howdy') def test_ready(self): '''Appropriately sends ready''' expected = b''.join((constants.RDY, b' 5', constants.NL)) self.assertSent(expected, self.connection.rdy, 5) 
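# --- Illustrative aside (not part of the test class above) ---
# The assertions above compare byte-exact frames such as "SUB foo bar\n" and
# size-prefixed bodies built with util.pack. The sketch below shows the framing
# the NSQ protocol defines for command bodies: a 4-byte big-endian length
# followed by the payload. It is an illustration of what util.pack is assumed
# to produce here, not a copy of that helper.
import struct


def _pack_body(data):
    '''Prefix a payload with its 4-byte big-endian length.'''
    return struct.pack('>l', len(data)) + data


def _pub_frame(topic, message):
    '''Build the bytes a PUB command is expected to put on the wire.'''
    return b'PUB ' + topic + b'\n' + _pack_body(message)

# _pub_frame(b'foo', b'hello') == b'PUB foo\n\x00\x00\x00\x05hello'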
def test_fin(self): '''Appropriately sends fin''' expected = b''.join((constants.FIN, b' message_id', constants.NL)) self.assertSent(expected, self.connection.fin, b'message_id') def test_req(self): '''Appropriately sends req''' expected = b''.join((constants.REQ, b' message_id 10', constants.NL)) self.assertSent(expected, self.connection.req, b'message_id', 10) def test_touch(self): '''Appropriately sends touch''' expected = b''.join((constants.TOUCH, b' message_id', constants.NL)) self.assertSent(expected, self.connection.touch, b'message_id') def test_cls(self): '''Appropriately sends cls''' expected = b''.join((constants.CLS, constants.NL)) self.assertSent(expected, self.connection.cls) def test_nop(self): '''Appropriately sends nop''' expected = b''.join((constants.NOP, constants.NL)) self.assertSent(expected, self.connection.nop) # Some tests very closely aimed at identification def test_calls_identified(self): '''Upon getting an identification response, we call 'identified''' with mock.patch.object( connection.Connection, 'identified') as mock_identified: self.connect({'foo': 'bar'}) self.assertTrue(mock_identified.called) def test_identified_tolerates_ok(self): '''The identified handler tolerates OK responses''' res = mock.Mock(data='OK') self.assertEqual(self.connection.identified(res).data, 'OK') def test_identify_defaults(self): '''Identify provides default options''' self.assertEqual(self.connection._identify_options, { 'feature_negotiation': True, 'client_id': socket.getfqdn().split('.')[0], 'hostname': socket.gethostname(), 'user_agent': self.connection.USER_AGENT }) def test_identify_override_defaults(self): '''Identify allows us to override defaults''' with mock.patch('nsq.connection.Connection.connect'): conn = connection.Connection('host', 0, long_id='not-your-fqdn') self.assertEqual(conn._identify_options['long_id'], 'not-your-fqdn') def test_identify_tls_unsupported(self): '''Raises an exception about the lack of TLS support''' with mock.patch('nsq.connection.TLSSocket', None): self.assertRaises(exceptions.UnsupportedException, connection.Connection, 'host', 0, tls_v1=True) def test_identify_snappy_unsupported(self): '''Raises an exception about the lack of snappy support''' with mock.patch('nsq.connection.SnappySocket', None): self.assertRaises(exceptions.UnsupportedException, connection.Connection, 'host', 0, snappy=True) def test_identify_deflate_unsupported(self): '''Raises an exception about the lack of deflate support''' with mock.patch('nsq.connection.DeflateSocket', None): self.assertRaises(exceptions.UnsupportedException, connection.Connection, 'host', 0, deflate=True) def test_identify_no_deflate_level(self): '''Raises an exception about the lack of deflate_level support''' with mock.patch('nsq.connection.DeflateSocket', None): self.assertRaises(exceptions.UnsupportedException, connection.Connection, 'host', 0, deflate_level=True) def test_identify_no_snappy_and_deflate(self): '''We should yell early about incompatible snappy and deflate options''' self.assertRaises(exceptions.UnsupportedException, connection.Connection, 'host', 0, snappy=True, deflate=True) def test_identify_saves_identify_response(self): '''Saves the identify response from the server''' expected = {'foo': 'bar'} conn = self.connect(expected) self.assertEqual(conn._identify_response, expected) def test_identify_saves_max_rdy_count(self): '''Saves the max ready count if it's provided''' conn = self.connect({'max_rdy_count': 100}) self.assertEqual(conn.max_rdy_count, 100) def 
test_ready_to_reconnect(self): '''Alias for the reconnection attempt's ready method''' with mock.patch.object( self.connection, '_reconnnection_counter') as ctr: self.connection.ready_to_reconnect() ctr.ready.assert_called_with() def test_reconnect_living_socket(self): '''Don't reconnect a living connection''' before = self.connection._socket self.connection.connect() self.assertEqual(self.connection._socket, before) def test_connect_socket_error_return_value(self): '''Socket errors has connect return False''' self.connection.close() with mock.patch('nsq.connection.socket') as mock_socket: mock_socket.socket = mock.Mock(side_effect=socket.error) self.assertFalse(self.connection.connect()) def test_connect_socket_error_reset(self): '''Invokes reset if the socket raises an error''' self.connection.close() with mock.patch('nsq.connection.socket') as mock_socket: with mock.patch.object(self.connection, '_reset') as mock_reset: mock_socket.socket = mock.Mock(side_effect=socket.error) self.connection.connect() mock_reset.assert_called_with() def test_connect_timeout(self): '''Times out when connection instantiation is too slow''' socket = self.connection._socket self.connection.close() with mock.patch.object(self.connection, '_read', return_value=[]): with mock.patch.object(self.connection, '_timeout', 0.05): with mock.patch( 'nsq.connection.socket.socket', return_value=socket): self.assertFalse(self.connection.connect()) def test_connect_resets_state(self): '''Upon connection, makes a call to reset its state''' socket = self.connection._socket self.connection.close() with mock.patch.object(self.connection, '_read', return_value=[]): with mock.patch.object(self.connection, '_reset') as mock_reset: with mock.patch.object(self.connection, '_timeout', 0.05): with mock.patch( 'nsq.connection.socket.socket', return_value=socket): self.connection.connect() mock_reset.assert_called_with() def test_close_resets_state(self): '''On closing a connection, reset its state''' with mock.patch.object(self.connection, '_reset') as mock_reset: self.connection.close() mock_reset.assert_called_with() def test_reset_socket(self): '''Resets socket''' self.connection._socket = True self.connection._reset() self.assertEqual(self.connection._socket, None) def test_reset_pending(self): '''Resets pending''' self.connection._pending = True self.connection._reset() self.assertEqual(self.connection._pending, deque()) def test_reset_out_buffer(self): '''Resets the outbound buffer''' self.connection._out_buffer = True self.connection._reset() self.assertEqual(self.connection._out_buffer, b'') def test_reset_buffer(self): '''Resets buffer''' self.connection._buffer = True self.connection._reset() self.assertEqual(self.connection._buffer, b'') def test_reset_identify_response(self): '''Resets identify_response''' self.connection._identify_response = True self.connection._reset() self.assertEqual(self.connection._identify_response, {}) def test_reset_last_ready_sent(self): '''Resets last_ready_sent''' self.connection.last_ready_sent = True self.connection._reset() self.assertEqual(self.connection.last_ready_sent, 0) def test_reset_ready(self): '''Resets ready''' self.connection.ready = True self.connection._reset() self.assertEqual(self.connection.ready, 0) def test_ok_response(self): '''Sets our _identify_response to {} if 'OK' is provided''' res = response.Response( self.connection, response.Response.FRAME_TYPE, 'OK') self.connection.identified(res) self.assertEqual(self.connection._identify_response, {}) def 
test_tls_unsupported(self): '''Raises an exception if the server does not support TLS''' res = response.Response(self.connection, response.Response.FRAME_TYPE, json.dumps({'tls_v1': False})) options = {'tls_v1': True} with mock.patch.object(self.connection, '_identify_options', options): self.assertRaises(exceptions.UnsupportedException, self.connection.identified, res) def test_auth_required_not_provided(self): '''Raises an exception if auth is required but not provided''' res = response.Response(self.connection, response.Response.FRAME_TYPE, json.dumps({'auth_required': True})) self.assertRaises(exceptions.UnsupportedException, self.connection.identified, res) def test_auth_required_provided(self): '''Sends the auth message if required and provided''' res = response.Response(self.connection, response.Response.FRAME_TYPE, json.dumps({'auth_required': True})) with mock.patch.object(self.connection, 'auth') as mock_auth: with mock.patch.object(self.connection, '_auth_secret', 'hello'): self.connection.identified(res) mock_auth.assert_called_with('hello') def test_auth_provided_not_required(self): '''Logs a warning if you provide auth when none is required''' res = response.Response(self.connection, response.Response.FRAME_TYPE, json.dumps({'auth_required': False})) with mock.patch('nsq.connection.logger') as mock_logger: with mock.patch.object(self.connection, '_auth_secret', 'hello'): self.connection.identified(res) mock_logger.warning.assert_called_with( 'Authentication secret provided but not required') class TestTLSConnectionIntegration(HttpClientIntegrationTest): '''We can establish a connection with TLS''' def setUp(self): HttpClientIntegrationTest.setUp(self) self.connection = connection.Connection('localhost', 14150, tls_v1=True) self.connection.setblocking(0) def test_alive(self): '''The connection is alive''' self.assertTrue(self.connection.alive()) def test_basic(self): '''Can send and receive things''' self.connection.pub(b'foo', b'bar') self.connection.flush() responses = [] while not responses: responses = self.connection.read() self.assertEqual(len(responses), 1) self.assertEqual(responses[0].data, b'OK')
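# --- Illustrative helper sketch (not part of the test suite above) ---
# test_basic polls connection.read() until data arrives because the socket is
# non-blocking. The helper below captures that pattern with a deadline so a
# broken broker cannot hang a test forever; it only uses methods exercised
# above (read), and the timeout value is an arbitrary assumption.
import time


def _read_until(conn, timeout=5.0):
    '''Poll conn.read() until it yields responses or the timeout elapses.'''
    deadline = time.time() + timeout
    responses = []
    while not responses and time.time() < deadline:
        responses = conn.read()
    return responses

# Usage, mirroring test_basic:
#   self.connection.pub(b'foo', b'bar')
#   self.connection.flush()
#   responses = _read_until(self.connection)
#   self.assertEqual(responses[0].data, b'OK')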
import pandas as pd import numpy as np import datetime import math import configuration import rews import binning import warnings warnings.simplefilter('ignore', np.RankWarning) def getSeparatorValue(separator): try: return {"TAB":"\t", "SPACE":" ", "COMMA": ",", "SEMI-COLON":";"}[separator.upper()] except: raise Exception("Unknown separator: '%s'" % separator) def getDecimalValue(decimal): try: return {"FULL STOP":".", "COMMA":","}[decimal.upper()] except: raise Exception("Unknown decimal: '%s'" % decimal) class DeviationMatrix(object): def __init__(self,deviationMatrix,countMatrix): self.matrix = deviationMatrix self.count = countMatrix class CalibrationBase: def __init__(self, x, y): self.x = x self.y = y self.requiredColumns = [self.x, self.y] def variance(self, df, col): return ((df[col].mean() - df[col]) ** 2.0).sum() def covariance(self, df, colA, colB): return df[[colA,colB]].cov()[colA][colB] # assumes unbiased estimator (normalises with N-1) def sigA(self,df,slope, intercept, count): sumPredYfromX = sum((df[self.y] - (intercept + df[self.x]*slope ))**2) sumX = (df[self.x]).sum() sumXX = (df[self.x]**2).sum() return ((sumPredYfromX/(count-2))*(sumXX/(count*sumXX - sumX**2)))**0.5 def sigB(self,df,slope, intercept, count): sumPredYfromX = sum((df[self.y] - (intercept + df[self.x]*slope ))**2) sumX = (df[self.x]).sum() sumXX = (df[self.x]**2).sum() return ((sumPredYfromX/(count-2))/(count*sumXX - sumX**2))**0.5 def mean(self, df, col): return df[col].mean() def intercept(self, df, slope): return self.mean(df, self.y) - slope * self.mean(df, self.x) class York(CalibrationBase): def covariance(self, df, colA, colB): return ((df[colA].mean() - df[colA]) * (df[colB].mean() - df[colB])).sum() def __init__(self, x, y, timeStepInSeconds, df): movingAverageWindow = self.calculateMovingAverageWindow(timeStepInSeconds) self.xRolling = "xRolling" self.yRolling = "yRolling" self.xDiffSq = "xDiffSq" self.yDiffSq = "yDiffSq" df[self.xRolling] = pd.rolling_mean(df[x], window = movingAverageWindow, min_periods = 1) df[self.yRolling] = pd.rolling_mean(df[y], window = movingAverageWindow, min_periods = 1) df[self.xDiffSq] = ((df[x] - df[self.xRolling])** 2.0) df[self.yDiffSq] = ((df[y] - df[self.yRolling])** 2.0) # this needed in uncertainty? CalibrationBase.__init__(self, x, y) self.requiredColumns += [self.xDiffSq, self.yDiffSq] def calculateMovingAverageWindow(self, timeStepInSeconds): movingAverageMultiplier = 3 minimumMovingAveageWindowInSeconds = movingAverageMultiplier * 60 * 60 movingAveageWindowInSeconds = max([minimumMovingAveageWindowInSeconds, movingAverageMultiplier * timeStepInSeconds]) if movingAveageWindowInSeconds % timeStepInSeconds != 0: raise Exception("Cannot calculate moving average window. Moving average window (%ds) is not an integer multiple of timestep (%ds)" % (movingAveageWindowInSeconds, timeStepInSeconds))
movingAverageWindow = movingAveageWindowInSeconds / timeStepInSeconds return movingAverageWindow def slope(self, df): alpha = self.calculateAlpha(df) varianceX = self.variance(df, self.x) varianceY = self.variance(df, self.y) covarianceXY = self.covariance(df, self.x, self.y) gradientNumerator = math.sin(alpha) * varianceY + math.cos(alpha) * covarianceXY gradientDenominator = math.sin(alpha) * covarianceXY + math.cos(alpha) * varianceX return (gradientNumerator / gradientDenominator) def calculateAlpha(self, df): xYorkVariance = df[self.xDiffSq].dropna().sum() yYorkVariance = df[self.yDiffSq].dropna().sum() covarianceXY = self.covariance(df, self.x, self.y) varianceX = self.variance(df, self.x) print covarianceXY,varianceX,xYorkVariance return math.atan2(covarianceXY ** 2.0 / varianceX ** 2.0 * xYorkVariance, yYorkVariance) class RatioOfMeans(CalibrationBase): def slope(self, df): return self.mean(df, self.y) / self.mean(df, self.x) class LeastSquares(CalibrationBase): def _slope(self, df): varianceX = self.variance(df, self.x) covarianceXY = self.covariance(df, self.x, self.y) return covarianceXY ** 2.0 / varianceX ** 2.0 def slope(self, df): A = np.vstack([df[self.x].as_matrix(), np.ones(len(df))]).T slope, residual, rank, s = np.linalg.lstsq(A, df[self.y]) return slope[0] class SiteCalibrationCalculator: def __init__(self, directionBinColumn, valueColumn, calibrationSectorDataframe, actives = None): self.calibrationSectorDataframe = calibrationSectorDataframe self.valueColumn = valueColumn self.directionBinColumn = directionBinColumn if actives != None: activeSectors = [] for direction in actives: if actives[direction]: activeSectors.append(int(direction)) self.calibrationSectorDataframe = self.calibrationSectorDataframe.loc[activeSectors,:] self.calibrationSectorDataframe['SpeedUpAt10'] = (10*self.calibrationSectorDataframe['Slope'] + self.calibrationSectorDataframe['Offset'])/10.0 self.IECLimitCalculator() def turbineValue(self, row): directionBin = row[self.directionBinColumn] if np.isnan(directionBin): return np.nan if not directionBin in self.calibrationSectorDataframe.index: return np.nan value = row[self.valueColumn] if np.isnan(value): return np.nan return self.calibrate(directionBin, value) def calibrate(self, directionBin, value): return self.calibrationSectorDataframe['Offset'][directionBin] + self.calibrationSectorDataframe['Slope'][directionBin] * value def IECLimitCalculator(self): if len(self.calibrationSectorDataframe.index) == 36 and 'vRatio' in self.calibrationSectorDataframe.columns: self.calibrationSectorDataframe['pctSpeedUp'] = (self.calibrationSectorDataframe['vRatio']-1)*100 self.calibrationSectorDataframe['LowerLimit'] = pd.Series(data=np.roll(((self.calibrationSectorDataframe['vRatio']-1)*100)-2.0,1),index=self.calibrationSectorDataframe.index) self.calibrationSectorDataframe['UpperLimit'] = pd.Series(data=np.roll(((self.calibrationSectorDataframe['vRatio']-1)*100)+2.0,1),index=self.calibrationSectorDataframe.index) self.calibrationSectorDataframe['IECValid'] = np.logical_and(self.calibrationSectorDataframe['pctSpeedUp'] > self.calibrationSectorDataframe['LowerLimit'], self.calibrationSectorDataframe['pctSpeedUp'] < self.calibrationSectorDataframe['UpperLimit']) print self.calibrationSectorDataframe[['pctSpeedUp','LowerLimit','UpperLimit','IECValid']] return True def getSectorValidity(self, key, timeStep): ba = 
self.calibrationSectorDataframe.loc[key,'belowAbove'] return ba[0]*(timeStep/3600.0) > 6.0 and ba[1]*(timeStep/3600.0) > 6.0 class ShearExponentCalculator: def __init__(self, shearMeasurements): self.shearMeasurements = shearMeasurements def calculateMultiPointShear(self, row): # 3 point measurement: return shear= 1/ (numpy.polyfit(x, y, deg, rcond=None, full=False) ) windspeeds = [np.log(row[col]) for col in self.shearMeasurements.values()] heights = [np.log(height) for height in self.shearMeasurements.keys()] deg = 1 # linear if len([ws for ws in windspeeds if not np.isnan(ws)]) < 1: return np.nan polyfitResult = np.polyfit(windspeeds, heights, deg, rcond=None, full=False) shearThreePT = 1/ polyfitResult[0] return shearThreePT def calculateTwoPointShear(self,row): # superseded by self.calculateMultiPointShear return math.log(row[self.upperColumn] / row[self.lowerColumn]) * self.overOneLogHeightRatio def shearExponent(self, row): return self.calculateMultiPointShear(row) class Dataset: def __init__(self, config, rotorGeometry, analysisConfig): self.relativePath = configuration.RelativePath(config.path) self.nameColumn = "Dataset Name" self.name = config.name self.timeStepInSeconds = config.timeStepInSeconds self.timeStamp = config.timeStamp self.actualPower = "Actual Power" self.hasAllPowers = None not in (config.powerMin,config.powerMax,config.powerSD) self.powerMin = "Power Min" self.powerMax = "Power Max" self.powerSD = "Power SD" self.hubWindSpeed = "Hub Wind Speed" self.hubTurbulence = "Hub Turbulence" self.hubDensity = "Hub Density" self.shearExponent = "Shear Exponent" self.referenceShearExponent = "Reference Shear Exponent" self.turbineShearExponent = "Turbine Shear Exponent" self.windDirection = "Wind Direction" self.profileRotorWindSpeed = "Profile Rotor Wind Speed" self.profileHubWindSpeed = "Profile Hub Wind Speed" self.profileHubToRotorRatio = "Hub to Rotor Ratio" self.profileHubToRotorDeviation = "Hub to Rotor Deviation" self.residualWindSpeed = "Residual Wind Speed" self.hasShear = len(config.shearMeasurements) > 1 self.hasDirection = config.referenceWindDirection not in (None,'') self.shearCalibration = "TurbineLocation" in config.shearMeasurements.keys() and "ReferenceLocation" in config.shearMeasurements.keys() self.hubWindSpeedForTurbulence = self.hubWindSpeed if config.turbulenceWSsource != 'Reference' else config.referenceWindSpeed self.turbRenormActive = analysisConfig.turbRenormActive self.turbulencePower = 'Turbulence Power' self.rewsDefined = config.rewsDefined self.sensitivityDataColumns = config.sensitivityDataColumns dateConverter = lambda x: datetime.datetime.strptime(x, config.dateFormat) dataFrame = pd.read_csv(self.relativePath.convertToAbsolutePath(config.inputTimeSeriesPath), index_col=config.timeStamp, \ parse_dates = True, date_parser = dateConverter, sep = getSeparatorValue(config.separator), \ skiprows = config.headerRows, decimal = getDecimalValue(config.decimal)).replace(config.badData, np.nan) if config.startDate != None and config.endDate != None: dataFrame = dataFrame[config.startDate : config.endDate] elif config.startDate != None: dataFrame = dataFrame[config.startDate : ] elif config.endDate != None: dataFrame = dataFrame[ : config.endDate] dataFrame[self.nameColumn] = config.name dataFrame[self.timeStamp] = dataFrame.index if self.hasDirection: dataFrame[self.windDirection] = dataFrame[config.referenceWindDirection] if self.hasShear: if not self.shearCalibration: dataFrame[self.shearExponent] = 
dataFrame.apply(ShearExponentCalculator(config.shearMeasurements).shearExponent, axis=1) else: dataFrame[self.turbineShearExponent] = dataFrame.apply(ShearExponentCalculator(config.shearMeasurements["TurbineLocation"]).shearExponent, axis=1) dataFrame[self.referenceShearExponent] = dataFrame.apply(ShearExponentCalculator(config.shearMeasurements["ReferenceLocation"]).shearExponent, axis=1) dataFrame[self.shearExponent] = dataFrame[self.referenceShearExponent] dataFrame[self.residualWindSpeed] = 0.0 if config.calculateHubWindSpeed: if dataFrame[config.referenceWindSpeed].count() < 1: raise Exception("Reference wind speed column is empty: cannot apply calibration") if dataFrame[config.referenceWindDirection].count() < 1: raise Exception("Reference wind direction column is empty: cannot apply calibration") self.calibrationCalculator = self.createCalibration(dataFrame, config, config.timeStepInSeconds) dataFrame[self.hubWindSpeed] = dataFrame.apply(self.calibrationCalculator.turbineValue, axis=1) if dataFrame[self.hubWindSpeed].count() < 1: raise Exception("Hub wind speed column is empty after application of calibration") if (config.hubTurbulence != ''): dataFrame[self.hubTurbulence] = dataFrame[config.hubTurbulence] else: dataFrame[self.hubTurbulence] = dataFrame[config.referenceWindSpeedStdDev] / dataFrame[self.hubWindSpeedForTurbulence] if config.calibrationMethod != "Specified": dataFrame[self.residualWindSpeed] = (dataFrame[self.hubWindSpeed] - dataFrame[config.turbineLocationWindSpeed]) / dataFrame[self.hubWindSpeed] windSpeedBin = "Wind Speed Bin" turbulenceBin = "Turbulence Bin" windSpeedBins = binning.Bins(analysisConfig.powerCurveFirstBin, analysisConfig.powerCurveBinSize, analysisConfig.powerCurveLastBin) turbulenceBins = binning.Bins(0.01, 0.01/windSpeedBins.numberOfBins, 0.02) aggregations = binning.Aggregations(analysisConfig.powerCurveMinimumCount) dataFrame[windSpeedBin] = dataFrame[self.hubWindSpeed].map(windSpeedBins.binCenter) dataFrame[turbulenceBin] = dataFrame[self.hubTurbulence].map(turbulenceBins.binCenter) self.residualWindSpeedMatrix = DeviationMatrix( dataFrame[self.residualWindSpeed].groupby([dataFrame[windSpeedBin], dataFrame[turbulenceBin]]).aggregate(aggregations.average), dataFrame[self.residualWindSpeed].groupby([dataFrame[windSpeedBin], dataFrame[turbulenceBin]]).count()) else: self.residualWindSpeedMatrix = None else: dataFrame[self.hubWindSpeed] = dataFrame[config.hubWindSpeed] if (config.hubTurbulence != ''): dataFrame[self.hubTurbulence] = dataFrame[config.hubTurbulence] else: dataFrame[self.hubTurbulence] = dataFrame[config.referenceWindSpeedStdDev] / dataFrame[self.hubWindSpeedForTurbulence] self.residualWindSpeedMatrix = None if self.shearCalibration and config.shearCalibrationMethod != "Reference": self.shearCalibrationCalculator = self.createShearCalibration(dataFrame,config, config.timeStepInSeconds) dataFrame[self.shearExponent] = dataFrame.apply(self.shearCalibrationCalculator.turbineValue, axis=1) if config.calculateDensity: dataFrame[self.hubDensity] = 100.0 * dataFrame[config.pressure] / (273.15 + dataFrame[config.temperature]) / 287.058 self.hasDensity = True else: if config.density != None: dataFrame[self.hubDensity] = dataFrame[config.density] self.hasDensity = True else: self.hasDensity = False if config.power != None: dataFrame[self.actualPower] = dataFrame[config.power] self.hasActualPower = True else: self.hasActualPower = False if self.hasAllPowers: dataFrame[self.powerMin] = dataFrame[config.powerMin] dataFrame[self.powerMax] = 
dataFrame[config.powerMax] dataFrame[self.powerSD] = dataFrame[config.powerSD] dataFrame = self.filterDataFrame(dataFrame, config.filters) dataFrame = self.excludeData(dataFrame, config) if self.rewsDefined: dataFrame = self.defineREWS(dataFrame, config, rotorGeometry) self.fullDataFrame = dataFrame.copy() self.dataFrame = self.extractColumns(dataFrame).dropna() if self.windDirection in self.dataFrame.columns: self.fullDataFrame[self.windDirection] = self.fullDataFrame[self.windDirection].astype(float) self.analysedDirections = (round(self.fullDataFrame[self.windDirection].min() + config.referenceWindDirectionOffset), round(self.fullDataFrame[self.windDirection].max()+config.referenceWindDirectionOffset)) def createShearCalibration(self, dataFrame, config, timeStepInSeconds): df = dataFrame.copy() if config.shearCalibrationMethod == "Specified": raise NotImplementedError else: calibration = self.getCalibrationMethod(config.shearCalibrationMethod, self.referenceShearExponent, self.turbineShearExponent, timeStepInSeconds, dataFrame) if hasattr(self,"filteredCalibrationDataframe"): dataFrame = self.filteredCalibrationDataframe else: dataFrame = self.filterDataFrame(dataFrame, config.calibrationFilters) self.filteredCalibrationDataframe = dataFrame.copy() if config.calibrationStartDate != None and config.calibrationEndDate != None: dataFrame = dataFrame[config.calibrationStartDate : config.calibrationEndDate] dataFrame = dataFrame[calibration.requiredColumns + [self.referenceDirectionBin, config.referenceWindDirection]].dropna() if len(dataFrame) < 1: raise Exception("No data are available to carry out calibration.") siteCalibCalc = self.createSiteCalibrationCalculator(dataFrame, self.referenceShearExponent, calibration) dataFrame = df return siteCalibCalc def createCalibration(self, dataFrame, config, timeStepInSeconds): self.referenceDirectionBin = "Reference Direction Bin Centre" dataFrame[config.referenceWindDirection] = (dataFrame[config.referenceWindDirection] + config.referenceWindDirectionOffset) % 360 siteCalibrationBinWidth = 360.0 / config.siteCalibrationNumberOfSectors dataFrame[self.referenceDirectionBin] = (dataFrame[config.referenceWindDirection] - config.siteCalibrationCenterOfFirstSector) / siteCalibrationBinWidth dataFrame[self.referenceDirectionBin] = np.round(dataFrame[self.referenceDirectionBin], 0) * siteCalibrationBinWidth + config.siteCalibrationCenterOfFirstSector dataFrame[self.referenceDirectionBin] = (dataFrame[self.referenceDirectionBin] + 360) % 360 #dataFrame[self.referenceDirectionBin] -= float(config.siteCalibrationCenterOfFirstSector) if config.calibrationMethod == "Specified": if all([dir in config.calibrationSlopes.keys() for dir in config.calibrationActives.keys()]): print "Applying Specified calibration" print "Direction\tSlope\tOffset\tApplicable Datapoints" for direction in config.calibrationSlopes: if config.calibrationActives[direction]: mask = (dataFrame[self.referenceDirectionBin] == direction) dataCount = dataFrame[mask][self.referenceDirectionBin].count() print "%0.2f\t%0.2f\t%0.2f\t%d" % (direction, config.calibrationSlopes[direction], config.calibrationOffsets[direction], dataCount) df = pd.DataFrame([config.calibrationSlopes, config.calibrationOffsets], index=['Slope','Offset']).T return SiteCalibrationCalculator( self.referenceDirectionBin, config.referenceWindSpeed,df, actives = config.calibrationActives) else: raise Exception("The specified slopes have different bin centres to that specified by siteCalibrationCenterOfFirstSector which is: 
{0}".format(config.siteCalibrationCenterOfFirstSector)) else: df = dataFrame.copy() calibration = self.getCalibrationMethod(config.calibrationMethod,config.referenceWindSpeed, config.turbineLocationWindSpeed, timeStepInSeconds, dataFrame) if config.calibrationStartDate != None and config.calibrationEndDate != None: dataFrame = dataFrame[config.calibrationStartDate : config.calibrationEndDate] dataFrame = self.filterDataFrame(dataFrame, config.calibrationFilters) self.filteredCalibrationDataframe = dataFrame.copy() dataFrame = dataFrame[calibration.requiredColumns + [self.referenceDirectionBin, config.referenceWindDirection]].dropna() if len(dataFrame) < 1: raise Exception("No data are available to carry out calibration.") siteCalibCalc = self.createSiteCalibrationCalculator(dataFrame,config.referenceWindSpeed, calibration) dataFrame = df return siteCalibCalc def getCalibrationMethod(self,calibrationMethod,referenceColumn, turbineLocationColumn, timeStepInSeconds, dataFrame): if calibrationMethod == "RatioOfMeans": calibration = RatioOfMeans(referenceColumn, turbineLocationColumn) elif calibrationMethod == "LeastSquares": calibration = LeastSquares(referenceColumn, turbineLocationColumn) elif calibrationMethod == "York": calibration = York(referenceColumn, turbineLocationColumn, timeStepInSeconds, dataFrame) else: raise Exception("Calibration method not recognised: %s" % calibrationMethod) return calibration def createSiteCalibrationCalculator(self,dataFrame, valueColumn, calibration ): groups = dataFrame[calibration.requiredColumns].groupby(dataFrame[self.referenceDirectionBin]) slopes = {} intercepts = {} counts = {} belowAbove = {} sigA = {} sigB = {} cov = {} corr = {} vRatio= {} for group in groups: directionBinCenter = group[0] sectorDataFrame = group[1].dropna() if len(sectorDataFrame.index)>1: slopes[directionBinCenter] = calibration.slope(sectorDataFrame) intercepts[directionBinCenter] = calibration.intercept(sectorDataFrame, slopes[directionBinCenter]) counts[directionBinCenter] = sectorDataFrame[valueColumn].count() try: sigA[directionBinCenter] = calibration.sigA(sectorDataFrame,slopes[directionBinCenter], intercepts[directionBinCenter], counts[directionBinCenter]) # 'ErrInGradient' sigB[directionBinCenter] = calibration.sigB(sectorDataFrame,slopes[directionBinCenter], intercepts[directionBinCenter], counts[directionBinCenter]) # 'ErrInIntercept' #cov[directionBinCenter] = calibration.covariance(sectorDataFrame, calibration.x,calibration.y ) cov[directionBinCenter] = sigA[directionBinCenter]*sigB[directionBinCenter]*(-1.0 * sectorDataFrame[calibration.x].sum())/((counts[directionBinCenter] * (sectorDataFrame[calibration.x]**2).sum())**0.5) corr[directionBinCenter] =sectorDataFrame[[calibration.x, calibration.y]].corr()[calibration.x][calibration.y] vRatio[directionBinCenter] = (sectorDataFrame[calibration.y]/sectorDataFrame[calibration.x]).mean()# T_A1/R_A1 - this is currently mean of all data except: pass if valueColumn == self.hubWindSpeedForTurbulence: belowAbove[directionBinCenter] = (sectorDataFrame[sectorDataFrame[valueColumn] <= 8.0][valueColumn].count(),sectorDataFrame[sectorDataFrame[valueColumn] > 8.0][valueColumn].count()) calibrationSectorDataframe = pd.DataFrame([slopes,intercepts,counts, sigA, sigB, cov, corr, vRatio], ["Slope","Offset","Count","SigA","SigB","Cov","Corr","vRatio"] ).T if len(belowAbove.keys()): calibrationSectorDataframe['belowAbove'] = belowAbove.values() print calibrationSectorDataframe return SiteCalibrationCalculator(self.referenceDirectionBin, 
valueColumn, calibrationSectorDataframe) def isValidText(self, text): if text == None: return False return len(text) > 0 def excludeData(self, dataFrame, config): mask = pd.Series([True]*len(dataFrame),index=dataFrame.index) print "Data set length prior to exclusions: {0}".format(len(mask[mask])) for exclusion in config.exclusions: startDate = exclusion[0] endDate = exclusion[1] active = exclusion[2] if active: subMask = (dataFrame[self.timeStamp] >= startDate) & (dataFrame[self.timeStamp] <= endDate) mask = mask & ~subMask print "Applied exclusion: {0} to {1}\n\t- data set length: {2}".format(exclusion[0].strftime("%Y-%m-%d %H:%M"),exclusion[1].strftime("%Y-%m-%d %H:%M"),len(mask[mask])) print "Data set length after exclusions: {0}".format(len(mask[mask])) return dataFrame[mask] def extractColumns(self, dataFrame): requiredCols = [] requiredCols.append(self.nameColumn) requiredCols.append(self.timeStamp) requiredCols.append(self.hubWindSpeed) requiredCols.append(self.hubTurbulence) if self.hasDensity: requiredCols.append(self.hubDensity) if self.hasShear: requiredCols.append(self.shearExponent) if self.hasDirection: requiredCols.append(self.windDirection) if self.rewsDefined: requiredCols.append(self.profileRotorWindSpeed) requiredCols.append(self.profileHubWindSpeed) requiredCols.append(self.profileHubToRotorRatio) requiredCols.append(self.profileHubToRotorDeviation) if self.hasAllPowers: requiredCols.append(self.powerMin) requiredCols.append(self.powerMax) requiredCols.append(self.powerSD) if self.hasActualPower: requiredCols.append(self.actualPower) for col in self.sensitivityDataColumns: if col not in requiredCols: requiredCols.append(col) if len(dataFrame[requiredCols].dropna()[requiredCols[0]]) > 0: return dataFrame[requiredCols] else: print "Number of null columns:" print dataFrame[requiredCols].isnull().sum() text = "One of the required columns is empty.\n" for col in requiredCols: text += "- %s: %d\n" % (col, dataFrame[col].dropna().count()) raise Exception(text) def createDerivedColumn(self,df,cols): d = df.copy() d['Derived'] = 1 for col in cols: d['Derived'] *= ((df[col[0]]*float(col[1]))+float(col[2]))**float(col[3]) return d['Derived'] def applyToDFilter(self,mask,componentFilter,dataFrame,printMsg=True): startTime = (dataFrame.index - datetime.timedelta(seconds=self.timeStepInSeconds)) endTime = dataFrame.index # explicit assumption is that we're using end format data. 
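# Note on the time-of-day filter built below: each timestamp marks the END of
# an averaging interval, so the interval start is recovered by stepping back one
# timeStepInSeconds. The exclusion is then assembled from three masks:
#   - dayMask:   the timestamp's ISO weekday is listed in componentFilter.daysOfTheWeek
#   - todMask:   the whole interval [startTime, endTime] lies inside the filter's
#                startTime/endTime time-of-day window
#   - monthMask: (only if componentFilter.months is non-empty) the timestamp's
#                month is listed in componentFilter.months
# Rows matching all applicable masks are OR-ed into `mask`, and filterDataFrame
# later drops every row where the combined mask is True.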
dayMask = dataFrame[self.timeStamp].apply(lambda x,d : True if x.isoweekday() in d else False, args=[componentFilter.daysOfTheWeek] ) todMask = np.logical_and( startTime.time >= componentFilter.startTime.time(), endTime.time <= componentFilter.endTime.time() ) if len(componentFilter.months) > 0: monthMask = dataFrame[self.timeStamp].apply(lambda x,d : True if x.month in d else False, args=[componentFilter.months] ) dayMask = dayMask & monthMask totalMask = dayMask & todMask mask = mask | totalMask if printMsg: print "Applied filter:", str(componentFilter) return mask.copy() def applySimpleFilter(self,mask,componentFilter,dataFrame,printMsg=True): filterColumn = componentFilter.column filterType = componentFilter.filterType filterInclusive = componentFilter.inclusive if not componentFilter.derived: filterValue = componentFilter.value else: filterValue = self.createDerivedColumn(dataFrame,componentFilter.value) #print (filterColumn, filterType, filterInclusive, filterValue) if filterType.lower() == "below": mask = self.addFilterBelow(dataFrame, mask, filterColumn, filterValue, filterInclusive) elif filterType.lower() == "above": mask = self.addFilterAbove(dataFrame, mask, filterColumn, filterValue, filterInclusive) elif filterType.lower() == "aboveorbelow" or filterType.lower() == "notequal": mask = self.addFilterBelow(dataFrame, mask, filterColumn, filterValue, filterInclusive) mask = self.addFilterAbove(dataFrame, mask, filterColumn, filterValue, filterInclusive) else: raise Exception("Filter type not recognised: %s" % filterType) if printMsg: print "Applied Filter:{col}-{typ}-{val}\n\tData set length:{leng}".format( col=filterColumn,typ=filterType,val="Derived Column" if type(filterValue) == pd.Series else filterValue,leng=len(mask[~mask])) return mask.copy() def applyRelationshipFilter(self, mask, componentFilter, dataFrame): filterConjunction = componentFilter.conjunction if filterConjunction not in ("AND","OR"): raise NotImplementedError("Filter conjunction not implemented, please use AND or OR...") filterConjuction = np.logical_or if filterConjunction == "OR" else np.logical_and masks = [] newMask = pd.Series([False]*len(mask),index=mask.index) if len(componentFilter.clauses) < 2: raise Exception("Number of clauses in a relationship must be > 1") for filter in componentFilter.clauses: filterMask = self.applySimpleFilter(newMask,filter,dataFrame,printMsg=False) masks.append(filterMask) baseMask = masks[0] for filterMask in masks[1:]: baseMask = filterConjuction(baseMask,filterMask) # only if commutative (e.g. 
AND / OR) mask = np.logical_or(mask,baseMask) print "Applied Relationship (AND/OR) Filter:\n\tData set length:{leng}".format(leng=len(mask[~mask])) return mask.copy() def filterDataFrame(self, dataFrame, filters): if len(filters) < 1: return dataFrame print "" print "Filter Details" print "Derived\tColumn\tFilterType\tInclusive\tValue" for componentFilter in filters: if componentFilter.active: componentFilter.printSummary() print "" mask = pd.Series([False]*len(dataFrame),index=dataFrame.index) print "Data set length prior to filtering: {0}".format(len(mask[~mask])) print "" for componentFilter in filters: if componentFilter.active: if not componentFilter.applied: try: if hasattr(componentFilter,"startTime"): mask = self.applyToDFilter(mask,componentFilter,dataFrame) elif hasattr(componentFilter, "clauses"): mask = self.applyRelationshipFilter(mask, componentFilter, dataFrame) else: mask = self.applySimpleFilter(mask,componentFilter,dataFrame) print dataFrame[~mask][self.timeStamp].min() , " to " , dataFrame[~mask][self.timeStamp].max() componentFilter.applied = True except: componentFilter.applied = False print "" return dataFrame[~mask] def addFilterBelow(self, dataFrame, mask, filterColumn, filterValue, filterInclusive): if filterInclusive: return mask | (dataFrame[filterColumn] <= filterValue) else: return mask | (dataFrame[filterColumn] < filterValue) def addFilterAbove(self, dataFrame, mask, filterColumn, filterValue, filterInclusive): if filterInclusive: return mask | (dataFrame[filterColumn] >= filterValue) else: return mask | (dataFrame[filterColumn] > filterValue) def defineREWS(self, dataFrame, config, rotorGeometry): profileLevels = rews.ProfileLevels(rotorGeometry, config.windSpeedLevels) if config.rotorMode == "EvenlySpacedLevels": self.rotor = rews.EvenlySpacedRotor(rotorGeometry, config.numberOfRotorLevels) elif config.rotorMode == "ProfileLevels": self.rotor = rews.ProfileLevelsRotor(rotorGeometry, profileLevels) else: raise Exception("Unknown rotor mode: % s" % config.rotorMode) rotorEquivalentWindSpeedCalculator = rews.RotorEquivalentWindSpeed(profileLevels, self.rotor) if config.hubMode == "Interpolated": profileHubWindSpeedCalculator = rews.InterpolatedHubWindSpeed(profileLevels, rotorGeometry) elif config.hubMode == "PiecewiseExponent": profileHubWindSpeedCalculator = rews.PiecewiseExponentHubWindSpeed(profileLevels, rotorGeometry) else: raise Exception("Unknown hub mode: % s" % config.hubMode) dataFrame[self.profileHubWindSpeed] = dataFrame.apply(profileHubWindSpeedCalculator.hubWindSpeed, axis=1) dataFrame[self.profileRotorWindSpeed] = dataFrame.apply(rotorEquivalentWindSpeedCalculator.rotorWindSpeed, axis=1) dataFrame[self.profileHubToRotorRatio] = dataFrame[self.profileRotorWindSpeed] / dataFrame[self.profileHubWindSpeed] dataFrame[self.profileHubToRotorDeviation] = dataFrame[self.profileHubToRotorRatio] - 1.0 return dataFrame
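# --- Worked sketch (not part of the module above) ---
# Minimal standalone illustration of the per-sector calibration arithmetic used
# above: LeastSquares.slope fits y = slope*x + c with np.linalg.lstsq,
# CalibrationBase.intercept takes mean(y) - slope*mean(x), and
# SiteCalibrationCalculator.calibrate then maps a reference speed through
# offset + slope * value. The numbers below are made up for illustration.
import numpy as np


def _example_sector_calibration():
    reference = np.array([4.0, 5.0, 6.0, 7.0, 8.0])   # reference mast wind speed
    turbine = np.array([4.2, 5.3, 6.1, 7.4, 8.2])     # turbine-location wind speed

    # Fit turbine = slope * reference + c (same A-matrix shape as LeastSquares.slope).
    A = np.vstack([reference, np.ones(len(reference))]).T
    slope = np.linalg.lstsq(A, turbine)[0][0]

    # Offset as in CalibrationBase.intercept.
    offset = turbine.mean() - slope * reference.mean()

    # Calibrated hub wind speed for one reference reading, as in calibrate().
    return offset + slope * 6.5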
import datetime import unittest import pymantic.rdf import pymantic.util from pymantic.primitives import * XSD = Prefix('http://www.w3.org/2001/XMLSchema#') RDF = Prefix("http://www.w3.org/1999/02/22-rdf-syntax-ns#") class TestRDF(unittest.TestCase): def tearDown(self): pymantic.rdf.MetaResource._classes = {} def testCurieURI(self): """Test CURIE parsing of explicit URIs.""" test_ns = {'http': Prefix('WRONG!'), 'urn': Prefix('WRONG!'),} self.assertEqual(pymantic.rdf.parse_curie('http://oreilly.com', test_ns), NamedNode('http://oreilly.com')) self.assertEqual(pymantic.rdf.parse_curie('urn:isbn:1234567890123', test_ns), NamedNode('urn:isbn:1234567890123')) def testCurieDefaultPrefix(self): """Test CURIE parsing of CURIEs in the default Prefix.""" test_ns = {'': Prefix('foo/'), 'wrong': Prefix('WRONG!')} self.assertEqual(pymantic.rdf.parse_curie('bar', test_ns), NamedNode('foo/bar')) self.assertEqual(pymantic.rdf.parse_curie('[bar]', test_ns), NamedNode('foo/bar')) self.assertEqual(pymantic.rdf.parse_curie('baz', test_ns), NamedNode('foo/baz')) self.assertEqual(pymantic.rdf.parse_curie('[aap]', test_ns), NamedNode('foo/aap')) def testCurieprefixes(self): """Test CURIE parsing of CURIEs in non-default prefixes.""" test_ns = {'': Prefix('WRONG!'), 'foo': Prefix('foobly/'), 'bar': Prefix('bardle/'), 'http': Prefix('reallybadidea/'),} self.assertEqual(pymantic.rdf.parse_curie('foo:aap', test_ns), NamedNode('foobly/aap')) self.assertEqual(pymantic.rdf.parse_curie('[bar:aap]', test_ns), NamedNode('bardle/aap')) self.assertEqual(pymantic.rdf.parse_curie('[foo:baz]', test_ns), NamedNode('foobly/baz')) self.assertEqual(pymantic.rdf.parse_curie('bar:baz', test_ns), NamedNode('bardle/baz')) self.assertEqual(pymantic.rdf.parse_curie('[http://oreilly.com]', test_ns), NamedNode('reallybadidea///oreilly.com')) def testCurieNoSuffix(self): """Test CURIE parsing of CURIEs with no suffix.""" pass def testUnparseableCuries(self): """Test some CURIEs that shouldn't parse.""" test_ns = {'foo': Prefix('WRONG!'),} self.assertRaises(ValueError, pymantic.rdf.parse_curie, '[bar]', test_ns) self.assertRaises(ValueError, pymantic.rdf.parse_curie, 'bar', test_ns) self.assertRaises(ValueError, pymantic.rdf.parse_curie, 'bar:baz', test_ns) self.assertRaises(ValueError, pymantic.rdf.parse_curie, '[bar:baz]', test_ns) def testMetaResourceNothingUseful(self): """Test applying a MetaResource to a class without anything it uses.""" class Foo(object): __metaclass__ = pymantic.rdf.MetaResource def testMetaResourceprefixes(self): """Test the handling of prefixes by MetaResource.""" class Foo(object): __metaclass__ = pymantic.rdf.MetaResource prefixes = {'foo': 'bar', 'baz': 'garply', 'meme': 'lolcatz!',} self.assertEqual(Foo.prefixes, {'foo': Prefix('bar'), 'baz': Prefix('garply'), 'meme': Prefix('lolcatz!'),}) def testMetaResourcePrefixInheritance(self): """Test the composition of Prefix dictionaries by MetaResource.""" class Foo(object): __metaclass__ = pymantic.rdf.MetaResource prefixes = {'foo': 'bar', 'baz': 'garply', 'meme': 'lolcatz!',} class Bar(Foo): prefixes = {'allyourbase': 'arebelongtous!', 'bunny': 'pancake',} self.assertEqual(Foo.prefixes, {'foo': Prefix('bar'), 'baz': Prefix('garply'), 'meme': Prefix('lolcatz!'),}) self.assertEqual(Bar.prefixes, {'foo': Prefix('bar'), 'baz': Prefix('garply'), 'meme': Prefix('lolcatz!'), 'allyourbase': Prefix('arebelongtous!'), 'bunny': Prefix('pancake'),}) def testMetaResourcePrefixInheritanceReplacement(self): """Test the composition of Prefix dictionaries by MetaResource where 
some prefixes on the parent get replaced.""" class Foo(object): __metaclass__ = pymantic.rdf.MetaResource prefixes = {'foo': 'bar', 'baz': 'garply', 'meme': 'lolcatz!',} class Bar(Foo): prefixes = {'allyourbase': 'arebelongtous!', 'bunny': 'pancake', 'foo': 'notbar', 'baz': 'notgarply',} self.assertEqual(Foo.prefixes, {'foo': Prefix('bar'), 'baz': Prefix('garply'), 'meme': Prefix('lolcatz!'),}) self.assertEqual(Bar.prefixes, {'foo': Prefix('notbar'), 'baz': Prefix('notgarply'), 'meme': Prefix('lolcatz!'), 'allyourbase': Prefix('arebelongtous!'), 'bunny': Prefix('pancake'),}) def testResourceEquality(self): graph = Graph() otherGraph = Graph() testResource = pymantic.rdf.Resource(graph, 'foo') self.assertEqual(testResource, pymantic.rdf.Resource( graph, 'foo')) self.assertEqual(testResource, NamedNode('foo')) self.assertEqual(testResource, 'foo') self.assertNotEqual(testResource, pymantic.rdf.Resource( graph, 'bar')) self.assertEqual(testResource, pymantic.rdf.Resource( otherGraph, 'foo')) self.assertNotEqual(testResource, NamedNode('bar')) self.assertNotEqual(testResource, 'bar') self.assertNotEqual(testResource, 42) def testClassification(self): """Test classification of a resource.""" @pymantic.rdf.register_class('gr:Offering') class Offering(pymantic.rdf.Resource): prefixes = { 'gr': 'http://purl.org/goodrelations/', } test_subject = NamedNode('http://example.com/athing') graph = Graph() graph.add(Triple(test_subject, Offering.resolve('rdf:type'), Offering.resolve('gr:Offering'))) offering = pymantic.rdf.Resource.classify(graph, test_subject) self.assert_(isinstance(offering, Offering)) def testMulticlassClassification(self): """Test classification of a resource that matches multiple registered classes.""" @pymantic.rdf.register_class('foaf:Organization') class Organization(pymantic.rdf.Resource): prefixes = { 'foaf': 'http://xmlns.com/foaf/0.1/', } @pymantic.rdf.register_class('foaf:Group') class Group(pymantic.rdf.Resource): prefixes = { 'foaf': 'http://xmlns.com/foaf/0.1/', } test_subject1 = NamedNode('http://example.com/aorganization') test_subject2 = NamedNode('http://example.com/agroup') test_subject3 = NamedNode('http://example.com/aorgandgroup') graph = Graph() graph.add(Triple(test_subject1, Organization.resolve('rdf:type'), Organization.resolve('foaf:Organization'))) graph.add(Triple(test_subject2, Group.resolve('rdf:type'), Group.resolve('foaf:Group'))) graph.add(Triple(test_subject3, Organization.resolve('rdf:type'), Organization.resolve('foaf:Organization'))) graph.add(Triple(test_subject3, Organization.resolve('rdf:type'), Organization.resolve('foaf:Group'))) organization = pymantic.rdf.Resource.classify(graph, test_subject1) group = pymantic.rdf.Resource.classify(graph, test_subject2) both = pymantic.rdf.Resource.classify(graph, test_subject3) self.assert_(isinstance(organization, Organization)) self.assertFalse(isinstance(organization, Group)) self.assertFalse(isinstance(group, Organization)) self.assert_(isinstance(group, Group)) self.assert_(isinstance(both, Organization)) self.assert_(isinstance(both, Group)) def testStr(self): """Test str-y serialization of Resources.""" graph = Graph() test_subject1 = NamedNode('http://example.com/aorganization') test_label = Literal('Test Label', language='en') graph.add(Triple(test_subject1, pymantic.rdf.Resource.resolve('rdfs:label'), test_label)) r = pymantic.rdf.Resource(graph, test_subject1) self.assertEqual(r['rdfs:label'], test_label) self.assertEqual(str(r), test_label.value) def testGetSetDelPredicate(self): """Test 
getting, setting, and deleting a multi-value predicate.""" graph = Graph() test_subject1 = NamedNode('http://example.com/') r = pymantic.rdf.Resource(graph, test_subject1) r['rdfs:example'] = set(('foo', 'bar')) example_values = set(r['rdfs:example']) print example_values self.assert_(Literal('foo') in example_values) self.assert_(Literal('bar') in example_values) self.assertEqual(len(example_values), 2) del r['rdfs:example'] example_values = set(r['rdfs:example']) self.assertEqual(len(example_values), 0) def testGetSetDelScalarPredicate(self): """Test getting, setting, and deleting a scalar predicate.""" graph = Graph() test_subject1 = NamedNode('http://example.com/') r = pymantic.rdf.Resource(graph, test_subject1) r['rdfs:label'] = 'foo' self.assertEqual(r['rdfs:label'], Literal('foo', language='en')) del r['rdfs:label'] self.assertEqual(r['rdfs:label'], None) def testGetSetDelPredicateLanguage(self): """Test getting, setting and deleting a multi-value predicate with an explicit language.""" graph = Graph() test_subject1 = NamedNode('http://example.com/') r = pymantic.rdf.Resource(graph, test_subject1) r['rdfs:example', 'en'] = set(('baz',)) r['rdfs:example', 'fr'] = set(('foo', 'bar')) example_values = set(r['rdfs:example', 'fr']) self.assert_(Literal('foo', language='fr') in example_values) self.assert_(Literal('bar', language='fr') in example_values) self.assert_(Literal('baz', language='en') not in example_values) self.assertEqual(len(example_values), 2) example_values = set(r['rdfs:example', 'en']) self.assert_(Literal('foo', language='fr') not in example_values) self.assert_(Literal('bar', language='fr') not in example_values) self.assert_(Literal('baz', language='en') in example_values) self.assertEqual(len(example_values), 1) del r['rdfs:example', 'fr'] example_values = set(r['rdfs:example', 'fr']) self.assertEqual(len(example_values), 0) example_values = set(r['rdfs:example', 'en']) self.assert_(Literal('foo', language='fr') not in example_values) self.assert_(Literal('bar', language='fr') not in example_values) self.assert_(Literal('baz', language='en') in example_values) self.assertEqual(len(example_values), 1) def testGetSetDelScalarPredicateLanguage(self): """Test getting, setting, and deleting a scalar predicate with an explicit language.""" graph = Graph() test_subject1 = NamedNode('http://example.com/') r = pymantic.rdf.Resource(graph, test_subject1) r['rdfs:label'] = 'foo' r['rdfs:label', 'fr'] = 'bar' self.assertEqual(r['rdfs:label'], Literal('foo', language='en')) self.assertEqual(r['rdfs:label', 'en'], Literal('foo', language='en')) self.assertEqual(r['rdfs:label', 'fr'], Literal('bar', language='fr')) del r['rdfs:label'] self.assertEqual(r['rdfs:label'], None) self.assertEqual(r['rdfs:label', 'en'], None) self.assertEqual(r['rdfs:label', 'fr'], Literal('bar', language='fr')) def testGetSetDelPredicateDatatype(self): """Test getting, setting and deleting a multi-value predicate with an explicit datatype.""" graph = Graph() test_subject1 = NamedNode('http://example.com/') r = pymantic.rdf.Resource(graph, test_subject1) now = datetime.datetime.now() then = datetime.datetime.now() - datetime.timedelta(days=1) number = 42 r['rdfs:example', XSD('integer')] = set((number,)) r['rdfs:example', XSD('dateTime')] = set((now, then,)) example_values = set(r['rdfs:example', XSD('dateTime')]) print example_values self.assert_(Literal(now) in example_values) self.assert_(Literal(then) in example_values) self.assert_(Literal(number) not in example_values) 
self.assertEqual(len(example_values), 2) example_values = set(r['rdfs:example', XSD('integer')]) self.assert_(Literal(now) not in example_values) self.assert_(Literal(then) not in example_values) self.assert_(Literal(number) in example_values) self.assertEqual(len(example_values), 1) del r['rdfs:example', XSD('dateTime')] example_values = set(r['rdfs:example', XSD('dateTime')]) self.assertEqual(len(example_values), 0) example_values = set(r['rdfs:example', XSD('integer')]) self.assert_(Literal(now) not in example_values) self.assert_(Literal(then) not in example_values) self.assert_(Literal(number) in example_values) self.assertEqual(len(example_values), 1) def testGetSetDelScalarPredicateDatatype(self): """Test getting, setting, and deleting a scalar predicate with an explicit datatype.""" graph = Graph() test_subject1 = NamedNode('http://example.com/') r = pymantic.rdf.Resource(graph, test_subject1) now = datetime.datetime.now() number = 42 r['rdfs:label', XSD('integer')] = number self.assertEqual(r['rdfs:label', XSD('integer')], Literal(number, datatype=XSD('integer'))) self.assertEqual(r['rdfs:label', XSD('dateTime')], None) self.assertEqual(r['rdfs:label'], Literal(number, datatype=XSD('integer'))) r['rdfs:label', XSD('dateTime')] = now self.assertEqual(r['rdfs:label', XSD('dateTime')], Literal(now)) self.assertEqual(r['rdfs:label', XSD('integer')], None) self.assertEqual(r['rdfs:label'], Literal(now)) del r['rdfs:label', XSD('integer')] self.assertEqual(r['rdfs:label', XSD('dateTime')], Literal(now)) self.assertEqual(r['rdfs:label', XSD('integer')], None) self.assertEqual(r['rdfs:label'], Literal(now)) del r['rdfs:label', XSD('dateTime')] self.assertEqual(r['rdfs:label'], None) r['rdfs:label', XSD('integer')] = number self.assertEqual(r['rdfs:label', XSD('integer')], Literal(number, datatype=XSD('integer'))) self.assertEqual(r['rdfs:label', XSD('dateTime')], None) self.assertEqual(r['rdfs:label'], Literal(number, datatype=XSD('integer'))) del r['rdfs:label'] self.assertEqual(r['rdfs:label'], None) def testGetSetDelPredicateType(self): """Test getting, setting and deleting a multi-value predicate with an explicit expected RDF Class.""" graph = Graph() test_subject1 = NamedNode('http://example.com/offering') test_subject2 = NamedNode('http://example.com/aposi1') test_subject3 = NamedNode('http://example.com/aposi2') test_subject4 = NamedNode('http://example.com/possip1') shared_prefixes = { 'gr': 'http://purl.org/goodrelations/', } @pymantic.rdf.register_class('gr:Offering') class Offering(pymantic.rdf.Resource): prefixes = shared_prefixes @pymantic.rdf.register_class('gr:ActualProductOrServiceInstance') class ActualProduct(pymantic.rdf.Resource): prefixes = shared_prefixes @pymantic.rdf.register_class('gr:ProductOrServicesSomeInstancesPlaceholder') class PlaceholderProduct(pymantic.rdf.Resource): prefixes = shared_prefixes offering = Offering.new(graph, test_subject1) aposi1 = ActualProduct.new(graph, test_subject2) aposi2 = ActualProduct.new(graph, test_subject3) possip1 = PlaceholderProduct.new(graph, test_subject4) offering['gr:includes', ActualProduct] = set((aposi1, aposi2,)) offering['gr:includes', PlaceholderProduct] = set((possip1,)) example_values = set(offering['gr:includes', ActualProduct]) self.assert_(aposi1 in example_values) self.assert_(aposi2 in example_values) self.assert_(possip1 not in example_values) self.assertEqual(len(example_values), 2) example_values = set(offering['gr:includes', PlaceholderProduct]) self.assert_(aposi1 not in example_values) 
self.assert_(aposi2 not in example_values) self.assert_(possip1 in example_values) self.assertEqual(len(example_values), 1) del offering['gr:includes', ActualProduct] example_values = set(offering['gr:includes', ActualProduct]) self.assertEqual(len(example_values), 0) example_values = set(offering['gr:includes', PlaceholderProduct]) self.assert_(aposi1 not in example_values) self.assert_(aposi2 not in example_values) self.assert_(possip1 in example_values) self.assertEqual(len(example_values), 1) def testGetSetDelScalarPredicateType(self): """Test getting, setting, and deleting a scalar predicate with an explicit language.""" graph = Graph() test_subject1 = NamedNode('http://example.com/offering') test_subject2 = NamedNode('http://example.com/aposi') test_subject4 = NamedNode('http://example.com/possip') shared_prefixes = { 'gr': 'http://purl.org/goodrelations/', } @pymantic.rdf.register_class('gr:Offering') class Offering(pymantic.rdf.Resource): prefixes = shared_prefixes scalars = frozenset(('gr:includes',)) @pymantic.rdf.register_class('gr:ActualProductOrServiceInstance') class ActualProduct(pymantic.rdf.Resource): prefixes = shared_prefixes @pymantic.rdf.register_class('gr:ProductOrServicesSomeInstancesPlaceholder') class PlaceholderProduct(pymantic.rdf.Resource): prefixes = shared_prefixes offering = Offering.new(graph, test_subject1) aposi1 = ActualProduct.new(graph, test_subject2) possip1 = PlaceholderProduct.new(graph, test_subject4) offering['gr:includes', ActualProduct] = aposi1 self.assertEqual(aposi1, offering['gr:includes', ActualProduct]) self.assertEqual(None, offering['gr:includes', PlaceholderProduct]) self.assertEqual(aposi1, offering['gr:includes']) offering['gr:includes', PlaceholderProduct] = possip1 self.assertEqual(None, offering['gr:includes', ActualProduct]) self.assertEqual(possip1, offering['gr:includes', PlaceholderProduct]) self.assertEqual(possip1, offering['gr:includes']) del offering['gr:includes', ActualProduct] self.assertEqual(offering['gr:includes', ActualProduct], None) self.assertEqual(possip1, offering['gr:includes', PlaceholderProduct]) del offering['gr:includes', PlaceholderProduct] self.assertEqual(offering['gr:includes', ActualProduct], None) self.assertEqual(offering['gr:includes', PlaceholderProduct], None) offering['gr:includes', ActualProduct] = aposi1 self.assertEqual(aposi1, offering['gr:includes', ActualProduct]) self.assertEqual(None, offering['gr:includes', PlaceholderProduct]) self.assertEqual(aposi1, offering['gr:includes']) del offering['gr:includes'] self.assertEqual(None, offering['gr:includes', ActualProduct]) self.assertEqual(None, offering['gr:includes', PlaceholderProduct]) self.assertEqual(None, offering['gr:includes']) def testSetMixedScalarPredicate(self): """Test getting and setting a scalar predicate with mixed typing.""" graph = Graph() test_subject1 = NamedNode('http://example.com/offering') test_subject2 = NamedNode('http://example.com/aposi') shared_prefixes = { 'gr': 'http://purl.org/goodrelations/', } @pymantic.rdf.register_class('gr:Offering') class Offering(pymantic.rdf.Resource): prefixes = shared_prefixes scalars = frozenset(('gr:includes',)) @pymantic.rdf.register_class('gr:ActualProductOrServiceInstance') class ActualProduct(pymantic.rdf.Resource): prefixes = shared_prefixes offering = Offering.new(graph, test_subject1) aposi1 = ActualProduct.new(graph, test_subject2) test_en = Literal('foo', language='en') test_fr = Literal('le foo', language='fr') test_dt = Literal('42', datatype = XSD('integer')) 
offering['gr:includes'] = aposi1 self.assertEqual(offering['gr:includes'], aposi1) offering['gr:includes'] = test_dt self.assertEqual(offering['gr:includes'], test_dt) self.assertEqual(offering['gr:includes', ActualProduct], None) offering['gr:includes'] = test_en self.assertEqual(offering['gr:includes', ActualProduct], None) self.assertEqual(offering['gr:includes', XSD('integer')], None) self.assertEqual(offering['gr:includes'], test_en) self.assertEqual(offering['gr:includes', 'en'], test_en) self.assertEqual(offering['gr:includes', 'fr'], None) offering['gr:includes'] = test_fr self.assertEqual(offering['gr:includes', ActualProduct], None) self.assertEqual(offering['gr:includes', XSD('integer')], None) self.assertEqual(offering['gr:includes'], test_en) self.assertEqual(offering['gr:includes', 'en'], test_en) self.assertEqual(offering['gr:includes', 'fr'], test_fr) offering['gr:includes'] = aposi1 self.assertEqual(offering['gr:includes'], aposi1) self.assertEqual(offering['gr:includes', XSD('integer')], None) self.assertEqual(offering['gr:includes', 'en'], None) self.assertEqual(offering['gr:includes', 'fr'], None) def testResourcePredicate(self): """Test instantiating a class when accessing a predicate.""" @pymantic.rdf.register_class('gr:Offering') class Offering(pymantic.rdf.Resource): prefixes = { 'gr': 'http://purl.org/goodrelations/', } @pymantic.rdf.register_class('gr:PriceSpecification') class PriceSpecification(pymantic.rdf.Resource): prefixes = { 'gr': 'http://purl.org/goodrelations/', } test_subject1 = NamedNode('http://example.com/offering') test_subject2 = NamedNode('http://example.com/price') graph = Graph() graph.add(Triple(test_subject1, Offering.resolve('rdf:type'), Offering.resolve('gr:Offering'))) graph.add(Triple(test_subject1, Offering.resolve('gr:hasPriceSpecification'), test_subject2)) graph.add(Triple(test_subject2, PriceSpecification.resolve('rdf:type'), PriceSpecification.resolve('gr:PriceSpecification'))) offering = Offering(graph, test_subject1) price_specification = PriceSpecification(graph, test_subject2) prices = set(offering['gr:hasPriceSpecification']) self.assertEqual(len(prices), 1) self.assert_(price_specification in prices) def testResourcePredicateAssignment(self): """Test assigning an instance of a resource to a predicate.""" @pymantic.rdf.register_class('gr:Offering') class Offering(pymantic.rdf.Resource): prefixes = { 'gr': 'http://purl.org/goodrelations/', } @pymantic.rdf.register_class('gr:PriceSpecification') class PriceSpecification(pymantic.rdf.Resource): prefixes = { 'gr': 'http://purl.org/goodrelations/', } test_subject1 = NamedNode('http://example.com/offering') test_subject2 = NamedNode('http://example.com/price') graph = Graph() graph.add(Triple(test_subject1, Offering.resolve('rdf:type'), Offering.resolve('gr:Offering'))) graph.add(Triple(test_subject2, PriceSpecification.resolve('rdf:type'), PriceSpecification.resolve('gr:PriceSpecification'))) offering = Offering(graph, test_subject1) price_specification = PriceSpecification(graph, test_subject2) before_prices = set(offering['gr:hasPriceSpecification']) self.assertEqual(len(before_prices), 0) offering['gr:hasPriceSpecification'] = price_specification after_prices = set(offering['gr:hasPriceSpecification']) self.assertEqual(len(after_prices), 1) self.assert_(price_specification in after_prices) def testNewResource(self): """Test creating a new resource.""" graph = Graph() @pymantic.rdf.register_class('foaf:Person') class Person(pymantic.rdf.Resource): prefixes = { 'foaf': 
'http://xmlns.com/foaf/0.1/', } test_subject = NamedNode('http://example.com/') p = Person.new(graph, test_subject) def testGetAllResourcesInGraph(self): """Test iterating over all of the resources in a graph with a particular RDF type.""" @pymantic.rdf.register_class('gr:Offering') class Offering(pymantic.rdf.Resource): prefixes = { 'gr': 'http://purl.org/goodrelations/', } graph = Graph() test_subject_base = NamedNode('http://example.com/') for i in range(10): graph.add(Triple(NamedNode(test_subject_base + str(i)), Offering.resolve('rdf:type'), Offering.resolve('gr:Offering'))) offerings = Offering.in_graph(graph) self.assertEqual(len(offerings), 10) for i in range(10): this_subject = NamedNode(test_subject_base + str(i)) offering = Offering(graph, this_subject) self.assert_(offering in offerings) def testContained(self): """Test in against a multi-value predicate.""" graph = Graph() test_subject1 = NamedNode('http://example.com/') r = pymantic.rdf.Resource(graph, test_subject1) r['rdfs:example'] = set(('foo', 'bar')) self.assert_('rdfs:example' in r) self.assertFalse(('rdfs:example', 'en') in r) self.assertFalse(('rdfs:example', 'fr') in r) self.assertFalse('rdfs:examplefoo' in r) del r['rdfs:example'] self.assertFalse('rdfs:example' in r) self.assertFalse(('rdfs:example', 'en') in r) self.assertFalse(('rdfs:example', 'fr') in r) self.assertFalse('rdfs:examplefoo' in r) r['rdfs:example', 'fr'] = 'le foo' def testBack(self): """Test following a predicate backwards.""" @pymantic.rdf.register_class('gr:Offering') class Offering(pymantic.rdf.Resource): prefixes = { 'gr': 'http://purl.org/goodrelations/', } @pymantic.rdf.register_class('gr:PriceSpecification') class PriceSpecification(pymantic.rdf.Resource): prefixes = { 'gr': 'http://purl.org/goodrelations/', } graph = Graph() offering1 = Offering.new(graph, 'http://example.com/offering1') offering2 = Offering.new(graph, 'http://example.com/offering2') offering3 = Offering.new(graph, 'http://example.com/offering3') price1 = PriceSpecification.new(graph, 'http://example.com/price1') price2 = PriceSpecification.new(graph, 'http://example.com/price2') price3 = PriceSpecification.new(graph, 'http://example.com/price3') offering1['gr:hasPriceSpecification'] = set((price1, price2, price3,)) offering2['gr:hasPriceSpecification'] = set((price2, price3,)) self.assertEqual(set(price1.object_of(predicate='gr:hasPriceSpecification')), set((offering1,))) self.assertEqual(set(price2.object_of(predicate='gr:hasPriceSpecification')), set((offering1,offering2,))) self.assertEqual(set(price3.object_of(predicate='gr:hasPriceSpecification')), set((offering1,offering2,))) def testGetAllValues(self): """Test getting all values for a predicate.""" @pymantic.rdf.register_class('gr:Offering') class Offering(pymantic.rdf.Resource): prefixes = { 'gr': 'http://purl.org/goodrelations/', } en = Literal('foo', language='en') fr = Literal('bar', language='fr') es = Literal('baz', language='es') xsdstring = Literal('aap') xsddecimal = Literal('9.95', datatype=XSD('decimal')) graph = Graph() offering = Offering.new(graph, 'http://example.com/offering') offering['gr:description'] = set((en, fr, es,)) self.assertEqual(frozenset(offering['gr:description']), frozenset((en,fr,es,))) self.assertEqual(frozenset(offering['gr:description', 'en']), frozenset((en,))) self.assertEqual(frozenset(offering['gr:description', 'fr']), frozenset((fr,))) self.assertEqual(frozenset(offering['gr:description', 'es']), frozenset((es,))) self.assertEqual(frozenset(offering['gr:description', None]), 
frozenset((en, fr, es,))) offering['gr:description'] = set((xsdstring, xsddecimal,)) self.assertEqual(frozenset(offering['gr:description', '']), frozenset((xsdstring,))) self.assertEqual(frozenset(offering['gr:description', XSD('string')]), frozenset((xsdstring,))) self.assertEqual(frozenset(offering['gr:description', XSD('decimal')]), frozenset((xsddecimal,))) self.assertEqual(frozenset(offering['gr:description', None]), frozenset((xsdstring, xsddecimal,))) offering['gr:description'] = set((en, fr, es, xsdstring, xsddecimal,)) self.assertEqual(frozenset(offering['gr:description']), frozenset((en, fr, es, xsdstring, xsddecimal,))) self.assertEqual(frozenset(offering['gr:description', 'en']), frozenset((en,))) self.assertEqual(frozenset(offering['gr:description', 'fr']), frozenset((fr,))) self.assertEqual(frozenset(offering['gr:description', 'es']), frozenset((es,))) self.assertEqual(frozenset(offering['gr:description', '']), frozenset((xsdstring,))) self.assertEqual(frozenset(offering['gr:description', XSD('string')]), frozenset((xsdstring,))) self.assertEqual(frozenset(offering['gr:description', XSD('decimal')]), frozenset((xsddecimal,))) self.assertEqual(frozenset(offering['gr:description', None]), frozenset((en, fr, es, xsdstring, xsddecimal,))) def testGetAllValuesScalar(self): """Test getting all values for a predicate.""" @pymantic.rdf.register_class('gr:Offering') class Offering(pymantic.rdf.Resource): prefixes = { 'gr': 'http://purl.org/goodrelations/', } scalars = frozenset(('gr:description',)) en = Literal('foo', language='en') fr = Literal('bar', language='fr') es = Literal('baz', language='es') graph = Graph() offering = Offering.new(graph, 'http://example.com/offering') offering['gr:description'] = en offering['gr:description'] = fr offering['gr:description'] = es self.assertEqual(offering['gr:description'], en) self.assertEqual(offering['gr:description', 'en'], en) self.assertEqual(offering['gr:description', 'fr'], fr) self.assertEqual(offering['gr:description', 'es'], es) self.assertEqual(frozenset(offering['gr:description', None]), frozenset((en, fr, es,))) def testErase(self): """Test erasing an object from the graph.""" @pymantic.rdf.register_class('gr:Offering') class Offering(pymantic.rdf.Resource): prefixes = { 'gr': 'http://purl.org/goodrelations/', } scalars = frozenset(('gr:name',)) graph = Graph() offering1 = Offering.new(graph, 'http://example.com/offering1') offering2 = Offering.new(graph, 'http://example.com/offering2') offering1['gr:name'] = 'Foo' offering1['gr:description'] = set(('Baz', 'Garply',)) offering2['gr:name'] = 'Bar' offering2['gr:description'] = set(('Aap', 'Mies',)) self.assert_(offering1.is_a()) self.assert_(offering2.is_a()) offering1.erase() self.assertFalse(offering1.is_a()) self.assert_(offering2.is_a()) self.assertFalse(offering1['gr:name']) self.assertFalse(frozenset(offering1['gr:description'])) self.assertEqual(offering2['gr:name'], Literal('Bar', language='en')) def testUnboundClass(self): """Test classifying objects with one or more unbound classes.""" @pymantic.rdf.register_class('gr:Offering') class Offering(pymantic.rdf.Resource): prefixes = { 'gr': 'http://purl.org/goodrelations/', } graph = Graph() funky_class = NamedNode('http://example.com/AFunkyClass') funky_subject = NamedNode('http://example.com/aFunkyResource') offering1 = Offering.new(graph, 'http://example.com/offering1') graph.add(Triple(offering1.subject, RDF('type'), funky_class)) self.assertEqual(type(pymantic.rdf.Resource.classify(graph, offering1.subject)), Offering) 
graph.add(Triple(funky_subject, RDF('type'), funky_class)) self.assertEqual(type(pymantic.rdf.Resource.classify(graph, funky_subject)), pymantic.rdf.Resource)
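

# Illustrative sketch, not part of the test suite above: the dictionary-style
# pymantic.rdf.Resource API exercised by these tests, shown in isolation. The
# Graph/NamedNode import path is assumed to be pymantic.primitives; adjust it
# if this module imports them from somewhere else.
def _example_resource_usage():
    import pymantic.rdf
    from pymantic.primitives import Graph, NamedNode  # assumed import path

    @pymantic.rdf.register_class('gr:Offering')
    class Offering(pymantic.rdf.Resource):
        prefixes = {'gr': 'http://purl.org/goodrelations/'}
        scalars = frozenset(('gr:name',))

    graph = Graph()
    offering = Offering.new(graph, NamedNode('http://example.com/offering'))

    # Scalar predicates keep one value per language tag or datatype ...
    offering['gr:name'] = 'Example offering'
    # ... while multi-value predicates take and return sets of values.
    offering['gr:description'] = set(('foo', 'bar'))
    offering['gr:description', 'fr'] = set(('le foo',))

    return offering['gr:name'], set(offering['gr:description'])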
# coding: utf-8 """ No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: 1.0.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from datetime import datetime from pprint import pformat from six import iteritems import re class ProjectPage(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'page_index': 'int', 'page_size': 'int', 'total_pages': 'int', 'content': 'list[ProjectRest]' } attribute_map = { 'page_index': 'pageIndex', 'page_size': 'pageSize', 'total_pages': 'totalPages', 'content': 'content' } def __init__(self, page_index=None, page_size=None, total_pages=None, content=None): """ ProjectPage - a model defined in Swagger """ self._page_index = None self._page_size = None self._total_pages = None self._content = None if page_index is not None: self.page_index = page_index if page_size is not None: self.page_size = page_size if total_pages is not None: self.total_pages = total_pages if content is not None: self.content = content @property def page_index(self): """ Gets the page_index of this ProjectPage. :return: The page_index of this ProjectPage. :rtype: int """ return self._page_index @page_index.setter def page_index(self, page_index): """ Sets the page_index of this ProjectPage. :param page_index: The page_index of this ProjectPage. :type: int """ self._page_index = page_index @property def page_size(self): """ Gets the page_size of this ProjectPage. :return: The page_size of this ProjectPage. :rtype: int """ return self._page_size @page_size.setter def page_size(self, page_size): """ Sets the page_size of this ProjectPage. :param page_size: The page_size of this ProjectPage. :type: int """ self._page_size = page_size @property def total_pages(self): """ Gets the total_pages of this ProjectPage. :return: The total_pages of this ProjectPage. :rtype: int """ return self._total_pages @total_pages.setter def total_pages(self, total_pages): """ Sets the total_pages of this ProjectPage. :param total_pages: The total_pages of this ProjectPage. :type: int """ self._total_pages = total_pages @property def content(self): """ Gets the content of this ProjectPage. :return: The content of this ProjectPage. :rtype: list[ProjectRest] """ return self._content @content.setter def content(self, content): """ Sets the content of this ProjectPage. :param content: The content of this ProjectPage. 
:type: list[ProjectRest] """ self._content = content def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) elif isinstance(value, datetime): result[attr] = str(value.date()) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, ProjectPage): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """ Returns true if both objects are not equal """ return not self == other
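

# Illustrative only (not emitted by the generator): a short sketch of how this
# model is typically used on the client side. The empty ``content`` list stands
# in for ProjectRest instances, which live in a sibling generated module.
if __name__ == "__main__":
    page = ProjectPage(page_index=0, page_size=25, total_pages=4, content=[])
    # to_dict() walks ``swagger_types`` and returns plain values keyed by the
    # python attribute names; to_str()/__repr__ pretty-print that dict.
    print(page.to_dict())
    # __eq__ compares the underlying attribute dicts, so identically
    # constructed instances compare equal.
    assert page == ProjectPage(page_index=0, page_size=25, total_pages=4, content=[])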
''' Entry point module to start the interactive console. ''' from _pydev_imps._pydev_saved_modules import thread from _pydevd_bundle.pydevd_constants import IS_JYTHON, dict_iter_items start_new_thread = thread.start_new_thread try: from code import InteractiveConsole except ImportError: from _pydevd_bundle.pydevconsole_code_for_ironpython import InteractiveConsole from code import compile_command from code import InteractiveInterpreter import os import sys from _pydev_imps._pydev_saved_modules import threading from _pydevd_bundle.pydevd_constants import INTERACTIVE_MODE_AVAILABLE, dict_keys import traceback from _pydev_bundle import fix_getpass fix_getpass.fix_getpass() from _pydevd_bundle import pydevd_vars, pydevd_save_locals from _pydev_bundle.pydev_imports import Exec, _queue try: import __builtin__ except: import builtins as __builtin__ # @UnresolvedImport from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface, BaseStdIn from _pydev_bundle.pydev_console_utils import CodeFragment IS_PYTHON_3_ONWARDS = sys.version_info[0] >= 3 IS_PY24 = sys.version_info[0] == 2 and sys.version_info[1] == 4 class Command: def __init__(self, interpreter, code_fragment): """ :type code_fragment: CodeFragment :type interpreter: InteractiveConsole """ self.interpreter = interpreter self.code_fragment = code_fragment self.more = None def symbol_for_fragment(code_fragment): if code_fragment.is_single_line: symbol = 'single' else: if IS_JYTHON: symbol = 'single' # Jython doesn't support exec else: symbol = 'exec' return symbol symbol_for_fragment = staticmethod(symbol_for_fragment) def run(self): text = self.code_fragment.text symbol = self.symbol_for_fragment(self.code_fragment) self.more = self.interpreter.runsource(text, '<input>', symbol) try: try: execfile #Not in Py3k except NameError: from _pydev_bundle.pydev_imports import execfile __builtin__.execfile = execfile except: pass # Pull in runfile, the interface to UMD that wraps execfile from _pydev_bundle.pydev_umd import runfile, _set_globals_function if sys.version_info[0] >= 3: import builtins # @UnresolvedImport builtins.runfile = runfile else: import __builtin__ __builtin__.runfile = runfile #======================================================================================================================= # InterpreterInterface #======================================================================================================================= class InterpreterInterface(BaseInterpreterInterface): ''' The methods in this class should be registered in the xml-rpc server. 
''' def __init__(self, host, client_port, mainThread, connect_status_queue=None): BaseInterpreterInterface.__init__(self, mainThread, connect_status_queue) self.client_port = client_port self.host = host self.namespace = {} self.interpreter = InteractiveConsole(self.namespace) self._input_error_printed = False def do_add_exec(self, codeFragment): command = Command(self.interpreter, codeFragment) command.run() return command.more def get_namespace(self): return self.namespace def getCompletions(self, text, act_tok): try: from _pydev_bundle._pydev_completer import Completer completer = Completer(self.namespace, None) return completer.complete(act_tok) except: import traceback traceback.print_exc() return [] def close(self): sys.exit(0) def get_greeting_msg(self): return 'PyDev console: starting.\n' class _ProcessExecQueueHelper: _debug_hook = None _return_control_osc = False def set_debug_hook(debug_hook): _ProcessExecQueueHelper._debug_hook = debug_hook def activate_mpl_if_already_imported(interpreter): if interpreter.mpl_modules_for_patching: for module in dict_keys(interpreter.mpl_modules_for_patching): if module in sys.modules: activate_function = interpreter.mpl_modules_for_patching.pop(module) activate_function() def init_set_return_control_back(interpreter): from pydev_ipython.inputhook import set_return_control_callback def return_control(): ''' A function that the inputhooks can call (via inputhook.stdin_ready()) to find out if they should cede control and return ''' if _ProcessExecQueueHelper._debug_hook: # Some of the input hooks check return control without doing # a single operation, so we don't return True on every # call when the debug hook is in place to allow the GUI to run # XXX: Eventually the inputhook code will have diverged enough # from the IPython source that it will be worthwhile rewriting # it rather than pretending to maintain the old API _ProcessExecQueueHelper._return_control_osc = not _ProcessExecQueueHelper._return_control_osc if _ProcessExecQueueHelper._return_control_osc: return True if not interpreter.exec_queue.empty(): return True return False set_return_control_callback(return_control) def init_mpl_in_console(interpreter): init_set_return_control_back(interpreter) if not INTERACTIVE_MODE_AVAILABLE: return activate_mpl_if_already_imported(interpreter) from _pydev_bundle.pydev_import_hook import import_hook_manager for mod in dict_keys(interpreter.mpl_modules_for_patching): import_hook_manager.add_module_name(mod, interpreter.mpl_modules_for_patching.pop(mod)) if sys.platform != 'win32': def pid_exists(pid): # Note that this function in the face of errors will conservatively consider that # the pid is still running (because we'll exit the current process when it's # no longer running, so, we need to be 100% sure it actually exited). import errno if pid == 0: # According to "man 2 kill" PID 0 has a special meaning: # it refers to <<every process in the process group of the # calling process>> so we don't want to go any further. # If we get here it means this UNIX platform *does* have # a process with id 0. return True try: os.kill(pid, 0) except OSError as err: if err.errno == errno.ESRCH: # ESRCH == No such process return False elif err.errno == errno.EPERM: # EPERM clearly means there's a process to deny access to return True else: # According to "man 2 kill" possible error values are # (EINVAL, EPERM, ESRCH) therefore we should never get # here. If we do, although it's an error, consider it # exists (see first comment in this function). 
return True else: return True else: def pid_exists(pid): # Note that this function in the face of errors will conservatively consider that # the pid is still running (because we'll exit the current process when it's # no longer running, so, we need to be 100% sure it actually exited). import ctypes kernel32 = ctypes.windll.kernel32 PROCESS_QUERY_INFORMATION = 0x0400 PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 ERROR_INVALID_PARAMETER = 0x57 STILL_ACTIVE = 259 process = kernel32.OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_QUERY_LIMITED_INFORMATION, 0, pid) if not process: err = kernel32.GetLastError() if err == ERROR_INVALID_PARAMETER: # Means it doesn't exist (pid parameter is wrong). return False # There was some unexpected error (such as access denied), so # consider it exists (although it could be something else, but we don't want # to raise any errors -- so, just consider it exists). return True try: zero = ctypes.c_int(0) exit_code = ctypes.pointer(zero) exit_code_suceeded = kernel32.GetExitCodeProcess(process, exit_code) if not exit_code_suceeded: # There was some unexpected error (such as access denied), so # consider it exists (although it could be something else, but we don't want # to raise any errors -- so, just consider it exists). return True elif bool(exit_code.contents.value) and int(exit_code.contents.value) != STILL_ACTIVE: return False finally: kernel32.CloseHandle(process) return True def process_exec_queue(interpreter): init_mpl_in_console(interpreter) from pydev_ipython.inputhook import get_inputhook try: kill_if_pid_not_alive = int(os.environ.get('PYDEV_ECLIPSE_PID', '-1')) except: kill_if_pid_not_alive = -1 while 1: if kill_if_pid_not_alive != -1: if not pid_exists(kill_if_pid_not_alive): exit() # Running the request may have changed the inputhook in use inputhook = get_inputhook() if _ProcessExecQueueHelper._debug_hook: _ProcessExecQueueHelper._debug_hook() if inputhook: try: # Note: it'll block here until return_control returns True. inputhook() except: import traceback;traceback.print_exc() try: try: code_fragment = interpreter.exec_queue.get(block=True, timeout=1/20.) # 20 calls/second except _queue.Empty: continue if callable(code_fragment): # It can be a callable (i.e.: something that must run in the main # thread can be put in the queue for later execution). code_fragment() else: more = interpreter.add_exec(code_fragment) except KeyboardInterrupt: interpreter.buffer = None continue except SystemExit: raise except: type, value, tb = sys.exc_info() traceback.print_exception(type, value, tb, file=sys.__stderr__) exit() if 'IPYTHONENABLE' in os.environ: IPYTHON = os.environ['IPYTHONENABLE'] == 'True' else: # There is an open bug here: https://github.com/Microsoft/ptvsd/issues/920 # and here: https://github.com/Microsoft/vscode-python/issues/2860. # When syncing this, try syncing without this commit and if that does not # work then try syncing with this commit. 
IPYTHON = False try: try: exitfunc = sys.exitfunc except AttributeError: exitfunc = None if IPYTHON: from _pydev_bundle.pydev_ipython_console import InterpreterInterface if exitfunc is not None: sys.exitfunc = exitfunc else: try: delattr(sys, 'exitfunc') except: pass except: IPYTHON = False pass #======================================================================================================================= # _DoExit #======================================================================================================================= def do_exit(*args): ''' We have to override the exit because calling sys.exit will only actually exit the main thread, and as we're in a Xml-rpc server, that won't work. ''' try: import java.lang.System java.lang.System.exit(1) except ImportError: if len(args) == 1: os._exit(args[0]) else: os._exit(0) #======================================================================================================================= # start_console_server #======================================================================================================================= def start_console_server(host, port, interpreter): try: if port == 0: host = '' #I.e.: supporting the internal Jython version in PyDev to create a Jython interactive console inside Eclipse. from _pydev_bundle.pydev_imports import SimpleXMLRPCServer as XMLRPCServer #@Reimport try: if IS_PY24: server = XMLRPCServer((host, port), logRequests=False) else: server = XMLRPCServer((host, port), logRequests=False, allow_none=True) except: sys.stderr.write('Error starting server with host: "%s", port: "%s", client_port: "%s"\n' % (host, port, interpreter.client_port)) sys.stderr.flush() raise # Tell UMD the proper default namespace _set_globals_function(interpreter.get_namespace) server.register_function(interpreter.execLine) server.register_function(interpreter.execMultipleLines) server.register_function(interpreter.getCompletions) server.register_function(interpreter.getFrame) server.register_function(interpreter.getVariable) server.register_function(interpreter.changeVariable) server.register_function(interpreter.getDescription) server.register_function(interpreter.close) server.register_function(interpreter.interrupt) server.register_function(interpreter.handshake) server.register_function(interpreter.connectToDebugger) server.register_function(interpreter.hello) server.register_function(interpreter.getArray) server.register_function(interpreter.evaluate) server.register_function(interpreter.ShowConsole) server.register_function(interpreter.loadFullValue) # Functions for GUI main loop integration server.register_function(interpreter.enableGui) if port == 0: (h, port) = server.socket.getsockname() print(port) print(interpreter.client_port) while True: try: server.serve_forever() except: # Ugly code to be py2/3 compatible # https://sw-brainwy.rhcloud.com/tracker/PyDev/534: # Unhandled "interrupted system call" error in the pydevconsol.py e = sys.exc_info()[1] retry = False try: retry = e.args[0] == 4 #errno.EINTR except: pass if not retry: raise # Otherwise, keep on going return server except: traceback.print_exc() # Notify about error to avoid long waiting connection_queue = interpreter.get_connect_status_queue() if connection_queue is not None: connection_queue.put(False) def start_server(host, port, client_port): #replace exit (see comments on method) #note that this does not work in jython!!! (sys method can't be replaced). 
sys.exit = do_exit interpreter = InterpreterInterface(host, client_port, threading.currentThread()) start_new_thread(start_console_server,(host, port, interpreter)) process_exec_queue(interpreter) def get_ipython_hidden_vars(): if IPYTHON and hasattr(__builtin__, 'interpreter'): interpreter = get_interpreter() return interpreter.get_ipython_hidden_vars_dict() def get_interpreter(): try: interpreterInterface = getattr(__builtin__, 'interpreter') except AttributeError: interpreterInterface = InterpreterInterface(None, None, threading.currentThread()) __builtin__.interpreter = interpreterInterface sys.stderr.write(interpreterInterface.get_greeting_msg()) sys.stderr.flush() return interpreterInterface def get_completions(text, token, globals, locals): interpreterInterface = get_interpreter() interpreterInterface.interpreter.update(globals, locals) return interpreterInterface.getCompletions(text, token) #=============================================================================== # Debugger integration #=============================================================================== def exec_code(code, globals, locals, debugger): interpreterInterface = get_interpreter() interpreterInterface.interpreter.update(globals, locals) res = interpreterInterface.need_more(code) if res: return True interpreterInterface.add_exec(code, debugger) return False class ConsoleWriter(InteractiveInterpreter): skip = 0 def __init__(self, locals=None): InteractiveInterpreter.__init__(self, locals) def write(self, data): #if (data.find("global_vars") == -1 and data.find("pydevd") == -1): if self.skip > 0: self.skip -= 1 else: if data == "Traceback (most recent call last):\n": self.skip = 1 sys.stderr.write(data) def showsyntaxerror(self, filename=None): """Display the syntax error that just occurred.""" #Override for avoid using sys.excepthook PY-12600 type, value, tb = sys.exc_info() sys.last_type = type sys.last_value = value sys.last_traceback = tb if filename and type is SyntaxError: # Work hard to stuff the correct filename in the exception try: msg, (dummy_filename, lineno, offset, line) = value.args except ValueError: # Not the format we expect; leave it alone pass else: # Stuff in the right filename value = SyntaxError(msg, (filename, lineno, offset, line)) sys.last_value = value list = traceback.format_exception_only(type, value) sys.stderr.write(''.join(list)) def showtraceback(self, *args, **kwargs): """Display the exception that just occurred.""" #Override for avoid using sys.excepthook PY-12600 try: type, value, tb = sys.exc_info() sys.last_type = type sys.last_value = value sys.last_traceback = tb tblist = traceback.extract_tb(tb) del tblist[:1] lines = traceback.format_list(tblist) if lines: lines.insert(0, "Traceback (most recent call last):\n") lines.extend(traceback.format_exception_only(type, value)) finally: tblist = tb = None sys.stderr.write(''.join(lines)) def console_exec(thread_id, frame_id, expression, dbg): """returns 'False' in case expression is partially correct """ frame = pydevd_vars.find_frame(thread_id, frame_id) is_multiline = expression.count('@LINE@') > 1 expression = str(expression.replace('@LINE@', '\n')) #Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329 #(Names not resolved in generator expression in method) #See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html updated_globals = {} updated_globals.update(frame.f_globals) updated_globals.update(frame.f_locals) #locals later 
because it has precedence over the actual globals if IPYTHON: need_more = exec_code(CodeFragment(expression), updated_globals, frame.f_locals, dbg) if not need_more: pydevd_save_locals.save_locals(frame) return need_more interpreter = ConsoleWriter() if not is_multiline: try: code = compile_command(expression) except (OverflowError, SyntaxError, ValueError): # Case 1 interpreter.showsyntaxerror() return False if code is None: # Case 2 return True else: code = expression #Case 3 try: Exec(code, updated_globals, frame.f_locals) except SystemExit: raise except: interpreter.showtraceback() else: pydevd_save_locals.save_locals(frame) return False #======================================================================================================================= # main #======================================================================================================================= if __name__ == '__main__': #Important: don't use this module directly as the __main__ module, rather, import itself as pydevconsole #so that we don't get multiple pydevconsole modules if it's executed directly (otherwise we'd have multiple #representations of its classes). #See: https://sw-brainwy.rhcloud.com/tracker/PyDev/446: #'Variables' and 'Expressions' views stopped working when debugging interactive console import pydevconsole sys.stdin = pydevconsole.BaseStdIn(sys.stdin) port, client_port = sys.argv[1:3] from _pydev_bundle import pydev_localhost if int(port) == 0 and int(client_port) == 0: (h, p) = pydev_localhost.get_socket_name() client_port = p pydevconsole.start_server(pydev_localhost.get_localhost(), int(port), int(client_port))
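

# Illustrative helper, not used by the console machinery above: the
# single-line branch of console_exec() delegates the "complete / incomplete /
# invalid" decision to the standard library's code.compile_command(), which is
# what produces the Case 1 / Case 2 / Case 3 split handled there.
def _compile_command_cases():
    from code import compile_command as _cc
    complete = _cc('x = 1') is not None    # full statement -> code object, gets Exec'd
    incomplete = _cc('if True:') is None   # needs more input -> Case 2 (return True)
    try:
        _cc(')(')                          # invalid syntax -> Case 1 (showsyntaxerror)
        invalid = False
    except SyntaxError:
        invalid = True
    return complete, incomplete, invalid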
import bpy
from bpy.props import StringProperty, BoolProperty, EnumProperty
from ..rfb_utils import shadergraph_utils
from ..rfb_utils import object_utils
from ..rfb_logger import rfb_log
from .. import rman_bl_nodes
from ..rman_constants import RMAN_STYLIZED_FILTERS, RMAN_STYLIZED_PATTERNS, RMAN_UTILITY_PATTERN_NAMES

class PRMAN_OT_Enable_Sylized_Looks(bpy.types.Operator):
    bl_idname = "scene.rman_enable_stylized_looks"
    bl_label = "Enable Stylized Looks"
    bl_description = "Enable stylized looks. Objects still need to have a stylized pattern connected to their material network, and stylized filters need to be added to the scene."
    bl_options = {'INTERNAL'}

    open_editor: BoolProperty(name="", default=False)

    def execute(self, context):
        scene = context.scene
        rm = scene.renderman
        rm.render_rman_stylized = 1
        world = scene.world
        world.update_tag()
        bpy.ops.renderman.dspy_displays_reload('EXEC_DEFAULT')

        if self.properties.open_editor:
            bpy.ops.scene.rman_open_stylized_editor('INVOKE_DEFAULT')

        return {"FINISHED"}

class PRMAN_OT_Disable_Sylized_Looks(bpy.types.Operator):
    bl_idname = "scene.rman_disable_stylized_looks"
    bl_label = "Disable Stylized Looks"
    bl_description = "Disable stylized looks."
    bl_options = {'INTERNAL'}

    def execute(self, context):
        scene = context.scene
        rm = scene.renderman
        rm.render_rman_stylized = 0
        world = scene.world
        world.update_tag()
        bpy.ops.renderman.dspy_displays_reload('EXEC_DEFAULT')

        return {"FINISHED"}

class PRMAN_OT_Attach_Stylized_Pattern(bpy.types.Operator):
    bl_idname = "node.rman_attach_stylized_pattern"
    bl_label = "Attach Stylized Pattern"
    bl_description = "Attach a stylized pattern node to your material network."
    bl_options = {'INTERNAL'}

    def rman_stylized_patterns(self, context):
        items = []
        for f in RMAN_STYLIZED_PATTERNS:
            items.append((f, f, ""))
        return items

    stylized_pattern: EnumProperty(name="", items=rman_stylized_patterns)

    def add_manifolds(self, nt, pattern_node):
        pxr_to_float3_nm = rman_bl_nodes.__BL_NODES_MAP__['PxrToFloat3']
        pxr_manifold3d_nm = rman_bl_nodes.__BL_NODES_MAP__['PxrManifold3D']
        pxr_projector_nm = rman_bl_nodes.__BL_NODES_MAP__['PxrProjector']

        # Feed a PxrManifold3D through a PxrToFloat3 into the pattern's
        # triplanar input.
        pxr_manifold3d = nt.nodes.new(pxr_manifold3d_nm)
        pxr_to_float3_1 = nt.nodes.new(pxr_to_float3_nm)
        nt.links.new(pxr_manifold3d.outputs['resultX'], pxr_to_float3_1.inputs['inputR'])
        nt.links.new(pxr_manifold3d.outputs['resultY'], pxr_to_float3_1.inputs['inputG'])
        nt.links.new(pxr_manifold3d.outputs['resultZ'], pxr_to_float3_1.inputs['inputB'])
        nt.links.new(pxr_to_float3_1.outputs['resultRGB'], pattern_node.inputs['inputPtriplanar'])
        pxr_to_float3_1.inputs['inputR'].ui_open = False
        pxr_to_float3_1.inputs['inputG'].ui_open = False
        pxr_to_float3_1.inputs['inputB'].ui_open = False
        pattern_node.inputs['inputPtriplanar'].ui_open = False

        # Feed NDC projector coordinates into the pattern's texture coords.
        pxr_to_float3_2 = nt.nodes.new(pxr_to_float3_nm)
        pxr_projector = nt.nodes.new(pxr_projector_nm)
        pxr_projector.coordsys = "NDC"
        nt.links.new(pxr_projector.outputs['resultS'], pxr_to_float3_2.inputs['inputR'])
        nt.links.new(pxr_projector.outputs['resultT'], pxr_to_float3_2.inputs['inputG'])
        nt.links.new(pxr_to_float3_2.outputs['resultRGB'], pattern_node.inputs['inputTextureCoords'])
        pxr_to_float3_2.inputs['inputR'].ui_open = False
        pxr_to_float3_2.inputs['inputG'].ui_open = False
        pattern_node.inputs['inputTextureCoords'].ui_open = False

    def attach_pattern(self, context, ob):
        mat = object_utils.get_active_material(ob)
        if not mat:
            bpy.ops.object.rman_add_bxdf('EXEC_DEFAULT', bxdf_name='PxrSurface')
            mat = object_utils.get_active_material(ob)
        if not mat:
            self.report({'ERROR'}, 'Cannot find a material for: %s' % ob.name)
            return

        nt = mat.node_tree
        output = shadergraph_utils.is_renderman_nodetree(mat)
        if not output:
            bpy.ops.material.rman_add_rman_nodetree('EXEC_DEFAULT', idtype='material', bxdf_Name='PxrSurface')
            # re-query the output node of the node tree we just created
            output = shadergraph_utils.is_renderman_nodetree(mat)

        socket = output.inputs[0]
        if not socket.is_linked:
            return

        link = socket.links[0]
        node = link.from_node

        prop_name = ''
        pattern_node_name = None
        pattern_settings = None
        if self.properties.stylized_pattern in RMAN_STYLIZED_PATTERNS:
            pattern_node_name = rman_bl_nodes.__BL_NODES_MAP__[self.properties.stylized_pattern]
        else:
            return

        # Walk the utility pattern inputs on the bxdf node and hook the
        # stylized pattern into the first one we find.
        for nm in RMAN_UTILITY_PATTERN_NAMES:
            if hasattr(node, nm):
                prop_name = nm

                if shadergraph_utils.has_stylized_pattern_node(ob, node=node):
                    continue

                prop_meta = node.prop_meta[prop_name]
                if prop_meta['renderman_type'] == 'array':
                    array_len = getattr(node, '%s_arraylen' % prop_name)
                    array_len += 1
                    setattr(node, '%s_arraylen' % prop_name, array_len)
                    pattern_node = nt.nodes.new(pattern_node_name)
                    if pattern_settings:
                        for param_name, param_settings in pattern_settings['params'].items():
                            val = param_settings['value']
                            setattr(pattern_node, param_name, val)

                    sub_prop_nm = '%s[%d]' % (prop_name, array_len-1)
                    nt.links.new(pattern_node.outputs['resultAOV'], node.inputs[sub_prop_nm])

                    # Add manifolds
                    self.add_manifolds(nt, pattern_node)
                else:
                    if node.inputs[prop_name].is_linked:
                        continue
                    pattern_node = nt.nodes.new(pattern_node_name)
                    if pattern_settings:
                        for param_name, param_settings in pattern_settings['params'].items():
                            val = param_settings['value']
                            setattr(pattern_node, param_name, val)

                    nt.links.new(pattern_node.outputs['resultAOV'], node.inputs[prop_name])

                    # Add manifolds
                    self.add_manifolds(nt, pattern_node)

    def execute(self, context):
        scene = context.scene
        selected_objects = context.selected_objects
        obj = getattr(context, "selected_obj", None)
        if obj:
            self.attach_pattern(context, obj)
        else:
            for ob in selected_objects:
                self.attach_pattern(context, ob)

        op = getattr(context, 'op_ptr', None)
        if op:
            op.selected_obj_name = '0'

        if context.view_layer.objects.active:
            context.view_layer.objects.active.select_set(False)
        context.view_layer.objects.active = None

        return {"FINISHED"}

class PRMAN_OT_Add_Stylized_Filter(bpy.types.Operator):
    bl_idname = "node.rman_add_stylized_filter"
    bl_label = "Add Stylized Filter"
    bl_description = "Add a stylized filter to the scene."
bl_options = {'INTERNAL'} def rman_stylized_filters(self, context): items = [] scene = context.scene world = scene.world for f in RMAN_STYLIZED_FILTERS: found = False for n in shadergraph_utils.find_displayfilter_nodes(world): if n.bl_label == f: found = True break if found: continue items.append((f, f, "")) if len(items) < 1: items.append(('0', '', '')) return items filter_name: EnumProperty(items=rman_stylized_filters, name="Filter Name") node_name: StringProperty(name="", default="") def execute(self, context): scene = context.scene world = scene.world rm = world.renderman nt = world.node_tree output = shadergraph_utils.find_node(world, 'RendermanDisplayfiltersOutputNode') if not output: bpy.ops.material.rman_add_rman_nodetree('EXEC_DEFAULT', idtype='world') output = shadergraph_utils.find_node(world, 'RendermanDisplayfiltersOutputNode') filter_name = self.properties.filter_name filter_node_name = rman_bl_nodes.__BL_NODES_MAP__[filter_name] filter_node = nt.nodes.new(filter_node_name) free_socket = None for i, socket in enumerate(output.inputs): if not socket.is_linked: free_socket = socket break if not free_socket: bpy.ops.node.rman_add_displayfilter_node_socket('EXEC_DEFAULT') free_socket = output.inputs[len(output.inputs)-1] nt.links.new(filter_node.outputs[0], free_socket) if self.properties.node_name != "": filter_node.name = self.properties.node_name op = getattr(context, 'op_ptr', None) if op: op.stylized_filter = filter_node.name world.update_tag() return {"FINISHED"} classes = [ PRMAN_OT_Enable_Sylized_Looks, PRMAN_OT_Disable_Sylized_Looks, PRMAN_OT_Attach_Stylized_Pattern, PRMAN_OT_Add_Stylized_Filter ] def register(): for cls in classes: bpy.utils.register_class(cls) def unregister(): for cls in classes: try: bpy.utils.unregister_class(cls) except RuntimeError: rfb_log().debug('Could not unregister class: %s' % str(cls)) pass
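

# A minimal, illustrative driver for the operators defined above; it is not
# part of the add-on itself. It assumes the add-on is registered and that the
# pattern/filter names passed in are valid entries of RMAN_STYLIZED_PATTERNS
# and RMAN_STYLIZED_FILTERS for your RenderMan install -- the defaults below
# are placeholders only.
def _example_stylized_workflow(pattern='PxrStylizedToon', filter_name='PxrStylizedToonFilter'):
    # Turn on the stylized pipeline (scene.rman_enable_stylized_looks above).
    bpy.ops.scene.rman_enable_stylized_looks('EXEC_DEFAULT', open_editor=False)
    # Attach a stylized pattern to the selected objects' material networks.
    bpy.ops.node.rman_attach_stylized_pattern('EXEC_DEFAULT', stylized_pattern=pattern)
    # Add the corresponding display filter to the world.
    bpy.ops.node.rman_add_stylized_filter('EXEC_DEFAULT', filter_name=filter_name)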
""" xarray extensions (accessors). """ from collections import defaultdict import warnings import attr import numpy as np from xarray import as_variable, Dataset, register_dataset_accessor from .drivers import XarraySimulationDriver from .model import get_model_variables, Model from .utils import Frozen, variables_dict from .variable import VarType @register_dataset_accessor("filter") def filter_accessor(dataset): """A temporary hack until ``filter`` is available in xarray (GH916).""" def filter(func=None, like=None, regex=None): variables = {k: v for k, v in dataset._variables.items() if func(v)} coord_names = [c for c in dataset._coord_names if c in variables] return dataset._replace_vars_and_dims(variables, coord_names=coord_names) return filter def _maybe_get_model_from_context(model): """Return the given model or try to find it in the context if there was none supplied. """ if model is None: if not Model.active: raise ValueError("No model found in context") model = Model.active[0] if not isinstance(model, Model): raise TypeError(f"{model} is not an instance of xsimlab.Model") return model def as_variable_key(key): """Returns ``key`` as a tuple of the form ``('process_name', 'var_name')``. If ``key`` is given as a string, then process name and variable name must be separated unambiguously by '__' (double underscore) and must not be empty. """ key_tuple = None if isinstance(key, tuple) and len(key) == 2: key_tuple = key elif isinstance(key, str): key_split = key.split("__") if len(key_split) == 2: p_name, var_name = key_split if p_name and var_name: key_tuple = (p_name, var_name) if key_tuple is None: raise ValueError(f"{key!r} is not a valid input variable key") return key_tuple def _flatten_inputs(input_vars): """Returns ``input_vars`` as a flat dictionary where keys are tuples in the form ``(process_name, var_name)``. Raises an error if the given format appears to be invalid. """ flatten_vars = {} for key, val in input_vars.items(): if isinstance(key, str) and isinstance(val, dict): for var_name, var_value in val.items(): flatten_vars[(key, var_name)] = var_value else: flatten_vars[as_variable_key(key)] = val return flatten_vars def _flatten_outputs(output_vars): """Returns ``output_vars`` as a flat dictionary where keys are clock names (or None) and values are lists of tuples in the form ``(process_name, var_name)``. """ flatten_vars = {} for clock, out_vars in output_vars.items(): if isinstance(out_vars, dict): var_list = [] for p_name, var_names in out_vars.items(): if isinstance(var_names, str): var_list.append((p_name, var_names)) else: var_list += [(p_name, vname) for vname in var_names] elif isinstance(out_vars, (tuple, str)): var_list = [as_variable_key(out_vars)] elif isinstance(out_vars, list): var_list = [as_variable_key(k) for k in out_vars] else: raise ValueError( f"Cannot interpret {out_vars!r} as valid output variable key(s)" ) flatten_vars[clock] = var_list return flatten_vars @register_dataset_accessor("xsimlab") class SimlabAccessor: """Simlab extension to :class:`xarray.Dataset`.""" _clock_key = "__xsimlab_output_clock__" _main_clock_key = "__xsimlab_main_clock__" _output_vars_key = "__xsimlab_output_vars__" def __init__(self, ds): self._ds = ds self._main_clock_dim = None self._clock_coords = None @property def clock_coords(self): """Mapping from clock dimensions to :class:`xarray.DataArray` objects corresponding to their coordinates. Cannot be modified directly. 
""" if self._clock_coords is None: self._clock_coords = { k: coord for k, coord in self._ds.coords.items() if self._clock_key in coord.attrs } return Frozen(self._clock_coords) @property def clock_sizes(self): """Mapping from clock dimensions to lengths. Cannot be modified directly. """ return Frozen({k: coord.size for k, coord in self.clock_coords.items()}) @property def master_clock_dim(self): """Dimension used as main clock for model runs. Returns None if no dimension is set as main clock. to be deprecated in favour of `main_clock_dim` See Also -------- :meth:`Dataset.xsimlab.update_clocks` """ warnings.warn( "master_clock is to be deprecated in favour of main_clock", FutureWarning, ) return self.main_clock_dim @property def main_clock_dim(self): """Dimension used as main clock for model runs. Returns None if no dimension is set as main clock. See Also -------- :meth:`Dataset.xsimlab.update_clocks` """ # it is fine to cache the value here as inconsistency may appear # only when deleting the main clock coordinate from the dataset, # which would raise early anyway if self._main_clock_dim is not None: return self._main_clock_dim else: for c in self._ds.coords.values(): if c.attrs.get(self._main_clock_key, False): dim = c.dims[0] self._main_clock_dim = dim return dim return None @property def master_clock_coord(self): """Main clock coordinate (as a :class:`xarray.DataArray` object). To be deprecated in a future release in favour of `main_clock_coord` Returns None if no main clock is defined in the dataset. """ warnings.warn( "master_clock_coord is to be deprecated in favour of main_clock", FutureWarning, ) return self.main_clock_coord @property def main_clock_coord(self): """Main clock coordinate (as a :class:`xarray.DataArray` object). Returns None if no main clock is defined in the dataset. """ return self._ds.get(self.main_clock_dim) @property def nsteps(self): """Number of simulation steps, computed from the main clock coordinate. Returns 0 if no main clock is defined in the dataset. """ if self.main_clock_dim is None: return 0 else: return self._ds[self.main_clock_dim].size - 1 def get_output_save_steps(self): """Returns save steps for each clock as boolean values. Returns ------- save_steps : :class:`xarray.Dataset` A new Dataset with boolean data variables for each clock dimension other than the main clock, where values specify whether or not to save outputs at every step of a simulation. """ ds = Dataset(coords={self.main_clock_dim: self.main_clock_coord}) for clock, coord in self.clock_coords.items(): if clock != self.main_clock_dim: save_steps = np.in1d(self.main_clock_coord.values, coord.values) ds[clock] = (self.main_clock_dim, save_steps) return ds def _set_clock_coord(self, dim, data): xr_var = as_variable(data, name=dim) if xr_var.dims != (dim,): raise ValueError( "Invalid dimension(s) given for clock coordinate " f"{dim!r}: found {xr_var.dims!r}, " f"expected {dim!r}" ) xr_var.attrs[self._clock_key] = np.uint8(True) self._ds.coords[dim] = xr_var def _uniformize_clock_coords(self, dim=None, units=None, calendar=None): """Ensure consistency across all clock coordinates. 
- maybe update main clock dimension - maybe set or update the same units and/or calendar for all coordinates as attributes - check that all clocks are synchronized with main clock, i.e., there is no coordinate label that is not present in main clock """ if dim is not None: if self.main_clock_dim is not None: old_mclock_coord = self._ds[self.main_clock_dim] old_mclock_coord.attrs.pop(self._main_clock_key) if dim not in self._ds.coords: raise KeyError( f"Main clock dimension name {dim} as no " "defined coordinate in Dataset" ) self._ds[dim].attrs[self._main_clock_key] = np.uint8(True) self._main_clock_dim = dim if units is not None: for coord in self.clock_coords.values(): coord.attrs["units"] = units if calendar is not None: for coord in self.clock_coords.values(): coord.attrs["calendar"] = calendar main_clock_idx = self._ds.indexes.get(self.main_clock_dim) for clock_dim in self.clock_coords: if clock_dim == self.main_clock_dim: continue clock_idx = self._ds.indexes[clock_dim] diff_idx = clock_idx.difference(main_clock_idx) if diff_idx.size: raise ValueError( f"Clock coordinate {clock_dim} is not synchronized " f"with main clock coordinate {self.main_clock_dim}. " "The following coordinate labels are " f"absent in main clock: {diff_idx.values}" ) def _set_input_vars(self, model, input_vars): invalid_inputs = set(input_vars) - set(model.input_vars) if invalid_inputs: raise KeyError( ", ".join([str(k) for k in invalid_inputs]) + f" is/are not valid key(s) for input variables in model {model}", ) for var_key, data in input_vars.items(): var_metadata = model.cache[var_key]["metadata"] xr_var_name = model.cache[var_key]["name"] try: xr_var = as_variable(data) except TypeError: # try retrieve dimension labels from model variable's # dimension labels that match the number of dimensions ndims = len(np.shape(data)) dim_labels = {len(d): d for d in var_metadata["dims"]} dims = dim_labels.get(ndims) if dims is None: raise TypeError( "Could not get dimension labels from model " f"for variable {xr_var_name!r} with value {data}" ) xr_var = as_variable((dims, data)) if var_metadata["description"]: xr_var.attrs["description"] = var_metadata["description"] xr_var.attrs.update(var_metadata["attrs"]) # maybe delete first to avoid merge conflicts # (we just want to replace here) if xr_var_name in self._ds: del self._ds[xr_var_name] self._ds[xr_var_name] = xr_var def _set_output_vars_attr(self, clock, value): # avoid update attrs in original dataset if clock is None: attrs = self._ds.attrs.copy() else: attrs = self._ds[clock].attrs.copy() if value is None: attrs.pop(self._output_vars_key, None) else: attrs[self._output_vars_key] = value if clock is None: self._ds.attrs = attrs else: new_coord = self._ds.coords[clock].copy() new_coord.attrs = attrs self._ds[clock] = new_coord def _set_output_vars(self, model, output_vars, clear=False): # TODO: remove this ugly code (depreciated output_vars format) o_vars = {} for k, v in output_vars.items(): if k is None or k in self.clock_coords: warnings.warn( "Setting clock dimensions or `None` as keys for `output_vars`" " is depreciated; use variable names instead (and clock " "dimensions or `None` as values, see docs).", FutureWarning, stacklevel=2, ) o_vars.update({vn: k for vn in _flatten_outputs({k: v})[k]}) else: o_vars[k] = v output_vars = _flatten_inputs(o_vars) # end of depreciated code block if not clear: _output_vars = {k: v for k, v in self.output_vars.items()} _output_vars.update(output_vars) output_vars = _output_vars invalid_outputs = set(output_vars) - 
set(model.all_vars) if invalid_outputs: raise KeyError( ", ".join([f"{pn}__{vn}" for pn, vn in invalid_outputs]) + f" is/are not valid key(s) for variables in model {model}", ) forbidden_output_types = (VarType.OBJECT, VarType.GROUP, VarType.GROUP_DICT) forbidden_model_vars = get_model_variables( model, func=lambda var: var.metadata["var_type"] in forbidden_output_types ) forbidden_outputs = set(output_vars) & set(forbidden_model_vars) if forbidden_outputs: raise ValueError( f"Object or group variables can't be set as model outputs: " + ", ".join([f"{pn}__{vn}" for pn, vn in forbidden_outputs]) ) clock_vars = defaultdict(list) for (p_name, var_name), clock in output_vars.items(): if clock is not None and clock not in self.clock_coords: raise ValueError( f"{clock!r} coordinate is not a valid clock coordinate." ) xr_var_name = p_name + "__" + var_name clock_vars[clock].append(xr_var_name) for clock, var_list in clock_vars.items(): var_str = ",".join(var_list) self._set_output_vars_attr(clock, var_str) # reset clock_coords cache as attributes of those coords # may have been updated self._clock_coords = None def _reset_output_vars(self, model, output_vars): self._set_output_vars_attr(None, None) for clock in self.clock_coords: self._set_output_vars_attr(clock, None) self._set_output_vars(model, output_vars, clear=True) @property def output_vars(self): """Returns a dictionary of output variable names - in the form of ``('p_name', 'var_name')`` tuples - as keys and the clock dimension names (or None) on which to save snapshots as values. Cannot be modified directly. """ def xr_attr_to_dict(attrs, clock): var_str = attrs.get(self._output_vars_key) if var_str is None: return {} else: return {as_variable_key(k): clock for k in var_str.split(",")} o_vars = {} for clock, coord in self.clock_coords.items(): o_vars.update(xr_attr_to_dict(coord.attrs, clock)) o_vars.update(xr_attr_to_dict(self._ds.attrs, None)) return Frozen(o_vars) @property def output_vars_by_clock(self): """Returns a dictionary of output variables grouped by clock (keys). Cannot be modified directly. """ o_vars = defaultdict(list) for k, clock in self.output_vars.items(): o_vars[clock].append(k) return Frozen(dict(o_vars)) def update_clocks( self, model=None, clocks=None, main_clock=None, master_clock=None ): """Set or update clock coordinates. Also copy from the replaced coordinates any attribute that is specific to model output variables. Parameters ---------- model : :class:`xsimlab.Model` object, optional Reference model. If None, tries to get model from context. clocks : dict, optional Used to create one or several clock coordinates. Dictionary values are anything that can be easily converted to :class:`xarray.IndexVariable` objects (e.g., a 1-d :class:`numpy.ndarray` or a :class:`pandas.Index`). main_clock : str or dict, optional Name of the clock coordinate (dimension) to use as main clock. If not set, the name is inferred from ``clocks`` (only if one coordinate is given and if Dataset has no main clock defined yet). A dictionary can also be given with one of several of these keys: - ``dim`` : name of the main clock dimension/coordinate - ``units`` : units of all clock coordinate labels - ``calendar`` : a unique calendar for all (time) clock coordinates master_clock : str or dict, optional Same as `main_clock`, to be deprecated Returns ------- updated : Dataset Another Dataset with new or replaced coordinates. 
See Also -------- :meth:`xsimlab.create_setup` """ model = _maybe_get_model_from_context(model) ds = self._ds.copy() if master_clock is not None and main_clock is None: warnings.warn( "master_clock is to be deprecated in favour of main_clock", FutureWarning, ) main_clock = master_clock if isinstance(main_clock, str): main_clock_dict = {"dim": main_clock} elif main_clock is None: if clocks is not None and len(clocks) == 1 and self.main_clock_dim is None: main_clock_dict = {"dim": list(clocks.keys())[0]} else: main_clock_dict = {"dim": self.main_clock_dim} else: main_clock_dict = main_clock main_clock_dim = main_clock_dict.get("dim", self.main_clock_dim) if clocks is not None: if main_clock_dim is None: raise ValueError( "Cannot determine which clock coordinate is the main clock" ) elif ( main_clock_dim not in clocks and main_clock_dim not in self.clock_coords ): raise KeyError( f"Main clock dimension name {main_clock_dim!r} not found " "in `clocks` nor in Dataset" ) for dim, data in clocks.items(): ds.xsimlab._set_clock_coord(dim, data) ds.xsimlab._uniformize_clock_coords(**main_clock_dict) # operations on clock coords may have discarded coord attributes o_vars = {k: v for k, v in self.output_vars.items() if v is None or v in ds} ds.xsimlab._set_output_vars(model, o_vars) return ds def update_vars(self, model=None, input_vars=None, output_vars=None): """Update model input values and/or output variable names. More details about the values allowed for the parameters below can be found in the doc of :meth:`xsimlab.create_setup`. Parameters ---------- model : :class:`xsimlab.Model` object, optional Reference model. If None, tries to get model from context. input_vars : dict, optional Model input values (may be grouped per process name, as dict of dicts). output_vars : dict, optional Model variables to save as simulation output (time-dependent or time-independent). Returns ------- updated : Dataset Another Dataset with new or replaced variables (inputs) and/or attributes (snapshots). See Also -------- :meth:`xsimlab.create_setup` """ model = _maybe_get_model_from_context(model) ds = self._ds.copy() if input_vars is not None: ds.xsimlab._set_input_vars(model, _flatten_inputs(input_vars)) if output_vars is not None: ds.xsimlab._set_output_vars(model, output_vars) return ds def reset_vars(self, model=None): """Set or reset Dataset variables with model input default values (if any). Parameters ---------- model : :class:`xsimlab.Model` object, optional Reference model. If None, tries to get model from context. Returns ------- updated : Dataset Another Dataset with new and/or replaced variables. See Also -------- :meth:`Dataset.xsimlab.update_vars` """ model = _maybe_get_model_from_context(model) ds = self._ds.copy() input_vars_default = {} for p_name, var_name in model.input_vars: p_obj = model[p_name] var = variables_dict(type(p_obj))[var_name] if var.default is not attr.NOTHING: input_vars_default[(p_name, var_name)] = var.default ds.xsimlab._set_input_vars(model, input_vars_default) return ds def filter_vars(self, model=None): """Filter Dataset content according to Model. Keep only data variables and coordinates that correspond to inputs of the model (keep clock coordinates too). Also update xsimlab-specific attributes so that output variables given per clock only refer to processes and variables defined in the model. Parameters ---------- model : :class:`xsimlab.Model` object, optional Reference model. If None, tries to get model from context. 
Returns ------- filtered : Dataset Another Dataset with (maybe) dropped variables and updated attributes. See Also -------- :meth:`Dataset.xsimlab.update_vars` """ model = _maybe_get_model_from_context(model) # drop variables drop_variables = [] for xr_var_name in self._ds.variables: if xr_var_name in self.clock_coords: continue try: p_name, var_name = xr_var_name.split("__") except ValueError: # not a xsimlab model input: make sure to remove it p_name, var_name = ("", xr_var_name) if (p_name, var_name) not in model.input_vars: drop_variables.append(xr_var_name) ds = self._ds.drop(drop_variables) # update output variable attributes o_vars = {k: v for k, v in self.output_vars.items() if k in model.all_vars} ds.xsimlab._reset_output_vars(model, o_vars) return ds def run( self, model=None, batch_dim=None, check_dims="strict", validate="inputs", store=None, encoding=None, decoding=None, hooks=None, parallel=False, scheduler=None, safe_mode=True, ): """Run the model. Parameters ---------- model : :class:`xsimlab.Model` object, optional Reference model. If None, tries to get model from context. batch_dim : str, optional Dimension label in the input dataset used to run a batch of simulations. check_dims : {'strict', 'transpose'}, optional Check the dimension(s) of each input variable given in Dataset. It may be one of the following options: - 'strict': the dimension labels must exactly correspond to (one of) the label sequences defined by their respective model variables (default) - 'transpose': input variables might be transposed in order to match (one of) the label sequences defined by their respective model variables Note that ``batch_dim`` (if any) and clock dimensions are excluded from this check. If None is given, no check is performed. validate : {'inputs', 'all'}, optional Define what will be validated using the variable's validators defined in ``model``'s processes (if any). It may be one of the following options: - 'inputs': validate only values given as inputs (default) - 'all': validate both input values and values set through foreign variables in process classes The latter may significantly impact performance, but it may be useful for debugging. If None is given, no validation is performed. store : str or :class:`collections.abc.MutableMapping` or :class:`zarr.Group` object, optional If a string (path) is given, simulation I/O data will be saved in that specified directory in the file system. If None is given (default), all data will be saved in memory. This parameter also directly accepts a zarr group object or (most of) zarr store objects for more storage options (see notes below). encoding : dict, optional Nested dictionary with variable names as keys and dictionaries of variable specific encodings as values, e.g., ``{'my_variable': {'dtype': 'int16', 'fill_value': -9999,}, ...}``. Encoding options provided here override encoding options defined in model variables (see :func:`~xsimlab.variable` for a full list of of options available). Additionally, 'chunks' and 'synchronizer' options are supported here. decoding : dict, optional Options passed as keyword arguments to :func:`xarray.open_zarr` to load the simulation outputs from the zarr store as a new xarray dataset. hooks : list, optional One or more runtime hooks, i.e., functions decorated with :func:`~xsimlab.runtime_hook` or instances of :class:`~xsimlab.RuntimeHook`. The latter can also be used using the ``with`` statement or using their ``register()`` method. 
parallel : bool, optional If True, run the simulation(s) in parallel using Dask (default: False). If a dimension label is set for ``batch_dim``, each simulation in the batch will be run in parallel. Otherwise, the processes in ``model`` will be executed in parallel for each simulation stage. scheduler : str, optional Dask's scheduler used to run the simulation(s) in parallel. See :func:`dask.compute`. It also accepts any instance of ``distributed.Client``. safe_mode : bool, optional If True (default), a clone of ``model`` will be used to run each simulation so that it is safe to run multiple simulations simultaneously (provided that the code executed in ``model`` is thread-safe too). Generally safe mode shouldn't be disabled, except in a few cases (e.g., debugging). Returns ------- output : Dataset Another Dataset with both model inputs and outputs. The data is (lazily) loaded from the zarr store used to save inputs and outputs. Notes ----- xarray-simlab uses the zarr library (https://zarr.readthedocs.io) to save model inputs and outputs during a simulation. zarr provides a common interface to multiple storage solutions (e.g., in memory, on disk, cloud-based storage, databases, etc.). Some stores may not work well with xarray-simlab, though. For example :class:`zarr.storage.ZipStore` is not supported because it is not possible to write data to a dataset after it has been created. xarray-simlab uses the dask library (https://docs.dask.org) to run the simulation(s) in parallel. Dask is a powerful library that allows running tasks (either simulations or model processes) on a single machine (multi-threads or multi-processes) or even on a distributed architecture (HPC, Cloud). Even though xarray-simlab includes some safeguards against race conditions, those might still occur under some circumstances and thus require extra care. In particular: - The code implemented in the process classes of ``model`` must be thread-safe if a dask multi-threaded scheduler is used, and must be serializable if a multi-process or distributed scheduler is used. - Multi-process or distributed schedulers are not well supported or may have poor performance when running the ``model`` processes in parallel (i.e., single-model parallelism), depending on the amount of data shared between the processes. See :meth:`xsimlab.Model.execute` for more details. - Not all zarr stores are safe to write in multiple threads or processes. For example, :class:`zarr.storage.MemoryStore` used by default is safe to write in multiple threads but not in multiple processes. - If chunks are specified in ``encoding`` with chunk size > 1 for ``batch_dim``, then one of the zarr synchronizers should be used too, otherwise model output values will not be saved properly. Pick :class:`zarr.sync.ThreadSynchronizer` or :class:`zarr.sync.ProcessSynchronizer` depending on which dask scheduler is used. Also, check that the (distributed) scheduler doesn't use both multiple processes and multiple threads. """ model = _maybe_get_model_from_context(model) if safe_mode: model = model.clone() driver = XarraySimulationDriver( self._ds, model, batch_dim=batch_dim, store=store, encoding=encoding, decoding=decoding, check_dims=check_dims, validate=validate, hooks=hooks, parallel=parallel, scheduler=scheduler, ) driver.run_model() return driver.get_results() def create_setup( model=None, clocks=None, master_clock=None, main_clock=None, input_vars=None, output_vars=None, fill_default=True, ): """Create a specific setup for model runs. 
This convenient function creates a new :class:`xarray.Dataset` object with everything needed to run a model (i.e., input values, time steps, output variables to save at given times) as data variables, coordinates and attributes. Parameters ---------- model : :class:`xsimlab.Model` object, optional Create a simulation setup for this model. If None, tries to get model from context. clocks : dict, optional Used to create one or several clock coordinates. Dictionary values are anything that can be easily converted to :class:`xarray.IndexVariable` objects (e.g., a 1-d :class:`numpy.ndarray` or a :class:`pandas.Index`). master_clock: str or dict, optional See main_clock to be deprecated main_clock : str or dict, optional Name of the clock coordinate (dimension) to use as main clock. If not set, the name is inferred from ``clocks`` (only if one coordinate is given and if Dataset has no main clock defined yet). A dictionary can also be given with one of several of these keys: - ``dim`` : name of the main clock dimension/coordinate - ``units`` : units of all clock coordinate labels - ``calendar`` : a unique calendar for all (time) clock coordinates input_vars : dict, optional Dictionary with values given for model inputs. Entries of the dictionary may look like: - ``'foo': {'bar': value, ...}`` or - ``('foo', 'bar'): value`` or - ``'foo__bar': value`` where ``foo`` is the name of a existing process in the model and ``bar`` is the name of an (input) variable declared in that process. Values are anything that can be easily converted to :class:`xarray.Variable` objects, e.g., single values, array-like, ``(dims, data, attrs)`` tuples or xarray objects. For array-like values with no dimension labels, xarray-simlab will look in ``model`` variables metadata for labels matching the number of dimensions of those arrays. output_vars : dict, optional Dictionary with model variable names to save as simulation output (time-dependent or time-independent). Entries of the dictionary look similar than for ``input_vars`` (see here above), except that here ``value`` must correspond to the dimension of a clock coordinate (i.e., new output values will be saved at each time given by the coordinate labels) or ``None`` (i.e., only one value will be saved at the end of the simulation). fill_default : bool, optional If True (default), automatically fill the dataset with all model inputs missing in ``input_vars`` and their default value (if any). Returns ------- dataset : :class:`xarray.Dataset` A new Dataset object with model inputs as data variables or coordinates (depending on their given value) and clock coordinates. The names of the input variables also include the name of their process (i.e., 'foo__bar'). Notes ----- Output variable names are added in Dataset as specific attributes (global and/or clock coordinate attributes). """ model = _maybe_get_model_from_context(model) def maybe_fill_default(ds): if fill_default: return ds.xsimlab.reset_vars(model=model) else: return ds if master_clock is not None and main_clock is None: warnings.warn( "master_clock is to be deprecated in favour of main_clock", FutureWarning, ) main_clock = master_clock ds = ( Dataset() .xsimlab.update_clocks(model=model, clocks=clocks, main_clock=main_clock) .pipe(maybe_fill_default) .xsimlab.update_vars( model=model, input_vars=input_vars, output_vars=output_vars ) ) return ds
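

# Hedged usage sketch (not part of the accessor module above). The process
# and variable names below (`Counter`, `counter__init`, `counter__count`,
# the clock names "time"/"otime") are illustrative assumptions; the sketch
# only shows how create_setup() and Dataset.xsimlab.run() fit together.
def _example_create_setup_usage():  # pragma: no cover - illustration only
    import numpy as np
    import xsimlab as xs

    @xs.process
    class Counter:
        init = xs.variable(description="initial value")
        count = xs.variable(intent="out")

        def initialize(self):
            self.count = self.init

        def run_step(self):
            self.count += 1

    model = xs.Model({"counter": Counter})

    in_ds = xs.create_setup(
        model=model,
        clocks={"time": np.arange(10), "otime": [0, 4, 9]},
        main_clock="time",
        input_vars={"counter__init": 0},
        output_vars={"counter__count": "otime"},
    )
    # snapshots of counter__count are saved at the "otime" labels only,
    # which must be a subset of the main clock labels (see update_clocks)
    return in_ds.xsimlab.run(model=model)
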
import numpy as np import pytest from pandas.core.dtypes.common import is_categorical_dtype from pandas.core.dtypes.dtypes import CategoricalDtype import pandas as pd from pandas import ( Categorical, CategoricalIndex, DataFrame, Index, Interval, Series, Timestamp, ) from pandas.api.types import CategoricalDtype as CDT from pandas.util import testing as tm from pandas.util.testing import assert_frame_equal, assert_series_equal class TestCategoricalIndex: def setup_method(self, method): self.df = DataFrame( { "A": np.arange(6, dtype="int64"), "B": Series(list("aabbca")).astype(CDT(list("cab"))), } ).set_index("B") self.df2 = DataFrame( { "A": np.arange(6, dtype="int64"), "B": Series(list("aabbca")).astype(CDT(list("cabe"))), } ).set_index("B") self.df3 = DataFrame( { "A": np.arange(6, dtype="int64"), "B": (Series([1, 1, 2, 1, 3, 2]).astype(CDT([3, 2, 1], ordered=True))), } ).set_index("B") self.df4 = DataFrame( { "A": np.arange(6, dtype="int64"), "B": (Series([1, 1, 2, 1, 3, 2]).astype(CDT([3, 2, 1], ordered=False))), } ).set_index("B") def test_loc_scalar(self): result = self.df.loc["a"] expected = DataFrame( {"A": [0, 1, 5], "B": (Series(list("aaa")).astype(CDT(list("cab"))))} ).set_index("B") assert_frame_equal(result, expected) df = self.df.copy() df.loc["a"] = 20 expected = DataFrame( { "A": [20, 20, 2, 3, 4, 20], "B": (Series(list("aabbca")).astype(CDT(list("cab")))), } ).set_index("B") assert_frame_equal(df, expected) # value not in the categories with pytest.raises(KeyError, match=r"^'d'$"): df.loc["d"] msg = "cannot append a non-category item to a CategoricalIndex" with pytest.raises(TypeError, match=msg): df.loc["d"] = 10 msg = ( "cannot insert an item into a CategoricalIndex that is not" " already an existing category" ) with pytest.raises(TypeError, match=msg): df.loc["d", "A"] = 10 with pytest.raises(TypeError, match=msg): df.loc["d", "C"] = 10 def test_getitem_scalar(self): cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")]) s = Series([1, 2], index=cats) expected = s.iloc[0] result = s[cats[0]] assert result == expected def test_slicing_directly(self): cat = Categorical(["a", "b", "c", "d", "a", "b", "c"]) sliced = cat[3] assert sliced == "d" sliced = cat[3:5] expected = Categorical(["d", "a"], categories=["a", "b", "c", "d"]) tm.assert_numpy_array_equal(sliced._codes, expected._codes) tm.assert_index_equal(sliced.categories, expected.categories) def test_slicing(self): cat = Series(Categorical([1, 2, 3, 4])) reversed = cat[::-1] exp = np.array([4, 3, 2, 1], dtype=np.int64) tm.assert_numpy_array_equal(reversed.__array__(), exp) df = DataFrame({"value": (np.arange(100) + 1).astype("int64")}) df["D"] = pd.cut(df.value, bins=[0, 25, 50, 75, 100]) expected = Series([11, Interval(0, 25)], index=["value", "D"], name=10) result = df.iloc[10] tm.assert_series_equal(result, expected) expected = DataFrame( {"value": np.arange(11, 21).astype("int64")}, index=np.arange(10, 20).astype("int64"), ) expected["D"] = pd.cut(expected.value, bins=[0, 25, 50, 75, 100]) result = df.iloc[10:20] tm.assert_frame_equal(result, expected) expected = Series([9, Interval(0, 25)], index=["value", "D"], name=8) result = df.loc[8] tm.assert_series_equal(result, expected) def test_slicing_and_getting_ops(self): # systematically test the slicing operations: # for all slicing ops: # - returning a dataframe # - returning a column # - returning a row # - returning a single value cats = Categorical( ["a", "c", "b", "c", "c", "c", "c"], categories=["a", "b", "c"] ) idx = Index(["h", "i", "j", 
"k", "l", "m", "n"]) values = [1, 2, 3, 4, 5, 6, 7] df = DataFrame({"cats": cats, "values": values}, index=idx) # the expected values cats2 = Categorical(["b", "c"], categories=["a", "b", "c"]) idx2 = Index(["j", "k"]) values2 = [3, 4] # 2:4,: | "j":"k",: exp_df = DataFrame({"cats": cats2, "values": values2}, index=idx2) # :,"cats" | :,0 exp_col = Series(cats, index=idx, name="cats") # "j",: | 2,: exp_row = Series(["b", 3], index=["cats", "values"], dtype="object", name="j") # "j","cats | 2,0 exp_val = "b" # iloc # frame res_df = df.iloc[2:4, :] tm.assert_frame_equal(res_df, exp_df) assert is_categorical_dtype(res_df["cats"]) # row res_row = df.iloc[2, :] tm.assert_series_equal(res_row, exp_row) assert isinstance(res_row["cats"], str) # col res_col = df.iloc[:, 0] tm.assert_series_equal(res_col, exp_col) assert is_categorical_dtype(res_col) # single value res_val = df.iloc[2, 0] assert res_val == exp_val # loc # frame res_df = df.loc["j":"k", :] tm.assert_frame_equal(res_df, exp_df) assert is_categorical_dtype(res_df["cats"]) # row res_row = df.loc["j", :] tm.assert_series_equal(res_row, exp_row) assert isinstance(res_row["cats"], str) # col res_col = df.loc[:, "cats"] tm.assert_series_equal(res_col, exp_col) assert is_categorical_dtype(res_col) # single value res_val = df.loc["j", "cats"] assert res_val == exp_val # ix # frame # res_df = df.loc["j":"k",[0,1]] # doesn't work? res_df = df.loc["j":"k", :] tm.assert_frame_equal(res_df, exp_df) assert is_categorical_dtype(res_df["cats"]) # row res_row = df.loc["j", :] tm.assert_series_equal(res_row, exp_row) assert isinstance(res_row["cats"], str) # col res_col = df.loc[:, "cats"] tm.assert_series_equal(res_col, exp_col) assert is_categorical_dtype(res_col) # single value res_val = df.loc["j", df.columns[0]] assert res_val == exp_val # iat res_val = df.iat[2, 0] assert res_val == exp_val # at res_val = df.at["j", "cats"] assert res_val == exp_val # fancy indexing exp_fancy = df.iloc[[2]] res_fancy = df[df["cats"] == "b"] tm.assert_frame_equal(res_fancy, exp_fancy) res_fancy = df[df["values"] == 3] tm.assert_frame_equal(res_fancy, exp_fancy) # get_value res_val = df.at["j", "cats"] assert res_val == exp_val # i : int, slice, or sequence of integers res_row = df.iloc[2] tm.assert_series_equal(res_row, exp_row) assert isinstance(res_row["cats"], str) res_df = df.iloc[slice(2, 4)] tm.assert_frame_equal(res_df, exp_df) assert is_categorical_dtype(res_df["cats"]) res_df = df.iloc[[2, 3]] tm.assert_frame_equal(res_df, exp_df) assert is_categorical_dtype(res_df["cats"]) res_col = df.iloc[:, 0] tm.assert_series_equal(res_col, exp_col) assert is_categorical_dtype(res_col) res_df = df.iloc[:, slice(0, 2)] tm.assert_frame_equal(res_df, df) assert is_categorical_dtype(res_df["cats"]) res_df = df.iloc[:, [0, 1]] tm.assert_frame_equal(res_df, df) assert is_categorical_dtype(res_df["cats"]) def test_slicing_doc_examples(self): # GH 7918 cats = Categorical( ["a", "b", "b", "b", "c", "c", "c"], categories=["a", "b", "c"] ) idx = Index(["h", "i", "j", "k", "l", "m", "n"]) values = [1, 2, 2, 2, 3, 4, 5] df = DataFrame({"cats": cats, "values": values}, index=idx) result = df.iloc[2:4, :] expected = DataFrame( { "cats": Categorical(["b", "b"], categories=["a", "b", "c"]), "values": [2, 2], }, index=["j", "k"], ) tm.assert_frame_equal(result, expected) result = df.iloc[2:4, :].dtypes expected = Series(["category", "int64"], ["cats", "values"]) tm.assert_series_equal(result, expected) result = df.loc["h":"j", "cats"] expected = Series( Categorical(["a", "b", "b"], 
categories=["a", "b", "c"]), index=["h", "i", "j"], name="cats", ) tm.assert_series_equal(result, expected) result = df.loc["h":"j", df.columns[0:1]] expected = DataFrame( {"cats": Categorical(["a", "b", "b"], categories=["a", "b", "c"])}, index=["h", "i", "j"], ) tm.assert_frame_equal(result, expected) def test_getitem_category_type(self): # GH 14580 # test iloc() on Series with Categorical data s = Series([1, 2, 3]).astype("category") # get slice result = s.iloc[0:2] expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3])) tm.assert_series_equal(result, expected) # get list of indexes result = s.iloc[[0, 1]] expected = Series([1, 2]).astype(CategoricalDtype([1, 2, 3])) tm.assert_series_equal(result, expected) # get boolean array result = s.iloc[[True, False, False]] expected = Series([1]).astype(CategoricalDtype([1, 2, 3])) tm.assert_series_equal(result, expected) def test_loc_listlike(self): # list of labels result = self.df.loc[["c", "a"]] expected = self.df.iloc[[4, 0, 1, 5]] assert_frame_equal(result, expected, check_index_type=True) result = self.df2.loc[["a", "b", "e"]] exp_index = CategoricalIndex(list("aaabbe"), categories=list("cabe"), name="B") expected = DataFrame({"A": [0, 1, 5, 2, 3, np.nan]}, index=exp_index) assert_frame_equal(result, expected, check_index_type=True) # element in the categories but not in the values with pytest.raises(KeyError, match=r"^'e'$"): self.df2.loc["e"] # assign is ok df = self.df2.copy() df.loc["e"] = 20 result = df.loc[["a", "b", "e"]] exp_index = CategoricalIndex(list("aaabbe"), categories=list("cabe"), name="B") expected = DataFrame({"A": [0, 1, 5, 2, 3, 20]}, index=exp_index) assert_frame_equal(result, expected) df = self.df2.copy() result = df.loc[["a", "b", "e"]] exp_index = CategoricalIndex(list("aaabbe"), categories=list("cabe"), name="B") expected = DataFrame({"A": [0, 1, 5, 2, 3, np.nan]}, index=exp_index) assert_frame_equal(result, expected, check_index_type=True) # not all labels in the categories with pytest.raises( KeyError, match="'a list-indexer must only include values that are in the" " categories'", ): self.df2.loc[["a", "d"]] def test_loc_listlike_dtypes(self): # GH 11586 # unique categories and codes index = CategoricalIndex(["a", "b", "c"]) df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=index) # unique slice res = df.loc[["a", "b"]] exp_index = CategoricalIndex(["a", "b"], categories=index.categories) exp = DataFrame({"A": [1, 2], "B": [4, 5]}, index=exp_index) tm.assert_frame_equal(res, exp, check_index_type=True) # duplicated slice res = df.loc[["a", "a", "b"]] exp_index = CategoricalIndex(["a", "a", "b"], categories=index.categories) exp = DataFrame({"A": [1, 1, 2], "B": [4, 4, 5]}, index=exp_index) tm.assert_frame_equal(res, exp, check_index_type=True) msg = "a list-indexer must only include values that are in the categories" with pytest.raises(KeyError, match=msg): df.loc[["a", "x"]] # duplicated categories and codes index = CategoricalIndex(["a", "b", "a"]) df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=index) # unique slice res = df.loc[["a", "b"]] exp = DataFrame( {"A": [1, 3, 2], "B": [4, 6, 5]}, index=CategoricalIndex(["a", "a", "b"]) ) tm.assert_frame_equal(res, exp, check_index_type=True) # duplicated slice res = df.loc[["a", "a", "b"]] exp = DataFrame( {"A": [1, 3, 1, 3, 2], "B": [4, 6, 4, 6, 5]}, index=CategoricalIndex(["a", "a", "a", "a", "b"]), ) tm.assert_frame_equal(res, exp, check_index_type=True) msg = "a list-indexer must only include values that are in the categories" with 
pytest.raises(KeyError, match=msg): df.loc[["a", "x"]] # contains unused category index = CategoricalIndex(["a", "b", "a", "c"], categories=list("abcde")) df = DataFrame({"A": [1, 2, 3, 4], "B": [5, 6, 7, 8]}, index=index) res = df.loc[["a", "b"]] exp = DataFrame( {"A": [1, 3, 2], "B": [5, 7, 6]}, index=CategoricalIndex(["a", "a", "b"], categories=list("abcde")), ) tm.assert_frame_equal(res, exp, check_index_type=True) res = df.loc[["a", "e"]] exp = DataFrame( {"A": [1, 3, np.nan], "B": [5, 7, np.nan]}, index=CategoricalIndex(["a", "a", "e"], categories=list("abcde")), ) tm.assert_frame_equal(res, exp, check_index_type=True) # duplicated slice res = df.loc[["a", "a", "b"]] exp = DataFrame( {"A": [1, 3, 1, 3, 2], "B": [5, 7, 5, 7, 6]}, index=CategoricalIndex(["a", "a", "a", "a", "b"], categories=list("abcde")), ) tm.assert_frame_equal(res, exp, check_index_type=True) msg = "a list-indexer must only include values that are in the categories" with pytest.raises(KeyError, match=msg): df.loc[["a", "x"]] def test_get_indexer_array(self): arr = np.array( [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")], dtype=object, ) cats = [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")] ci = CategoricalIndex(cats, categories=cats, ordered=False, dtype="category") result = ci.get_indexer(arr) expected = np.array([0, 1], dtype="intp") tm.assert_numpy_array_equal(result, expected) def test_get_indexer_same_categories_same_order(self): ci = CategoricalIndex(["a", "b"], categories=["a", "b"]) result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["a", "b"])) expected = np.array([1, 1], dtype="intp") tm.assert_numpy_array_equal(result, expected) def test_get_indexer_same_categories_different_order(self): # https://github.com/pandas-dev/pandas/issues/19551 ci = CategoricalIndex(["a", "b"], categories=["a", "b"]) result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["b", "a"])) expected = np.array([1, 1], dtype="intp") tm.assert_numpy_array_equal(result, expected) def test_getitem_with_listlike(self): # GH 16115 cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")]) expected = DataFrame( [[1, 0], [0, 1]], dtype="uint8", index=[0, 1], columns=cats ) dummies = pd.get_dummies(cats) result = dummies[[c for c in dummies.columns]] assert_frame_equal(result, expected) def test_setitem_listlike(self): # GH 9469 # properly coerce the input indexers np.random.seed(1) c = Categorical( np.random.randint(0, 5, size=150000).astype(np.int8) ).add_categories([-1000]) indexer = np.array([100000]).astype(np.int64) c[indexer] = -1000 # we are asserting the code result here # which maps to the -1000 category result = c.codes[np.array([100000]).astype(np.int64)] tm.assert_numpy_array_equal(result, np.array([5], dtype="int8")) def test_ix_categorical_index(self): # GH 12531 df = DataFrame(np.random.randn(3, 3), index=list("ABC"), columns=list("XYZ")) cdf = df.copy() cdf.index = CategoricalIndex(df.index) cdf.columns = CategoricalIndex(df.columns) expect = Series(df.loc["A", :], index=cdf.columns, name="A") assert_series_equal(cdf.loc["A", :], expect) expect = Series(df.loc[:, "X"], index=cdf.index, name="X") assert_series_equal(cdf.loc[:, "X"], expect) exp_index = CategoricalIndex(list("AB"), categories=["A", "B", "C"]) expect = DataFrame(df.loc[["A", "B"], :], columns=cdf.columns, index=exp_index) assert_frame_equal(cdf.loc[["A", "B"], :], expect) exp_columns = CategoricalIndex(list("XY"), categories=["X", "Y", "Z"]) expect = DataFrame(df.loc[:, ["X", "Y"]], 
index=cdf.index, columns=exp_columns) assert_frame_equal(cdf.loc[:, ["X", "Y"]], expect) # non-unique df = DataFrame(np.random.randn(3, 3), index=list("ABA"), columns=list("XYX")) cdf = df.copy() cdf.index = CategoricalIndex(df.index) cdf.columns = CategoricalIndex(df.columns) exp_index = CategoricalIndex(list("AA"), categories=["A", "B"]) expect = DataFrame(df.loc["A", :], columns=cdf.columns, index=exp_index) assert_frame_equal(cdf.loc["A", :], expect) exp_columns = CategoricalIndex(list("XX"), categories=["X", "Y"]) expect = DataFrame(df.loc[:, "X"], index=cdf.index, columns=exp_columns) assert_frame_equal(cdf.loc[:, "X"], expect) expect = DataFrame( df.loc[["A", "B"], :], columns=cdf.columns, index=CategoricalIndex(list("AAB")), ) assert_frame_equal(cdf.loc[["A", "B"], :], expect) expect = DataFrame( df.loc[:, ["X", "Y"]], index=cdf.index, columns=CategoricalIndex(list("XXY")), ) assert_frame_equal(cdf.loc[:, ["X", "Y"]], expect) def test_read_only_source(self): # GH 10043 rw_array = np.eye(10) rw_df = DataFrame(rw_array) ro_array = np.eye(10) ro_array.setflags(write=False) ro_df = DataFrame(ro_array) assert_frame_equal(rw_df.iloc[[1, 2, 3]], ro_df.iloc[[1, 2, 3]]) assert_frame_equal(rw_df.iloc[[1]], ro_df.iloc[[1]]) assert_series_equal(rw_df.iloc[1], ro_df.iloc[1]) assert_frame_equal(rw_df.iloc[1:3], ro_df.iloc[1:3]) assert_frame_equal(rw_df.loc[[1, 2, 3]], ro_df.loc[[1, 2, 3]]) assert_frame_equal(rw_df.loc[[1]], ro_df.loc[[1]]) assert_series_equal(rw_df.loc[1], ro_df.loc[1]) assert_frame_equal(rw_df.loc[1:3], ro_df.loc[1:3]) def test_reindexing(self): # reindexing # convert to a regular index result = self.df2.reindex(["a", "b", "e"]) expected = DataFrame( {"A": [0, 1, 5, 2, 3, np.nan], "B": Series(list("aaabbe"))} ).set_index("B") assert_frame_equal(result, expected, check_index_type=True) result = self.df2.reindex(["a", "b"]) expected = DataFrame( {"A": [0, 1, 5, 2, 3], "B": Series(list("aaabb"))} ).set_index("B") assert_frame_equal(result, expected, check_index_type=True) result = self.df2.reindex(["e"]) expected = DataFrame({"A": [np.nan], "B": Series(["e"])}).set_index("B") assert_frame_equal(result, expected, check_index_type=True) result = self.df2.reindex(["d"]) expected = DataFrame({"A": [np.nan], "B": Series(["d"])}).set_index("B") assert_frame_equal(result, expected, check_index_type=True) # since we are actually reindexing with a Categorical # then return a Categorical cats = list("cabe") result = self.df2.reindex(Categorical(["a", "d"], categories=cats)) expected = DataFrame( {"A": [0, 1, 5, np.nan], "B": Series(list("aaad")).astype(CDT(cats))} ).set_index("B") assert_frame_equal(result, expected, check_index_type=True) result = self.df2.reindex(Categorical(["a"], categories=cats)) expected = DataFrame( {"A": [0, 1, 5], "B": Series(list("aaa")).astype(CDT(cats))} ).set_index("B") assert_frame_equal(result, expected, check_index_type=True) result = self.df2.reindex(["a", "b", "e"]) expected = DataFrame( {"A": [0, 1, 5, 2, 3, np.nan], "B": Series(list("aaabbe"))} ).set_index("B") assert_frame_equal(result, expected, check_index_type=True) result = self.df2.reindex(["a", "b"]) expected = DataFrame( {"A": [0, 1, 5, 2, 3], "B": Series(list("aaabb"))} ).set_index("B") assert_frame_equal(result, expected, check_index_type=True) result = self.df2.reindex(["e"]) expected = DataFrame({"A": [np.nan], "B": Series(["e"])}).set_index("B") assert_frame_equal(result, expected, check_index_type=True) # give back the type of categorical that we received result = self.df2.reindex( 
Categorical(["a", "d"], categories=cats, ordered=True) ) expected = DataFrame( { "A": [0, 1, 5, np.nan], "B": Series(list("aaad")).astype(CDT(cats, ordered=True)), } ).set_index("B") assert_frame_equal(result, expected, check_index_type=True) result = self.df2.reindex(Categorical(["a", "d"], categories=["a", "d"])) expected = DataFrame( {"A": [0, 1, 5, np.nan], "B": Series(list("aaad")).astype(CDT(["a", "d"]))} ).set_index("B") assert_frame_equal(result, expected, check_index_type=True) # passed duplicate indexers are not allowed msg = "cannot reindex with a non-unique indexer" with pytest.raises(ValueError, match=msg): self.df2.reindex(["a", "a"]) # args NotImplemented ATM msg = r"argument {} is not implemented for CategoricalIndex\.reindex" with pytest.raises(NotImplementedError, match=msg.format("method")): self.df2.reindex(["a"], method="ffill") with pytest.raises(NotImplementedError, match=msg.format("level")): self.df2.reindex(["a"], level=1) with pytest.raises(NotImplementedError, match=msg.format("limit")): self.df2.reindex(["a"], limit=2) def test_loc_slice(self): # slicing # not implemented ATM # GH9748 msg = ( "cannot do slice indexing on {klass} with these " r"indexers \[1\] of {kind}".format( klass=str(CategoricalIndex), kind=str(int) ) ) with pytest.raises(TypeError, match=msg): self.df.loc[1:5] # result = df.loc[1:5] # expected = df.iloc[[1,2,3,4]] # assert_frame_equal(result, expected) def test_loc_and_at_with_categorical_index(self): # GH 20629 s = Series([1, 2, 3], index=pd.CategoricalIndex(["A", "B", "C"])) assert s.loc["A"] == 1 assert s.at["A"] == 1 df = DataFrame( [[1, 2], [3, 4], [5, 6]], index=pd.CategoricalIndex(["A", "B", "C"]) ) assert df.loc["B", 1] == 4 assert df.at["B", 1] == 4 def test_boolean_selection(self): df3 = self.df3 df4 = self.df4 result = df3[df3.index == "a"] expected = df3.iloc[[]] assert_frame_equal(result, expected) result = df4[df4.index == "a"] expected = df4.iloc[[]] assert_frame_equal(result, expected) result = df3[df3.index == 1] expected = df3.iloc[[0, 1, 3]] assert_frame_equal(result, expected) result = df4[df4.index == 1] expected = df4.iloc[[0, 1, 3]] assert_frame_equal(result, expected) # since we have an ordered categorical # CategoricalIndex([1, 1, 2, 1, 3, 2], # categories=[3, 2, 1], # ordered=True, # name='B') result = df3[df3.index < 2] expected = df3.iloc[[4]] assert_frame_equal(result, expected) result = df3[df3.index > 1] expected = df3.iloc[[]] assert_frame_equal(result, expected) # unordered # cannot be compared # CategoricalIndex([1, 1, 2, 1, 3, 2], # categories=[3, 2, 1], # ordered=False, # name='B') msg = "Unordered Categoricals can only compare equality or not" with pytest.raises(TypeError, match=msg): df4[df4.index < 2] with pytest.raises(TypeError, match=msg): df4[df4.index > 1] def test_indexing_with_category(self): # https://github.com/pandas-dev/pandas/issues/12564 # consistent result if comparing as Dataframe cat = DataFrame({"A": ["foo", "bar", "baz"]}) exp = DataFrame({"A": [True, False, False]}) res = cat[["A"]] == "foo" tm.assert_frame_equal(res, exp) cat["A"] = cat["A"].astype("category") res = cat[["A"]] == "foo" tm.assert_frame_equal(res, exp) def test_map_with_dict_or_series(self): orig_values = ["a", "B", 1, "a"] new_values = ["one", 2, 3.0, "one"] cur_index = pd.CategoricalIndex(orig_values, name="XXX") expected = pd.CategoricalIndex( new_values, name="XXX", categories=[3.0, 2, "one"] ) mapper = pd.Series(new_values[:-1], index=orig_values[:-1]) output = cur_index.map(mapper) # Order of categories in 
output can be different tm.assert_index_equal(expected, output) mapper = {o: n for o, n in zip(orig_values[:-1], new_values[:-1])} output = cur_index.map(mapper) # Order of categories in output can be different tm.assert_index_equal(expected, output)
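

# Hedged standalone sketch (illustration only, not part of the test suite
# above): a minimal look at .loc and .reindex on a CategoricalIndex, the
# behaviours the tests in this class exercise systematically. All names
# below are made up for the example.
def _categorical_index_example():  # pragma: no cover - illustration only
    import pandas as pd

    idx = pd.CategoricalIndex(list("ab"), categories=list("abc"), name="B")
    df = pd.DataFrame({"A": [0, 1]}, index=idx)

    # scalar .loc works for labels that are present in the data
    first = df.loc["a", "A"]  # -> 0
    # reindexing with a label that is a valid category but has no data
    # fills the corresponding row with NaN
    with_missing = df.reindex(["a", "c"])
    return first, with_missing
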
# Copyright (c) 2016 Anton Kozhevnikov, Thomas Schulthess # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that # the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the # following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions # and the following disclaimer in the documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED # WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A # PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import json import sys import re import xml.etree.ElementTree as ET def str2bool(v): return v.lower() in ("yes", "true", "t", "1") def parse_header(upf_dict, root): # header node = root.findall("./PP_HEADER")[0] upf_dict['header'] = {} upf_dict['header']['number_of_proj'] = int(node.attrib['number_of_proj']) upf_dict['header']['core_correction'] = str2bool(node.attrib['core_correction']) upf_dict['header']['element'] = node.attrib['element'].strip() upf_dict['header']['pseudo_type'] = node.attrib['pseudo_type'] upf_dict['header']['z_valence'] = float(node.attrib['z_valence']) upf_dict['header']['mesh_size'] = int(node.attrib['mesh_size']) upf_dict['header']['is_ultrasoft'] = str2bool(node.attrib['is_ultrasoft']) upf_dict['header']['number_of_wfc'] = int(node.attrib['number_of_wfc']) upf_dict['header']['SpinOrbit'] = str2bool(node.attrib['has_so']) def parse_radial_grid(upf_dict, root): # radial grid node = root.findall("./PP_MESH/PP_R")[0] rg = [float(e) for e in str.split(node.text)] np = int(node.attrib['size']) if np != len(rg): print("Wrong number of radial points") upf_dict['radial_grid'] = rg ########################################################################## #### Read non-local part: basis PS and AE (for PAW) functions, #### beta(or p for PAW)-projectors, Qij augmentation coefs, Dij ########################################################################## def parse_non_local(upf_dict, root): #---------------------------------------------------- #------ Read beta (or p for PAW) - projectors ------ #---------------------------------------------------- upf_dict['beta_projectors'] = [] proj_num = upf_dict['header']['number_of_proj'] for i in range(proj_num): node = root.findall("./PP_NONLOCAL/PP_BETA.%i"%(i+1))[0] nr = int(node.attrib['cutoff_radius_index']) upf_dict['beta_projectors'].append({}) beta = [float(e) for e in str.split(node.text)] upf_dict['beta_projectors'][i]['radial_function'] = beta[0:nr] upf_dict['beta_projectors'][i]['label'] = node.attrib['label'] upf_dict['beta_projectors'][i]['angular_momentum'] = int(node.attrib['angular_momentum']) #upf_dict['beta_projectors'][i]['cutoff_radius_index'] = int(node.attrib['cutoff_radius_index']) 
upf_dict['beta_projectors'][i]['cutoff_radius'] = float(node.attrib['cutoff_radius']) upf_dict['beta_projectors'][i]['ultrasoft_cutoff_radius'] = float(node.attrib['ultrasoft_cutoff_radius']) #if upf_dict['header']['SpinOrbit']: # node1 = root.findall("./PP_SPIN_ORB/PP_RELBETA.%i"%(i+1))[0] # upf_dict['beta_projectors'][i]['total_angular_momentum'] = float(node1.attrib['jjj']) #-------------------------- #------- Dij matrix ------- #-------------------------- node = root.findall('./PP_NONLOCAL/PP_DIJ')[0] dij = [float(e) for e in str.split(node.text)] upf_dict['D_ion'] = [float(e) / 2 for e in str.split(node.text)] #convert to hartree #if upf_dict['header']['pseudo_type'] == 'NC': return if not upf_dict['header']['is_ultrasoft']: return #------------------------------------ #------- augmentation part: Qij ---- #------------------------------------ node = root.findall('./PP_NONLOCAL/PP_AUGMENTATION')[0] if node.attrib['q_with_l'] != 'T': print("Don't know how to parse this 'q_with_l != T'") sys.exit(0) upf_dict['augmentation'] = [] nb = upf_dict['header']['number_of_proj'] #----------------------------- #--------- read Qij ---------- #----------------------------- for i in range(nb): li = upf_dict['beta_projectors'][i]['angular_momentum'] for j in range(i, nb): lj = upf_dict['beta_projectors'][j]['angular_momentum'] for l in range(abs(li-lj), li+lj+1): if (li + lj + l) % 2 == 0: node = root.findall("./PP_NONLOCAL/PP_AUGMENTATION/PP_QIJL.%i.%i.%i"%(i+1,j+1,l))[0] qij = {} qij['radial_function'] = [float(e) for e in str.split(node.text)] qij['i'] = i qij['j'] = j qij['angular_momentum'] = int(node.attrib['angular_momentum']) if l != qij['angular_momentum']: print("Wrong angular momentum for Qij") sys.exit(0) upf_dict['augmentation'].append(qij) #################################################### ############# Read PAW data ######################## #################################################### def parse_PAW(upf_dict, root): if upf_dict['header']['pseudo_type'] != "PAW": return node = root.findall('./PP_NONLOCAL/PP_AUGMENTATION')[0] upf_dict['header']['cutoff_radius_index'] = int(node.attrib['cutoff_r_index']) upf_dict["paw_data"] = {} #------------------------------------- #---- Read PP_Q and PP_MULTIPOLES ---- #------------------------------------- node = root.findall('./PP_NONLOCAL/PP_AUGMENTATION/PP_Q')[0] upf_dict['paw_data']['aug_integrals'] = [float(e) for e in str.split(node.text)] node = root.findall('./PP_NONLOCAL/PP_AUGMENTATION/PP_MULTIPOLES')[0] upf_dict['paw_data']['aug_multipoles'] = [float(e) for e in str.split(node.text)] #---------------------------------------- #---- Read AE and PS basis wave functions #---------------------------------------- nb = upf_dict['header']['number_of_proj'] #----- Read AE wfc ----- upf_dict['paw_data']['ae_wfc']=[] for i in range(nb): wfc={} node = root.findall("./PP_FULL_WFC/PP_AEWFC.%i"%(i+1))[0] wfc['radial_function'] = [float(e) for e in str.split(node.text)] wfc['angular_momentum'] = int(node.attrib['l']) wfc['label'] = node.attrib['label'] wfc['index'] = int(node.attrib['index']) - 1 upf_dict['paw_data']['ae_wfc'].append(wfc) #----- Read PS wfc ----- upf_dict['paw_data']['ps_wfc']=[] for i in range(nb): wfc={} node = root.findall("./PP_FULL_WFC/PP_PSWFC.%i"%(i+1))[0] wfc['radial_function'] = [float(e) for e in str.split(node.text)] wfc['angular_momentum'] = int(node.attrib['l']) wfc['label'] = node.attrib['label'] wfc['index'] = int(node.attrib['index']) - 1 upf_dict['paw_data']['ps_wfc'].append(wfc) #------ Read PP_PAW section: 
occupation, AE_NLCC, AE_VLOC node = root.findall("./PP_PAW")[0] upf_dict['header']["paw_core_energy"] = float(node.attrib['core_energy']) / 2 # convert to Ha node = root.findall("./PP_PAW/PP_OCCUPATIONS")[0] size = int(node.attrib['size']) #---- occupation for i in range(size): upf_dict['paw_data']['occupations'] = [float(e) for e in str.split(node.text)] #---- Read AE core correction (density of core charge) node = root.findall("./PP_PAW/PP_AE_NLCC")[0] size = int(node.attrib['size']) for i in range(size): upf_dict['paw_data']['ae_core_charge_density'] = [float(e) for e in str.split(node.text)] #---- Read AE local potential node = root.findall("./PP_PAW/PP_AE_VLOC")[0] size = int(node.attrib['size']) for i in range(size): upf_dict['paw_data']['ae_local_potential'] = [float(e) / 2 for e in str.split(node.text)] # convert to Ha #################################################### ############# Read starting wave functions ######### #################################################### def parse_pswfc(upf_dict, root): #if upf_dict['header']['pseudo_type'] != 'NC': return upf_dict['atomic_wave_functions']=[] for i in range(upf_dict['header']['number_of_wfc']): wfc={} node = root.findall("./PP_PSWFC/PP_CHI.%i"%(i+1))[0] wfc['radial_function'] = [float(e) for e in str.split(node.text)] wfc['angular_momentum'] = int(node.attrib['l']) wfc['label'] = node.attrib['label'] wfc['occupation'] = float(node.attrib['occupation']) upf_dict['atomic_wave_functions'].append(wfc) #################################################### ############# Spin orbit coupling ################# #################################################### def parse_SpinOrbit(upf_dict, root): if not upf_dict['header']['SpinOrbit']: return ### Spin orbit informations for the projectors proj_num = upf_dict['header']['number_of_proj'] for i in range(proj_num): node = root.findall("./PP_SPIN_ORB/PP_RELBETA.%i"%(i+1))[0] upf_dict['beta_projectors'][i]['angular_momentum'] = float(node.attrib['lll']) upf_dict['beta_projectors'][i]['total_angular_momentum'] = float(node.attrib['jjj']) ### spin orbit information for the AEWFC # wfc_num = upf_dict['header']['number_of_wfc'] # for i in range(wfc_num): # node = root.findall("./PP_SPIN_ORB/PP_RELWFC.%i"%(i+1))[0] # upf_dict['paw_data']['ae_wfc']['ae_wfc_rel'] = float(node) # upf_dict['paw_data']['ae_wfc']['total_angular_momentum'] = float(node('jchi')) # upf_dict['paw_data']['ps_wfc']['total_angular_momentum'] = float(node('jchi')) ###################################################### ################## MAIN ############################## ###################################################### def main(): tree = ET.parse(sys.argv[1]) root = tree.getroot() upf_dict = {} parse_header(upf_dict, root) parse_radial_grid(upf_dict, root) # non linear core correction if upf_dict['header']['core_correction']: node = root.findall("./PP_NLCC")[0] rc = [float(e) for e in str.split(node.text)] np = int(node.attrib['size']) if np != len(rc): print("Wrong number of points") upf_dict['core_charge_density'] = rc # local part of potential node = root.findall("./PP_LOCAL")[0] vloc = [float(e) / 2 for e in str.split(node.text)] # convert to Ha np = int(node.attrib['size']) if np != len(vloc): print("Wrong number of points") upf_dict['local_potential'] = vloc # non-local part of potential parse_non_local(upf_dict, root) # parse PAW data parse_PAW(upf_dict, root) # parse pseudo wavefunctions parse_pswfc(upf_dict, root) # parse data for spin orbit coupling parse_SpinOrbit(upf_dict, root) # rho node = 
root.findall("./PP_RHOATOM")[0] rho = [float(e) for e in str.split(node.text)] np = int(node.attrib['size']) if np != len(rho): print("Wrong number of points") upf_dict['total_charge_density'] = rho pp_dict = {} pp_dict["pseudo_potential"] = upf_dict fout = open(sys.argv[1] + ".json", "w") # Match comma, space, newline and an arbitrary number of spaces ',\s\n\s*' with the # following conditions: a digit before (?<=[0-9]) and a minus or a digit after (?=[-|0-9]). # Replace found sequence with comma and space. fout.write(re.sub(r"(?<=[0-9]),\s\n\s*(?=[-|0-9])", r", ", json.dumps(pp_dict, indent=2))) fout.close() if __name__ == "__main__": main()
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from django import http from django.conf import settings from django.core.urlresolvers import reverse from mox import IsA from openstack_dashboard import api from openstack_dashboard.test import helpers as test from .tables import SecurityGroupsTable, RulesTable INDEX_URL = reverse('horizon:project:access_and_security:index') SG_CREATE_URL = reverse('horizon:project:access_and_security:' 'security_groups:create') def strip_absolute_base(uri): return uri.split(settings.TESTSERVER, 1)[-1] class SecurityGroupsViewTests(test.TestCase): def setUp(self): super(SecurityGroupsViewTests, self).setUp() sec_group = self.security_groups.first() self.detail_url = reverse('horizon:project:access_and_security:' 'security_groups:detail', args=[sec_group.id]) self.edit_url = reverse('horizon:project:access_and_security:' 'security_groups:add_rule', args=[sec_group.id]) def test_create_security_groups_get(self): res = self.client.get(SG_CREATE_URL) self.assertTemplateUsed(res, 'project/access_and_security/security_groups/create.html') def test_create_security_groups_post(self): sec_group = self.security_groups.first() self.mox.StubOutWithMock(api.nova, 'security_group_create') api.nova.security_group_create(IsA(http.HttpRequest), sec_group.name, sec_group.description) \ .AndReturn(sec_group) self.mox.ReplayAll() formData = {'method': 'CreateGroup', 'name': sec_group.name, 'description': sec_group.description} res = self.client.post(SG_CREATE_URL, formData) self.assertRedirectsNoFollow(res, INDEX_URL) def test_create_security_groups_post_exception(self): sec_group = self.security_groups.first() self.mox.StubOutWithMock(api.nova, 'security_group_create') api.nova.security_group_create(IsA(http.HttpRequest), sec_group.name, sec_group.description) \ .AndRaise(self.exceptions.nova) self.mox.ReplayAll() formData = {'method': 'CreateGroup', 'name': sec_group.name, 'description': sec_group.description} res = self.client.post(SG_CREATE_URL, formData) self.assertMessageCount(error=1) self.assertRedirectsNoFollow(res, INDEX_URL) def test_create_security_groups_post_wrong_name(self): sec_group = self.security_groups.first() self.mox.StubOutWithMock(api.nova, 'security_group_create') fail_name = sec_group.name + ' invalid' self.mox.ReplayAll() formData = {'method': 'CreateGroup', 'name': fail_name, 'description': sec_group.description} res = self.client.post(SG_CREATE_URL, formData) self.assertTemplateUsed(res, 'project/access_and_security/security_groups/create.html') self.assertContains(res, "ASCII") def test_detail_get(self): sec_group = self.security_groups.first() self.mox.StubOutWithMock(api.nova, 'security_group_get') api.nova.security_group_get(IsA(http.HttpRequest), sec_group.id).AndReturn(sec_group) self.mox.ReplayAll() res = 
self.client.get(self.detail_url) self.assertTemplateUsed(res, 'project/access_and_security/security_groups/detail.html') def test_detail_get_exception(self): sec_group = self.security_groups.first() self.mox.StubOutWithMock(api.nova, 'security_group_get') api.nova.security_group_get(IsA(http.HttpRequest), sec_group.id) \ .AndRaise(self.exceptions.nova) self.mox.ReplayAll() res = self.client.get(self.detail_url) self.assertRedirectsNoFollow(res, INDEX_URL) def test_detail_add_rule_cidr(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.first() self.mox.StubOutWithMock(api.nova, 'security_group_rule_create') self.mox.StubOutWithMock(api.nova, 'security_group_list') api.nova.security_group_rule_create(IsA(http.HttpRequest), sec_group.id, rule.ip_protocol, int(rule.from_port), int(rule.to_port), rule.ip_range['cidr'], None).AndReturn(rule) api.nova.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'port': rule.from_port, 'ip_protocol': rule.ip_protocol, 'cidr': rule.ip_range['cidr'], 'source': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url) def test_detail_add_rule_self_as_source_group(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.get(id=3) self.mox.StubOutWithMock(api.nova, 'security_group_rule_create') self.mox.StubOutWithMock(api.nova, 'security_group_list') api.nova.security_group_rule_create( IsA(http.HttpRequest), sec_group.id, rule.ip_protocol, int(rule.from_port), int(rule.to_port), None, u'%s' % sec_group.id).AndReturn(rule) api.nova.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'port': rule.from_port, 'ip_protocol': rule.ip_protocol, 'cidr': '0.0.0.0/0', 'security_group': sec_group.id, 'source': 'sg'} res = self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url) def test_detail_invalid_port_range(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.first() self.mox.StubOutWithMock(api.nova, 'security_group_list') api.nova.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'range', 'from_port': rule.from_port, 'to_port': int(rule.from_port) - 1, 'ip_protocol': rule.ip_protocol, 'cidr': rule.ip_range['cidr'], 'source': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, "greater than or equal to") @test.create_stubs({api.nova: ('security_group_get', 'security_group_list')}) def test_detail_invalid_icmp_rule(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() icmp_rule = self.security_group_rules.list()[1] # 1st Test api.nova.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) # 2nd Test api.nova.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) # 3rd Test api.nova.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) # 4th Test api.nova.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 
'port_or_range': 'port', 'icmp_type': 256, 'icmp_code': icmp_rule.to_port, 'ip_protocol': icmp_rule.ip_protocol, 'cidr': icmp_rule.ip_range['cidr'], 'source': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, "The ICMP type not in range (-1, 255)") formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'icmp_type': icmp_rule.from_port, 'icmp_code': 256, 'ip_protocol': icmp_rule.ip_protocol, 'cidr': icmp_rule.ip_range['cidr'], 'source': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, "The ICMP code not in range (-1, 255)") formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'icmp_type': icmp_rule.from_port, 'icmp_code': None, 'ip_protocol': icmp_rule.ip_protocol, 'cidr': icmp_rule.ip_range['cidr'], 'source_group': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, "The ICMP code is invalid") formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'icmp_type': None, 'icmp_code': icmp_rule.to_port, 'ip_protocol': icmp_rule.ip_protocol, 'cidr': icmp_rule.ip_range['cidr'], 'source': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertNoMessages() self.assertContains(res, "The ICMP type is invalid") def test_detail_add_rule_exception(self): sec_group = self.security_groups.first() sec_group_list = self.security_groups.list() rule = self.security_group_rules.first() self.mox.StubOutWithMock(api.nova, 'security_group_rule_create') self.mox.StubOutWithMock(api.nova, 'security_group_list') api.nova.security_group_rule_create( IsA(http.HttpRequest), sec_group.id, rule.ip_protocol, int(rule.from_port), int(rule.to_port), rule.ip_range['cidr'], None).AndRaise(self.exceptions.nova) api.nova.security_group_list( IsA(http.HttpRequest)).AndReturn(sec_group_list) self.mox.ReplayAll() formData = {'method': 'AddRule', 'id': sec_group.id, 'port_or_range': 'port', 'port': rule.from_port, 'ip_protocol': rule.ip_protocol, 'cidr': rule.ip_range['cidr'], 'source': 'cidr'} res = self.client.post(self.edit_url, formData) self.assertRedirectsNoFollow(res, self.detail_url) def test_detail_delete_rule(self): sec_group = self.security_groups.first() rule = self.security_group_rules.first() self.mox.StubOutWithMock(api.nova, 'security_group_rule_delete') api.nova.security_group_rule_delete(IsA(http.HttpRequest), rule.id) self.mox.ReplayAll() form_data = {"action": "rules__delete__%s" % rule.id} req = self.factory.post(self.edit_url, form_data) kwargs = {'security_group_id': sec_group.id} table = RulesTable(req, sec_group.rules, **kwargs) handled = table.maybe_handle() self.assertEqual(strip_absolute_base(handled['location']), self.detail_url) def test_detail_delete_rule_exception(self): sec_group = self.security_groups.first() rule = self.security_group_rules.first() self.mox.StubOutWithMock(api.nova, 'security_group_rule_delete') api.nova.security_group_rule_delete( IsA(http.HttpRequest), rule.id).AndRaise(self.exceptions.nova) self.mox.ReplayAll() form_data = {"action": "rules__delete__%s" % rule.id} req = self.factory.post(self.edit_url, form_data) kwargs = {'security_group_id': sec_group.id} table = RulesTable(req, self.security_group_rules.list(), **kwargs) handled = table.maybe_handle() self.assertEqual(strip_absolute_base(handled['location']), self.detail_url) def test_delete_group(self): sec_group = self.security_groups.get(name="other_group") 
self.mox.StubOutWithMock(api.nova, 'security_group_delete') api.nova.security_group_delete(IsA(http.HttpRequest), sec_group.id) self.mox.ReplayAll() form_data = {"action": "security_groups__delete__%s" % sec_group.id} req = self.factory.post(INDEX_URL, form_data) table = SecurityGroupsTable(req, self.security_groups.list()) handled = table.maybe_handle() self.assertEqual(strip_absolute_base(handled['location']), INDEX_URL) def test_delete_group_exception(self): sec_group = self.security_groups.get(name="other_group") self.mox.StubOutWithMock(api.nova, 'security_group_delete') api.nova.security_group_delete( IsA(http.HttpRequest), sec_group.id).AndRaise(self.exceptions.nova) self.mox.ReplayAll() form_data = {"action": "security_groups__delete__%s" % sec_group.id} req = self.factory.post(INDEX_URL, form_data) table = SecurityGroupsTable(req, self.security_groups.list()) handled = table.maybe_handle() self.assertEqual(strip_absolute_base(handled['location']), INDEX_URL)
"""Functions for scoring frame-level diarization output.""" # TODO: Module is too long. Refactor. from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os import re import shutil import subprocess import tempfile import numpy as np from scipy.optimize import linear_sum_assignment from scipy.sparse import coo_matrix, issparse from .rttm import write_rttm from .uem import gen_uem, write_uem from .utils import clip, xor __all__ = ['bcubed', 'conditional_entropy', 'contingency_matrix', 'der', 'goodman_kruskal_tau', 'jer', 'mutual_information'] EPS = np.finfo(float).eps def contingency_matrix(ref_labels, sys_labels): """Return contingency matrix between ``ref_labels`` and ``sys_labels``. Parameters ---------- ref_labels : ndarray, (n_samples,) or (n_samples, n_ref_classes) Reference labels encoded using one-hot scheme. sys_labels : ndarray, (n_samples,) or ((n_samples, n_sys_classes) System labels encoded using one-hot scheme. Returns ------- cm : ndarray, (n_ref_classes, n_sys_classes) Contigency matrix whose ``i, j``-th entry is the number of times the ``i``-th reference label and ``j``-th system label co-occur. """ if ref_labels.ndim != sys_labels.ndim: raise ValueError( 'ref_labels and sys_labels should either both be 1D arrays of ' 'labels or both be 2D arrays of one-hot encoded labels: shapes ' 'are %r, %r' % (ref_labels.shape, sys_labels.shape)) if ref_labels.shape[0] != sys_labels.shape[0]: raise ValueError( 'ref_labels and sys_labels must have same size: received %d ' 'and %d' % (ref_labels.shape[0], sys_labels.shape[0])) if ref_labels.ndim == 1: ref_classes, ref_class_inds = np.unique( ref_labels, return_inverse=True) sys_classes, sys_class_inds = np.unique( sys_labels, return_inverse=True) n_frames = ref_labels.size cm = coo_matrix( (np.ones(n_frames), (ref_class_inds, sys_class_inds)), shape=(ref_classes.size, sys_classes.size), dtype=np.int) cm = cm.toarray() else: ref_labels = ref_labels.astype('int64', copy=False) sys_labels = sys_labels.astype('int64', copy=False) cm = ref_labels.T.dot(sys_labels) if issparse(cm): cm = cm.toarray() return cm def bcubed(ref_labels, sys_labels, cm=None): """Return B-cubed precision, recall, and F1. The B-cubed precision of an item is the proportion of items with its system label that share its reference label (Bagga and Baldwin, 1998). Similarly, the B-cubed recall of an item is the proportion of items with its reference label that share its system label. The overall B-cubed precision and recall, then, are the means of the precision and recall for each item. Parameters ---------- ref_labels : ndarray, (n_frames,) Reference labels. sys_labels : ndarray, (n_frames,) System labels. cm : ndarray, (n_ref_classes, n_sys_classes) Contingency matrix between reference and system labelings. If None, will be computed automatically from ``ref_labels`` and ``sys_labels``. Otherwise, the given value will be used and ``ref_labels`` and ``sys_labels`` ignored. (Default: None) Returns ------- precision : float B-cubed precision. recall : float B-cubed recall. f1 : float B-cubed F1. References ---------- Bagga, A. and Baldwin, B. (1998). "Algorithms for scoring coreference chains." Proceedings of LREC 1998. 
""" if cm is None: cm = contingency_matrix(ref_labels, sys_labels) cm = cm.astype('float64') cm_norm = cm / cm.sum() precision = np.sum(cm_norm * (cm / cm.sum(axis=0))) recall = np.sum(cm_norm * (cm / np.expand_dims(cm.sum(axis=1), 1))) f1 = 2*(precision*recall)/(precision + recall) return precision, recall, f1 def goodman_kruskal_tau(ref_labels, sys_labels, cm=None): """Return Goodman-Kruskal tau between ``ref_labels`` and ``sys_labels``. Parameters ---------- ref_labels : ndarray, (n_frames,) Reference labels. sys_labels : ndarray, (n_frames,) System labels. cm : ndarray, (n_ref_classes, n_sys_classes) Contingency matrix between reference and system labelings. If None, will be computed automatically from ``ref_labels`` and ``sys_labels``. Otherwise, the given value will be used and ``ref_labels`` and ``sys_labels`` ignored. (Default: None) Returns ------- tau_ref_sys : float Value between 0 and 1 that is high when ``ref_labels`` is predictive of ``sys_labels`` and low when ``ref_labels`` provides essentially no information about ``sys_labels``. tau_sys_ref : float Value between 0 and 1 that is high when ``sys_labels`` is predictive of ``ref_labels`` and low when ``sys_labels`` provides essentially no information about ``ref_labels``. References ---------- - Goodman, L.A. and Kruskal, W.H. (1954). "Measures of association for cross classifications." Journal of the American Statistical Association. - Pearson, R. (2016). GoodmanKruskal: Association Analysis for Categorical Variables. https://CRAN.R-project.org/package=GoodmanKruskal. """ if cm is None: cm = contingency_matrix(ref_labels, sys_labels) cm = cm.astype('float64') cm = cm / cm.sum() ref_marginals = cm.sum(axis=1) sys_marginals = cm.sum(axis=0) n_ref_classes, n_sys_classes = cm.shape # Tau(ref, sys). if n_sys_classes == 1: # Special case: only single class in system labeling, so any # reference labeling is perfectly predictive. tau_ref_sys = 1. else: vy = 1 - np.sum(sys_marginals**2) xy_term = np.sum(cm**2, axis=1) vy_bar_x = 1 - np.sum(xy_term / ref_marginals) tau_ref_sys = (vy - vy_bar_x) / vy # Tau(sys, ref). if n_ref_classes == 1: # Special case: only single class in reference labeling, so any # system labeling is perfectly predictive. tau_sys_ref = 1. else: vx = 1 - np.sum(ref_marginals**2) yx_term = np.sum(cm**2, axis=0) vx_bar_y = 1 - np.sum(yx_term / sys_marginals) tau_sys_ref = (vx - vx_bar_y) / vx return tau_ref_sys, tau_sys_ref def conditional_entropy(ref_labels, sys_labels, cm=None, nats=False): """Return conditional entropy of ``ref_labels`` given ``sys_labels``. The conditional entropy ``H(ref | sys)`` quantifies how much information is needed to describe the reference labeling given that the system labeling is known. It is 0 when the labelings are identical and increases as the system labeling becomes less descriptive of the reference labeling. Parameters ---------- ref_labels : ndarray, (n_frames,) Reference labels. sys_labels : ndarray, (n_frames,) System labels. cm : ndarray, (n_ref_classes, n_sys_classes) Contingency matrix between reference and system labelings. If None, will be computed automatically from ``ref_labels`` and ``sys_labels``. Otherwise, the given value will be used and ``ref_labels`` and ``sys_labels`` ignored. (Default: None) nats : bool, optional If True, return conditional entropy in nats. Otherwise, return in bits. (Default: False) References ---------- - https://en.wikipedia.org/wiki/Conditional_entropy - Cover, T.M. and Thomas, J.A. (1991). Elements of Information Theory. - Rosenberg, A. 
and Hirschberg, J. (2007). "V-Measure: A conditional entropy-based external cluster evaluation measure." Proceedings of EMNLP 2007. """ log = np.log if nats else np.log2 if cm is None: cm = contingency_matrix(ref_labels, sys_labels) sys_marginals = cm.sum(axis=0) N = cm.sum() ref_inds, sys_inds = np.nonzero(cm) vals = cm[ref_inds, sys_inds] # Non-zero values of contingency matrix. sys_marginals = sys_marginals[sys_inds] # Corresponding marginals. sigma = vals/N * (log(sys_marginals) - log(vals)) return sigma.sum() VALID_NORM_METHODS = set(['min', 'sum', 'sqrt', 'max']) def mutual_information(ref_labels, sys_labels, cm=None, nats=False, norm_method='sqrt'): """Return mutual information between ``ref_labels`` and ``sys_labels``. The mutual information ``I(ref, sys)`` quantifies how much information is shared by the reference and system labelings; that is, how much knowing one labeling reduces uncertainty about the other. It is 0 in the case that the labelings are independent and increases as they become more predictive of each other with a least upper bound of ``min(H(ref), H(sys))``. Normalized mutual information converts mutual information into a similarity metric ranging on [0, 1]. Multiple normalization schemes are available, set by the ``norm_method`` argument, which takes the following values: - ``min`` -- normalize by ``min(H(ref), H(sys))`` - ``sum`` -- normalize by ``0.5*(H(ref) + H(sys))`` - ``sqrt`` -- normalize by ``sqrt(H(ref)*H(sys))`` - ``max`` -- normalize by ``max(H(ref), H(sys))`` Parameters ---------- ref_labels : ndarray, (n_frames,) Reference labels. sys_labels : ndarray, (n_frames,) System labels. cm : ndarray, (n_ref_classes, n_sys_classes) Contingency matrix between reference and system labelings. If None, will be computed automatically from ``ref_labels`` and ``sys_labels``. Otherwise, the given value will be used and ``ref_labels`` and ``sys_labels`` ignored. (Default: None) nats : bool, optional If True, return nats. Otherwise, return bits. (Default: False) norm_method : str, optional Normalization method for NMI computation. (Default: 'sqrt') Returns ------- mi : float Mutual information. nmi : float Normalized mutual information. References ---------- - https://en.wikipedia.org/wiki/Mutual_information - Cover, T.M. and Thomas, J.A. (1991). Elements of Information Theory. - Strehl, A. and Ghosh, J. (2002). "Cluster ensembles -- A knowledge reuse framework for combining multiple partitions." Journal of Machine Learning Research. - Nguyen, X.V., Epps, J., and Bailey, J. (2010). "Information theoretic measures for clustering comparison: Variants, properties, normalization and correction for chance." Journal of Machine Learning Research. """ if norm_method not in VALID_NORM_METHODS: raise ValueError('"%s" is not a valid NMI normalization method.') log = np.log if nats else np.log2 if cm is None: cm = contingency_matrix(ref_labels, sys_labels) # Special cases in which one or more of H(ref) and H(sys) is # 0. n_ref_classes, n_sys_classes = cm.shape if xor(n_ref_classes == 1, n_sys_classes == 1): # Case 1: MI is by definition 0 as should be NMI, regardless of # normalization. return 0.0, 0.0 if n_ref_classes == n_sys_classes == 1: # Case 2: MI is 0, but as the data is not split, each clustering # is perfectly predictive of the other, so set NMI to 1. return 0.0, 1.0 # Mutual information. N = cm.sum() ref_marginals = cm.sum(axis=1) sys_marginals = cm.sum(axis=0) ref_inds, sys_inds = np.nonzero(cm) vals = cm[ref_inds, sys_inds] # Non-zero values of contingency matrix. 
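    # With p_ij = vals/N, p_i = ref_marginals/N, and p_j = sys_marginals/N,
    # MI = sum_ij p_ij * log(p_ij / (p_i*p_j)). Expanding the logarithm gives
    # the numerically convenient form (vals/N)*(log(vals) - log(outer) + log(N))
    # used below, where ``outer`` is ref_marginals*sys_marginals restricted to
    # the non-zero cells of the contingency matrix.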
outer = ref_marginals[ref_inds]*sys_marginals[sys_inds] sigma = (vals/N) * ( log(vals) - log(outer) + log(N)) mi = sigma.sum() mi = max(mi, 0.) # Normalized mutual information. def h(p): p = p[p > 0] return max(-np.sum(p*log(p)), 0) h_ref = h(ref_marginals / N) h_sys = h(sys_marginals / N) if norm_method == 'max': denom = max(h_ref, h_sys) elif norm_method == 'sum': denom = 0.5*(h_ref + h_sys) elif norm_method == 'sqrt': denom = np.sqrt(h_ref*h_sys) elif norm_method == 'min': denom = min(h_ref, h_sys) nmi = mi / denom nmi = clip(nmi, 0., 1.) return mi, nmi SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__)) MDEVAL_BIN = os.path.join(SCRIPT_DIR, 'md-eval-22.pl') FILE_REO = re.compile(r'(?<=Speaker Diarization for).+(?=\*\*\*)') SCORED_SPEAKER_REO = re.compile(r'(?<=SCORED SPEAKER TIME =)[\d.]+') MISS_SPEAKER_REO = re.compile(r'(?<=MISSED SPEAKER TIME =)[\d.]+') FA_SPEAKER_REO = re.compile(r'(?<=FALARM SPEAKER TIME =)[\d.]+') ERROR_SPEAKER_REO = re.compile(r'(?<=SPEAKER ERROR TIME =)[\d.]+') # TODO: Working with md-eval is a PITA, even with modifications to the # reporting. Suggest looking into moving over to pyannote's # implementation. def der(ref_turns, sys_turns, collar=0.0, ignore_overlaps=False, uem=None): """Return overall diarization error rate. Diarization error rate (DER), introduced for the NIST Rich Transcription evaluations, is computed as the sum of the following: - speaker error -- percentage of scored time for which the wrong speaker id is assigned within a speech region - false alarm speech -- percentage of scored time for which a nonspeech region is incorrectly marked as containing speech - missed speech -- percentage of scored time for which a speech region is incorrectly marked as not containing speech As with word error rate, a score of zero indicates perfect performance and higher scores (which may exceed 100) indicate poorer performance. DER is computed as defined in the NIST RT-09 evaluation plan using version 22 of the ``md-eval.pl`` scoring script. When ``ignore_overlaps=False``, this is equivalent to running the following command: md-eval.pl -r ref.rttm -s sys.rttm -c collar -u uemf where ``ref.rttm`` and ``sys.rttm`` are RTTM files produced from ``ref_turns`` and ``sys_turns`` respectively and ``uemf`` is an Un-partitioned Evaluation Map (UEM) file delimiting the scoring regions. If a ``UEM`` instance is supplied via the``uem`` argument, this file will be created from the supplied UEM. Otherwise, it will be generated automatically from ``ref_turns`` and ``sys_turns`` using the ``uem.gen_uem`` function. Similarly, when ``ignore_overlaps=True``: md-eval.pl -r ref.rttm -s sys.rttm -c collar -u uemf -1 Parameters ---------- ref_turns : list of Turn Reference speaker turns. sys_turns : list of Turn System speaker turns. collar : float, optional Size of forgiveness collar in seconds. Diarization output will not be evaluated within +/- ``collar`` seconds of reference speaker boundaries. (Default: 0.0) ignore_overlaps : bool, optional If True, ignore regions in the reference diarization in which more than one speaker is speaking. (Default: False) uem : UEM, optional Evaluation map. If not supplied, will be generated automatically from ``ref_turns`` and ``sys_turns``. (Default: None) Returns ------- file_to_der : dict Mapping from files to diarization error rates (in percent) for those files. global_der : float Overall diarization error rate (in percent). References ---------- NIST. (2009). The 2009 (RT-09) Rich Transcription Meeting Recognition Evaluation Plan. 
    https://web.archive.org/web/20100606041157if_/http://www.itl.nist.gov/iad/mig/tests/rt/2009/docs/rt09-meeting-eval-plan-v2.pdf
    """
    tmp_dir = tempfile.mkdtemp()

    # Write RTTMs.
    ref_rttm_fn = os.path.join(tmp_dir, 'ref.rttm')
    write_rttm(ref_rttm_fn, ref_turns)
    sys_rttm_fn = os.path.join(tmp_dir, 'sys.rttm')
    write_rttm(sys_rttm_fn, sys_turns)

    # Write UEM.
    if uem is None:
        uem = gen_uem(ref_turns, sys_turns)
    uemf = os.path.join(tmp_dir, 'all.uem')
    write_uem(uemf, uem)

    # Actually score.
    try:
        cmd = [MDEVAL_BIN,
               '-af',
               '-r', ref_rttm_fn,
               '-s', sys_rttm_fn,
               '-c', str(collar),
               '-u', uemf,
              ]
        if ignore_overlaps:
            cmd.append('-1')
        stdout = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        stdout = e.output
    finally:
        shutil.rmtree(tmp_dir)

    # Parse md-eval output to extract by-file and total scores.
    stdout = stdout.decode('utf-8')
    file_ids = [m.strip() for m in FILE_REO.findall(stdout)]
    file_ids = [file_id[2:] if file_id.startswith('f=') else file_id
                for file_id in file_ids]
    scored_speaker_times = np.array(
        [float(m) for m in SCORED_SPEAKER_REO.findall(stdout)])
    miss_speaker_times = np.array(
        [float(m) for m in MISS_SPEAKER_REO.findall(stdout)])
    fa_speaker_times = np.array(
        [float(m) for m in FA_SPEAKER_REO.findall(stdout)])
    error_speaker_times = np.array(
        [float(m) for m in ERROR_SPEAKER_REO.findall(stdout)])
    with np.errstate(invalid='ignore', divide='ignore'):
        error_times = miss_speaker_times + fa_speaker_times + error_speaker_times
        ders = error_times / scored_speaker_times
    ders[np.isnan(ders)] = 0  # Numerator and denominator both 0.
    ders[np.isinf(ders)] = 1  # Numerator > 0, but denominator = 0.
    ders *= 100.  # Convert to percent.

    # Reconcile with UEM, keeping in mind that in the edge case where no
    # reference turns are observed for a file, md-eval doesn't report results
    # for said file.
    file_to_der_base = dict(zip(file_ids, ders))
    file_to_der = {}
    for file_id in uem:
        try:
            der = file_to_der_base[file_id]
        except KeyError:
            # Check for any system turns for that file, which should be FAs,
            # assuming that the turns have been cropped to the UEM scoring
            # regions.
            n_sys_turns = len(
                [turn for turn in sys_turns if turn.file_id == file_id])
            der = 100. if n_sys_turns else 0.0
        file_to_der[file_id] = der
    global_der = file_to_der_base['ALL']

    return file_to_der, global_der


def jer(file_to_ref_durs, file_to_sys_durs, file_to_cm, min_ref_dur=0):
    """Return Jaccard error rate.

    Jaccard error rate (JER) is based on the Jaccard index, a similarity
    measure used to evaluate the output of image segmentation systems. An
    optimal mapping between reference and system speakers is determined and
    for each pair the Jaccard index is computed. The Jaccard error rate is
    then defined as 1 minus the average of these scores.

    More concretely, assume we have ``N`` reference speakers and ``M`` system
    speakers. An optimal mapping between speakers is determined using the
    Hungarian algorithm so that each reference speaker is paired with at most
    one system speaker and each system speaker with at most one reference
    speaker.
Then, for each reference speaker ``ref`` the speaker-specific Jaccard error rate is ``(FA + MISS)/TOTAL``, where: - ``TOTAL`` is the duration of the union of reference and system speaker segments; if the reference speaker was not paired with a system speaker, it is the duration of all reference speaker segments - ``FA`` is the total system speaker time not attributed to the reference speaker; if the reference speaker was not paired with a system speaker, it is 0 - ``MISS`` is the total reference speaker time not attributed to the system speaker; if the reference speaker was not paired with a system speaker, it is equal to ``TOTAL`` The Jaccard error rate then is the average of the speaker specific Jaccard error rates. JER and DER are highly correlated with JER typically being higher, especially in recordings where one or more speakers is particularly dominant. Where it tends to track DER is in outliers where the diarization is especially bad, resulting on one or more unmapped system speakers whose speech is not then penalized. In these cases, where DER can easily exceed 500%, JER will never exceed 100% and may be far lower if the reference speakers are handled correctly. For this reason, it may be useful to pair JER with another metric evaluating speech detection and/or speaker overlap detection. Parameters ---------- file_to_ref_durs : dict Mapping from files to durations of reference speakers in those files. file_to_sys_durs : dict Mapping from files to durations of system speakers in those files. file_to_cm : dict Mapping from files to contingency matrices for speakers in those files. min_ref_dur : float, optional Minimum reference speaker duration. Reference speakers with durations less than ``min_ref_dur`` will be excluded for scoring purposes. Setting this to a small non-zero number may stabilize JER when the reference segmentation contains multiple extraneous speakers. (Default: 0.0) Returns ------- file_to_jer : dict Mapping from files to Jaccard error rates (in percent) for those files. global_jer : float Overall Jaccard error rate (in percent). References ---------- https://en.wikipedia.org/wiki/Jaccard_index """ # TODO: Explore treating non-speech as additional speaker for computation to # more gracefully deal with exceptionally poor system performance. ref_dur_fids = set(file_to_ref_durs.keys()) sys_dur_fids = set(file_to_sys_durs.keys()) cm_fids = set(file_to_cm.keys()) if not ref_dur_fids == sys_dur_fids == cm_fids: raise ValueError( 'All passed dicts must have same keys.') file_ids = ref_dur_fids file_to_jer = {} all_speaker_jers = [] n_ref_speakers_global = 0 n_sys_speakers_global = 0 for file_id in file_ids: # Filter. ref_durs = file_to_ref_durs[file_id] sys_durs = file_to_sys_durs[file_id] cm = file_to_cm[file_id] ref_keep = ref_durs >= min_ref_dur ref_durs = ref_durs[ref_keep] cm = cm[ref_keep, ] n_ref_speakers = ref_durs.size n_sys_speakers = sys_durs.size n_ref_speakers_global += n_ref_speakers n_sys_speakers_global += n_sys_speakers # Handle edge cases where either reference or system segmentation # posited no speech. if n_ref_speakers == 0 and n_sys_speakers > 0: # Case 1: no reference speech. file_to_jer[file_id] = 100.0 continue elif n_ref_speakers > 0 and n_sys_speakers == 0: # Case 2: no system speech. file_to_jer[file_id] = 100.0 all_speaker_jers.extend([100.]*n_ref_speakers) continue elif n_ref_speakers == 0 and n_sys_speakers == 0: # Case 3: no reference or system speech file_to_jer[file_id] = 0.0 continue # Determine all speaker-level JER. 
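        # The contingency matrix already holds, for every (reference, system)
        # speaker pair, the duration of their intersection; the union follows
        # by inclusion-exclusion (ref + sys - intersection), and the pairwise
        # JER is 1 - intersection/union. The Hungarian assignment below then
        # selects the speaker mapping; unmapped reference speakers keep a JER
        # of 100%.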
ref_durs = np.tile(ref_durs, [n_sys_speakers, 1]).T sys_durs = np.tile(sys_durs, [n_ref_speakers, 1]) intersect = cm union = ref_durs + sys_durs - intersect jer_speaker = 1 - intersect / union # Find dominant mapping by Hungarian algorithm (scipy >= 0.17) and compute # JER. ref_speaker_inds, sys_speaker_inds = linear_sum_assignment(jer_speaker) jers = np.ones(n_ref_speakers, dtype='float64') for ref_speaker_ind, sys_speaker_ind in zip( ref_speaker_inds, sys_speaker_inds): jers[ref_speaker_ind] = jer_speaker[ref_speaker_ind, sys_speaker_ind] jers *= 100. file_to_jer[file_id] = jers.mean() all_speaker_jers.extend(jers) # Determine global JER. if n_ref_speakers_global == 0 and n_sys_speakers_global > 0: # Case 1: no reference speech on ANY file. global_jer = 100. elif n_ref_speakers_global > 0 and n_sys_speakers_global == 0: # Case 2: no system speech on ANY file. global_jer = 100. elif n_ref_speakers_global == n_sys_speakers_global == 0: # Case 3: no reference OR system speech on ANY file. global_jer = 0.0 else: # General case: at least 1 reference and 1 system speaker present. global_jer = np.mean(all_speaker_jers) return file_to_jer, global_jer
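# A minimal, self-contained sketch of how the clustering metrics above compose
# on toy framewise labelings. The arrays are invented purely for illustration
# and no particular output values are claimed; the block is guarded so it never
# runs on import.
if __name__ == '__main__':
    toy_ref = np.array([0, 0, 0, 1, 1, 2])
    toy_sys = np.array([0, 0, 1, 1, 1, 1])
    toy_cm = contingency_matrix(toy_ref, toy_sys)
    p, r, f1 = bcubed(None, None, cm=toy_cm)
    mi, nmi = mutual_information(None, None, cm=toy_cm)
    ce = conditional_entropy(None, None, cm=toy_cm)
    print('B-cubed: precision=%.3f, recall=%.3f, F1=%.3f' % (p, r, f1))
    print('MI=%.3f bits, NMI=%.3f, H(ref|sys)=%.3f bits' % (mi, nmi, ce))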
import numpy as np import lxml.etree as lxml import datetime import sys from __tools__ import XmlParser from __xtpMolecule__ import * ryd2ev=13.605692 hrt2ev=2*ryd2ev def appendarrayorNone(datalist): if type(datalist)==list: if len(datalist)==0: return None else: for element in datalist: if type(element)==list and len(element)==0: return None return np.array(datalist) else: return None def readexcitonlogfile(filename): dftlist=[] gwa=[] qp=[] s=[] t=[] fs=[] levelid=[] levelidqp=[] fragAS=[] fragBS=[] fragAT=[] fragBT=[] holeS=[] electronS=[] holeT=[] TrDip=[] electronT=[] RPAlevel=None homo=None lumo=None dft=False tbool=False sbool=False qpbool=False dftenergy=None add=0 with open(filename,"r") as f: for line in f.readlines(): if "GWBSE" in line and ( "DBG" in line or "INF" in line): add=1 if "QM energy[eV]" in line and dftenergy==None: dftenergy=float(line.split()[-1]) elif "Set RPA level range" in line: RPAlevel=int((line.split()[8]).split(":")[-1][:-1]) elif "====== Perturbative quasiparticle energies (Hartree) ======" in line: conversion=hrt2ev dft=True elif "====== Perturbative quasiparticle energies (Rydberg) ======" in line: conversion=ryd2ev dft=True elif dft and "S-C" in line and "S-X" in line: entries=line.split() levelid.append(int(entries[4+add])-1) dftlist.append(conversion*float(entries[7+add])) gwa.append(conversion*float(entries[-1])) if "HOMO"==entries[2+add] and homo==None: homo=int(entries[4+add])-1 #print entries if "LUMO"==entries[2+add] and lumo==None: lumo=int(entries[4+add])-1 #print entries elif "====== Diagonalized quasiparticle energies" in line: qpbool=True elif qpbool and qp and "PQP" in line and "DQP" in line: levelidqp.append(int(line.split()[4+add])) qp.append(conversion*float(line.split()[-1])) elif "====== triplet energies (eV) ======" in line: tbool=True elif tbool and "T =" in line: t.append(float(line.split()[7+add])) elif tbool and "Fragment A" in line: tok=line.split() fragAT.append(float(tok[12+add])) holeT.append(float(tok[6+add].strip("%"))) electronT.append(float(tok[8+add].strip("%"))) elif tbool and "Fragment B" in line: fragBT.append(float(line.split()[12+add])) elif "====== singlet energies (eV) ======" in line: sbool=True elif sbool and "S =" in line: s.append(float(line.split()[7+add])) elif sbool and "TrDipole length gauge" in line: tok=line.split() fs.append(float(tok[-1])) x=float(tok[7+add]) y=float(tok[10+add]) z=float(tok[13+add]) TrDip.append(np.array([x,y,z])) elif sbool and "Fragment A" in line: tok=line.split() fragAS.append(float(tok[12+add])) holeS.append(float(tok[6+add].strip("%"))) electronS.append(float(tok[8+add].strip("%"))) elif sbool and "Fragment B" in line: fragBS.append(float(line.split()[12+add])) elif "Filled DFT Basis of size" in line: basissize=int(line.split()[-1]) elif "Filled Auxbasis of size" in line: auxsize=int(line.split()[-1]) results=molecule() results.addHomoLumo(homo,lumo) results.setRPAlevels(RPAlevel) results.addEgroundstate(dftenergy) results.addDFTenergies(appendarrayorNone([levelid,dftlist,gwa])) results.addQPenergies(appendarrayorNone([levelidqp,qp])) results.addSinglets(appendarrayorNone(s),appendarrayorNone(fs),appendarrayorNone(TrDip)) results.addTriplets(appendarrayorNone(t)) results.addFragmentsSinglet(appendarrayorNone([fragAS,fragBS]),appendarrayorNone([holeS,electronS])) results.addFragmentsTriplet(appendarrayorNone([fragAT,fragBT]),appendarrayorNone([holeT,electronT])) results.auxsize=auxsize results.basissize=basissize return results def getcouplingfromsplit(filename,states): dft=False 
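    # Energy-splitting-in-dimer estimate: for every requested state the
    # coupling is taken as half the splitting between the corresponding pair
    # of dimer levels parsed from the exciton logfile (the 0.5*splitt below).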
singlets=False triplets=False coupling=[] for state in states: if "e" in state or "h" in state: dft=True if "s" in state: singlets=True if "t" in state: triplets=True results=readexcitonlogfile(filename,dft=dft,qp=False,singlets=singlets,triplets=triplets) #print results for state in states: stateindex=None resultsindex=None if state=="e_dft" or state=="e": stateindex=results[0][1] resultsindex=1 elif state=="h_dft" or state=="h": stateindex=results[0][0]-1 resultsindex=1 elif state=="e_gw": stateindex=results[0][1] resultsindex=2 elif state=="h_gw": stateindex=results[0][0]-1 resultsindex=2 elif "s" in state: stateindex=2*int(state[1:])-2 resultsindex=4 elif "t" in state: stateindex=2*int(state[1:])-2 resultsindex=5 else: print "state {} not known".format(state) splitt=results[resultsindex][stateindex+1]-results[resultsindex][stateindex] #if state=="e": #print results[resultsindex][stateindex+1]/conversion #print results[resultsindex][stateindex]/conversion #print 0.5*splitt coupling.append(0.5*splitt) return coupling def readcouplingxml(filename): root=XmlParser(filename) Je=[] Jh=[] for pair in root: homoA=int(pair.get("homoA")) homoB=int(pair.get("homoB")) for overlap in pair: orbA=int(overlap.get("orbA")) orbB=int(overlap.get("orbB")) if orbA==homoA and orbB==homoB: Je.append((float(overlap.text))) elif orbA==homoA+1 and orbB==homoB+1: Jh.append((float(overlap.text))) return [Je,Jh] def readexcitonxml(filename): root=XmlParser(filename) return readexcitonxml_molecule(root) def readexcitonxml_egwbse(filename): results=[] root=XmlParser(filename) for job in root.iter('job'): output=job.find("output") segment=output.find("segment") gwbse=segment.find("GWBSE") mol=readexcitonxml_molecule(gwbse) mol.setId(int(segment.get("id"))) mol.setName(segment.get("type")) results.append(mol) return results def readexcitonxml_molecule(root): dftlist=[] gwa=[] qp=[] s=[] t=[] fs=[] TrDip=[] fragAS=[] fragBS=[] fragAT=[] fragBT=[] holeS=[] electronS=[] levelid=[] levelidqp=[] holeT=[] electronT=[] homo=None lumo=None tbool=False sbool=False qpbool=False dftenergy=float(root.get("DFTEnergy")) dft=root.find("dft") homo=int(dft.get("HOMO")) lumo=int(dft.get("LUMO")) for level in dft.iter('level'): lid=int(level.get("number")) levelid.append(lid) levelidqp.append(lid) dftlist.append(float((level.find("dft_energy")).text)) gwa.append(float((level.find("gw_energy")).text)) if level.find("qp_energy")!=None: qp.append(float((level.find("qp_energy")).text)) singlets=root.find("singlets") if singlets!=None: for level in singlets.iter('level'): s.append(float((level.find("omega")).text)) fs.append(float((level.find("f")).text)) dipole=(level.find("Trdipole")).text TrDip.append(np.array(dipole.split(),dtype=float)) triplets=root.find("triplets") if triplets!=None: for level in triplets.iter('level'): t.append(float((level.find("omega")).text)) results=molecule() results.addHomoLumo(homo,lumo) results.addEgroundstate(dftenergy) results.addDFTenergies(appendarrayorNone([levelid,dftlist,gwa])) results.addQPenergies(appendarrayorNone([levelidqp,qp])) results.addSinglets(appendarrayorNone(s),appendarrayorNone(fs),appendarrayorNone(TrDip)) results.addTriplets(appendarrayorNone(t)) results.addFragmentsSinglet(appendarrayorNone([fragAS,fragBS]),appendarrayorNone([holeS,electronS])) results.addFragmentsTriplet(appendarrayorNone([fragAT,fragBT]),appendarrayorNone([holeT,electronT])) return results def readexcitoncouplingxml(filename,states): root=XmlParser(filename) resultlist=[] for pair in root: types=pair[0] couplings=[] 
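        # For each requested state label (e.g. "s1", "t2"), pick the coupling
        # element whose excitonA/excitonB indices both equal that state number.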
for state in states: results=None if state[0]=="s": results=types.find("singlets") elif state[0]=="t": results=types.find("triplets") else: print "state not known" number=int(state[1:]) #print number for coupling in results: noA=int(coupling.get("excitonA")) noB=int(coupling.get("excitonB")) if noA+1==number and noB+1==number: couplings.append((float(coupling.text))) break resultlist.append(couplings) return resultlist def readexcitoncoulingclassical(filename): root=XmlParser(filename) results=[] for pair in root: Coupling=pair[0] results.append(float(Coupling.get("jABstatic"))) return results def datetimefromstring(day,time): return datetime.datetime.strptime("{} {}".format(day,time),"%Y-%m-%d %H:%M:%S") def readbenchmarkexciton(filename): singletdiag=None singletsetup=None tripletdiag=None tripletsetup=None with open(filename,"r") as f: for line in f.readlines(): if "DFT data was created by" in line: startday=line.split()[2] starttime=line.split()[3] start=datetimefromstring(startday,starttime) elif "Direct part of e-h interaction" in line: startday=line.split()[2] starttime=line.split()[3] tripletsetup=datetimefromstring(startday,starttime) elif "Solved BSE for triplets" in line: startday=line.split()[2] starttime=line.split()[3] tripletdiag=datetimefromstring(startday,starttime) elif "Exchange part of e-h interaction" in line: startday=line.split()[2] starttime=line.split()[3] singletsetup=datetimefromstring(startday,starttime) elif "Solved BSE for singlets" in line: startday=line.split()[2] starttime=line.split()[3] singletdiag=datetimefromstring(startday,starttime) result=[None,None,None,None] if tripletsetup!=None: result[0]=(tripletsetup-start).total_seconds() if tripletdiag!=None: result[1]=(tripletdiag-tripletsetup).total_seconds() if singletsetup!=None: if(tripletdiag!=None): result[2]=(singletsetup-tripletdiag).total_seconds() else: result[2]=(singletsetup-tripletsetup).total_seconds() if singletdiag!=None: result[3]=(singletdiag-singletsetup).total_seconds() return result
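# Hypothetical usage sketch (the file names below are invented purely for
# illustration):
#
#     mol = readexcitonlogfile("exciton.log")        # molecule() with levels,
#                                                    # singlets, triplets, ...
#     Je, Jh = readcouplingxml("couplings.xml")      # frontier-orbital couplings
#     timings = readbenchmarkexciton("exciton.log")  # setup/diagonalization times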
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Helper classes for tensor shape inference.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import tensor_shape_pb2 from tensorflow.python import tf2 from tensorflow.python.framework import dtypes from tensorflow.python.util import compat from tensorflow.python.util.tf_export import tf_export _TENSORSHAPE_V2_OVERRIDE = None @tf_export(v1=["enable_v2_tensorshape"]) def enable_v2_tensorshape(): """In TensorFlow 2.0, iterating over a TensorShape instance returns values. This enables the new behavior. Concretely, `tensor_shape[i]` returned a Dimension instance in V1, but it V2 it returns either an integer, or None. Examples: ``` ####################### # If you had this in V1: value = tensor_shape[i].value # Do this in V2 instead: value = tensor_shape[i] ####################### # If you had this in V1: for dim in tensor_shape: value = dim.value print(value) # Do this in V2 instead: for value in tensor_shape: print(value) ####################### # If you had this in V1: dim = tensor_shape[i] dim.assert_is_compatible_with(other_shape) # or using any other shape method # Do this in V2 instead: if tensor_shape.rank is None: dim = Dimension(None) else: dim = tensor_shape.dims[i] dim.assert_is_compatible_with(other_shape) # or using any other shape method # The V2 suggestion above is more explicit, which will save you from # the following trap (present in V1): # you might do in-place modifications to `dim` and expect them to be reflected # in `tensor_shape[i]`, but they would not be. ``` """ global _TENSORSHAPE_V2_OVERRIDE, TensorShape # pylint: disable=invalid-name _TENSORSHAPE_V2_OVERRIDE = True TensorShape = TensorShapeV2 @tf_export(v1=["disable_v2_tensorshape"]) def disable_v2_tensorshape(): """Disables the V2 TensorShape behavior and reverts to V1 behavior. See docstring for `enable_v2_tensorshape` for details about the new behavior. """ global _TENSORSHAPE_V2_OVERRIDE, TensorShape # pylint: disable=invalid-name _TENSORSHAPE_V2_OVERRIDE = False TensorShape = TensorShapeV1 @tf_export(v1=["dimension_value"]) def dimension_value(dimension): """Compatibility utility required to allow for both V1 and V2 behavior in TF. Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to coexist with the new behavior. This utility is a bridge between the two. When accessing the value of a TensorShape dimension, use this utility, like this: ``` # If you had this in your V1 code: value = tensor_shape[i].value # Use `dimension_value` as direct replacement compatible with both V1 & V2: value = dimension_value(tensor_shape[i]) # This would be the V2 equivalent: value = tensor_shape[i] # Warning: this will return the dim value in V2! ``` Arguments: dimension: Either a `Dimension` instance, an integer, or None. 
Returns: A plain value, i.e. an integer or None. """ if isinstance(dimension, Dimension): return dimension.value return dimension @tf_export(v1=["dimension_at_index"]) def dimension_at_index(shape, index): """Compatibility utility required to allow for both V1 and V2 behavior in TF. Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to coexist with the new behavior. This utility is a bridge between the two. If you want to retrieve the Dimension instance corresponding to a certain index in a TensorShape instance, use this utility, like this: ``` # If you had this in your V1 code: dim = tensor_shape[i] # Use `dimension_at_index` as direct replacement compatible with both V1 & V2: dim = dimension_at_index(tensor_shape, i) # Another possibility would be this, but WARNING: it only works if the # tensor_shape instance has a defined rank. dim = tensor_shape.dims[i] # `dims` may be None if the rank is undefined! # In native V2 code, we recommend instead being more explicit: if tensor_shape.rank is None: dim = Dimension(None) else: dim = tensor_shape.dims[i] # Being more explicit will save you from the following trap (present in V1): # you might do in-place modifications to `dim` and expect them to be reflected # in `tensor_shape[i]`, but they would not be (as the Dimension object was # instantiated on the fly. ``` Arguments: shape: A TensorShape instance. index: An integer index. Returns: A dimension object. """ assert isinstance(shape, TensorShape) if shape.rank is None: return Dimension(None) else: return shape.dims[index] @tf_export(v1=["Dimension"]) class Dimension(object): """Represents the value of one dimension in a TensorShape.""" def __init__(self, value): """Creates a new Dimension with the given value.""" if value is None: self._value = None elif isinstance(value, Dimension): self._value = value.value elif isinstance(value, dtypes.DType): raise TypeError("Cannot convert %s to Dimension" % value) else: self._value = int(value) if (not isinstance(value, compat.bytes_or_text_types) and self._value != value): raise ValueError("Ambiguous dimension: %s" % value) if self._value < 0: raise ValueError("Dimension %d must be >= 0" % self._value) def __repr__(self): return "Dimension(%s)" % repr(self._value) def __str__(self): value = self._value return "?" if value is None else str(value) def __eq__(self, other): """Returns true if `other` has the same known value as this Dimension.""" try: other = as_dimension(other) except (TypeError, ValueError): return NotImplemented if self._value is None or other.value is None: return None return self._value == other.value def __ne__(self, other): """Returns true if `other` has a different known value from `self`.""" try: other = as_dimension(other) except (TypeError, ValueError): return NotImplemented if self._value is None or other.value is None: return None return self._value != other.value def __int__(self): return self._value # This is needed for Windows. # See https://github.com/tensorflow/tensorflow/pull/9780 def __long__(self): return self._value def __index__(self): # Allow use in Python 3 range return self._value @property def value(self): """The value of this dimension, or None if it is unknown.""" return self._value def is_compatible_with(self, other): """Returns true if `other` is compatible with this Dimension. Two known Dimensions are compatible if they have the same value. An unknown Dimension is compatible with all other Dimensions. Args: other: Another Dimension. 
Returns: True if this Dimension and `other` are compatible. """ other = as_dimension(other) return (self._value is None or other.value is None or self._value == other.value) def assert_is_compatible_with(self, other): """Raises an exception if `other` is not compatible with this Dimension. Args: other: Another Dimension. Raises: ValueError: If `self` and `other` are not compatible (see is_compatible_with). """ if not self.is_compatible_with(other): raise ValueError("Dimensions %s and %s are not compatible" % (self, other)) def merge_with(self, other): """Returns a Dimension that combines the information in `self` and `other`. Dimensions are combined as follows: ```python tf.Dimension(n) .merge_with(tf.Dimension(n)) == tf.Dimension(n) tf.Dimension(n) .merge_with(tf.Dimension(None)) == tf.Dimension(n) tf.Dimension(None).merge_with(tf.Dimension(n)) == tf.Dimension(n) tf.Dimension(None).merge_with(tf.Dimension(None)) == tf.Dimension(None) # raises ValueError for n != m tf.Dimension(n) .merge_with(tf.Dimension(m)) ``` Args: other: Another Dimension. Returns: A Dimension containing the combined information of `self` and `other`. Raises: ValueError: If `self` and `other` are not compatible (see is_compatible_with). """ other = as_dimension(other) self.assert_is_compatible_with(other) if self._value is None: return Dimension(other.value) else: return Dimension(self._value) def __add__(self, other): """Returns the sum of `self` and `other`. Dimensions are summed as follows: ```python tf.Dimension(m) + tf.Dimension(n) == tf.Dimension(m + n) tf.Dimension(m) + tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) + tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) + tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the sum of `self` and `other`. """ other = as_dimension(other) if self._value is None or other.value is None: return Dimension(None) else: return Dimension(self._value + other.value) def __radd__(self, other): """Returns the sum of `other` and `self`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the sum of `self` and `other`. """ return self + other def __sub__(self, other): """Returns the subtraction of `other` from `self`. Dimensions are subtracted as follows: ```python tf.Dimension(m) - tf.Dimension(n) == tf.Dimension(m - n) tf.Dimension(m) - tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) - tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) - tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the subtraction of `other` from `self`. """ other = as_dimension(other) if self._value is None or other.value is None: return Dimension(None) else: return Dimension(self._value - other.value) def __rsub__(self, other): """Returns the subtraction of `self` from `other`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the subtraction of `self` from `other`. """ other = as_dimension(other) if self._value is None or other.value is None: return Dimension(None) else: return Dimension(other.value - self._value) def __mul__(self, other): """Returns the product of `self` and `other`. 
Dimensions are summed as follows: ```python tf.Dimension(m) * tf.Dimension(n) == tf.Dimension(m * n) tf.Dimension(m) * tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) * tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) * tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the product of `self` and `other`. """ try: other = as_dimension(other) except (TypeError, ValueError): return NotImplemented if self._value is None or other.value is None: return Dimension(None) else: return Dimension(self._value * other.value) def __rmul__(self, other): """Returns the product of `self` and `other`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is the product of `self` and `other`. """ return self * other def __floordiv__(self, other): """Returns the quotient of `self` and `other` rounded down. Dimensions are divided as follows: ```python tf.Dimension(m) // tf.Dimension(n) == tf.Dimension(m // n) tf.Dimension(m) // tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) // tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) // tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A `Dimension` whose value is the integer quotient of `self` and `other`. """ try: other = as_dimension(other) except (TypeError, ValueError): return NotImplemented if self._value is None or other.value is None: return Dimension(None) else: return Dimension(self._value // other.value) def __rfloordiv__(self, other): """Returns the quotient of `other` and `self` rounded down. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A `Dimension` whose value is the integer quotient of `self` and `other`. """ other = as_dimension(other) if self._value is None or other.value is None: return Dimension(None) else: return Dimension(other.value // self._value) def __div__(self, other): """DEPRECATED: Use `__floordiv__` via `x // y` instead. This function exists only for backwards compatibility purposes; new code should use `__floordiv__` via the syntax `x // y`. Using `x // y` communicates clearly that the result rounds down, and is forward compatible to Python 3. Args: other: Another `Dimension`. Returns: A `Dimension` whose value is the integer quotient of `self` and `other`. """ return self // other def __mod__(self, other): """Returns `self` modulo `other`. Dimension moduli are computed as follows: ```python tf.Dimension(m) % tf.Dimension(n) == tf.Dimension(m % n) tf.Dimension(m) % tf.Dimension(None) == tf.Dimension(None) tf.Dimension(None) % tf.Dimension(n) == tf.Dimension(None) tf.Dimension(None) % tf.Dimension(None) == tf.Dimension(None) ``` Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is `self` modulo `other`. """ try: other = as_dimension(other) except (TypeError, ValueError): return NotImplemented if self._value is None or other.value is None: return Dimension(None) else: return Dimension(self._value % other.value) def __rmod__(self, other): """Returns `other` modulo `self`. Args: other: Another Dimension, or a value accepted by `as_dimension`. Returns: A Dimension whose value is `other` modulo `self`. """ try: other = as_dimension(other) except (TypeError, ValueError): return NotImplemented return other % self def __lt__(self, other): """Returns True if `self` is known to be less than `other`. 
Dimensions are compared as follows: ```python (tf.Dimension(m) < tf.Dimension(n)) == (m < n) (tf.Dimension(m) < tf.Dimension(None)) == None (tf.Dimension(None) < tf.Dimension(n)) == None (tf.Dimension(None) < tf.Dimension(None)) == None ``` Args: other: Another Dimension. Returns: The value of `self.value < other.value` if both are known, otherwise None. """ other = as_dimension(other) if self._value is None or other.value is None: return None else: return self._value < other.value def __le__(self, other): """Returns True if `self` is known to be less than or equal to `other`. Dimensions are compared as follows: ```python (tf.Dimension(m) <= tf.Dimension(n)) == (m <= n) (tf.Dimension(m) <= tf.Dimension(None)) == None (tf.Dimension(None) <= tf.Dimension(n)) == None (tf.Dimension(None) <= tf.Dimension(None)) == None ``` Args: other: Another Dimension. Returns: The value of `self.value <= other.value` if both are known, otherwise None. """ other = as_dimension(other) if self._value is None or other.value is None: return None else: return self._value <= other.value def __gt__(self, other): """Returns True if `self` is known to be greater than `other`. Dimensions are compared as follows: ```python (tf.Dimension(m) > tf.Dimension(n)) == (m > n) (tf.Dimension(m) > tf.Dimension(None)) == None (tf.Dimension(None) > tf.Dimension(n)) == None (tf.Dimension(None) > tf.Dimension(None)) == None ``` Args: other: Another Dimension. Returns: The value of `self.value > other.value` if both are known, otherwise None. """ other = as_dimension(other) if self._value is None or other.value is None: return None else: return self._value > other.value def __ge__(self, other): """Returns True if `self` is known to be greater than or equal to `other`. Dimensions are compared as follows: ```python (tf.Dimension(m) >= tf.Dimension(n)) == (m >= n) (tf.Dimension(m) >= tf.Dimension(None)) == None (tf.Dimension(None) >= tf.Dimension(n)) == None (tf.Dimension(None) >= tf.Dimension(None)) == None ``` Args: other: Another Dimension. Returns: The value of `self.value >= other.value` if both are known, otherwise None. """ other = as_dimension(other) if self._value is None or other.value is None: return None else: return self._value >= other.value def __reduce__(self): return Dimension, (self._value,) def as_dimension(value): """Converts the given value to a Dimension. A Dimension input will be returned unmodified. An input of `None` will be converted to an unknown Dimension. An integer input will be converted to a Dimension with that value. Args: value: The value to be converted. Returns: A Dimension corresponding to the given value. """ if isinstance(value, Dimension): return value else: return Dimension(value) @tf_export(v1=["TensorShape"]) class TensorShapeV1(object): """Represents the shape of a `Tensor`. A `TensorShape` represents a possibly-partial shape specification for a `Tensor`. It may be one of the following: * *Fully-known shape:* has a known number of dimensions and a known size for each dimension. e.g. `TensorShape([16, 256])` * *Partially-known shape:* has a known number of dimensions, and an unknown size for one or more dimension. e.g. `TensorShape([None, 256])` * *Unknown shape:* has an unknown number of dimensions, and an unknown size in all dimensions. e.g. `TensorShape(None)` If a tensor is produced by an operation of type `"Foo"`, its shape may be inferred if there is a registered shape function for `"Foo"`. 
See [Shape functions](https://tensorflow.org/extend/adding_an_op#shape_functions_in_c) for details of shape functions and how to register them. Alternatively, the shape may be set explicitly using `tf.Tensor.set_shape`. """ def __init__(self, dims): """Creates a new TensorShape with the given dimensions. Args: dims: A list of Dimensions, or None if the shape is unspecified. Raises: TypeError: If dims cannot be converted to a list of dimensions. """ if dims is None: self._dims = None elif isinstance(dims, compat.bytes_or_text_types): raise TypeError("A string has ambiguous TensorShape, please wrap in a " "list or convert to an int: %s" % dims) elif isinstance(dims, tensor_shape_pb2.TensorShapeProto): if dims.unknown_rank: self._dims = None else: self._dims = [ # Protos store variable-size dimensions as -1 as_dimension(dim.size if dim.size != -1 else None) for dim in dims.dim ] elif isinstance(dims, TensorShape): self._dims = dims.dims else: try: dims_iter = iter(dims) except TypeError: # Treat as a singleton dimension self._dims = [as_dimension(dims)] else: # Got a list of dimensions self._dims = [as_dimension(d) for d in dims_iter] @property def _v2_behavior(self): if _TENSORSHAPE_V2_OVERRIDE is None: return False return _TENSORSHAPE_V2_OVERRIDE def __repr__(self): if self._v2_behavior: if self._dims is not None: return "TensorShape(%r)" % [dim.value for dim in self._dims] else: return "TensorShape(None)" else: return "TensorShape(%r)" % self._dims def __str__(self): if self.rank is None: return "<unknown>" elif self.rank == 1: if self._v2_behavior: return "(%s,)" % self._dims[0].value else: return "(%s,)" % self._dims[0] else: if self._v2_behavior: return "(%s)" % ", ".join(str(d.value) for d in self._dims) else: return "(%s)" % ", ".join(str(d) for d in self._dims) @property def rank(self): """Returns the rank of this shape, or None if it is unspecified.""" if self._dims is not None: return len(self._dims) return None @property def dims(self): """Returns a list of Dimensions, or None if the shape is unspecified.""" return self._dims @dims.setter def dims(self, dims): self._dims = dims @property def ndims(self): """Deprecated accessor for `rank`.""" return self.rank def __len__(self): """Returns the rank of this shape, or raises ValueError if unspecified.""" if self._dims is None: raise ValueError("Cannot take the length of shape with unknown rank.") return len(self._dims) def __bool__(self): """Returns True if this shape contains non-zero information.""" return self._dims is not None # Python 3 wants __bool__, Python 2.7 wants __nonzero__ __nonzero__ = __bool__ def __iter__(self): """Returns `self.dims` if the rank is known, otherwise raises ValueError.""" if self._dims is None: raise ValueError("Cannot iterate over a shape with unknown rank.") else: if self._v2_behavior: return iter(d.value for d in self._dims) else: return iter(d for d in self._dims) def __getitem__(self, key): """Returns the value of a dimension or a shape, depending on the key. Args: key: If `key` is an integer, returns the dimension at that index; otherwise if `key` is a slice, returns a TensorShape whose dimensions are those selected by the slice from `self`. Returns: An integer if `key` is an integer, or a `TensorShape` if `key` is a slice. Raises: ValueError: If `key` is a slice and `self` is completely unknown and the step is set. 
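    For example (illustrative only; integer indexing returns a `Dimension`
    under the V1 behavior and a plain integer or None under the V2 behavior):

    ```python
    s = TensorShape([16, None, 256])
    s[0]    # first dimension: Dimension(16) in V1, 16 in V2
    s[1:]   # a TensorShape holding the remaining dimensions
    ```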
""" if self._dims is not None: if isinstance(key, slice): return TensorShape(self._dims[key]) else: if self._v2_behavior: return self._dims[key].value else: return self._dims[key] else: if isinstance(key, slice): start = key.start if key.start is not None else 0 stop = key.stop if key.step is not None: # TODO(mrry): Handle these maybe. raise ValueError("Steps are not yet handled") if stop is None: # NOTE(mrry): This implies that TensorShape(None) is compatible with # TensorShape(None)[1:], which is obviously not true. It would be # possible to track the number of dimensions symbolically, # and perhaps we should do that. return unknown_shape() elif start < 0 or stop < 0: # TODO(mrry): Handle this better, as it will be useful for handling # suffixes of otherwise unknown shapes. return unknown_shape() else: return unknown_shape(rank=stop - start) else: if self._v2_behavior: return None else: return Dimension(None) def num_elements(self): """Returns the total number of elements, or none for incomplete shapes.""" if self.is_fully_defined(): size = 1 for dim in self._dims: size *= dim.value return size else: return None def merge_with(self, other): """Returns a `TensorShape` combining the information in `self` and `other`. The dimensions in `self` and `other` are merged elementwise, according to the rules defined for `Dimension.merge_with()`. Args: other: Another `TensorShape`. Returns: A `TensorShape` containing the combined information of `self` and `other`. Raises: ValueError: If `self` and `other` are not compatible. """ other = as_shape(other) if self._dims is None: return other else: try: self.assert_same_rank(other) new_dims = [] for i, dim in enumerate(self._dims): new_dims.append(dim.merge_with(other[i])) return TensorShape(new_dims) except ValueError: raise ValueError("Shapes %s and %s are not compatible" % (self, other)) def concatenate(self, other): """Returns the concatenation of the dimension in `self` and `other`. *N.B.* If either `self` or `other` is completely unknown, concatenation will discard information about the other shape. In future, we might support concatenation that preserves this information for use with slicing. Args: other: Another `TensorShape`. Returns: A `TensorShape` whose dimensions are the concatenation of the dimensions in `self` and `other`. """ # TODO(mrry): Handle the case where we concatenate a known shape with a # completely unknown shape, so that we can use the partial information. other = as_shape(other) if self._dims is None or other.dims is None: return unknown_shape() else: return TensorShape(self._dims + other.dims) def assert_same_rank(self, other): """Raises an exception if `self` and `other` do not have compatible ranks. Args: other: Another `TensorShape`. Raises: ValueError: If `self` and `other` do not represent shapes with the same rank. """ other = as_shape(other) if self.rank is not None and other.rank is not None: if self.rank != other.rank: raise ValueError("Shapes %s and %s must have the same rank" % (self, other)) def assert_has_rank(self, rank): """Raises an exception if `self` is not compatible with the given `rank`. Args: rank: An integer. Raises: ValueError: If `self` does not represent a shape with the given `rank`. """ if self.rank not in (None, rank): raise ValueError("Shape %s must have rank %d" % (self, rank)) def with_rank(self, rank): """Returns a shape based on `self` with the given rank. This method promotes a completely unknown shape to one with a known rank. Args: rank: An integer. 
Returns: A shape that is at least as specific as `self` with the given rank. Raises: ValueError: If `self` does not represent a shape with the given `rank`. """ try: return self.merge_with(unknown_shape(rank=rank)) except ValueError: raise ValueError("Shape %s must have rank %d" % (self, rank)) def with_rank_at_least(self, rank): """Returns a shape based on `self` with at least the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with at least the given rank. Raises: ValueError: If `self` does not represent a shape with at least the given `rank`. """ if self.rank is not None and self.rank < rank: raise ValueError("Shape %s must have rank at least %d" % (self, rank)) else: return self def with_rank_at_most(self, rank): """Returns a shape based on `self` with at most the given rank. Args: rank: An integer. Returns: A shape that is at least as specific as `self` with at most the given rank. Raises: ValueError: If `self` does not represent a shape with at most the given `rank`. """ if self.rank is not None and self.rank > rank: raise ValueError("Shape %s must have rank at most %d" % (self, rank)) else: return self def is_compatible_with(self, other): """Returns True iff `self` is compatible with `other`. Two possibly-partially-defined shapes are compatible if there exists a fully-defined shape that both shapes can represent. Thus, compatibility allows the shape inference code to reason about partially-defined shapes. For example: * TensorShape(None) is compatible with all shapes. * TensorShape([None, None]) is compatible with all two-dimensional shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is not compatible with, for example, TensorShape([None]) or TensorShape([None, None, None]). * TensorShape([32, None]) is compatible with all two-dimensional shapes with size 32 in the 0th dimension, and also TensorShape([None, None]) and TensorShape(None). It is not compatible with, for example, TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]). * TensorShape([32, 784]) is compatible with itself, and also TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None, None]) and TensorShape(None). It is not compatible with, for example, TensorShape([32, 1, 784]) or TensorShape([None]). The compatibility relation is reflexive and symmetric, but not transitive. For example, TensorShape([32, 784]) is compatible with TensorShape(None), and TensorShape(None) is compatible with TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with TensorShape([4, 4]). Args: other: Another TensorShape. Returns: True iff `self` is compatible with `other`. """ other = as_shape(other) if self._dims is not None and other.dims is not None: if self.rank != other.rank: return False for x_dim, y_dim in zip(self._dims, other.dims): if not x_dim.is_compatible_with(y_dim): return False return True def assert_is_compatible_with(self, other): """Raises exception if `self` and `other` do not represent the same shape. This method can be used to assert that there exists a shape that both `self` and `other` represent. Args: other: Another TensorShape. Raises: ValueError: If `self` and `other` do not represent the same shape. """ if not self.is_compatible_with(other): raise ValueError("Shapes %s and %s are incompatible" % (self, other)) def most_specific_compatible_shape(self, other): """Returns the most specific TensorShape compatible with `self` and `other`. 
* TensorShape([None, 1]) is the most specific TensorShape compatible with both TensorShape([2, 1]) and TensorShape([5, 1]). Note that TensorShape(None) is also compatible with above mentioned TensorShapes. * TensorShape([1, 2, 3]) is the most specific TensorShape compatible with both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are more less specific TensorShapes compatible with above mentioned TensorShapes, e.g. TensorShape([1, 2, None]), TensorShape(None). Args: other: Another `TensorShape`. Returns: A `TensorShape` which is the most specific compatible shape of `self` and `other`. """ other = as_shape(other) if self._dims is None or other.dims is None or self.rank != other.rank: return unknown_shape() dims = [(Dimension(None))] * self.rank for i, (d1, d2) in enumerate(zip(self._dims, other.dims)): if d1 is not None and d2 is not None and d1 == d2: dims[i] = d1 return TensorShape(dims) def is_fully_defined(self): """Returns True iff `self` is fully defined in every dimension.""" return (self._dims is not None and all(dim.value is not None for dim in self._dims)) def assert_is_fully_defined(self): """Raises an exception if `self` is not fully defined in every dimension. Raises: ValueError: If `self` does not have a known value for every dimension. """ if not self.is_fully_defined(): raise ValueError("Shape %s is not fully defined" % self) def as_list(self): """Returns a list of integers or `None` for each dimension. Returns: A list of integers or `None` for each dimension. Raises: ValueError: If `self` is an unknown shape with an unknown rank. """ if self._dims is None: raise ValueError("as_list() is not defined on an unknown TensorShape.") return [dim.value for dim in self._dims] def as_proto(self): """Returns this shape as a `TensorShapeProto`.""" if self._dims is None: return tensor_shape_pb2.TensorShapeProto(unknown_rank=True) else: return tensor_shape_pb2.TensorShapeProto(dim=[ tensor_shape_pb2.TensorShapeProto.Dim(size=-1 if d.value is None else d.value) for d in self._dims ]) def __eq__(self, other): """Returns True if `self` is equivalent to `other`.""" try: other = as_shape(other) except TypeError: return NotImplemented return self._dims == other.dims def __ne__(self, other): """Returns True if `self` is known to be different from `other`.""" try: other = as_shape(other) except TypeError: return NotImplemented if self.rank is None or other.rank is None: raise ValueError("The inequality of unknown TensorShapes is undefined.") if self.rank != other.rank: return True return self._dims != other.dims def __reduce__(self): return TensorShape, (self._dims,) def __concat__(self, other): return self.concatenate(other) def as_shape(shape): """Converts the given object to a TensorShape.""" if isinstance(shape, TensorShape): return shape else: return TensorShape(shape) def unknown_shape(rank=None, **kwargs): """Returns an unknown TensorShape, optionally with a known rank. Args: rank: (Optional) If specified, the number of dimensions in the shape. **kwargs: For backwards compatibility. Returns: An unknown TensorShape. Raises: TypeError: In case of invalid arguments. 
""" if rank is None and "ndims" in kwargs: rank = kwargs.pop("ndims") if kwargs: raise TypeError("Unknown argument: %s" % kwargs) if rank is None: return TensorShape(None) else: return TensorShape([Dimension(None)] * rank) @tf_export("TensorShape", v1=[]) class TensorShapeV2(TensorShapeV1): @property def _v2_behavior(self): if _TENSORSHAPE_V2_OVERRIDE is None: return True return _TENSORSHAPE_V2_OVERRIDE if tf2.enabled(): TensorShape = TensorShapeV2 else: TensorShape = TensorShapeV1 def scalar(): """Returns a shape representing a scalar.""" return TensorShape([]) def vector(length): """Returns a shape representing a vector. Args: length: The length of the vector, which may be None if unknown. Returns: A TensorShape representing a vector of the given length. """ return TensorShape([length]) def matrix(rows, cols): """Returns a shape representing a matrix. Args: rows: The number of rows in the matrix, which may be None if unknown. cols: The number of columns in the matrix, which may be None if unknown. Returns: A TensorShape representing a matrix of the given size. """ return TensorShape([rows, cols])
# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ LVM class for performing LVM operations. """ import math import os import re from os_brick import executor from oslo_concurrency import processutils as putils from oslo_log import log as logging from oslo_utils import excutils from six import moves from cinder import exception from cinder.i18n import _LE, _LI from cinder import utils LOG = logging.getLogger(__name__) class LVM(executor.Executor): """LVM object to enable various LVM related operations.""" LVM_CMD_PREFIX = ['env', 'LC_ALL=C'] def __init__(self, vg_name, root_helper, create_vg=False, physical_volumes=None, lvm_type='default', executor=putils.execute, lvm_conf=None): """Initialize the LVM object. The LVM object is based on an LVM VolumeGroup, one instantiation for each VolumeGroup you have/use. :param vg_name: Name of existing VG or VG to create :param root_helper: Execution root_helper method to use :param create_vg: Indicates the VG doesn't exist and we want to create it :param physical_volumes: List of PVs to build VG on :param lvm_type: VG and Volume type (default, or thin) :param executor: Execute method to use, None uses common/processutils """ super(LVM, self).__init__(execute=executor, root_helper=root_helper) self.vg_name = vg_name self.pv_list = [] self.vg_size = 0.0 self.vg_free_space = 0.0 self.vg_lv_count = 0 self.vg_uuid = None self.vg_thin_pool = None self.vg_thin_pool_size = 0.0 self.vg_thin_pool_free_space = 0.0 self._supports_snapshot_lv_activation = None self._supports_lvchange_ignoreskipactivation = None self.vg_provisioned_capacity = 0.0 # Ensure LVM_SYSTEM_DIR has been added to LVM.LVM_CMD_PREFIX # before the first LVM command is executed, and use the directory # where the specified lvm_conf file is located as the value. if lvm_conf and os.path.isfile(lvm_conf): lvm_sys_dir = os.path.dirname(lvm_conf) LVM.LVM_CMD_PREFIX = ['env', 'LC_ALL=C', 'LVM_SYSTEM_DIR=' + lvm_sys_dir] if create_vg and physical_volumes is not None: self.pv_list = physical_volumes try: self._create_vg(physical_volumes) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error creating Volume Group')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise exception.VolumeGroupCreationFailed(vg_name=self.vg_name) if self._vg_exists() is False: LOG.error(_LE('Unable to locate Volume Group %s'), vg_name) raise exception.VolumeGroupNotFound(vg_name=vg_name) # NOTE: we assume that the VG has been activated outside of Cinder if lvm_type == 'thin': pool_name = "%s-pool" % self.vg_name if self.get_volume(pool_name) is None: try: self.create_thin_pool(pool_name) except putils.ProcessExecutionError: # Maybe we just lost the race against another copy of # this driver being in init in parallel - e.g. 
# cinder-volume and cinder-backup starting in parallel if self.get_volume(pool_name) is None: raise self.vg_thin_pool = pool_name self.activate_lv(self.vg_thin_pool) self.pv_list = self.get_all_physical_volumes(root_helper, vg_name) def _vg_exists(self): """Simple check to see if VG exists. :returns: True if vg specified in object exists, else False """ exists = False cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings', '-o', 'name', self.vg_name] (out, _err) = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if out is not None: volume_groups = out.split() if self.vg_name in volume_groups: exists = True return exists def _create_vg(self, pv_list): cmd = ['vgcreate', self.vg_name, ','.join(pv_list)] self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) def _get_thin_pool_free_space(self, vg_name, thin_pool_name): """Returns available thin pool free space. :param vg_name: the vg where the pool is placed :param thin_pool_name: the thin pool to gather info for :returns: Free space in GB (float), calculated using data_percent """ cmd = LVM.LVM_CMD_PREFIX +\ ['lvs', '--noheadings', '--unit=g', '-o', 'size,data_percent', '--separator', ':', '--nosuffix'] # NOTE(gfidente): data_percent only applies to some types of LV so we # make sure to append the actual thin pool name cmd.append("/dev/%s/%s" % (vg_name, thin_pool_name)) free_space = 0.0 try: (out, err) = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if out is not None: out = out.strip() data = out.split(':') pool_size = float(data[0]) data_percent = float(data[1]) consumed_space = pool_size / 100 * data_percent free_space = pool_size - consumed_space free_space = round(free_space, 2) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error querying thin pool about data_percent')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) return free_space @staticmethod def get_lvm_version(root_helper): """Static method to get LVM version from system. :param root_helper: root_helper to use for execute :returns: version 3-tuple """ cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--version'] (out, _err) = putils.execute(*cmd, root_helper=root_helper, run_as_root=True) lines = out.split('\n') for line in lines: if 'LVM version' in line: version_list = line.split() # NOTE(gfidente): version is formatted as follows: # major.minor.patchlevel(library API version)[-customisation] version = version_list[2] version_filter = r"(\d+)\.(\d+)\.(\d+).*" r = re.search(version_filter, version) version_tuple = tuple(map(int, r.group(1, 2, 3))) return version_tuple @staticmethod def supports_thin_provisioning(root_helper): """Static method to check for thin LVM support on a system. :param root_helper: root_helper to use for execute :returns: True if supported, False otherwise """ return LVM.get_lvm_version(root_helper) >= (2, 2, 95) @property def supports_snapshot_lv_activation(self): """Property indicating whether snap activation changes are supported. Check for LVM version >= 2.02.91. (LVM2 git: e8a40f6 Allow to activate snapshot) :returns: True/False indicating support """ if self._supports_snapshot_lv_activation is not None: return self._supports_snapshot_lv_activation self._supports_snapshot_lv_activation = ( self.get_lvm_version(self._root_helper) >= (2, 2, 91)) return self._supports_snapshot_lv_activation @property def supports_lvchange_ignoreskipactivation(self): """Property indicating whether lvchange can ignore skip activation. 
Check for LVM version >= 2.02.99. (LVM2 git: ab789c1bc add --ignoreactivationskip to lvchange) """ if self._supports_lvchange_ignoreskipactivation is not None: return self._supports_lvchange_ignoreskipactivation self._supports_lvchange_ignoreskipactivation = ( self.get_lvm_version(self._root_helper) >= (2, 2, 99)) return self._supports_lvchange_ignoreskipactivation @staticmethod def get_lv_info(root_helper, vg_name=None, lv_name=None): """Retrieve info about LVs (all, in a VG, or a single LV). :param root_helper: root_helper to use for execute :param vg_name: optional, gathers info for only the specified VG :param lv_name: optional, gathers info for only the specified LV :returns: List of Dictionaries with LV info """ cmd = LVM.LVM_CMD_PREFIX + ['lvs', '--noheadings', '--unit=g', '-o', 'vg_name,name,size', '--nosuffix'] if lv_name is not None and vg_name is not None: cmd.append("%s/%s" % (vg_name, lv_name)) elif vg_name is not None: cmd.append(vg_name) try: (out, _err) = putils.execute(*cmd, root_helper=root_helper, run_as_root=True) except putils.ProcessExecutionError as err: with excutils.save_and_reraise_exception(reraise=True) as ctx: if "not found" in err.stderr or "Failed to find" in err.stderr: ctx.reraise = False LOG.info(_LI("Logical Volume not found when querying " "LVM info. (vg_name=%(vg)s, lv_name=%(lv)s"), {'vg': vg_name, 'lv': lv_name}) out = None lv_list = [] if out is not None: volumes = out.split() iterator = moves.zip(*[iter(volumes)] * 3) # pylint: disable=E1101 for vg, name, size in iterator: lv_list.append({"vg": vg, "name": name, "size": size}) return lv_list def get_volumes(self, lv_name=None): """Get all LV's associated with this instantiation (VG). :returns: List of Dictionaries with LV info """ return self.get_lv_info(self._root_helper, self.vg_name, lv_name) def get_volume(self, name): """Get reference object of volume specified by name. :returns: dict representation of Logical Volume if exists """ ref_list = self.get_volumes(name) for r in ref_list: if r['name'] == name: return r return None @staticmethod def get_all_physical_volumes(root_helper, vg_name=None): """Static method to get all PVs on a system. :param root_helper: root_helper to use for execute :param vg_name: optional, gathers info for only the specified VG :returns: List of Dictionaries with PV info """ field_sep = '|' cmd = LVM.LVM_CMD_PREFIX + ['pvs', '--noheadings', '--unit=g', '-o', 'vg_name,name,size,free', '--separator', field_sep, '--nosuffix'] (out, _err) = putils.execute(*cmd, root_helper=root_helper, run_as_root=True) pvs = out.split() if vg_name is not None: pvs = [pv for pv in pvs if vg_name == pv.split(field_sep)[0]] pv_list = [] for pv in pvs: fields = pv.split(field_sep) pv_list.append({'vg': fields[0], 'name': fields[1], 'size': float(fields[2]), 'available': float(fields[3])}) return pv_list @staticmethod def get_all_volume_groups(root_helper, vg_name=None): """Static method to get all VGs on a system. 
:param root_helper: root_helper to use for execute :param vg_name: optional, gathers info for only the specified VG :returns: List of Dictionaries with VG info """ cmd = LVM.LVM_CMD_PREFIX + ['vgs', '--noheadings', '--unit=g', '-o', 'name,size,free,lv_count,uuid', '--separator', ':', '--nosuffix'] if vg_name is not None: cmd.append(vg_name) (out, _err) = putils.execute(*cmd, root_helper=root_helper, run_as_root=True) vg_list = [] if out is not None: vgs = out.split() for vg in vgs: fields = vg.split(':') vg_list.append({'name': fields[0], 'size': float(fields[1]), 'available': float(fields[2]), 'lv_count': int(fields[3]), 'uuid': fields[4]}) return vg_list def update_volume_group_info(self): """Update VG info for this instantiation. Used to update member fields of object and provide a dict of info for caller. :returns: Dictionaries of VG info """ vg_list = self.get_all_volume_groups(self._root_helper, self.vg_name) if len(vg_list) != 1: LOG.error(_LE('Unable to find VG: %s'), self.vg_name) raise exception.VolumeGroupNotFound(vg_name=self.vg_name) self.vg_size = float(vg_list[0]['size']) self.vg_free_space = float(vg_list[0]['available']) self.vg_lv_count = int(vg_list[0]['lv_count']) self.vg_uuid = vg_list[0]['uuid'] total_vols_size = 0.0 if self.vg_thin_pool is not None: # NOTE(xyang): If providing only self.vg_name, # get_lv_info will output info on the thin pool and all # individual volumes. # get_lv_info(self._root_helper, 'stack-vg') # sudo lvs --noheadings --unit=g -o vg_name,name,size # --nosuffix stack-vg # stack-vg stack-pool 9.51 # stack-vg volume-13380d16-54c3-4979-9d22-172082dbc1a1 1.00 # stack-vg volume-629e13ab-7759-46a5-b155-ee1eb20ca892 1.00 # stack-vg volume-e3e6281c-51ee-464c-b1a7-db6c0854622c 1.00 # # If providing both self.vg_name and self.vg_thin_pool, # get_lv_info will output only info on the thin pool, but not # individual volumes. # get_lv_info(self._root_helper, 'stack-vg', 'stack-pool') # sudo lvs --noheadings --unit=g -o vg_name,name,size # --nosuffix stack-vg/stack-pool # stack-vg stack-pool 9.51 # # We need info on both the thin pool and the volumes, # therefore we should provide only self.vg_name, but not # self.vg_thin_pool here. for lv in self.get_lv_info(self._root_helper, self.vg_name): lvsize = lv['size'] # get_lv_info runs "lvs" command with "--nosuffix". # This removes "g" from "1.00g" and only outputs "1.00". # Running "lvs" command without "--nosuffix" will output # "1.00g" if "g" is the unit. # Remove the unit if it is in lv['size']. if not lv['size'][-1].isdigit(): lvsize = lvsize[:-1] if lv['name'] == self.vg_thin_pool: self.vg_thin_pool_size = lvsize tpfs = self._get_thin_pool_free_space(self.vg_name, self.vg_thin_pool) self.vg_thin_pool_free_space = tpfs else: total_vols_size = total_vols_size + float(lvsize) total_vols_size = round(total_vols_size, 2) self.vg_provisioned_capacity = total_vols_size def _calculate_thin_pool_size(self): """Calculates the correct size for a thin pool. Ideally we would use 100% of the containing volume group and be done. But the 100%VG notation to lvcreate is not implemented and thus cannot be used. See https://bugzilla.redhat.com/show_bug.cgi?id=998347 Further, some amount of free space must remain in the volume group for metadata for the contained logical volumes. The exact amount depends on how much volume sharing you expect. :returns: An lvcreate-ready string for the number of calculated bytes. 
""" # make sure volume group information is current self.update_volume_group_info() # leave 5% free for metadata return "%sg" % (self.vg_free_space * 0.95) def create_thin_pool(self, name=None, size_str=None): """Creates a thin provisioning pool for this VG. The syntax here is slightly different than the default lvcreate -T, so we'll just write a custom cmd here and do it. :param name: Name to use for pool, default is "<vg-name>-pool" :param size_str: Size to allocate for pool, default is entire VG :returns: The size string passed to the lvcreate command """ if not self.supports_thin_provisioning(self._root_helper): LOG.error(_LE('Requested to setup thin provisioning, ' 'however current LVM version does not ' 'support it.')) return None if name is None: name = '%s-pool' % self.vg_name vg_pool_name = '%s/%s' % (self.vg_name, name) if not size_str: size_str = self._calculate_thin_pool_size() cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-L', size_str, vg_pool_name] LOG.debug("Creating thin pool '%(pool)s' with size %(size)s of " "total %(free)sg", {'pool': vg_pool_name, 'size': size_str, 'free': self.vg_free_space}) self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) self.vg_thin_pool = name return size_str def create_volume(self, name, size_str, lv_type='default', mirror_count=0): """Creates a logical volume on the object's VG. :param name: Name to use when creating Logical Volume :param size_str: Size to use when creating Logical Volume :param lv_type: Type of Volume (default or thin) :param mirror_count: Use LVM mirroring with specified count """ if lv_type == 'thin': pool_path = '%s/%s' % (self.vg_name, self.vg_thin_pool) cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-T', '-V', size_str, '-n', name, pool_path] else: cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '-n', name, self.vg_name, '-L', size_str] if mirror_count > 0: cmd.extend(['-m', mirror_count, '--nosync', '--mirrorlog', 'mirrored']) terras = int(size_str[:-1]) / 1024.0 if terras >= 1.5: rsize = int(2 ** math.ceil(math.log(terras) / math.log(2))) # NOTE(vish): Next power of two for region size. See: # http://red.ht/U2BPOD cmd.extend(['-R', str(rsize)]) try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error creating Volume')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise @utils.retry(putils.ProcessExecutionError) def create_lv_snapshot(self, name, source_lv_name, lv_type='default'): """Creates a snapshot of a logical volume. 
:param name: Name to assign to new snapshot :param source_lv_name: Name of Logical Volume to snapshot :param lv_type: Type of LV (default or thin) """ source_lvref = self.get_volume(source_lv_name) if source_lvref is None: LOG.error(_LE("Trying to create snapshot by non-existent LV: %s"), source_lv_name) raise exception.VolumeDeviceNotFound(device=source_lv_name) cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot', '%s/%s' % (self.vg_name, source_lv_name)] if lv_type != 'thin': size = source_lvref['size'] cmd.extend(['-L', '%sg' % (size)]) try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error creating snapshot')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise def _mangle_lv_name(self, name): # Linux LVM reserves name that starts with snapshot, so that # such volume name can't be created. Mangle it. if not name.startswith('snapshot'): return name return '_' + name def _lv_is_active(self, name): cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', 'Attr', '%s/%s' % (self.vg_name, name)] out, _err = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if out: out = out.strip() if (out[4] == 'a'): return True return False def deactivate_lv(self, name): lv_path = self.vg_name + '/' + self._mangle_lv_name(name) cmd = ['lvchange', '-a', 'n'] cmd.append(lv_path) try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error deactivating LV')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise # Wait until lv is deactivated to return in # order to prevent a race condition. self._wait_for_volume_deactivation(name) @utils.retry(exceptions=exception.VolumeNotDeactivated, retries=3, backoff_rate=1) def _wait_for_volume_deactivation(self, name): LOG.debug("Checking to see if volume %s has been deactivated.", name) if self._lv_is_active(name): LOG.debug("Volume %s is still active.", name) raise exception.VolumeNotDeactivated(name=name) else: LOG.debug("Volume %s has been deactivated.", name) def activate_lv(self, name, is_snapshot=False, permanent=False): """Ensure that logical volume/snapshot logical volume is activated. :param name: Name of LV to activate :param is_snapshot: whether LV is a snapshot :param permanent: whether we should drop skipactivation flag :raises: putils.ProcessExecutionError """ # This is a no-op if requested for a snapshot on a version # of LVM that doesn't support snapshot activation. # (Assume snapshot LV is always active.) if is_snapshot and not self.supports_snapshot_lv_activation: return lv_path = self.vg_name + '/' + self._mangle_lv_name(name) # Must pass --yes to activate both the snap LV and its origin LV. # Otherwise lvchange asks if you would like to do this interactively, # and fails. cmd = ['lvchange', '-a', 'y', '--yes'] if self.supports_lvchange_ignoreskipactivation: cmd.append('-K') # If permanent=True is specified, drop the skipactivation flag in # order to make this LV automatically activated after next reboot. 
if permanent: cmd += ['-k', 'n'] cmd.append(lv_path) try: self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error activating LV')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise @utils.retry(putils.ProcessExecutionError) def delete(self, name): """Delete logical volume or snapshot. :param name: Name of LV to delete """ def run_udevadm_settle(): self._execute('udevadm', 'settle', root_helper=self._root_helper, run_as_root=True, check_exit_code=False) # LV removal seems to be a race with other writers or udev in # some cases (see LP #1270192), so we enable retry deactivation LVM_CONFIG = 'activation { retry_deactivation = 1} ' try: self._execute( 'lvremove', '--config', LVM_CONFIG, '-f', '%s/%s' % (self.vg_name, name), root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.debug('Error reported running lvremove: CMD: %(command)s, ' 'RESPONSE: %(response)s', {'command': err.cmd, 'response': err.stderr}) LOG.debug('Attempting udev settle and retry of lvremove...') run_udevadm_settle() # The previous failing lvremove -f might leave behind # suspended devices; when lvmetad is not available, any # further lvm command will block forever. # Therefore we need to skip suspended devices on retry. LVM_CONFIG += 'devices { ignore_suspended_devices = 1}' self._execute( 'lvremove', '--config', LVM_CONFIG, '-f', '%s/%s' % (self.vg_name, name), root_helper=self._root_helper, run_as_root=True) LOG.debug('Successfully deleted volume: %s after ' 'udev settle.', name) def revert(self, snapshot_name): """Revert an LV from snapshot. :param snapshot_name: Name of snapshot to revert """ self._execute('lvconvert', '--merge', snapshot_name, root_helper=self._root_helper, run_as_root=True) def lv_has_snapshot(self, name): cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', 'Attr', '%s/%s' % (self.vg_name, name)] out, _err = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) if out: out = out.strip() if (out[0] == 'o') or (out[0] == 'O'): return True return False def lv_is_snapshot(self, name): """Return True if LV is a snapshot, False otherwise.""" cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', 'Attr', '%s/%s' % (self.vg_name, name)] out, _err = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) out = out.strip() if out: if (out[0] == 's'): return True return False def lv_is_open(self, name): """Return True if LV is currently open, False otherwise.""" cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', 'Attr', '%s/%s' % (self.vg_name, name)] out, _err = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) out = out.strip() if out: if (out[5] == 'o'): return True return False def lv_get_origin(self, name): """Return the origin of an LV that is a snapshot, None otherwise.""" cmd = LVM.LVM_CMD_PREFIX + ['lvdisplay', '--noheading', '-C', '-o', 'Origin', '%s/%s' % (self.vg_name, name)] out, _err = self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) out = out.strip() if out: return out return None def extend_volume(self, lv_name, new_size): """Extend the size of an existing volume.""" # Volumes with snaps have attributes 'o' or 'O' and will be # deactivated, but Thin Volumes with snaps have attribute 'V' # and won't be deactivated because the lv_has_snapshot method looks # for 'o' or 'O' if 
self.lv_has_snapshot(lv_name): self.deactivate_lv(lv_name) try: cmd = LVM.LVM_CMD_PREFIX + ['lvextend', '-L', new_size, '%s/%s' % (self.vg_name, lv_name)] self._execute(*cmd, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error extending Volume')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise def vg_mirror_free_space(self, mirror_count): free_capacity = 0.0 disks = [] for pv in self.pv_list: disks.append(float(pv['available'])) while True: disks = sorted([a for a in disks if a > 0.0], reverse=True) if len(disks) <= mirror_count: break # consume the smallest disk disk = disks[-1] disks = disks[:-1] # match extents for each mirror on the largest disks for index in list(range(mirror_count)): disks[index] -= disk free_capacity += disk return free_capacity def vg_mirror_size(self, mirror_count): return (self.vg_free_space / (mirror_count + 1)) def rename_volume(self, lv_name, new_name): """Change the name of an existing volume.""" try: self._execute('lvrename', self.vg_name, lv_name, new_name, root_helper=self._root_helper, run_as_root=True) except putils.ProcessExecutionError as err: LOG.exception(_LE('Error renaming logical volume')) LOG.error(_LE('Cmd :%s'), err.cmd) LOG.error(_LE('StdOut :%s'), err.stdout) LOG.error(_LE('StdErr :%s'), err.stderr) raise
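

# Illustrative sketch (not part of the driver): the parsing pattern the static
# helpers above (get_all_volume_groups / get_all_physical_volumes) apply to
# '--noheadings --nosuffix --separator' command output. The helper name and
# the canned sample records are hypothetical; the real methods obtain ``out``
# from putils.execute run through the root helper.
def _parse_vgs_output_sketch(out, field_sep=':'):
    """Turn 'name:size:free:lv_count:uuid' records into a list of dicts."""
    vg_list = []
    for vg in out.split():
        fields = vg.split(field_sep)
        vg_list.append({'name': fields[0],
                        'size': float(fields[1]),
                        'available': float(fields[2]),
                        'lv_count': int(fields[3]),
                        'uuid': fields[4]})
    return vg_list


def _parse_vgs_output_sketch_example():
    # Two hypothetical VGs, one record per line, ':' separated as above.
    sample = ("stack-vg:20.00:9.51:4:abcd-1234\n"
              "data-vg:100.00:42.00:1:efgh-5678\n")
    vgs = _parse_vgs_output_sketch(sample)
    assert vgs[0]['name'] == 'stack-vg'
    assert vgs[1]['available'] == 42.0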
# -*- coding: utf-8 -*- # Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union from google.api_core import gapic_v1 from google.api_core import grpc_helpers_async from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore import grpc # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.dialogflow_v2beta1.types import session from google.cloud.dialogflow_v2beta1.types import session as gcd_session from .base import SessionsTransport, DEFAULT_CLIENT_INFO from .grpc import SessionsGrpcTransport class SessionsGrpcAsyncIOTransport(SessionsTransport): """gRPC AsyncIO backend transport for Sessions. A service used for session interactions. For more information, see the `API interactions guide <https://cloud.google.com/dialogflow/docs/api-overview>`__. This class defines the same methods as the primary client, so the primary client can load the underlying transport implementation and call it. It sends protocol buffers over the wire using gRPC (which is built on top of HTTP/2); the ``grpcio`` package must be installed. """ _grpc_channel: aio.Channel _stubs: Dict[str, Callable] = {} @classmethod def create_channel( cls, host: str = "dialogflow.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. quota_project_id (Optional[str]): An optional project to use for billing and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: aio.Channel: A gRPC AsyncIO channel object. 
""" return grpc_helpers_async.create_channel( host, credentials=credentials, credentials_file=credentials_file, quota_project_id=quota_project_id, default_scopes=cls.AUTH_SCOPES, scopes=scopes, default_host=cls.DEFAULT_HOST, **kwargs, ) def __init__( self, *, host: str = "dialogflow.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. This argument is ignored if ``channel`` is provided. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is ignored if ``channel`` is provided. scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. channel (Optional[aio.Channel]): A ``Channel`` instance through which to make calls. api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or application default SSL credentials. client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): Deprecated. A callback to provide client SSL certificate bytes and private key bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for the grpc channel. It is ignored if ``channel`` is provided. client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): A callback to provide client certificate bytes and private key bytes, both in PEM format. It is used to configure a mutual TLS channel. It is ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. 
""" self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) if client_cert_source: warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: # Ignore credentials if a channel was passed. credentials = False # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None else: if api_mtls_endpoint: host = api_mtls_endpoint # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: cert, key = client_cert_source() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) else: self._ssl_channel_credentials = SslCredentials().ssl_credentials else: if client_cert_source_for_mtls and not ssl_channel_credentials: cert, key = client_cert_source_for_mtls() self._ssl_channel_credentials = grpc.ssl_channel_credentials( certificate_chain=cert, private_key=key ) # The base transport sets the host, credentials and scopes super().__init__( host=host, credentials=credentials, credentials_file=credentials_file, scopes=scopes, quota_project_id=quota_project_id, client_info=client_info, always_use_jwt_access=always_use_jwt_access, ) if not self._grpc_channel: self._grpc_channel = type(self).create_channel( self._host, # use the credentials which are saved credentials=self._credentials, # Set ``credentials_file`` to ``None`` here as # the credentials that we saved earlier should be used. credentials_file=None, scopes=self._scopes, ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), ("grpc.max_receive_message_length", -1), ], ) # Wrap messages. This must be done after self._grpc_channel exists self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: """Create the channel designed to connect to this service. This property caches on the instance; repeated calls return the same channel. """ # Return the channel from cache. return self._grpc_channel @property def detect_intent( self, ) -> Callable[ [gcd_session.DetectIntentRequest], Awaitable[gcd_session.DetectIntentResponse] ]: r"""Return a callable for the detect intent method over gRPC. Processes a natural language query and returns structured, actionable data as a result. This method is not idempotent, because it may cause contexts and session entity types to be updated, which in turn might affect results of future queries. If you might use `Agent Assist <https://cloud.google.com/dialogflow/docs/#aa>`__ or other CCAI products now or in the future, consider using [AnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.AnalyzeContent] instead of ``DetectIntent``. ``AnalyzeContent`` has additional functionality for Agent Assist and other CCAI products. Note: Always use agent versions for production traffic. See `Versions and environments <https://cloud.google.com/dialogflow/es/docs/agents-versions>`__. Returns: Callable[[~.DetectIntentRequest], Awaitable[~.DetectIntentResponse]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "detect_intent" not in self._stubs: self._stubs["detect_intent"] = self.grpc_channel.unary_unary( "/google.cloud.dialogflow.v2beta1.Sessions/DetectIntent", request_serializer=gcd_session.DetectIntentRequest.serialize, response_deserializer=gcd_session.DetectIntentResponse.deserialize, ) return self._stubs["detect_intent"] @property def streaming_detect_intent( self, ) -> Callable[ [session.StreamingDetectIntentRequest], Awaitable[session.StreamingDetectIntentResponse], ]: r"""Return a callable for the streaming detect intent method over gRPC. Processes a natural language query in audio format in a streaming fashion and returns structured, actionable data as a result. This method is only available via the gRPC API (not REST). If you might use `Agent Assist <https://cloud.google.com/dialogflow/docs/#aa>`__ or other CCAI products now or in the future, consider using [StreamingAnalyzeContent][google.cloud.dialogflow.v2beta1.Participants.StreamingAnalyzeContent] instead of ``StreamingDetectIntent``. ``StreamingAnalyzeContent`` has additional functionality for Agent Assist and other CCAI products. Note: Always use agent versions for production traffic. See `Versions and environments <https://cloud.google.com/dialogflow/es/docs/agents-versions>`__. Returns: Callable[[~.StreamingDetectIntentRequest], Awaitable[~.StreamingDetectIntentResponse]]: A function that, when called, will call the underlying RPC on the server. """ # Generate a "stub function" on-the-fly which will actually make # the request. # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "streaming_detect_intent" not in self._stubs: self._stubs["streaming_detect_intent"] = self.grpc_channel.stream_stream( "/google.cloud.dialogflow.v2beta1.Sessions/StreamingDetectIntent", request_serializer=session.StreamingDetectIntentRequest.serialize, response_deserializer=session.StreamingDetectIntentResponse.deserialize, ) return self._stubs["streaming_detect_intent"] def close(self): return self.grpc_channel.close() __all__ = ("SessionsGrpcAsyncIOTransport",)
from jirabulkloader.task_extractor_exceptions \ import TaskExtractorTemplateErrorProject from mock import MagicMock, call import pytest def test_create_issue_replace_rt_variable(te, tf): tf.write("\nh5. h5 task1 *assignee* [TASK_KEY1]" "\nh5. h5 task2 *assignee* [TASK_KEY2]" "\nh5. h5 task3 *assignee*" "\n=description $TASK_KEY1" "\n# Sub-task *assignee*" "\n=description $TASK_KEY2") test_issue_id = 'TEST-RUN-XXXX' te._create_issue_http = MagicMock() te._create_issue_http.return_value = test_issue_id te.update_issue_desc = MagicMock() expected_result = [{'assignee': 'assignee', 'markup': 'h5.', 'summary': 'h5 task1', 'rt_ext': 'TASK_KEY1', 'issuetype': 'Task', 'line_number': 2}, {'assignee': 'assignee', 'markup': 'h5.', 'summary': 'h5 task2', 'rt_ext': 'TASK_KEY2', 'issuetype': 'Task', 'line_number': 3}, {'assignee': 'assignee', 'markup': 'h5.', 'summary': 'h5 task3', 'issuetype': 'Task', 'description': 'description $TASK_KEY1', 'line_number': 4}, {'assignee': 'assignee', 'markup': '#', 'summary': 'Sub-task', 'issuetype': 'Sub-task', 'description': 'description $TASK_KEY2', 'line_number': 6}] load_result = te.load(tf) assert expected_result == load_result expected_result_load = ('h5. h5 task1 (TEST-RUN-XXXX)\n' 'h5. h5 task2 (TEST-RUN-XXXX)\n' 'h5. h5 task3 (TEST-RUN-XXXX)\n' 'description TEST-RUN-XXXX\n' '# Sub-task (TEST-RUN-XXXX)') assert te.create_tasks(load_result) == expected_result_load te.update_issue_desc.assert_called_once_with(test_issue_id, ('description TEST-RUN-XXXX\n' '# Sub-task (TEST-RUN-XXXX)') ) def test_create_issue_add_rt_var(te): test_issue_id = 'TEST-RUN-XXXX' te._create_issue_http = MagicMock() te._create_issue_http.return_value = test_issue_id input_dict = {'assignee': 'assignee', 'markup': 'h5.', 'summary': 'h5 task', 'rt_ext': 'TASK_KEY'} assert test_issue_id == te.create_issue(input_dict) assert {'TASK_KEY': test_issue_id} == te.rt_vars def test_create_tasks_Several_tasks(te, dry_run_key): te.create_issue = MagicMock() te.create_issue.return_value = dry_run_key te.update_issue_desc = MagicMock() te.create_tasks([{'assignee': 'assignee', 'markup': 'h4.', 'description': 'h4 task description', 'summary': 'h4 task', 'issuetype': 'User Story'}, {'issuetype': 'Task', 'assignee': 'assignee', 'markup': 'h5.', 'summary': 'h5 task', 'description': 'h5 task description'}, {'assignee': 'assignee', 'markup': '#', 'description': 'line1 description\nline2 description', 'summary': 'sub-task', 'issuetype': 'Sub-task'}]) assert [call({'assignee': 'assignee', 'markup': 'h4.', 'description': 'h4 task description', 'summary': 'h4 task', 'issuetype': 'User Story'}), call({'issuetype': 'Task', 'assignee': 'assignee', 'markup': 'h5.', 'description': 'h5 task description', 'summary': 'h5 task'}), call({'description': 'line1 description\nline2 description', 'parent': dry_run_key, 'markup': '#', 'summary': 'sub-task', 'assignee': 'assignee', 'issuetype': 'Sub-task'})] == te.create_issue.call_args_list assert te.update_issue_desc.called_once_with(dry_run_key, 'h5 task description\n' '# sub-task ({0})' .format(dry_run_key)) def test_create_tasks_Several_tasks_without_description(te, dry_run_key): te.create_issue = MagicMock() te.create_issue.return_value = dry_run_key te.update_issue_desc = MagicMock() input_list = [{'issuetype': 'User Story', 'assignee': 'assignee', 'markup': 'h4.', 'summary': 'h4 task'}, {'issuetype': 'Task', 'assignee': 'assignee', 'markup': 'h5.', 'summary': 'h5 task'}, {'issuetype': 'Sub-task', 'assignee': 'assignee', 'markup': '#', 'summary': 'sub-task'}] assert ('h4. 
h4 task ({0})\nh5. h5 task ({0})\n# sub-task ({0})' .format(dry_run_key)) == te.create_tasks(input_list) assert [call({'issuetype': 'User Story', 'assignee': 'assignee', 'markup': 'h4.', 'summary': 'h4 task'}), call({'issuetype': 'Task', 'assignee': 'assignee', 'markup': 'h5.', 'summary': 'h5 task'}), call({'parent': dry_run_key, 'markup': '#', 'summary': 'sub-task', 'assignee': 'assignee', 'issuetype': 'Sub-task'})] == te.create_issue.call_args_list assert te.update_issue_desc.called_once_with(dry_run_key, '# sub-task ({0})' .format(dry_run_key)) def test_create_tasks_Single_h4_task_with_sub_task(te, dry_run_key): te.create_issue = MagicMock() te.create_issue.return_value = dry_run_key input_list = [{'issuetype': 'User Story', 'assignee': 'assignee', 'markup': 'h4.', 'description': 'h4 task description', 'summary': 'h4 task'}, {'issuetype': 'Sub-task', 'assignee': 'assignee', 'markup': '#', 'summary': 'sub-task'}] assert ('h4. h4 task ({0})\n# sub-task ({0})' .format(dry_run_key)) == te.create_tasks(input_list) assert [call({'issuetype': 'User Story', 'assignee': 'assignee', 'markup': 'h4.', 'description': 'h4 task description', 'summary': 'h4 task'}), call({'issuetype': 'Sub-task', 'assignee': 'assignee', 'markup': '#', 'parent': dry_run_key, 'summary': 'sub-task'})] == te.create_issue.call_args_list def test_create_tasks_Single_h5_task(te, dry_run_key): te.create_issue = MagicMock() te.create_issue.return_value = dry_run_key te.update_issue_desc = MagicMock() input_list = [{'issuetype': 'Task', 'assignee': 'assignee', 'markup': 'h5.', 'description': 'h5 task description', 'summary': 'h5 task'}] assert ('h5. h5 task ({0})\nh5 task description' .format(dry_run_key)) == te.create_tasks(input_list) assert [call({'issuetype': 'Task', 'assignee': 'assignee', 'markup': 'h5.', 'description': 'h5 task description', 'summary': 'h5 task'})] == te.create_issue.call_args_list assert te.update_issue_desc.call_count == 0 def test_create_tasks_Tasks_with_no_h4_task(te, dry_run_key): te.create_issue = MagicMock() te.create_issue.return_value = dry_run_key input_list = [{'issuetype': 'Task', 'assignee': 'assignee', 'markup': 'h5.', 'summary': 'h5 task 1'}, {'issuetype': 'Task', 'assignee': 'assignee', 'markup': 'h5.', 'summary': 'h5 task 2'}, {'issuetype': 'Sub-task', 'assignee': 'assignee', 'markup': '#', 'summary': 'sub-task of h5 task 2'}] assert ('h5. h5 task 1 ({0})\nh5. h5 task 2 ({0})\n' '# sub-task of h5 task 2 ({0})' .format(dry_run_key)) == te.create_tasks(input_list) assert [call({'issuetype': 'Task', 'assignee': 'assignee', 'markup': 'h5.', 'summary': 'h5 task 1'}), call({'issuetype': 'Task', 'assignee': 'assignee', 'markup': 'h5.', 'summary': 'h5 task 2'}), call({'parent': dry_run_key, 'markup': '#', 'summary': 'sub-task of h5 task 2', 'assignee': 'assignee', 'issuetype': 'Sub-task'})] == te.create_issue.call_args_list def test_create_tasks_Tasks_with_text(te, dry_run_key): te._create_issue_http = MagicMock() te._create_issue_http.return_value = dry_run_key input_list = [{'issuetype': 'User Story', 'assignee': 'assignee', 'markup': 'h4.', 'summary': 'h4 task'}, {'assignee': 'assignee', 'markup': 'h5.', 'summary': 'h5 task', 'description': 'h5 desc'}, {'text': 'text line'}, {'assignee': 'assignee', 'markup': '#', 'summary': 'sub-task'}] assert ('h4. h4 task ({0})\nh5. 
h5 task ({0})\nh5 desc\n' 'text line\n# sub-task ({0})' .format(dry_run_key)) == te.create_tasks(input_list) def test_validate_load_raise_error_if_no_project(te): input_list = [{'issuetype': 'Task', 'assignee': 'user1', 'markup': 'h5.', 'summary': 'h5 task'}] te._validate_user = MagicMock() with pytest.raises(TaskExtractorTemplateErrorProject): te.validate_load(input_list) def test_validate_load_call_validate_user(te): input_list = [{'issuetype': 'Task', 'assignee': 'user1', 'markup': 'h5.', 'summary': 'h5 task', 'tmpl_ext': {'project': {'key': 'project1'}}}, {'text': 'sample text'}, {'issuetype': 'Task', 'assignee': 'user2', 'markup': 'h5.', 'summary': 'h5 task', 'tmpl_ext': {'project': {'key': 'project2'}}}] te._validate_user = MagicMock() te.validate_load(input_list) assert te._validate_user.call_args_list == [call('user1', 'project1'), call('user2', 'project2')] def test_subtask_for_existing_h5_task(te, dry_run_key): input_list = [{'markup': '...', 'issue_key': 'JIRA-1234', 'description': 'line1\nline2', 'line_number': 3}, {'assignee': 'assignee', 'markup': '#', 'issuetype': 'Sub-task', 'summary': 'sub-task', 'line_number': 4}] te.create_issue = MagicMock() te.create_issue.return_value = dry_run_key te.update_issue_desc = MagicMock() te.update_issue_desc.return_value = dry_run_key assert te.create_tasks(input_list) == \ "... JIRA-1234\nline1\nline2\n# sub-task ({0})".format(dry_run_key) assert te.create_issue.call_args_list == [call( {'issuetype': 'Sub-task', 'assignee': 'assignee', 'markup': '#', 'parent': 'JIRA-1234', 'summary': 'sub-task', 'line_number': 4})] assert te.update_issue_desc.call_args_list == \ [call('JIRA-1234', u'line1\nline2\n# sub-task ({0})' .format(dry_run_key))] def test_subtask_for_existing_h4_task(te, dry_run_key): input_list = [{'markup': '..', 'issue_key': 'JIRA-1234', 'line_number': 3}, {'assignee': 'assignee', 'markup': '#', 'issuetype': 'Sub-task', 'summary': 'sub-task', 'line_number': 4}] te.create_issue = MagicMock() te.create_issue.return_value = dry_run_key te.update_issue_desc = MagicMock() te.update_issue_desc.return_value = dry_run_key assert te.create_tasks(input_list) == \ ".. JIRA-1234\n# sub-task ({0})".format(dry_run_key) assert te.create_issue.call_args_list == [call( {'issuetype': 'Sub-task', 'assignee': 'assignee', 'markup': '#', 'parent': 'JIRA-1234', 'summary': 'sub-task', 'line_number': 4})]
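

# Illustrative sketch (not one of the original tests): the MagicMock pattern
# these tests rely on, shown against a throwaway object so it runs without the
# ``te``/``tf``/``dry_run_key`` fixtures defined in the project's conftest.
# ``_Creator`` and its method name are hypothetical.
def test_mock_pattern_sketch():
    class _Creator(object):
        def create_issue(self, fields):
            raise NotImplementedError  # would hit the real tracker

    creator = _Creator()
    creator.create_issue = MagicMock(return_value='DRY-RUN-KEY')

    keys = [creator.create_issue({'summary': s}) for s in ('a', 'b')]

    # The mock records every call, so both the return values and the exact
    # argument dicts can be asserted, just as the tests above do.
    assert keys == ['DRY-RUN-KEY', 'DRY-RUN-KEY']
    assert creator.create_issue.call_args_list == [call({'summary': 'a'}),
                                                   call({'summary': 'b'})]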
'''tests for weightstats, compares with replication no failures but needs cleanup update 2012-09-09: added test after fixing bug in covariance TODOs: - I do not remember what all the commented out code is doing - should be refactored to use generator or inherited tests - still gaps in test coverage - value/diff in ttest_ind is tested in test_tost.py - what about pandas data structures? Author: Josef Perktold License: BSD (3-clause) ''' import numpy as np from scipy import stats import pandas as pd from numpy.testing import assert_, assert_almost_equal, assert_allclose from statsmodels.stats.weightstats import (DescrStatsW, CompareMeans, ttest_ind, ztest, zconfint) from statsmodels.tools.testing import Holder # Mixin for tests against other packages. class CheckExternalMixin(object): @classmethod def get_descriptives(cls, ddof=0): cls.descriptive = DescrStatsW(cls.data, cls.weights, ddof) # TODO: not a test, belongs elsewhere? @classmethod def save_data(cls, fname="data.csv"): # Utility to get data into another package. df = pd.DataFrame(index=np.arange(len(cls.weights))) df["weights"] = cls.weights if cls.data.ndim == 1: df["data1"] = cls.data else: for k in range(cls.data.shape[1]): df["data%d" % (k + 1)] = cls.data[:, k] df.to_csv(fname) def test_mean(self): mn = self.descriptive.mean assert_allclose(mn, self.mean, rtol=1e-4) def test_sum(self): sm = self.descriptive.sum assert_allclose(sm, self.sum, rtol=1e-4) def test_var(self): # Use vardef=wgt option in SAS to match var = self.descriptive.var assert_allclose(var, self.var, rtol=1e-4) def test_std(self): # Use vardef=wgt option in SAS to match std = self.descriptive.std assert_allclose(std, self.std, rtol=1e-4) def test_sem(self): # Use default vardef in SAS to match; only makes sense if # weights sum to n. 
if not hasattr(self, "sem"): return sem = self.descriptive.std_mean assert_allclose(sem, self.sem, rtol=1e-4) def test_quantiles(self): quant = np.asarray(self.quantiles, dtype=np.float64) for return_pandas in False, True: qtl = self.descriptive.quantile(self.quantile_probs, return_pandas=return_pandas) qtl = np.asarray(qtl, dtype=np.float64) assert_allclose(qtl, quant, rtol=1e-4) class TestSim1(CheckExternalMixin): # 1d data # Taken from SAS mean = 0.401499 sum = 12.9553441 var = 1.08022 std = 1.03933 quantiles = np.r_[-1.81098, -0.84052, 0.32859, 0.77808, 2.93431] @classmethod def setup_class(cls): np.random.seed(9876789) cls.data = np.random.normal(size=20) cls.weights = np.random.uniform(0, 3, size=20) cls.quantile_probs = np.r_[0, 0.1, 0.5, 0.75, 1] cls.get_descriptives() class TestSim1t(CheckExternalMixin): # 1d data with ties # Taken from SAS mean = 5.05103296 sum = 156.573464 var = 9.9711934 std = 3.15771965 quantiles = np.r_[0, 1, 5, 8, 9] @classmethod def setup_class(cls): np.random.seed(9876789) cls.data = np.random.randint(0, 10, size=20) cls.data[15:20] = cls.data[0:5] cls.data[18:20] = cls.data[15:17] cls.weights = np.random.uniform(0, 3, size=20) cls.quantile_probs = np.r_[0, 0.1, 0.5, 0.75, 1] cls.get_descriptives() class TestSim1n(CheckExternalMixin): # 1d data with weights summing to n so we can check the standard # error of the mean # Taken from SAS mean = -0.3131058 sum = -6.2621168 var = 0.49722696 std = 0.70514322 sem = 0.15767482 quantiles = np.r_[-1.61593, -1.45576, -0.24356, 0.16770, 1.18791] @classmethod def setup_class(cls): np.random.seed(4342) cls.data = np.random.normal(size=20) cls.weights = np.random.uniform(0, 3, size=20) cls.weights *= 20 / cls.weights.sum() cls.quantile_probs = np.r_[0, 0.1, 0.5, 0.75, 1] cls.get_descriptives(1) class TestSim2(CheckExternalMixin): # 2d data # Taken from SAS mean = [-0.2170406, -0.2387543] sum = [-6.8383999, -7.5225444] var = [1.77426344, 0.61933542] std = [1.3320148, 0.78697867] quantiles = np.column_stack( (np.r_[-2.55277, -1.40479, -0.61040, 0.52740, 2.66246], np.r_[-1.49263, -1.15403, -0.16231, 0.16464, 1.83062])) @classmethod def setup_class(cls): np.random.seed(2249) cls.data = np.random.normal(size=(20, 2)) cls.weights = np.random.uniform(0, 3, size=20) cls.quantile_probs = np.r_[0, 0.1, 0.5, 0.75, 1] cls.get_descriptives() class TestWeightstats(object): @classmethod def setup_class(cls): np.random.seed(9876789) n1, n2 = 20, 20 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1) x2 = m2 + np.random.randn(n2) x1_2d = m1 + np.random.randn(n1, 3) x2_2d = m2 + np.random.randn(n2, 3) w1 = np.random.randint(1,4, n1) w2 = np.random.randint(1,4, n2) cls.x1, cls.x2 = x1, x2 cls.w1, cls.w2 = w1, w2 cls.x1_2d, cls.x2_2d = x1_2d, x2_2d def test_weightstats_1(self): x1, x2 = self.x1, self.x2 w1, w2 = self.w1, self.w2 w1_ = 2. * np.ones(len(x1)) w2_ = 2. 
* np.ones(len(x2)) d1 = DescrStatsW(x1) # print ttest_ind(x1, x2) # print ttest_ind(x1, x2, usevar='unequal') # #print ttest_ind(x1, x2, usevar='unequal') # print stats.ttest_ind(x1, x2) # print ttest_ind(x1, x2, usevar='unequal', alternative='larger') # print ttest_ind(x1, x2, usevar='unequal', alternative='smaller') # print ttest_ind(x1, x2, usevar='unequal', weights=(w1_, w2_)) # print stats.ttest_ind(np.r_[x1, x1], np.r_[x2,x2]) assert_almost_equal(ttest_ind(x1, x2, weights=(w1_, w2_))[:2], stats.ttest_ind(np.r_[x1, x1], np.r_[x2, x2])) def test_weightstats_2(self): x1, x2 = self.x1, self.x2 w1, w2 = self.w1, self.w2 d1 = DescrStatsW(x1) d1w = DescrStatsW(x1, weights=w1) d2w = DescrStatsW(x2, weights=w2) x1r = d1w.asrepeats() x2r = d2w.asrepeats() # print 'random weights' # print ttest_ind(x1, x2, weights=(w1, w2)) # print stats.ttest_ind(x1r, x2r) assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2], stats.ttest_ind(x1r, x2r), 14) # not the same as new version with random weights/replication # assert x1r.shape[0] == d1w.sum_weights # assert x2r.shape[0] == d2w.sum_weights assert_almost_equal(x2r.mean(0), d2w.mean, 14) assert_almost_equal(x2r.var(), d2w.var, 14) assert_almost_equal(x2r.std(), d2w.std, 14) # note: the following is for 1d assert_almost_equal(np.cov(x2r, bias=1), d2w.cov, 14) # assert_almost_equal(np.corrcoef(np.x2r), d2w.corrcoef, 19) # TODO: exception in corrcoef (scalar case) # one-sample tests # print d1.ttest_mean(3) # print stats.ttest_1samp(x1, 3) # print d1w.ttest_mean(3) # print stats.ttest_1samp(x1r, 3) assert_almost_equal(d1.ttest_mean(3)[:2], stats.ttest_1samp(x1, 3), 11) assert_almost_equal(d1w.ttest_mean(3)[:2], stats.ttest_1samp(x1r, 3), 11) def test_weightstats_3(self): x1_2d, x2_2d = self.x1_2d, self.x2_2d w1, w2 = self.w1, self.w2 d1w_2d = DescrStatsW(x1_2d, weights=w1) d2w_2d = DescrStatsW(x2_2d, weights=w2) x1r_2d = d1w_2d.asrepeats() x2r_2d = d2w_2d.asrepeats() assert_almost_equal(x2r_2d.mean(0), d2w_2d.mean, 14) assert_almost_equal(x2r_2d.var(0), d2w_2d.var, 14) assert_almost_equal(x2r_2d.std(0), d2w_2d.std, 14) assert_almost_equal(np.cov(x2r_2d.T, bias=1), d2w_2d.cov, 14) assert_almost_equal(np.corrcoef(x2r_2d.T), d2w_2d.corrcoef, 14) # print d1w_2d.ttest_mean(3) # #scipy.stats.ttest is also vectorized # print stats.ttest_1samp(x1r_2d, 3) t, p, d = d1w_2d.ttest_mean(3) assert_almost_equal([t, p], stats.ttest_1samp(x1r_2d, 3), 11) # print [stats.ttest_1samp(xi, 3) for xi in x1r_2d.T] cm = CompareMeans(d1w_2d, d2w_2d) ressm = cm.ttest_ind() resss = stats.ttest_ind(x1r_2d, x2r_2d) assert_almost_equal(ressm[:2], resss, 14) # does not work for 2d, levene does not use weights # cm = CompareMeans(d1w_2d, d2w_2d) # ressm = cm.test_equal_var() # resss = stats.levene(x1r_2d, x2r_2d) # assert_almost_equal(ressm[:2], resss, 14) def test_weightstats_ddof_tests(self): # explicit test that ttest and confint are independent of ddof # one sample case x1_2d = self.x1_2d w1 = self.w1 d1w_d0 = DescrStatsW(x1_2d, weights=w1, ddof=0) d1w_d1 = DescrStatsW(x1_2d, weights=w1, ddof=1) d1w_d2 = DescrStatsW(x1_2d, weights=w1, ddof=2) # check confint independent of user ddof res0 = d1w_d0.ttest_mean() res1 = d1w_d1.ttest_mean() res2 = d1w_d2.ttest_mean() # concatenate into one array with np.r_ assert_almost_equal(np.r_[res1], np.r_[res0], 14) assert_almost_equal(np.r_[res2], np.r_[res0], 14) res0 = d1w_d0.ttest_mean(0.5) res1 = d1w_d1.ttest_mean(0.5) res2 = d1w_d2.ttest_mean(0.5) assert_almost_equal(np.r_[res1], np.r_[res0], 14) assert_almost_equal(np.r_[res2], 
np.r_[res0], 14) # check confint independent of user ddof res0 = d1w_d0.tconfint_mean() res1 = d1w_d1.tconfint_mean() res2 = d1w_d2.tconfint_mean() assert_almost_equal(res1, res0, 14) assert_almost_equal(res2, res0, 14) def test_comparemeans_convenient_interface(self): x1_2d, x2_2d = self.x1_2d, self.x2_2d d1 = DescrStatsW(x1_2d) d2 = DescrStatsW(x2_2d) cm1 = CompareMeans(d1, d2) # smoke test for summary from statsmodels.iolib.table import SimpleTable for use_t in [True, False]: for usevar in ['pooled', 'unequal']: smry = cm1.summary(use_t=use_t, usevar=usevar) assert_(isinstance(smry, SimpleTable)) # test for from_data method cm2 = CompareMeans.from_data(x1_2d, x2_2d) assert_(str(cm1.summary()) == str(cm2.summary())) def test_comparemeans_convenient_interface_1d(self): # same as above for 2d, just use 1d data instead x1_2d, x2_2d = self.x1, self.x2 d1 = DescrStatsW(x1_2d) d2 = DescrStatsW(x2_2d) cm1 = CompareMeans(d1, d2) # smoke test for summary from statsmodels.iolib.table import SimpleTable for use_t in [True, False]: for usevar in ['pooled', 'unequal']: smry = cm1.summary(use_t=use_t, usevar=usevar) assert_(isinstance(smry, SimpleTable)) # test for from_data method cm2 = CompareMeans.from_data(x1_2d, x2_2d) assert_(str(cm1.summary()) == str(cm2.summary())) class CheckWeightstats1dMixin(object): def test_basic(self): x1r = self.x1r d1w = self.d1w assert_almost_equal(x1r.mean(0), d1w.mean, 14) assert_almost_equal(x1r.var(0, ddof=d1w.ddof), d1w.var, 14) assert_almost_equal(x1r.std(0, ddof=d1w.ddof), d1w.std, 14) var1 = d1w.var_ddof(ddof=1) assert_almost_equal(x1r.var(0, ddof=1), var1, 14) std1 = d1w.std_ddof(ddof=1) assert_almost_equal(x1r.std(0, ddof=1), std1, 14) assert_almost_equal(np.cov(x1r.T, bias=1-d1w.ddof), d1w.cov, 14) # assert_almost_equal(np.corrcoef(x1r.T), d1w.corrcoef, 14) def test_ttest(self): x1r = self.x1r d1w = self.d1w assert_almost_equal(d1w.ttest_mean(3)[:2], stats.ttest_1samp(x1r, 3), 11) # def # assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2], # stats.ttest_ind(x1r, x2r), 14) def test_ttest_2sample(self): x1, x2 = self.x1, self.x2 x1r, x2r = self.x1r, self.x2r w1, w2 = self.w1, self.w2 # Note: stats.ttest_ind handles 2d/nd arguments res_sp = stats.ttest_ind(x1r, x2r) assert_almost_equal(ttest_ind(x1, x2, weights=(w1, w2))[:2], res_sp, 14) # check correct ttest independent of user ddof cm = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0), DescrStatsW(x2, weights=w2, ddof=1)) assert_almost_equal(cm.ttest_ind()[:2], res_sp, 14) cm = CompareMeans(DescrStatsW(x1, weights=w1, ddof=1), DescrStatsW(x2, weights=w2, ddof=2)) assert_almost_equal(cm.ttest_ind()[:2], res_sp, 14) cm0 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0), DescrStatsW(x2, weights=w2, ddof=0)) cm1 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=0), DescrStatsW(x2, weights=w2, ddof=1)) cm2 = CompareMeans(DescrStatsW(x1, weights=w1, ddof=1), DescrStatsW(x2, weights=w2, ddof=2)) res0 = cm0.ttest_ind(usevar='unequal') res1 = cm1.ttest_ind(usevar='unequal') res2 = cm2.ttest_ind(usevar='unequal') assert_almost_equal(res1, res0, 14) assert_almost_equal(res2, res0, 14) # check confint independent of user ddof res0 = cm0.tconfint_diff(usevar='pooled') res1 = cm1.tconfint_diff(usevar='pooled') res2 = cm2.tconfint_diff(usevar='pooled') assert_almost_equal(res1, res0, 14) assert_almost_equal(res2, res0, 14) res0 = cm0.tconfint_diff(usevar='unequal') res1 = cm1.tconfint_diff(usevar='unequal') res2 = cm2.tconfint_diff(usevar='unequal') assert_almost_equal(res1, res0, 14) assert_almost_equal(res2, 
res0, 14) def test_confint_mean(self): # compare confint_mean with ttest d1w = self.d1w alpha = 0.05 low, upp = d1w.tconfint_mean() t, p, d = d1w.ttest_mean(low) assert_almost_equal(p, alpha * np.ones(p.shape), 8) t, p, d = d1w.ttest_mean(upp) assert_almost_equal(p, alpha * np.ones(p.shape), 8) t, p, d = d1w.ttest_mean(np.vstack((low, upp))) assert_almost_equal(p, alpha * np.ones(p.shape), 8) class CheckWeightstats2dMixin(CheckWeightstats1dMixin): def test_corr(self): x1r = self.x1r d1w = self.d1w assert_almost_equal(np.corrcoef(x1r.T), d1w.corrcoef, 14) class TestWeightstats1d_ddof(CheckWeightstats1dMixin): @classmethod def setup_class(cls): np.random.seed(9876789) n1, n2 = 20, 20 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1, 1) x2 = m2 + np.random.randn(n2, 1) w1 = np.random.randint(1, 4, n1) w2 = np.random.randint(1, 4, n2) cls.x1, cls.x2 = x1, x2 cls.w1, cls.w2 = w1, w2 cls.d1w = DescrStatsW(x1, weights=w1, ddof=1) cls.d2w = DescrStatsW(x2, weights=w2, ddof=1) cls.x1r = cls.d1w.asrepeats() cls.x2r = cls.d2w.asrepeats() class TestWeightstats2d(CheckWeightstats2dMixin): @classmethod def setup_class(cls): np.random.seed(9876789) n1, n2 = 20, 20 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1, 3) x2 = m2 + np.random.randn(n2, 3) w1 = np.random.randint(1, 4, n1) w2 = np.random.randint(1, 4, n2) cls.x1, cls.x2 = x1, x2 cls.w1, cls.w2 = w1, w2 cls.d1w = DescrStatsW(x1, weights=w1) cls.d2w = DescrStatsW(x2, weights=w2) cls.x1r = cls.d1w.asrepeats() cls.x2r = cls.d2w.asrepeats() class TestWeightstats2d_ddof(CheckWeightstats2dMixin): @classmethod def setup_class(cls): np.random.seed(9876789) n1, n2 = 20, 20 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1, 3) x2 = m2 + np.random.randn(n2, 3) w1 = np.random.randint(1, 4, n1) w2 = np.random.randint(1, 4, n2) cls.x1, cls.x2 = x1, x2 cls.w1, cls.w2 = w1, w2 cls.d1w = DescrStatsW(x1, weights=w1, ddof=1) cls.d2w = DescrStatsW(x2, weights=w2, ddof=1) cls.x1r = cls.d1w.asrepeats() cls.x2r = cls.d2w.asrepeats() class TestWeightstats2d_nobs(CheckWeightstats2dMixin): @classmethod def setup_class(cls): np.random.seed(9876789) n1, n2 = 20, 30 m1, m2 = 1, 1.2 x1 = m1 + np.random.randn(n1, 3) x2 = m2 + np.random.randn(n2, 3) w1 = np.random.randint(1, 4, n1) w2 = np.random.randint(1, 4, n2) cls.x1, cls.x2 = x1, x2 cls.w1, cls.w2 = w1, w2 cls.d1w = DescrStatsW(x1, weights=w1, ddof=0) cls.d2w = DescrStatsW(x2, weights=w2, ddof=1) cls.x1r = cls.d1w.asrepeats() cls.x2r = cls.d2w.asrepeats() def test_ttest_ind_with_uneq_var(): # from scipy # check vs. 
R a = (1, 2, 3) b = (1.1, 2.9, 4.2) pr = 0.53619490753126731 tr = -0.68649512735572582 t, p, df = ttest_ind(a, b, usevar='unequal') assert_almost_equal([t, p], [tr, pr], 13) a = (1, 2, 3, 4) pr = 0.84354139131608286 tr = -0.2108663315950719 t, p, df = ttest_ind(a, b, usevar='unequal') assert_almost_equal([t, p], [tr, pr], 13) def test_ztest_ztost(): # compare weightstats with separately tested proportion ztest ztost import statsmodels.stats.proportion as smprop x1 = [0, 1] w1 = [5, 15] res2 = smprop.proportions_ztest(15, 20., value=0.5) d1 = DescrStatsW(x1, w1) res1 = d1.ztest_mean(0.5) assert_allclose(res1, res2, rtol=0.03, atol=0.003) d2 = DescrStatsW(x1, np.array(w1)*21./20) res1 = d2.ztest_mean(0.5) assert_almost_equal(res1, res2, decimal=12) res1 = d2.ztost_mean(0.4, 0.6) res2 = smprop.proportions_ztost(15, 20., 0.4, 0.6) assert_almost_equal(res1[0], res2[0], decimal=12) x2 = [0, 1] w2 = [10, 10] # d2 = DescrStatsW(x1, np.array(w1)*21./20) d2 = DescrStatsW(x2, w2) res1 = ztest(d1.asrepeats(), d2.asrepeats()) res2 = smprop.proportions_chisquare(np.asarray([15, 10]), np.asarray([20., 20])) # TODO: check this is this difference expected?, see test_proportion assert_allclose(res1[1], res2[1], rtol=0.03) res1a = CompareMeans(d1, d2).ztest_ind() assert_allclose(res1a[1], res2[1], rtol=0.03) assert_almost_equal(res1a, res1, decimal=12) # test for ztest and z confidence interval against R BSDA z.test # Note: I needed to calculate the pooled standard deviation for R # std = np.std(np.concatenate((x-x.mean(),y-y.mean())), ddof=2) # > zt = z.test(x, sigma.x=0.57676142668828667, y, sigma.y=0.57676142668828667) # > cat_items(zt, "ztest.") ztest_ = Holder() ztest_.statistic = 6.55109865675183 ztest_.p_value = 5.711530850508982e-11 ztest_.conf_int = np.array([1.230415246535603, 2.280948389828034]) ztest_.estimate = np.array([7.01818181818182, 5.2625]) ztest_.null_value = 0 ztest_.alternative = 'two.sided' ztest_.method = 'Two-sample z-Test' ztest_.data_name = 'x and y' # > zt = z.test(x, sigma.x=0.57676142668828667, y, # sigma.y=0.57676142668828667, alternative="less") # > cat_items(zt, "ztest_smaller.") ztest_smaller = Holder() ztest_smaller.statistic = 6.55109865675183 ztest_smaller.p_value = 0.999999999971442 ztest_smaller.conf_int = np.array([np.nan, 2.196499421109045]) ztest_smaller.estimate = np.array([7.01818181818182, 5.2625]) ztest_smaller.null_value = 0 ztest_smaller.alternative = 'less' ztest_smaller.method = 'Two-sample z-Test' ztest_smaller.data_name = 'x and y' # > zt = z.test(x, sigma.x=0.57676142668828667, y, # sigma.y=0.57676142668828667, alternative="greater") # > cat_items(zt, "ztest_larger.") ztest_larger = Holder() ztest_larger.statistic = 6.55109865675183 ztest_larger.p_value = 2.855760072861813e-11 ztest_larger.conf_int = np.array([1.314864215254592, np.nan]) ztest_larger.estimate = np.array([7.01818181818182, 5.2625]) ztest_larger.null_value = 0 ztest_larger.alternative = 'greater' ztest_larger.method = 'Two-sample z-Test' ztest_larger.data_name = 'x and y' # > zt = z.test(x, sigma.x=0.57676142668828667, y, # sigma.y=0.57676142668828667, mu=1, alternative="two.sided") # > cat_items(zt, "ztest_mu.") ztest_mu = Holder() ztest_mu.statistic = 2.81972854805176 ztest_mu.p_value = 0.00480642898427981 ztest_mu.conf_int = np.array([1.230415246535603, 2.280948389828034]) ztest_mu.estimate = np.array([7.01818181818182, 5.2625]) ztest_mu.null_value = 1 ztest_mu.alternative = 'two.sided' ztest_mu.method = 'Two-sample z-Test' ztest_mu.data_name = 'x and y' # > zt = z.test(x, 
sigma.x=0.57676142668828667, y, # sigma.y=0.57676142668828667, mu=1, alternative="greater") # > cat_items(zt, "ztest_larger_mu.") ztest_larger_mu = Holder() ztest_larger_mu.statistic = 2.81972854805176 ztest_larger_mu.p_value = 0.002403214492139871 ztest_larger_mu.conf_int = np.array([1.314864215254592, np.nan]) ztest_larger_mu.estimate = np.array([7.01818181818182, 5.2625]) ztest_larger_mu.null_value = 1 ztest_larger_mu.alternative = 'greater' ztest_larger_mu.method = 'Two-sample z-Test' ztest_larger_mu.data_name = 'x and y' # > zt = z.test(x, sigma.x=0.57676142668828667, y, # sigma.y=0.57676142668828667, mu=2, alternative="less") # > cat_items(zt, "ztest_smaller_mu.") ztest_smaller_mu = Holder() ztest_smaller_mu.statistic = -0.911641560648313 ztest_smaller_mu.p_value = 0.1809787183191324 ztest_smaller_mu.conf_int = np.array([np.nan, 2.196499421109045]) ztest_smaller_mu.estimate = np.array([7.01818181818182, 5.2625]) ztest_smaller_mu.null_value = 2 ztest_smaller_mu.alternative = 'less' ztest_smaller_mu.method = 'Two-sample z-Test' ztest_smaller_mu.data_name = 'x and y' # > zt = z.test(x, sigma.x=0.46436662631627995, mu=6.4, # alternative="two.sided") # > cat_items(zt, "ztest_mu_1s.") ztest_mu_1s = Holder() ztest_mu_1s.statistic = 4.415212090914452 ztest_mu_1s.p_value = 1.009110038015147e-05 ztest_mu_1s.conf_int = np.array([6.74376372125119, 7.29259991511245]) ztest_mu_1s.estimate = 7.01818181818182 ztest_mu_1s.null_value = 6.4 ztest_mu_1s.alternative = 'two.sided' ztest_mu_1s.method = 'One-sample z-Test' ztest_mu_1s.data_name = 'x' # > zt = z.test(x, sigma.x=0.46436662631627995, mu=7.4, alternative="less") # > cat_items(zt, "ztest_smaller_mu_1s.") ztest_smaller_mu_1s = Holder() ztest_smaller_mu_1s.statistic = -2.727042762035397 ztest_smaller_mu_1s.p_value = 0.00319523783881176 ztest_smaller_mu_1s.conf_int = np.array([np.nan, 7.248480744895716]) ztest_smaller_mu_1s.estimate = 7.01818181818182 ztest_smaller_mu_1s.null_value = 7.4 ztest_smaller_mu_1s.alternative = 'less' ztest_smaller_mu_1s.method = 'One-sample z-Test' ztest_smaller_mu_1s.data_name = 'x' # > zt = z.test(x, sigma.x=0.46436662631627995, mu=6.4, alternative="greater") # > cat_items(zt, "ztest_greater_mu_1s.") ztest_larger_mu_1s = Holder() ztest_larger_mu_1s.statistic = 4.415212090914452 ztest_larger_mu_1s.p_value = 5.045550190097003e-06 ztest_larger_mu_1s.conf_int = np.array([6.78788289146792, np.nan]) ztest_larger_mu_1s.estimate = 7.01818181818182 ztest_larger_mu_1s.null_value = 6.4 ztest_larger_mu_1s.alternative = 'greater' ztest_larger_mu_1s.method = 'One-sample z-Test' ztest_larger_mu_1s.data_name = 'x' alternatives = {'less': 'smaller', 'greater': 'larger', 'two.sided': 'two-sided'} class TestZTest(object): # all examples use the same data # no weights used in tests @classmethod def setup_class(cls): cls.x1 = np.array([7.8, 6.6, 6.5, 7.4, 7.3, 7., 6.4, 7.1, 6.7, 7.6, 6.8]) cls.x2 = np.array([4.5, 5.4, 6.1, 6.1, 5.4, 5., 4.1, 5.5]) cls.d1 = DescrStatsW(cls.x1) cls.d2 = DescrStatsW(cls.x2) cls.cm = CompareMeans(cls.d1, cls.d2) def test(self): x1, x2 = self.x1, self.x2 cm = self.cm # tc : test cases for tc in [ztest_, ztest_smaller, ztest_larger, ztest_mu, ztest_smaller_mu, ztest_larger_mu]: zstat, pval = ztest(x1, x2, value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(zstat, tc.statistic, rtol=1e-10) assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16) zstat, pval = cm.ztest_ind(value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(zstat, tc.statistic, rtol=1e-10) 
assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16) # overwrite nan in R's confint tc_conf_int = tc.conf_int.copy() if np.isnan(tc_conf_int[0]): tc_conf_int[0] = - np.inf if np.isnan(tc_conf_int[1]): tc_conf_int[1] = np.inf # Note: value is shifting our confidence interval in zconfint ci = zconfint(x1, x2, value=0, alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int, rtol=1e-10) ci = cm.zconfint_diff(alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int, rtol=1e-10) ci = zconfint(x1, x2, value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int - tc.null_value, rtol=1e-10) # 1 sample test copy-paste d1 = self.d1 for tc in [ztest_mu_1s, ztest_smaller_mu_1s, ztest_larger_mu_1s]: zstat, pval = ztest(x1, value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(zstat, tc.statistic, rtol=1e-10) assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16) zstat, pval = d1.ztest_mean(value=tc.null_value, alternative=alternatives[tc.alternative]) assert_allclose(zstat, tc.statistic, rtol=1e-10) assert_allclose(pval, tc.p_value, rtol=1e-10, atol=1e-16) # overwrite nan in R's confint tc_conf_int = tc.conf_int.copy() if np.isnan(tc_conf_int[0]): tc_conf_int[0] = - np.inf if np.isnan(tc_conf_int[1]): tc_conf_int[1] = np.inf # Note: value is shifting our confidence interval in zconfint ci = zconfint(x1, value=0, alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int, rtol=1e-10) ci = d1.zconfint_mean(alternative=alternatives[tc.alternative]) assert_allclose(ci, tc_conf_int, rtol=1e-10) def test_weightstats_len_1(): x1 = [1] w1 = [1] d1 = DescrStatsW(x1, w1) assert (d1.quantile([0.0, 0.5, 1.0]) == 1).all() def test_weightstats_2d_w1(): x1 = [[1], [2]] w1 = [[1], [2]] d1 = DescrStatsW(x1, w1) assert (d1.quantile([0.5, 1.0]) == 2).all().all() def test_weightstats_2d_w2(): x1 = [[1]] w1 = [[1]] d1 = DescrStatsW(x1, w1) assert (d1.quantile([0, 0.5, 1.0]) == 1).all().all()
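# Illustrative sketch (not one of the tests above): the weighted vs.
# "repeated observations" equivalence that most tests in this module rely
# on.  With integer case weights, DescrStatsW statistics match those of the
# data with every row repeated weight-many times via asrepeats().  Imports
# are repeated so the snippet also runs on its own; the seed values are
# arbitrary.
if __name__ == '__main__':
    import numpy as np
    from numpy.testing import assert_allclose
    from statsmodels.stats.weightstats import DescrStatsW

    x = np.random.RandomState(0).normal(size=10)
    w = np.random.RandomState(1).randint(1, 4, size=10)
    dw = DescrStatsW(x, weights=w)
    xr = dw.asrepeats()                  # data expanded according to weights
    assert_allclose(dw.mean, xr.mean())
    assert_allclose(dw.var, xr.var())    # both use ddof=0 by default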
# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exc from oslo_log import log as logging from oslo_utils import versionutils from nova import db from nova.db.sqlalchemy import api as db_api from nova.db.sqlalchemy import api_models from nova.db.sqlalchemy import models as main_models from nova import exception from nova.i18n import _LE from nova import objects from nova.objects import base from nova.objects import fields KEYPAIR_TYPE_SSH = 'ssh' KEYPAIR_TYPE_X509 = 'x509' LOG = logging.getLogger(__name__) @db_api.api_context_manager.reader def _get_from_db(context, user_id, name=None): query = context.session.query(api_models.KeyPair).\ filter(api_models.KeyPair.user_id == user_id) if name is not None: db_keypair = query.filter(api_models.KeyPair.name == name).\ first() if not db_keypair: raise exception.KeypairNotFound(user_id=user_id, name=name) return db_keypair else: return query.all() @db_api.api_context_manager.reader def _get_count_from_db(context, user_id): return context.session.query(api_models.KeyPair).\ filter(api_models.KeyPair.user_id == user_id).\ count() @db_api.api_context_manager.writer def _create_in_db(context, values): kp = api_models.KeyPair() kp.update(values) try: kp.save(context.session) except db_exc.DBDuplicateEntry: raise exception.KeyPairExists(key_name=values['name']) return kp @db_api.api_context_manager.writer def _destroy_in_db(context, user_id, name): result = context.session.query(api_models.KeyPair).\ filter_by(user_id=user_id).\ filter_by(name=name).\ delete() if not result: raise exception.KeypairNotFound(user_id=user_id, name=name) # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class KeyPair(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: String attributes updated to support unicode # Version 1.2: Added keypair type # Version 1.3: Name field is non-null # Version 1.4: Add localonly flag to get_by_name() VERSION = '1.4' fields = { 'id': fields.IntegerField(), 'name': fields.StringField(nullable=False), 'user_id': fields.StringField(nullable=True), 'fingerprint': fields.StringField(nullable=True), 'public_key': fields.StringField(nullable=True), 'type': fields.StringField(nullable=False), } def obj_make_compatible(self, primitive, target_version): super(KeyPair, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 2) and 'type' in primitive: del primitive['type'] @staticmethod def _from_db_object(context, keypair, db_keypair): ignore = {'deleted': False, 'deleted_at': None} for key in keypair.fields: if key in ignore and not hasattr(db_keypair, key): keypair[key] = ignore[key] else: keypair[key] = db_keypair[key] keypair._context = context keypair.obj_reset_changes() return keypair @staticmethod def _get_from_db(context, user_id, name): return _get_from_db(context, user_id, name=name) @staticmethod def 
_destroy_in_db(context, user_id, name): return _destroy_in_db(context, user_id, name) @staticmethod def _create_in_db(context, values): return _create_in_db(context, values) @base.remotable_classmethod def get_by_name(cls, context, user_id, name, localonly=False): db_keypair = None if not localonly: try: db_keypair = cls._get_from_db(context, user_id, name) except exception.KeypairNotFound: pass if db_keypair is None: db_keypair = db.key_pair_get(context, user_id, name) return cls._from_db_object(context, cls(), db_keypair) @base.remotable_classmethod def destroy_by_name(cls, context, user_id, name): try: cls._destroy_in_db(context, user_id, name) except exception.KeypairNotFound: db.key_pair_destroy(context, user_id, name) @base.remotable def create(self): if self.obj_attr_is_set('id'): raise exception.ObjectActionError(action='create', reason='already created') # NOTE(danms): Check to see if it exists in the old DB before # letting them create in the API DB, since we won't get protection # from the UC. try: db.key_pair_get(self._context, self.user_id, self.name) raise exception.KeyPairExists(key_name=self.name) except exception.KeypairNotFound: pass self._create() def _create(self): updates = self.obj_get_changes() db_keypair = self._create_in_db(self._context, updates) self._from_db_object(self._context, self, db_keypair) @base.remotable def destroy(self): try: self._destroy_in_db(self._context, self.user_id, self.name) except exception.KeypairNotFound: db.key_pair_destroy(self._context, self.user_id, self.name) @base.NovaObjectRegistry.register class KeyPairList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # KeyPair <= version 1.1 # Version 1.1: KeyPair <= version 1.2 # Version 1.2: KeyPair <= version 1.3 VERSION = '1.2' fields = { 'objects': fields.ListOfObjectsField('KeyPair'), } @staticmethod def _get_from_db(context, user_id): return _get_from_db(context, user_id) @staticmethod def _get_count_from_db(context, user_id): return _get_count_from_db(context, user_id) @base.remotable_classmethod def get_by_user(cls, context, user_id): api_db_keypairs = cls._get_from_db(context, user_id) main_db_keypairs = db.key_pair_get_all_by_user(context, user_id) return base.obj_make_list(context, cls(context), objects.KeyPair, api_db_keypairs + main_db_keypairs) @base.remotable_classmethod def get_count_by_user(cls, context, user_id): return (cls._get_count_from_db(context, user_id) + db.key_pair_count_by_user(context, user_id)) @db_api.main_context_manager.reader def _count_unmigrated_instances(context): return context.session.query(main_models.InstanceExtra).\ filter_by(keypairs=None).\ filter_by(deleted=0).\ count() @db_api.main_context_manager.reader def _get_main_keypairs(context, limit): return context.session.query(main_models.KeyPair).\ filter_by(deleted=0).\ limit(limit).\ all() def migrate_keypairs_to_api_db(context, count): bad_instances = _count_unmigrated_instances(context) if bad_instances: LOG.error(_LE('Some instances are still missing keypair ' 'information. 
Unable to run keypair migration ' 'at this time.')) return 0, 0 main_keypairs = _get_main_keypairs(context, count) done = 0 for db_keypair in main_keypairs: kp = objects.KeyPair(context=context, user_id=db_keypair.user_id, name=db_keypair.name, fingerprint=db_keypair.fingerprint, public_key=db_keypair.public_key, type=db_keypair.type) try: kp._create() except exception.KeyPairExists: # NOTE(danms): If this got created somehow in the API DB, # then it's newer and we just continue on to destroy the # old one in the cell DB. pass db_api.key_pair_destroy(context, db_keypair.user_id, db_keypair.name) done += 1 return len(main_keypairs), done
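# Illustrative usage sketch (not part of this module): creating a keypair
# through the versioned object defined above and reading it back.  The helper
# name, the demo field values, and the use of an admin request context are
# arbitrary; a configured nova API database is assumed to be reachable.
def _example_keypair_roundtrip():
    from nova import context as nova_context

    ctxt = nova_context.get_admin_context()
    kp = KeyPair(context=ctxt)
    kp.name = 'demo-key'
    kp.user_id = 'demo-user'
    kp.type = KEYPAIR_TYPE_SSH
    kp.fingerprint = 'aa:bb:cc'
    kp.public_key = 'ssh-rsa AAAA... demo'
    kp.create()        # persists via _create_in_db() after the legacy-DB check
    return KeyPair.get_by_name(ctxt, 'demo-user', 'demo-key')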
"""HTTP API implementation. """ import sys from docopt import docopt from flask import Flask from flask import make_response from flask import request import numpy as np import ujson from werkzeug.exceptions import BadRequest from . import __version__ from .fit import activate as activate_base from .fit import fit as fit_base from .interfaces import PredictError from .util import args_from_config from .util import get_config from .util import get_metadata from .util import initialize_config from .util import logger from .util import memory_usage_psutil from .util import PluggableDecorator from .util import process_store from .util import run_job from .util import resolve_dotted_name app = Flask(__name__) def make_ujson_response(obj, status_code=200): """Encodes the given *obj* to json and wraps it in a response. :return: A Flask response. """ json_encoded = ujson.encode(obj, ensure_ascii=False) resp = make_response(json_encoded) resp.mimetype = 'application/json' resp.content_type = 'application/json; charset=utf-8' resp.status_code = status_code return resp class PredictService: """A default :class:`palladium.interfaces.PredictService` implementation. Aims to work out of the box for the most standard use cases. Allows overriding of specific parts of its logic by using granular methods to compose the work. """ types = { 'float': float, 'int': int, 'str': str, 'bool': lambda x: x.lower() == 'true', } def __init__( self, mapping, params=(), entry_point='/predict', decorator_list_name='predict_decorators', predict_proba=False, unwrap_sample=False, **kwargs ): """ :param mapping: A list of query parameters and their type that should be included in the request. These will be processed in the :meth:`sample_from_data` method to construct a sample that can be used for prediction. An example that expects two request parameters called ``pos`` and ``neg`` that are both of type str:: { ... 'mapping': [('pos', 'str'), ('neg', 'str')] ... } :param params: Similarly to *mapping*, this is a list of name and type of parameters that will be passed to the model's :meth:`~palladium.interfaces.Model.predict` method as keyword arguments. :param predict_proba: Instead of returning a single class (the default), when *predict_proba* is set to true, the result will instead contain a list of class probabilities. :param unwrap_sample: When working with text, scikit-learn and others will sometimes expect the input to be a 1d array of strings rather than a 2d array. Setting *unwrap_sample* to true will use this representation. """ self.mapping = mapping self.params = params self.entry_point = entry_point self.decorator_list_name = decorator_list_name self.predict_proba = predict_proba self.unwrap_sample = unwrap_sample vars(self).update(kwargs) def initialize_component(self, config): create_predict_function( self.entry_point, self, self.decorator_list_name, config) def __call__(self, model, request): try: return self.do(model, request) except Exception as e: return self.response_from_exception(e) def do(self, model, request): if request.method == 'GET': single = True samples = np.array([self.sample_from_data(model, request.args)]) else: single = False samples = [] for data in request.json: samples.append(self.sample_from_data(model, data)) samples = np.array(samples) params = self.params_from_data(model, request.args) y_pred = self.predict(model, samples, **params) return self.response_from_prediction(y_pred, single=single) def sample_from_data(self, model, data): """Convert incoming sample *data* into a numpy array. 
:param model: The :class:`~Model` instance to use for making predictions. :param data: A dict-like with the sample's data, typically retrieved from ``request.args`` or similar. """ values = [] for key, type_name in self.mapping: value_type = self.types[type_name] values.append(value_type(data[key])) if self.unwrap_sample: assert len(values) == 1 return np.array(values[0]) else: return np.array(values, dtype=object) def params_from_data(self, model, data): """Retrieve additional parameters (keyword arguments) for ``model.predict`` from request *data*. :param model: The :class:`~Model` instance to use for making predictions. :param data: A dict-like with the parameter data, typically retrieved from ``request.args`` or similar. """ params = {} for key, type_name in self.params: value_type = self.types[type_name] if key in data: params[key] = value_type(data[key]) elif hasattr(model, key): params[key] = getattr(model, key) return params def predict(self, model, sample, **kwargs): if self.predict_proba: return model.predict_proba(sample, **kwargs) else: return model.predict(sample, **kwargs) def response_from_prediction(self, y_pred, single=True): """Turns a model's prediction in *y_pred* into a JSON response. """ result = y_pred.tolist() if single: result = result[0] response = { 'metadata': get_metadata(), 'result': result, } return make_ujson_response(response, status_code=200) def response_from_exception(self, exc): if isinstance(exc, PredictError): return make_ujson_response({ 'metadata': get_metadata( error_code=exc.error_code, error_message=exc.error_message, status="ERROR" ) }, status_code=500) elif isinstance(exc, BadRequest): return make_ujson_response({ 'metadata': get_metadata( error_code=-1, error_message="BadRequest: {}".format(exc.args), status="ERROR" ) }, status_code=400) else: logger.exception("Unexpected error") return make_ujson_response({ 'metadata': get_metadata( error_code=-1, error_message="{}: {}".format( exc.__class__.__name__, str(exc)), status="ERROR" ) }, status_code=500) def predict(model_persister, predict_service): try: model = model_persister.read() response = predict_service(model, request) except Exception as exc: logger.exception("Unexpected error") response = make_ujson_response({ "status": "ERROR", "error_code": -1, "error_message": "{}: {}".format(exc.__class__.__name__, str(exc)), }, status_code=500) return response @app.route('/alive') @PluggableDecorator('alive_decorators') @args_from_config def alive(alive=None): if alive is None: alive = {} mem, mem_vms = memory_usage_psutil() info = { 'memory_usage': mem, # rss, resident set size 'memory_usage_vms': mem_vms, # vms, virtual memory size 'palladium_version': __version__, } info['service_metadata'] = get_config().get('service_metadata', {}) status_code = 200 for attr in alive.get('process_store_required', ()): obj = process_store.get(attr) if obj is not None: obj_info = {} obj_info['updated'] = process_store.mtime[attr].isoformat() if hasattr(obj, '__metadata__'): obj_info['metadata'] = obj.__metadata__ info[attr] = obj_info else: info[attr] = "N/A" status_code = 503 info['process_metadata'] = process_store['process_metadata'] return make_ujson_response(info, status_code=status_code) def create_predict_function( route, predict_service, decorator_list_name, config): """Creates a predict function and registers it to the Flask app using the route decorator. :param str route: Path of the entry point. 
:param palladium.interfaces.PredictService predict_service: The predict service to be registered to this entry point. :param str decorator_list_name: The decorator list to be used for this predict service. It is OK if there is no such entry in the active Palladium config. :return: A predict service function that will be used to process predict requests. """ model_persister = config.get('model_persister') @app.route(route, methods=['GET', 'POST'], endpoint=route) @PluggableDecorator(decorator_list_name) def predict_func(): return predict(model_persister, predict_service) return predict_func def devserver_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Serve the web API for development. Usage: pld-devserver [options] Options: -h --help Show this screen. --host=<host> The host to use [default: 0.0.0.0]. --port=<port> The port to use [default: 5000]. --debug=<debug> Whether or not to use debug mode [default: 0]. """ arguments = docopt(devserver_cmd.__doc__, argv=argv) initialize_config() app.run( host=arguments['--host'], port=int(arguments['--port']), debug=int(arguments['--debug']), ) class PredictStream: """A class that helps make predictions through stdin and stdout. """ def __init__(self): self.model = get_config()['model_persister'].read() self.predict_service = get_config()['predict_service'] def process_line(self, line): predict_service = self.predict_service datas = ujson.loads(line) samples = [predict_service.sample_from_data(self.model, data) for data in datas] samples = np.array(samples) params = predict_service.params_from_data(self.model, datas[0]) return predict_service.predict(self.model, samples, **params) def listen(self, io_in, io_out, io_err): """Listens to provided io stream and writes predictions to output. In case of errors, the error stream will be used. """ for line in io_in: if line.strip().lower() == 'exit': break try: y_pred = self.process_line(line) except Exception as e: io_out.write('[]\n') io_err.write( "Error while processing input row: {}" "{}: {}\n".format(line, type(e), e)) io_err.flush() else: io_out.write(ujson.dumps(y_pred.tolist())) io_out.write('\n') io_out.flush() def stream_cmd(argv=sys.argv[1:]): # pragma: no cover """\ Start the streaming server, which listens to stdin, processes line by line, and returns predictions. The input should consist of a list of json objects, where each object will result in a prediction. Each line is processed in a batch. Example input (must be on a single line): [{"sepal length": 1.0, "sepal width": 1.1, "petal length": 0.7, "petal width": 5}, {"sepal length": 1.0, "sepal width": 8.0, "petal length": 1.4, "petal width": 5}] Example output: ["Iris-virginica","Iris-setosa"] An input line with the word 'exit' will quit the streaming server. Usage: pld-stream [options] Options: -h --help Show this screen. 
""" docopt(stream_cmd.__doc__, argv=argv) initialize_config() stream = PredictStream() stream.listen(sys.stdin, sys.stdout, sys.stderr) @app.route('/list') @PluggableDecorator('list_decorators') @args_from_config def list(model_persister): info = { 'models': model_persister.list_models(), 'properties': model_persister.list_properties(), } return make_ujson_response(info) @PluggableDecorator('server_fit_decorators') @args_from_config def fit(): param_converters = { 'persist': lambda x: x.lower() in ('1', 't', 'true'), 'activate': lambda x: x.lower() in ('1', 't', 'true'), 'evaluate': lambda x: x.lower() in ('1', 't', 'true'), 'persist_if_better_than': float, } params = { name: typ(request.form[name]) for name, typ in param_converters.items() if name in request.form } thread, job_id = run_job(fit_base, **params) return make_ujson_response({'job_id': job_id}, status_code=200) @PluggableDecorator('update_model_cache_decorators') @args_from_config def update_model_cache(model_persister): method = getattr(model_persister, 'update_cache', None) if method is not None: thread, job_id = run_job(model_persister.update_cache) return make_ujson_response({'job_id': job_id}, status_code=200) else: return make_ujson_response({}, status_code=503) @PluggableDecorator('activate_decorators') def activate(): model_version = int(request.form['model_version']) try: activate_base(model_version=model_version) except LookupError: return make_ujson_response({}, status_code=503) else: return list() def add_url_rule(rule, endpoint=None, view_func=None, app=app, **options): if isinstance(view_func, str): view_func = resolve_dotted_name(view_func) app.add_url_rule(rule, endpoint=endpoint, view_func=view_func, **options)
# Copyright 2017 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and """The YCSB client in Python. Usage: # Set up instance and load data into database. # Set up environment variables. You should use your own credentials and gcloud # project. $ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json $ export GCLOUD_PROJECT=gcloud-project-name # Run the benchmark. $ python spanner/benchmark/ycsb.py run cloud_spanner -P pkb/workloada \ -p table=usertable -p cloudspanner.instance=ycsb-542756a4 \ -p recordcount=5000 -p operationcount=100 -p cloudspanner.database=ycsb \ -p num_worker=1 # To make a package so it can work with PerfKitBenchmarker. $ cd spanner; tar -cvzf ycsb-python.0.0.5.tar.gz benchmark/* """ from google.cloud import spanner import argparse import numpy import random import string import threading import timeit OPERATIONS = ['readproportion', 'updateproportion', 'scanproportion', 'insertproportion'] NUM_FIELD = 10 def parse_options(): """Parses options.""" parser = argparse.ArgumentParser() parser.add_argument('command', help='The YCSB command.') parser.add_argument('benchmark', help='The YCSB benchmark.') parser.add_argument('-P', '--workload', action='store', dest='workload', default='', help='The path to a YCSB workload file.') parser.add_argument('-p', '--parameter', action='append', dest='parameters', default=[], help='The key=value pair of parameter.') parser.add_argument('-b', '--num_bucket', action='store', type=int, dest='num_bucket', default=1000, help='The number of buckets in output.') args = parser.parse_args() parameters = {} parameters['command'] = args.command parameters['num_bucket'] = args.num_bucket for parameter in args.parameters: parts = parameter.strip().split('=') parameters[parts[0]] = parts[1] with open(args.workload, 'r') as f: for line in f.readlines(): parts = line.split('=') key = parts[0].strip() if key in OPERATIONS: parameters[key] = parts[1].strip() return parameters def open_database(parameters): """Opens a database specified by the parameters from parse_options().""" spanner_client = spanner.Client() instance_id = parameters['cloudspanner.instance'] instance = spanner_client.instance(instance_id) database_id = parameters['cloudspanner.database'] pool = spanner.BurstyPool(int(parameters['num_worker'])) database = instance.database(database_id, pool=pool) return database def load_keys(database, parameters): """Loads keys from database.""" keys = [] with database.snapshot() as snapshot: results = snapshot.execute_sql( 'SELECT u.id FROM %s u' % parameters['table']) for row in results: keys.append(row[0]) return keys def read(database, table, key): """Does a single read operation.""" with database.snapshot() as snapshot: result = snapshot.execute_sql('SELECT u.* FROM %s u WHERE u.id="%s"' % (table, key)) for row in result: key = row[0] for i in range(NUM_FIELD): field = row[i + 1] def update(database, table, key): """Does a single update operation.""" field = random.randrange(10) value = ''.join(random.choice(string.printable) for i in range(100)) with database.batch() as batch: 
batch.update(table=table, columns=('id', 'field%d' % field), values=[(key, value)]) def do_operation(database, keys, table, operation, latencies_ms): """Does a single operation and records latency.""" key = random.choice(keys) start = timeit.default_timer() if operation == 'read': read(database, table, key) elif operation == 'update': update(database, table, key) else: raise ValueError('Unknown operation: %s' % operation) end = timeit.default_timer() latencies_ms[operation].append((end - start) * 1000) def aggregate_metrics(latencies_ms, duration_ms, num_bucket): """Aggregates metrics.""" overall_op_count = 0 op_counts = {operation : len(latency) for operation, latency in latencies_ms.iteritems()} overall_op_count = sum([op_count for op_count in op_counts.itervalues()]) print('[OVERALL], RunTime(ms), %f' % duration_ms) print('[OVERALL], Throughput(ops/sec), %f' % (float(overall_op_count) / duration_ms * 1000.0)) for operation in op_counts.keys(): operation_upper = operation.upper() print('[%s], Operations, %d' % (operation_upper, op_counts[operation])) print('[%s], AverageLatency(us), %f' % ( operation_upper, numpy.average(latencies_ms[operation]) * 1000.0)) print('[%s], LatencyVariance(us), %f' % ( operation_upper, numpy.var(latencies_ms[operation]) * 1000.0)) print('[%s], MinLatency(us), %f' % ( operation_upper, min(latencies_ms[operation]) * 1000.0)) print('[%s], MaxLatency(us), %f' % ( operation_upper, max(latencies_ms[operation]) * 1000.0)) print('[%s], 95thPercentileLatency(us), %f' % ( operation_upper, numpy.percentile(latencies_ms[operation], 95.0) * 1000.0)) print('[%s], 99thPercentileLatency(us), %f' % ( operation_upper, numpy.percentile(latencies_ms[operation], 99.0) * 1000.0)) print('[%s], 99.9thPercentileLatency(us), %f' % ( operation_upper, numpy.percentile(latencies_ms[operation], 99.9) * 1000.0)) print('[%s], Return=OK, %d' % (operation_upper, op_counts[operation])) latency_array = numpy.array(latencies_ms[operation]) for j in range(num_bucket): print('[%s], %d, %d' % ( operation_upper, j, ((j <= latency_array) & (latency_array < (j + 1))).sum())) print('[%s], >%d, %d' % (operation_upper, num_bucket, (num_bucket <= latency_array).sum())) class WorkloadThread(threading.Thread): """A single thread running workload.""" def __init__(self, database, keys, parameters, total_weight, weights, operations): threading.Thread.__init__(self) self._database = database self._keys = keys self._parameters = parameters self._total_weight = total_weight self._weights = weights self._operations = operations self._latencies_ms = {} for operation in self._operations: self._latencies_ms[operation] = [] def run(self): """Run a single thread of the workload.""" i = 0 operation_count = int(self._parameters['operationcount']) while i < operation_count: i += 1 weight = random.uniform(0, self._total_weight) for j in range(len(self._weights)): if weight <= self._weights[j]: do_operation(self._database, self._keys, self._parameters['table'], self._operations[j], self._latencies_ms) break def latencies_ms(self): """Returns the latencies.""" return self._latencies_ms def run_workload(database, keys, parameters): """Runs workload against the database.""" total_weight = 0.0 weights = [] operations = [] latencies_ms = {} for operation in OPERATIONS: weight = float(parameters[operation]) if weight <= 0.0: continue total_weight += weight op_code = operation.split('proportion')[0] operations.append(op_code) weights.append(total_weight) latencies_ms[op_code] = [] threads = [] start = timeit.default_timer() for i 
in range(int(parameters['num_worker'])): thread = WorkloadThread(database, keys, parameters, total_weight, weights, operations) thread.start() threads.append(thread) for thread in threads: thread.join() end = timeit.default_timer() for thread in threads: thread_latencies_ms = thread.latencies_ms() for key in latencies_ms.keys(): latencies_ms[key].extend(thread_latencies_ms[key]) aggregate_metrics(latencies_ms, (end - start) * 1000.0, parameters['num_bucket']) if __name__ == '__main__': parameters = parse_options() if parameters['command'] == 'run': if 'cloudspanner.channels' in parameters: assert parameters['cloudspanner.channels'] == 1, ( 'Python doesn\'t support channels > 1.') database = open_database(parameters) keys = load_keys(database, parameters) run_workload(database, keys, parameters) else: raise ValueError('Unknown command %s.' % parameters['command'])
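# Illustrative sketch (not used by the benchmark itself): the cumulative-
# weight draw performed in WorkloadThread.run().  run_workload() stores
# running totals in 'weights', so the first threshold greater than or equal
# to a uniform draw selects the operation, giving each operation a share
# proportional to its configured proportion.  The numbers below are arbitrary.
def _example_weighted_choice():
    operations = ['read', 'update']
    weights = [0.7, 1.0]                 # cumulative: 70% reads, 30% updates
    draw = random.uniform(0, weights[-1])
    for operation, threshold in zip(operations, weights):
        if draw <= threshold:
            return operation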
########################################################################## #Copyright 2015 Rasmus Dall # # # #Licensed under the Apache License, Version 2.0 (the "License"); # #you may not use this file except in compliance with the License. # #You may obtain a copy of the License at # # # #http://www.apache.org/licenses/LICENSE-2.0 # # # #Unless required by applicable law or agreed to in writing, software # #distributed under the License is distributed on an "AS IS" BASIS, # #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.# #See the License for the specific language governing permissions and # #limitations under the License. # ########################################################################## import argparse from error_messages import SiReError class CombilexPhonemes(object): def __init__(self): self.phonemes = {} #The meaning of each element of the list is #[0] - VOC: Vowel (v) or Consonant (c) #[1] - VL: Vowel length: short (sh) long (l) dipthong (d) schwa (sc) consonant (c) #[2] - VH: Vowel height: high (h) mid (m) low (l) consonant (c) #[3] - VF: Vowel frontness: front (f) mid (m) back (b) consonant (c) #[4] - VL: Vowel lip round: Rounded (r) unrounded (u) consonant (c) #[5] - CT: Consonant Type: stop (s) fricative (f) affricative (af) nasal (n) liquid (l) approximant (ap) vowel (v) #[6] - CA: Consonant Place of Articulation: labial (la) alveolar (a) palatal (p) labio-dental (ld) dental (d) velar (ve) glottal (g) vowel (vo) #[7] - V: Voicing: Voiced (v) unvoiced (u) self.phonemes["p"] = ["c", "c", "c", "c", "c","s","la", "u"] #p self.phonemes["t"] = ["c", "c", "c", "c", "c","s","a", "u"] #t #self.phonemes["?"] = ["c", "c", "c", "c", "c","s","g", "v"] #glottal stop self.phonemes["G"] = ["c", "c", "c", "c", "c","s","g", "v"] #glottal stop self.phonemes["t^"] = ["c", "c", "c", "c", "c","ap","a", "v"] #t^ - not sure what else to call it to clarify, not used often in english at all self.phonemes["g"] = ["c", "c", "c", "c", "c","s","ve", "v"] #g self.phonemes["k"] = ["c", "c", "c", "c", "c","s","ve", "u"] #k self.phonemes["m"] = ["c", "c", "c", "c", "c","n","la", "v"] #m self.phonemes["b"] = ["c", "c", "c", "c", "c","s","la", "v"] #b self.phonemes["d"] = ["c", "c", "c", "c", "c","s","a", "v"] #d self.phonemes["x"] = ["c", "c", "c", "c", "c","f","ve", "u"] #not "ex" but the "ch" sound in "loch" (as Scots pronounce it) self.phonemes["tS"] = ["c", "c", "c", "c", "c","af","p", "u"] #"ch" in e.g. "achieve" self.phonemes["dZ"] = ["c", "c", "c", "c", "c","af","p", "v"] #"g" in e.g. 
"anthology" self.phonemes["s"] = ["c", "c", "c", "c", "c","f","a", "u"] #s self.phonemes["z"] = ["c", "c", "c", "c", "c","f","a", "v"] #z (not "zed" but "zee") self.phonemes["S"] = ["c", "c", "c", "c", "c","f","p", "u"] #"sh" as in "ship" self.phonemes["Z"] = ["c", "c", "c", "c", "c","f","p", "v"] #"g" as in "genre" - pronounced like "zh" self.phonemes["f"] = ["c", "c", "c", "c", "c","f","ld", "u"] #f self.phonemes["v"] = ["c", "c", "c", "c", "c","f","ld", "v"] #v self.phonemes["T"] = ["c", "c", "c", "c", "c","f","d", "u"] #"th" as in "think" self.phonemes["D"] = ["c", "c", "c", "c", "c","f","d", "v"] #the "th" in "that" but pronounced "dh" self.phonemes["h"] = ["c", "c", "c", "c", "c","f","g", "u"] #h self.phonemes["m!"] = ["c", "c", "c", "c", "c","n","la", "v"] #"m" in "-ism" ending of words self.phonemes["n"] = ["c", "c", "c", "c", "c","n","a", "v"] #n self.phonemes["n!"] = ["c", "c", "c", "c", "c","n","a", "v"] #"n" in "-tion" ending of words self.phonemes["N"] = ["c", "c", "c", "c", "c","n","ve", "v"] #"ng" in "-ing" ending (when the g is not pronounced fully) self.phonemes["l"] = ["c", "c", "c", "c", "c","l","a", "v"] #l #self.phonemes["K"] = ["c", "c", "c", "c", "c","l","a", "u"] #ll - Not used in Combilex RPX, GAM or EDI self.phonemes["lw"] = ["c", "c", "c", "c", "c","l","a", "v"] #"l" as in "double" self.phonemes["l!"] = ["c", "c", "c", "c", "c","l","a", "v"] #"l" as in "dwindle" self.phonemes["r"] = ["c", "c", "c", "c", "c","ap","a", "v"] #r self.phonemes["j"] = ["c", "c", "c", "c", "c","l","p", "v"] #y - Why it is not just y in name I don't understand self.phonemes["w"] = ["c", "c", "c", "c", "c","l","la", "v"] #w #self.phonemes["W"] = ["c", "c", "c", "c", "c","f","la", "u"] #hw - Not used in Combilex RPX, GAM or EDI self.phonemes["E"] = ["v", "sh", "m", "f", "u","v","vo", "v"] #e self.phonemes["a"] = ["v", "sh", "l", "f", "u","v","vo", "v"] #a self.phonemes["A"] = ["v", "l", "l", "b", "u","v","vo", "v"] #The second a in "advance" a long "aa" #self.phonemes["Ar"] = ["v", "l", "l", "f", "f","v","vo", "v"] #ar - Not used in Combilex RPX, GAM or EDI self.phonemes["@U"] = ["v", "d", "m", "b", "r","v","vo", "v"] #The "oa" in "float" pronounced more like "ou" self.phonemes["o^"] = ["v", "l", "m", "b", "r","v","vo", "v"] #Often used in French loan-words as a pronounciation variant e.g. "ont" in "montmarte" when pronounced without the "nt" self.phonemes["e^"] = ["v", "l", "m", "f", "r","v","vo", "v"] #Often used in French loan-words as a pronounciation variant e.g. 
"in" in "chargrin" when pronounced without the "n" #self.phonemes["9^"] = ["v", "l", "l", "f", "r","n","vo", "v"] #Not used in Combilex RPX, GAM or EDI self.phonemes["Q"] = ["v", "sh", "l", "b", "r","v","vo", "v"] #"o" in "pot" #self.phonemes["QO"] = ["v", "l", "l", "b", "r","v","vo", "v"] #au - Not used in Combilex RPX, GAM or EDI self.phonemes["O"] = ["v", "l", "m", "b", "r","v","vo", "v"] #"o" in "bomb" sort of "oo" self.phonemes["Or"] = ["v", "l", "m", "b", "r","v","vo", "v"] #Rhotic "o" as in GAM "board" - Not used in Combilex RPX and EDI #self.phonemes["@Ur"] = ["v", "l", "m", "b", "r","v","vo", "v"] #our - Not used in Combilex RPX, EDI or GAM self.phonemes["i"] = ["v", "l", "h", "f", "u","v","vo", "v"] #"ii" as in "-cy" and "-ry" ending #self.phonemes["iy"] = ["v", "s", "h", "f", "u","v","vo", "v"] #iy - Not used in Combilex RPX, GAM or EDI self.phonemes["I"] = ["v", "sh", "h", "f", "u","v","vo", "v"] #"i" as first i in "mini" (second is ii above) self.phonemes["@r"] = ["v", "sc", "m", "m", "u","v","vo", "v"] #@r - Rhotic @ as "e" in "moaner" - Not used in Combilex RPX and EDI (yet somehow VCTK manages it? Perhaps it is a manually added word) self.phonemes["@"] = ["v", "sc", "m", "m", "u","v","vo", "v"] #@/THE schwa self.phonemes["V"] = ["v", "sh", "l", "m", "u","v","vo", "v"] #"u" in "strut", also current "uh" sound self.phonemes["U"] = ["v", "sh", "h", "b", "r","v","vo", "v"] #Short "u" as in "foot" self.phonemes["u"] = ["v", "l", "h", "b", "r","v","vo", "v"] #Long "u" as in "pool" self.phonemes["eI"] = ["v", "d", "m", "f", "u","v","vo", "v"] #ei self.phonemes["aI"] = ["v", "d", "l", "m", "u","v","vo", "v"] #ai #self.phonemes["aIr"] = ["v", "d", "l", "m", "u","v","vo", "v"] #Rhotic "ai" as in GAM "hire" - Not used in Combilex RPX and EDI self.phonemes["ae"] = ["v", "d", "l", "m", "u","v","vo", "v"] #"ae" as "i" in EDI "migrate" - Not used in Combilex RPX and GAM #self.phonemes["aer"] = ["v", "d", "l", "m", "u","v","vo", "v"] #Rhotic aer - Not used in Combilex RPX, EDI and GAM self.phonemes["OI"] = ["v", "d", "m", "b", "r","v","vo", "v"] #"oi" as in "point" #self.phonemes["OIr"] = ["v", "d", "m", "b", "r","v","vo", "v"] #Rhotic oi as in GAM "coir" - Not used in Combilex RPX and EDI self.phonemes["aU"] = ["v", "d", "l", "m", "u","v","vo", "v"] #"ow" sound as in "house" #self.phonemes["aUr"] = ["v", "d", "l", "m", "u","v","vo", "v"] # Rhotic "ow" - Not used in Combilex GAM, RPX or EDI #self.phonemes["i@"] = ["v", "d", "h", "f", "u","v","vo", "v"] #i@ - Not used in Combilex GAM, RPX or EDI self.phonemes["I@"] = ["v", "d", "h", "f", "u","v","vo", "v"] #"I@" sound as "ea" in "idea" self.phonemes["@@"] = ["v", "l", "m", "m", "u","v","vo", "v"] #"@@" as only vowel pronounced in "burse" in RPX and EDI (also used in GAM in some loan words e.g. 
"eu" in "peugot") self.phonemes["@@r"] = ["v", "l", "m", "m", "u","v","vo", "v"] #Rhotic "@@" as only vowel pronounced in "burse" in GAM - Not used in RPX and EDI #self.phonemes["Er"] = ["v", "sh", "m", "f", "u","v","vo", "v"] #er - Not used in combilex RPX, GAM or EDI self.phonemes["E@"] = ["v", "sh", "m", "f", "u","v","vo", "v"] #"eir" sound as "air" in "cairn" self.phonemes["U@"] = ["v", "d", "h", "b", "r","v","vo", "v"] #"u" part of "caricature" often in conjunction with "j" (caricat j U@ re) self.phonemes["#"] = ["xx", "xx", "xx", "xx", "xx","xx","xx", "u"] #Around utt silence self.phonemes["sil"] = ["xx", "xx", "xx", "xx", "xx","xx","xx", "u"] #silence self.phonemes["pau"] = ["xx", "xx", "xx", "xx", "xx","xx","xx", "u"] #pause #self.phonemes["sp"] = ["xx", "xx", "xx", "xx", "xx","xx","xx", "u"] #short pause - should preferably not exist self.phonemes["xx"] = ["xx", "xx", "xx", "xx", "xx","xx","xx", "xx"] #Pre-utt self.phonemes["novowel"] = ["c", "c", "c", "c", "c","xx","xx", "xx"] #If cur syll contains no vowel self.phonemes["UH"] = ["v", "sh", "l", "m", "u","v","vo", "v"] #"uh" sound - for testing if seperating gives better results #Returns the phonemes considered silence def get_sil_phonemes(self): return ["sil", "pau", "#"] def get_phoneme_feats(self, phoneme): return self.phonemes[phoneme] def get_phoneme_feats_dict(self, phoneme): p = self.phonemes[phoneme] features = {} #Vowel or consonant features["VOC"] = p[0] #Vowel Length features["VL"] = p[1] #Vowel Height features["VH"] = p[2] #Vowel Frontedness features["VF"] = p[3] #Vowel Roundedness features["VR"] = p[4] #Consonant Type features["CT"] = p[5] #Consonant Place of Articulation features["CA"] = p[6] #Voicing features["V"] = p[7] return features def get_phonemes(self): return self.phonemes.keys() def get_feature_lists(self): features = {} #Vowel or consonant features["VOC"] = ["v", "c", "xx"] #Vowel Length features["VL"] = ["sh", "l", "d", "sc", "c", "xx"] #Vowel Height features["VH"] = ["h", "m", "l", "c", "xx"] #Vowel Frontedness features["VF"] = ["f", "m", "b", "c", "xx"] #Vowel Roundedness features["VR"] = ["r", "u", "c", "xx"] #Consonant Type features["CT"] = ["s", "f", "af", "n", "l", "ap", "v", "xx"] #Consonant Place of Articulation features["CA"] = ["la", "a", "p", "ld", "d", "ve", "g", "vo", "xx"] #Voicing features["V"] = ["v", "u", "xx"] return features #Note that is_consonant and is_vowel are not completely opposite as silence phones are considered neither. #I.e. if you want to check if something is a vowel do not do is_consonant(phone) == False as this will also #Apply to silence. def is_vowel(self, phoneme): if self.phonemes[phoneme][0] == "v": return True else: return False def is_consonant(self, phoneme): if self.phonemes[phoneme][0] == "c": return True else: return False #Checks if a phoneme exist in the present set. #If fail is True it will fail if it is not. 
def is_phoneme(self, phoneme, fail=False): if phoneme in self.phonemes: return True else: if fail: raise SiReError("Phoneme ({0}) not a valid phoneme!".format(phoneme)) return False class CMUPhonemes(CombilexPhonemes): def __init__(self): # Initialise it as a combilex phoneset super(CombilexPhonemes, self).__init__() # Clear the combilex phones self.phonemes = {} # Insert the CMUDict ones/arpabet #The meaning of each element of the list is #[0] - VOC: Vowel (v) or Consonant (c) #[1] - VL: Vowel length: short (sh) long (l) dipthong (d) schwa (sc) consonant (c) #[2] - VH: Vowel height: high (h) mid-high (mh) mid (m) mid-low (ml) low (l) consonant (c) #[3] - VF: Vowel frontness: front (f) mid (m) back (b) consonant (c) #[4] - VL: Vowel lip round: Rounded (r) unrounded (u) consonant (c) #[5] - CT: Consonant Type: stop (s) fricative (f) affricative (af) nasal (n) liquid (l) approximant (ap) vowel (v) #[6] - CA: Consonant Place of Articulation: labial (la) alveolar (a) palatal (p) labio-dental (ld) dental (d) velar (ve) glottal (g) vowel (vo) #[7] - V: Voicing: Voiced (v) unvoiced (u) self.phonemes["aa"] = ["v", "sh", "l", "b", "u", "v", "vo", "v"] #AA odd AA D self.phonemes["ae"] = ["v", "sh", "ml", "f", "u", "v", "vo", "v"] #AE at AE T self.phonemes["ah"] = ["v", "sc", "m", "m", "u", "v", "vo", "v"] # AH hut HH AH T <- Rasmus: I don't like this one, it covers both the Schwa and ^ IPA symbols and it is terribly broad. Here written as schwa. self.phonemes["ao"] = ["v", "sh", "ml", "b", "r", "v", "vo", "v"] # AO ought AO T self.phonemes["aw"] = ["v", "d", "l", "m", "u", "v", "vo", "v"] # AW cow K AW <- Rasmus: This is set as a low high mid-vowel though arguably it is moving from low to high, front to back. self.phonemes["ay"] = ["v", "d", "l", "m", "u", "v", "vo", "v"] # AY hide HH AY D <- Rasmus: This is set as a low height vowel though arguably it is moving from low to high. self.phonemes["b"] = ["c", "c", "c", "c", "c", "s", "la", "v"] #b self.phonemes["ch"] = ["c", "c", "c", "c", "c", "af", "p", "u"] # CH cheese CH IY Z self.phonemes["d"] = ["c", "c", "c", "c", "c", "s", "a", "v"] #d self.phonemes["dh"] = ["c", "c", "c", "c", "c", "f", "d", "v"] #DH thee DH IY self.phonemes["eh"] = ["v", "sh", "ml", "f", "u", "v", "vo", "v"] # EH Ed EH D self.phonemes["er"] = ["v", "sh", "ml", "v", "u", "v", "vo", "v"] # ER hurt HH ER T self.phonemes["ey"] = ["v", "d", "mh", "f", "u", "v", "vo", "v"] # EY ate EY T <- Rasmus: This is set as a mid-high vowel though arguably it is moving from mid-high to high. 
self.phonemes["f"] = ["c", "c", "c", "c", "c","f","ld", "u"] #f self.phonemes["v"] = ["c", "c", "c", "c", "c","f","ld", "v"] #v self.phonemes["g"] = ["c", "c", "c", "c", "c","s","ve", "v"] #g self.phonemes["hh"] = ["c", "c", "c", "c", "c","f","g", "u"] #h self.phonemes["ih"] = ["v", "s", "h", "f", "u","v","vo", "v"] # IH it IH T self.phonemes["iy"] = ["v", "l", "h", "f", "u","v","vo", "v"] # IY eat IY T self.phonemes["jh"] = ["c", "c", "c", "c", "c","af","p", "v"] # JH gee JH IY self.phonemes["k"] = ["c", "c", "c", "c", "c","s","ve", "u"] #k self.phonemes["l"] = ["c", "c", "c", "c", "c","l","a", "v"] #l self.phonemes["m"] = ["c", "c", "c", "c", "c","n","la", "v"] #m self.phonemes["n"] = ["c", "c", "c", "c", "c","n","a", "v"] #n self.phonemes["ng"] = ["c", "c", "c", "c", "c","n","ve", "v"] #"ng" in "-ing" ending (when the g is not pronounced fully) self.phonemes["ow"] = ["v", "d", "mh", "b", "r","v","vo", "v"] # OW oat OW T <- Rasmus: This is set as a rounded vowel though arguably it is moving from rounded to unrounded. self.phonemes["oy"] = ["v", "d", "ml", "b", "r","v","vo", "v"] # OY toy T OY <- Rasmus: This is set as a rounded back mid-low vowel though arguably it is moving from rounded back mid-low to unrounded front high. self.phonemes["p"] = ["c", "c", "c", "c", "c", "s", "la", "u"] #p self.phonemes["r"] = ["c", "c", "c", "c", "c", "l","a", "v"] # R read R IY D <- In combilex this is an approximant but in arpabet/cmudict a liquid. To distinguish it from l it has here only the id. self.phonemes["s"] = ["c", "c", "c", "c", "c","f","a", "u"] #s self.phonemes["sh"] = ["c", "c", "c", "c", "c","f","p", "u"] # SH she SH IY self.phonemes["t"] = ["c", "c", "c", "c", "c", "s", "a", "u"] #t self.phonemes["th"] = ["c", "c", "c", "c", "c","f","d", "u"] # TH theta TH EY T AH self.phonemes["uh"] = ["v", "s", "h", "b", "r","v","vo", "v"] # UH hood HH UH D self.phonemes["uw"] = ["v", "l", "h", "b", "r","v","vo", "v"] # UW two T UW self.phonemes["w"] = ["c", "c", "c", "c", "c","l","la", "v"] #w self.phonemes["y"] = ["c", "c", "c", "c", "c","l","p", "v"] # Y yield Y IY L D self.phonemes["z"] = ["c", "c", "c", "c", "c","f","a", "v"] # Z zee Z IY self.phonemes["zh"] = ["c", "c", "c", "c", "c","f","p", "v"] # ZH seizure S IY ZH ER self.phonemes["#"] = ["xx", "xx", "xx", "xx", "xx","xx","xx", "u"] #Around utt silence self.phonemes["sil"] = ["xx", "xx", "xx", "xx", "xx","xx","xx", "u"] #silence self.phonemes["pau"] = ["xx", "xx", "xx", "xx", "xx","xx","xx", "u"] #pause #self.phonemes["sp"] = ["xx", "xx", "xx", "xx", "xx","xx","xx", "u"] #short pause - should preferably not exist self.phonemes["xx"] = ["xx", "xx", "xx", "xx", "xx","xx","xx", "xx"] #Pre-utt self.phonemes["novowel"] = ["c", "c", "c", "c", "c","xx","xx", "xx"] #If cur syll contains no vowel self.phonemes["UH"] = ["v", "sh", "l", "m", "u","v","vo", "v"] #"uh" sound - for testing if seperating gives better results
import os import re import inspect def file_content(filepath): """ Fetches and formats file's content to perform the required operation. """ infile = open(filepath, 'r') filelines = tuple(infile) infile.close() comment_indexes = [] comments = [] for line in enumerate(filelines): if "@startDocuBlock" in line[1]: _start = line[0] if "@endDocuBlock" in line[1]: _end = line[0] + 1 comment_indexes.append([_start, _end]) for index in comment_indexes: comments.append(filelines[index[0]: index[1]]) return comments def example_content(filepath, fh, tag): """ Fetches an example file and inserts it using code """ arangosh = False curl = False first = True lastline = None long = "" longLines = 0 short = "" shortLines = 0 shortable = False showdots = True CURL_STATE_CMD = 1 CURL_STATE_HEADER = 2 CURL_STATE_BODY = 3 # read in the context, split into long and short infile = open(filepath, 'r') for line in infile: if first: arangosh = line.startswith("arangosh>") curl = line.startswith("shell> curl") first = False if arangosh: if line.startswith("arangosh>") or line.startswith("........>"): if lastline != None: # short = short + lastline # shortLines = shortLines + 1 lastline = None short = short + line shortLines = shortLines + 1 showdots = True else: if showdots: if lastline == None: # lastline = line shortable = True showdots = False lastline = None else: # short = short + "~~~hidden~~~\n" # shortLines = shortLines + 1 shortable = True showdots = False lastline = None if curl: if line.startswith("shell> curl"): curlState = CURL_STATE_CMD elif curlState == CURL_STATE_CMD and line.startswith("HTTP/1.1 "): curlState = CURL_STATE_HEADER elif curlState == CURL_STATE_HEADER and line.startswith("{"): curlState = CURL_STATE_BODY if curlState == CURL_STATE_CMD or curlState == CURL_STATE_HEADER: short = short + line shortLines = shortLines + 1 else: shortable = True long = long + line longLines = longLines + 1 if lastline != None: short = short + lastline shortLines = shortLines + 1 infile.close() if longLines - shortLines < 5: shortable = False # write example fh.write("\n") fh.write("<div id=\"%s_container\">\n" % tag) longTag = "%s_long" % tag shortTag = "%s_short" % tag longToggle = "$('#%s').hide(); $('#%s').show();" % (longTag, shortTag) shortToggle = "$('#%s').hide(); $('#%s').show();" % (shortTag, longTag) if shortable: fh.write("<div id=\"%s\" onclick=\"%s\" style=\"Display: none;\">\n" % (longTag, longToggle)) else: fh.write("<div id=\"%s\">\n" % longTag) fh.write("<pre>\n") fh.write("```\n") fh.write("%s" % long) fh.write("```\n") fh.write("</pre>\n") fh.write("</div>\n") if shortable: fh.write("<div id=\"%s\" onclick=\"%s\">\n" % (shortTag, shortToggle)) fh.write("<pre>\n") fh.write("```\n") fh.write("%s" % short) fh.write("```\n") if arangosh: fh.write("</pre><div class=\"example_show_button\">show execution results</div>\n") elif curl: fh.write("</pre><div class=\"example_show_button\">show response body</div>\n") else: fh.write("</pre><div class=\"example_show_button\">show</div>\n") fh.write("</div>\n") fh.write("</div>\n") fh.write("\n") def fetch_comments(dirpath): """ Fetches comments from files and writes to a file in required format. 
""" comments_filename = "allComments.txt" fh = open(comments_filename, "a") shouldIgnoreLine = False; for root, directories, files in os.walk(dirpath): for filename in files: if filename.endswith((".cpp", ".h", ".js")): filepath = os.path.join(root, filename) file_comments = file_content(filepath) for comment in file_comments: fh.write("\n<!-- filename: %s -->\n" % filename) for _com in comment: _text = re.sub(r"//(/)+\s*\n", "<br />", _com) _text = re.sub(r"///+(\s+\s+)([-\*\d])", r" \2", _text) _text = re.sub(r"///\s", "", _text) _text = _text.strip("\n") if _text: if not shouldIgnoreLine: if ("@startDocuBlock" in _text) or \ ("@endDocuBlock" in _text): fh.write("%s\n\n" % _text) elif ("@EXAMPLE_ARANGOSH_OUTPUT" in _text or \ "@EXAMPLE_ARANGOSH_RUN" in _text): shouldIgnoreLine = True _filename = re.search("{(.*)}", _text).group(1) dirpath = os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir, "Examples", _filename + ".generated")) if os.path.isfile(dirpath): example_content(dirpath, fh, _filename) else: print "Could not find code for " + _filename else: fh.write("%s\n" % _text) elif ("@END_EXAMPLE_ARANGOSH_OUTPUT" in _text or \ "@END_EXAMPLE_ARANGOSH_RUN" in _text): shouldIgnoreLine = False fh.close() if __name__ == "__main__": errorsFile = open("../../lib/Basics/errors.dat", "r") commentsFile = open("allComments.txt", "w") commentsFile.write("@startDocuBlock errorCodes \n") for line in errorsFile: commentsFile.write(line + "\n") commentsFile.write("@endDocuBlock \n") commentsFile.close() errorsFile.close() path = ["arangod/cluster","arangod/RestHandler","arangod/V8Server","arangod/RestServer","arangod/Wal", "lib/Admin","lib/HttpServer","lib/V8","lib/ApplicationServer","lib/Scheduler","lib/Rest","lib/BasicsC", "js/actions","js/client","js/apps/databases","js/apps/system/cerberus","js/apps/system/gharial","js/common","js/server"] for i in path: dirpath = os.path.abspath(os.path.join(os.path.dirname( __file__ ), os.pardir,"ArangoDB/../../"+i)) fetch_comments(dirpath) print "Searching for docublocks in " + i os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'templates'))
##################################################################################### # # Copyright (c) Microsoft Corporation. All rights reserved. # # This source code is subject to terms and conditions of the Apache License, Version 2.0. A # copy of the license can be found in the License.html file at the root of this distribution. If # you cannot locate the Apache License, Version 2.0, please send an email to # [email protected]. By using this source code in any fashion, you are agreeing to be bound # by the terms of the Apache License, Version 2.0. # # You must not remove this notice, or any other, from this software. # # ##################################################################################### ''' This module is in place to test the interoperatbility between CPython and CLR numerical types. TODO: - at the moment the test cases in place are simple sanity checks to ensure the appropriate operator overloads have been implemented. This needs to be extended quite a bit (e.g., see what happens with OverFlow cases). - a few special cases aren't covered yet - comlex numbers, unary ops, System.Char, ''' from iptest.assert_util import * skiptest("win32") import System import clr #Test Python/CLR number interop. clr_integer_types = [ "System.Byte", "System.SByte", "System.Int16", "System.UInt16", "System.Int32", "System.UInt32", "System.Int64", "System.UInt64"] clr_float_types = [ "System.Single", "System.Double", "System.Decimal", ] #TODO - char??? clr_types = clr_integer_types + clr_float_types py_integer_types = ["int", "long"] py_float_types = ["float"] #TODO - special case complex??? py_types = py_integer_types + py_float_types bug_operands = [] unsupported_operands = [ #System.Decimal +: System.Single, System.Double, long, float "System.Decimal+long", "long+System.Decimal", #System.Decimal -: System.Single, System.Double, long, float "System.Decimal-long", "long-System.Decimal", #System.Decimal *: System.Single, System.Double, long, float "System.Decimal*long", "long*System.Decimal", #System.Decimal /:System.Single, System.Double, long, float "System.Decimal/long", "long/System.Decimal", #System.Decimal //:System.Byte System.SByte "System.Decimal//System.Byte", "System.Decimal//System.SByte", "System.Decimal//System.Int16", "System.Decimal//System.UInt16", "System.Decimal//System.Int32", "System.Decimal//System.UInt32", "System.Decimal//System.Int64", "System.Decimal//System.UInt64", "System.Decimal//System.Decimal", "System.Decimal//int", "System.Decimal//long", "System.Byte//System.Decimal", "System.SByte//System.Decimal", "System.Int16//System.Decimal", "System.UInt16//System.Decimal", "System.Int32//System.Decimal", "System.UInt32//System.Decimal", "System.Int64//System.Decimal", "System.UInt64//System.Decimal", "System.Decimal//System.Decimal", "int//System.Decimal", "long//System.Decimal", "System.Decimal**System.Byte", "System.Decimal**System.SByte", "System.Decimal**System.Int16", "System.Decimal**System.UInt16", "System.Decimal**System.Int32", "System.Decimal**System.UInt32", "System.Decimal**System.Int64", "System.Decimal**System.UInt64", "System.Decimal**System.Decimal", "System.Decimal**int", "System.Decimal**long", "System.Byte**System.Decimal", "System.SByte**System.Decimal", "System.Int16**System.Decimal", "System.UInt16**System.Decimal", "System.Int32**System.Decimal", "System.UInt32**System.Decimal", "System.Int64**System.Decimal", "System.UInt64**System.Decimal", "System.Decimal**System.Decimal", "int**System.Decimal", "long**System.Decimal", 
"System.Decimal%long", "long%System.Decimal", ] + bug_operands known_bugs = [] #------------------------------------------------------------------------------ def num_ok_for_type(number, proposed_type): ''' Helper function returns true if the number param is within the range of valid values for the proposed type ''' #handle special cases first if proposed_type=="long": #arbitrary precision return True if proposed_type=="float": #arbitrary precision return True if number >= eval(proposed_type + ".MinValue") and number <= eval(proposed_type + ".MaxValue"): return True #must give it another shot...maybe the operator is broken if eval(proposed_type + ".MinValue") <= number and eval(proposed_type + ".MaxValue") >= number: return True return False #------------------------------------------------------------------------------ def _test_interop_set(clr_types, py_types, test_cases): ''' Helper function which permutes Python/CLR types onto test cases ''' global unsupported_operands global known_bugs #each test case for leftop, op, rightop, expected_value in test_cases: #get the left operand as a Python type py_left = eval(leftop) #------------------------------------------------------------------ #create a list of values where each element is the lefthand operand #converted to a CLR type leftop_clr_types = [x for x in clr_types if num_ok_for_type(py_left, x)] leftop_clr_values = [eval(x + "(" + leftop + ")") for x in leftop_clr_types] #------------------------------------------------------------------ #create a list of values where each element is the lefthand operand #converted to a Python type leftop_py_types = [x for x in py_types if num_ok_for_type(py_left, x)] leftop_py_values = [eval(x + "(" + leftop + ")") for x in leftop_py_types] #------------------------------------------------------------------ #get the right operand as a Python type py_right = eval(rightop) rightop_clr_types = [x for x in clr_types if num_ok_for_type(py_right, x)] rightop_clr_values = [eval(x + "(" + rightop + ")") for x in rightop_clr_types] #------------------------------------------------------------------ #create a list of values where each element is the righthand operand #converted to a Python type rightop_py_types = [x for x in py_types if num_ok_for_type(py_right, x)] rightop_py_values = [eval(x + "(" + rightop + ")") for x in rightop_py_types] #------------------------------------------------------------------ #Comparisons between CPython/CLR types def assertionHelper(left_type, left_op, op, right_type, right_op, expected): ''' Helper function used to figure out which test cases fail without blowing up the rest of the test. 
''' expression_str = left_type + "("+ left_op +") " + str(op) + " " + right_type + "("+ right_op +")" #if it's supposedly unsupported...make sure if unsupported_operands.count(left_type + op + right_type)>0: AssertError(TypeError, eval, expression_str) return try: expression = eval(expression_str) except TypeError as e: print("TYPE BUG:", expression_str) raise try: AreEqual(expression, expected) if known_bugs.count(left_type + op + right_type)>0: raise "NO BUG FOR: " + expression_str except: if known_bugs.count(left_type + op + right_type)>0: return print(expression_str) raise #CLR-CLR for x in leftop_clr_types: for y in rightop_clr_types: assertionHelper(x, leftop, op, y, rightop, expected_value) #CLR-PY for x in leftop_clr_types: for y in rightop_py_types: assertionHelper(x, leftop, op, y, rightop, expected_value) #PY-CLR for x in leftop_py_types: for y in rightop_clr_types: assertionHelper(x, leftop, op, y, rightop, expected_value) #PY-PY for x in leftop_py_types: for y in rightop_py_types: assertionHelper(x, leftop, op, y, rightop, expected_value) #------------------------------------------------------------------------------ #--BOOLEAN bool_test_cases = [ #x==x ("0","==","0", True), ("0.0","==","0", True), ("1","==","1", True), ("3","==","3", True), ("-1","==","-1", True), #! x==x ("10","==","0", False), ("10.0","==","0", False), ("11","==","1", False), ("31","==","3", False), ("-11","==","-1", False), #x!=x ("10","!=","0", True), ("10.0","!=","0", True), ("11","!=","1", True), ("31","!=","3", True), ("-11","!=","-1", True), #! x!=x ("0","!=","0", False), ("0.0","!=","0", False), ("1","!=","1", False), ("3","!=","3", False), ("-1","!=","-1", False), #x<=x ("0","<=","0", True), ("0.0","<=","0", True), ("1","<=","1", True), ("3","<=","3", True), ("-1","<=","-1", True), #! x<=x ("10","<=","0", False), ("10.0","<=","0", False), ("11","<=","1", False), ("13","<=","3", False), ("10","<=","-1", False), #x>=x ("0",">=","0", True), ("0.0",">=","0", True), ("1",">=","1", True), ("3",">=","3", True), ("-1",">=","-1", True), #! x>=x ("0",">=","10", False), ("0.0",">=","10", False), ("1",">=","11", False), ("3",">=","13", False), ("-1",">=","11", False), #x<=/<y ("0", "<=", "1", True), ("0", "<", "1", True), ("3.14", "<=", "19", True), ("3.14", "<", "19", True), #!x<=/<y ("10", "<=", "1", False), ("10", "<", "1", False), ("31.14", "<=", "19", False), ("31.14", "<", "19", False), #x>=/.y ("10", ">=", "1", True), ("10", ">", "1", True), ("31.14", ">=", "19", True), ("31.14", ">", "19", True), #! 
x>=/.y ("0", ">=", "1", False), ("0", ">", "1", False), ("3.14", ">=", "19", False), ("3.14", ">", "19", False), ] def test_boolean(): ''' Test boolean operations involving a left and right operand ''' _test_interop_set(clr_types, py_types, bool_test_cases) #------------------------------------------------------------------------------ #--ARITHMETIC #TODO - unary minus, unary plus arith_test_cases = [ #add ("0", "+", "0", 0), ("0", "+", "1", 1), ("1", "+", "-1", 0), ("2", "+", "-1", 1), #sub ("0", "-", "0", 0), ("0", "-", "1", -1), ("1", "-", "-1", 2), ("2", "-", "-1", 3), #mult ("0", "*", "0", 0), ("0", "*", "1", 0), ("2", "*", "1", 2), ("1", "*", "-1", -1), ("2", "*", "-1", -2), #div ("0", "/", "1", 0), ("4", "/", "2", 2), ("2", "/", "1", 2), ("1", "/", "-1", -1), ("2", "/", "-1", -2), #trun div ("0", "//", "1", 0), ("4", "//", "2", 2), ("2", "//", "1", 2), ("1", "//", "-1", -1), ("2", "//", "-1", -2), ("3", "//", "2", 1), #power ("0", "**", "1", 0), ("4", "**", "2", 16), ("2", "**", "1", 2), ("1", "**", "-1", 1), #mod ("0", "%", "1", 0), ("5", "%", "2", 1), ("2", "%", "1", 0), ("1", "%", "-1", 0), ("2", "%", "-1", 0), ] def test_arithmetic(): ''' Test general arithmetic operations. ''' _test_interop_set(clr_types, py_types, arith_test_cases) #------------------------------------------------------------------------------ #BITWISE and SHIFT #TODO: bitwise negation bitwise_test_cases = [ #left shift ("0", "<<", "1", 0), ("3", "<<", "1", 6), ("-3", "<<", "1", -6), #right shift ("0", ">>", "1", 0), ("6", ">>", "1", 3), ("-3", ">>", "1", -2), #bitwise AND ("0", "&", "1", 0), ("1", "&", "1", 1), ("7", "&", "2", 2), ("-1", "&", "1", 1), #bitwise OR ("0", "|", "1", 1), ("1", "|", "1", 1), ("4", "|", "2", 6), ("-1", "|", "1", -1), #bitwise XOR ("0", "^", "1", 1), ("1", "^", "1", 0), ("7", "^", "2", 5), ("-1", "^", "1", -2), ] def test_bitwiseshift(): ''' Test bitwise and shifting operations. ''' _test_interop_set(clr_integer_types, py_integer_types, bitwise_test_cases) def test_sanity(): ''' Make sure that numbers within the constraints of the numerical types are allowed. 
    '''
    temp_list = [ ["System.Byte", 0, 255],
                  ["System.SByte", -128, 127],
                  ["System.Byte", 0, 255],
                  ["System.Int16", -32768, 32767],
                  ["System.UInt16", 0, 65535],
                  ["System.Int32", -2147483648, 2147483647],
                  ["System.UInt32", 0, 4294967295],
                  ["System.Int64", -9223372036854775808, 9223372036854775807],
                  ["System.UInt64", 0, 18446744073709551615],
                  ["System.Single", -3.40282e+038, 3.40282e+038],
                  ["System.Double", -1.79769313486e+308, 1.79769313486e+308],
                  ["System.Decimal", -79228162514264337593543950335, 79228162514264337593543950335],
                  ["int", -2147483648, 2147483647]
                ]

    for num_type, small_val, large_val in temp_list:
        Assert(num_ok_for_type(1, num_type))
        Assert(num_ok_for_type(1.0, num_type))

        #Minimum value
        Assert(num_ok_for_type(small_val, num_type))
        Assert(num_ok_for_type(small_val + 1, num_type))
        Assert(num_ok_for_type(small_val + 2, num_type))

        #Maximum value
        Assert(num_ok_for_type(large_val, num_type))
        Assert(num_ok_for_type(large_val - 1, num_type))
        Assert(num_ok_for_type(large_val - 2, num_type))

        #Negative cases
        if num_type!="System.Single" and num_type!="System.Double" and num_type!="System.Decimal":
            Assert(not num_ok_for_type(small_val - 1, num_type))
            Assert(not num_ok_for_type(small_val - 2, num_type))
            Assert(not num_ok_for_type(large_val + 1, num_type))
            Assert(not num_ok_for_type(large_val + 2, num_type))

    #Special cases
    Assert(num_ok_for_type(0, "long"))
    Assert(num_ok_for_type(1, "long"))
    Assert(num_ok_for_type(-1, "long"))
    Assert(num_ok_for_type(5, "long"))
    Assert(num_ok_for_type(-92233720368547758080000, "long"))
    Assert(num_ok_for_type( 18446744073709551615000, "long"))

    Assert(num_ok_for_type(0.0, "float"))
    Assert(num_ok_for_type(1.0, "float"))
    Assert(num_ok_for_type(-1.0, "float"))
    Assert(num_ok_for_type(3.14, "float"))
    Assert(num_ok_for_type(-92233720368547758080000.0, "float"))
    Assert(num_ok_for_type( 18446744073709551615000.0, "float"))

run_test(__name__)
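
#------------------------------------------------------------------------------
# Illustrative helper added for this write-up; it is not invoked by run_test
# above. For a single test case it prints the expression strings that
# _test_interop_set() ends up evaluating, i.e. every CLR/Python type pairing
# that can hold the two operands. The default operands here are made up.
def _show_permutations(leftop="2", op="+", rightop="-1"):
    py_left = eval(leftop)
    py_right = eval(rightop)
    left_types = [t for t in clr_types + py_types if num_ok_for_type(py_left, t)]
    right_types = [t for t in clr_types + py_types if num_ok_for_type(py_right, t)]
    for lt in left_types:
        for rt in right_types:
            print("%s(%s) %s %s(%s)" % (lt, leftop, op, rt, rightop))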
#populate.py def print_crafting_tree(context): #item = Recipe.objects.get(output_item_id=19714) item = Item.objects.get(pk=45868) tree = item.buy_or_craft() print('For item: ' + item.name) shopping_list = {} print_list(tree, shopping_list) context['shopping_list'] = shopping_list def print_list(list, dict, indent='', multiplier=1): if not hasattr(list, '__iter__'): return else: if list[0] == 'craft': print(indent + 'Craft ' + str(list[2] * multiplier) + ' ' + list[4] + ' for ' + str(list[1] * list[2] * multiplier)) print_list(list[3], dict, indent + ' ', list[2] * multiplier) elif list[0] == 'buy': print(indent + 'Buy ' + str(list[2] * multiplier) + ' ' + list[3] + ' for ' + str(list[1] * multiplier) + ' (' + str(int(list[1] / list[2])) + ' each)') try: dict[list[3]] += list[2] * multiplier except KeyError: dict[list[3]] = list[2] * multiplier else: item_count = len(list) counter = 0 while counter < item_count: print_list(list[counter], dict, indent, multiplier) counter += 1 #views.py def get_item_details(self, context): '''Find Item records created with get_items and fill in missing details.''' processing = True total_updated = 0 while processing: #API endpoint allows 200 id requests max found_items = Item.objects.filter(name='').filter(icon='')[:200] num_found_items = found_items.count() if num_found_items > 0: #construct URL for items to update id_list = [] for item in found_items: id_list.append(str(item.item_id)) end_url = ','.join(id_list) #pull data from API item_list = self.get_api_data('items?ids=' + end_url, context) if not item_list: #API error return for item in item_list: update_item = Item.objects.get(pk=item['id']) update_item.add_details(item) update_item.save() #create entry in ItemFlag table new_itemflag = ItemFlag(for_item=update_item) new_itemflag.add_details(item['flags']) new_itemflag.save() total_updated += num_found_items else: processing = False context['total_items_updated'] = total_updated def get_recipe_details(self, context): '''Find Recipe records created with get_recipes and fill in missing details.''' processing = True total_updated = 0 while processing: #API endpoint allows 200 id requests max found_recipes = Recipe.objects.filter(type='').filter(output_item_id=0)[:200] num_found_recipes = found_recipes.count() if num_found_recipes > 0: #construct URL for recipes to update id_list = [] for recipe in found_recipes: id_list.append(str(recipe.recipe_id)) end_url = ','.join(id_list) #pull data from API recipe_list = self.get_api_data('recipes?ids=' + end_url, context) if not recipe_list: #API error return for recipe in recipe_list: update_recipe = Recipe.objects.get(pk=recipe['id']) update_recipe.add_details(recipe) update_recipe.save() #create entry in RecipeDiscipline table new_recipeflag = RecipeDiscipline(for_recipe=update_recipe) new_recipeflag.add_details(recipe['disciplines']) new_recipeflag.save() #create entries in RecipeIngredient table for ingredient in recipe['ingredients']: recipe_ingredient = RecipeIngredient(for_recipe=update_recipe) recipe_ingredient.add_details(ingredient) recipe_ingredient.save() total_updated += num_found_recipes else: processing = False context['total_recipes_updated'] = total_updated #models.py class WorthMakingFast(Item): '''Item that will produce a profit when buying the components of its Recipe, crafting that Item, then immediately selling the Item on the trading post''' profit = models.PositiveIntegerField(default=0) class Meta: ordering = ["-profit"] def __str__(self): return "WorthMakingFast " + str(self.item_id) + ": 
" + self.name + " profit: " + str(self.profit) #item_list.html <dl class="dl-horizontal"> {% for item in item_list %} <dt>{{ item.item_id }}</dt> <dd><a href="{% url 'commerce:detail' item.item_id %}">{{ item.name }}</a></dd> {% endfor %} </dl> #views.py class TradeableListView(generic.ListView): model = Tradeable template_name = 'commerce/tradeable_list.html' context_object_name = 'tradeable_list' paginate_by = 20 #models.py class Tradeable(Item): '''Subset of Items that can be traded with other players or are used as an ingredient to create tradeable Items. The main table for the commerce app''' recipe = models.IntegerField(default=0) class Meta: ordering = ["item_id"] def __str__(self): return "Tradeable " + str(self.item_id) + ": " + self.name def add_details(self, Item, getIcon): '''Copy details from an Item and create a local copy of the image found at the URL in Item.icon (named as <pk>.png)''' self.name = Item.name self.description = Item.description self.type = Item.type self.rarity = Item.rarity self.level = Item.level self.vendor_value = Item.vendor_value try: recipe_link = Recipe.objects.get(output_item_id=self.item_id) except Recipe.DoesNotExist: pass except Recipe.MultipleObjectsReturned: self.recipe = -1 else: self.recipe = recipe_link.recipe_id #TODO: avoid using an absolute path if getIcon: urlretrieve(Item.icon, '/home/turbobear/goldwarsplus/commerce/static/commerce/items/' + str(self.item_id) + '.png') def get_vendor_value_split(self): '''Calculate vendor_value in terms of gold, silver, and copper''' coin_list = [] coin_list.append(int(self.vendor_value / 10000)) #gold coin_list.append(int(self.vendor_value / 100) % 100) #silver coin_list.append(self.vendor_value % 100) #copper return coin_list def get_market_buy(self, quantity): '''Return the cost of the quantity of this item if bought on the trading post''' sell_orders = self.sell.all().order_by('unit_price') total = 0 count = 0 for order in sell_orders: if (order.quantity + count) <= quantity: count += order.quantity total += order.quantity * order.unit_price else: total += (quantity - count) * order.unit_price return total #quantity not available return 0 def get_market_sell(self): '''Return the value of this item if sold immediately on the trading post''' buy_orders = self.buy.all().order_by('-unit_price') return buy_orders[0].unit_price if buy_orders else 0 def get_market_delay_sell(self): '''Return the value of this item if sold one copper below the lowest current selling price on the trading post. 
Returns 0 if none of these items are listed''' sell_orders = self.sell.all().order_by('unit_price') return sell_orders[0].unit_price - 1 if sell_orders else 0 def get_context_data(self, **kwargs): context = super(TradeableListView, self).get_context_data(**kwargs) list = self.get_queryset() for item in list: coin_list = [] vendor_value = item.vendor_value coin_list.append(int(vendor_value / 10000)) #gold coin_list.append(int(vendor_value / 100 % 100)) #silver coin_list.append(vendor_value % 100) #copper context[str(item.item_id)+'_coins'] = coin_list return context {% if item.rarity == 'Fine' %} style="color:#62a4da" {% elif item.rarity == 'Masterwork' %} style="color:#1a9306" {% elif item.rarity == 'Rare' %} style="color:#fcd00b" {% elif item.rarity == 'Exotic' %} style="color:#ffa405" {% elif item.rarity == 'Ascended' %} style="color:#fb3e8d" {% elif item.rarity == 'Legendary' %} style="color:#4c139d" {% endif %} if form.cleaned_data['action'] == 'get_items': self.get_items(context) elif form.cleaned_data['action'] == 'get_item_details': self.get_item_details(context) elif form.cleaned_data['action'] == 'get_recipes': self.get_recipes(context) elif form.cleaned_data['action'] == 'get_recipe_details': self.get_recipe_details(context) def get_recipe_details(request): '''Find Recipe records created with get_recipes and fill in missing details.''' context = {'num_items': Item.objects.count(), 'num_recipes': Recipe.objects.count() } base_url = 'https://api.guildwars2.com/v2/recipes?ids=' processing = True total_updated = 0 while processing: #API endpoint allows 200 id requests max found_recipes = Recipe.objects.filter(type='').filter(output_item_id=0)[0:200] num_found_recipes = found_recipes.count() if num_found_recipes > 0: #construct URL for recipes to update recipe_list = [] for recipe in found_recipes: recipe_list.append(str(recipe.recipe_id)) end_url = ','.join(recipe_list) req = Request(base_url + end_url) #pull data from API try: response = urlopen(req) except HTTPError as e: context['api_error'] = e processing = False except URLError as e: context['api_error'] = e.reason processing = False else: #decode API response recipe_list = json.loads(response.read().decode('utf-8')) for recipe in recipe_list: update_recipe = Recipe.objects.get(pk=recipe['id']) update_recipe.add_details(recipe) update_recipe.save() #create entry in RecipeDiscipline table new_recipeflag = RecipeDiscipline(for_recipe=update_recipe) new_recipeflag.add_details(recipe['disciplines']) new_recipeflag.save() #create entries in RecipeIngredient table for ingredient in recipe['ingredients']: recipe_ingredient = RecipeIngredient(for_recipe=update_recipe) recipe_ingredient.add_details(ingredient) recipe_ingredient.save() total_updated += num_found_recipes else: processing = False context['total_recipes_updated'] = total_updated return render(request, 'commerce/update.html', context)
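
# ---------------------------------------------------------------------------
# Illustrative sketch, not from the project source. The update views above
# repeatedly take at most 200 records that still lack details, join their ids
# into a single "?ids=..." query string, and loop until nothing is left. The
# helper below isolates that batching step; fetch() in the usage comment is a
# hypothetical stand-in for self.get_api_data() / urlopen().
def batched_id_strings(ids, batch_size=200):
    """Yield comma-joined id strings in chunks of at most batch_size ids."""
    ids = [str(i) for i in ids]
    for start in range(0, len(ids), batch_size):
        yield ','.join(ids[start:start + batch_size])

# Example (hypothetical):
# for chunk in batched_id_strings(item_ids):
#     item_list = fetch('items?ids=' + chunk)
#     ...update local Item records from item_list...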
""" Tests for EntityData generic list view This test suite focuses on listing of record fields used by record views and lists. This serves two purposes: - it tests some additional options of the entity list logic that are not tested by the dfeault list view, and - it tests the logic that access site-wide data in addition to local data. """ from __future__ import unicode_literals from __future__ import absolute_import, division, print_function __author__ = "Graham Klyne ([email protected])" __copyright__ = "Copyright 2014, G. Klyne" __license__ = "MIT (http://opensource.org/licenses/MIT)" import logging log = logging.getLogger(__name__) import os import unittest from django.conf import settings from django.db import models from django.http import QueryDict from django.test import TestCase # cf. https://docs.djangoproject.com/en/dev/topics/testing/tools/#assertions from django.test.client import Client from utils.py3porting import urlparse, urljoin from utils.SuppressLoggingContext import SuppressLogging from annalist import layout from annalist import message from annalist.identifiers import RDF, RDFS, ANNAL from annalist.util import extract_entity_id from annalist.models.site import Site from annalist.models.collection import Collection from annalist.models.recordtype import RecordType from annalist.models.recordtypedata import RecordTypeData from annalist.models.entitydata import EntityData from annalist.models.entityfinder import EntityFinder from annalist.models.entitytypeinfo import EntityTypeInfo from annalist.views.uri_builder import ( uri_quote_param, uri_params, uri_with_params, continuation_params_url ) from annalist.views.entitylist import EntityGenericListView from annalist.views.form_utils.fieldchoice import FieldChoice from .AnnalistTestCase import AnnalistTestCase from .tests import ( test_layout, TestHost, TestHostUri, TestBasePath, TestBaseUri, TestBaseDir ) from .init_tests import ( init_annalist_test_site, init_annalist_test_coll, resetSitedata ) from .entity_testutils import ( make_message, make_quoted_message, site_dir, collection_dir, entitydata_list_url_query, site_view_url, collection_view_url, collection_edit_url, continuation_url_param, confirm_delete_params, collection_create_values, site_title, create_test_user, create_user_permissions, context_view_field, # context_bind_fields context_list_entities, context_list_head_fields, context_list_item_fields, context_list_item_field, context_list_item_field_value, check_field_list_context_fields, ) from .entity_testtypedata import ( recordtype_dir, recordtype_url, recordtype_create_values, ) from .entity_testentitydata import ( recorddata_dir, entitydata_dir, entity_url, entitydata_edit_url, entitydata_delete_confirm_url, entitydata_list_type_url, entitydata_list_all_url, entitydata_value_keys, entitydata_create_values, entitydata_values, entitydata_delete_confirm_form_data, entitylist_form_data ) from .entity_testsitedata import ( make_field_choices, no_selection, get_site_types, get_site_types_sorted, get_site_types_linked, get_site_lists, get_site_lists_sorted, get_site_lists_linked, get_site_views, get_site_views_sorted, get_site_views_linked, get_site_list_types, get_site_list_types_sorted, get_site_field_groups, get_site_field_groups_sorted, get_site_fields, get_site_fields_sorted, get_site_field_types, get_site_field_types_sorted, get_site_entities, get_site_entities_sorted, ) from .entity_testlistdata import ( recordlist_url, num_testcoll_enumerate_all_entities, num_testcoll_all_entities_scope_all ) # 
----------------------------------------------------------------------------- # # EntityDefaultListView tests # # ----------------------------------------------------------------------------- class EntityGenericListViewTest(AnnalistTestCase): """ Tests for record type edit views """ def setUp(self): init_annalist_test_site() self.testsite = Site(TestBaseUri, TestBaseDir) self.testcoll = Collection.create(self.testsite, "testcoll", collection_create_values("testcoll")) self.testtype = RecordType.create(self.testcoll, "testtype", recordtype_create_values("testcoll", "testtype")) self.testtype2 = RecordType.create(self.testcoll, "testtype2", recordtype_create_values("testcoll", "testtype2")) self.testdata = RecordTypeData.create(self.testcoll, "testtype", {}) self.testdata2 = RecordTypeData.create(self.testcoll, "testtype2", {}) # self.user = User.objects.create_user('testuser', '[email protected]', 'testpassword') # self.user.save() create_test_user(self.testcoll, "testuser", "testpassword") self.client = Client(HTTP_HOST=TestHost) loggedin = self.client.login(username="testuser", password="testpassword") self.assertTrue(loggedin) e1 = self._create_entity_data("entity1") e2 = self._create_entity_data("entity2") e3 = self._create_entity_data("entity3") e4 = EntityData.create(self.testdata2, "entity4", entitydata_create_values("entity4", type_id="testtype2") ) self.list_ids = get_site_lists_linked("testcoll") return def tearDown(self): # resetSitedata() return @classmethod def tearDownClass(cls): resetSitedata(scope="collections") return # ----------------------------------------------------------------------------- # Helpers # ----------------------------------------------------------------------------- def _create_entity_data(self, entity_id, update="Entity"): "Helper function creates entity data with supplied entity_id" e = EntityData.create(self.testdata, entity_id, entitydata_create_values(entity_id, update=update) ) return e # ----------------------------------------------------------------------------- # Form rendering tests # ----------------------------------------------------------------------------- def test_EntityDefaultListView(self): self.assertEqual(EntityGenericListView.__name__, "EntityGenericListView", "Check EntityGenericListView class name") return def test_enumerate_all_entities(self): # Test enumeration of all collection and site entities # Introduced to facilitate debugging of site data storage rework entity_list = ( EntityFinder(self.testcoll, selector="ALL") .get_entities_sorted(type_id=None, altscope="all", user_permissions=None, context={}, search="" ) ) actual_entity_ids = [ "%s/%s"%(e.get_type_id(), e.get_id()) for e in entity_list ] # log.debug("@@ actual_entity_ids: \n"+"\n".join([repr(eti) for eti in actual_entity_ids])) self.assertEqual(len(actual_entity_ids), num_testcoll_enumerate_all_entities) # Will change with site data expect_entities = get_site_entities_sorted() expect_entity_ids = [ fc.id for fc in expect_entities ] # log.debug("@@ actual_entity_ids: \n"+"\n".join([ repr(eti) for eti in actual_entity_ids[145:] ])) # log.debug("@@ expect_entity_ids: \n"+"\n".join([ repr(eti) for eti in expect_entity_ids[145:] ])) self.assertEqual(actual_entity_ids, expect_entity_ids) return def test_enumerate_value_modes(self): # Test enumeration of value modes (tests enumeration type listing) # Introduced to facilitate debugging of site data storage rework entity_list = ( EntityFinder(self.testcoll, selector="ALL") .get_entities_sorted(type_id="_enum_value_mode", 
altscope="all", user_permissions=None, context={}, search="" ) ) # Enumerate enumeration types entity_types_ids = [ (e.get_type_id(), e.get_id()) for e in entity_list ] # log.info("@@ entity_types_ids: \n"+"\n".join([repr(eti) for eti in entity_types_ids])) self.assertEqual(len(entity_types_ids), 5) return def test_get_default_all_list(self): # List all entities in current collection u = entitydata_list_all_url("testcoll", list_id="Default_list_all") + "?continuation_url=/xyzzy/" r = self.client.get(u) self.assertEqual(r.status_code, 200) self.assertEqual(r.reason_phrase, "OK") list_label = "List entities with type information" list_title = "List entities with type information - Collection testcoll" self.assertContains(r, "<title>%s</title>"%list_title, html=True) self.assertContains(r, '<h2 class="page-heading">%s</h2>'%list_label, html=True) self.assertMatch(r.content, r'<input.type="hidden".name="continuation_url".+value="/xyzzy/"/>') # log.info(r.content) #@@ cont = uri_params({"continuation_url": u}) tooltip1 = "" # 'title="%s"'%r.context['fields'][0]['field_help'] tooltip2 = "" # 'title="%s"'%r.context['fields'][1]['field_help'] tooltip3 = "" # 'title="%s"'%r.context['fields'][2]['field_help'] rowdata = """ <div class="tbody row select-row"> <div class="small-1 columns"> <input type="checkbox" class="select-box right" name="entity_select" value="testtype/entity1" /> </div> <div class="small-11 columns"> <div class="row view-listrow"> <div class="view-value small-3 columns" %(tooltip1)s> <a href="%(base)s/c/testcoll/d/testtype/entity1/%(cont)s">entity1</a> </div> <div class="view-value small-2 columns" %(tooltip2)s> <a href="/testsite/c/testcoll/d/_type/testtype/%(cont)s">RecordType testcoll/_type/testtype</a> </div> <div class="view-value small-7 columns" %(tooltip3)s> <span>Entity testcoll/testtype/entity1</span> </div> </div> </div> </div> """%( { 'base': TestBasePath , 'cont': cont , 'tooltip1': tooltip1 , 'tooltip2': tooltip2 , 'tooltip3': tooltip3 } ) # log.info(r.content) # log.info(r.context["fields"]) # log.info(r.context["List_rows"]) self.assertContains(r, rowdata, html=True) # Test context self.assertEqual(r.context['title'], list_title) self.assertEqual(r.context['heading'], list_label) self.assertEqual(r.context['coll_id'], "testcoll") self.assertEqual(r.context['type_id'], None) self.assertEqual(r.context['continuation_url'], "/xyzzy/") list_choices = r.context['list_choices'] self.assertEqual(set(list_choices.options), set(self.list_ids)) self.assertEqual(list_choices['field_value'], "Default_list_all") # Unbound field descriptions head_fields = context_list_head_fields(r.context) self.assertEqual(len(head_fields), 1) # One row of 3 cols.. 
self.assertEqual(len(head_fields[0].description['row_field_descs']), 3) f0 = context_view_field(r.context, 0, 0) f1 = context_view_field(r.context, 0, 1) f2 = context_view_field(r.context, 0, 2) self.assertEqual(f0.field_id, 'Entity_id') self.assertEqual(f1.field_id, 'Entity_type') self.assertEqual(f2.field_id, 'Entity_label') # Entities and bound fields # log.info(entities) #@@ entities = context_list_entities(r.context) self.assertEqual(len(entities), 6) entity_fields = ( [ {'entity_type_id': "_type", 'annal:id': "testtype", 'rdfs:label': "RecordType testcoll/_type/testtype"} , {'entity_type_id': "_type", 'annal:id': "testtype2", 'rdfs:label': "RecordType testcoll/_type/testtype2"} , {'entity_type_id': "testtype", 'annal:id': "entity1", 'rdfs:label': "Entity testcoll/testtype/entity1"} , {'entity_type_id': "testtype", 'annal:id': "entity2", 'rdfs:label': "Entity testcoll/testtype/entity2"} , {'entity_type_id': "testtype", 'annal:id': "entity3", 'rdfs:label': "Entity testcoll/testtype/entity3"} , {'entity_type_id': "testtype2", 'annal:id': "entity4", 'rdfs:label': "Entity testcoll/testtype2/entity4"} ]) field_keys = ('annal:id', 'entity_type_id', 'rdfs:label') for eid in range(6): item_fields = context_list_item_fields(r.context, entities[eid]) for fid in range(3): item_field = item_fields[fid] head_field = head_fields[0].description['row_field_descs'][fid] # Check that row field descriptions match corresponding heading feld descriptions for fkey in ( 'field_id', 'field_name', 'field_label', 'field_property_uri', 'field_render_type', 'field_placement', 'field_value_type'): self.assertEqual(item_field.description[fkey], head_field[fkey]) # Check row field values fkey = field_keys[fid] self.assertEqual(item_field['field_value'], entity_fields[eid][fkey]) self.assertEqual(item_field['entity_type_id'], entity_fields[eid]['entity_type_id']) return def test_get_default_all_scope_all_list(self): # List all entities in current collection and site-wide # This repeats parts of the previous test but with scope='all' u = entitydata_list_all_url( "testcoll", list_id="Default_list_all", scope="all", continuation_url="/xyzzy/" ) r = self.client.get(u) self.assertEqual(r.status_code, 200) self.assertEqual(r.reason_phrase, "OK") list_label = "List entities with type information" list_title = "List entities with type information - Collection testcoll" self.assertContains(r, "<title>%s</title>"%list_title, html=True) self.assertContains(r, '<h2 class="page-heading">%s</h2>'%list_label, html=True) # Test context self.assertEqual(r.context['title'], list_title) self.assertEqual(r.context['heading'], list_label) self.assertEqual(r.context['coll_id'], "testcoll") self.assertEqual(r.context['type_id'], None) list_choices = r.context['list_choices'] self.assertEqual(set(list_choices.options), set(self.list_ids)) self.assertEqual(list_choices['field_value'], "Default_list_all") # Unbound field descriptions head_fields = context_list_head_fields(r.context) self.assertEqual(len(head_fields), 1) # One row of 3 cols.. 
self.assertEqual(len(head_fields[0].description['row_field_descs']), 3) f0 = context_view_field(r.context, 0, 0) f1 = context_view_field(r.context, 0, 1) f2 = context_view_field(r.context, 0, 2) self.assertEqual(f0.field_id, 'Entity_id') self.assertEqual(f1.field_id, 'Entity_type') self.assertEqual(f2.field_id, 'Entity_label') # Entities and bound fields entities = context_list_entities(r.context) # listed_entities = { e['entity_id']: e for e in entities } # for eid in listed_entities: # print "@@ eid %s"%(eid) self.assertEqual(len(entities), num_testcoll_all_entities_scope_all) # Will change with site data return def test_get_types_list(self): # List types in current collection u = entitydata_list_type_url( "testcoll", "_type", list_id="Type_list", scope=None ) r = self.client.get(u) self.assertEqual(r.status_code, 200) self.assertEqual(r.reason_phrase, "OK") # log.info(r.content) #@@ list_label = "Entity types" list_title = "Entity types - Collection testcoll" self.assertContains(r, "<title>%s</title>"%list_title, html=True) self.assertContains(r, '<h2 class="page-heading">%s</h2>'%list_label, html=True) # Test context self.assertEqual(r.context['title'], list_title) self.assertEqual(r.context['heading'], list_label) self.assertEqual(r.context['coll_id'], "testcoll") self.assertEqual(r.context['type_id'], "_type") # Fields head_fields = context_list_head_fields(r.context) self.assertEqual(len(head_fields), 1) # One row of 3 cols.. self.assertEqual(len(head_fields[0].description['row_field_descs']), 3) f0 = context_view_field(r.context, 0, 0) f1 = context_view_field(r.context, 0, 1) f2 = context_view_field(r.context, 0, 2) # 1st field self.assertEqual(f0.field_id, 'Entity_id') self.assertEqual(f0.field_name, 'entity_id') # 2nd field self.assertEqual(f1.field_id, 'Type_uri') self.assertEqual(f1.field_name, 'Type_uri') # 3rd field self.assertEqual(f2.field_id, 'Entity_label') self.assertEqual(f2.field_name, 'Entity_label') # Entities entities = context_list_entities(r.context) listed_entities = { e['entity_id']: e for e in entities } # self.assertIn('_initial_values', listed_entities) type_entities = {"testtype", "testtype2"} self.assertEqual(set(listed_entities.keys()), type_entities) return def test_get_types_scope_all_list(self): # List types in current collection and site-wide u = entitydata_list_type_url( "testcoll", "_type", list_id="Type_list", scope="all" ) r = self.client.get(u) self.assertEqual(r.status_code, 200) self.assertEqual(r.reason_phrase, "OK") # log.info(r.content) #@@ list_label = "Entity types" list_title = "Entity types - Collection testcoll" self.assertContains(r, "<title>%s</title>"%list_title, html=True) self.assertContains(r, '<h2 class="page-heading">%s</h2>'%list_label, html=True) # Test context self.assertEqual(r.context['title'], list_title) self.assertEqual(r.context['heading'], list_label) self.assertEqual(r.context['coll_id'], "testcoll") self.assertEqual(r.context['type_id'], "_type") # Fields head_fields = context_list_head_fields(r.context) self.assertEqual(len(head_fields), 1) # One row of 2 cols.. 
self.assertEqual(len(head_fields[0].description['row_field_descs']), 3) f0 = context_view_field(r.context, 0, 0) f1 = context_view_field(r.context, 0, 1) f2 = context_view_field(r.context, 0, 2) # 1st field self.assertEqual(f0.field_id, 'Entity_id') self.assertEqual(f0.field_name, 'entity_id') # 2nd field self.assertEqual(f1.field_id, 'Type_uri') self.assertEqual(f1.field_name, 'Type_uri') # 3rd field self.assertEqual(f2.field_id, 'Entity_label') self.assertEqual(f2.field_name, 'Entity_label') # Entities entities = context_list_entities(r.context) listed_entities = { e['entity_id']: e for e in entities } type_entities = get_site_types() | {"testtype", "testtype2"} self.assertEqual(set(listed_entities.keys()), type_entities) return def test_get_fields_list(self): # List fields in current collection u = entitydata_list_type_url( "testcoll", layout.FIELD_TYPEID, list_id="Field_list", scope="all", continuation_url="/xyzzy/" ) r = self.client.get(u) self.assertEqual(r.status_code, 200) self.assertEqual(r.reason_phrase, "OK") # log.info(r.content) #@@ list_label = "Field definitions" list_title = "Field definitions - Collection testcoll" self.assertContains(r, "<title>%s</title>"%list_title, html=True) self.assertContains(r, '<h2 class="page-heading">%s</h2>'%list_label, html=True) # Test context self.assertEqual(r.context['title'], list_title) self.assertEqual(r.context['heading'], list_label) self.assertEqual(r.context['coll_id'], "testcoll") self.assertEqual(r.context['type_id'], layout.FIELD_TYPEID) self.assertEqual(r.context['continuation_url'], "/xyzzy/") list_choices = r.context['list_choices'] self.assertEqual(set(list_choices.options), set(self.list_ids)) self.assertEqual(list_choices['field_value'], "Field_list") # Fields field_entities = ( { ('Entity_id', "EntityId", "annal:EntityRef", "Id") , ('Coll_comment', "Markdown", "annal:Richtext", "Collection metadata") , ('Coll_parent', "Enum_choice_opt", "annal:EntityRef", "Parent") , ('Coll_software_version', "Showtext", "annal:Text", "S/W version") , ('Entity_type', "EntityTypeId", "annal:EntityRef", "Type") , ('Entity_label', "Text", "annal:Text", "Label") , ('Field_help', "Markdown", "annal:Richtext", "Help") , ('Field_placement', "Placement", "annal:Placement", "Position/size") , ('Field_render_type', "Enum_choice", "annal:EntityRef", "Render type") , ('Field_value_mode', "Enum_choice", "annal:EntityRef", "Value mode") , ('Field_value_type', "Identifier", "annal:Identifier", "Value type") , ('Field_entity_type', "Identifier", "annal:Identifier", "Entity type") , ('Field_default', "Text", "annal:Text", "Default value") , ('Field_typeref', "Enum_optional", "annal:EntityRef", "Refer to type") , ('Field_restrict', "Text", "annal:Text", "Value restriction") , ('List_comment', "Markdown", "annal:Richtext", "Help") , ('List_default_type', "Enum_optional", "annal:Type", "Default type") , ('List_default_view', "Enum_optional", "annal:View", "Default view") , ('Type_label', "Text", "annal:Text", "Label") , ('Type_comment', "Markdown", "annal:Richtext", "Comment") , ('Type_uri', "Identifier", "annal:Identifier", "Type URI") , ('List_choice', "Enum_choice", "annal:EntityRef", "List view") , ('View_choice', "View_choice", "annal:EntityRef", "Choose view") , ('Group_field_sel', "Enum_optional", "annal:EntityRef", "Field ref") }) check_field_list_context_fields(self, r, field_entities) return def test_get_fields_list_no_continuation(self): u = entitydata_list_type_url( "testcoll", layout.FIELD_TYPEID, list_id="Field_list", scope="all", 
query_params={"foo": "bar"} ) r = self.client.get(u) self.assertEqual(r.status_code, 200) self.assertEqual(r.reason_phrase, "OK") list_label = "Field definitions" list_title = "Field definitions - Collection testcoll" self.assertContains(r, "<title>%s</title>"%list_title, html=True) self.assertContains(r, '<h2 class="page-heading">%s</h2>'%list_label, html=True) curi = continuation_params_url(u) field_params = ( { 'base': TestBasePath , 'list_base': urlparse(u).path.rstrip("/") , 'cont': uri_params({"continuation_url": curi}) , 'tooltip1': "" # 'title="%s"'%r.context['fields'][0]['field_help'] , 'tooltip2': "" # 'title="%s"'%r.context['fields'][1]['field_help'] , 'tooltip3': "" # 'title="%s"'%r.context['fields'][2]['field_help'] , 'tooltip4': "" # 'title="%s"'%r.context['fields'][3]['field_help'] , 'field_typeid': layout.FIELD_TYPEID }) rowdata1 = """ <div class="tbody row select-row"> <div class="small-1 columns"> <input type="checkbox" class="select-box right" name="entity_select" value="%(field_typeid)s/Coll_comment" /> </div> <div class="small-11 columns"> <div class="view-listrow row"> <div class="view-value small-4 medium-3 columns" %(tooltip1)s> <a href="%(base)s/c/testcoll/d/%(field_typeid)s/Coll_comment/%(cont)s">Coll_comment</a> </div> <div class="view-value small-4 medium-3 columns" %(tooltip2)s> <a href="%(base)s/c/testcoll/d/_enum_render_type/Markdown/%(cont)s"> Markdown rich text </a> </div> <div class="view-value small-12 medium-3 columns show-for-medium-up" %(tooltip3)s> <span>annal:Richtext</span> </div> <div class="view-value small-4 medium-3 columns" %(tooltip4)s> <span>Collection metadata</span> </div> </div> </div> </div> """%field_params rowdata2 = """ <div class="tbody row select-row"> <div class="small-1 columns"> <input type="checkbox" class="select-box right" name="entity_select" value="%(field_typeid)s/Coll_parent" /> </div> <div class="small-11 columns"> <div class="view-listrow row"> <div class="view-value small-4 medium-3 columns" %(tooltip1)s> <a href="%(base)s/c/testcoll/d/%(field_typeid)s/Coll_parent/%(cont)s">Coll_parent</a> </div> <div class="view-value small-4 medium-3 columns" %(tooltip2)s> <a href="%(base)s/c/testcoll/d/_enum_render_type/Enum_choice_opt/%(cont)s"> Optional entity reference </a> </div> <div class="view-value small-12 medium-3 columns show-for-medium-up" %(tooltip3)s> <span>annal:EntityRef</span> </div> <div class="view-value small-4 medium-3 columns" %(tooltip4)s> <span>Parent</span> </div> </div> </div> </div> """%field_params rowdata3 = """ <div class="tbody row select-row"> <div class="small-1 columns"> <input type="checkbox" class="select-box right" name="entity_select" value="%(field_typeid)s/Coll_software_version" /> </div> <div class="small-11 columns"> <div class="view-listrow row"> <div class="view-value small-4 medium-3 columns" %(tooltip1)s> <a href="%(base)s/c/testcoll/d/%(field_typeid)s/Coll_software_version/%(cont)s">Coll_software_version</a> </div> <div class="view-value small-4 medium-3 columns" %(tooltip2)s> <a href="%(base)s/c/testcoll/d/_enum_render_type/Showtext/%(cont)s">Display text</a> </div> <div class="view-value small-12 medium-3 columns show-for-medium-up" %(tooltip3)s> <span>annal:Text</span> </div> <div class="view-value small-4 medium-3 columns" %(tooltip4)s> <span>S/W version</span> </div> </div> </div> </div> """%field_params rowlinks = (""" <div class="row view-value-row"> <div class="link-bar small-12 columns"> <a href="entity_list.ttl?scope=all" title="Retrieve list data as Turtle"> Turtle </a> <a 
href="entity_list.jsonld?scope=all" title="Retrieve list data as JSON-LD"> JSON-LD </a> <a href="entity_list.jsonld?scope=all&amp;type=application/json" title="Display JSON list data"> <img src="/static/images/get_the_data_88x31.png" alt="get_the_data"> </a> </div> </div> """)%field_params # log.info("*** r.content: "+r.content) #@@ self.assertContains(r, rowdata1, html=True) self.assertContains(r, rowdata2, html=True) self.assertContains(r, rowdata3, html=True) self.assertContains(r, rowlinks, html=True) # Test context self.assertEqual(r.context['coll_id'], "testcoll") self.assertEqual(r.context['type_id'], layout.FIELD_TYPEID) self.assertEqual(r.context['continuation_url'], "") list_choices = r.context['list_choices'] self.assertEqual(set(list_choices.options), set(self.list_ids)) self.assertEqual(list_choices['field_value'], "Field_list") # Fields head_fields = context_list_head_fields(r.context) self.assertEqual(len(head_fields), 1) # One row of 4 cols.. self.assertEqual(len(head_fields[0].description['row_field_descs']), 4) return def test_get_fields_list_search(self): u = entitydata_list_type_url( "testcoll", layout.FIELD_TYPEID, list_id="Field_list", scope="all", continuation_url="/xyzzy/", query_params={"search": "Coll_"} ) r = self.client.get(u) self.assertEqual(r.status_code, 200) self.assertEqual(r.reason_phrase, "OK") list_label = "Field definitions" list_title = "Field definitions - Collection testcoll" self.assertContains(r, "<title>%s</title>"%list_title, html=True) self.assertContains(r, '<h2 class="page-heading">%s</h2>'%list_label, html=True) # Test context self.assertEqual(r.context['coll_id'], "testcoll") self.assertEqual(r.context['type_id'], layout.FIELD_TYPEID) self.assertEqual(r.context['continuation_url'], "/xyzzy/") self.assertEqual(r.context['search_for'], "Coll_") list_choices = r.context['list_choices'] self.assertEqual(set(list_choices.options), set(self.list_ids)) self.assertEqual(list_choices['field_value'], "Field_list") # Fields head_fields = context_list_head_fields(r.context) self.assertEqual(len(head_fields), 1) # One row of 4 cols.. 
self.assertEqual(len(head_fields[0].description['row_field_descs']), 4) f0 = context_view_field(r.context, 0, 0) f1 = context_view_field(r.context, 0, 1) f2 = context_view_field(r.context, 0, 2) f3 = context_view_field(r.context, 0, 3) self.assertEqual(f0.field_id, 'Entity_id') self.assertEqual(f1.field_id, 'Field_render_type') self.assertEqual(f2.field_id, 'Field_value_type') self.assertEqual(f3.field_id, 'Entity_label') # Entities entities = context_list_entities(r.context) self.assertEqual(len(entities), 7) field_entities = ( { ( "Coll_comment", "Markdown", "annal:Richtext", "Collection metadata" ) , ( "Coll_default_list_id", "Showtext", "annal:Text", "Default list" ) , ( "Coll_default_view_entity", "Showtext", "annal:Text", "Default view entity" ) , ( "Coll_default_view_id", "Showtext", "annal:Text", "Default view" ) , ( "Coll_default_view_type", "Showtext", "annal:Text", "Default view type" ) , ( "Coll_parent", "Enum_choice_opt", "annal:EntityRef", "Parent" ) , ( "Coll_software_version", "Showtext", "annal:Text", "S/W version" ) }) check_field_list_context_fields(self, r, field_entities) return def test_get_list_select_by_type(self): u = entitydata_list_type_url("testcoll", layout.FIELD_TYPEID, list_id=None) r = self.client.get(u) self.assertEqual(r.status_code, 200) self.assertEqual(r.reason_phrase, "OK") # Test context self.assertEqual(r.context['coll_id'], "testcoll") self.assertEqual(r.context['type_id'], layout.FIELD_TYPEID) list_choices = r.context['list_choices'] self.assertEqual(set(list_choices.options), set(self.list_ids)) self.assertEqual(list_choices['field_value'], "Field_list") return def test_get_list_no_collection(self): u = entitydata_list_type_url("no_collection", layout.FIELD_TYPEID, list_id="Field_list") r = self.client.get(u) self.assertEqual(r.status_code, 404) self.assertEqual(r.reason_phrase, "Not found") msg_text = make_message(message.COLLECTION_NOT_EXISTS, id="no_collection") self.assertContains(r, msg_text, status_code=404) return def test_get_list_no_type(self): u = entitydata_list_type_url("testcoll", "no_type", list_id="Field_list") with SuppressLogging(logging.WARNING): r = self.client.get(u) self.assertEqual(r.status_code, 404) self.assertEqual(r.reason_phrase, "Not found") msg_text = make_message(message.RECORD_TYPE_NOT_EXISTS, id="no_type") self.assertContains(r, msg_text, status_code=404) return def test_get_list_no_list(self): u = entitydata_list_type_url("testcoll", layout.FIELD_TYPEID, list_id="no_list") with SuppressLogging(logging.WARNING): r = self.client.get(u) self.assertEqual(r.status_code, 200) self.assertEqual(r.reason_phrase, "OK") msg_text = make_message(message.RECORD_LIST_NOT_EXISTS, id="no_list") self.assertContains(r, msg_text, status_code=200) return # ----------------------------------------------------------------------------- # Form response tests # ----------------------------------------------------------------------------- # -------- new -------- def test_post_new_type_entity(self): f = entitylist_form_data("new", list_id="Field_list") u = entitydata_list_type_url("testcoll", layout.FIELD_TYPEID, list_id="Field_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") v = entitydata_edit_url("new", "testcoll", layout.FIELD_TYPEID, view_id="Field_view") c = continuation_url_param(u) self.assertIn(v, r['location']) self.assertIn(c, r['location']) return def test_post_new_all_entity(self): # Also tests continuation_url parameter handling s = 
collection_view_url(coll_id="testcoll") f = entitylist_form_data("new", list_id="Field_list", continuation_url=s) u = entitydata_list_all_url("testcoll", list_id="Field_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") v = entitydata_edit_url( "new", "testcoll", layout.FIELD_TYPEID, view_id="Field_view" ) c = continuation_url_param(u, continuation_url_param(s)) self.assertIn(v, r['location']) self.assertIn(c, r['location']) return def test_post_new_type_entity_select_one(self): f = entitylist_form_data( "new", list_id="Field_list", entities=[layout.FIELD_TYPEID+"/field1"] ) u = entitydata_list_type_url("testcoll", layout.FIELD_TYPEID, list_id="Field_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") v = entitydata_edit_url( "new", "testcoll", layout.FIELD_TYPEID, view_id="Field_view" ) c = continuation_url_param(u) self.assertIn(v, r['location']) self.assertIn(c, r['location']) return def test_post_new_type_entity_select_many(self): f = entitylist_form_data( "new", list_id="Field_list", entities=[layout.FIELD_TYPEID+"/field1", "testtype/entity1", "testtype/entity2"] ) u = entitydata_list_type_url("testcoll", layout.FIELD_TYPEID, list_id="Field_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") e2 = "error_head=%s"%(uri_quote_param(message.INPUT_ERROR),) e3 = "error_message=" self.assertIn(u, r['location']) self.assertIn(e2, r['location']) self.assertIn(e3, r['location']) return # -------- copy -------- def test_post_copy_type_entity(self): # Also tests continuation_url parameter handling s = site_view_url() f = entitylist_form_data("copy", entities=[layout.FIELD_TYPEID+"/field1"], continuation_url=s) u = entitydata_list_type_url("testcoll", layout.FIELD_TYPEID, list_id="Field_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") v = entitydata_edit_url("copy", "testcoll", layout.FIELD_TYPEID, "field1") c = continuation_url_param(u, continuation_url_param(s)) self.assertIn(v, r['location']) self.assertIn(c, r['location']) return def test_post_copy_all_entity(self): f = entitylist_form_data("copy", entities=[layout.FIELD_TYPEID+"/field1"]) u = entitydata_list_all_url("testcoll", list_id="Field_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") v = entitydata_edit_url("copy", "testcoll", layout.FIELD_TYPEID, "field1") c = continuation_url_param(u) self.assertIn(v, r['location']) self.assertIn(c, r['location']) return def test_post_copy_type_entity_select_other(self): f = entitylist_form_data("copy", entities=["testtype/entity1"]) u = entitydata_list_type_url("testcoll", layout.FIELD_TYPEID, list_id="Field_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") v = entitydata_edit_url("copy", "testcoll", "testtype", "entity1") c = continuation_url_param(u) self.assertIn(v, r['location']) self.assertIn(c, r['location']) return def test_post_copy_type_entity_select_none(self): f = entitylist_form_data("copy") u = entitydata_list_type_url("testcoll", layout.FIELD_TYPEID, list_id="Field_list") r = self.client.post(u, f) 
self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") c = "continuation_url" e2 = "error_head=%s"%(uri_quote_param(message.INPUT_ERROR),) e3 = "error_message=" self.assertIn(u, r['location']) self.assertIn(e2, r['location']) self.assertIn(e3, r['location']) self.assertNotIn(c, r['location']) return def test_post_copy_type_entity_select_many(self): f = entitylist_form_data( "copy", entities=[layout.FIELD_TYPEID+"/field1", "testtype/entity1", "testtype/entity2"] ) u = entitydata_list_type_url("testcoll", "testtype") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") e2 = "error_head=%s"%(uri_quote_param(message.INPUT_ERROR),) e3 = "error_message=" self.assertIn(u, r['location']) self.assertIn(e2, r['location']) self.assertIn(e3, r['location']) return def test_post_copy_type_entity_no_login(self): self.client.logout() f = entitylist_form_data("copy", entities=[layout.FIELD_TYPEID+"/field1"]) u = entitydata_list_type_url("testcoll", layout.FIELD_TYPEID, list_id="Field_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 401) self.assertEqual(r.reason_phrase, "Unauthorized") return # -------- edit -------- def test_post_edit_type_entity(self): f = entitylist_form_data("edit", entities=[layout.FIELD_TYPEID+"/field1"]) u = entitydata_list_type_url("testcoll", layout.FIELD_TYPEID, list_id="Field_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") c = continuation_url_param(u) v = entitydata_edit_url("edit", "testcoll", layout.FIELD_TYPEID, "field1") self.assertIn(v, r['location']) self.assertIn(c, r['location']) return def test_post_edit_all_entity(self): f = entitylist_form_data("edit", entities=[layout.FIELD_TYPEID+"/field1"]) u = entitydata_list_all_url("testcoll", list_id="Field_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") v = entitydata_edit_url("edit", "testcoll", layout.FIELD_TYPEID, "field1") c = continuation_url_param(u) self.assertIn(v, r['location']) self.assertIn(c, r['location']) return def test_post_edit_type_entity_select_none(self): f = entitylist_form_data("edit") u = entitydata_list_type_url("testcoll", layout.FIELD_TYPEID, list_id="Field_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") e2 = "error_head=%s"%(uri_quote_param(message.INPUT_ERROR),) e3 = "error_message=" self.assertIn(u, r['location']) self.assertIn(e2, r['location']) self.assertIn(e3, r['location']) return def test_post_edit_type_entity_select_many(self): f = entitylist_form_data( "edit", entities=[layout.FIELD_TYPEID+"/field1", "testtype/entity1", "testtype/entity2"] ) u = entitydata_list_type_url("testcoll", layout.FIELD_TYPEID, list_id="Field_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") e2 = "error_head=%s"%(uri_quote_param(message.INPUT_ERROR),) e3 = "error_message=" self.assertIn(u, r['location']) self.assertIn(e2, r['location']) self.assertIn(e3, r['location']) return def test_post_edit_type_entity_no_login(self): self.client.logout() f = entitylist_form_data("edit", entities=[layout.FIELD_TYPEID+"/field1"]) u = entitydata_list_type_url("testcoll", 
layout.FIELD_TYPEID, list_id="Field_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 401) self.assertEqual(r.reason_phrase, "Unauthorized") return # -------- delete -------- def test_post_delete_type_entity(self): testtypedelete = RecordType.create(self.testcoll, "testtypedelete", recordtype_create_values("testcoll", "testtypedelete")) testdatadelete = RecordTypeData.create(self.testcoll, "testtypedelete", {}) f = entitylist_form_data("delete", entities=["_type/testtypedelete"]) u = entitydata_list_type_url("testcoll", "_type", list_id="Type_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 200) self.assertEqual(r.reason_phrase, "OK") self.assertContains(r, "<h3>%s</h3>"%message.CONFIRM_REQUESTED_ACTION) msg_text = make_message(message.REMOVE_ENTITY_DATA, type_id="_type", id="testtypedelete" ) self.assertContains(r, msg_text + ": " + message.ARE_YOU_SURE) self.assertContains(r, message.CONFIRM_OR_CANCEL) self.assertContains(r, '<input type="hidden" name="confirmed_action" value="/testsite/c/testcoll/d/_type/!delete_confirmed"/>', html=True ) self.assertEqual(r.context['action_description'], msg_text) self.assertEqual(r.context['confirmed_action'], '/testsite/c/testcoll/d/_type/!delete_confirmed') self.assertEqual(r.context['action_params'], confirm_delete_params(button_id="entity_delete", entity_id="testtypedelete", type_id="_type", list_id="Type_list") ) self.assertEqual(r.context['cancel_action'], '/testsite/c/testcoll/l/Type_list/_type/') return def test_post_delete_type_not_exists(self): f = entitylist_form_data("delete", entities=["_type/sitetype"]) u = entitydata_list_all_url("testcoll", list_id="Type_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") l = r['location'] e2 = "error_head=" + uri_quote_param(message.INPUT_ERROR) e3 = "error_message=" + make_quoted_message(message.CANNOT_DELETE_ENTITY, type_id="_type", id="sitetype" ) self.assertIn(u, r['location']) self.assertIn(e2, r['location']) self.assertIn(e3, r['location']) return def test_post_delete_type_entity_with_values(self): f = entitylist_form_data("delete", entities=["_type/testtype"]) u = entitydata_list_type_url("testcoll", "_type", list_id="Type_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") e2 = "error_head=" + uri_quote_param(message.INPUT_ERROR) e3 = "error_message=" + make_quoted_message(message.TYPE_VALUES_FOR_DELETE, type_id="_type", id="testtype" ) self.assertIn(u, r['location']) self.assertIn(e2, r['location']) self.assertIn(e3, r['location']) return def test_post_delete_site_entity(self): f = entitylist_form_data("delete", entities=[layout.FIELD_TYPEID+"/Field_comment"]) u = entitydata_list_type_url("testcoll", layout.FIELD_TYPEID, list_id="Field_list") # log.info("entitydata_list_all_url: %s"%u) r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") e2 = "error_head=" + uri_quote_param(message.INPUT_ERROR) e3 = "error_message=" + make_quoted_message(message.CANNOT_DELETE_ENTITY, type_id=layout.FIELD_TYPEID, id="Field_comment" ) self.assertIn(u, r['location']) self.assertIn(e2, r['location']) self.assertIn(e3, r['location']) return # -------- close / search / view / default-view / customize-------- def test_post_close(self): # 'Close' button on list view c = "/xyzzy/" f = entitylist_form_data("close", entities=["testtype/entity1", "testtype/entity2"], 
continuation_url=c) u = entitydata_list_type_url("testcoll", "testtype") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") self.assertEqual(c, r['location']) return def test_post_close_no_continuation(self): # 'Close' button on list view with no continuation URI given in form f = entitylist_form_data("close", entities=["testtype/entity1", "testtype/entity2"], continuation_url="" ) u = entitydata_list_type_url("testcoll", "testtype") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") v = collection_view_url(coll_id="testcoll") self.assertEqual(v, r['location']) return def test_post_view_list(self): # 'View' button on list view: change displayed list f = entitylist_form_data("list_type", list_id="View_list") u = entitydata_list_type_url("testcoll", "_type", list_id="Type_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") v = entitydata_list_type_url("testcoll", "_type", list_id="View_list") c = continuation_url_param(collection_view_url("testcoll")) self.assertIn(v, r['location']) self.assertIn(c, r['location']) return def test_post_view_all_list(self): # 'View' button on list view: change displayed list f = entitylist_form_data("list_type", list_scope_all="all", list_id="View_list") u = entitydata_list_type_url("testcoll", "_type", list_id="Type_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") v = entitydata_list_type_url("testcoll", "_type", list_id="View_list") c = continuation_url_param(collection_view_url("testcoll")) self.assertIn(v, r['location']) self.assertIn(c, r['location']) return def test_post_view_search(self): # Redisplay list with entries matching search string f = entitylist_form_data("list_type", search="search&term", continuation_url="/xyzxy/") u = entitydata_list_type_url("testcoll", "testtype") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") v = entitydata_list_type_url( "testcoll", "testtype", list_id="Default_list" ) c = continuation_url_param("/xyzxy/") s = "search=search%26term" self.assertIn(v, r['location']) self.assertIn(c, r['location']) self.assertIn(s, r['location']) # Note: Search rendering tested by test_get_fields_list_search above return def test_post_view_all_search(self): # Redisplay list with entries matching search string f = entitylist_form_data( "list_type", list_scope_all="all", search="search&term", continuation_url="/xyzxy/" ) u = entitydata_list_type_url("testcoll", "testtype") r = self.client.post(u, f) self.assertEqual(r.status_code, 302) self.assertEqual(r.reason_phrase, "Found") self.assertEqual(r.content, b"") v = entitydata_list_type_url( "testcoll", "testtype", list_id="Default_list" ) c = continuation_url_param("/xyzxy/") s = "search=search%26term" self.assertIn(v, r['location']) self.assertIn(c, r['location']) self.assertIn(s, r['location']) # Note: Search rendering tested by test_get_fields_list_search above return def test_post_view_no_type(self): # Redisplay list with entries matching search string f = entitylist_form_data("list_all", list_id="Type_list") u = entitydata_list_all_url("testcoll", list_id="Default_list") r = self.client.post(u, f) self.assertEqual(r.status_code, 
302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        v = entitydata_list_all_url("testcoll", list_id="Type_list")
        c = continuation_url_param(collection_view_url("testcoll"))
        self.assertIn(v, r['location'])
        self.assertIn(c, r['location'])
        return

    def test_post_default_list(self):
        # This button makes the current list view default for the collection
        f = entitylist_form_data("default_view", list_id="View_list")
        u = entitydata_list_type_url("testcoll", "_type", list_id="Type_list")
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        v = entitydata_list_type_url("testcoll", "_type", list_id="Type_list")
        c = continuation_url_param(collection_view_url("testcoll"))
        h = "info_head=" + uri_quote_param(message.ACTION_COMPLETED)
        m = "info_message=" + make_quoted_message(message.DEFAULT_LIST_UPDATED,
            list_id="Type_list"
            )
        self.assertIn(v, r['location'])
        self.assertIn(c, r['location'])
        self.assertIn(h, r['location'])
        self.assertIn(m, r['location'])
        return

    def test_post_customize(self):
        f = entitylist_form_data("customize", continuation_url="/xyzxy/")
        u = entitydata_list_all_url("testcoll")
        r = self.client.post(u, f)
        self.assertEqual(r.status_code, 302)
        self.assertEqual(r.reason_phrase, "Found")
        self.assertEqual(r.content, b"")
        v = collection_edit_url("testcoll")
        c = continuation_url_param(u, continuation_url_param("/xyzxy/"))
        self.assertIn(v, r['location'])
        self.assertIn(c, r['location'])
        return

# End.
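# ----------------------------------------------------------------------------
# Illustrative sketch (assumption, not part of the original test module): the
# form-response tests above repeat one pattern -- POST a form, expect a 302
# redirect, then check that selected fragments (target URL, continuation
# parameter, error_head/error_message parameters) appear in the Location
# header.  A hypothetical mixin such as the one below could factor that
# pattern out; the name `RedirectCheckMixin` and its method are invented here
# for illustration only.
# ----------------------------------------------------------------------------

class RedirectCheckMixin(object):

    def check_redirect(self, response, *fragments):
        """Assert a 302 'Found' redirect whose Location holds all fragments."""
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.reason_phrase, "Found")
        self.assertEqual(response.content, b"")
        for fragment in fragments:
            self.assertIn(fragment, response['location'])

# Example use (hypothetical), mirroring test_post_copy_all_entity above:
#
#     f = entitylist_form_data("copy", entities=[layout.FIELD_TYPEID+"/field1"])
#     u = entitydata_list_all_url("testcoll", list_id="Field_list")
#     r = self.client.post(u, f)
#     self.check_redirect(r,
#         entitydata_edit_url("copy", "testcoll", layout.FIELD_TYPEID, "field1"),
#         continuation_url_param(u)
#         )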
from django.db import models, connection
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify

from scipy_central.person.models import User
from scipy_central.utils import rest_help_extra

rest_help = ('Let the community know what your submission does, how it solves '
             'the problem, and/or how it works. ') + rest_help_extra

pep8_help = """Please follow <a target="_blank" href="http://www.python.org/dev/peps/pep-0008/">PEP 8 guidelines</a>
<ul>
<li>No more than 80 characters per row</li>
<li>Please use spaces and not tabs</li>
<li>Use 4 spaces to indent (use 2 if you must)</li>
<li>Spaces around arithmetic operators</li>
<li>Comments in the code should supplement your summary</li>
</ul>"""

class Module(models.Model):
    """ Modules that code snippets and code libraries depend on,
    i.e. code dependencies
    """
    name = models.CharField(max_length=100, unique=True)
    website = models.URLField()

class License(models.Model):
    """ License for the submission """
    name = models.CharField(max_length=255,
                            help_text="License used for code submissions.")
    slug = models.SlugField(max_length=255, editable=False)
    description = models.TextField(help_text="Short description of license")
    text_template = models.TextField(help_text=('Full license text with '
                                                'template fields'))

    def __unicode__(self):
        return self.name

    class Meta:
        permissions = (("can_edit", "Can edit this license"),)

class SubmissionManager(models.Manager):
    def create_without_commit(self, **kwargs):
        """
        Uses code from django.db.models.create(...) to create a new instance
        without writing to the database.

        To save the object to the database (presumably after validating it,
        or doing some other checks), you can call the ``obj.save()`` method.
        E.g.:

            obj = models.Submission.objects.create_without_commit(....)
            ...
            ...
            obj.save(force_insert=True, using=models.Submission.objects.db)
            # or
            obj.save(force_insert=True)
        """
        qs = self.get_query_set()
        obj = qs.model(**kwargs)
        self._for_write = True
        #obj.save(force_insert=True, using=self.db)
        return obj

class Submission(models.Model):
    """
    A single model for all submission types. Most of the information is
    stored in the ``Revision`` for the submission, allowing us to store a
    history of the submission in consecutive revisions.
""" objects = SubmissionManager() # Submission type SUBMISSION_TYPE = ( ('snippet', 'Code snippet'), ('package', 'Code library/package'), ('link', 'Remote link'), ) sub_type = models.CharField(max_length=10, choices=SUBMISSION_TYPE, help_text = 'Your submission should be one of 3 types') # Original submitter created_by = models.ForeignKey(User, null=True, blank=True) date_created = models.DateTimeField(auto_now_add=True, editable=False) # fileset: for revisioning of the submission fileset = models.ForeignKey('filestorage.FileSet', null=True, blank=True) # frozen: no further revisions allowed (not used at the moment) frozen = models.BooleanField(default=False) # For future use: inspired_by = models.ManyToManyField('self', null=True, blank=True) @property def last_revision(self): try: return self.revisions.order_by('-date_created')[0] except (KeyError, IndexError): return None @property def num_revisions(self): return self.revisions.count() @property def slug(self): try: return self.last_revision.slug except AttributeError: return '' def __unicode__(self): return self.slug def get_absolute_url(self): """ I can't seem to find a way to use the "reverse" or "permalink" functions to create this URL: do it manually, to match ``urls.py`` """ return reverse('spc-view-item', args=[0]).rstrip('0') + \ '%d/%d/%s' % (self.pk, self.last_revision.rev_id+1, self.slug) class RevisionManager(models.Manager): def create_without_commit(self, **kwargs): """ Uses code from django.db.models.create(...) to create a new instance without writing to the database. To save the object to the database (presumably after validating it, or doing some other checks), you can call the ``obj.save()`` method. E.g.: obj = models.Revision.objects.create_without_commit(....) ... ... obj.save(force_insert=True, using=models.Revision.objects.db) # or obj.save(force_insert=True) """ qs = self.get_query_set() obj = qs.model(**kwargs) self._for_write = True #obj.save(force_insert=True, using=self.db) return obj def all(self): return self.filter(is_displayed=True) def absolutely_all(self): return super(RevisionManager, self).all() def most_recent(self): """Most recent revisions only""" is_displayed = 1 if connection.vendor == 'sqlite' else 'true' return self.extra(where=[ "submission_revision.id = " " (SELECT id FROM submission_revision AS __sr_2 " " WHERE (__sr_2.entry_id = submission_revision.entry_id " " AND __sr_2.is_displayed = {0}) " " ORDER BY __sr_2.date_created DESC LIMIT 1)".format(is_displayed)]) def top_authors(self): """ From BSD licensed code: http://github.com/coleifer/djangosnippets.org/blob/master/cab/models.py """ return User.objects.annotate(score=models.Count('revision'))\ .order_by('-score', 'username') class Revision(models.Model): objects = RevisionManager() # The submission: parent item for this revision entry = models.ForeignKey(Submission, related_name="revisions") # user-provided submission title. title = models.CharField(max_length=150, verbose_name='Provide a title for your submission') # auto-created slug field slug = models.SlugField(max_length=155, editable=False) # Created on date_created = models.DateTimeField(auto_now_add=True, editable=False) # Users that created this submission created_by = models.ForeignKey(User, null=True, blank=True) # Submission license. Only used for code packages. Code snippets are # always CC0 licensed, and external links must list their own license. # There are just too many licenses out there for us to track them all. 
# We don't expect people to be using this site to screen for code based # on license. Use Google for that. # The only choices right now are CC0 and simplified-BSD. sub_license = models.ForeignKey(License, null=True, blank=True, verbose_name="Choose a license for your submission", help_text='<a href="/licenses">More on licenses</a>') # User-provided description of the submission. Uses reStructuredText. # Is blank for URL (link) submissions. description = models.TextField(verbose_name='Describe and explain your submission', help_text=rest_help) # HTML version of the ReST ``description`` field # Auto created field description_html = models.TextField() # Code snippet hash (will use ssdeep later on: 57 characters) hash_id = models.CharField(max_length=60, null=True, blank=True, editable=False) # For snippet submissions item_code = models.TextField(null=True, blank=True, verbose_name='Copy and paste code snippet', help_text=pep8_help) # For link-type submissions item_url = models.URLField(verbose_name="Link to resource", blank=True, null=True, help_text=("Link to the code's website, documentation, or " 'publication (<a target="_blank" href="http://en.' 'wikipedia.org/wiki/Digital_object_identifier">' 'DOI preferred</a>)'), max_length=255) # Tags for this revision tags = models.ManyToManyField('tagging.Tag', through='TagCreation') # Should this revision be displayed? One might decide to remove # revision from display if they violate licenses, or are improper in # some way. # Also set False for revision by users that submit when not yet # authenticated. is_displayed = models.BooleanField(default=False) # Validation hash validation_hash = models.CharField(max_length=40, null=True, blank=True) # For future user: list of modules required to run the code modules_used = models.ManyToManyField(Module, blank=True, null=True) # For future use: revision reason update_reason = models.CharField(max_length=155, null=True, blank=True) # users can only comment if set to True enable_comments = models.BooleanField(default=True) class Meta: ordering = ['date_created'] def __unicode__(self): return self.title[0:50] + '::' + str(self.created_by.username) @property def rev_id(self): """ Determines which revision of the submission this is, given the ``revision`` object. 
""" return list(self.entry.revisions.absolutely_all()).index(self) @property def rev_id_human(self): return self.rev_id + 1 @property def previous_submission(self): n = 1 sub = Submission.objects.all().filter(pk=self.entry.pk-n) if len(sub): while sub[0].last_revision.is_displayed==False: n += 1 sub = Submission.objects.all().filter(pk=self.entry.pk-n) if len(sub): continue else: return None return sub[0].get_absolute_url() else: return None @property def next_submission(self): n = 1 sub = Submission.objects.all().filter(pk=self.entry.pk+n) if len(sub): while sub[0].last_revision.is_displayed==False: n += 1 sub = Submission.objects.all().filter(pk=self.entry.pk+n) if len(sub): continue else: return None return sub[0].get_absolute_url() else: return None @property def previous_revision(self): all_revs = list(self.entry.revisions.absolutely_all()) try: if all_revs.index(self)-1 >= 0: return all_revs[all_revs.index(self)-1] else: return None except ValueError: # Happens when previewing a submission before submitting it return None @property def next_revision(self): all_revs = list(self.entry.revisions.absolutely_all()) try: if all_revs.index(self)+1 >= len(all_revs): return None else: return all_revs[all_revs.index(self)+1] except ValueError: # Happens when previewing a submission before submitting it # (the template calls on self.next_revision) return None @property def human_revision_string(self): """ Returns the revision information in a helpful way """ try: return 'Revision %d of %d' % (self.rev_id+1, self.entry.num_revisions) except ValueError: # self.rev_id is not available when entering a new submission return 'Revision information not available yet' @property def short_human_revision_string(self): """ Returns the revision information in a helpful way """ if self.rev_id == 0: return '' else: return 'revision&nbsp;%d' % (self.rev_id+1) def save(self, *args, **kwargs): """ Override the model's saving function to create the slug """ # http://docs.djangoproject.com/en/dev/topics/db/models/ #overriding-predefined-model-methods self.slug = slugify(self.title) # Call the "real" save() method. super(Revision, self).save(*args, **kwargs) def get_absolute_url(self): """ I can't seem to find a way to use the "reverse" or "permalink" functions to create this URL: do it manually, to match ``urls.py`` """ return reverse('spc-view-item', args=[0]).rstrip('0') + \ '%d/%d/%s' % (self.entry.pk, self.rev_id+1, self.slug) class TagCreation(models.Model): """ Tracks by whom and when tags were created """ created_by = models.ForeignKey(User) revision = models.ForeignKey(Revision) tag = models.ForeignKey('tagging.Tag') date_created = models.DateTimeField(auto_now_add=True, editable=False) def __unicode__(self): return self.tag.name
import cPickle import math import random import types import hlib import hlib.api import hlib.engine import hlib.events import lib.datalayer import hlib.log import games import games.color import games.settlers.board_def # pylint: disable-msg=F0401 import hruntime # @UnresolvedImport InactivePathError = lambda: games.GameError(msg = 'This path is not active right now', reply_status = 402) InactiveNodeError = lambda: games.GameError(msg = 'This node is not active right now', reply_status = 402) InactiveNumberError = lambda: games.GameError(msg = 'This number is not active right now', reply_status = 402) TooManyVillagesError = lambda: games.GameError(msg = 'You have too many villages', reply_status = 403) TooManyTownsError = lambda: games.GameError(msg = 'You have too many towns', reply_status = 403) TooManyPathsError = lambda: games.GameError(msg = 'You have too many paths', reply_status = 403) NotEnoughPointCardsError = lambda: games.GameError(msg = 'You can not use Point cards yet', reply_status = 402) class ApiBoard(hlib.api.ApiJSON): def __init__(self, b): super(ApiBoard, self).__init__(['fields', 'nodes', 'paths', 'ports']) f = lambda n: [_x.to_api() for _x in getattr(b, n).values()] self.fields = f('fields') self.nodes = f('nodes') self.paths = f('paths') self.ports = f('ports') class Resource(games.Resource): RESOURCE_SHEEP = 0 RESOURCE_WOOD = 1 RESOURCE_ROCK = 2 RESOURCE_GRAIN = 3 RESOURCE_CLAY = 4 RESOURCE_DESERT = 5 RESOURCE_MIN = 0 RESOURCE_MAX = 4 map_resource2str = {-2: 'free', -1: 'unknown', 0: 'sheep', 1: 'wood', 2: 'rock', 3: 'grain', 4: 'clay', 5: 'desert' } map_str2resource = {'free': -2, 'unknown': -1, 'sheep': 0, 'wood': 1, 'rock': 2, 'grain': 3, 'clay': 4, 'desert': 5 } class Resources(games.Resources): def __init__(self): games.Resources.__init__(self) self.sheep = 0 self.wood = 0 self.rock = 0 self.grain = 0 self.clay = 0 def __len__(self): # pylint: disable-msg=R0201 return 5 def __getitem__(self, key): if type(key) in [types.IntType, types.LongType]: key = Resource.map_resource2str[key] return getattr(self, key) def __setitem__(self, key, value): if type(key) == types.IntType: key = Resource.map_resource2str[key] setattr(self, key, value) def keys(self): # pylint: disable-msg=R0201 return ['wood', 'sheep', 'rock', 'grain', 'clay'] def values(self): return [self.sheep, self.wood, self.rock, self.grain, self.clay] def max(self): return max(self.values()) class Player(games.Player): def __init__(self, game, user): games.Player.__init__(self, game, user) self.resources = Resources() self.cards = hlib.database.IndexedMapping() self.mightest_chilvary = False self.longest_path = False self.first_village = None self.second_village = None self.vault = None def __getattr__(self, name): if name == 'points': p = 0 for n in self.game.board.get_used_nodes(player = self): if n.type == BoardNode.TYPE_VILLAGE: p += 1 else: p += 2 for c in self.cards.values(): if c.type == Card.TYPE_POINT and c.is_used: p += 1 if self.mightest_chilvary: p += 2 if self.longest_path: p += 2 return p if name == 'is_on_game': return self.game.type == Game.TYPE_GAME and self.is_on_turn if name == 'has_free_path': return len(self.game.board.get_used_paths(player = self)) < 15 if name == 'has_free_village': return len([n for n in self.game.board.get_used_nodes(player = self) if n.type == BoardNode.TYPE_VILLAGE]) < 5 if name == 'has_free_town': return len([n for n in self.game.board.get_used_nodes(player = self) if n.type == BoardNode.TYPE_TOWN]) < 4 if name == 'can_roll_dice': return self.is_on_turn and 
self.game.type in [Game.TYPE_PREPARE_DICE, Game.TYPE_PREPARE_KNIGHT] if name == 'can_pass': if not self.is_on_turn: return False if self.game.type in [Game.TYPE_FREE, Game.TYPE_FINISHED, Game.TYPE_CANCELED]: return False if self.game.type == Game.TYPE_PLACE_FIRST: if len(self.game.board.get_used_nodes(player = self)) != 1 or len(self.game.board.get_used_paths(player = self)) != 1: return False return True if self.game.type == Game.TYPE_PLACE_SECOND: if len(self.game.board.get_used_nodes(player = self)) != 2 or len(self.game.board.get_used_paths(player = self)) != 2: return False return True if self.game.type == Game.TYPE_GAME: return True return False return games.Player.__getattr__(self, name) def to_state(self): d = games.Player.to_state(self) d.update({ 'points': self.points, 'has_longest_path': self.longest_path, 'has_mightest_chilvary': self.mightest_chilvary, 'can_roll_dice': self.can_roll_dice, 'first_village': self.first_village and self.first_village.id or None, 'second_village': self.second_village and self.second_village.id or None, 'resources': { 'total': sum(self.resources.values()) }, 'vault': {}, 'cards': { 'unused_cards': len([c for c in self.cards.values() if c.is_used != True]), 'used_knights': len([c for c in self.cards.values() if c.type == Card.TYPE_KNIGHT and c.is_used]) } }) if self.id == self.game.my_player.id: d['has_free_village'] = self.has_free_village d['has_free_town'] = self.has_free_town d['cards']['cards'] = [c.to_api() for c in self.cards.values()] for k in self.resources.keys(): d['resources'][k] = self.resources[k] if hasattr(self, 'vault') and self.vault != None: for k in self.vault.keys(): d['vault'][k] = self.vault[k] return d def add_resource(self, node, field): if field.resource == Resource.RESOURCE_DESERT: return None if node.type == BoardNode.TYPE_VILLAGE: amount = 1 else: amount = 2 self.add_resource_raw(field.resource, amount) return (field.resource, amount) def apply_thief_to_full(self): if self.resources.sum() <= 0: return stolen = Resources() howmany = int(math.ceil(self.resources.sum() / 2)) # # Steal one piece of any resource that's available in # `search_set`. If `original_set` is not None, number # of resources is decreased in `original_set` too. # Stolen resource is marked in `stolen` set. 
#
        def __steal_one_piece(search_set, original_set = None):
            while True:
                r = random.randint(Resource.RESOURCE_MIN, Resource.RESOURCE_MAX)
                if search_set[r] > 0:
                    break
            search_set[r] -= 1
            stolen[r] += 1
            if original_set:
                original_set[r] -= 1

        # Set with resources we can steal freely
        unprotected_resources = self.resources.clone()

        # Player has no vault yet, create an empty one
        if not hasattr(self, 'vault') or self.vault == None:
            self.vault = Resources()

        # Remove from `unprotected_resources` those resources
        # the player wants to keep for himself
        unprotected_resources.deduct(self.vault)

        if howmany >= unprotected_resources.sum():
            # Not enough unprotected resources - we can safely steal all unprotected ones,
            # and then cut deeper into player's stack
            howmany -= unprotected_resources.sum()
            self.resources.deduct(unprotected_resources)
            stolen.ascribe(unprotected_resources)

            for _ in range(0, howmany):
                __steal_one_piece(self.resources)

        else:
            # Remove only from unprotected stack, it has enough resources
            for _ in range(0, howmany):
                __steal_one_piece(unprotected_resources, original_set = self.resources)

        hlib.events.trigger('game.settlers.ResourcesStolen', self.game, game = self.game, resources = stolen, victim = self.user)

    def apply_thief_to_one(self, thief):
        if self.resources.sum() <= 0:
            return

        while True:
            r = random.randint(Resource.RESOURCE_MIN, Resource.RESOURCE_MAX)
            if self.resources[r] > 0:
                break

        self.resources[r] -= 1
        thief.add_resource_raw(r, 1)

        hlib.events.trigger('game.settlers.ResourceStolen', self.game, game = self.game, resource = r, thief = thief.user, victim = self.user)

    def exchange_resources(self, t, src, dst, pieces):
        if not self.is_on_game:
            raise games.NotYourTurnError()

        if src == dst:
            raise games.GameError(msg = 'You can not exchange resources for the same one')

        if self.resources[src] < pieces:
            raise games.NotEnoughResourcesError()

        if t == Game.RESOURCE_EXCHANGE_FOUR:
            k = 4
        elif t == Game.RESOURCE_EXCHANGE_THREE:
            k = 3
            if len(self.game.board.get_used_ports(self, Resource.RESOURCE_FREE)) <= 0:
                raise games.GameError(msg = 'You have no access to free port')
        elif t == Game.RESOURCE_EXCHANGE_TWO:
            k = 2
            if len(self.game.board.get_used_ports(self, src)) <= 0:
                raise games.GameError(msg = 'You have no access to this resource port')
        else:
            raise games.GameError(msg = 'Unknown exchange type')

        if pieces % k:
            raise games.GameError(msg = 'This is not a valid number')

        self.resources[src] -= pieces
        self.resources[dst] += (pieces / k)

        src_c = Resources()
        src_c[src] = pieces
        dst_c = Resources()
        dst_c[dst] = (pieces / k)

        hlib.events.trigger('game.settlers.ResourcesExchanged', self.game, game = self.game, user = self.user, src = src_c, dst = dst_c)

    def apply_monopoly(self, thief, resource):
        amount = self.resources[resource]
        self.resources[resource] = 0
        thief.add_resource_raw(resource, amount)

        rc = Resources()
        rc[resource] = amount

        hlib.events.trigger('game.settlers.Monopoly', self.game, game = self.game, thief = thief.user, victim = self.user, resources = rc)

    def exchange_has_any_common(self):
        return self.game.type == games.Game.TYPE_GAME

    def exchange_four_has_any(self):
        if not self.exchange_has_any_common():
            return False
        return max(self.resources.values()) >= 4

    def exchange_three_has_any(self):
        if not self.exchange_has_any_common():
            return False
        return max(self.resources.values()) >= 3 and len(self.game.board.get_used_ports(self, Resource.RESOURCE_FREE))

    def exchange_two_has_any(self):
        if not self.exchange_has_any_common():
            return False
        for k in self.resources.keys():
            if self.resources[k] >= 2 and
len(self.game.board.get_used_ports(self, k)) > 0: return True return False def exchange_has_any(self): return (self.exchange_four_has_any() or self.exchange_three_has_any() or self.exchange_two_has_any()) class BoardField(hlib.database.DBObject): TYPE_UNKNOWN = 0 TYPE_SEA = 1 TYPE_DESERT = 2 TYPE_RESOURCE = 3 map_type2str = {0: 'unknown', 1: 'sea', 2: 'desert', 3: 'resource' } def __init__(self, game, typ, thief, number, resource): hlib.database.DBObject.__init__(self) self.game = game self.id = None self.type = int(typ) self.number = int(number) self.resource = int(resource) self.thief = bool(thief) def to_api(self): return { 'id': self.id, 'type': self.type, 'number': self.number, 'resource': self.resource, 'thief': self.thief } class BoardPort(hlib.database.DBObject): def __init__(self, game, clock, resource, nodes): hlib.database.DBObject.__init__(self) self.game = game self.id = None self.clock = clock self.resource = resource self.nodes = nodes def to_api(self): return { 'id': self.id, 'clock': self.clock, 'resource': self.resource, 'nodes': self.nodes } class BoardPath(games.OwnerableDBObject): TYPE_FREE = 1 TYPE_OWNED = 2 POSITION_LT = 1 POSITION_T = 2 POSITION_RT = 3 POSITION_RB = 4 POSITION_B = 5 POSITION_LB = 6 map_pos2str = {1: 'lt', 2: 't', 3: 'rt', 4: 'lt', 5: 't', 6: 'rt' } def __init__(self, game, typ, owner): games.OwnerableDBObject.__init__(self, game, typ, owner) self._v_mark = False def __getattr__(self, name): if name == '_v_mark': setattr(self, '_v_mark', False) return False class BoardNode(games.OwnerableDBObject): TYPE_FREE = 1 TYPE_VILLAGE = 2 TYPE_TOWN = 3 map_type2str = {1: 'free', 2: 'village', 3: 'town'} class Card(games.Card): TYPE_KNIGHT = 1 TYPE_MONOPOLY = 2 TYPE_ROADS = 3 TYPE_INVENTION = 4 TYPE_POINT = 5 map_card2str = {1: 'knight', 2: 'monopoly', 3: 'roads', 4: 'invention', 5: 'point'} def __getattr__(self, name): if name == 'can_be_used': if self.is_used: return False if not self.player.is_on_turn: return False if not self.player.is_on_game and self.type != Card.TYPE_KNIGHT: return False if not self.game.type in [Game.TYPE_PREPARE_KNIGHT, Game.TYPE_GAME]: return False if self.game.round == self.bought: return False for card in self.player.cards.values(): if card.used == self.game.round: return False return True return games.Card.__getattr__(self, name) COLOR_SPACE = games.color.ColorSpace('settlers', colors = [ ('red', games.color.Color('red', 'Red', '#CC0000')), ('pink', games.color.Color('pink', 'Pink', '#DD21D4')), ('black', games.color.Color('black', 'Black', '#000000')), ('green', games.color.Color('green', 'Green', '#33FF00')), ('purple', games.color.Color('purple', 'Purple', '#990099')), ('dark_blue', games.color.Color('dark_blue', 'Dark blue', '#000099')), ('brown', games.color.Color('brown', 'Brown', '#663300')), ('dark_green', games.color.Color('dark_green', 'Dark green', '#347235')), ('orange', games.color.Color('orange', 'Orange', '#FF9900')), ('light_blue', games.color.Color('light_blue', 'Light blue', '#00CCCC')) ]) class Board(games.Board): # pylint: disable-msg=R0904 COLORS = {1: ['#CC0000', 'red'], 2: ['#FF9900', 'orange'], 3: ['#DD21D4', 'pink'], 4: ['#000000', 'black'], 5: ['#33FF00', 'green'], 6: ['#990099', 'purple'], 7: ['#00CCCC', 'light_blue'], 8: ['#000099', 'dark_blue'], 9: ['#663300', 'brown'], 10: ['#347235', 'dark_green'] } def __init__(self, game, init = True): games.Board.__init__(self, game) self.fields = hlib.database.IndexedMapping(first_key = 1) self.nodes = hlib.database.IndexedMapping(first_key = 1) self.paths = 
hlib.database.IndexedMapping(first_key = 1) self.ports = hlib.database.IndexedMapping() self.lps_length = -1 self.lps_counter = 0 self.lps_owner = 0 self.lps_multi = False if not init: return self.init() def init(self): # create ports resources = [Resource.RESOURCE_SHEEP, Resource.RESOURCE_WOOD, Resource.RESOURCE_ROCK, Resource.RESOURCE_GRAIN, Resource.RESOURCE_CLAY] random.shuffle(resources) free_ports = 4 free = random.randrange(0, 2) if free == 1: free = True else: free = False for desc in games.settlers.board_def.PORT_DESCS: if desc == []: continue if free and free_ports > 0: resource = Resource.RESOURCE_FREE free_ports -= 1 else: resource = resources.pop(0) if len(desc['clock']) == 1: clock = desc['clock'][0] nodes = desc['nodes'][0] else: j = random.randrange(0, 2) clock = desc['clock'][j] nodes = desc['nodes'][j] free = not free self.ports.push(BoardPort(self.game, clock, resource, nodes)) # create fields while True: resources = [Resource.RESOURCE_SHEEP, Resource.RESOURCE_SHEEP, Resource.RESOURCE_SHEEP, Resource.RESOURCE_SHEEP, Resource.RESOURCE_ROCK, Resource.RESOURCE_ROCK, Resource.RESOURCE_ROCK, Resource.RESOURCE_CLAY, Resource.RESOURCE_CLAY, Resource.RESOURCE_CLAY, Resource.RESOURCE_WOOD, Resource.RESOURCE_WOOD, Resource.RESOURCE_WOOD, Resource.RESOURCE_WOOD, Resource.RESOURCE_GRAIN, Resource.RESOURCE_GRAIN, Resource.RESOURCE_GRAIN, Resource.RESOURCE_GRAIN] if self.game.flags.floating_desert == True: resources.append(Resource.RESOURCE_DESERT) numbers = [2, 3, 3, 4, 4, 5, 5, 6, 6, 8, 8, 9, 9, 10, 10, 11, 11, 12] random.shuffle(resources) random.shuffle(numbers) for i in range(1, 20): if self.game.flags.floating_desert != True: if i == 10: field = (BoardField.TYPE_DESERT, 1, 7, Resource.RESOURCE_DESERT) else: field = (BoardField.TYPE_RESOURCE, 0, numbers.pop(0), resources.pop(0)) else: resource = resources.pop(0) if resource == Resource.RESOURCE_DESERT: field = (BoardField.TYPE_DESERT, 1, 7, resource) else: field = (BoardField.TYPE_RESOURCE, 0, numbers.pop(0), resource) self.fields.push(BoardField(self.game, field[0], field[1], field[2], field[3])) if self.game.flags.spread_fields != True: break too_close_cnt = 0 for field_id, field_neighbours in games.settlers.board_def.FIELD_NEIGHBOURS.items(): too_close_cnt = 0 if self.fields[field_id].number not in (6, 8): continue for field_neighbour_id in field_neighbours: if self.fields[field_neighbour_id].number in (6, 8): too_close_cnt += 1 if too_close_cnt >= 2: break break if too_close_cnt >= 2: hlib.log.log_dbg('New board: reroll fields, spread enabled') continue # There's no field with more than 1 (6, 8) neighbour, we're done break for i in range(1, 55): self.nodes.push(BoardNode(self, BoardNode.TYPE_FREE, games.DummyOwner())) for i in range(1, 73): self.paths.push(BoardPath(self, BoardPath.TYPE_FREE, games.DummyOwner())) self.render_preview() def __getattr__(self, name): if name == 'thief_field': for f in self.fields.values(): if f.thief: return f raise games.GameError(msg = 'This game has no thief?') if name == 'free_numbers_map': return dict([(f.id, f.thief != True) for f in self.fields.values()]) return games.Board.__getattr__(self, name) def render_preview(self): import PIL.Image import ImageDraw offset = (59 + 29, 102) preview = PIL.Image.new('RGBA', (645, 714), (255, 255, 255, 255)) preview_draw = ImageDraw.Draw(preview) save_info = None for field in self.fields.values(): field_img = PIL.Image.open('../static/images/games/settlers/board/real/field/' + Resource.map_resource2str[field.resource] + '.gif') field_img_conv = 
field_img.convert('RGBA') if not save_info: save_info = field_img_conv.info preview.paste(field_img_conv, (int(board_def.COORS['field'][field.id][0]) + offset[0], int(board_def.COORS['field'][field.id][1]) + offset[1]), field_img_conv) for sea in board_def.COORS['sea'].values(): sea_img = PIL.Image.open('../static/images/games/settlers/board/real/field/sea.gif') sea_img_conv = sea_img.convert('RGBA') preview.paste(sea_img_conv, (int(sea[0]) + offset[0], int(sea[1]) + offset[1]), sea_img_conv) del preview_draw preview = preview.resize((161, 178)) preview.save('../static/images/gamepreview/' + str(self.game.id) + '.gif', 'GIF', transparency = 255) def active_nodes_map(self, player = None): player = player or self.game.my_player if not player or not player.is_on_turn: return active_nodes_map_negative if self.game.type in [Game.TYPE_USE_KNIGHT, Game.TYPE_USE_KNIGHT_GAME, Game.TYPE_FREE, Game.TYPE_FREE_PATHS_FIRST, Game.TYPE_FREE_PATHS_SECOND, Game.TYPE_FINISHED, Game.TYPE_PREPARE_THIEF, Game.TYPE_PREPARE_DICE, Game.TYPE_PREPARE_KNIGHT, Game.TYPE_FREE_RESOURCES, Game.TYPE_MONOPOLY, Game.TYPE_CANCELED]: return active_nodes_map_negative if self.game.type in [Game.TYPE_PLACE_FIRST, Game.TYPE_PLACE_SECOND]: m = active_nodes_map_positive.copy() for nid, node in self.nodes.items(): if node.type == BoardNode.TYPE_FREE: continue m[nid] = False for neighbour in games.settlers.board_def.NODE_DESCS[nid]['neighbours']: m[neighbour] = False return m if self.game.type in [Game.TYPE_APPLY_THIEF, Game.TYPE_APPLY_KNIGHT, Game.TYPE_APPLY_KNIGHT_GAME]: m = active_nodes_map_negative.copy() nodes = self.get_nodes_by_field(self.thief_field) for node in nodes: if node.type != BoardNode.TYPE_FREE and not node.is_owner(player): m[node.id] = True return m if self.game.type == Game.TYPE_GAME: m = active_nodes_map_positive.copy() for nid, node in self.nodes.items(): if m[nid] == False: continue if node.type != BoardNode.TYPE_FREE: for neighbour in games.settlers.board_def.NODE_DESCS[nid]['neighbours']: m[neighbour] = False if node.type != BoardNode.TYPE_VILLAGE or not node.is_owner(player): m[nid] = False continue found = False for pid in games.settlers.board_def.NODE_DESCS[nid]['paths']: if not self.paths[pid].is_owner(player): continue found = True break m[nid] = found return m return active_nodes_map_negative def active_paths_map(self, player = None): player = player or self.game.my_player if player == None or not player.is_on_turn: return active_paths_map_negative if self.game.type in [Game.TYPE_FREE, Game.TYPE_PREPARE_THIEF, Game.TYPE_APPLY_THIEF, Game.TYPE_USE_KNIGHT, Game.TYPE_USE_KNIGHT_GAME, Game.TYPE_FINISHED, Game.TYPE_PREPARE_DICE, Game.TYPE_PREPARE_KNIGHT, Game.TYPE_APPLY_KNIGHT, Game.TYPE_FREE_RESOURCES, Game.TYPE_MONOPOLY, Game.TYPE_APPLY_KNIGHT_GAME, Game.TYPE_CANCELED]: return active_paths_map_negative if self.game.type == Game.TYPE_PLACE_FIRST: if not player.first_village: return active_paths_map_negative return self.get_free_paths_map_by_node(player.first_village) if self.game.type == Game.TYPE_PLACE_SECOND: if not player.second_village: return active_paths_map_negative return self.get_free_paths_map_by_node(player.second_village) if self.game.type in [Game.TYPE_GAME, Game.TYPE_FREE_PATHS_FIRST, Game.TYPE_FREE_PATHS_SECOND]: m = active_paths_map_negative.copy() for path in self.paths.values(): if path.type != BoardPath.TYPE_FREE: continue node1 = self.nodes[games.settlers.board_def.PATH_DESCS[path.id]['nodes'][0]] node2 = self.nodes[games.settlers.board_def.PATH_DESCS[path.id]['nodes'][1]] if 
node1.is_owner(player) or node2.is_owner(player): m[path.id] = True continue sibling1 = None for i in range(2): sibling1 = self.get_path_sibling_by_node(path, node1, i) if sibling1 == None: continue if sibling1.type == BoardPath.TYPE_OWNED and sibling1.is_owner(player): break sibling1 = None sibling2 = None for i in range(2): sibling2 = self.get_path_sibling_by_node(path, node2, i) if sibling2 == None: continue if sibling2.type == BoardPath.TYPE_OWNED and sibling2.is_owner(player): break sibling2 = None if (sibling1 and not sibling2 and node1.type != BoardNode.TYPE_FREE) or (sibling2 and not sibling1 and node2.type != BoardNode.TYPE_FREE): continue if sibling1 or sibling2: m[path.id] = True return m return {} def get_used_paths(self, player = None): return [p for p in self.paths.values() if (not player and p.type != BoardPath.TYPE_FREE) or (player and p.type != BoardPath.TYPE_FREE and p.is_owner(player))] def get_used_nodes(self, player = None): return [n for n in self.nodes.values() if (not player and n.type != BoardNode.TYPE_FREE) or (player and n.type != BoardNode.TYPE_FREE and n.is_owner(player))] def get_used_ports(self, player, resource): r = [] for p in self.ports.values(): if p.resource != resource: continue for n in p.nodes: node = self.nodes[n] if node.is_owner(player): r.append(node) return r def get_nodes_by_field(self, field): return [n for n in self.nodes.values() if field.id in games.settlers.board_def.NODE_DESCS[n.id]['fields']] def get_fields_by_node(self, n): return [self.fields[i] for i in games.settlers.board_def.NODE_DESCS[n.id]['fields']] def get_fields_by_number(self, n): return [f for f in self.fields.values() if f.number == n] def get_free_paths_map_by_node(self, node): return dict([(p.id, node.id in games.settlers.board_def.PATH_DESCS[p.id]['nodes'] and p.type == BoardPath.TYPE_FREE) for p in self.paths.values()]) def get_node_by_path(self, path, index): return self.nodes[games.settlers.board_def.PATH_DESCS[path.id]['nodes'][index]] # Longest path search def get_path_sibling_by_node(self, path, node, index): i = 0 for pid in games.settlers.board_def.NODE_DESCS[node.id]['paths']: if pid == path.id: index = index + 1 i = i + 1 continue if i == index: return self.paths[pid] i = i + 1 def lps_init(self): self.lps_length = -1 self.lps_counter = 0 self.lps_owner = 0 self.lps_multi = False def lps_node_is_traversable(self, node, owner): # pylint: disable-msg=R0201 r = not ((node.type == BoardNode.TYPE_VILLAGE or node.type == BoardNode.TYPE_TOWN) and (not node.is_owner(owner))) return r def lps_save_result(self, owner): if self.lps_counter > self.lps_length: self.lps_owner = owner self.lps_length = self.lps_counter self.lps_multi = False elif self.lps_counter == self.lps_length: if self.lps_owner != owner: self.lps_multi = True def lps_traverse_path(self, path, node): path._v_mark = True self.lps_counter = self.lps_counter + 1 traversed = False nodes_tested = False for i in range(2): p = self.get_path_sibling_by_node(path, node, i) # pylint: disable-msg=W0212 if not p or p._v_mark or p.type != BoardPath.TYPE_OWNED or p.owner != path.owner: continue nodes_tested = True for j in range(2): n = self.get_node_by_path(p, j) if n.id == node.id: continue if not self.lps_node_is_traversable(n, path.owner): continue self.lps_traverse_path(p, n) traversed = True if not traversed: if nodes_tested == True: self.lps_counter = self.lps_counter + 1 self.lps_save_result(path.owner) if nodes_tested == True: self.lps_counter = self.lps_counter - 1 self.lps_counter = self.lps_counter - 1 
path._v_mark = False def lps_search(self): self.lps_init() for path in self.paths.values(): if path.type != BoardPath.TYPE_OWNED: continue for i in [0, 1]: node = self.get_node_by_path(path, i) if not self.lps_node_is_traversable(node, path.owner): continue self.lps_traverse_path(path, node) if self.lps_length < 5: return (False, None, self.lps_length) if self.lps_multi: return (True, None, self.lps_length) return (True, self.lps_owner, self.lps_length) class Game(games.Game): TYPE_PLACE_FIRST = 10 TYPE_PLACE_SECOND = 11 TYPE_PREPARE_KNIGHT = 12 TYPE_PREPARE_THIEF = 13 TYPE_APPLY_THIEF = 14 TYPE_USE_KNIGHT = 15 TYPE_APPLY_KNIGHT = 16 TYPE_PREPARE_DICE = 17 TYPE_MONOPOLY = 18 TYPE_USE_KNIGHT_GAME = 19 TYPE_APPLY_KNIGHT_GAME = 20 TYPE_FREE_RESOURCES = 21 TYPE_FREE_PATHS_FIRST = 22 TYPE_FREE_PATHS_SECOND = 23 TIMEOUT_BEGIN_TYPES = [0, 10, 11] TIMEOUT_TURN_TYPES = [0, 10, 11, 2, 3] RESOURCE_EXCHANGE_FOUR = 4 RESOURCE_EXCHANGE_THREE = 3 RESOURCE_EXCHANGE_TWO = 2 RESOURCE_DESCS = {'village': {Resource.RESOURCE_CLAY: 1, Resource.RESOURCE_WOOD: 1, Resource.RESOURCE_SHEEP: 1, Resource.RESOURCE_GRAIN: 1}, 'town': {Resource.RESOURCE_GRAIN: 2, Resource.RESOURCE_ROCK: 3}, 'path': {Resource.RESOURCE_CLAY: 1, Resource.RESOURCE_WOOD: 1}, 'card': {Resource.RESOURCE_SHEEP: 1, Resource.RESOURCE_GRAIN: 1, Resource.RESOURCE_ROCK: 1}} def __init__(self, flags): games.Game.__init__(self, flags, Player) self.card_line = '' self.card_index = 0 self.dice_rolls = hlib.database.SimpleList() self.longest_length = 0 self.longest_owner = games.DummyOwner() self.dont_shuffle = flags.dont_shuffle cards = {Card.TYPE_KNIGHT: 14, Card.TYPE_MONOPOLY: 2, Card.TYPE_ROADS: 2, Card.TYPE_INVENTION: 2, Card.TYPE_POINT: 5} # pylint: disable-msg=W0612 for i in range(25): while True: n = random.randint(0, 24) if 0 <= n and n <= 13: n = Card.TYPE_KNIGHT elif 14 <= n and n <= 15: n = Card.TYPE_MONOPOLY elif 16 <= n and n <= 17: n = Card.TYPE_ROADS elif 18 <= n and n <= 19: n = Card.TYPE_INVENTION else: n = Card.TYPE_POINT if cards[n] > 0: cards[n] -= 1 self.card_line += unicode(n) break def __getattr__(self, name): if name == 'winner_player': if len(self.players) <= 0: return None if self.forhont_player.points <= 0: return None return self.forhont_player if name == 'dice_rolls_stats': ss = {'with': {'numbers': {2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0}}, 'without': {'numbers': {2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0}}} i = 0 for dr in self.dice_rolls: i = i + 1 if i > self.limit * 3: ss['without']['numbers'][dr] += 1 ss['with']['numbers'][dr] += 1 ss['with']['sum'] = sum(ss['with']['numbers']) ss['without']['sum'] = sum(ss['without']['numbers']) return ss if name == 'last_numbers': if self.round == 0: return [] if self.round < 2: return [i for i in reversed(list(self.dice_rolls))] else: return [i for i in reversed(list(self.dice_rolls[len(self.dice_rolls) - self.limit:]))] return games.Game.__getattr__(self, name) def to_state(self): d = games.Game.to_state(self) d.update({ 'last_numbers': self.last_numbers, 'board': ApiBoard(self.board), 'remaining_cards': 25 - self.card_index }) if self.type == games.Game.TYPE_FINISHED: drs = self.dice_rolls_stats d['dice_rolls'] = { 'with': [(k, v) for k, v in drs['with']['numbers'].items()], 'without': [(k, v) for k, v in drs['without']['numbers'].items()] } return d def begin(self): # pylint: disable-msg=W0201 self.type = Game.TYPE_PLACE_FIRST if self.dont_shuffle != True: if self.limit == 3: order = [0, 1, 2] else: order = [0, 1, 2, 3] 
random.shuffle(order) new_players = hlib.database.IndexedMapping() tmp_players = [p for p in self.players.values()] for i in order: new_players.push(tmp_players[i]) self.players = new_players games.Game.begin(self) def get_next_dice(self): while True: n = random.randint(1, 6) + random.randint(1, 6) if n == 7 and self.round < 4: continue self.dice_rolls.append(n) break return n def check_game_finalization(self): if self.my_player.points >= 10: self.finish() # pylint: disable-msg=R0201 def build_object(self, o, player, newtype, resources = None): if resources: if not player.has_resources_for(Game.RESOURCE_DESCS[resources]): raise games.NotEnoughResourcesError() player.spend_resources_for(Game.RESOURCE_DESCS[resources]) o.type = newtype o.owner = player def apply_monopoly(self, resource): # pylint: disable-msg=E1101 for p in self.players.values(): if p.id != self.my_player.id: p.apply_monopoly(self.my_player, resource) self.type = Game.TYPE_GAME def apply_invention(self, r1, r2): self.my_player.add_resource_raw(r1, 1) self.my_player.add_resource_raw(r2, 1) self.type = Game.TYPE_GAME rc = Resources() rc[r1] += 1 rc[r2] += 1 hlib.events.trigger('game.settlers.ResourcesReceived', self, hidden = True, game = self, user = self.my_player.user, resources = rc) def apply_points(self): if not self.type in [Game.TYPE_PREPARE_KNIGHT, Game.TYPE_GAME]: raise games.NotYourTurnError() if not self.my_player.is_on_turn: raise games.NotYourTurnError() cards = [c for c in self.my_player.cards.values() if c.type == Card.TYPE_POINT and not c.is_used] if self.my_player.points + len(cards) < 10: raise NotEnoughPointCardsError() i = 10 - self.my_player.points for c in cards: c.used = self.round hlib.events.trigger('game.CardUsed', self, game = self, user = self.my_player.user, card = c) i -= 1 if i == 0: self.check_game_finalization() return def card_clicked(self, cid): card = self.my_player.cards[cid] if not card.can_be_used: raise games.InactiveCardError() card.used = self.round if card.type == Card.TYPE_KNIGHT: if self.type == Game.TYPE_PREPARE_KNIGHT: self.type = Game.TYPE_USE_KNIGHT else: self.type = Game.TYPE_USE_KNIGHT_GAME self.check_mightest_chilvary() elif card.type == Card.TYPE_POINT: self.check_game_finalization() elif card.type == Card.TYPE_MONOPOLY: self.type = Game.TYPE_MONOPOLY elif card.type == Card.TYPE_ROADS: if not self.my_player.has_free_path: raise TooManyPathsError() self.type = Game.TYPE_FREE_PATHS_FIRST elif card.type == Card.TYPE_INVENTION: self.type = Game.TYPE_FREE_RESOURCES hlib.events.trigger('game.CardUsed', self, game = self, user = self.my_player.user, card = card) def number_clicked(self, nid): if not self.my_player.is_on_turn: raise games.NotYourTurnError() if not self.type in [Game.TYPE_PREPARE_THIEF, Game.TYPE_USE_KNIGHT, Game.TYPE_USE_KNIGHT_GAME]: raise InactiveNumberError() if self.board.free_numbers_map[nid] != True: raise InactiveNumberError() for f in self.board.fields.values(): f.thief = False self.board.fields[nid].thief = True hlib.events.trigger('game.settlers.ThiefPlaced', self, game = self, user = self.my_player.user, field = self.board.fields[nid]) self.pass_turn(check = False) def path_clicked(self, pid): if not self.my_player.is_on_turn: raise games.NotYourTurnError() if self.board.active_paths_map()[pid] != True: raise InactivePathError() path = self.board.paths[pid] if self.type in [Game.TYPE_PLACE_FIRST, Game.TYPE_PLACE_SECOND]: path.type = BoardPath.TYPE_OWNED path.owner = self.my_player return if self.type == Game.TYPE_FREE_PATHS_FIRST: self.build_object(path, 
self.my_player, BoardPath.TYPE_OWNED) hlib.events.trigger('game.settlers.PathBuilt', self, hidden = True, game = self, user = self.my_player.user, path = path) if not self.my_player.has_free_path: self.type = Game.TYPE_GAME else: self.type = Game.TYPE_FREE_PATHS_SECOND self.check_longest_path() return if self.type == Game.TYPE_FREE_PATHS_SECOND: self.build_object(path, self.my_player, BoardPath.TYPE_OWNED) hlib.events.trigger('game.settlers.PathBuilt', self, hidden = True, game = self, user = self.my_player.user, path = path) self.type = Game.TYPE_GAME self.check_longest_path() return if self.type == Game.TYPE_GAME: if not self.my_player.has_free_path: raise TooManyPathsError() self.build_object(path, self.my_player, BoardPath.TYPE_OWNED, 'path') hlib.events.trigger('game.settlers.PathBuilt', self, hidden = True, game = self, user = self.my_player.user, path = path) self.check_longest_path() def node_clicked(self, nid): if not self.my_player.is_on_turn: raise games.NotYourTurnError() if self.board.active_nodes_map()[nid] != True: raise InactiveNodeError() node = self.board.nodes[nid] if self.type in [Game.TYPE_PLACE_FIRST, Game.TYPE_PLACE_SECOND]: node.type = BoardNode.TYPE_VILLAGE node.owner = self.my_player if self.my_player.first_village == None: self.my_player.first_village = node elif self.my_player.second_village == None: self.my_player.second_village = node return if self.type in [Game.TYPE_APPLY_THIEF, Game.TYPE_APPLY_KNIGHT, Game.TYPE_APPLY_KNIGHT_GAME]: node.owner.apply_thief_to_one(self.my_player) self.pass_turn(check = False) return if self.type == Game.TYPE_GAME: if node.type == BoardNode.TYPE_FREE: if not self.my_player.has_free_village: raise TooManyVillagesError() self.build_object(node, self.my_player, BoardNode.TYPE_VILLAGE, 'village') hlib.events.trigger('game.settlers.VillageBuilt', self, hidden = True, game = self, user = self.my_player.user, node = node) self.check_longest_path() elif node.type == BoardNode.TYPE_VILLAGE: if not self.my_player.has_free_town: raise TooManyTownsError() self.build_object(node, self.my_player, BoardNode.TYPE_TOWN, 'town') hlib.events.trigger('game.settlers.TownBuilt', self, hidden = True, game = self, user = self.my_player.user, node = node) self.check_game_finalization() def buy_card(self): # pylint: disable-msg=E0203,W0201 if not self.my_player.is_on_turn: raise games.NotYourTurnError() if self.card_index == 25: raise games.GameError(msg = 'The deck is empty') if not self.my_player.has_resources_for(Game.RESOURCE_DESCS['card']): raise games.NotEnoughResourcesError() self.my_player.spend_resources_for(Game.RESOURCE_DESCS['card']) c = Card(self, self.my_player, int(self.card_line[self.card_index]), self.round) self.my_player.cards.push(c) self.card_index += 1 hlib.events.trigger('game.CardBought', self, game = self, user = self.my_player.user, card = c) def deal_resources(self, dice): per_owner = dict([(i, Resources()) for i in self.players.keys()]) for field in self.board.get_fields_by_number(dice): if field.thief: continue nodes = [n for n in self.board.get_nodes_by_field(field) if n.type != BoardNode.TYPE_FREE] for node in nodes: (resource, amount) = node.owner.add_resource(node, field) per_owner[node.owner.id].add(Resource.map_resource2str[resource], amount) # pylint: disable-msg=E1101 for p in self.players.values(): r = per_owner[p.id] if r.sum() > 0: hlib.events.trigger('game.settlers.ResourcesReceived', self, hidden = True, game = self, user = p.user, resources = per_owner[p.id]) def roll_dice(self): if not 
self.my_player.can_roll_dice: raise games.NotYourTurnError() dice = self.get_next_dice() hlib.events.trigger('game.settlers.DiceRolled', self, game = self, user = self.my_player.user, dice = dice) if dice == 7: self.type = Game.TYPE_PREPARE_THIEF # pylint: disable-msg=E1101 for player in self.players.values(): if player.resources.sum() > 7: player.apply_thief_to_full() else: self.deal_resources(dice) self.type = Game.TYPE_GAME def can_we_steal(self): for node in self.board.get_used_nodes(): if node.is_owner(self.my_player): continue if self.board.thief_field.id in games.settlers.board_def.NODE_DESCS[node.id]['fields']: return True return False def do_pass_turn(self, forced = False): # pylint: disable-msg=W0201,R0912,R0915 self.last_pass = hruntime.time current_forhont = self.forhont next_round = False if forced: self.type = Game.TYPE_GAME if self.type == Game.TYPE_APPLY_THIEF: self.type = Game.TYPE_GAME elif self.type == Game.TYPE_PREPARE_THIEF: if self.can_we_steal(): self.type = Game.TYPE_APPLY_THIEF else: self.type = Game.TYPE_GAME elif self.type == Game.TYPE_APPLY_KNIGHT: self.type = Game.TYPE_PREPARE_DICE elif self.type == Game.TYPE_APPLY_KNIGHT_GAME: self.type = Game.TYPE_GAME elif self.type == Game.TYPE_USE_KNIGHT: if self.can_we_steal(): self.type = Game.TYPE_APPLY_KNIGHT else: self.type = Game.TYPE_PREPARE_DICE elif self.type == Game.TYPE_USE_KNIGHT_GAME: if self.can_we_steal(): self.type = Game.TYPE_APPLY_KNIGHT_GAME else: self.type = Game.TYPE_GAME elif self.type == Game.TYPE_GAME: if self.forhont == self.limit - 1: self.forhont = 0 next_round = True else: self.forhont = self.forhont + 1 if self.round == 1 and next_round != True: self.type = Game.TYPE_PREPARE_DICE else: self.type = Game.TYPE_PREPARE_KNIGHT elif self.type == Game.TYPE_PLACE_SECOND: if self.my_player.second_village: rc = Resources() for field in self.board.get_fields_by_node(self.my_player.second_village): rst = self.my_player.add_resource(self.my_player.second_village, field) if rst != None: rc.add(rst[0], rst[1]) hlib.events.trigger('game.settlers.ResourcesReceived', self, hidden = False, game = self, user = self.my_player.user, resources = rc) if self.forhont == 0: next_round = True self.type = Game.TYPE_PREPARE_DICE else: self.forhont = self.forhont - 1 elif self.type == Game.TYPE_PLACE_FIRST: if self.forhont == self.limit - 1: self.type = Game.TYPE_PLACE_SECOND else: self.forhont = self.forhont + 1 else: if self.forhont == self.limit - 1: self.forhont = 0 else: self.forhont = self.forhont + 1 if current_forhont != self.forhont: while self.forhont_player.has_too_many_misses(): next_round = self.do_pass_turn(forced = forced) # Check it on entering new player's turn - it is possible to have >10 points and NOT be on turn. 
self.check_game_finalization() return next_round def pass_turn(self, **kwargs): if 'first_village' in kwargs and 'first_path' in kwargs: if self.my_player.first_village != None: raise games.GameError(msg = 'You have first village already') nid = int(kwargs['first_village']) pid = int(kwargs['first_path']) self.node_clicked(nid) self.path_clicked(pid) del kwargs['first_village'] del kwargs['first_path'] if 'second_village' in kwargs and 'second_path' in kwargs: if self.my_player.second_village != None: raise games.GameError(msg = 'You have second village already') nid = int(kwargs['second_village']) pid = int(kwargs['second_path']) self.node_clicked(nid) self.path_clicked(pid) del kwargs['second_village'] del kwargs['second_path'] games.Game.pass_turn(self, **kwargs) def reset_players_lps(self): # pylint: disable-msg=E1101 for player in self.players.values(): player.longest_path = False def check_longest_path(self): linfo = self.board.lps_search() if linfo[0] == False: # pylint: disable-msg=W0201 self.reset_players_lps() self.longest_length = linfo[2] self.longest_owner = games.DummyOwner() else: if linfo[1] == None: self.longest_length = linfo[2] else: self.reset_players_lps() self.longest_length = linfo[2] if linfo[1] != self.longest_owner: self.longest_owner = linfo[1] hlib.events.trigger('game.settlers.LongestPathBonusEarned', self, game = self, user = self.longest_owner.user) self.longest_owner.longest_path = True self.check_game_finalization() def check_mightest_chilvary(self): # pylint: disable-msg=E1101 max_count = 0 max_player = None max_players = 0 for player in self.players.values(): cards = [c for c in player.cards.values() if c.type == Card.TYPE_KNIGHT and c.is_used] if len(cards) > max_count: max_count = len(cards) max_player = player max_players = 1 elif len(cards) == max_count: max_players += 1 if max_count < 3: return if max_players != 1: return old_owner = None for player in self.players.values(): if player.mightest_chilvary == True: old_owner = player player.mightest_chilvary = False max_player.mightest_chilvary = True if max_player != old_owner: hlib.events.trigger('game.settlers.MightestChilvaryBonusEarned', self, game = self, user = max_player.user) self.check_game_finalization() @staticmethod def create_game(flags, system_game = False): g = games.Game.create_game(Game, flags, system_game = system_game) g.board = Board(g) return g class GameCreationFlags(games.GameCreationFlags): FLAGS = games.GameCreationFlags.FLAGS + ['floating_desert', 'spread_fields'] MAX_OPPONENTS = 3 import events.game.settlers import board_def # Relative import is required to avoid circular imports active_nodes_map_negative = dict([(_i, False) for _i in range(1, len(board_def.NODE_DESCS))]) active_nodes_map_positive = dict([(_i, True) for _i in range(1, len(board_def.NODE_DESCS))]) active_paths_map_negative = dict([(_i, False) for _i in range(1, len(board_def.PATH_DESCS))])
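
# The dice helper `get_next_dice` above suppresses a total of 7 while the game is
# still in its opening rounds (self.round < 4) by simply rerolling. A standalone
# sketch of that rule, independent of the hlib/games framework (the function name and
# the `reroll_sevens_before_round` parameter are illustrative only, not part of this
# module); it reuses the `random` module this file already imports:
def roll_two_dice(round_number, reroll_sevens_before_round = 4):
  """ Roll 2d6, rerolling a total of 7 while the game is still in its opening rounds """
  while True:
    total = random.randint(1, 6) + random.randint(1, 6)
    if total == 7 and round_number < reroll_sevens_before_round:
      # the thief cannot come into play yet - roll again
      continue
    return total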
# coding=utf-8 import os import tempfile import shutil from django.core.files.uploadedfile import UploadedFile from django.test import TransactionTestCase from django.contrib.auth.models import Group from hs_core.testing import MockIRODSTestCaseMixin from hs_core import hydroshare from hs_core.models import BaseResource from hs_core.hydroshare.utils import resource_file_add_process, resource_post_create_actions from hs_core.views.utils import create_folder, move_or_rename_file_or_folder, remove_folder from hs_file_types.models import GenericLogicalFile, GeoRasterLogicalFile, GenericFileMetaData class CompositeResourceTest(MockIRODSTestCaseMixin, TransactionTestCase): def setUp(self): super(CompositeResourceTest, self).setUp() self.group, _ = Group.objects.get_or_create(name='Resource Author') self.user = hydroshare.create_account( '[email protected]', username='user1', first_name='Creator_FirstName', last_name='Creator_LastName', superuser=False, groups=[self.group] ) self.temp_dir = tempfile.mkdtemp() self.raster_file_name = 'small_logan.tif' self.raster_file = 'hs_composite_resource/tests/data/{}'.format(self.raster_file_name) self.generic_file_name = 'generic_file.txt' self.generic_file = 'hs_composite_resource/tests/data/{}'.format(self.generic_file_name) target_temp_raster_file = os.path.join(self.temp_dir, self.raster_file_name) shutil.copy(self.raster_file, target_temp_raster_file) self.raster_file_obj = open(target_temp_raster_file, 'r') target_temp_generic_file = os.path.join(self.temp_dir, self.generic_file_name) shutil.copy(self.generic_file, target_temp_generic_file) self.generic_file_obj = open(target_temp_generic_file, 'r') def tearDown(self): super(CompositeResourceTest, self).tearDown() if os.path.exists(self.temp_dir): shutil.rmtree(self.temp_dir) def test_create_composite_resource(self): # test that we can create a composite resource # there should not be any resource at this point self.assertEqual(BaseResource.objects.count(), 0) self._create_composite_resource() # there should be one resource at this point self.assertEqual(BaseResource.objects.count(), 1) self.assertEqual(self.composite_resource.resource_type, "CompositeResource") self.composite_resource.delete() def test_create_composite_resource_with_file_upload(self): # test that when we create composite resource with an uploaded file, then the uploaded file # is automatically set to genericlogicalfile type self.assertEqual(BaseResource.objects.count(), 0) self.raster_file_obj = open(self.raster_file, 'r') self.composite_resource = hydroshare.create_resource( resource_type='CompositeResource', owner=self.user, title='Test Raster File Metadata', files=(self.raster_file_obj,) ) # Deprecated: there should not be a GenericLogicalFile object at this point # Issue 2456 Create composite with uploaded file now part of logical file self.assertEqual(GenericLogicalFile.objects.count(), 1) # set the logical file resource_post_create_actions(resource=self.composite_resource, user=self.user, metadata=self.composite_resource.metadata) # there should be one resource at this point self.assertEqual(BaseResource.objects.count(), 1) self.assertEqual(self.composite_resource.resource_type, "CompositeResource") self.assertEqual(self.composite_resource.files.all().count(), 1) res_file = self.composite_resource.files.first() # check that the resource file is associated with GenericLogicalFile self.assertEqual(res_file.has_logical_file, True) self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile") # there should be 1 
GenericLogicalFile object at this point self.assertEqual(GenericLogicalFile.objects.count(), 1) # there should be 1 GenericFileMetaData object at this point self.assertEqual(GenericFileMetaData.objects.count(), 1) self.composite_resource.delete() # there should be no GenericLogicalFile object at this point self.assertEqual(GenericLogicalFile.objects.count(), 0) # there should be no GenericFileMetaData object at this point self.assertEqual(GenericFileMetaData.objects.count(), 0) def test_file_add_to_composite_resource(self): # test that when we add file to an existing composite resource, the added file # automatically set to genericlogicalfile type self.assertEqual(BaseResource.objects.count(), 0) self.raster_file_obj = open(self.raster_file, 'r') self._create_composite_resource() # there should not be any GenericLogicalFile object at this point self.assertEqual(GenericLogicalFile.objects.count(), 0) # add a file to the resource resource_file_add_process(resource=self.composite_resource, files=(self.raster_file_obj,), user=self.user) # there should be one resource at this point self.assertEqual(BaseResource.objects.count(), 1) self.assertEqual(self.composite_resource.resource_type, "CompositeResource") self.assertEqual(self.composite_resource.files.all().count(), 1) res_file = self.composite_resource.files.first() # check that the resource file is associated with GenericLogicalFile self.assertEqual(res_file.has_logical_file, True) self.assertEqual(res_file.logical_file_type_name, "GenericLogicalFile") # there should be 1 GenericLogicalFile object at this point self.assertEqual(GenericLogicalFile.objects.count(), 1) self.composite_resource.delete() def test_raster_file_type_folder_delete(self): # here we are testing that when a folder is deleted containing # files for a logical file type, other files in the composite resource are still associated # with their respective logical file types self.assertEqual(BaseResource.objects.count(), 0) self.raster_file_obj = open(self.raster_file, 'r') self._create_composite_resource() # add the raster file to the resource resource_file_add_process(resource=self.composite_resource, files=(self.raster_file_obj,), user=self.user) tif_res_file = [f for f in self.composite_resource.files.all() if f.extension == ".tif"][0] # add a generic file type self.generic_file_obj = open(self.generic_file, 'r') resource_file_add_process(resource=self.composite_resource, files=(self.generic_file_obj,), user=self.user) # there should be 2 GenericLogicalFile objects self.assertEqual(GenericLogicalFile.objects.count(), 2) # there should not be any GeoRasterLogicalFile object self.assertEqual(GeoRasterLogicalFile.objects.count(), 0) GeoRasterLogicalFile.set_file_type(self.composite_resource, tif_res_file.id, self.user) # there should be 1 GenericLogicalFile objects self.assertEqual(GenericLogicalFile.objects.count(), 1) # there should be 1 GeoRasterLogicalFile object self.assertEqual(GeoRasterLogicalFile.objects.count(), 1) txt_res_file = [f for f in self.composite_resource.files.all() if f.extension == ".txt"][0] self.assertEqual(txt_res_file.logical_file_type_name, "GenericLogicalFile") # now delete the folder small_logan that contains files associated with raster file type folder_path = "data/contents/small_logan" remove_folder(self.user, self.composite_resource.short_id, folder_path) txt_res_file.refresh_from_db() self.assertEqual(txt_res_file.logical_file_type_name, "GenericLogicalFile") # there should not be any GeoRasterLogicalFile object 
self.assertEqual(GeoRasterLogicalFile.objects.count(), 0) # there should be 1 GenericLogicalFile objects self.assertEqual(GenericLogicalFile.objects.count(), 1) self.composite_resource.delete() def test_core_metadata_CRUD(self): """test that all core metadata elements work for this resource type""" self._create_composite_resource() # test current metadata status of the composite resource # there should be title element self.assertEqual(self.composite_resource.metadata.title.value, "Test Composite Resource") # there shouldn't be abstract element self.assertEqual(self.composite_resource.metadata.description, None) # there shouldn't be any format element self.assertEqual(self.composite_resource.metadata.formats.count(), 0) # there should be date element - 2 elements self.assertEqual(self.composite_resource.metadata.dates.count(), 2) # there should be 1 creator element self.assertEqual(self.composite_resource.metadata.creators.count(), 1) # there should not be any contributor element self.assertEqual(self.composite_resource.metadata.contributors.count(), 0) # there should not be any coverage element self.assertEqual(self.composite_resource.metadata.coverages.count(), 0) # there should not be any funding agency element self.assertEqual(self.composite_resource.metadata.funding_agencies.count(), 0) # there should be 1 identifier element self.assertEqual(self.composite_resource.metadata.identifiers.count(), 1) # there should be 1 language element self.assertNotEqual(self.composite_resource.metadata.language, None) # there should not be any publisher element self.assertEqual(self.composite_resource.metadata.publisher, None) # there should not be any format element self.assertEqual(self.composite_resource.metadata.formats.count(), 0) # there should not be any relation element self.assertEqual(self.composite_resource.metadata.relations.count(), 0) # there should be 1 rights element self.assertNotEqual(self.composite_resource.metadata.rights, None) # there shouldn't be any source element self.assertEqual(self.composite_resource.metadata.sources.count(), 0) # there should not be any subject elements self.assertEqual(self.composite_resource.metadata.subjects.count(), 0) # there should be 1 type element self.assertNotEqual(self.composite_resource.metadata.type, None) # there should not be any key/value metadata self.assertEqual(self.composite_resource.extra_metadata, {}) # test create metadata # create abstract metadata = self.composite_resource.metadata metadata.create_element('description', abstract='new abstract for the resource') # there should be abstract element self.assertNotEqual(self.composite_resource.metadata.description, None) # add a file to the resource to auto create format element self.raster_file_obj = open(self.raster_file, 'r') resource_file_add_process(resource=self.composite_resource, files=(self.raster_file_obj,), user=self.user) self.assertEqual(self.composite_resource.files.all().count(), 1) # now there should be 1 format element self.assertEqual(self.composite_resource.metadata.formats.count(), 1) # add another creator metadata.create_element('creator', name='John Smith') # there should be 2 creators now self.assertEqual(self.composite_resource.metadata.creators.count(), 2) # add a contributor metadata.create_element('contributor', name='Lisa Smith') # there should be 1 contributor now self.assertEqual(self.composite_resource.metadata.contributors.count(), 1) # add a period type coverage value_dict = {'name': 'Name for period coverage', 'start': '1/1/2000', 'end': '12/12/2012'} 
metadata.create_element('coverage', type='period', value=value_dict) # add a point type coverage value_dict = {'east': '56.45678', 'north': '12.6789', 'units': 'decimal deg'} metadata.create_element('coverage', type='point', value=value_dict) # there should be 2 coverage elements now self.assertEqual(self.composite_resource.metadata.coverages.count(), 2) cov_pt = self.composite_resource.metadata.coverages.all().filter(type='point').first() self.assertNotEqual(cov_pt, None) cov_period = self.composite_resource.metadata.coverages.all().filter(type='period').first() self.assertNotEqual(cov_period, None) # add a funding agency element with only the required name value type metadata.create_element('fundingagency', agency_name='NSF') # there should be 1 funding agency element now self.assertEqual(self.composite_resource.metadata.funding_agencies.count(), 1) # add another identifier metadata.create_element('identifier', name='someIdentifier', url="http://some.org/001") # there should be 2 identifier elements self.assertEqual(self.composite_resource.metadata.identifiers.count(), 2) # add publisher element publisher_CUAHSI = "Consortium of Universities for the Advancement of " \ "Hydrologic Science, Inc. (CUAHSI)" url_CUAHSI = 'https://www.cuahsi.org' # publisher element can be added when the resource is published self.composite_resource.raccess.published = True self.composite_resource.raccess.save() # user can't set CUASHI as the publisher - when the resource has no content file # first delete the content file res_file = self.composite_resource.files.first() hydroshare.delete_resource_file(self.composite_resource.short_id, res_file.id, self.user) with self.assertRaises(Exception): metadata.create_element('publisher', name=publisher_CUAHSI, url=url_CUAHSI) metadata.create_element('publisher', name='USGS', url="http://usgs.gov") # there should a publisher element now self.assertNotEqual(self.composite_resource.metadata.publisher, None) # add a relation element of uri type metadata.create_element('relation', type='isPartOf', value='http://hydroshare.org/resource/001') # there should be 1 relation element self.assertEqual(self.composite_resource.metadata.relations.count(), 1) # add a source element of uri type metadata.create_element('source', derived_from='http://hydroshare.org/resource/0001') # there should be 1 source element self.assertEqual(self.composite_resource.metadata.sources.count(), 1) # add 2 subject elements metadata.create_element('subject', value='sub-1') metadata.create_element('subject', value='sub-2') # there should be 2 subject elements self.assertEqual(self.composite_resource.metadata.subjects.count(), 2) # add key/value metadata self.composite_resource.extra_metadata = {'key-1': 'value-1', 'key-2': 'value-2'} self.composite_resource.save() self.assertEqual(self.composite_resource.extra_metadata, {'key-1': 'value-1', 'key-2': 'value-2'}) # test update metadata # test update title metadata.update_element('title', self.composite_resource.metadata.title.id, value="New Title") self.assertEqual(self.composite_resource.metadata.title.value, 'New Title') # test update abstract metadata.update_element('description', self.composite_resource.metadata.description.id, abstract='Updated composite resource') self.assertEqual(self.composite_resource.metadata.description.abstract, 'Updated composite resource') # test updating funding agency agency_element = self.composite_resource.metadata.funding_agencies.all().filter( agency_name='NSF').first() metadata.update_element('fundingagency', 
agency_element.id, award_title="Cyber Infrastructure", award_number="NSF-101-20-6789", agency_url="http://www.nsf.gov") agency_element = self.composite_resource.metadata.funding_agencies.all().filter( agency_name='NSF').first() self.assertEquals(agency_element.agency_name, 'NSF') self.assertEquals(agency_element.award_title, 'Cyber Infrastructure') self.assertEquals(agency_element.award_number, 'NSF-101-20-6789') self.assertEquals(agency_element.agency_url, 'http://www.nsf.gov') some_idf = self.composite_resource.metadata.identifiers.all().filter( name='someIdentifier').first() metadata.update_element('identifier', some_idf.id, name='someOtherIdentifier') some_idf = self.composite_resource.metadata.identifiers.all().filter( name='someOtherIdentifier').first() self.assertNotEqual(some_idf, None) # update language self.assertEqual(self.composite_resource.metadata.language.code, 'eng') metadata.update_element('language', self.composite_resource.metadata.language.id, code='fre') self.assertEqual(self.composite_resource.metadata.language.code, 'fre') # test that updating publisher element raises exception with self.assertRaises(Exception): metadata.update_element('publisher', self.composite_resource.metadata.publisher.id, name='USU', url="http://usu.edu") # test update relation type rel_to_update = self.composite_resource.metadata.relations.all().filter( type='isPartOf').first() metadata.update_element('relation', rel_to_update.id, type='isVersionOf', value="dummy value 2") rel_to_update = self.composite_resource.metadata.relations.all().filter( type='isVersionOf').first() self.assertEqual(rel_to_update.value, "dummy value 2") src_1 = self.composite_resource.metadata.sources.all().filter( derived_from='http://hydroshare.org/resource/0001').first() metadata.update_element('source', src_1.id, derived_from='http://hydroshare.org/resource/0002') src_1 = self.composite_resource.metadata.sources.first() self.assertEqual(src_1.derived_from, 'http://hydroshare.org/resource/0002') # change the point coverage to type box # since we deleted the content file, there should not be any coverage element self.assertEqual(self.composite_resource.metadata.coverages.count(), 0) # add a point type coverage value_dict = {'east': '56.45678', 'north': '12.6789', 'units': 'decimal deg'} metadata.create_element('coverage', type='point', value=value_dict) value_dict = {'northlimit': '56.45678', 'eastlimit': '120.6789', 'southlimit': '16.45678', 'westlimit': '16.6789', 'units': 'decimal deg'} cov_pt = self.composite_resource.metadata.coverages.all().filter(type='point').first() metadata.update_element('coverage', cov_pt.id, type='box', value=value_dict) cov_pt = self.composite_resource.metadata.coverages.all().filter(type='point').first() self.assertEqual(cov_pt, None) cov_box = self.composite_resource.metadata.coverages.all().filter(type='box').first() self.assertNotEqual(cov_box, None) # update creator creator = self.composite_resource.metadata.creators.all().filter(name='John Smith').first() self.assertEqual(creator.email, None) metadata.update_element('creator', creator.id, email='[email protected]') creator = self.composite_resource.metadata.creators.all().filter(name='John Smith').first() self.assertEqual(creator.email, '[email protected]') # update contributor contributor = self.composite_resource.metadata.contributors.first() self.assertEqual(contributor.email, None) metadata.update_element('contributor', contributor.id, email='[email protected]') contributor = self.composite_resource.metadata.contributors.first() 
self.assertEqual(contributor.email, '[email protected]') self.composite_resource.delete() def test_metadata_xml(self): """test that the call to resource.get_metadata_xml() doesn't raise exception for composite resource type get_metadata_xml() includes both resource level metadata and file type metadata for each logical file objects within the resource """ # 1. create core metadata elements # 2. create genericlogicalfile type metadata # 3. create georasterlogicalfile type metadata self._create_composite_resource() # add a file to the resource to auto create format element # as well as be able to add generic file type metadata self.generic_file_obj = open(self.generic_file, 'r') resource_file_add_process(resource=self.composite_resource, files=(self.generic_file_obj,), user=self.user) # add a raster file to the resource to auto create format element # as well as be able to add raster file type metadata self.raster_file_obj = open(self.raster_file, 'r') resource_file_add_process(resource=self.composite_resource, files=(self.raster_file_obj,), user=self.user) self.assertEqual(self.composite_resource.files.all().count(), 2) # add some core metadata # create abstract metadata = self.composite_resource.metadata metadata.create_element('description', abstract='new abstract for the resource') # add a contributor metadata.create_element('contributor', name='Lisa Smith') # add a funding agency element with only the required name value type metadata.create_element('fundingagency', agency_name='NSF') # add a relation element of uri type metadata.create_element('relation', type='isPartOf', value='http://hydroshare.org/resource/001') # add a source element of uri type metadata.create_element('source', derived_from='http://hydroshare.org/resource/0001') # add 2 subject elements metadata.create_element('subject', value='sub-1') metadata.create_element('subject', value='sub-2') # add key/value metadata self.composite_resource.extra_metadata = {'key-1': 'value-1', 'key-2': 'value-2'} self.composite_resource.save() # add a publisher element self.composite_resource.raccess.published = True self.composite_resource.raccess.save() publisher_CUAHSI = "Consortium of Universities for the Advancement of " \ "Hydrologic Science, Inc. 
(CUAHSI)" url_CUAHSI = 'https://www.cuahsi.org' metadata.create_element('publisher', name=publisher_CUAHSI, url=url_CUAHSI) # add generic logical file type metadata res_file = [f for f in self.composite_resource.files.all() if f.logical_file_type_name == "GenericLogicalFile"][0] gen_logical_file = res_file.logical_file # add dataset name self.assertEqual(gen_logical_file.dataset_name, None) gen_logical_file.dataset_name = "This is a generic dataset" gen_logical_file.save() # add key/value metadata gen_logical_file.metadata.extra_metadata = {'key1': 'value 1', 'key2': 'value 2'} gen_logical_file.metadata.save() # add temporal coverage value_dict = {'name': 'Name for period coverage', 'start': '1/1/2000', 'end': '12/12/2012'} gen_logical_file.metadata.create_element('coverage', type='period', value=value_dict) # add spatial coverage value_dict = {'east': '56.45678', 'north': '12.6789', 'units': 'decimal deg'} gen_logical_file.metadata.create_element('coverage', type='point', value=value_dict) tif_res_file = [f for f in self.composite_resource.files.all() if f.extension == ".tif"][0] GeoRasterLogicalFile.set_file_type(self.composite_resource, tif_res_file.id, self.user) # add generic logical file type metadata res_file = [f for f in self.composite_resource.files.all() if f.logical_file_type_name == "GeoRasterLogicalFile"][0] raster_logical_file = res_file.logical_file # check we have dataset name self.assertEqual(raster_logical_file.dataset_name, "small_logan") # add key/value metadata raster_logical_file.metadata.extra_metadata = {'keyA': 'value A', 'keyB': 'value B'} raster_logical_file.metadata.save() # add temporal coverage value_dict = {'name': 'Name for period coverage', 'start': '1/1/2010', 'end': '12/12/2016'} raster_logical_file.metadata.create_element('coverage', type='period', value=value_dict) # test no exception raised when generating the metadata xml for this resource type try: self.composite_resource.get_metadata_xml() except Exception as ex: self.fail("Failed to generate metadata in xml format. Error:{}".format(ex.message)) self.composite_resource.delete() def test_resource_coverage_auto_update(self): # this is to test that the spatial coverage and temporal coverage # for composite resource get updated by the system based on the # coverage metadata that all logical file objects of the resource have at anytime # 1. test that resource coverages get updated on LFO level metadata creation # 2. test that resource coverages get updated on LFO level metadata update # 3. 
test that resource coverages get updated on content file delete # create a composite resource with no content file self._create_composite_resource() # at this point the there should not be any resource level coverage metadata self.assertEqual(self.composite_resource.metadata.coverages.count(), 0) # now add the raster tif file to the resource - which should put this file as # part of a GenericLogicalFile object self.raster_file_obj = open(self.raster_file, 'r') resource_file_add_process(resource=self.composite_resource, files=(self.raster_file_obj,), user=self.user) res_file = self.composite_resource.files.all().first() GeoRasterLogicalFile.set_file_type(self.composite_resource, res_file.id, self.user) # raster logical file should have a coverage element of type box res_file = [f for f in self.composite_resource.files.all() if f.logical_file_type_name == "GeoRasterLogicalFile"][0] raster_logical_file = res_file.logical_file self.assertEqual(raster_logical_file.metadata.coverages.count(), 1) self.assertEqual(raster_logical_file.metadata.coverages.all().filter( type='box').count(), 1) # now the resource should have a coverage metadata element of type box self.assertEqual(self.composite_resource.metadata.coverages.count(), 1) self.assertEqual(self.composite_resource.metadata.coverages.all().filter( type='box').count(), 1) # the spatial coverage at the file type level should be exactly the same as the # resource level - due to auto update feature in composite resource res_coverage = self.composite_resource.metadata.coverages.all().filter(type='box').first() raster_lfo_coverage = raster_logical_file.metadata.coverages.all().filter( type='box').first() self.assertEqual(res_coverage.value['projection'], raster_lfo_coverage.value['projection']) self.assertEqual(res_coverage.value['units'], raster_lfo_coverage.value['units']) self.assertEqual(res_coverage.value['northlimit'], raster_lfo_coverage.value['northlimit']) self.assertEqual(res_coverage.value['southlimit'], raster_lfo_coverage.value['southlimit']) self.assertEqual(res_coverage.value['eastlimit'], raster_lfo_coverage.value['eastlimit']) self.assertEqual(res_coverage.value['westlimit'], raster_lfo_coverage.value['westlimit']) # At this point there is not temporal coverage either at the file type level or resource # level self.assertEqual(self.composite_resource.metadata.coverages.all().filter( type='period').count(), 0) self.assertEqual(raster_logical_file.metadata.coverages.all().filter( type='period').count(), 0) # adding temporal coverage to the logical file should add the temporal coverage to the # resource value_dict = {'start': '1/1/2010', 'end': '12/12/2015'} raster_logical_file.metadata.create_element('coverage', type='period', value=value_dict) self.assertEqual(self.composite_resource.metadata.coverages.all().filter( type='period').count(), 1) self.assertEqual(raster_logical_file.metadata.coverages.all().filter( type='period').count(), 1) res_coverage = self.composite_resource.metadata.coverages.all().filter( type='period').first() raster_lfo_coverage = raster_logical_file.metadata.coverages.all().filter( type='period').first() self.assertEqual(res_coverage.value['start'], raster_lfo_coverage.value['start']) self.assertEqual(res_coverage.value['end'], raster_lfo_coverage.value['end']) self.assertEqual(res_coverage.value['start'], '1/1/2010') self.assertEqual(res_coverage.value['end'], '12/12/2015') # test updating the temporal coverage for file type should update the temporal coverage # for the resource value_dict = {'start': 
'12/1/2010', 'end': '12/1/2015'} raster_logical_file.metadata.update_element('coverage', raster_lfo_coverage.id, type='period', value=value_dict) res_coverage = self.composite_resource.metadata.coverages.all().filter( type='period').first() raster_lfo_coverage = raster_logical_file.metadata.coverages.all().filter( type='period').first() self.assertEqual(res_coverage.value['start'], raster_lfo_coverage.value['start']) self.assertEqual(res_coverage.value['end'], raster_lfo_coverage.value['end']) self.assertEqual(res_coverage.value['start'], '12/1/2010') self.assertEqual(res_coverage.value['end'], '12/1/2015') # test that the resource coverage is superset of file type coverages self.generic_file_obj = open(self.generic_file, 'r') resource_file_add_process(resource=self.composite_resource, files=(self.generic_file_obj,), user=self.user) res_file = [f for f in self.composite_resource.files.all() if f.logical_file_type_name == "GenericLogicalFile"][0] generic_logical_file = res_file.logical_file # there should not be any coverage for the generic LFO at this point self.assertEqual(generic_logical_file.metadata.coverages.count(), 0) # create temporal coverage for generic LFO value_dict = {'start': '1/1/2009', 'end': '1/1/2015'} generic_logical_file.metadata.create_element('coverage', type='period', value=value_dict) self.assertEqual(generic_logical_file.metadata.coverages.count(), 1) res_coverage = self.composite_resource.metadata.coverages.all().filter( type='period').first() # resource temporal coverage is now super set of the 2 temporal coverages # in 2 LFOs self.assertEqual(res_coverage.value['start'], '1/1/2009') self.assertEqual(res_coverage.value['end'], '12/1/2015') # test resource superset spatial coverage res_coverage = self.composite_resource.metadata.coverages.all().filter( type='box').first() self.assertEqual(res_coverage.value['projection'], 'WGS 84 EPSG:4326') self.assertEqual(res_coverage.value['units'], 'Decimal degrees') self.assertEqual(res_coverage.value['northlimit'], 42.0500269597691) self.assertEqual(res_coverage.value['eastlimit'], -111.57773718106195) self.assertEqual(res_coverage.value['southlimit'], 41.98722286029891) self.assertEqual(res_coverage.value['westlimit'], -111.69756293084055) value_dict = {'east': '-110.88845678', 'north': '43.6789', 'units': 'Decimal deg'} generic_logical_file.metadata.create_element('coverage', type='point', value=value_dict) res_coverage = self.composite_resource.metadata.coverages.all().filter( type='box').first() self.assertEqual(res_coverage.value['projection'], 'WGS 84 EPSG:4326') self.assertEqual(res_coverage.value['units'], 'Decimal degrees') self.assertEqual(res_coverage.value['northlimit'], 43.6789) self.assertEqual(res_coverage.value['eastlimit'], -110.88845678) self.assertEqual(res_coverage.value['southlimit'], 41.98722286029891) self.assertEqual(res_coverage.value['westlimit'], -111.69756293084055) # update the LFO coverage to box type value_dict = {'eastlimit': '-110.88845678', 'northlimit': '43.6789', 'westlimit': '-112.78967', 'southlimit': '40.12345', 'units': 'Decimal deg'} lfo_spatial_coverage = generic_logical_file.metadata.spatial_coverage generic_logical_file.metadata.update_element('coverage', lfo_spatial_coverage.id, type='box', value=value_dict) res_coverage = self.composite_resource.metadata.coverages.all().filter( type='box').first() self.assertEqual(res_coverage.value['projection'], 'WGS 84 EPSG:4326') self.assertEqual(res_coverage.value['units'], 'Decimal degrees') 
self.assertEqual(res_coverage.value['northlimit'], 43.6789) self.assertEqual(res_coverage.value['eastlimit'], -110.88845678) self.assertEqual(res_coverage.value['southlimit'], 40.12345) self.assertEqual(res_coverage.value['westlimit'], -112.78967) # deleting the generic file should reset the coverage of the resource to that of the # raster LFO res_file = [f for f in self.composite_resource.files.all() if f.logical_file_type_name == "GenericLogicalFile"][0] hydroshare.delete_resource_file(self.composite_resource.short_id, res_file.id, self.user) res_coverage = self.composite_resource.metadata.coverages.all().filter( type='box').first() raster_lfo_coverage = raster_logical_file.metadata.coverages.all().filter( type='box').first() self.assertEqual(res_coverage.value['projection'], raster_lfo_coverage.value['projection']) self.assertEqual(res_coverage.value['units'], raster_lfo_coverage.value['units']) self.assertEqual(res_coverage.value['northlimit'], raster_lfo_coverage.value['northlimit']) self.assertEqual(res_coverage.value['southlimit'], raster_lfo_coverage.value['southlimit']) self.assertEqual(res_coverage.value['eastlimit'], raster_lfo_coverage.value['eastlimit']) self.assertEqual(res_coverage.value['westlimit'], raster_lfo_coverage.value['westlimit']) res_coverage = self.composite_resource.metadata.coverages.all().filter( type='period').first() raster_lfo_coverage = raster_logical_file.metadata.coverages.all().filter( type='period').first() self.assertEqual(res_coverage.value['start'], raster_lfo_coverage.value['start']) self.assertEqual(res_coverage.value['end'], raster_lfo_coverage.value['end']) self.assertEqual(res_coverage.value['start'], '12/1/2010') self.assertEqual(res_coverage.value['end'], '12/1/2015') # deleting the remaining content file from resource should leave the resource # with no coverage element res_file = [f for f in self.composite_resource.files.all() if f.logical_file_type_name == "GeoRasterLogicalFile"][0] hydroshare.delete_resource_file(self.composite_resource.short_id, res_file.id, self.user) self.assertEqual(self.composite_resource.files.count(), 0) self.assertEqual(self.composite_resource.metadata.coverages.count(), 0) self.composite_resource.delete() def test_can_be_public_or_discoverable(self): self._create_composite_resource() # at this point resource can't be public or discoverable as some core metadata missing self.assertEqual(self.composite_resource.can_be_public_or_discoverable, False) # add a text file self.generic_file_obj = open(self.generic_file, 'r') resource_file_add_process(resource=self.composite_resource, files=(self.generic_file_obj,), user=self.user) # at this point still resource can't be public or discoverable - as some core metadata # is missing self.assertEqual(self.composite_resource.can_be_public_or_discoverable, False) # add a raster file to the resource to auto create format element self.raster_file_obj = open(self.raster_file, 'r') resource_file_add_process(resource=self.composite_resource, files=(self.raster_file_obj,), user=self.user) # at this point still resource can't be public or discoverable - as some core metadata # is missing self.assertEqual(self.composite_resource.can_be_public_or_discoverable, False) # there should be 3 required core metadata elements missing at this point missing_elements = self.composite_resource.metadata.get_required_missing_elements() self.assertEqual(len(missing_elements), 2) self.assertIn('Abstract', missing_elements) self.assertIn('Keywords', missing_elements) # add the above missing elements # 
create abstract metadata = self.composite_resource.metadata # add Abstract (element name is description) metadata.create_element('description', abstract='new abstract for the resource') # add keywords (element name is subject) metadata.create_element('subject', value='sub-1') # at this point resource can be public or discoverable self.assertEqual(self.composite_resource.can_be_public_or_discoverable, True) self.composite_resource.delete() def test_supports_folder_creation(self): """Here we are testing the function supports_folder_creation() """ self._create_composite_resource() # add a file to the resource which will be part of a GenericLogicalFile object self._add_generic_file_to_resource() self.assertEqual(self.composite_resource.files.count(), 1) # we should be able to create this new folder new_folder_full_path = os.path.join(self.composite_resource.file_path, "my-new-folder") self.assertEqual(self.composite_resource.supports_folder_creation(new_folder_full_path), True) # create the folder new_folder_path = os.path.join("data", "contents", "my-new-folder") create_folder(self.composite_resource.short_id, new_folder_path) old_file_path = self.composite_resource.files.get().short_path # now move the file to this new folder move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, os.path.join("data", "contents", old_file_path), os.path.join(new_folder_path, self.generic_file_name)) # test that we should be able to create a folder inside the folder that contains # a resource file that is part of a Generic Logical file new_folder_full_path = os.path.join(new_folder_full_path, "another-folder") self.assertTrue(self.composite_resource.supports_folder_creation(new_folder_full_path)) # add a raster tif file to the resource which will be part of # a GoeRasterLogicalFile object self._add_raster_file_to_resource() self.assertEqual(self.composite_resource.files.count(), 2) # make the tif as part of the GeoRasterLogicalFile tif_res_file = hydroshare.utils.get_resource_files_by_extension( self.composite_resource, '.tif')[0] GeoRasterLogicalFile.set_file_type(self.composite_resource, tif_res_file.id, self.user) tif_res_file = hydroshare.utils.get_resource_files_by_extension( self.composite_resource, '.tif')[0] self.assertTrue(tif_res_file.resource_file.name.endswith( "/data/contents/small_logan/small_logan.tif")) # test that creating a folder at "/data/contents/small_logan/" is not supported # as that folder contains a resource file that's part of GeoRaster logical file new_folder_path = "{}/data/contents/small_logan/my-new-folder" new_folder_path = new_folder_path.format(self.composite_resource.short_id) self.assertEqual(self.composite_resource.supports_folder_creation(new_folder_path), False) self.composite_resource.delete() def test_supports_move_or_rename_file_or_folder(self): """here we are testing the function supports_move_or_rename_file_or_folder() of the composite resource class""" self._create_composite_resource() # add a file to the resource which will be part of a GenericLogicalFile object self._add_generic_file_to_resource() self.assertEqual(self.composite_resource.files.count(), 1) # test that we can rename the resource file that's part of the GenericLogical File gen_res_file = self.composite_resource.files.first() gen_res_file_basename = hydroshare.utils.get_resource_file_name_and_extension( gen_res_file)[1] self.assertEqual(self.generic_file_name, gen_res_file_basename) src_full_path = os.path.join(self.composite_resource.file_path, self.generic_file_name) 
tgt_full_path = os.path.join(self.composite_resource.file_path, 'renamed_file.txt') # this is the function we are testing self.assertEqual(self.composite_resource.supports_rename_path( src_full_path, tgt_full_path), True) # create a new folder so that we can test if the generic file can be moved there or not # this code is confusing because three different conventions are involved: # 1. Relative path # 2. Partially qualified path data/contents/folder # 3. Fully qualified path starting at root_path and containing file_path new_folder_full_path = os.path.join(self.composite_resource.file_path, "my-new-folder") new_folder_path = os.path.join("data", "contents", "my-new-folder") self.assertTrue(self.composite_resource.supports_folder_creation(new_folder_full_path)) # create the folder create_folder(self.composite_resource.short_id, new_folder_path) # now move the file to this new folder tgt_full_path = os.path.join(new_folder_full_path, self.generic_file_name) # this is the function we are testing self.assertEqual(self.composite_resource.supports_rename_path( src_full_path, tgt_full_path), True) # test that if a folder contains a resource file that's part of a GenericLogicalFile # that folder can be renamed # now move the file to this new folder move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, 'data/contents/' + self.generic_file_name, new_folder_path + "/" + self.generic_file_name) # test rename folder src_full_path = self.composite_resource.short_id + '/data/contents/my-new-folder/' tgt_full_path = self.composite_resource.short_id + '/data/contents/my-new-folder-1/' # this is the function we are testing self.assertEqual(self.composite_resource.supports_rename_path( src_full_path, tgt_full_path), True) # add a raster tif file to the resource which will be part of # a GoeRasterLogicalFile object self._add_raster_file_to_resource() self.assertEqual(self.composite_resource.files.count(), 2) # make the tif as part of the GeoRasterLogicalFile tif_res_file = hydroshare.utils.get_resource_files_by_extension( self.composite_resource, '.tif')[0] GeoRasterLogicalFile.set_file_type(self.composite_resource, tif_res_file.id, self.user) tif_res_file = hydroshare.utils.get_resource_files_by_extension( self.composite_resource, '.tif')[0] self.assertTrue(tif_res_file.resource_file.name.endswith( "/data/contents/small_logan/small_logan.tif")) # test renaming of any files that are part of GeoRasterLogicalFile is not allowed src_full_path = self.composite_resource.short_id + '/data/contents/small_logan/' + \ self.raster_file_name tgt_full_path = self.composite_resource.short_id + \ '/data/contents/small_logan/small_logan_1.tif' # this is the function we are testing self.assertEqual(self.composite_resource.supports_rename_path( src_full_path, tgt_full_path), False) # test rename folder that contains resource files that are part of the GeoRasterLogicalFile # is allowed src_full_path = self.composite_resource.short_id + '/data/contents/small_logan' tgt_full_path = self.composite_resource.short_id + '/data/contents/small_logan_1' # this is the function we are testing self.assertEqual(self.composite_resource.supports_rename_path( src_full_path, tgt_full_path), True) # test that we can't move a file to a folder that contains resource files that are part # of GeoRasterLogicalFile object src_full_path = self.composite_resource.short_id + '/data/contents/my-new-folder/' + \ self.generic_file_name tgt_full_path = self.composite_resource.short_id + '/data/contents/small_logan/' + \ 
self.generic_file_name # this is the function we are testing self.assertEqual(self.composite_resource.supports_rename_path( src_full_path, tgt_full_path), False) self.composite_resource.delete() def test_supports_zip(self): """Here we are testing the function supports_zip()""" self._create_composite_resource() # test that a folder containing a resource file that's part of the GenericLogicalFile # can be zipped # add a file to the resource which will be part of a GenericLogicalFile object self._add_generic_file_to_resource() self.assertEqual(self.composite_resource.files.count(), 1) new_folder_path = "data/contents/my-new-folder" # create the folder create_folder(self.composite_resource.short_id, new_folder_path) # now move the file to this new folder move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, 'data/contents/' + self.generic_file_name, new_folder_path + "/" + self.generic_file_name) folder_to_zip = self.composite_resource.short_id + '/data/contents/my-new-folder' # test that we can zip the folder my_new_folder self.assertEqual(self.composite_resource.supports_zip(folder_to_zip), True) # test that a folder containing resource files that are part of the GeorasterLogicalFile # can be zipped self._add_raster_file_to_resource() self.assertEqual(self.composite_resource.files.count(), 2) # make the tif as part of the GeoRasterLogicalFile tif_res_file = hydroshare.utils.get_resource_files_by_extension( self.composite_resource, '.tif')[0] GeoRasterLogicalFile.set_file_type(self.composite_resource, tif_res_file.id, self.user) tif_res_file = hydroshare.utils.get_resource_files_by_extension( self.composite_resource, '.tif')[0] # resource file exists in a new folder 'small_logan' self.assertTrue(tif_res_file.resource_file.name.endswith( "/data/contents/small_logan/small_logan.tif")) folder_to_zip = self.composite_resource.short_id + '/data/contents/small_logan' # test that we can zip the folder small_logan self.assertEqual(self.composite_resource.supports_zip(folder_to_zip), True) self.composite_resource.delete() def test_supports_delete_original_folder_on_zip(self): """Here we are testing the function supports_delete_original_folder_on_zip() of the composite resource class""" self._create_composite_resource() # test that a folder containing a resource file that's part of the GenericLogicalFile # can be deleted after that folder gets zipped # add a file to the resource which will be part of a GenericLogicalFile object self._add_generic_file_to_resource() self.assertEqual(self.composite_resource.files.count(), 1) new_folder_path = "data/contents/my-new-folder" # create the folder create_folder(self.composite_resource.short_id, new_folder_path) # now move the file to this new folder move_or_rename_file_or_folder(self.user, self.composite_resource.short_id, 'data/contents/' + self.generic_file_name, new_folder_path + "/" + self.generic_file_name) folder_to_zip = self.composite_resource.short_id + '/data/contents/my-new-folder' # test that we can zip the folder my_new_folder self.assertEqual(self.composite_resource.supports_zip(folder_to_zip), True) # this is the function we are testing - my-new-folder can be deleted self.assertEqual(self.composite_resource.supports_delete_folder_on_zip( folder_to_zip), True) # test that a folder containing a resource file that's part of the GeoRasterLogicalFile # can't be deleted after that folder gets zipped # add a file to the resource which will be part of a GeoRasterLogicalFile object self._add_raster_file_to_resource() 
        self.assertEqual(self.composite_resource.files.count(), 2)
        # make the tif as part of the GeoRasterLogicalFile
        tif_res_file = hydroshare.utils.get_resource_files_by_extension(
            self.composite_resource, '.tif')[0]
        GeoRasterLogicalFile.set_file_type(self.composite_resource, tif_res_file.id, self.user)
        tif_res_file = hydroshare.utils.get_resource_files_by_extension(
            self.composite_resource, '.tif')[0]
        # resource file exists in a new folder 'small_logan'
        self.assertTrue(tif_res_file.resource_file.name.endswith(
            "/data/contents/small_logan/small_logan.tif"))
        folder_to_zip = self.composite_resource.short_id + '/data/contents/small_logan'
        # test that we can zip the folder small_logan
        self.assertEqual(self.composite_resource.supports_zip(folder_to_zip), True)
        # this is the function we are testing - small_logan folder can't be deleted
        self.assertEqual(self.composite_resource.supports_delete_folder_on_zip(
            folder_to_zip), False)

        self.composite_resource.delete()

    def _create_composite_resource(self):
        self.composite_resource = hydroshare.create_resource(
            resource_type='CompositeResource',
            owner=self.user,
            title='Test Composite Resource'
        )

    def _add_generic_file_to_resource(self):
        self.generic_file_obj = UploadedFile(file=open(self.generic_file, 'rb'),
                                             name=os.path.basename(self.generic_file))
        resource_file_add_process(resource=self.composite_resource,
                                  files=(self.generic_file_obj,), user=self.user)

    def _add_raster_file_to_resource(self):
        self.raster_file_obj = UploadedFile(file=open(self.raster_file, 'rb'),
                                            name=os.path.basename(self.raster_file))
        resource_file_add_process(resource=self.composite_resource,
                                  files=(self.raster_file_obj,), user=self.user)
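
    # Several of the tests above select a resource file by extension with an inline
    # list comprehension. A small helper along these lines (hypothetical, not part of
    # the original test class) would make that pattern reusable; it relies only on the
    # `extension` attribute and the `files.all()` queryset already used above.
    def _get_res_file_by_extension(self, extension):
        """Return the first resource file with the given extension (e.g. '.tif'), or None."""
        matches = [f for f in self.composite_resource.files.all()
                   if f.extension == extension]
        return matches[0] if matches else None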
"""Tests for the WLED config flow.""" from unittest.mock import MagicMock from wled import WLEDConnectionError from homeassistant.components.wled.const import CONF_KEEP_MASTER_LIGHT, DOMAIN from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF from homeassistant.const import CONF_HOST, CONF_MAC, CONF_NAME from homeassistant.core import HomeAssistant from homeassistant.data_entry_flow import ( RESULT_TYPE_ABORT, RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM, ) from tests.common import MockConfigEntry async def test_full_user_flow_implementation( hass: HomeAssistant, mock_wled_config_flow: MagicMock, mock_setup_entry: None ) -> None: """Test the full manual user flow from start to finish.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, ) assert result.get("step_id") == "user" assert result.get("type") == RESULT_TYPE_FORM assert "flow_id" in result result = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={CONF_HOST: "192.168.1.123"} ) assert result.get("title") == "192.168.1.123" assert result.get("type") == RESULT_TYPE_CREATE_ENTRY assert "data" in result assert result["data"][CONF_HOST] == "192.168.1.123" assert result["data"][CONF_MAC] == "aabbccddeeff" async def test_full_zeroconf_flow_implementation( hass: HomeAssistant, mock_wled_config_flow: MagicMock, mock_setup_entry: None ) -> None: """Test the full manual user flow from start to finish.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_ZEROCONF}, data={"host": "192.168.1.123", "hostname": "example.local.", "properties": {}}, ) flows = hass.config_entries.flow.async_progress() assert len(flows) == 1 assert result.get("description_placeholders") == {CONF_NAME: "example"} assert result.get("step_id") == "zeroconf_confirm" assert result.get("type") == RESULT_TYPE_FORM assert "flow_id" in result flow = flows[0] assert "context" in flow assert flow["context"][CONF_HOST] == "192.168.1.123" assert flow["context"][CONF_NAME] == "example" result2 = await hass.config_entries.flow.async_configure( result["flow_id"], user_input={} ) assert result2.get("title") == "example" assert result2.get("type") == RESULT_TYPE_CREATE_ENTRY assert "data" in result2 assert result2["data"][CONF_HOST] == "192.168.1.123" assert result2["data"][CONF_MAC] == "aabbccddeeff" async def test_connection_error( hass: HomeAssistant, mock_wled_config_flow: MagicMock ) -> None: """Test we show user form on WLED connection error.""" mock_wled_config_flow.update.side_effect = WLEDConnectionError result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_USER}, data={CONF_HOST: "example.com"}, ) assert result.get("type") == RESULT_TYPE_FORM assert result.get("step_id") == "user" assert result.get("errors") == {"base": "cannot_connect"} async def test_zeroconf_connection_error( hass: HomeAssistant, mock_wled_config_flow: MagicMock ) -> None: """Test we abort zeroconf flow on WLED connection error.""" mock_wled_config_flow.update.side_effect = WLEDConnectionError result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": SOURCE_ZEROCONF}, data={"host": "192.168.1.123", "hostname": "example.local.", "properties": {}}, ) assert result.get("type") == RESULT_TYPE_ABORT assert result.get("reason") == "cannot_connect" async def test_zeroconf_confirm_connection_error( hass: HomeAssistant, mock_wled_config_flow: MagicMock ) -> None: """Test we abort zeroconf flow on WLED connection error.""" 
    mock_wled_config_flow.update.side_effect = WLEDConnectionError

    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={
            "source": SOURCE_ZEROCONF,
            CONF_HOST: "example.com",
            CONF_NAME: "test",
        },
        data={"host": "192.168.1.123", "hostname": "example.com.", "properties": {}},
    )

    assert result.get("type") == RESULT_TYPE_ABORT
    assert result.get("reason") == "cannot_connect"


async def test_user_device_exists_abort(
    hass: HomeAssistant,
    init_integration: MagicMock,
    mock_wled_config_flow: MagicMock,
) -> None:
    """Test we abort the user flow if the WLED device is already configured."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_USER},
        data={CONF_HOST: "192.168.1.123"},
    )

    assert result.get("type") == RESULT_TYPE_ABORT
    assert result.get("reason") == "already_configured"


async def test_zeroconf_device_exists_abort(
    hass: HomeAssistant,
    init_integration: MagicMock,
    mock_wled_config_flow: MagicMock,
) -> None:
    """Test we abort zeroconf flow if WLED device already configured."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_ZEROCONF},
        data={"host": "192.168.1.123", "hostname": "example.local.", "properties": {}},
    )

    assert result.get("type") == RESULT_TYPE_ABORT
    assert result.get("reason") == "already_configured"


async def test_zeroconf_with_mac_device_exists_abort(
    hass: HomeAssistant,
    init_integration: MockConfigEntry,
    mock_wled_config_flow: MagicMock,
) -> None:
    """Test we abort zeroconf flow if WLED device already configured."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": SOURCE_ZEROCONF},
        data={
            "host": "192.168.1.123",
            "hostname": "example.local.",
            "properties": {CONF_MAC: "aabbccddeeff"},
        },
    )

    assert result.get("type") == RESULT_TYPE_ABORT
    assert result.get("reason") == "already_configured"


async def test_options_flow(
    hass: HomeAssistant, mock_config_entry: MockConfigEntry
) -> None:
    """Test options config flow."""
    mock_config_entry.add_to_hass(hass)

    result = await hass.config_entries.options.async_init(mock_config_entry.entry_id)

    assert result.get("type") == RESULT_TYPE_FORM
    assert result.get("step_id") == "init"
    assert "flow_id" in result

    result2 = await hass.config_entries.options.async_configure(
        result["flow_id"],
        user_input={CONF_KEEP_MASTER_LIGHT: True},
    )

    assert result2.get("type") == RESULT_TYPE_CREATE_ENTRY
    assert result2.get("data") == {
        CONF_KEEP_MASTER_LIGHT: True,
    }
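

# The fixtures referenced above (mock_wled_config_flow, mock_setup_entry,
# init_integration, mock_config_entry) are defined outside this file, most likely in a
# conftest.py that is not shown here. The sketch below illustrates what the WLED client
# mock could look like; it assumes the config flow creates a WLED client in
# homeassistant.components.wled.config_flow and exposes the MAC address via
# device.info.mac_address (both are assumptions, not confirmed by this file).
from unittest.mock import patch

import pytest


@pytest.fixture
def mock_wled_config_flow_sketch():
    """Illustrative stand-in for the real mock_wled_config_flow fixture."""
    with patch("homeassistant.components.wled.config_flow.WLED") as wled_mock:
        wled = wled_mock.return_value
        # The tests above expect the discovered MAC to be "aabbccddeeff".
        wled.update.return_value.info.mac_address = "aabbccddeeff"
        yield wled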
""" @file inv/api/methods.py @author Brian Kim @brief this module defines the list of api methods supported by inv as well as the authorization scheme of user groups """ from model import * from datetime import datetime # definition of the methods def save(): db.session.commit() def list_obj2dict(x): """ converts a list of model objects to dictionaries """ return [dict(y) for y in x] """ User methods """ def read_user(uid): """ @param id int the id of the user """ return User.query.filter_by(uid=uid).first() def read_user_all(): return list_obj2dict(User.query.filter_by().all()) def delete_user(uid): u = User.query.filter_by(uid=uid).first() if not u: return False db.session.delete(u) save() return True """ Location methods """ def create_building(x): """ @param x str the name of the building @return y LocationBuilding the object that was placed into the db or None for failure """ y = None exists = find_building(x) if not exists: y = LocationBuilding(x) db.session.add(y) save() return y def find_building(name): """ @param x str the name of the building @return y LocationBuilding or None """ return LocationBuilding.query.filter_by(name=name).first() def read_building(x): """ @param x str the id of the building @return y LocationBuilding or None """ return LocationBuilding.query.filter_by(id=x).first() def read_building_all(): """ @return list of dict representation of LocationBuildings """ return list_obj2dict(LocationBuilding.query.filter_by().all()) def update_building(id,x): """ @param id int the id of the building @param x str the new name of the building @return building LocationBuilding or None """ building = LocationBuilding.query.filter_by(id=id).first() if not building: return None building.name = x save() return building def delete_building(id): """ @param id int the id of the building to delete @return bool True for success, False for failure """ building = LocationBuilding.query.filter_by(id=id).first() if not building: return False db.session.delete(building) save() return True def count_all_location(by): return 0 def count_location(by,id): return 0 def create_location(x): """ @param x dict key/value pairs of the location to add @return y Location the object that was placed into the db or None for failure """ y = None exists = Location.query.filter(Location.building.has(name=x['building']),Location.room.is_(x['room'])).first() if not exists: # get the values building = find_building(x['building']) room = x['room'] # create a building entry if it doesn't exist if not building: building = create_building(x['building']) # create and add the location y = Location(building,room) db.session.add(y) save() return y def read_location_all(): x = Location.query.filter_by().all() return list_obj2dict(x) def read_location(id): return Location.query.filter_by(id=id).first() def update_location(id,x): """ @param id int the id of the building @param x dict key/value pairs of the updated values @return loc Location or None """ loc = read_location(id) if not loc: return None # building name new_build = x.get('building') if loc.building.name != new_build: build = find_building(new_build) build = create_building(new_build) if not build else build loc.building = build # room number loc.room = x.get('room') save() return loc def delete_location(id): """ @param id int the id of the location to delete @return bool True for success, False for failure """ loc = Location.query.filter_by(id=id).first() if not loc: return False db.session.delete(loc) save() return True """ Item methods """ def 
create_category(x): """ @param x str the name of the building @return y ItemCategory the object that was placed into the db or None for failure """ y = None exists = find_category(x) if not exists: y = ItemCategory(x) db.session.add(y) save() return y def find_category(name): """ @param x str the name of the item category @return y ItemCategory or None """ return ItemCategory.query.filter_by(name=name).first() def read_category(x): """ @param x str the name of the category @return y ItemCategory or None """ return ItemCategory.query.filter_by(id=x).first() def read_category_all(): """ @return list of dict representation of all item categories """ return list_obj2dict(ItemCategory.query.filter_by().all()) def update_category(id,x): """ @param id int the id of the category @param x str the new name of the category @return cat Category or None """ cat = Category.query.filter_by(id=id).first() if not cat: return None # category name cat.name = x.get('name') save() return cat def delete_category(id): """ @param id int the id of the category to delete @return bool True for success, False for failure """ cat = Category.query.filter_by(id=id).first() if not cat: return False db.session.delete(cat) save() return True def create_manufacturer(x): """ @param x str the name of the building @return y LocationBuilding the object that was placed into the db or None for failure """ y = None exists = find_manufacturer(x) if not exists: y = ItemManufacturer(x) db.session.add(y) save() return y def find_manufacturer(name): """ @param x str the name of the item manufacturer @return y ItemManufacturer or None """ return ItemManufacturer.query.filter_by(name=name).first() def read_manufacturer(x): """ @param x str the id of the manufacturer @return y ItemManufacturer or None """ return ItemManufacturer.query.filter_by(id=x).first() def read_manufacturer_all(): """ @return list of dict representation of all item manufacturers """ return list_obj2dict(ItemManufacturer.query.filter_by().all()) def update_manufacturer(id,x): """ @param id int the id of the manufacturer @param x str the new name of the manufacturer @return man ItemManufacturer or None """ man = ItemManufacturer.query.filter_by(id=id).first() if not man: return None # manufacturer name man.name = x.get('name') save() return man def delete_manufacturer(id): """ @param id int the id of the manufacturer to delete @return bool True for success, False for failure """ man = ItemManufacturer.query.filter_by(id=id).first() if not man: return False db.session.delete(man) save() return True def item_exists(x): category = Item.category.has(name=x['category']) manufacturer = Item.manufacturer.has(name=x['manufacturer']) model = Item.model.is_(x['model']) return Item.query.filter(category,manufacturer,model).first() def create_item(x): """ @param x dict key/value pairs of the item to add """ y = None if not item_exists(x): # get the values category = find_category(x['category']) manufacturer = find_manufacturer(x['manufacturer']) model = x['model'] # create a building entry if it doesn't exist if not category: category = create_category(x['category']) if not manufacturer: manufacturer = create_manufacturer(x['manufacturer']) # create and add the item y = Item(category,manufacturer,model) db.session.add(y) save() return y def read_item_all(): x = Item.query.filter_by().all() return list_obj2dict(x) def read_item(id): return Item.query.filter_by(id=id).first() def update_item(id,x): """ @param id int the id of the item @param x dict key/value pairs of the updated 
values @return i Item or None """ i = read_item(id) if not i: return None # item category new_cat = x.get('category') if i.category.name != new_cat: cat = find_category(new_cat) cat = create_category(new_cat) if not cat else cat i.category = cat # item manufacturer new_man = x.get('manufacturer') if i.manufacturer.name != new_man: man = find_manufacturer(new_man) man = create_manufacturer(new_man) if not man else man i.manufacturer = man # item model i.model = x.get('model') save() return i def delete_item(id): """ @param id int the id of the item to delete @return bool True for success, False for failure """ i = read_item(id) if not i: return False db.session.delete(i) save() return True """ asset methods """ def asset_exists(x): return Asset.query.filter(Asset.tag_ece.is_(x['tag_ece'])).first() def create_asset(x): """ @param x dict key/value pairs of the asset to add """ y = None if not asset_exists(x): # get the values ece = x['tag_ece'] vu = x['tag_vu'] unit = x['tag_unit'] svc = x['tag_svc'] serial = x['serial'] status = x['status'] item_id = x['item'] # price = x['price'] ip = x['ip'] comments = x['comments'] purchased_since1970 = x['purchased'] purchased = datetime.fromtimestamp(purchased_since1970) if purchased_since1970 else None owner_id = x['owner'] # home_id = x['home'] # # convert id's into objects item = Item.query.filter_by(id=item_id).first() owner = User.query.filter_by(uid=owner_id).first() home = Location.query.filter_by(id=home_id).first() # create the asset y = Asset(ece,status,item,comments,price,ip,purchased,owner,home,vu,unit,svc,serial) # insert into db db.session.add(y) save() return y def read_asset_all(): return list_obj2dict(Asset.query.filter_by().all()) def read_asset(id): return Asset.query.filter_by(tag_ece=id).first() def update_asset(id,x): """ @param id int the id of the asset to update @param x dict key/value pairs of the updated values @return a Asset or None """ a = read_asset(id) if not a: return None if x.get('tag_ece'): a.tag_ece = x.get('tag_ece') if x.get('tag_vu'): a.tag_vu = x.get('tag_vu') if x.get('tag_unit'): a.tag_unit = x.get('tag_unit') if x.get('tag_svc'): a.tag_svc = x.get('tag_svc') if x.get('serial'): a.serial = x.get('serial') if x.get('status'): a.status = x.get('status') if x.get('item'): a.item = read_item(x.get('item')) if x.get('price'): a.price = x['price'] if x.get('ip'): a.ip = x['ip'] if x.get('comments'): a.comments = x['comments'] if x.get('purchased'): a.purchased = datetime.fromtimestamp(x['purchased']) if x.get('owner'): a.owner = User.query.filter_by(uid=x['owner']).first() if x.get('home'): a.home = Location.query.filter_by(id=x['home']).first() save() return a def delete_asset(id): """ @param id int the id of the asset to delete """ a = read_asset(id) if not a: return False db.session.delete(a) save() return True """ inventory methods """ def create_inv(x): who = read_user(x['who']) what = read_asset(x['what']) when = datetime.fromtimestamp(x['when']) where = read_location(x['where']) how = x['how'] inv = Inventory(who,what,when,where,how) db.session.add(inv) save() return inv def read_inv(id): return Inventory.query.filter_by(id=id).first() def read_inv_all(): return list_obj2dict(Inventory.query.filter_by().all()) def read_inv_by_user(id): return list_obj2dict(Inventory.query.filter(Inventory.who.has(uid=uid)).all()) def read_inv_by_asset(id): return list_obj2dict(Inventory.query.filter(Inventory.what.has(id=id)).all()) def read_inv_by_location(id): return 
list_obj2dict(Inventory.query.filter(Inventory.where.has(id=id)).all())

def read_inv_by_date(date):
    date = datetime.fromtimestamp(date).date()
    # compare on the date part of the timestamp column; calling .date() on the
    # column object itself is not valid SQLAlchemy
    return list_obj2dict(Inventory.query.filter(db.func.date(Inventory.when) == date).all())

def update_inv(id, x):
    inv = read_inv(id)
    if not inv:
        return None
    inv.who = User.query.filter_by(uid=x['who']).first()
    inv.what = Asset.query.filter_by(id=x['what']).first()
    inv.when = datetime.fromtimestamp(x['when'])
    inv.where = Location.query.filter_by(id=x['where']).first()
    inv.how = x['how']
    save()
    return inv

def delete_inv(id):
    inv = read_inv(id)
    if not inv:
        return False
    db.session.delete(inv)
    save()
    return True
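# --- Hedged usage sketch (not part of the original module) --------------------------
# The helpers above assume an application-bound SQLAlchemy session configured by the
# `model` module. The `from inv import app` import below is hypothetical; substitute
# whatever actually creates the Flask app in this project.
if __name__ == '__main__':
    from inv import app  # hypothetical import

    with app.app_context():
        loc = create_location({'building': 'Engineering', 'room': '101'})
        print(read_building_all())           # list of building dicts
        update_location(loc.id, {'building': 'Engineering', 'room': '102'})
        print(delete_location(loc.id))       # True on success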
import os import unittest from conans.client import tools from conans.model.ref import ConanFileReference from conans.test.utils.tools import TestClient from conans.util.files import load, mkdir, rmdir conanfile = ''' from conans import ConanFile from conans.util.files import save, load import os class ConanFileToolsTest(ConanFile): name = "Pkg" version = "0.1" exports_sources = "*" generators = "cmake" def build(self): self.output.info("Source files: %s" % load(os.path.join(self.source_folder, "file.h"))) save("myartifact.lib", "artifact contents!") save("subdir/myartifact2.lib", "artifact2 contents!") def package(self): self.copy("*.h") self.copy("*.lib") ''' class DevInSourceFlowTest(unittest.TestCase): def _assert_pkg(self, folder): self.assertEqual(sorted(['file.h', 'myartifact.lib', 'subdir', 'conaninfo.txt', 'conanmanifest.txt']), sorted(os.listdir(folder))) self.assertEqual(load(os.path.join(folder, "myartifact.lib")), "artifact contents!") self.assertEqual(load(os.path.join(folder, "subdir/myartifact2.lib")), "artifact2 contents!") def parallel_folders_test(self): client = TestClient() repo_folder = os.path.join(client.current_folder, "recipe") build_folder = os.path.join(client.current_folder, "build") package_folder = os.path.join(client.current_folder, "pkg") mkdir(repo_folder) mkdir(build_folder) mkdir(package_folder) client.current_folder = repo_folder # equivalent to git clone recipe client.save({"conanfile.py": conanfile, "file.h": "file_h_contents!"}) client.current_folder = build_folder client.run("install ../recipe") client.run("build ../recipe") client.current_folder = package_folder client.run("package ../recipe --build-folder=../build --package-folder='%s'" % package_folder) self._assert_pkg(package_folder) client.current_folder = repo_folder client.run("export . lasote/testing") client.run("export-pkg . Pkg/0.1@lasote/testing -bf=../pkg") ref = ConanFileReference.loads("Pkg/0.1@lasote/testing") cache_package_folder = client.cache.package_layout(ref).packages() cache_package_folder = os.path.join(cache_package_folder, os.listdir(cache_package_folder)[0]) self._assert_pkg(cache_package_folder) def insource_build_test(self): client = TestClient() repo_folder = client.current_folder package_folder = os.path.join(client.current_folder, "pkg") mkdir(package_folder) client.save({"conanfile.py": conanfile, "file.h": "file_h_contents!"}) client.run("install .") client.run("build .") client.current_folder = package_folder client.run("package .. --build-folder=.. --package-folder='%s' " % package_folder) self._assert_pkg(package_folder) client.current_folder = repo_folder client.run("export . lasote/testing") client.run("export-pkg . Pkg/0.1@lasote/testing -bf='%s' -if=." % package_folder) ref = ConanFileReference.loads("Pkg/0.1@lasote/testing") cache_package_folder = client.cache.package_layout(ref).packages() cache_package_folder = os.path.join(cache_package_folder, os.listdir(cache_package_folder)[0]) self._assert_pkg(cache_package_folder) def child_build_test(self): client = TestClient() build_folder = os.path.join(client.current_folder, "build") mkdir(build_folder) package_folder = os.path.join(build_folder, "package") mkdir(package_folder) client.save({"conanfile.py": conanfile, "file.h": "file_h_contents!"}) client.current_folder = build_folder client.run("install ..") client.run("build ..") client.current_folder = package_folder client.run("package ../.. 
--build-folder=../") self._assert_pkg(package_folder) rmdir(package_folder) # IMPORTANT: Symptom that package + package_folder is not fitting # well now. (To discuss) # But I think now you choose you way to develop, local or cache, if you use conan export-pkg # you are done, if you use package() you need the "conan project" feature client.current_folder = build_folder client.run("export-pkg .. Pkg/0.1@lasote/testing --source-folder=.. ") ref = ConanFileReference.loads("Pkg/0.1@lasote/testing") cache_package_folder = client.cache.package_layout(ref).packages() cache_package_folder = os.path.join(cache_package_folder, os.listdir(cache_package_folder)[0]) self._assert_pkg(cache_package_folder) conanfile_out = ''' from conans import ConanFile from conans.util.files import save, load import os class ConanFileToolsTest(ConanFile): name = "Pkg" version = "0.1" generators = "cmake" def source(self): save("file.h", "file_h_contents!") def build(self): self.output.info("Source files: %s" % load(os.path.join(self.source_folder, "file.h"))) save("myartifact.lib", "artifact contents!") def package(self): self.copy("*.h") self.copy("*.lib") ''' class DevOutSourceFlowTest(unittest.TestCase): def _assert_pkg(self, folder): self.assertEqual(sorted(['file.h', 'myartifact.lib', 'conaninfo.txt', 'conanmanifest.txt']), sorted(os.listdir(folder))) def parallel_folders_test(self): client = TestClient() repo_folder = os.path.join(client.current_folder, "recipe") src_folder = os.path.join(client.current_folder, "src") build_folder = os.path.join(client.current_folder, "build") package_folder = os.path.join(build_folder, "package") mkdir(repo_folder) mkdir(src_folder) mkdir(build_folder) mkdir(package_folder) client.current_folder = repo_folder # equivalent to git clone recipe client.save({"conanfile.py": conanfile_out}) client.current_folder = build_folder client.run("install ../recipe") client.current_folder = src_folder client.run("install ../recipe") client.run("source ../recipe") client.current_folder = build_folder client.run("build ../recipe --source-folder=../src") client.current_folder = package_folder client.run("package ../../recipe --source-folder=../../src --build-folder=../") self._assert_pkg(package_folder) client.current_folder = repo_folder client.run("export . lasote/testing") client.run("export-pkg . Pkg/0.1@lasote/testing -bf=../build/package") ref = ConanFileReference.loads("Pkg/0.1@lasote/testing") cache_package_folder = client.cache.package_layout(ref).packages() cache_package_folder = os.path.join(cache_package_folder, os.listdir(cache_package_folder)[0]) self._assert_pkg(cache_package_folder) def insource_build_test(self): client = TestClient() repo_folder = client.current_folder package_folder = os.path.join(client.current_folder, "pkg") mkdir(package_folder) client.save({"conanfile.py": conanfile_out}) client.run("install .") client.run("source .") client.run("build . ") client.current_folder = package_folder client.run("package .. --build-folder=.. --package-folder='%s'" % package_folder) self._assert_pkg(package_folder) client.current_folder = repo_folder client.run("export . lasote/testing") client.run("export-pkg . 
Pkg/0.1@lasote/testing -bf=./pkg") ref = ConanFileReference.loads("Pkg/0.1@lasote/testing") cache_package_folder = client.cache.package_layout(ref).packages() cache_package_folder = os.path.join(cache_package_folder, os.listdir(cache_package_folder)[0]) self._assert_pkg(cache_package_folder) def child_build_test(self): client = TestClient() repo_folder = client.current_folder build_folder = os.path.join(client.current_folder, "build") mkdir(build_folder) package_folder = os.path.join(build_folder, "package") mkdir(package_folder) client.save({"conanfile.py": conanfile_out}) client.current_folder = build_folder client.run("install ..") client.run("source ..") client.run("build .. --source-folder=.") client.current_folder = package_folder client.run("package ../.. --build-folder=../") self._assert_pkg(package_folder) rmdir(package_folder) client.current_folder = repo_folder client.run("export-pkg . Pkg/0.1@lasote/testing -bf=./build") ref = ConanFileReference.loads("Pkg/0.1@lasote/testing") cache_package_folder = client.cache.package_layout(ref).packages() cache_package_folder = os.path.join(cache_package_folder, os.listdir(cache_package_folder)[0]) self._assert_pkg(cache_package_folder) def build_local_different_folders_test(self): # Real build, needed to ensure that the generator is put in the correct place and # cmake finds it, using an install_folder different from build_folder client = TestClient() client.run("new lib/1.0") # FIXME: this test, so it doesn't need to clone from github client.run("source . --source-folder src") # Patch the CMakeLists to include the generator file from a different folder install_dir = os.path.join(client.current_folder, "install_x86_64") tools.replace_in_file(os.path.join(client.current_folder, "src", "hello", "CMakeLists.txt"), "${CMAKE_BINARY_DIR}/conanbuildinfo.cmake", '"%s/conanbuildinfo.cmake"' % install_dir.replace("\\", "/"), output=client.out) client.run("install . --install-folder install_x86_64 -s arch=x86_64") client.run("build . --build-folder build_x86_64 --install-folder '%s' " "--source-folder src" % install_dir) self.assertTrue(os.path.exists(os.path.join(client.current_folder, "build_x86_64", "lib")))
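# --- Hedged example (not part of the tests above) ------------------------------------
# A minimal sketch of the local-development flow these tests exercise, using the same
# TestClient helper. Flag spellings follow the Conan 1.x CLI used above; handling of
# relative/absolute --package-folder paths may differ between Conan versions, so treat
# this as illustrative rather than canonical.
def run_minimal_local_flow():
    client = TestClient()
    client.save({"conanfile.py": conanfile, "file.h": "file_h_contents!"})
    pkg_folder = os.path.join(client.current_folder, "pkg")
    mkdir(pkg_folder)
    client.run("install .")                    # generate conaninfo/conanbuildinfo
    client.run("build .")                      # run the recipe's build() locally
    client.run("package . --build-folder=. --package-folder='%s'" % pkg_folder)
    return sorted(os.listdir(pkg_folder))      # e.g. file.h, myartifact.lib, ...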
# Copyright 2016 James Hensman, alexggmatthews, Mark van der Wilk # Copyright 2017 Thomas Viehmann # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy import torch from .. import likelihoods from .. import densities from .. import parameter from .model import GPModel class SGPRUpperMixin(object): """ Upper bound for the GP regression marginal likelihood. It is implemented here as a Mixin class which works with SGPR and GPRFITC. Note that the same inducing points are used for calculating the upper bound, as are used for computing the likelihood approximation. This may not lead to the best upper bound. The upper bound can be tightened by optimising Z, just as just like the lower bound. This is especially important in FITC, as FITC is known to produce poor inducing point locations. An optimisable upper bound can be found in https://github.com/markvdw/gp_upper. The key reference is :: @misc{titsias_2014, title={Variational Inference for Gaussian and Determinantal Point Processes}, url={http://www2.aueb.gr/users/mtitsias/papers/titsiasNipsVar14.pdf}, publisher={Workshop on Advances in Variational Inference (NIPS 2014)}, author={Titsias, Michalis K.}, year={2014}, month={Dec} } """ def compute_upper_bound(self): num_inducing = self.Z.size(0) num_data = self.Y.size(0) Kdiag = self.kern.Kdiag(self.X) jitter = torch.eye(num_inducing, dtype=self.Z.dtype, device=self.Z.device) * self.jitter_level Kuu = self.kern.K(self.Z.get()) + jitter Kuf = self.kern.K(self.Z.get(), self.X) L = torch.cholesky(Kuu, upper=False) LB = torch.cholesky(Kuu + self.likelihood.variance.get() ** -1.0 * torch.matmul(Kuf, Kuf.t()), upper=False) LinvKuf, _ = torch.solve(Kuf, L) # could use triangular solve # Using the Trace bound, from Titsias' presentation c = Kdiag.sum() - (LinvKuf ** 2.0).sum() # Kff = self.kern.K(self.X) # Qff = tf.matmul(Kuf, LinvKuf, transpose_a=True) # Alternative bound on max eigenval: # c = tf.reduce_max(tf.reduce_sum(tf.abs(Kff - Qff), 0)) corrected_noise = self.likelihood.variance.get() + c const = -0.5 * num_data * torch.log(2 * float(numpy.pi) * self.likelihood.variance.get()) logdet = torch.diag(L).log().sum() - torch.diag(LB).log().sum() LC = torch.cholesky(Kuu + corrected_noise ** -1.0 * torch.matmul(Kuf, Kuf.t()), upper=False) v, _ = torch.solve(corrected_noise ** -1.0 * torch.matmul(Kuf, self.Y), LC) quad = -0.5 * corrected_noise ** -1.0 * (self.Y ** 2.0).sum() + 0.5 * (v ** 2.0).sum() return const + logdet + quad class SGPR(GPModel, SGPRUpperMixin): """ Sparse Variational GP regression. 
The key reference is :: @inproceedings{titsias2009variational, title={Variational learning of inducing variables in sparse Gaussian processes}, author={Titsias, Michalis K}, booktitle={International Conference on Artificial Intelligence and Statistics}, pages={567--574}, year={2009} } """ def __init__(self, X, Y, kern, Z, mean_function=None, **kwargs): """ X is a data matrix, size N x D Y is a data matrix, size N x R Z is a matrix of pseudo inputs, size M x D kern, mean_function are appropriate GPflow objects This method only works with a Gaussian likelihood. """ likelihood = likelihoods.Gaussian(dtype=X.dtype) super(SGPR,self).__init__(X, Y, kern, likelihood, mean_function, **kwargs) self.Z = parameter.Param(Z) self.num_data = X.size(0) self.num_latent = Y.size(1) def compute_log_likelihood(self, X=None, Y=None): """ For a derivation of the terms in here, see the associated SGPR notebook. """ assert X is None and Y is None, "{} does not support minibatch mode".format(str(type(self))) num_inducing = self.Z.size(0) num_data = self.Y.size(0) output_dim = self.Y.size(1) err = self.Y - self.mean_function(self.X) Kdiag = self.kern.Kdiag(self.X) Kuf = self.kern.K(self.Z.get(), self.X) jitter = torch.eye(num_inducing, dtype=self.Z.dtype, device=self.Z.device) * self.jitter_level Kuu = self.kern.K(self.Z.get()) + jitter L = torch.cholesky(Kuu, upper=False) sigma = self.likelihood.variance.get()**0.5 # Compute intermediate matrices A = torch.solve(Kuf, L)[0] / sigma # could use triangular solve AAT = torch.matmul(A, A.t()) B = AAT + torch.eye(num_inducing, dtype=AAT.dtype, device=AAT.device) LB = torch.cholesky(B, upper=False) Aerr = torch.matmul(A, err) c = torch.solve(Aerr, LB)[0] / sigma # could use triangular solve # compute log marginal bound bound = -0.5 * num_data * output_dim * float(numpy.log(2 * numpy.pi)) bound += -output_dim * torch.sum(torch.log(torch.diag(LB))) bound = bound - 0.5 * num_data * output_dim * torch.log(self.likelihood.variance.get()) bound += -0.5 * torch.sum(err**2) / self.likelihood.variance.get() bound += 0.5 * torch.sum(c**2) bound += -0.5 * output_dim * torch.sum(Kdiag) / self.likelihood.variance.get() bound += 0.5 * output_dim * torch.sum(torch.diag(AAT)) return bound def predict_f(self, Xnew, full_cov=False): """ Compute the mean and variance of the latent function at some new points Xnew. For a derivation of the terms in here, see the associated SGPR notebook. 
""" num_inducing = self.Z.size(0) err = self.Y - self.mean_function(self.X) Kuf = self.kern.K(self.Z.get(), self.X) jitter = torch.eye(num_inducing, dtype=self.Z.dtype, device=self.Z.device) * self.jitter_level Kuu = self.kern.K(self.Z.get()) + jitter Kus = self.kern.K(self.Z.get(), Xnew) sigma = self.likelihood.variance.get()**0.5 L = torch.cholesky(Kuu, upper=False) A = torch.solve(Kuf, L)[0] / sigma # could use triangular solve here and below B = torch.matmul(A,A.t()) + torch.eye(num_inducing, dtype=A.dtype, device=A.device) LB = torch.cholesky(B, upper=False) Aerr = torch.matmul(A, err) c = torch.solve(Aerr, LB)[0] / sigma tmp1,_ = torch.solve(Kus, L) tmp2,_ = torch.solve(tmp1,LB) mean = torch.matmul(tmp2.t(), c) if full_cov: var = self.kern.K(Xnew) + torch.matmul(tmp2.t(), tmp2) - torch.matmul(tmp1.t(), tmp1) var = var.unsqueeze(2).expand(-1, -1, self.Y.size(1)) else: var = self.kern.Kdiag(Xnew) + (tmp2**2).sum(0) - (tmp1**2).sum(0) var = var.unsqueeze(1).expand(-1, self.Y.size(1)) return mean + self.mean_function(Xnew), var class GPRFITC(GPModel, SGPRUpperMixin): def __init__(self, X, Y, kern, Z, mean_function=None, **kwargs): # was mean_function = Zero() """ This implements GP regression with the FITC approximation. The key reference is @inproceedings{Snelson06sparsegaussian, author = {Edward Snelson and Zoubin Ghahramani}, title = {Sparse Gaussian Processes using Pseudo-inputs}, booktitle = {Advances In Neural Information Processing Systems }, year = {2006}, pages = {1257--1264}, publisher = {MIT press} } Implementation loosely based on code from GPML matlab library although obviously gradients are automatic in GPflow. X is a data matrix, size N x D Y is a data matrix, size N x R Z is a matrix of pseudo inputs, size M x D kern, mean_function are appropriate GPflow objects This method only works with a Gaussian likelihood. """ likelihood = likelihoods.Gaussian(dtype=X.dtype) super(SGPR,self).__init__(X, Y, kern, likelihood, mean_function, **kwargs) self.Z = parameter.Param(Z) self.num_data = X.size(0) self.num_latent = Y.size(1) def _common_terms(self): num_inducing = self.Z.size(0) err = self.Y - self.mean_function(self.X) # size N x R Kdiag = self.kern.Kdiag(self.X) Kuf = self.kern.K(self.Z.get(), self.X) jitter = torch.eye(num_inducing, dtype=self.Z.dtype, device=self.Z.device) * self.jitter_level Kuu = self.kern.K(self.Z.get()) + jitter Luu = torch.cholesky(Kuu, upper=False) # => Luu Luu^T = Kuu V, _ = torch.solve(Kuf, Luu) # => V^T V = Qff = Kuf^T Kuu^-1 Kuf diagQff = (V**2).sum(0) nu = Kdiag - diagQff + self.likelihood.variance.get() B = torch.eye(num_inducing, out=V.data.new()) + torch.matmul(V / nu, V.t()) L = torch.cholesky(B, upper=False) beta = err / nu.unsqueeze(1) # size N x R alpha = torch.matmul(V, beta) # size N x R gamma, _ = torch.solve(alpha, L) # size N x R return err, nu, Luu, L, alpha, beta, gamma def compute_log_likelihood(self, X=None, Y=None): """ Construct a tensorflow function to compute the bound on the marginal likelihood. """ assert X is None and Y is None, "{} does not support minibatch mode".format(str(type(self))) # FITC approximation to the log marginal likelihood is # log ( normal( y | mean, K_fitc ) ) # where K_fitc = Qff + diag( \nu ) # where Qff = Kfu Kuu^{-1} Kuf # with \nu_i = Kff_{i,i} - Qff_{i,i} + \sigma^2 # We need to compute the Mahalanobis term -0.5* err^T K_fitc^{-1} err # (summed over functions). # We need to deal with the matrix inverse term. 
        # K_fitc^{-1} = ( Qff + \diag( \nu ) )^{-1}
        #             = ( V^T V + \diag( \nu ) )^{-1}
        # Applying the Woodbury identity we obtain
        #             = \diag( \nu^{-1} ) - \diag( \nu^{-1} ) V^T ( I + V \diag( \nu^{-1} ) V^T )^{-1} V \diag( \nu^{-1} )
        # Let \beta = \diag( \nu^{-1} ) err
        # and let \alpha = V \beta
        # then Mahalanobis term = -0.5* ( \beta^T err - \alpha^T Solve( I + V \diag( \nu^{-1} ) V^T, alpha ) )
        err, nu, Luu, L, alpha, beta, gamma = self._common_terms()

        mahalanobisTerm = -0.5 * (err**2 / nu.unsqueeze(1)).sum() + 0.5 * (gamma**2).sum()

        # We need to compute the log normalizing term -N/2 \log 2 pi - 0.5 \log \det( K_fitc )
        # We need to deal with the log determinant term.
        # \log \det( K_fitc ) = \log \det( Qff + \diag( \nu ) )
        #                     = \log \det( V^T V + \diag( \nu ) )
        # Applying the determinant lemma we obtain
        #                     = \log [ \det \diag( \nu ) \det( I + V \diag( \nu^{-1} ) V^T ) ]
        #                     = \log [ \det \diag( \nu ) ] + \log [ \det( I + V \diag( \nu^{-1} ) V^T ) ]
        constantTerm = -0.5 * self.num_data * float(numpy.log(2 * numpy.pi))
        logDeterminantTerm = -0.5 * nu.log().sum() - torch.diag(L).log().sum()
        logNormalizingTerm = constantTerm + logDeterminantTerm

        return mahalanobisTerm + logNormalizingTerm * self.num_latent

    def _build_predict(self, Xnew, full_cov=False):
        """
        Compute the mean and variance of the latent function at some new points Xnew.
        """
        _, _, Luu, L, _, _, gamma = self._common_terms()
        Kus = self.kern.K(self.Z.get(), Xnew)  # size M x Xnew

        w, _ = torch.solve(Kus, Luu)  # size M x Xnew
        tmp, _ = torch.solve(gamma, L.t())
        mean = torch.matmul(w.t(), tmp) + self.mean_function(Xnew)
        intermediateA, _ = torch.solve(w, L)

        if full_cov:
            var = self.kern.K(Xnew) - torch.matmul(w.t(), w) + torch.matmul(intermediateA.t(), intermediateA)
            var = var.unsqueeze(2).expand(-1, -1, self.Y.size(1))
        else:
            var = self.kern.Kdiag(Xnew) - (w**2).sum(0) + (intermediateA**2).sum(0)  # size Xnew,
            var = var.unsqueeze(1).expand(-1, self.Y.size(1))

        return mean, var
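# --- Hedged usage sketch (not part of the module above) -----------------------------
# Build an SGPR model on toy data and query the posterior. The `kernels` module and
# the RBF(input_dim) constructor are assumptions mirroring GPflow's layout; adjust the
# import to whatever kernel implementation this package actually ships.
def _sgpr_smoke_test():
    from .. import kernels  # assumed module name

    torch.manual_seed(0)
    X = torch.linspace(0.0, 1.0, 50).unsqueeze(1)        # N x 1 inputs
    Y = torch.sin(6.0 * X) + 0.1 * torch.randn_like(X)   # N x 1 noisy targets
    Z = X[::10].clone()                                   # 5 inducing inputs
    model = SGPR(X, Y, kernels.RBF(1), Z)
    bound = model.compute_log_likelihood()                # scalar lower bound
    mean, var = model.predict_f(X[:3])                    # posterior at 3 test points
    return bound, mean, var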
''' Code for simulating secure key rate, twofolds, and quantum bit error rate Written in Python and QuTIP by Catherine Holloway ([email protected]). Detector model and squashing functions by Catherine Holloway, based on code by Dr. Thomas Jennewein ([email protected]). Contributed to the QuTiP project on June 06, 2012 by Catherine Holloway. ''' #imports from qutip import * from numpy import * from pylab import * import matplotlib import matplotlib.pyplot as plt def choose(n, k): """ Binomial coefficient function for the detector model. Parameters ---------- n : int Number of elements. k : int Number of subelements. Returns ------- coeff : int Binomial coefficient. """ if 0 <= k <= n: ntok = 1 ktok = 1 for t in xrange(1, min(k, n - k) + 1): ntok *= n ktok *= t n -= 1 return ntok // ktok else: return 0 def BucketDetector_realistic_detector(N,efficiency,n_factor): """ Bucket detector model based on H. Lee, U. Yurtsever, P. Kok, G. Hockney, C. Adami, S. Braunstein, and J. Dowling, "Towards photostatistics from photon-number discriminating detectors," Journal of Modern Optics, vol. 51, p. 15171528, 2004. Parameters ---------- N : int The Fock Space dimension. efficiency : float The channel efficiency. n_factor : float The average number of dark counts per detection window APD (Bucket Detector). Returns ------- [proj, un_proj] : list The projection and unprojection operators. """ proj=zeros((N,N)) #APD (Bucket Detector) un_detector (=gives probability for 0-detection) un_proj=identity(N) #n_factor = 0; for i in range(N): probs = 0; for k in range (1,100): for d in range(k+1): if k-d<=i: probs= probs+ (exp(-n_factor)*(n_factor)**(d))/factorial(d)*choose(i,k-d)*efficiency**(k-d)*(1-efficiency)**(i-k+d) proj[i,i]=probs un_proj = un_proj-proj un_proj = Qobj(un_proj) proj = Qobj(proj) return [proj,un_proj] def measure_2folds_4modes_squashing(N,psi,proj,proj2): """ Determines the 2-fold count rate on the joint state outputs for an array of double count probabilities. Parameters ---------- N : int The Fock Space dimension. psi : qobj The entangled state to analyze proj1 : qobj 1st projection operator for the Channel between Alice and the Channel between Bob. proj2 : qobj 2nd projection operator for the Channel between Alice and the Channel between Bob. Returns ------- [HH,HV,VH,VV] : list Two-fold probabilities. Notes ----- The squashing (assigning double pairs to random bases) comes from two papers: T. Moroder, O. Guhne, N. Beaudry, M. Piani, and N. Lutkenhaus, "Entanglement verication with realistic measurement devices via squashing operations," Phys. Rev. A, vol. 81, p. 052342, May 2010. N. Lutkenhaus, "Estimates for practical quantum cryptography," Phys. Rev.A, vol. 59, pp. 3301-3319, May 1999. 
""" ida=qeye(N) final_state=psi det_exp = zeros((2,2,2,2)) #i,j,k,l means Ha,Va,Hb,Vb, 0 means detector clicked, 1 means detector did not click for i in range(2): for j in range(2): for k in range(2): for l in range(2): #expectation values for different detector configurations det_exp[i][j][k][l] = abs(expect(tensor(proj[i],proj[j],proj2[k],proj[l]),final_state)) #two fold probabilities HH = det_exp[0][1][0][1]+0.5*(det_exp[0][0][0][1]+det_exp[0][1][0][0])+0.25*det_exp[0][0][0][0] VV = det_exp[1][0][1][0]+0.5*(det_exp[0][0][1][0]+det_exp[1][0][0][0])+0.25*det_exp[0][0][0][0] HV = det_exp[0][1][1][0]+0.5*(det_exp[0][0][1][0]+det_exp[0][1][0][0])+0.25*det_exp[0][0][0][0] VH = det_exp[1][0][0][1]+0.5*(det_exp[0][0][0][1]+det_exp[1][0][0][0])+0.25*det_exp[0][0][0][0] return [HH,HV,VH,VV] def sim_qkd_entanglement(eps,loss_a,loss_b,n_factor_a,n_factor_b,N): """ Simulate skr with an SPDC state. Parameters ---------- eps : float The squeezing factor, sort of analogous to the amount of pumping power to the spdc source, but not really. loss_a : float Efficiency of the quantum channel going to Alice. loss_b : float Efficiency of the quantum channel going to Bob. n_factor_a : float Background noise in Alice's detection. n_factor_b : float Background noise in Bob's detection. N : int Size of the fock space that we allow for the states Returns ------- qber : float The Quantum Bit Error Rate twofolds : float Probability of Alice and Bob getting a simultaneous detection of a photon pair (also referred to as coincidences) within a timing window. skr : float Probability of getting a secure key bit within a timing window, assuming error correction and privacy amplification, in the limit of many coincidences. """ #make vaccuum state vacc = basis(N,0) #make squeezing operator for SPDC H_sq = 1j*eps*(tensor(create(N),create(N))+tensor(destroy(N),destroy(N))) #exponentiate hamiltonian and apply it to vaccuum state to make an SPDC state U_sq = H_sq.expm() spdc = U_sq*tensor(vacc,vacc) psi = tensor(spdc,spdc) #since qutip doesn't have a permute function, #we have to do a couple of steps in between #1. turn psi from a sparse matrix to a full matrix out = psi.full() #2. reshape psi into a 4-D matrix out = reshape(out, (N,N,N,-1)) #3. permute the dimensions of our 4-D matrix out = transpose(out,(0,3,2,1)) #4. turn the matrix back into a 1-D array out = reshape(out,(N*N*N*N,-1)) #5. convert the matrix back into a quantum object psi = Qobj(out,dims = [[N, N, N, N], [1, 1, 1, 1]]) # model detectors a_det = BucketDetector_realistic_detector(N,loss_a,n_factor_a) b_det = BucketDetector_realistic_detector(N,loss_b,n_factor_b) #measure detection probabilities probs2f=measure_2folds_4modes_squashing(N,psi,a_det,b_det) #Rates returned are 'per pulse', so multiply by source rate twofolds=probs2f[0]+probs2f[1]+probs2f[2]+probs2f[3] #Determine QBER from returned detection probabilities qber = (probs2f[0]+probs2f[3])/twofolds #calculate the entropy of the qber if qber>0: H2=-qber*log2(qber) - (1-qber)*log2(1-qber) else: H2 = 0 # estimate error correction efficiency from the CASCADE algorithm f_e = 1.16904371810274 + qber #security analysis - calculate skr in infinite key limit #See Chris Erven's PhD thesis or Xiongfeng Ma's paper #to understand where this equation comes from skr=real(twofolds*0.5*(1-(1+f_e)*H2)) return [qber, skr, twofolds] if __name__=='__main__': #Lets look at what happens to the secure key rate and #the quantum bit error rate as the loss gets worse. #Analogous to distance with fiber optic links. 
#define the fock space N = 7 #define the squeezing paramter eps = 0.2 #define the noise factor n_factor = 4.0e-5 #define the length of the coincidence window (in s) coinc_window = 2.0e-9 loss_db = arange(0,30) skr = zeros(30) qber = zeros(30) twofolds = zeros(30) #run calculation for i in range(30): exp_loss = 10.0**(-loss_db[i]/10.0); [qber[i], skr[i], twofolds[i]] = sim_qkd_entanglement(eps,exp_loss,exp_loss,n_factor,n_factor,N) skr = skr/coinc_window qber = qber*100 #plot results fig = plt.figure() ax = fig.add_subplot(211) ax.plot(loss_db, skr,lw=2) ax.set_yscale('log') ax.set_ylabel('Secure Key Rate (bits/s)') ax.set_xlabel('Loss (dB)') ax = fig.add_subplot(212) ax.plot(loss_db, qber,lw=2) ax.set_ylabel('Quantum Bit Error Rate (%)') ax.set_ylim([0,15]) ax.set_xlabel('Loss (dB)') plt.show()
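# --- Hedged sanity check (not part of the original script) --------------------------
# By construction the "click" and "no click" projectors returned by
# BucketDetector_realistic_detector sum to the identity on the truncated Fock space;
# this helper makes that completeness property explicit for a given N.
def _check_detector_completeness(N=7, efficiency=0.5, n_factor=1e-4, tol=1e-9):
    proj, un_proj = BucketDetector_realistic_detector(N, efficiency, n_factor)
    residual = abs((proj + un_proj - qeye(N)).full()).max()
    return residual < tol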
# -*- coding: utf-8 -*- import basic import site_builder import re from sets import Set from functions import getReadbleTime, prettyItemBonus, getDisplayPages import math from guild import guildsController import json import random import cherrypy class miscController(basic.defaultController): @basic.printpage def printPage(self, page, params): self.sbuilder = site_builder.builder() return { 'index': self.printIndex, '': self.printIndex, 'login': self.sbuilder.parseLogin, 'logout': self.sbuilder.parseLogout, 'help': self.printHelp, 'help_ugc': self.printHelpApprove, 'help_auth': self.printHelpAuth, 'search': self.printSearchResults, 'all-players': self.printAllPlayers, 'all-guilds': self.printAllGuilds, 'ne': self.printFactionPage, 'ha': self.printFactionPage, 'ft': self.printFactionPage, #'test': self.printTest 'licenses': self.printLicensesHelp, 'thx': self.printThanks, 'invite': self.printInvitePage, 'credits': self.printCredits } @basic.methods def methods(self, params = {}): return { 'type_of_form': { 'agree_with_rules': self.agreeWithRules }, 'like': self.likeProxy, 'report': self.reportThing, } @staticmethod def formatArtworks(self, artworks): authors_ids = Set() for item in artworks: authors_ids.add(item['author']) players_names = self.model.players.getPlayersList(authors_ids, ['_id', 'name']) for artwork in artworks: artwork['race_name'] = self.balance.races[artwork['faction']][artwork['race']] artwork['class_name'] = self.balance.classes[str(artwork['class'])] if 'builtin' in artwork: artwork['img'] = self.core.ARTWORK_SHOP_PATH+artwork['img']+'.jpg' artwork['share_img'] = artwork['img'] else: artwork.update({ "share_img": artwork["img"][3:], 'img': artwork['img']+'_fit.png' }) for player in players_names: if player['_id'] == artwork['author']: artwork['author_name'] = player['name'] break return artworks @staticmethod def formatArtworkInfo(info): if 'link' in info and info['link']: if len(info['link']) > 32: info['link_name'] = info['link'][:32]+'...' 
else: info['link_name'] = info['link'] return info def formatPlayers(self, players): for player in players: player['lvl'] = int(player['lvl']) self.sbuilder.raceFormat(player) return players # -------------------------------------------------------------------------------------------------- # Misc @staticmethod def likeIt(obj, params): ajax = False if "ajax" in params and int(params['ajax'])==1: ajax = True if not obj.cur_player: if ajax: return json.dumps({"error": 1,"message" : "1002"}) else: return obj.sbuilder.throwWebError(1002) if '_id' in params: if 'type' in params: tp = params['type'] else: tp = 'item' obj.model.items.likeItem(params['_id'], obj.cur_player['login_id'], tp) if ajax: return json.dumps({"success": 1,"liked" : "1"}) else: obj.httpRedirect(params) def likeProxy(self, params): return self.likeIt(self, params) def reportThing(self, params): if not self.cur_player: return self.sbuilder.throwWebError(1002) if '_id' in params: if 'report_type' in params and params['report_type'] in ['item', 'spell', 'artwork']: type_of = params['report_type'] else: type_of = 'item' self.model.items.reportItem(params['_id'], self.cur_player['login_id'], type_of) self.httpRedirect(params) def getBlogLastPosts(self, count = 2): posts = self.model.misc.getBlogPosts() return posts[:count] def agreeWithRules(self, params): if self.cur_player: self.model.players.updatePlayerData(self.cur_player['login_id'], {'agree_with_rules': True}) self.httpRedirect(params) # -------------------------------------------------------------------------------------------------- # Print pages def printIndex(self, fields, param): def getLastItems(last_count = 8): last_items = self.model.items.getLastApprovedItems(last_count) if self.cur_player: str_class = str(self.cur_player['login_class']) else: str_class = False authors_ids = Set() items_ids = Set() for item in last_items: authors_ids.add(item['author']) items_ids.add(item['_id']) item.update(prettyItemBonus(item, self.balance.stats_name)) if item['type'] == 1 and str_class: if not item['view'] in self.sbuilder.balance.available_weapons[str_class]: item['cant_use'] = True item['img'] += '_fit.png' players_names = self.model.players.getPlayersList(authors_ids, ['_id', 'name']) item_likes = self.model.items.getItemsLikes(items_ids) for item in last_items: for player in players_names: if player['_id'] == item['author']: item['author_name'] = player['name'] break for likes in item_likes: if likes['item_id'] == item['_id']: item['likes'] = len(likes['people']) is_like = False if self.cur_player: is_like = self.cur_player['login_id'] in likes['people'] item['is_like'] = is_like break else: item['likes'] = 0 return last_items def getTrendingPlayers(count = 8): players = self.model.players.getTrendingPlayers(count) for player in players: player.update({ 'class_name': self.balance.classes[str(player['class'])], 'race_name': self.balance.races[player['faction']][player['race']], }) return players def getTrendingGuild(count = 8): guilds = self.model.guilds.getTrendingGuilds(count) return guilds def getFeaturedArtwork(): artwork = self.model.misc.getRandomArtwork() if artwork: artwork['race_name'] = self.balance.races[artwork['faction']][artwork['race']] artwork['class_name'] = self.balance.classes[str(artwork['class'])] player = self.model.players.getPlayerBy_ID(artwork['author'], {'name': 1}) if 'UID' in artwork: artwork['img'] = self.core.ARTWORK_PATH + artwork['img'] + '.jpg' else: artwork['img'] += '_fit.png' if player: artwork['author_name'] = player['name'] likes = 
self.model.items.getItemLikes(artwork['_id']) if likes: artwork.update({ 'likes': len(likes['people']), 'is_like': self.cur_player and self.cur_player['login_id'] in likes['people'], }) return artwork def getRandomTitle(): titles = [ 'Good news everyone!', 'Oh my, yes!', 'Ya filthy crab!', 'Bad news, nobody!', 'Pew pew pew', 'Me fail English? That`s unpossible', 'Uulwi ifis halahs gag erh\'ongg w\'ssh', 'You are not prepared!', 'BOOOONEEEESTOOORRMMM', 'Oppan orcish style', 'New toys? For me?', 'Thank you, @MeowMio', '15.9% uptime', '4 9 14 0 14 2', 'What does Marsellus Wallace look like?', 'May the Force be with you', 'Do not refresh this page', 'Refresh this page', 'No errors here', 'Fine!', 'Not bugs but features', 'QRATOR HTTP 502', 'Waka-Waka-Waka', 'Pinocchio was an android', 'You are cut out for Tweeria', 'Life sucks, get a helmet', 'Wrong username and password, %username%, try again', 'Wweerai owns teh Intarnet', 'Solutions are not the answer, %username%', 'Three Nations Army', 'Human Quan\'Zul Style!', 'ALGEBRAIC!', 'Are you an orcaholic, %username%?', 'Live long and prosper', 'Live fast - die young - undead forever', 'Tweeria Time!', 'The reckoning has come!', 'Shelter your weak, your young and your old!', ] return random.sample(titles, 1)[0] def getArtistsLikes(count = 10): return self.model.misc.getAuthorsLikes(count) if self.cur_player: inventory_count = self.model.items.getInventoryCount(self.cur_player['login_id']) else: inventory_count = 0 fields.update(self.model.misc.getGameStatistics()) fields.update({ self.title: getRandomTitle(), 'inventory_count': inventory_count, 'featured_artwork': getFeaturedArtwork(), 'last_items': getLastItems(), 'authors_likes': getArtistsLikes(10), 'trending_players': getTrendingPlayers(5), 'trending_guilds': getTrendingGuild(5), 'blog_posts': self.getBlogLastPosts(2), 'tip': self.model.misc.getRandomTip() }) return basic.defaultController._printTemplate(self, 'index', fields) def printSearchResults(self, fields, param): fields.update({self.title: 'Search'}) if 'q' in param: regx = re.compile(re.sub('<.*?>','',param['q']), re.IGNORECASE) search_query = {'name': {'$regex': regx}} else: search_query = {} if 'q' in param and len(param['q']) > 1: players = self.model.players.getPlayersListFiltered( search_query = search_query, count = 20, page = 1 ) for player in players: player.update({ 'class_name': self.balance.classes[str(player['class'])], 'race_name': self.balance.races[player['faction']][player['race']], }) guilds = self.model.guilds.getGuildsListFiltered( search_query = search_query, count = 20, page = 1 ) fields.update({'players': players, 'guilds': guilds}) else: fields.update({'error': 'empty_search'}) return basic.defaultController._printTemplate(self, 'search', fields) def printTest(self, fields, param): return basic.defaultController._printTemplate(self, 'players/registration', fields) def printThanks(self, fields, param): return basic.defaultController._printTemplate(self, 'misc/thx', fields) def printHelp(self, fields, param): fields.update({self.title: 'Help'}) return basic.defaultController._printTemplate(self, 'misc/help', fields) def printLicensesHelp(self, fields, param): fields.update({self.title: 'Licenses'}) return basic.defaultController._printTemplate(self, 'misc/licenses', fields) def printHelpApprove(self, fields, param): fields.update({self.title: 'Help UGC'}) return basic.defaultController._printTemplate(self, 'misc/help_ugc', fields) def printHelpAuth(self, fields, param): fields.update({self.title: 'Help auth'}) 
return basic.defaultController._printTemplate(self, 'misc/help_auth', fields) def printAllPlayers(self, fields, param): fields.update({self.title: 'All players'}) def getPaginatorData(players_on_page): players_count = self.model.players.getPlayersCount() pages = int(math.ceil(float(players_count) / players_on_page)) fields.update({ 'total_pages': pages }) def getSortParams(): if not 'pi' in param: fields.update({'param_pi': 1}) try: page_number = int(param['pi']) except Exception: page_number = 1 if 'field' in param: sort_field = param['field'] else: sort_field = 'lvl' sort_order = -1 if 'order' in param: try: sort_order = int(param['order']) except Exception: pass return { 'page_number': page_number, 'sort_field': sort_field, 'sort_order': sort_order } players_on_page = 20 sort_params = getSortParams() players = self.model.players.getPlayersListFiltered( count = players_on_page, page = sort_params['page_number'], field = sort_params['sort_field'], sort = sort_params['sort_order'] ) getPaginatorData(players_on_page) for player in players: player.update({ 'class_name': self.balance.classes[str(player['class'])], 'race_name': self.balance.races[player['faction']][player['race']], }) fields.update({'players': players}) fields.update({'display_pages': getDisplayPages(int(fields['param_pi']), fields['total_pages'], 10)}) return basic.defaultController._printTemplate(self, 'players/all_players', fields) def printAllGuilds(self, fields, param): fields.update({self.title: 'All guilds'}) def getPaginatorData(guilds_on_page): guilds_count = self.model.guilds.getGuildsCount() pages = int(math.ceil(float(guilds_count) / guilds_on_page)) fields.update({ 'total_pages': pages }) def getSortParams(): if not 'pi' in param: fields.update({'param_pi': 1}) try: page_number = int(param['pi']) except Exception: page_number = 1 if 'field' in param: sort_field = param['field'] else: sort_field = 'people_count' sort_order = -1 if 'order' in param: try: sort_order = int(param['order']) except Exception: pass return { 'page_number': page_number, 'sort_field': sort_field, 'sort_order': sort_order } guilds_on_page = 20 sort_params = getSortParams() getPaginatorData(guilds_on_page) guilds = self.model.guilds.getGuildsListFiltered( count = guilds_on_page, page = sort_params['page_number'], field = sort_params['sort_field'], sort = sort_params['sort_order'] ) if self.cur_player: guild = self.model.guilds.getPlayersGuild(self.cur_player['login_id']) if guild: guild = guildsController.formatGuilds(self,[guild])[0] fields.update({ 'login_guild': guild }) fields.update({'guilds': guilds}) fields.update({'display_pages': getDisplayPages(int(fields['param_pi']), fields['total_pages'], 10)}) return basic.defaultController._printTemplate(self, 'guilds/all_guilds', fields) def printFactionPage(self, fields, param): sides = { 'ne': 0, 'ha': 1, 'ft': 2 } side_id = sides[fields['__page__']] fields.update({ 'side_name': self.sbuilder.balance.faction[side_id] }) return basic.defaultController._printTemplate(self, 'misc/faction', fields) def printInvitePage(self, fields, param): player = False player_name = False for key in param: if key[:2] != '__' and key != 'guild': player_name = key break if player_name: player = self.model.players.getPlayerRawByName(player_name, { 'name': 1, 'class':1, '_guild_name': 1, 'race': 1, 'faction': 1, 'lvl': 1, 'avatar': 1 }) if not player: return self.sbuilder.throwWebError(404) player.update({ 'class_name': self.balance.classes[str(player['class'])], 'race_name': 
self.balance.races[player['faction']][player['race']] }) fields.update({'player': player}) return basic.defaultController._printTemplate(self, 'misc/landing_page', fields) def printCredits(self, fields, param): fields.update({self.title: 'Credits'}) return basic.defaultController._printTemplate(self, 'misc/credits', fields) data = { 'class': miscController, 'type': ['index', 'default'], 'urls': [ '', 'index', 'thx', 'logout', 'licenses', 'login', 'search', 'help', 'all-players', 'all-guilds', 'ne', 'ft', 'ha', 'test', 'help_ugc', 'help_auth', 'invite', 'credits' ] }
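# --- Hedged sketch (not part of the original module) --------------------------------
# The `data` dict above registers this controller with the site's dispatcher: every
# entry in 'urls' is served by the controller named in 'class'. The routing code
# itself lives elsewhere in the project; the helper below is a hypothetical
# illustration of how such a registration could be consumed.
def _mount_controller(registration, routes):
    for url in registration['urls']:
        routes['/' + url] = registration['class']
    return routes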
# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from osc_lib.tests import utils as osc_utils from unittest import mock from saharaclient.api import node_group_templates as api_ngt from saharaclient.osc.v1 import node_group_templates as osc_ngt from saharaclient.tests.unit.osc.v1 import fakes NGT_INFO = { "node_processes": [ "namenode", "tasktracker" ], "name": "template", "tenant_id": "tenant_id", "availability_zone": 'av_zone', "use_autoconfig": True, "hadoop_version": "0.1", "shares": None, "is_default": False, "description": 'description', "node_configs": {}, "is_proxy_gateway": False, "auto_security_group": True, "volume_type": None, "volumes_size": 2, "volume_mount_prefix": "/volumes/disk", "plugin_name": "fake", "is_protected": False, "security_groups": None, "floating_ip_pool": "floating_pool", "is_public": True, "id": "ng_id", "flavor_id": "flavor_id", "volumes_availability_zone": None, "volumes_per_node": 2, "volume_local_to_instance": False } class TestNodeGroupTemplates(fakes.TestDataProcessing): def setUp(self): super(TestNodeGroupTemplates, self).setUp() self.ngt_mock = ( self.app.client_manager.data_processing.node_group_templates) self.ngt_mock.reset_mock() self.app.api_version['data_processing'] = '1' class TestCreateNodeGroupTemplate(TestNodeGroupTemplates): # TODO(apavlov): check for creation with --json def setUp(self): super(TestCreateNodeGroupTemplate, self).setUp() self.ngt_mock.create.return_value = api_ngt.NodeGroupTemplate( None, NGT_INFO) self.fl_mock = self.app.client_manager.compute.flavors self.fl_mock.get.return_value = mock.Mock(id='flavor_id') self.fl_mock.reset_mock() # Command to test self.cmd = osc_ngt.CreateNodeGroupTemplate(self.app, None) def test_ngt_create_minimum_options(self): arglist = ['--name', 'template', '--plugin', 'fake', '--plugin-version', '0.1', '--processes', 'namenode', 'tasktracker', '--flavor', 'flavor_id'] verifylist = [('name', 'template'), ('plugin', 'fake'), ('plugin_version', '0.1'), ('flavor', 'flavor_id'), ('processes', ['namenode', 'tasktracker'])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) # Check that correct arguments were passed self.ngt_mock.create.assert_called_once_with( auto_security_group=False, availability_zone=None, description=None, flavor_id='flavor_id', floating_ip_pool=None, hadoop_version='0.1', is_protected=False, is_proxy_gateway=False, is_public=False, name='template', node_processes=['namenode', 'tasktracker'], plugin_name='fake', security_groups=None, use_autoconfig=False, volume_local_to_instance=False, volume_type=None, volumes_availability_zone=None, volumes_per_node=None, volumes_size=None, shares=None, node_configs=None, volume_mount_prefix=None) def test_ngt_create_all_options(self): arglist = ['--name', 'template', '--plugin', 'fake', '--plugin-version', '0.1', '--processes', 'namenode', 'tasktracker', '--security-groups', 'secgr', '--auto-security-group', '--availability-zone', 'av_zone', '--flavor', 'flavor_id', 
'--floating-ip-pool', 'floating_pool', '--volumes-per-node', '2', '--volumes-size', '2', '--volumes-type', 'type', '--volumes-availability-zone', 'vavzone', '--volumes-mount-prefix', '/volume/asd', '--volumes-locality', '--description', 'descr', '--autoconfig', '--proxy-gateway', '--public', '--protected'] verifylist = [('name', 'template'), ('plugin', 'fake'), ('plugin_version', '0.1'), ('processes', ['namenode', 'tasktracker']), ('security_groups', ['secgr']), ('auto_security_group', True), ('availability_zone', 'av_zone'), ('flavor', 'flavor_id'), ('floating_ip_pool', 'floating_pool'), ('volumes_per_node', 2), ('volumes_size', 2), ('volumes_type', 'type'), ('volumes_availability_zone', 'vavzone'), ('volumes_mount_prefix', '/volume/asd'), ('volumes_locality', True), ('description', 'descr'), ('autoconfig', True), ('proxy_gateway', True), ('public', True), ('protected', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) # Check that correct arguments were passed self.ngt_mock.create.assert_called_once_with( auto_security_group=True, availability_zone='av_zone', description='descr', flavor_id='flavor_id', floating_ip_pool='floating_pool', hadoop_version='0.1', is_protected=True, is_proxy_gateway=True, is_public=True, name='template', node_processes=['namenode', 'tasktracker'], plugin_name='fake', security_groups=['secgr'], use_autoconfig=True, volume_local_to_instance=True, volume_type='type', volumes_availability_zone='vavzone', volumes_per_node=2, volumes_size=2, shares=None, node_configs=None, volume_mount_prefix='/volume/asd') # Check that columns are correct expected_columns = ( 'Auto security group', 'Availability zone', 'Description', 'Flavor id', 'Floating ip pool', 'Id', 'Is default', 'Is protected', 'Is proxy gateway', 'Is public', 'Name', 'Node processes', 'Plugin name', 'Plugin version', 'Security groups', 'Use autoconfig', 'Volume local to instance', 'Volume mount prefix', 'Volume type', 'Volumes availability zone', 'Volumes per node', 'Volumes size') self.assertEqual(expected_columns, columns) # Check that data is correct expected_data = ( True, 'av_zone', 'description', 'flavor_id', 'floating_pool', 'ng_id', False, False, False, True, 'template', 'namenode, tasktracker', 'fake', '0.1', None, True, False, '/volumes/disk', None, None, 2, 2) self.assertEqual(expected_data, data) class TestListNodeGroupTemplates(TestNodeGroupTemplates): def setUp(self): super(TestListNodeGroupTemplates, self).setUp() self.ngt_mock.list.return_value = [api_ngt.NodeGroupTemplate( None, NGT_INFO)] # Command to test self.cmd = osc_ngt.ListNodeGroupTemplates(self.app, None) def test_ngt_list_no_options(self): arglist = [] verifylist = [] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) # Check that columns are correct expected_columns = ['Name', 'Id', 'Plugin name', 'Plugin version'] self.assertEqual(expected_columns, columns) # Check that data is correct expected_data = [('template', 'ng_id', 'fake', '0.1')] self.assertEqual(expected_data, list(data)) def test_ngt_list_long(self): arglist = ['--long'] verifylist = [('long', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) # Check that columns are correct expected_columns = ['Name', 'Id', 'Plugin name', 'Plugin version', 'Node processes', 'Description'] self.assertEqual(expected_columns, columns) # Check that data is correct expected_data = 
[('template', 'ng_id', 'fake', '0.1', 'namenode, tasktracker', 'description')] self.assertEqual(expected_data, list(data)) def test_ngt_list_extra_search_opts(self): arglist = ['--plugin', 'fake', '--plugin-version', '0.1', '--name', 'templ'] verifylist = [('plugin', 'fake'), ('plugin_version', '0.1'), ('name', 'templ')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) # Check that columns are correct expected_columns = ['Name', 'Id', 'Plugin name', 'Plugin version'] self.assertEqual(expected_columns, columns) # Check that data is correct expected_data = [('template', 'ng_id', 'fake', '0.1')] self.assertEqual(expected_data, list(data)) class TestShowNodeGroupTemplate(TestNodeGroupTemplates): def setUp(self): super(TestShowNodeGroupTemplate, self).setUp() self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate( None, NGT_INFO) # Command to test self.cmd = osc_ngt.ShowNodeGroupTemplate(self.app, None) def test_ngt_show(self): arglist = ['template'] verifylist = [('node_group_template', 'template')] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) # Check that correct arguments were passed self.ngt_mock.find_unique.assert_called_once_with(name='template') # Check that columns are correct expected_columns = ( 'Auto security group', 'Availability zone', 'Description', 'Flavor id', 'Floating ip pool', 'Id', 'Is default', 'Is protected', 'Is proxy gateway', 'Is public', 'Name', 'Node processes', 'Plugin name', 'Plugin version', 'Security groups', 'Use autoconfig', 'Volume local to instance', 'Volume mount prefix', 'Volume type', 'Volumes availability zone', 'Volumes per node', 'Volumes size') self.assertEqual(expected_columns, columns) # Check that data is correct expected_data = ( True, 'av_zone', 'description', 'flavor_id', 'floating_pool', 'ng_id', False, False, False, True, 'template', 'namenode, tasktracker', 'fake', '0.1', None, True, False, '/volumes/disk', None, None, 2, 2) self.assertEqual(expected_data, data) class TestDeleteNodeGroupTemplate(TestNodeGroupTemplates): def setUp(self): super(TestDeleteNodeGroupTemplate, self).setUp() self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate( None, NGT_INFO) # Command to test self.cmd = osc_ngt.DeleteNodeGroupTemplate(self.app, None) def test_ngt_delete(self): arglist = ['template'] verifylist = [('node_group_template', ['template'])] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) # Check that correct arguments were passed self.ngt_mock.delete.assert_called_once_with('ng_id') class TestUpdateNodeGroupTemplate(TestNodeGroupTemplates): # TODO(apavlov): check for update with --json def setUp(self): super(TestUpdateNodeGroupTemplate, self).setUp() self.ngt_mock.find_unique.return_value = api_ngt.NodeGroupTemplate( None, NGT_INFO) self.ngt_mock.update.return_value = api_ngt.NodeGroupTemplate( None, NGT_INFO) self.fl_mock = self.app.client_manager.compute.flavors self.fl_mock.get.return_value = mock.Mock(id='flavor_id') self.fl_mock.reset_mock() # Command to test self.cmd = osc_ngt.UpdateNodeGroupTemplate(self.app, None) def test_ngt_update_no_options(self): arglist = [] verifylist = [] self.assertRaises(osc_utils.ParserException, self.check_parser, self.cmd, arglist, verifylist) def test_ngt_update_nothing_updated(self): arglist = ['template'] verifylist = [('node_group_template', 'template')] parsed_args = self.check_parser(self.cmd, arglist, 
verifylist) self.cmd.take_action(parsed_args) # Check that correct arguments were passed self.ngt_mock.update.assert_called_once_with('ng_id') def test_ngt_update_all_options(self): arglist = ['template', '--name', 'template', '--plugin', 'fake', '--plugin-version', '0.1', '--processes', 'namenode', 'tasktracker', '--security-groups', 'secgr', '--auto-security-group-enable', '--availability-zone', 'av_zone', '--flavor', 'flavor_id', '--floating-ip-pool', 'floating_pool', '--volumes-per-node', '2', '--volumes-size', '2', '--volumes-type', 'type', '--volumes-availability-zone', 'vavzone', '--volumes-mount-prefix', '/volume/asd', '--volumes-locality-enable', '--description', 'descr', '--autoconfig-enable', '--proxy-gateway-enable', '--public', '--protected'] verifylist = [('node_group_template', 'template'), ('name', 'template'), ('plugin', 'fake'), ('plugin_version', '0.1'), ('processes', ['namenode', 'tasktracker']), ('security_groups', ['secgr']), ('use_auto_security_group', True), ('availability_zone', 'av_zone'), ('flavor', 'flavor_id'), ('floating_ip_pool', 'floating_pool'), ('volumes_per_node', 2), ('volumes_size', 2), ('volumes_type', 'type'), ('volumes_availability_zone', 'vavzone'), ('volumes_mount_prefix', '/volume/asd'), ('volume_locality', True), ('description', 'descr'), ('use_autoconfig', True), ('is_proxy_gateway', True), ('is_public', True), ('is_protected', True)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) columns, data = self.cmd.take_action(parsed_args) # Check that correct arguments were passed self.ngt_mock.update.assert_called_once_with( 'ng_id', auto_security_group=True, availability_zone='av_zone', description='descr', flavor_id='flavor_id', floating_ip_pool='floating_pool', hadoop_version='0.1', is_protected=True, is_proxy_gateway=True, is_public=True, name='template', node_processes=['namenode', 'tasktracker'], plugin_name='fake', security_groups=['secgr'], use_autoconfig=True, volume_local_to_instance=True, volume_type='type', volumes_availability_zone='vavzone', volumes_per_node=2, volumes_size=2, volume_mount_prefix='/volume/asd') # Check that columns are correct expected_columns = ( 'Auto security group', 'Availability zone', 'Description', 'Flavor id', 'Floating ip pool', 'Id', 'Is default', 'Is protected', 'Is proxy gateway', 'Is public', 'Name', 'Node processes', 'Plugin name', 'Plugin version', 'Security groups', 'Use autoconfig', 'Volume local to instance', 'Volume mount prefix', 'Volume type', 'Volumes availability zone', 'Volumes per node', 'Volumes size') self.assertEqual(expected_columns, columns) # Check that data is correct expected_data = ( True, 'av_zone', 'description', 'flavor_id', 'floating_pool', 'ng_id', False, False, False, True, 'template', 'namenode, tasktracker', 'fake', '0.1', None, True, False, '/volumes/disk', None, None, 2, 2) self.assertEqual(expected_data, data) def test_ngt_update_private_unprotected(self): arglist = ['template', '--private', '--unprotected'] verifylist = [('node_group_template', 'template'), ('is_public', False), ('is_protected', False)] parsed_args = self.check_parser(self.cmd, arglist, verifylist) self.cmd.take_action(parsed_args) # Check that correct arguments were passed self.ngt_mock.update.assert_called_once_with( 'ng_id', is_protected=False, is_public=False)
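# A rough sketch of the "--json" creation case mentioned in the TODO(apavlov)
# comments above, for illustration only. It assumes (a) the create command
# accepts a lone ``--json PATH`` option pointing at a template definition
# file, and (b) the parsed file contents are forwarded unchanged to
# ``node_group_templates.create()`` as keyword arguments; neither assumption
# has been verified against the osc_ngt implementation. The class and test
# names below are hypothetical.
import json  # would normally live with the other imports at the top


class TestCreateNodeGroupTemplateFromJson(TestCreateNodeGroupTemplate):

    def test_ngt_create_from_json(self):
        template = {
            "name": "template",
            "plugin_name": "fake",
            "hadoop_version": "0.1",
            "node_processes": ["namenode", "tasktracker"],
            "flavor_id": "flavor_id",
        }
        # The file read is mocked out; only the parsed JSON content matters.
        with mock.patch("builtins.open",
                        mock.mock_open(read_data=json.dumps(template))):
            arglist = ["--json", "ngt.json"]
            verifylist = [("json", "ngt.json")]
            parsed_args = self.check_parser(self.cmd, arglist, verifylist)
            self.cmd.take_action(parsed_args)

        self.ngt_mock.create.assert_called_once_with(**template)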
from datetime import timedelta, datetime, time import json from couchdbkit import ResourceNotFound from django.contrib import messages from django.utils.decorators import method_decorator import pytz from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect, Http404, HttpResponse from django.shortcuts import render from django.utils.translation import ugettext as _, ugettext_noop from corehq.apps.app_manager.models import Application from corehq.apps.app_manager.util import get_case_properties from corehq.apps.hqwebapp.views import CRUDPaginatedViewMixin from corehq.apps.reminders.forms import ( CaseReminderForm, ComplexCaseReminderForm, SurveyForm, SurveySampleForm, EditContactForm, RemindersInErrorForm, KeywordForm, OneTimeReminderForm, SimpleScheduleCaseReminderForm, CaseReminderEventForm, CaseReminderEventMessageForm, KEYWORD_CONTENT_CHOICES, KEYWORD_RECIPIENT_CHOICES, ComplexScheduleCaseReminderForm) from corehq.apps.reminders.models import ( CaseReminderHandler, CaseReminderEvent, CaseReminder, REPEAT_SCHEDULE_INDEFINITELY, EVENT_AS_OFFSET, EVENT_AS_SCHEDULE, SurveyKeyword, SurveyKeywordAction, Survey, SURVEY_METHOD_LIST, SurveyWave, ON_DATETIME, RECIPIENT_SURVEY_SAMPLE, QUESTION_RETRY_CHOICES, REMINDER_TYPE_ONE_TIME, REMINDER_TYPE_DEFAULT, SEND_NOW, SEND_LATER, METHOD_SMS, METHOD_SMS_SURVEY, METHOD_STRUCTURED_SMS, RECIPIENT_USER_GROUP, RECIPIENT_SENDER, RECIPIENT_OWNER, ) from corehq.apps.sms.views import BaseMessagingSectionView from corehq.apps.users.decorators import require_permission from corehq.apps.users.models import CommCareUser, Permissions, CouchUser from dimagi.utils.decorators.memoized import memoized from .models import UI_SIMPLE_FIXED, UI_COMPLEX from .util import get_form_list, get_sample_list, get_recipient_name, get_form_name from corehq.apps.sms.mixin import VerifiedNumber from corehq.apps.sms.util import register_sms_contact, update_contact from corehq.apps.domain.models import Domain, DomainCounter from corehq.apps.groups.models import Group from casexml.apps.case.models import CommCareCase, CommCareCaseGroup from dateutil.parser import parse from corehq.apps.sms.util import close_task from dimagi.utils.timezones import utils as tz_utils from corehq.apps.reports import util as report_utils from dimagi.utils.couch.database import is_bigcouch, bigcouch_quorum_count, iter_docs reminders_permission = require_permission(Permissions.edit_data) def get_project_time_info(domain): timezone = report_utils.get_timezone(None, domain) now = pytz.utc.localize(datetime.utcnow()) timezone_now = now.astimezone(timezone) return (timezone, now, timezone_now) @reminders_permission def default(request, domain): return HttpResponseRedirect(reverse('list_reminders', args=[domain])) @reminders_permission def list_reminders(request, domain, reminder_type=REMINDER_TYPE_DEFAULT): all_handlers = CaseReminderHandler.get_handlers(domain=domain).all() all_handlers = filter(lambda x : x.reminder_type == reminder_type, all_handlers) if reminder_type == REMINDER_TYPE_ONE_TIME: all_handlers.sort(key=lambda handler : handler.start_datetime) handlers = [] utcnow = datetime.utcnow() timezone, now, timezone_now = get_project_time_info(domain) for handler in all_handlers: if reminder_type == REMINDER_TYPE_ONE_TIME: reminders = handler.get_reminders() try: reminder = reminders[0] except IndexError: handler.retire() continue recipients = get_recipient_name(reminder.recipient, include_desc=False) if handler.method == METHOD_SMS_SURVEY: content = 
get_form_name(handler.events[0].form_unique_id) else: message = handler.events[0].message[handler.default_lang] if len(message) > 50: content = '"%s..."' % message[:47] else: content = '"%s"' % message sent = handler.start_datetime <= utcnow else: recipients = None content = None sent = None handlers.append({ "handler" : handler, "recipients" : recipients, "content" : content, "sent" : sent, "start_datetime" : tz_utils.adjust_datetime_to_timezone(handler.start_datetime, pytz.utc.zone, timezone.zone) if handler.start_datetime is not None else None, }) return render(request, "reminders/partial/list_reminders.html", { 'domain': domain, 'reminder_handlers': handlers, 'reminder_type': reminder_type, 'timezone' : timezone, 'now' : now, 'timezone_now' : timezone_now, }) @reminders_permission def add_reminder(request, domain, handler_id=None, template="reminders/partial/add_reminder.html"): if handler_id: handler = CaseReminderHandler.get(handler_id) if handler.doc_type != 'CaseReminderHandler' or handler.domain != domain: raise Http404 else: handler = None if request.method == "POST": reminder_form = CaseReminderForm(request.POST) if reminder_form.is_valid(): if not handler: handler = CaseReminderHandler(domain=domain) handler.ui_type = UI_SIMPLE_FIXED for key, value in reminder_form.cleaned_data.items(): if (key != "frequency") and (key != "message"): handler[key] = value handler.max_iteration_count = REPEAT_SCHEDULE_INDEFINITELY handler.schedule_length = reminder_form.cleaned_data["frequency"] handler.event_interpretation = EVENT_AS_OFFSET handler.events = [ CaseReminderEvent( day_num = 0 ,fire_time = time(hour=0,minute=0,second=0) ,message = reminder_form.cleaned_data["message"] ,callback_timeout_intervals = [] ) ] handler.save() return HttpResponseRedirect(reverse('list_reminders', args=[domain])) elif handler: initial = {} for key in handler.to_json(): if (key != "max_iteration_count") and (key != "schedule_length") and (key != "events") and (key != "event_interpretation"): initial[key] = handler[key] initial["message"] = json.dumps(handler.events[0].message) initial["frequency"] = handler.schedule_length reminder_form = CaseReminderForm(initial=initial) else: reminder_form = CaseReminderForm() return render(request, template, { 'reminder_form': reminder_form, 'domain': domain }) def render_one_time_reminder_form(request, domain, form, handler_id): timezone, now, timezone_now = get_project_time_info(domain) context = { "domain": domain, "form" : form, "sample_list" : get_sample_list(domain), "form_list" : get_form_list(domain), "groups" : Group.by_domain(domain), "handler_id" : handler_id, "timezone" : timezone, "timezone_now" : timezone_now, "now" : now, } return render(request, "reminders/partial/add_one_time_reminder.html", context) @reminders_permission def add_one_time_reminder(request, domain, handler_id=None): if handler_id: handler = CaseReminderHandler.get(handler_id) if handler.doc_type != "CaseReminderHandler" or handler.domain != domain: raise Http404 else: handler = None timezone = report_utils.get_timezone(None, domain) # Use project timezone only if request.method == "POST": form = OneTimeReminderForm(request.POST) form._cchq_domain = domain if form.is_valid(): content_type = form.cleaned_data.get("content_type") recipient_type = form.cleaned_data.get("recipient_type") if handler is None: handler = CaseReminderHandler( domain = domain, reminder_type = REMINDER_TYPE_ONE_TIME, nickname = "One-time Reminder", ) handler.default_lang = "xx" handler.method = content_type 
handler.recipient = recipient_type handler.start_condition_type = ON_DATETIME handler.start_datetime = form.cleaned_data.get("datetime") handler.start_offset = 0 handler.events = [CaseReminderEvent( day_num = 0, fire_time = time(0,0), form_unique_id = form.cleaned_data.get("form_unique_id") if content_type == METHOD_SMS_SURVEY else None, message = {handler.default_lang : form.cleaned_data.get("message")} if content_type == METHOD_SMS else {}, callback_timeout_intervals = [], )] handler.schedule_length = 1 handler.event_interpretation = EVENT_AS_OFFSET handler.max_iteration_count = 1 handler.sample_id = form.cleaned_data.get("case_group_id") if recipient_type == RECIPIENT_SURVEY_SAMPLE else None handler.user_group_id = form.cleaned_data.get("user_group_id") if recipient_type == RECIPIENT_USER_GROUP else None handler.save() return HttpResponseRedirect(reverse('one_time_reminders', args=[domain])) else: if handler is not None: start_datetime = tz_utils.adjust_datetime_to_timezone(handler.start_datetime, pytz.utc.zone, timezone.zone) initial = { "send_type" : SEND_LATER, "date" : start_datetime.strftime("%Y-%m-%d"), "time" : start_datetime.strftime("%H:%M"), "recipient_type" : handler.recipient, "case_group_id" : handler.sample_id, "user_group_id" : handler.user_group_id, "content_type" : handler.method, "message" : handler.events[0].message[handler.default_lang] if handler.default_lang in handler.events[0].message else None, "form_unique_id" : handler.events[0].form_unique_id if handler.events[0].form_unique_id is not None else None, } else: initial = {} form = OneTimeReminderForm(initial=initial) return render_one_time_reminder_form(request, domain, form, handler_id) @reminders_permission def copy_one_time_reminder(request, domain, handler_id): handler = CaseReminderHandler.get(handler_id) initial = { "send_type" : SEND_NOW, "recipient_type" : handler.recipient, "case_group_id" : handler.sample_id, "user_group_id" : handler.user_group_id, "content_type" : handler.method, "message" : handler.events[0].message[handler.default_lang] if handler.default_lang in handler.events[0].message else None, "form_unique_id" : handler.events[0].form_unique_id if handler.events[0].form_unique_id is not None else None, } return render_one_time_reminder_form(request, domain, OneTimeReminderForm(initial=initial), None) @reminders_permission def delete_reminder(request, domain, handler_id): handler = CaseReminderHandler.get(handler_id) if handler.doc_type != 'CaseReminderHandler' or handler.domain != domain: raise Http404 handler.retire() view_name = "one_time_reminders" if handler.reminder_type == REMINDER_TYPE_ONE_TIME else "list_reminders" return HttpResponseRedirect(reverse(view_name, args=[domain])) @reminders_permission def scheduled_reminders(request, domain, template="reminders/partial/scheduled_reminders.html"): timezone = Domain.get_by_name(domain).default_timezone reminders = CaseReminderHandler.get_all_reminders(domain) dates = [] now = datetime.utcnow() timezone_now = datetime.now(pytz.timezone(timezone)) today = timezone_now.date() def adjust_next_fire_to_timezone(reminder_utc): return tz_utils.adjust_datetime_to_timezone( reminder_utc.next_fire, pytz.utc.zone, timezone) if reminders: start_date = adjust_next_fire_to_timezone(reminders[0]).date() if today < start_date: start_date = today end_date = adjust_next_fire_to_timezone(reminders[-1]).date() else: start_date = end_date = today # make sure start date is a Monday and enddate is a Sunday start_date -= timedelta(days=start_date.weekday()) 
end_date += timedelta(days=6-end_date.weekday()) while start_date <= end_date: dates.append(start_date) start_date += timedelta(days=1) reminder_data = [] for reminder in reminders: handler = reminder.handler recipient = reminder.recipient recipient_desc = get_recipient_name(recipient) case = reminder.case reminder_data.append({ "handler_name" : handler.nickname, "next_fire" : adjust_next_fire_to_timezone(reminder), "recipient_desc" : recipient_desc, "recipient_type" : handler.recipient, "case_id" : case.get_id if case is not None else None, "case_name" : case.name if case is not None else None, }) return render(request, template, { 'domain': domain, 'reminder_data': reminder_data, 'dates': dates, 'today': today, 'now': now, 'timezone': timezone, 'timezone_now': timezone_now, }) def get_events_scheduling_info(events): """ Return a list of events as dictionaries, only with information pertinent to scheduling changes. """ result = [] for e in events: result.append({ "day_num" : e.day_num, "fire_time" : e.fire_time, "fire_time_aux" : e.fire_time_aux, "fire_time_type" : e.fire_time_type, "time_window_length" : e.time_window_length, "callback_timeout_intervals" : e.callback_timeout_intervals, "form_unique_id" : e.form_unique_id, }) return result @reminders_permission def add_complex_reminder_schedule(request, domain, handler_id=None): if handler_id: h = CaseReminderHandler.get(handler_id) if h.doc_type != 'CaseReminderHandler' or h.domain != domain: raise Http404 else: h = None form_list = get_form_list(domain) sample_list = get_sample_list(domain) if request.method == "POST": form = ComplexCaseReminderForm(request.POST) form._cchq_is_superuser = request.couch_user.is_superuser form._cchq_use_custom_content_handler = (h is not None and h.custom_content_handler is not None) form._cchq_custom_content_handler = h.custom_content_handler if h is not None else None form._cchq_domain = domain if form.is_valid(): if h is None: h = CaseReminderHandler(domain=domain) h.ui_type = UI_COMPLEX else: if h.start_condition_type != form.cleaned_data["start_condition_type"]: for reminder in h.get_reminders(): reminder.retire() h.active = form.cleaned_data["active"] h.case_type = form.cleaned_data["case_type"] h.nickname = form.cleaned_data["nickname"] h.default_lang = form.cleaned_data["default_lang"] h.method = form.cleaned_data["method"] h.recipient = form.cleaned_data["recipient"] h.start_property = form.cleaned_data["start_property"] h.start_value = form.cleaned_data["start_value"] h.start_date = form.cleaned_data["start_date"] old_start_offset = h.start_offset h.start_offset = form.cleaned_data["start_offset"] h.start_match_type = form.cleaned_data["start_match_type"] old_schedule_length = h.schedule_length h.schedule_length = form.cleaned_data["schedule_length"] h.event_interpretation = form.cleaned_data["event_interpretation"] h.max_iteration_count = form.cleaned_data["max_iteration_count"] h.until = form.cleaned_data["until"] old_events = h.events h.events = form.cleaned_data["events"] h.submit_partial_forms = form.cleaned_data["submit_partial_forms"] h.include_case_side_effects = form.cleaned_data["include_case_side_effects"] h.ui_frequency = form.cleaned_data["frequency"] h.start_condition_type = form.cleaned_data["start_condition_type"] h.max_question_retries = form.cleaned_data["max_question_retries"] h.recipient_case_match_property = form.cleaned_data["recipient_case_match_property"] h.recipient_case_match_type = form.cleaned_data["recipient_case_match_type"] h.recipient_case_match_value = 
form.cleaned_data["recipient_case_match_value"] h.custom_content_handler = form.cleaned_data["custom_content_handler"] h.force_surveys_to_use_triggered_case = form.cleaned_data["force_surveys_to_use_triggered_case"] h.user_group_id = form.cleaned_data["user_group_id"] if form.cleaned_data["start_condition_type"] == "ON_DATETIME": dt = parse(form.cleaned_data["start_datetime_date"]).date() tm = parse(form.cleaned_data["start_datetime_time"]).time() h.start_datetime = datetime.combine(dt, tm) else: h.start_datetime = None h.sample_id = form.cleaned_data["sample_id"] if get_events_scheduling_info(old_events) != get_events_scheduling_info(h.events) or old_start_offset != h.start_offset or old_schedule_length != h.schedule_length: save_kwargs = { "schedule_changed" : True, "prev_definition" : CaseReminderHandler.get(handler_id) if handler_id is not None else None, } else: save_kwargs = {} h.save(**save_kwargs) return HttpResponseRedirect(reverse('list_reminders', args=[domain])) else: if h is not None: initial = { "active" : h.active, "case_type" : h.case_type, "nickname" : h.nickname, "default_lang" : h.default_lang, "method" : h.method, "recipient" : h.recipient, "start_property" : h.start_property, "start_value" : h.start_value, "start_date" : h.start_date, "start_match_type" : h.start_match_type, "start_offset" : h.start_offset, "schedule_length" : h.schedule_length, "event_interpretation" : h.event_interpretation, "max_iteration_count" : h.max_iteration_count, "until" : h.until, "events" : h.events, "submit_partial_forms" : h.submit_partial_forms, "include_case_side_effects" : h.include_case_side_effects, "start_condition_type" : h.start_condition_type, "start_datetime_date" : str(h.start_datetime.date()) if isinstance(h.start_datetime, datetime) else None, "start_datetime_time" : str(h.start_datetime.time()) if isinstance(h.start_datetime, datetime) else None, "frequency" : h.ui_frequency, "sample_id" : h.sample_id, "use_until" : "Y" if h.until is not None else "N", "max_question_retries" : h.max_question_retries, "recipient_case_match_property" : h.recipient_case_match_property, "recipient_case_match_type" : h.recipient_case_match_type, "recipient_case_match_value" : h.recipient_case_match_value, "use_custom_content_handler" : h.custom_content_handler is not None, "custom_content_handler" : h.custom_content_handler, "force_surveys_to_use_triggered_case" : h.force_surveys_to_use_triggered_case, "user_group_id": h.user_group_id, } else: initial = { "events" : [CaseReminderEvent(day_num=0, fire_time=time(0,0), message={"":""}, callback_timeout_intervals=[], form_unique_id=None)], "use_until" : "N", "max_question_retries" : QUESTION_RETRY_CHOICES[-1], "active" : True, } form = ComplexCaseReminderForm(initial=initial) return render(request, "reminders/partial/add_complex_reminder.html", { "domain": domain, "form": form, "form_list": form_list, "handler_id": handler_id, "sample_list": sample_list, "is_superuser" : request.couch_user.is_superuser, "user_groups": Group.by_domain(domain), }) class CreateScheduledReminderView(BaseMessagingSectionView): urlname = 'create_reminder_schedule' page_title = ugettext_noop("Schedule Reminder") template_name = 'reminders/manage_scheduled_reminder.html' ui_type = UI_SIMPLE_FIXED @property def reminder_form_class(self): return { UI_COMPLEX: ComplexScheduleCaseReminderForm, UI_SIMPLE_FIXED: SimpleScheduleCaseReminderForm, }[self.ui_type] @property @memoized def schedule_form(self): if self.request.method == 'POST': return self.reminder_form_class( 
self.request.POST, domain=self.domain, is_previewer=self.is_previewer, ) return self.reminder_form_class( is_previewer=self.is_previewer, domain=self.domain, ) @property def available_languages(self): return ['en'] @property def is_previewer(self): return self.request.couch_user.is_previewer @property def parent_pages(self): return [ { 'title': _("Reminders"), 'url': reverse('list_reminders', args=[self.domain]), }, ] @property def page_context(self): return { 'form': self.schedule_form, 'event_form': CaseReminderEventForm(ui_type=self.ui_type), 'message_form': CaseReminderEventMessageForm(), 'ui_type': self.ui_type, 'available_languages': self.available_languages, } @property def available_case_types(self): case_types = [] for app_doc in iter_docs(Application.get_db(), self.app_ids): app = Application.wrap(app_doc) case_types.extend([m.case_type for m in app.modules]) return set(case_types) @property def action(self): return self.request.POST.get('action') @property def case_type(self): return self.request.POST.get('caseType') @property def app_ids(self): data = Application.get_db().view( 'app_manager/applications_brief', reduce=False, startkey=[self.domain], endkey=[self.domain, {}], ).all() return [d['id'] for d in data] @property def search_term(self): return self.request.POST.get('term') @property def search_case_type_response(self): filtered_case_types = self._filter_by_term(self.available_case_types) return self._format_response(filtered_case_types) @property def search_case_property_response(self): if not self.case_type: return [] case_properties = ['name'] for app_doc in iter_docs(Application.get_db(), self.app_ids): app = Application.wrap(app_doc) for properties in get_case_properties(app, [self.case_type]).values(): case_properties.extend(properties) case_properties = self._filter_by_term(set(case_properties)) return self._format_response(case_properties) @property def search_subcase_property_response(self): if not self.case_type: return [] subcase_properties = ['name'] for app_doc in iter_docs(Application.get_db(), self.app_ids): app = Application.wrap(app_doc) for module in app.get_modules(): if not module.case_type == self.case_type: continue for form in module.get_forms(): for subcase in form.actions.subcases: subcase_properties.extend(subcase.case_properties.keys()) subcase_properties = self._filter_by_term(set(subcase_properties)) return self._format_response(subcase_properties) @property def search_forms_response(self): forms = [] for app_doc in iter_docs(Application.get_db(), self.app_ids): app = Application.wrap(app_doc) for module in app.get_modules(): for form in module.get_forms(): forms.append({ 'text': form.full_path_name, 'id': form.unique_id, }) if not self.search_term: return forms final_forms = [] search_terms = self.search_term.split(" ") for form in forms: matches = [t for t in search_terms if t in form['text']] if len(matches) == len(search_terms): final_forms.append(form) return final_forms def _filter_by_term(self, filter_list): return [f for f in filter_list if self.search_term in f] def _format_response(self, resp_list): return [{'text': r, 'id': r} for r in resp_list] @method_decorator(reminders_permission) def dispatch(self, request, *args, **kwargs): return super(CreateScheduledReminderView, self).dispatch(request, *args, **kwargs) def post(self, *args, **kwargs): if self.action in [ 'search_case_type', 'search_case_property', 'search_subcase_property', 'search_forms', ]: return HttpResponse(json.dumps(getattr(self, '%s_response' % self.action))) if 
self.schedule_form.is_valid(): self.process_schedule_form() return HttpResponseRedirect(reverse(RemindersListView.urlname, args=[self.domain])) else: messages.error(self.request, "There were errors saving your reminder.") return self.get(*args, **kwargs) def process_schedule_form(self): new_handler = CaseReminderHandler() self.schedule_form.save(new_handler) class CreateComplexScheduledReminderView(CreateScheduledReminderView): urlname = 'create_complex_reminder_schedule' page_title = ugettext_noop("Schedule Multi Event Reminder") ui_type = UI_COMPLEX class EditScheduledReminderView(CreateScheduledReminderView): urlname = 'edit_reminder_schedule' page_title = ugettext_noop("Edit Scheduled Reminder") @property def handler_id(self): return self.kwargs.get('handler_id') @property def page_name(self): if self.ui_type == UI_COMPLEX: return _("Edit Scheduled Multi Event Reminder") return self.page_title @property @memoized def schedule_form(self): initial = self.reminder_form_class.compute_initial(self.reminder_handler) if self.request.method == 'POST': return self.reminder_form_class( self.request.POST, initial=initial, is_previewer=self.is_previewer, domain=self.domain, is_edit=True, ) return self.reminder_form_class( initial=initial, is_previewer=self.is_previewer, domain=self.domain, is_edit=True, ) @property @memoized def reminder_handler(self): try: return CaseReminderHandler.get(self.handler_id) except ResourceNotFound: raise Http404() @property def available_languages(self): langcodes = [] for event in self.reminder_handler.events: langcodes.extend(event.message.keys()) return list(set(langcodes)) or ['en'] @property def ui_type(self): return self.reminder_handler.ui_type @property def page_context(self): page_context = super(EditScheduledReminderView, self).page_context page_context.update({ 'handler_id': self.handler_id, }) return page_context @property def page_url(self): return reverse(self.urlname, args=[self.domain, self.handler_id]) def process_schedule_form(self): self.schedule_form.save(self.reminder_handler) @reminders_permission def manage_keywords(request, domain): context = { "domain" : domain, "keywords" : SurveyKeyword.get_all(domain) } return render(request, "reminders/partial/manage_keywords.html", context) @reminders_permission def add_keyword(request, domain, keyword_id=None): sk = None if keyword_id is not None: sk = SurveyKeyword.get(keyword_id) if sk.domain != domain: raise Http404 if request.method == "POST": form = KeywordForm(request.POST) form._cchq_domain = domain form._sk_id = sk._id if sk is not None else None if form.is_valid(): if sk is None: sk = SurveyKeyword(domain=domain) sk.keyword = form.cleaned_data.get("keyword") sk.description = form.cleaned_data.get("description") sk.delimiter = form.cleaned_data.get("delimiter") sk.override_open_sessions = form.cleaned_data.get("override_open_sessions") sk.initiator_doc_type_filter = [] if form.cleaned_data.get("restrict_keyword_initiation"): if form.cleaned_data.get("allow_initiation_by_case"): sk.initiator_doc_type_filter.append("CommCareCase") if form.cleaned_data.get("allow_initiation_by_mobile_worker"): sk.initiator_doc_type_filter.append("CommCareUser") sk.actions = [SurveyKeywordAction( recipient = RECIPIENT_SENDER, action = form.cleaned_data.get("sender_content_type"), message_content = form.cleaned_data.get("sender_message"), form_unique_id = form.cleaned_data.get("sender_form_unique_id"), )] if form.cleaned_data.get("process_structured_sms"): sk.actions.append(SurveyKeywordAction( recipient = 
RECIPIENT_SENDER, action = METHOD_STRUCTURED_SMS, form_unique_id = form.cleaned_data.get("structured_sms_form_unique_id"), use_named_args = form.cleaned_data.get("use_named_args"), named_args = form.cleaned_data.get("named_args"), named_args_separator = form.cleaned_data.get("named_args_separator"), )) if form.cleaned_data.get("notify_others"): sk.actions.append(SurveyKeywordAction( recipient = form.cleaned_data.get("other_recipient_type"), recipient_id = form.cleaned_data.get("other_recipient_id"), action = form.cleaned_data.get("other_recipient_content_type"), message_content = form.cleaned_data.get("other_recipient_message"), form_unique_id = form.cleaned_data.get("other_recipient_form_unique_id"), )) sk.save() return HttpResponseRedirect(reverse("manage_keywords", args=[domain])) else: initial = { "keyword" : None, "description" : None, "override_open_sessions" : False, "sender_content_type" : None, "sender_message" : None, "sender_form_unique_id" : None, "notify_others" : False, "other_recipient_type" : None, "other_recipient_id" : None, "other_recipient_content_type" : None, "other_recipient_message" : None, "other_recipient_form_unique_id" : None, "process_structured_sms" : False, "structured_sms_form_unique_id" : None, "use_custom_delimiter" : False, "delimiter" : None, "use_named_args_separator" : False, "use_named_args" : False, "named_args_separator" : None, "named_args" : [], "restrict_keyword_initiation" : False, "allow_initiation_by_case" : False, "allow_initiation_by_mobile_worker" : False, } if sk is not None: initial["keyword"] = sk.keyword initial["description"] = sk.description initial["delimiter"] = sk.delimiter initial["override_open_sessions"] = sk.override_open_sessions initial["restrict_keyword_initiation"] = len(sk.initiator_doc_type_filter) > 0 initial["allow_initiation_by_case"] = "CommCareCase" in sk.initiator_doc_type_filter initial["allow_initiation_by_mobile_worker"] = "CommCareUser" in sk.initiator_doc_type_filter for action in sk.actions: if action.action == METHOD_STRUCTURED_SMS: initial["process_structured_sms"] = True initial["structured_sms_form_unique_id"] = action.form_unique_id initial["use_custom_delimiter"] = sk.delimiter is not None initial["use_named_args_separator"] = action.named_args_separator is not None initial["use_named_args"] = action.use_named_args initial["named_args_separator"] = action.named_args_separator initial["named_args"] = [{"name" : k, "xpath" : v} for k, v in action.named_args.items()] elif action.recipient == RECIPIENT_SENDER: initial["sender_content_type"] = action.action initial["sender_message"] = action.message_content initial["sender_form_unique_id"] = action.form_unique_id else: initial["notify_others"] = True initial["other_recipient_type"] = action.recipient initial["other_recipient_id"] = action.recipient_id initial["other_recipient_content_type"] = action.action initial["other_recipient_message"] = action.message_content initial["other_recipient_form_unique_id"] = action.form_unique_id form = KeywordForm(initial=initial) context = { "domain" : domain, "form_list" : get_form_list(domain), "form" : form, "keyword" : sk, "content_type_choices" : [{"code" : a[0], "desc" : a[1]} for a in KEYWORD_CONTENT_CHOICES], "recipient_type_choices" : [{"code" : a[0], "desc" : a[1]} for a in KEYWORD_RECIPIENT_CHOICES], "groups" : [{"code" : g._id, "desc" : g.name} for g in Group.by_domain(domain)], } return render(request, "reminders/partial/add_keyword.html", context) @reminders_permission def delete_keyword(request, domain, 
keyword_id): s = SurveyKeyword.get(keyword_id) if s.domain != domain or s.doc_type != "SurveyKeyword": raise Http404 s.retire() return HttpResponseRedirect(reverse("manage_keywords", args=[domain])) @reminders_permission def add_survey(request, domain, survey_id=None): survey = None if survey_id is not None: survey = Survey.get(survey_id) if request.method == "POST": form = SurveyForm(request.POST) if form.is_valid(): name = form.cleaned_data.get("name") waves = form.cleaned_data.get("waves") followups = form.cleaned_data.get("followups") samples = form.cleaned_data.get("samples") send_automatically = form.cleaned_data.get("send_automatically") send_followup = form.cleaned_data.get("send_followup") sample_data = {} for sample in samples: sample_data[sample["sample_id"]] = sample if send_followup: timeout_intervals = [int(followup["interval"]) * 1440 for followup in followups] else: timeout_intervals = [] timeout_duration = sum(timeout_intervals) / 1440 final_timeout = lambda wave : [((wave.end_date - wave.date).days - timeout_duration) * 1440] if survey is None: wave_list = [] for wave in waves: wave_list.append(SurveyWave( date=parse(wave["date"]).date(), time=parse(wave["time"]).time(), end_date=parse(wave["end_date"]).date(), form_id=wave["form_id"], reminder_definitions={}, delegation_tasks={}, )) if send_automatically: for wave in wave_list: for sample in samples: if sample["method"] == "SMS": handler = CaseReminderHandler( domain = domain, nickname = "Survey '%s'" % name, default_lang = "en", method = "survey", recipient = RECIPIENT_SURVEY_SAMPLE, start_condition_type = ON_DATETIME, start_datetime = datetime.combine(wave.date, time(0,0)), start_offset = 0, events = [CaseReminderEvent( day_num = 0, fire_time = wave.time, form_unique_id = wave.form_id, callback_timeout_intervals = timeout_intervals + final_timeout(wave), )], schedule_length = 1, event_interpretation = EVENT_AS_SCHEDULE, max_iteration_count = 1, sample_id = sample["sample_id"], survey_incentive = sample["incentive"], submit_partial_forms = True, ) handler.save() wave.reminder_definitions[sample["sample_id"]] = handler._id survey = Survey ( domain = domain, name = name, waves = wave_list, followups = followups, samples = samples, send_automatically = send_automatically, send_followup = send_followup ) else: current_waves = survey.waves survey.waves = [] unchanged_wave_json = [] # Keep any waves that didn't change in case the surveys are in progress for wave in current_waves: for wave_json in waves: parsed_date = parse(wave_json["date"]).date() parsed_time = parse(wave_json["time"]).time() if parsed_date == wave.date and parsed_time == wave.time and wave_json["form_id"] == wave.form_id: wave.end_date = parse(wave_json["end_date"]).date() survey.waves.append(wave) unchanged_wave_json.append(wave_json) continue for wave in survey.waves: current_waves.remove(wave) for wave_json in unchanged_wave_json: waves.remove(wave_json) # Retire reminder definitions / close delegation tasks for old waves for wave in current_waves: for sample_id, handler_id in wave.reminder_definitions.items(): handler = CaseReminderHandler.get(handler_id) handler.retire() for sample_id, delegation_data in wave.delegation_tasks.items(): for case_id, delegation_case_id in delegation_data.items(): close_task(domain, delegation_case_id, request.couch_user.get_id) # Add in new waves for wave_json in waves: survey.waves.append(SurveyWave( date=parse(wave_json["date"]).date(), time=parse(wave_json["time"]).time(), end_date=parse(wave_json["end_date"]).date(), 
form_id=wave_json["form_id"], reminder_definitions={}, delegation_tasks={}, )) # Retire reminder definitions that are no longer needed if send_automatically: new_sample_ids = [sample_json["sample_id"] for sample_json in samples if sample_json["method"] == "SMS"] else: new_sample_ids = [] for wave in survey.waves: for sample_id, handler_id in wave.reminder_definitions.items(): if sample_id not in new_sample_ids: handler = CaseReminderHandler.get(handler_id) handler.retire() del wave.reminder_definitions[sample_id] # Update existing reminder definitions for wave in survey.waves: for sample_id, handler_id in wave.reminder_definitions.items(): handler = CaseReminderHandler.get(handler_id) handler.events[0].callback_timeout_intervals = timeout_intervals + final_timeout(wave) handler.nickname = "Survey '%s'" % name handler.survey_incentive = sample_data[sample_id]["incentive"] handler.save() # Create additional reminder definitions as necessary for wave in survey.waves: for sample_id in new_sample_ids: if sample_id not in wave.reminder_definitions: handler = CaseReminderHandler( domain = domain, nickname = "Survey '%s'" % name, default_lang = "en", method = "survey", recipient = RECIPIENT_SURVEY_SAMPLE, start_condition_type = ON_DATETIME, start_datetime = datetime.combine(wave.date, time(0,0)), start_offset = 0, events = [CaseReminderEvent( day_num = 0, fire_time = wave.time, form_unique_id = wave.form_id, callback_timeout_intervals = timeout_intervals + final_timeout(wave), )], schedule_length = 1, event_interpretation = EVENT_AS_SCHEDULE, max_iteration_count = 1, sample_id = sample_id, survey_incentive = sample_data[sample_id]["incentive"], submit_partial_forms = True, ) handler.save() wave.reminder_definitions[sample_id] = handler._id # Set the rest of the survey info survey.name = name survey.followups = followups survey.samples = samples survey.send_automatically = send_automatically survey.send_followup = send_followup # Sort the questionnaire waves by date and time survey.waves = sorted(survey.waves, key = lambda wave : datetime.combine(wave.date, wave.time)) # Create / Close delegation tasks as necessary for samples with method "CATI" survey.update_delegation_tasks(request.couch_user.get_id) survey.save() return HttpResponseRedirect(reverse("survey_list", args=[domain])) else: initial = {} if survey is not None: waves = [] samples = [CommCareCaseGroup.get(sample["sample_id"]) for sample in survey.samples] utcnow = datetime.utcnow() for wave in survey.waves: wave_json = { "date" : str(wave.date), "form_id" : wave.form_id, "time" : str(wave.time), "ignore" : wave.has_started(survey), "end_date" : str(wave.end_date), } waves.append(wave_json) initial["name"] = survey.name initial["waves"] = waves initial["followups"] = survey.followups initial["samples"] = survey.samples initial["send_automatically"] = survey.send_automatically initial["send_followup"] = survey.send_followup form = SurveyForm(initial=initial) form_list = get_form_list(domain) form_list.insert(0, {"code":"--choose--", "name":"-- Choose --"}) sample_list = get_sample_list(domain) sample_list.insert(0, {"code":"--choose--", "name":"-- Choose --"}) context = { "domain" : domain, "survey_id" : survey_id, "form" : form, "form_list" : form_list, "sample_list" : sample_list, "method_list" : SURVEY_METHOD_LIST, "user_list" : CommCareUser.by_domain(domain), "started" : survey.has_started() if survey is not None else False, } return render(request, "reminders/partial/add_survey.html", context) @reminders_permission def 
survey_list(request, domain): context = { "domain" : domain, "surveys" : Survey.get_all(domain) } return render(request, "reminders/partial/survey_list.html", context) @reminders_permission def add_sample(request, domain, sample_id=None): sample = None if sample_id is not None: try: sample = CommCareCaseGroup.get(sample_id) except ResourceNotFound: raise Http404 if request.method == "POST": form = SurveySampleForm(request.POST, request.FILES) if form.is_valid(): name = form.cleaned_data.get("name") sample_contacts = form.cleaned_data.get("sample_contacts") time_zone = form.cleaned_data.get("time_zone") use_contact_upload_file = form.cleaned_data.get("use_contact_upload_file") contact_upload_file = form.cleaned_data.get("contact_upload_file") if sample is None: sample = CommCareCaseGroup( domain=domain, name=name, timezone=time_zone.zone ) else: sample.name = name sample.timezone = time_zone.zone errors = [] phone_numbers = [] if use_contact_upload_file == "Y": for contact in contact_upload_file: phone_numbers.append(contact["phone_number"]) else: for contact in sample_contacts: phone_numbers.append(contact["phone_number"]) existing_number_entries = VerifiedNumber.view('sms/verified_number_by_number', keys=phone_numbers, include_docs=True ).all() for entry in existing_number_entries: if entry.domain != domain or entry.owner_doc_type != "CommCareCase": errors.append("Cannot use phone number %s" % entry.phone_number) if len(errors) > 0: if use_contact_upload_file == "Y": form._errors["contact_upload_file"] = form.error_class(errors) else: form._errors["sample_contacts"] = form.error_class(errors) else: existing_numbers = [v.phone_number for v in existing_number_entries] nonexisting_numbers = list(set(phone_numbers).difference(existing_numbers)) id_range = DomainCounter.increment(domain, "survey_contact_id", len(nonexisting_numbers)) ids = iter(range(id_range[0], id_range[1] + 1)) for phone_number in nonexisting_numbers: register_sms_contact(domain, "participant", str(ids.next()), request.couch_user.get_id, phone_number, contact_phone_number_is_verified="1", contact_backend_id="MOBILE_BACKEND_TROPO_US", language_code="en", time_zone=time_zone.zone) newly_registered_entries = VerifiedNumber.view('sms/verified_number_by_number', keys=nonexisting_numbers, include_docs=True ).all() sample.cases = ([v.owner_id for v in existing_number_entries] + [v.owner_id for v in newly_registered_entries]) sample.save() # Update delegation tasks for surveys using this sample surveys = Survey.view("reminders/sample_to_survey", key=[domain, sample._id, "CATI"], include_docs=True).all() for survey in surveys: survey.update_delegation_tasks(request.couch_user.get_id) survey.save() return HttpResponseRedirect(reverse("sample_list", args=[domain])) else: initial = {} if sample is not None: initial["name"] = sample.name initial["time_zone"] = sample.timezone contact_info = [] for case_id in sample.cases: case = CommCareCase.get(case_id) contact_info.append({"id":case.name, "phone_number":case.contact_phone_number, "case_id" : case_id}) initial["sample_contacts"] = contact_info form = SurveySampleForm(initial=initial) context = { "domain" : domain, "form" : form, "sample_id" : sample_id } return render(request, "reminders/partial/add_sample.html", context) @reminders_permission def sample_list(request, domain): context = { "domain" : domain, "samples": CommCareCaseGroup.get_all(domain) } return render(request, "reminders/partial/sample_list.html", context) @reminders_permission def edit_contact(request, domain, 
sample_id, case_id): case = CommCareCase.get(case_id) if case.domain != domain: raise Http404 if request.method == "POST": form = EditContactForm(request.POST) if form.is_valid(): phone_number = form.cleaned_data.get("phone_number") vn = VerifiedNumber.view('sms/verified_number_by_number', key=phone_number, include_docs=True, ).one() if vn is not None and vn.owner_id != case_id: form._errors["phone_number"] = form.error_class(["Phone number is already in use."]) else: update_contact(domain, case_id, request.couch_user.get_id, contact_phone_number=phone_number) return HttpResponseRedirect(reverse("edit_sample", args=[domain, sample_id])) else: initial = { "phone_number" : case.get_case_property("contact_phone_number"), } form = EditContactForm(initial=initial) context = { "domain" : domain, "case" : case, "form" : form, } return render(request, "reminders/partial/edit_contact.html", context) @reminders_permission def reminders_in_error(request, domain): handler_map = {} if request.method == "POST": form = RemindersInErrorForm(request.POST) if form.is_valid(): kwargs = {} if is_bigcouch(): # Force a write to all nodes before returning kwargs["w"] = bigcouch_quorum_count() current_timestamp = datetime.utcnow() for reminder_id in form.cleaned_data.get("selected_reminders"): reminder = CaseReminder.get(reminder_id) if reminder.domain != domain: continue if reminder.handler_id in handler_map: handler = handler_map[reminder.handler_id] else: handler = reminder.handler handler_map[reminder.handler_id] = handler reminder.error = False reminder.error_msg = None handler.set_next_fire(reminder, current_timestamp) reminder.save(**kwargs) timezone = report_utils.get_timezone(request.couch_user.user_id, domain) reminders = [] for reminder in CaseReminder.view("reminders/reminders_in_error", startkey=[domain], endkey=[domain, {}], include_docs=True).all(): if reminder.handler_id in handler_map: handler = handler_map[reminder.handler_id] else: handler = reminder.handler handler_map[reminder.handler_id] = handler recipient = reminder.recipient case = reminder.case reminders.append({ "reminder_id" : reminder._id, "handler_type" : handler.reminder_type, "handler_id" : reminder.handler_id, "handler_name" : handler.nickname, "case_id" : case.get_id if case is not None else None, "case_name" : case.name if case is not None else None, "next_fire" : tz_utils.adjust_datetime_to_timezone(reminder.next_fire, pytz.utc.zone, timezone.zone).strftime("%Y-%m-%d %H:%M:%S"), "error_msg" : reminder.error_msg or "-", "recipient_name" : get_recipient_name(recipient), }) context = { "domain" : domain, "reminders" : reminders, "timezone" : timezone, "timezone_now" : datetime.now(tz=timezone), } return render(request, "reminders/partial/reminders_in_error.html", context) class RemindersListView(BaseMessagingSectionView): template_name = 'reminders/reminders_list.html' urlname = "list_reminders_new" page_title = ugettext_noop("Reminder Definitions") @property def page_url(self): return reverse(self.urlname, args=[self.domain]) @property def reminders(self): all_handlers = CaseReminderHandler.get_handlers(domain=self.domain).all() all_handlers = filter(lambda x : x.reminder_type == REMINDER_TYPE_DEFAULT, all_handlers) for handler in all_handlers: yield self._fmt_reminder_data(handler) @property def page_context(self): return { 'reminders': list(self.reminders), } @property def reminder_id(self): return self.request.POST['reminderId'] @property @memoized def reminder(self): return CaseReminderHandler.get(self.reminder_id) def 
_fmt_reminder_data(self, reminder):
        return {
            'id': reminder._id,
            'isActive': reminder.active,
            'caseType': reminder.case_type,
            'name': reminder.nickname,
            'url': reverse(EditScheduledReminderView.urlname,
                           args=[self.domain, reminder._id]),
        }

    def get_action_response(self, active):
        try:
            self.reminder.active = active
            self.reminder.save()
            return {
                'success': True,
                'reminder': self._fmt_reminder_data(self.reminder),
            }
        except Exception as e:
            return {
                'success': False,
                # stringify the exception so the payload stays JSON-serializable
                'error': str(e),
            }

    def post(self, *args, **kwargs):
        action = self.request.POST.get('action')
        if action in ['activate', 'deactivate']:
            return HttpResponse(json.dumps(
                self.get_action_response(action == 'activate')))
        # Unknown or missing action: respond with a JSON failure payload
        return HttpResponse(json.dumps({
            'success': False,
        }))


class KeywordsListView(BaseMessagingSectionView, CRUDPaginatedViewMixin):
    template_name = 'reminders/keyword_list.html'
    urlname = 'keyword_list'
    page_title = ugettext_noop("Keywords")

    limit_text = ugettext_noop("keywords per page")
    empty_notification = ugettext_noop("You have no keywords. Please add one!")
    loading_message = ugettext_noop("Loading keywords...")

    @property
    def page_url(self):
        return reverse(self.urlname, args=[self.domain])

    @property
    def parameters(self):
        return self.request.POST if self.request.method == 'POST' else self.request.GET

    @property
    @memoized
    def total(self):
        data = CommCareCaseGroup.get_db().view(
            'reminders/survey_keywords',
            reduce=True,
            startkey=[self.domain],
            endkey=[self.domain, {}],
        ).first()
        return data['value'] if data else 0

    @property
    def column_names(self):
        return [
            _("Keyword"),
            _("Description"),
        ]

    @property
    def page_context(self):
        return self.pagination_context

    @property
    def paginated_list(self):
        for keyword in SurveyKeyword.get_by_domain(
            self.domain,
            limit=self.limit,
            skip=self.skip,
        ):
            yield {
                'itemData': {
                    'id': keyword._id,
                    'keyword': keyword.keyword,
                    'description': keyword.description,
                    'editUrl': reverse('edit_keyword',
                                       args=[self.domain, keyword._id]),
                },
                'template': 'keyword-row-template',
            }

    def post(self, *args, **kwargs):
        return self.paginate_crud_response
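# A minimal sketch of how a few of the reminder views above could be wired
# into a Django 1.5-era urls.py. The regex patterns and the
# ``reminders_views`` import alias are assumptions made for illustration;
# only the view callables, the class ``urlname`` attributes, and the names
# passed to reverse() in the module above come from the source.
from django.conf.urls import patterns, url

from corehq.apps.reminders import views as reminders_views

urlpatterns = patterns('',
    url(r'^all/$', reminders_views.list_reminders, name='list_reminders'),
    url(r'^keywords/$', reminders_views.manage_keywords,
        name='manage_keywords'),
    url(r'^schedule/$',
        reminders_views.CreateScheduledReminderView.as_view(),
        name=reminders_views.CreateScheduledReminderView.urlname),
    url(r'^schedule/(?P<handler_id>[\w-]+)/$',
        reminders_views.EditScheduledReminderView.as_view(),
        name=reminders_views.EditScheduledReminderView.urlname),
    url(r'^list/$',
        reminders_views.RemindersListView.as_view(),
        name=reminders_views.RemindersListView.urlname),
    url(r'^keywords/list/$',
        reminders_views.KeywordsListView.as_view(),
        name=reminders_views.KeywordsListView.urlname),
)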
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'Language' db.create_table('journalmanager_language', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('iso_code', self.gf('django.db.models.fields.CharField')(max_length=2)), ('name', self.gf('django.db.models.fields.CharField')(max_length=64)), )) db.send_create_signal('journalmanager', ['Language']) # Adding model 'UserProfile' db.create_table('journalmanager_userprofile', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)), ('email', self.gf('django.db.models.fields.EmailField')(unique=True, max_length=75)), )) db.send_create_signal('journalmanager', ['UserProfile']) # Adding model 'Collection' db.create_table('journalmanager_collection', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)), ('name_slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=50, unique=True, null=True, blank=True)), ('url', self.gf('django.db.models.fields.URLField')(max_length=200)), ('logo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)), ('acronym', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=16, blank=True)), ('country', self.gf('django.db.models.fields.CharField')(max_length=32)), ('state', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)), ('address', self.gf('django.db.models.fields.TextField')()), ('address_number', self.gf('django.db.models.fields.CharField')(max_length=8)), ('address_complement', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)), ('zip_code', self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True)), ('phone', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)), ('fax', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), )) db.send_create_signal('journalmanager', ['Collection']) # Adding model 'UserCollections' db.create_table('journalmanager_usercollections', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), ('collection', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['journalmanager.Collection'])), ('is_default', self.gf('django.db.models.fields.BooleanField')(default=False)), ('is_manager', self.gf('django.db.models.fields.BooleanField')(default=False)), )) db.send_create_signal('journalmanager', ['UserCollections']) # Adding model 'Institution' db.create_table('journalmanager_institution', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=256, db_index=True)), ('complement', self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('acronym', 
self.gf('django.db.models.fields.CharField')(db_index=True, max_length=16, blank=True)), ('country', self.gf('django.db.models.fields.CharField')(max_length=32)), ('state', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)), ('city', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)), ('address', self.gf('django.db.models.fields.TextField')()), ('address_number', self.gf('django.db.models.fields.CharField')(max_length=8)), ('address_complement', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)), ('zip_code', self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True)), ('phone', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)), ('fax', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)), ('cel', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)), ('is_trashed', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)), )) db.send_create_signal('journalmanager', ['Institution']) # Adding model 'Publisher' db.create_table('journalmanager_publisher', ( ('institution_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['journalmanager.Institution'], unique=True, primary_key=True)), )) db.send_create_signal('journalmanager', ['Publisher']) # Adding M2M table for field collections on 'Publisher' db.create_table('journalmanager_publisher_collections', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('publisher', models.ForeignKey(orm['journalmanager.publisher'], null=False)), ('collection', models.ForeignKey(orm['journalmanager.collection'], null=False)) )) db.create_unique('journalmanager_publisher_collections', ['publisher_id', 'collection_id']) # Adding model 'Sponsor' db.create_table('journalmanager_sponsor', ( ('institution_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['journalmanager.Institution'], unique=True, primary_key=True)), )) db.send_create_signal('journalmanager', ['Sponsor']) # Adding M2M table for field collections on 'Sponsor' db.create_table('journalmanager_sponsor_collections', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('sponsor', models.ForeignKey(orm['journalmanager.sponsor'], null=False)), ('collection', models.ForeignKey(orm['journalmanager.collection'], null=False)) )) db.create_unique('journalmanager_sponsor_collections', ['sponsor_id', 'collection_id']) # Adding model 'Journal' db.create_table('journalmanager_journal', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('creator', self.gf('django.db.models.fields.related.ForeignKey')(related_name='enjoy_creator', to=orm['auth.User'])), ('publisher', self.gf('django.db.models.fields.related.ForeignKey')(related_name='publishers', to=orm['journalmanager.Publisher'])), ('previous_title', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='prev_title', null=True, to=orm['journalmanager.Journal'])), ('use_license', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['journalmanager.UseLicense'])), ('national_code', self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=256, db_index=True)), ('title_iso', self.gf('django.db.models.fields.CharField')(max_length=256, db_index=True)), ('short_title', 
self.gf('django.db.models.fields.CharField')(max_length=256, null=True, db_index=True)), ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('acronym', self.gf('django.db.models.fields.CharField')(max_length=16)), ('scielo_issn', self.gf('django.db.models.fields.CharField')(max_length=16)), ('print_issn', self.gf('django.db.models.fields.CharField')(max_length=9)), ('eletronic_issn', self.gf('django.db.models.fields.CharField')(max_length=9)), ('subject_descriptors', self.gf('django.db.models.fields.CharField')(max_length=512)), ('init_year', self.gf('django.db.models.fields.CharField')(max_length=16)), ('init_vol', self.gf('django.db.models.fields.CharField')(max_length=16)), ('init_num', self.gf('django.db.models.fields.CharField')(max_length=16)), ('final_year', self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True)), ('final_vol', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)), ('final_num', self.gf('django.db.models.fields.CharField')(max_length=16, blank=True)), ('frequency', self.gf('django.db.models.fields.CharField')(max_length=16)), ('pub_status', self.gf('django.db.models.fields.CharField')(default='inprogress', max_length=16, null=True, blank=True)), ('pub_status_reason', self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('pub_status_changed_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='pub_status_changed_by', to=orm['auth.User'])), ('editorial_standard', self.gf('django.db.models.fields.CharField')(max_length=64)), ('ctrl_vocabulary', self.gf('django.db.models.fields.CharField')(max_length=64)), ('pub_level', self.gf('django.db.models.fields.CharField')(max_length=64)), ('secs_code', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)), ('copyrighter', self.gf('django.db.models.fields.CharField')(max_length=254)), ('url_online_submission', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)), ('url_journal', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)), ('notes', self.gf('django.db.models.fields.TextField')(max_length=254, null=True, blank=True)), ('index_coverage', self.gf('django.db.models.fields.TextField')(null=True, blank=True)), ('cover', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)), ('logo', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)), ('is_trashed', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)), ('other_previous_title', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)), )) db.send_create_signal('journalmanager', ['Journal']) # Adding M2M table for field sponsor on 'Journal' db.create_table('journalmanager_journal_sponsor', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('journal', models.ForeignKey(orm['journalmanager.journal'], null=False)), ('sponsor', models.ForeignKey(orm['journalmanager.sponsor'], null=False)) )) db.create_unique('journalmanager_journal_sponsor', ['journal_id', 'sponsor_id']) # Adding M2M table for field collections on 'Journal' db.create_table('journalmanager_journal_collections', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('journal', models.ForeignKey(orm['journalmanager.journal'], null=False)), 
('collection', models.ForeignKey(orm['journalmanager.collection'], null=False)) )) db.create_unique('journalmanager_journal_collections', ['journal_id', 'collection_id']) # Adding M2M table for field languages on 'Journal' db.create_table('journalmanager_journal_languages', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('journal', models.ForeignKey(orm['journalmanager.journal'], null=False)), ('language', models.ForeignKey(orm['journalmanager.language'], null=False)) )) db.create_unique('journalmanager_journal_languages', ['journal_id', 'language_id']) # Adding M2M table for field abstract_keyword_languages on 'Journal' db.create_table('journalmanager_journal_abstract_keyword_languages', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('journal', models.ForeignKey(orm['journalmanager.journal'], null=False)), ('language', models.ForeignKey(orm['journalmanager.language'], null=False)) )) db.create_unique('journalmanager_journal_abstract_keyword_languages', ['journal_id', 'language_id']) # Adding model 'JournalPublicationEvents' db.create_table('journalmanager_journalpublicationevents', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('journal', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['journalmanager.Journal'])), ('status', self.gf('django.db.models.fields.CharField')(max_length=16)), ('reason', self.gf('django.db.models.fields.TextField')(default='', blank=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])), )) db.send_create_signal('journalmanager', ['JournalPublicationEvents']) # Adding model 'JournalStudyArea' db.create_table('journalmanager_journalstudyarea', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('journal', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['journalmanager.Journal'])), ('study_area', self.gf('django.db.models.fields.CharField')(max_length=256)), )) db.send_create_signal('journalmanager', ['JournalStudyArea']) # Adding model 'JournalTitle' db.create_table('journalmanager_journaltitle', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('journal', self.gf('django.db.models.fields.related.ForeignKey')(related_name='other_titles', to=orm['journalmanager.Journal'])), ('title', self.gf('django.db.models.fields.CharField')(max_length=128)), ('category', self.gf('django.db.models.fields.CharField')(max_length=128)), )) db.send_create_signal('journalmanager', ['JournalTitle']) # Adding model 'JournalMission' db.create_table('journalmanager_journalmission', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('journal', self.gf('django.db.models.fields.related.ForeignKey')(related_name='missions', to=orm['journalmanager.Journal'])), ('description', self.gf('django.db.models.fields.TextField')()), ('language', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['journalmanager.Language'], null=True)), )) db.send_create_signal('journalmanager', ['JournalMission']) # Adding model 'UseLicense' db.create_table('journalmanager_uselicense', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('license_code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)), ('reference_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)), ('disclaimer', 
self.gf('django.db.models.fields.TextField')(max_length=512, null=True, blank=True)), )) db.send_create_signal('journalmanager', ['UseLicense']) # Adding model 'TranslatedData' db.create_table('journalmanager_translateddata', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('translation', self.gf('django.db.models.fields.CharField')(max_length=512, null=True, blank=True)), ('language', self.gf('django.db.models.fields.CharField')(max_length=32)), ('model', self.gf('django.db.models.fields.CharField')(max_length=32)), ('field', self.gf('django.db.models.fields.CharField')(max_length=32)), )) db.send_create_signal('journalmanager', ['TranslatedData']) # Adding model 'SectionTitle' db.create_table('journalmanager_sectiontitle', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('section', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['journalmanager.Section'])), ('title', self.gf('django.db.models.fields.CharField')(max_length=256)), ('language', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['journalmanager.Language'])), )) db.send_create_signal('journalmanager', ['SectionTitle']) # Adding model 'Section' db.create_table('journalmanager_section', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('journal', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['journalmanager.Journal'])), ('code', self.gf('django.db.models.fields.CharField')(max_length=16, null=True, blank=True)), ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('is_trashed', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)), )) db.send_create_signal('journalmanager', ['Section']) # Adding model 'Issue' db.create_table('journalmanager_issue', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('journal', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['journalmanager.Journal'])), ('volume', self.gf('django.db.models.fields.CharField')(max_length=16)), ('number', self.gf('django.db.models.fields.CharField')(max_length=16)), ('is_press_release', self.gf('django.db.models.fields.BooleanField')(default=False)), ('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('publication_start_month', self.gf('django.db.models.fields.IntegerField')()), ('publication_end_month', self.gf('django.db.models.fields.IntegerField')()), ('publication_year', self.gf('django.db.models.fields.IntegerField')()), ('is_marked_up', self.gf('django.db.models.fields.BooleanField')(default=False)), ('use_license', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['journalmanager.UseLicense'], null=True)), ('total_documents', self.gf('django.db.models.fields.IntegerField')(default=0)), ('ctrl_vocabulary', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)), ('editorial_standard', self.gf('django.db.models.fields.CharField')(max_length=64)), ('cover', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)), ('is_trashed', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)), ('label', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=16, null=True, blank=True)), )) db.send_create_signal('journalmanager', 
['Issue']) # Adding M2M table for field section on 'Issue' db.create_table('journalmanager_issue_section', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('issue', models.ForeignKey(orm['journalmanager.issue'], null=False)), ('section', models.ForeignKey(orm['journalmanager.section'], null=False)) )) db.create_unique('journalmanager_issue_section', ['issue_id', 'section_id']) # Adding model 'IssueTitle' db.create_table('journalmanager_issuetitle', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('issue', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['journalmanager.Issue'])), ('language', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['journalmanager.Language'], null=True, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)), )) db.send_create_signal('journalmanager', ['IssueTitle']) # Adding model 'Supplement' db.create_table('journalmanager_supplement', ( ('issue_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['journalmanager.Issue'], unique=True, primary_key=True)), ('suppl_label', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)), )) db.send_create_signal('journalmanager', ['Supplement']) # Adding model 'PendedForm' db.create_table('journalmanager_pendedform', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('view_name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('form_hash', self.gf('django.db.models.fields.CharField')(max_length=32)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='pending_forms', to=orm['auth.User'])), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), )) db.send_create_signal('journalmanager', ['PendedForm']) # Adding model 'PendedValue' db.create_table('journalmanager_pendedvalue', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('form', self.gf('django.db.models.fields.related.ForeignKey')(related_name='data', to=orm['journalmanager.PendedForm'])), ('name', self.gf('django.db.models.fields.CharField')(max_length=255)), ('value', self.gf('django.db.models.fields.TextField')()), )) db.send_create_signal('journalmanager', ['PendedValue']) def backwards(self, orm): # Deleting model 'Language' db.delete_table('journalmanager_language') # Deleting model 'UserProfile' db.delete_table('journalmanager_userprofile') # Deleting model 'Collection' db.delete_table('journalmanager_collection') # Deleting model 'UserCollections' db.delete_table('journalmanager_usercollections') # Deleting model 'Institution' db.delete_table('journalmanager_institution') # Deleting model 'Publisher' db.delete_table('journalmanager_publisher') # Removing M2M table for field collections on 'Publisher' db.delete_table('journalmanager_publisher_collections') # Deleting model 'Sponsor' db.delete_table('journalmanager_sponsor') # Removing M2M table for field collections on 'Sponsor' db.delete_table('journalmanager_sponsor_collections') # Deleting model 'Journal' db.delete_table('journalmanager_journal') # Removing M2M table for field sponsor on 'Journal' db.delete_table('journalmanager_journal_sponsor') # Removing M2M table for field collections on 'Journal' db.delete_table('journalmanager_journal_collections') # Removing M2M table for field languages on 'Journal' db.delete_table('journalmanager_journal_languages') # Removing M2M table for field 
abstract_keyword_languages on 'Journal' db.delete_table('journalmanager_journal_abstract_keyword_languages') # Deleting model 'JournalPublicationEvents' db.delete_table('journalmanager_journalpublicationevents') # Deleting model 'JournalStudyArea' db.delete_table('journalmanager_journalstudyarea') # Deleting model 'JournalTitle' db.delete_table('journalmanager_journaltitle') # Deleting model 'JournalMission' db.delete_table('journalmanager_journalmission') # Deleting model 'UseLicense' db.delete_table('journalmanager_uselicense') # Deleting model 'TranslatedData' db.delete_table('journalmanager_translateddata') # Deleting model 'SectionTitle' db.delete_table('journalmanager_sectiontitle') # Deleting model 'Section' db.delete_table('journalmanager_section') # Deleting model 'Issue' db.delete_table('journalmanager_issue') # Removing M2M table for field section on 'Issue' db.delete_table('journalmanager_issue_section') # Deleting model 'IssueTitle' db.delete_table('journalmanager_issuetitle') # Deleting model 'Supplement' db.delete_table('journalmanager_supplement') # Deleting model 'PendedForm' db.delete_table('journalmanager_pendedform') # Deleting model 'PendedValue' db.delete_table('journalmanager_pendedvalue') models = { 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 
'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'journalmanager.collection': { 'Meta': {'ordering': "['name']", 'object_name': 'Collection'}, 'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}), 'address': ('django.db.models.fields.TextField', [], {}), 'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'collection': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_collection'", 'to': "orm['auth.User']", 'through': "orm['journalmanager.UserCollections']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}), 'name_slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}), 'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}) }, 'journalmanager.institution': { 'Meta': {'ordering': "['name']", 'object_name': 'Institution'}, 'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}), 'address': ('django.db.models.fields.TextField', [], {}), 'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}), 'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}), 'cel': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'complement': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), 'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}), 'phone': 
('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}) }, 'journalmanager.issue': { 'Meta': {'object_name': 'Issue'}, 'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_marked_up': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_press_release': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}), 'label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'null': 'True', 'blank': 'True'}), 'number': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'publication_end_month': ('django.db.models.fields.IntegerField', [], {}), 'publication_start_month': ('django.db.models.fields.IntegerField', [], {}), 'publication_year': ('django.db.models.fields.IntegerField', [], {}), 'section': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Section']", 'symmetrical': 'False', 'blank': 'True'}), 'total_documents': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']", 'null': 'True'}), 'volume': ('django.db.models.fields.CharField', [], {'max_length': '16'}) }, 'journalmanager.issuetitle': { 'Meta': {'object_name': 'IssueTitle'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Issue']"}), 'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']", 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}) }, 'journalmanager.journal': { 'Meta': {'ordering': "['title']", 'object_name': 'Journal'}, 'abstract_keyword_languages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'abstract_keyword_languages'", 'symmetrical': 'False', 'to': "orm['journalmanager.Language']"}), 'acronym': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'symmetrical': 'False'}), 'copyrighter': ('django.db.models.fields.CharField', [], {'max_length': '254'}), 'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enjoy_creator'", 'to': "orm['auth.User']"}), 'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'eletronic_issn': ('django.db.models.fields.CharField', [], {'max_length': '9'}), 'final_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'final_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}), 'final_year': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}), 'frequency': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'index_coverage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'init_num': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'init_vol': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'init_year': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Language']", 'symmetrical': 'False'}), 'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'national_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}), 'notes': ('django.db.models.fields.TextField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}), 'other_previous_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'previous_title': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'prev_title'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}), 'print_issn': ('django.db.models.fields.CharField', [], {'max_length': '9'}), 'pub_level': ('django.db.models.fields.CharField', [], {'max_length': '64'}), 'pub_status': ('django.db.models.fields.CharField', [], {'default': "'inprogress'", 'max_length': '16', 'null': 'True', 'blank': 'True'}), 'pub_status_changed_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pub_status_changed_by'", 'to': "orm['auth.User']"}), 'pub_status_reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'publisher': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'publishers'", 'to': "orm['journalmanager.Publisher']"}), 'scielo_issn': ('django.db.models.fields.CharField', [], {'max_length': '16'}), 'secs_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}), 'short_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_index': 'True'}), 'sponsor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'journal_sponsor'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['journalmanager.Sponsor']"}), 'subject_descriptors': ('django.db.models.fields.CharField', [], {'max_length': '512'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}), 'title_iso': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 
'True', 'blank': 'True'}), 'url_journal': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'url_online_submission': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}), 'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']"}) }, 'journalmanager.journalmission': { 'Meta': {'object_name': 'JournalMission'}, 'description': ('django.db.models.fields.TextField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'missions'", 'to': "orm['journalmanager.Journal']"}), 'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']", 'null': 'True'}) }, 'journalmanager.journalpublicationevents': { 'Meta': {'object_name': 'JournalPublicationEvents'}, 'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}), 'reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'max_length': '16'}) }, 'journalmanager.journalstudyarea': { 'Meta': {'object_name': 'JournalStudyArea'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}), 'study_area': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'journalmanager.journaltitle': { 'Meta': {'object_name': 'JournalTitle'}, 'category': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'other_titles'", 'to': "orm['journalmanager.Journal']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'journalmanager.language': { 'Meta': {'ordering': "['name']", 'object_name': 'Language'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'iso_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, 'journalmanager.pendedform': { 'Meta': {'object_name': 'PendedForm'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'form_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_forms'", 'to': "orm['auth.User']"}), 'view_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'journalmanager.pendedvalue': { 'Meta': {'object_name': 'PendedValue'}, 'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data'", 'to': "orm['journalmanager.PendedForm']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'value': ('django.db.models.fields.TextField', [], {}) }, 'journalmanager.publisher': { 
'Meta': {'ordering': "['name']", 'object_name': 'Publisher', '_ormbases': ['journalmanager.Institution']}, 'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'symmetrical': 'False'}), 'institution_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.Institution']", 'unique': 'True', 'primary_key': 'True'}) }, 'journalmanager.section': { 'Meta': {'object_name': 'Section'}, 'code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}), 'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}), 'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, 'journalmanager.sectiontitle': { 'Meta': {'object_name': 'SectionTitle'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}), 'section': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Section']"}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, 'journalmanager.sponsor': { 'Meta': {'ordering': "['name']", 'object_name': 'Sponsor', '_ormbases': ['journalmanager.Institution']}, 'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'symmetrical': 'False'}), 'institution_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.Institution']", 'unique': 'True', 'primary_key': 'True'}) }, 'journalmanager.supplement': { 'Meta': {'object_name': 'Supplement', '_ormbases': ['journalmanager.Issue']}, 'issue_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.Issue']", 'unique': 'True', 'primary_key': 'True'}), 'suppl_label': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}) }, 'journalmanager.translateddata': { 'Meta': {'object_name': 'TranslatedData'}, 'field': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'translation': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}) }, 'journalmanager.uselicense': { 'Meta': {'object_name': 'UseLicense'}, 'disclaimer': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'license_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}), 'reference_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, 'journalmanager.usercollections': { 'Meta': {'object_name': 'UserCollections'}, 'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_default': 
('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'journalmanager.userprofile': { 'Meta': {'object_name': 'UserProfile'}, 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}) } } complete_apps = ['journalmanager']
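
# Each self.gf('django.db.models.fields.X')(...) call in the migration above
# freezes an ordinary Django field definition. For orientation only, the
# 'journalmanager_language' table created by forwards() corresponds roughly
# to the following model sketch, written as it would sit in the app's
# models.py (field lengths and the Meta ordering are taken from the frozen
# ORM state above; the sketch is illustrative, not the project's actual
# models.py):
from django.db import models


class Language(models.Model):
    iso_code = models.CharField(max_length=2)
    name = models.CharField(max_length=64)

    class Meta:
        ordering = ['name']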
# pylint: disable=E1101
"""
BaseWorker.py
"""
import random
import logging
import logging.config
import msgpack
import zmq
from zmq.log.handlers import PUBHandler
from zmq.eventloop.zmqstream import ZMQStream
from zmq.eventloop.ioloop import IOLoop, DelayedCallback, PeriodicCallback
from cps2zmq.gather import mdp, log


class BaseWorker(object):
    """
    Base class for workers that process messages.

    Attributes:
        idn (str): The worker's 'id'. This is an address that is appended to the front \
            of messages it sends to the server.
        context (:obj:`zmq.Context`): required by ZMQ to make the magic happen.
            The other side of the socket is usually bound by a MameServer.
        loop (IOLoop):
        socket_addr (str): The address to connect the frontend socket to.
        front (:obj:`zmq.Context.socket`): Requests for work are sent out from here. \
            Work is received on here.
        msgs_recv (int): Counts how many messages this worker received
    """
    HB_INTERVAL = 1000
    HB_LIVENESS = 3

    def __init__(self, idn, front_addr, service, pub_addr=None, log_to_file=False):
        self.idn = bytes(idn, encoding='UTF-8')
        self.service = service
        self.front_addr = front_addr
        self.frontstream = None
        self.heartbeater = None
        self.pub_addr = pub_addr
        self.publish = None
        self.current_liveness = 3
        self.msgs_recv = 0
        self._protocol = b'MDPW01'
        self._logger = None

        self.setup_logging(log_to_file)
        self.setup()

    def setup(self):
        """
        Sets up networking and the heartbeat callback.
        """
        self._logger.info('Setting worker up')
        # logging.info('Setting worker up')
        context = zmq.Context.instance()

        front = context.socket(zmq.DEALER)
        front.setsockopt(zmq.IDENTITY, self.idn)
        front.setsockopt(zmq.LINGER, 0)
        self.frontstream = ZMQStream(front, IOLoop.instance())
        self.frontstream.on_recv(self.handle_message)
        self.frontstream.connect(self.front_addr)

        if self.pub_addr:
            self.publish = context.socket(zmq.PUB)
            self.publish.setsockopt(zmq.LINGER, 0)
            self.publish.connect(self.pub_addr)

            ph = PUBHandler(self.publish)
            name = '.'.join([self.__class__.__name__, str(self.idn, encoding='utf-8')])
            ph.root_topic = name
            ph.setLevel(logging.INFO)
            self._logger.addHandler(ph)

        self.heartbeater = PeriodicCallback(self.beat, self.HB_INTERVAL)
        self.ready(self.frontstream, self.service)
        self.heartbeater.start()

    def setup_logging(self, log_to_file):
        name = '.'.join([self.__class__.__name__, str(self.idn, encoding='utf-8')])
        self._logger = log.configure(name, fhandler=log_to_file)

    def close(self):
        """
        Closes all sockets and the heartbeat callback.
        """
        self._logger.info('Closing')

        if self.publish:
            self.publish.close()
            self.publish = None

        if self.heartbeater:
            self.heartbeater.stop()
            self.heartbeater = None

        if self.frontstream:
            self.frontstream.socket.close()
            self.frontstream.close()
            self.frontstream = None

    def start(self):
        """
        Starts the worker.
        """
        self._logger.info('Starting')
        IOLoop.instance().start()

    def handle_message(self, msg):
        """
        A callback. Handles message when it is received.
        """
        self.current_liveness = self.HB_LIVENESS

        empty = msg.pop(0)
        protocol = msg.pop(0)
        command = msg.pop(0)

        if command == mdp.DISCONNECT:
            self._logger.info('Received disconnect command')
            IOLoop.instance().stop()
            # self.close()
        elif command == mdp.REQUEST:
            self._logger.info('Received request command')
            self.handle_request(msg)
        else:
            # print('Error', self.__class__.__name__, self.idn, 'received', command, 'command')
            # sys.stdout.flush()
            # raise mdp.UnsupportedCommandException(command)
            pass

    def handle_request(self, msg):
        """
        Callback. Handles a work request.
        """
        client_addr, _, message = msg
        self.msgs_recv += 1

        try:
            unpacked = msgpack.unpackb(message, encoding='utf-8')
        except msgpack.exceptions.UnpackValueError:
            self._logger.error('Failed to unpack', exc_info=True)
            IOLoop.instance().stop()
            # nothing was unpacked, so there is nothing to process or reply to
            return

        processed = self.process(unpacked)
        packed = msgpack.packb(processed)

        try:
            self.reply(self.frontstream, client_addr, packed)
        except TypeError:
            self._logger.error('Encountered error', exc_info=True)

    def report(self):
        """
        Report stats.
        """
        self._logger.info('Received %s messages total', self.msgs_recv)

    def beat(self):
        """
        A callback. Sends heartbeat and checks if worker has lost connection.
        """
        self.heartbeat(self.frontstream)

        if self.current_liveness < 0:
            self._logger.info('Lost connection - timed out')
            IOLoop.instance().stop()
            # this would reconnect the worker
            # delayed = DelayedCallback(self.setup, 5000)
            # delayed.start()

    def process(self, message):
        """
        Should be overridden.
        """
        return message

    def ready(self, socket, service):
        """
        Helper function. Ready message is sent once upon connection to the server.
        """
        self.current_liveness = self.HB_LIVENESS
        self._logger.debug('Sending ready')
        socket.send_multipart([b'', self._protocol, mdp.READY, service])

    def reply(self, socket, client_addr, message):
        """
        Helper function. Sent upon completion of work.
        """
        reply_msg = [b'', self._protocol, mdp.REPLY, client_addr, b'', message]
        self._logger.debug('Sending reply')
        socket.send_multipart(reply_msg)

    def heartbeat(self, socket):
        """
        Helper function. Sent periodically.
        """
        self.current_liveness -= 1
        # Set up ability to filter this out - Probably set up a different logger?
        # self._logger.debug('Sending heartbeat')
        socket.send_multipart([b'', self._protocol, mdp.HEARTBEAT])

    def disconnect(self, socket):
        """
        Helper function.
        """
        self._logger.debug('Sending disconnect')
        socket.send_multipart([b'', self._protocol, mdp.DISCONNECT])


if __name__ == '__main__':
    worker = BaseWorker(str(random.randint(69, 420)),
                        "tcp://127.0.0.1:5557",
                        b'mame',
                        "tcp://127.0.0.1:5558")
    worker.start()
    worker.report()
    worker.close()
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from swift import gettext_ as _
import time

from six.moves.urllib.parse import unquote

from swift.common.utils import public, csv_append, Timestamp
from swift.common.constraints import check_metadata
from swift.common import constraints
from swift.common.http import HTTP_ACCEPTED, is_success
from swift.proxy.controllers.base import Controller, delay_denial, \
    cors_validation, clear_info_cache
from swift.common.storage_policy import POLICIES
from swift.common.swob import HTTPBadRequest, HTTPForbidden, \
    HTTPNotFound


class ContainerController(Controller):
    """WSGI controller for container requests"""
    server_type = 'Container'

    # Ensure these are all lowercase
    pass_through_headers = ['x-container-read', 'x-container-write',
                            'x-container-sync-key', 'x-container-sync-to',
                            'x-versions-location']

    def __init__(self, app, account_name, container_name, **kwargs):
        Controller.__init__(self, app)
        self.account_name = unquote(account_name)
        self.container_name = unquote(container_name)

    def _x_remove_headers(self):
        st = self.server_type.lower()
        return ['x-remove-%s-read' % st,
                'x-remove-%s-write' % st,
                'x-remove-versions-location',
                'x-remove-%s-sync-key' % st,
                'x-remove-%s-sync-to' % st]

    def _convert_policy_to_index(self, req):
        """
        Helper method to convert a policy name (from a request from a client)
        to a policy index (for a request to a backend).

        :param req: incoming request
        """
        policy_name = req.headers.get('X-Storage-Policy')
        if not policy_name:
            return
        policy = POLICIES.get_by_name(policy_name)
        if not policy:
            raise HTTPBadRequest(request=req,
                                 content_type="text/plain",
                                 body=("Invalid %s '%s'"
                                       % ('X-Storage-Policy', policy_name)))
        if policy.is_deprecated:
            body = 'Storage Policy %r is deprecated' % (policy.name)
            raise HTTPBadRequest(request=req, body=body)
        return int(policy)

    def clean_acls(self, req):
        if 'swift.clean_acl' in req.environ:
            for header in ('x-container-read', 'x-container-write'):
                if header in req.headers:
                    try:
                        req.headers[header] = \
                            req.environ['swift.clean_acl'](header,
                                                           req.headers[header])
                    except ValueError as err:
                        return HTTPBadRequest(request=req, body=str(err))
        return None

    def GETorHEAD(self, req):
        """Handler for HTTP GET/HEAD requests."""
        if not self.account_info(self.account_name, req)[1]:
            if 'swift.authorize' in req.environ:
                aresp = req.environ['swift.authorize'](req)
                if aresp:
                    return aresp
            return HTTPNotFound(request=req)
        part = self.app.container_ring.get_part(
            self.account_name, self.container_name)
        node_iter = self.app.iter_nodes(self.app.container_ring, part)
        resp = self.GETorHEAD_base(
            req, _('Container'), node_iter, part, req.swift_entity_path)
        if 'swift.authorize' in req.environ:
            req.acl = resp.headers.get('x-container-read')
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp
        if not req.environ.get('swift_owner', False):
            for key in self.app.swift_owner_headers:
                if key in resp.headers:
                    del resp.headers[key]
        return resp

    @public
    @delay_denial
    @cors_validation
    def GET(self, req):
        """Handler for HTTP GET requests."""
        return self.GETorHEAD(req)

    @public
    @delay_denial
    @cors_validation
    def HEAD(self, req):
        """Handler for HTTP HEAD requests."""
        return self.GETorHEAD(req)

    @public
    @cors_validation
    def PUT(self, req):
        """HTTP PUT request handler."""
        error_response = \
            self.clean_acls(req) or check_metadata(req, 'container')
        if error_response:
            return error_response
        policy_index = self._convert_policy_to_index(req)
        if not req.environ.get('swift_owner'):
            for key in self.app.swift_owner_headers:
                req.headers.pop(key, None)
        if len(self.container_name) > constraints.MAX_CONTAINER_NAME_LENGTH:
            resp = HTTPBadRequest(request=req)
            resp.body = 'Container name length of %d longer than %d' % \
                        (len(self.container_name),
                         constraints.MAX_CONTAINER_NAME_LENGTH)
            return resp
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts and self.app.account_autocreate:
            self.autocreate_account(req, self.account_name)
            account_partition, accounts, container_count = \
                self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)
        if self.app.max_containers_per_account > 0 and \
                container_count >= self.app.max_containers_per_account and \
                self.account_name not in self.app.max_containers_whitelist:
            container_info = \
                self.container_info(self.account_name, self.container_name,
                                    req)
            if not is_success(container_info.get('status')):
                resp = HTTPForbidden(request=req)
                resp.body = 'Reached container limit of %s' % \
                    self.app.max_containers_per_account
                return resp
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self._backend_requests(req, len(containers),
                                         account_partition,
                                         accounts, policy_index)
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        resp = self.make_requests(
            req, self.app.container_ring,
            container_partition, 'PUT', req.swift_entity_path,
            headers)
        return resp

    @public
    @cors_validation
    def POST(self, req):
        """HTTP POST request handler."""
        error_response = \
            self.clean_acls(req) or check_metadata(req, 'container')
        if error_response:
            return error_response
        if not req.environ.get('swift_owner'):
            for key in self.app.swift_owner_headers:
                req.headers.pop(key, None)
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self.generate_request_headers(req, transfer=True)
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        resp = self.make_requests(
            req, self.app.container_ring, container_partition, 'POST',
            req.swift_entity_path, [headers] * len(containers))
        return resp

    @public
    @cors_validation
    def DELETE(self, req):
        """HTTP DELETE request handler."""
        account_partition, accounts, container_count = \
            self.account_info(self.account_name, req)
        if not accounts:
            return HTTPNotFound(request=req)
        container_partition, containers = self.app.container_ring.get_nodes(
            self.account_name, self.container_name)
        headers = self._backend_requests(req, len(containers),
                                         account_partition, accounts)
        clear_info_cache(self.app, req.environ,
                         self.account_name, self.container_name)
        resp = self.make_requests(
            req, self.app.container_ring, container_partition, 'DELETE',
            req.swift_entity_path, headers)
        # Indicates no server had the container
        if resp.status_int == HTTP_ACCEPTED:
            return HTTPNotFound(request=req)
        return resp

    def _backend_requests(self, req, n_outgoing, account_partition, accounts,
                          policy_index=None):
        additional = {'X-Timestamp': Timestamp(time.time()).internal}
        if policy_index is None:
            additional['X-Backend-Storage-Policy-Default'] = \
                int(POLICIES.default)
        else:
            additional['X-Backend-Storage-Policy-Index'] = str(policy_index)
        headers = [self.generate_request_headers(req, transfer=True,
                                                 additional=additional)
                   for _junk in range(n_outgoing)]
        for i, account in enumerate(accounts):
            i = i % len(headers)
            headers[i]['X-Account-Partition'] = account_partition
            headers[i]['X-Account-Host'] = csv_append(
                headers[i].get('X-Account-Host'),
                '%(ip)s:%(port)s' % account)
            headers[i]['X-Account-Device'] = csv_append(
                headers[i].get('X-Account-Device'),
                account['device'])
        return headers
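
# _backend_requests() above fans one client request out to every container
# replica while spreading the account replica locations across the per-node
# header dicts round-robin (the `i % len(headers)` step), so the account
# update work is shared between the container servers instead of every
# server notifying every account replica. A standalone sketch of just that
# distribution step, using plain dicts in place of Swift's
# generate_request_headers/csv_append (the function name and dict layout
# here are illustrative only):
def spread_account_hosts(n_outgoing, accounts):
    headers = [{} for _ in range(n_outgoing)]
    for i, account in enumerate(accounts):
        i = i % len(headers)
        host = '%(ip)s:%(port)s' % account
        existing = headers[i].get('X-Account-Host')
        # emulate csv_append(): comma-join when the header already has a value
        headers[i]['X-Account-Host'] = host if not existing else existing + ',' + host
    return headers

# e.g. spread_account_hosts(3, [{'ip': '10.0.0.1', 'port': 6002},
#                               {'ip': '10.0.0.2', 'port': 6002}])
# gives the first two header dicts one account host each and leaves the
# third without an X-Account-Host entry.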
import numpy as np import pytest import pytz import pandas as pd from pandas import DataFrame, Index, Series, date_range import pandas._testing as tm class TestDataFrameAlign: def test_frame_align_aware(self): idx1 = date_range("2001", periods=5, freq="H", tz="US/Eastern") idx2 = date_range("2001", periods=5, freq="2H", tz="US/Eastern") df1 = DataFrame(np.random.randn(len(idx1), 3), idx1) df2 = DataFrame(np.random.randn(len(idx2), 3), idx2) new1, new2 = df1.align(df2) assert df1.index.tz == new1.index.tz assert df2.index.tz == new2.index.tz # different timezones convert to UTC # frame with frame df1_central = df1.tz_convert("US/Central") new1, new2 = df1.align(df1_central) assert new1.index.tz == pytz.UTC assert new2.index.tz == pytz.UTC # frame with Series new1, new2 = df1.align(df1_central[0], axis=0) assert new1.index.tz == pytz.UTC assert new2.index.tz == pytz.UTC df1[0].align(df1_central, axis=0) assert new1.index.tz == pytz.UTC assert new2.index.tz == pytz.UTC def test_align_float(self, float_frame): af, bf = float_frame.align(float_frame) assert af._mgr is not float_frame._mgr af, bf = float_frame.align(float_frame, copy=False) assert af._mgr is float_frame._mgr # axis = 0 other = float_frame.iloc[:-5, :3] af, bf = float_frame.align(other, axis=0, fill_value=-1) tm.assert_index_equal(bf.columns, other.columns) # test fill value join_idx = float_frame.index.join(other.index) diff_a = float_frame.index.difference(join_idx) diff_b = other.index.difference(join_idx) diff_a_vals = af.reindex(diff_a).values diff_b_vals = bf.reindex(diff_b).values assert (diff_a_vals == -1).all() af, bf = float_frame.align(other, join="right", axis=0) tm.assert_index_equal(bf.columns, other.columns) tm.assert_index_equal(bf.index, other.index) tm.assert_index_equal(af.index, other.index) # axis = 1 other = float_frame.iloc[:-5, :3].copy() af, bf = float_frame.align(other, axis=1) tm.assert_index_equal(bf.columns, float_frame.columns) tm.assert_index_equal(bf.index, other.index) # test fill value join_idx = float_frame.index.join(other.index) diff_a = float_frame.index.difference(join_idx) diff_b = other.index.difference(join_idx) diff_a_vals = af.reindex(diff_a).values # TODO(wesm): unused? 
diff_b_vals = bf.reindex(diff_b).values # noqa assert (diff_a_vals == -1).all() af, bf = float_frame.align(other, join="inner", axis=1) tm.assert_index_equal(bf.columns, other.columns) af, bf = float_frame.align(other, join="inner", axis=1, method="pad") tm.assert_index_equal(bf.columns, other.columns) af, bf = float_frame.align( other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=None ) tm.assert_index_equal(bf.index, Index([])) af, bf = float_frame.align( other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0 ) tm.assert_index_equal(bf.index, Index([])) # Try to align DataFrame to Series along bad axis msg = "No axis named 2 for object type DataFrame" with pytest.raises(ValueError, match=msg): float_frame.align(af.iloc[0, :3], join="inner", axis=2) # align dataframe to series with broadcast or not idx = float_frame.index s = Series(range(len(idx)), index=idx) left, right = float_frame.align(s, axis=0) tm.assert_index_equal(left.index, float_frame.index) tm.assert_index_equal(right.index, float_frame.index) assert isinstance(right, Series) left, right = float_frame.align(s, broadcast_axis=1) tm.assert_index_equal(left.index, float_frame.index) expected = {c: s for c in float_frame.columns} expected = DataFrame( expected, index=float_frame.index, columns=float_frame.columns ) tm.assert_frame_equal(right, expected) # see gh-9558 df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) result = df[df["a"] == 2] expected = DataFrame([[2, 5]], index=[1], columns=["a", "b"]) tm.assert_frame_equal(result, expected) result = df.where(df["a"] == 2, 0) expected = DataFrame({"a": [0, 2, 0], "b": [0, 5, 0]}) tm.assert_frame_equal(result, expected) def test_align_int(self, int_frame): # test other non-float types other = DataFrame(index=range(5), columns=["A", "B", "C"]) af, bf = int_frame.align(other, join="inner", axis=1, method="pad") tm.assert_index_equal(bf.columns, other.columns) def test_align_mixed_type(self, float_string_frame): af, bf = float_string_frame.align( float_string_frame, join="inner", axis=1, method="pad" ) tm.assert_index_equal(bf.columns, float_string_frame.columns) def test_align_mixed_float(self, mixed_float_frame): # mixed floats/ints other = DataFrame(index=range(5), columns=["A", "B", "C"]) af, bf = mixed_float_frame.align( other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0 ) tm.assert_index_equal(bf.index, Index([])) def test_align_mixed_int(self, mixed_int_frame): other = DataFrame(index=range(5), columns=["A", "B", "C"]) af, bf = mixed_int_frame.align( other.iloc[:, 0], join="inner", axis=1, method=None, fill_value=0 ) tm.assert_index_equal(bf.index, Index([])) @pytest.mark.parametrize( "l_ordered,r_ordered,expected", [ [True, True, pd.CategoricalIndex], [True, False, Index], [False, True, Index], [False, False, pd.CategoricalIndex], ], ) def test_align_categorical(self, l_ordered, r_ordered, expected): # GH-28397 df_1 = DataFrame( { "A": np.arange(6, dtype="int64"), "B": Series(list("aabbca")).astype( pd.CategoricalDtype(list("cab"), ordered=l_ordered) ), } ).set_index("B") df_2 = DataFrame( { "A": np.arange(5, dtype="int64"), "B": Series(list("babca")).astype( pd.CategoricalDtype(list("cab"), ordered=r_ordered) ), } ).set_index("B") aligned_1, aligned_2 = df_1.align(df_2) assert isinstance(aligned_1.index, expected) assert isinstance(aligned_2.index, expected) tm.assert_index_equal(aligned_1.index, aligned_2.index) def test_align_multiindex(self): # GH#10665 # same test cases as test_align_multiindex in test_series.py midx = 
pd.MultiIndex.from_product( [range(2), range(3), range(2)], names=("a", "b", "c") ) idx = Index(range(2), name="b") df1 = DataFrame(np.arange(12, dtype="int64"), index=midx) df2 = DataFrame(np.arange(2, dtype="int64"), index=idx) # these must be the same results (but flipped) res1l, res1r = df1.align(df2, join="left") res2l, res2r = df2.align(df1, join="right") expl = df1 tm.assert_frame_equal(expl, res1l) tm.assert_frame_equal(expl, res2r) expr = DataFrame([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx) tm.assert_frame_equal(expr, res1r) tm.assert_frame_equal(expr, res2l) res1l, res1r = df1.align(df2, join="right") res2l, res2r = df2.align(df1, join="left") exp_idx = pd.MultiIndex.from_product( [range(2), range(2), range(2)], names=("a", "b", "c") ) expl = DataFrame([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx) tm.assert_frame_equal(expl, res1l) tm.assert_frame_equal(expl, res2r) expr = DataFrame([0, 0, 1, 1] * 2, index=exp_idx) tm.assert_frame_equal(expr, res1r) tm.assert_frame_equal(expr, res2l) def test_align_series_combinations(self): df = DataFrame({"a": [1, 3, 5], "b": [1, 3, 5]}, index=list("ACE")) s = Series([1, 2, 4], index=list("ABD"), name="x") # frame + series res1, res2 = df.align(s, axis=0) exp1 = DataFrame( {"a": [1, np.nan, 3, np.nan, 5], "b": [1, np.nan, 3, np.nan, 5]}, index=list("ABCDE"), ) exp2 = Series([1, 2, np.nan, 4, np.nan], index=list("ABCDE"), name="x") tm.assert_frame_equal(res1, exp1) tm.assert_series_equal(res2, exp2) # series + frame res1, res2 = s.align(df) tm.assert_series_equal(res1, exp2) tm.assert_frame_equal(res2, exp1) def _check_align(self, a, b, axis, fill_axis, how, method, limit=None): aa, ab = a.align( b, axis=axis, join=how, method=method, limit=limit, fill_axis=fill_axis ) join_index, join_columns = None, None ea, eb = a, b if axis is None or axis == 0: join_index = a.index.join(b.index, how=how) ea = ea.reindex(index=join_index) eb = eb.reindex(index=join_index) if axis is None or axis == 1: join_columns = a.columns.join(b.columns, how=how) ea = ea.reindex(columns=join_columns) eb = eb.reindex(columns=join_columns) ea = ea.fillna(axis=fill_axis, method=method, limit=limit) eb = eb.fillna(axis=fill_axis, method=method, limit=limit) tm.assert_frame_equal(aa, ea) tm.assert_frame_equal(ab, eb) @pytest.mark.parametrize("meth", ["pad", "bfill"]) @pytest.mark.parametrize("ax", [0, 1, None]) @pytest.mark.parametrize("fax", [0, 1]) @pytest.mark.parametrize("how", ["inner", "outer", "left", "right"]) def test_align_fill_method(self, how, meth, ax, fax, float_frame): df = float_frame self._check_align_fill(df, how, meth, ax, fax) def _check_align_fill(self, frame, kind, meth, ax, fax): left = frame.iloc[0:4, :10] right = frame.iloc[2:, 6:] empty = frame.iloc[:0, :0] self._check_align(left, right, axis=ax, fill_axis=fax, how=kind, method=meth) self._check_align( left, right, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1 ) # empty left self._check_align(empty, right, axis=ax, fill_axis=fax, how=kind, method=meth) self._check_align( empty, right, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1 ) # empty right self._check_align(left, empty, axis=ax, fill_axis=fax, how=kind, method=meth) self._check_align( left, empty, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1 ) # both empty self._check_align(empty, empty, axis=ax, fill_axis=fax, how=kind, method=meth) self._check_align( empty, empty, axis=ax, fill_axis=fax, how=kind, method=meth, limit=1 )
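
# The tests above exercise DataFrame.align() along both axes with different
# join methods, fill values and fill methods. A minimal self-contained
# sketch of the behaviour asserted in test_align_float (outer join on the
# index plus a fill value instead of NaN); the frame contents are made up
# for illustration:
import pandas as pd

left = pd.DataFrame({"a": [1.0, 2.0]}, index=["x", "y"])
right = pd.DataFrame({"a": [10.0]}, index=["y"])

# Both results share the union of the two indexes; the row missing from
# `right` is filled with -1 rather than NaN, matching the fill_value checks
# in test_align_float above.
aligned_left, aligned_right = left.align(right, join="outer", axis=0, fill_value=-1)
assert list(aligned_left.index) == ["x", "y"]
assert aligned_right.loc["x", "a"] == -1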
# Copyright (c) 2020, Manfred Moitzi # License: MIT License import pytest import math from ezdxf.path import ( Path, make_path, converter, Command, tools, ) from ezdxf.math import ( Vec3, Vec2, Matrix44, Bezier4P, Bezier3P, close_vectors, OCS, ) from ezdxf.entities import ( factory, DXFEntity, Polymesh, LWPolyline, PolylinePath, EdgePath, Hatch, ) def test_init(): path = Path() assert path.start == (0, 0) assert len(path) == 0 assert path.end == (0, 0) def test_if_path_is_empty(): path = Path() assert bool(path) is False, "should work in boolean tests" assert not len(path), "len() should work in boolean tests" assert len(path) == 0, "len() should be 0" def test_if_path_is_not_empty(): path = Path(start=(1, 0)) path.line_to((2, 0)) assert bool(path) is True, "should work in boolean tests" assert len(path), "len() should work boolean tests" assert len(path) > 0, "len() should be > 0" def test_init_start(): path = Path(start=(1, 2)) assert path.start == (1, 2) def test_if_path_with_only_a_start_point_is_still_empty(): # Path() can not represent a point with any command path = Path(start=(1, 1)) assert bool(path) is False, "should work in boolean tests" assert not len(path), "len() should work in boolean tests" assert len(path) == 0, "len() should be 0" def test_line_to(): path = Path() path.line_to((1, 2, 3)) assert path[0] == (Vec3(1, 2, 3),) assert path.end == (1, 2, 3) def test_path_requires_a_command_to_represent_a_point(): path = Path((1, 1)) path.line_to((1, 1)) assert bool(path) assert len(path) > 0 def test_curve3_to(): path = Path() path.curve3_to((10, 0), (5, 5)) assert path[0] == ((10, 0), (5, 5)) assert path.end == (10, 0) def test_curve4_to(): path = Path() path.curve4_to((1, 2, 3), (0, 1, 0), (0, 2, 0)) assert path[0] == ((1, 2, 3), (0, 1, 0), (0, 2, 0)) assert path.end == (1, 2, 3) def test_user_data_is_none_by_default(): assert Path().user_data is None def test_set_and_get_user_data(): path = Path() path.user_data = [1, 2, 3] assert path.user_data == [1, 2, 3] def test_path_clones_share_user_data(): path = Path() data = [1, 2, 3] path.user_data = data assert path.clone().user_data is data def test_reversed_path_preserves_user_data(): path = Path() path.user_data = "data" path.line_to((1, 2, 3)) assert path.reversed().user_data == "data" def test_transformed_path_preserves_user_data(): path = Path() path.user_data = "data" path.line_to((1, 2, 3)) assert path.transform(Matrix44()).user_data == "data" def test_sub_paths_inherit_parent_user_data(): path = Path() path.user_data = "data" path.line_to((1, 2, 3)) path.move_to((7, 8, 9)) path.line_to((7, 8, 9)) assert path.has_sub_paths is True for p in path.sub_paths(): assert p.user_data == "data" def test_add_curves3(): path = Path() c1 = Bezier3P(((0, 0), (1, 1), (2, 0))) c2 = Bezier3P(((2, 0), (1, -1), (0, 0))) tools.add_bezier3p(path, [c1, c2]) assert len(path) == 2 assert path.end == (0, 0) def test_add_curves4(): path = Path() c1 = Bezier4P(((0, 0), (0, 1), (2, 1), (2, 0))) c2 = Bezier4P(((2, 0), (2, -1), (0, -1), (0, 0))) tools.add_bezier4p(path, [c1, c2]) assert len(path) == 2 assert path.end == (0, 0) def test_add_curves3_with_gap(): path = Path() c1 = Bezier3P(((0, 0), (1, 1), (2, 0))) c2 = Bezier3P(((2, -1), (3, -2), (0, -1))) tools.add_bezier3p(path, [c1, c2]) assert len(path) == 3 # added a line segment between curves assert path.end == (0, -1) def test_add_curves4_with_gap(): path = Path() c1 = Bezier4P(((0, 0, 0), (0, 1, 0), (2, 1, 0), (2, 0, 0))) c2 = Bezier4P(((2, -1, 0), (2, -2, 0), (0, -2, 0), (0, -1, 0))) 
tools.add_bezier4p(path, [c1, c2]) assert len(path) == 3 # added a line segment between curves assert path.end == (0, -1, 0) def test_add_curves3_reverse(): path = Path(start=(0, 0)) c1 = Bezier3P(((2, 0), (1, 1), (0, 0))) tools.add_bezier3p(path, [c1]) assert len(path) == 1 assert path.end == (2, 0, 0) def test_add_curves4_reverse(): path = Path(start=(0, 0, 0)) c1 = Bezier4P(((2, 0, 0), (2, 1, 0), (0, 1, 0), (0, 0, 0))) tools.add_bezier4p(path, [c1]) assert len(path) == 1 assert path.end == (2, 0, 0) class TestSubPath: def simple_multi_path(self): path = Path(start=(1, 0, 0)) path.line_to((2, 0, 0)) path.move_to((3, 0, 0)) return path def test_has_no_sub_paths_by_default(self): path = Path() assert path.has_sub_paths is False def test_first_move_to(self): path = Path(start=(1, 0, 0)) path.move_to((2, 0, 0)) assert path.start.isclose((2, 0, 0)), "should reset the start point" assert len(path) == 0, "should not add a MOVETO cmd as first cmd" assert path.has_sub_paths is False def test_multiple_first_move_to(self): path = Path(start=(1, 0, 0)) path.move_to((2, 0, 0)) path.move_to((3, 0, 0)) path.move_to((4, 0, 0)) assert path.start.isclose((4, 0, 0)), "should reset the start point" assert len(path) == 0, "should not add a MOVETO cmd as first cmd" assert path.has_sub_paths is False def test_move_to_creates_a_multi_path_object(self): path = Path(start=(1, 0, 0)) path.line_to((2, 0, 0)) path.move_to((3, 0, 0)) assert len(path) == 2, "should add a MOVETO cmd as last cmd" assert path.has_sub_paths is True, "should be a multi path object" assert path.end.isclose((3, 0, 0)), "should end at the MOVETO location" def test_merge_multiple_move_to_commands_at_the_end(self): path = self.simple_multi_path() path.move_to((4, 0, 0)) path.move_to((4, 0, 0)) assert ( len(path) == 2 ), "should merge multiple MOVETO commands at the end of the path" def test_clone_multi_path_object(self): path = self.simple_multi_path() path2 = path.clone() assert path2.has_sub_paths assert path.end == path2.end def test_cant_detect_orientation_of_multi_path_object(self): path = self.simple_multi_path() pytest.raises(TypeError, path.has_clockwise_orientation) def test_cant_convert_multi_path_object_to_clockwise_orientation(self): path = self.simple_multi_path() pytest.raises(TypeError, path.clockwise) def test_cant_convert_multi_path_object_to_ccw_orientation(self): path = self.simple_multi_path() pytest.raises(TypeError, path.counter_clockwise) def test_approximate_multi_path_object(self): path = self.simple_multi_path() vertices = list(path.approximate()) assert len(vertices) == 3 def test_flatten_multi_path_object(self): path = self.simple_multi_path() vertices = list(path.flattening(0.1)) assert len(vertices) == 3 def test_multi_path_object_to_wcs(self): path = self.simple_multi_path() path.to_wcs(OCS(), 0) assert path.end.isclose((3, 0, 0)) def test_transform_multi_path_object(self): path = self.simple_multi_path() m = Matrix44.translate(1, 1, 1) path2 = path.transform(m) assert path.end.isclose((3, 0, 0)) assert path2.has_sub_paths is True assert path2.end.isclose((4, 1, 1)) def test_sub_paths_from_single_path_object(self): path = Path(start=(1, 2, 3)) paths = list(path.sub_paths()) assert len(paths) == 1 s0 = paths[0] assert s0.start == (1, 2, 3) assert s0.end == (1, 2, 3) assert s0.has_sub_paths is False assert len(s0) == 0 def test_sub_paths_from_multi_path_object(self): path = self.simple_multi_path() s0, s1 = path.sub_paths() assert s0.start == (1, 0, 0) assert s0.end == (2, 0, 0) assert s0.has_sub_paths is False 
assert len(s0) == 1 assert s1.start == (3, 0, 0) assert s1.end == (3, 0, 0) assert len(s1) == 0 assert s1.has_sub_paths is False def test_add_spline(): from ezdxf.math import BSpline spline = BSpline.from_fit_points([(2, 0), (4, 1), (6, -1), (8, 0)]) path = Path() tools.add_spline(path, spline) assert path.start == (2, 0) assert path.end == (8, 0) # set start point to end of spline path = Path(start=(8, 0)) # add reversed spline, by default the start of # an empty path is set to the spline start tools.add_spline(path, spline, reset=False) assert path.start == (8, 0) assert path.end == (2, 0) path = Path() # add a line segment from (0, 0) to start of spline tools.add_spline(path, spline, reset=False) assert path.start == (0, 0) assert path.end == (8, 0) def test_from_spline(): spline = factory.new("SPLINE") spline.fit_points = [(2, 0), (4, 1), (6, -1), (8, 0)] path = make_path(spline) assert path.start == (2, 0) assert path.end == (8, 0) def test_add_ellipse(): from ezdxf.math import ConstructionEllipse ellipse = ConstructionEllipse( center=(3, 0), major_axis=(1, 0), ratio=0.5, start_param=0, end_param=math.pi, ) path = Path() tools.add_ellipse(path, ellipse) assert path.start.isclose((4, 0)) assert path.end.isclose((2, 0)) # set start point to end of ellipse path = Path(start=(2, 0)) # add reversed ellipse, by default the start of # an empty path is set to the ellipse start tools.add_ellipse(path, ellipse, reset=False) assert path.start.isclose((2, 0)) assert path.end.isclose((4, 0)) path = Path() # add a line segment from (0, 0) to start of ellipse tools.add_ellipse(path, ellipse, reset=False) assert path.start.isclose((0, 0)) assert path.end.isclose((2, 0)) def test_raises_type_error_for_unsupported_objects(): with pytest.raises(TypeError): make_path(DXFEntity()) with pytest.raises(TypeError): make_path(Polymesh.new(dxfattribs={"flags": Polymesh.POLYMESH})) with pytest.raises(TypeError): make_path(Polymesh.new(dxfattribs={"flags": Polymesh.POLYFACE})) def test_from_ellipse(): ellipse = factory.new( "ELLIPSE", dxfattribs={ "center": (3, 0), "major_axis": (1, 0), "ratio": 0.5, "start_param": 0, "end_param": math.pi, }, ) path = make_path(ellipse) assert path.start.isclose((4, 0)) assert path.end.isclose((2, 0)) def test_from_arc(): arc = factory.new( "ARC", dxfattribs={ "center": (1, 0, 0), "radius": 1, "start_angle": 0, "end_angle": 180, }, ) path = make_path(arc) assert path.start.isclose((2, 0)) assert path.end.isclose((0, 0)) @pytest.mark.parametrize("radius", [1, -1]) def test_from_circle(radius): circle = factory.new( "CIRCLE", dxfattribs={ "center": (1, 0, 0), "radius": radius, }, ) path = make_path(circle) assert path.start.isclose((2, 0)) assert path.end.isclose((2, 0)) assert path.is_closed is True def test_from_circle_with_zero_radius(): circle = factory.new( "CIRCLE", dxfattribs={ "center": (1, 0, 0), "radius": 0, }, ) path = make_path(circle) assert len(path) == 0 def test_from_line(): start = Vec3(1, 2, 3) end = Vec3(4, 5, 6) line = factory.new("LINE", dxfattribs={"start": start, "end": end}) path = make_path(line) assert path.start.isclose(start) assert path.end.isclose(end) @pytest.mark.parametrize("dxftype", ["SOLID", "TRACE", "3DFACE"]) def test_from_quadrilateral_with_4_points(dxftype): entity = factory.new(dxftype) entity.dxf.vtx0 = (0, 0, 0) entity.dxf.vtx1 = (1, 0, 0) entity.dxf.vtx2 = (1, 1, 0) entity.dxf.vtx3 = (0, 1, 0) path = make_path(entity) assert path.start == (0, 0, 0) assert path.is_closed is True assert len(list(path.approximate())) == 5 
@pytest.mark.parametrize("dxftype", ["SOLID", "TRACE", "3DFACE"]) def test_from_quadrilateral_with_3_points(dxftype): entity = factory.new(dxftype) entity.dxf.vtx0 = (0, 0, 0) entity.dxf.vtx1 = (1, 0, 0) entity.dxf.vtx2 = (1, 1, 0) entity.dxf.vtx3 = (1, 1, 0) # last two points are equal path = make_path(entity) assert path.is_closed is True assert len(list(path.approximate())) == 4 def test_lwpolyline_lines(): from ezdxf.entities import LWPolyline pline = LWPolyline() pline.append_points([(1, 1), (2, 1), (2, 2)], format="xy") path = make_path(pline) assert path.start.isclose((1, 1)) assert path.end.isclose((2, 2)) assert len(path) == 2 pline.dxf.elevation = 1.0 path = make_path(pline) assert path.start.isclose((1, 1, 1)) assert path.end.isclose((2, 2, 1)) POINTS = [ (0, 0, 0), (3, 0, -1), (6, 0, 0), (9, 0, 0), (9, -3, 0), ] def test_make_path_from_lwpolyline_with_bulges(): pline = LWPolyline() pline.closed = True pline.append_points(POINTS, format="xyb") path = make_path(pline) assert path.start == (0, 0) assert path.end == (0, 0) # closed assert any(cmd.type == Command.CURVE4_TO for cmd in path) def test_make_path_from_full_circle_lwpolyline(): pline = LWPolyline() pline.closed = True pline.append_points([(0, 0, 1), (1, 0, 1)], format="xyb") path = make_path(pline) assert path.start.isclose((0, 0)) assert path.end.isclose((0, 0)) assert len(path) == 4 assert any(cmd.type == Command.CURVE4_TO for cmd in path) vertices = list(path.flattening(0.1)) assert len(vertices) == 65 def test_make_path_from_full_circle_lwpolyline_issue_424(): pline = LWPolyline() pline.closed = True points = [ (39_482_129.9462793, 3_554_328.753243976, 1.0), (39_482_129.95781776, 3_554_328.753243976, 1.0), ] pline.append_points(points, format="xyb") path = make_path(pline) assert len(path) == 2 S_SHAPE = [ (0, 0, 0), (5, 0, 1), (5, 1, 0), (0, 1, -1), (0, 2, 0), (5, 2, 0), ] def test_lwpolyline_s_shape(): from ezdxf.entities import LWPolyline pline = LWPolyline() pline.append_points(S_SHAPE, format="xyb") path = make_path(pline) assert path.start == (0, 0) assert path.end == (5, 2) # closed assert any(cmd.type == Command.CURVE4_TO for cmd in path) def test_polyline_lines(): from ezdxf.entities import Polyline pline = Polyline() pline.append_formatted_vertices([(1, 1), (2, 1), (2, 2)], format="xy") path = make_path(pline) assert path.start == (1, 1) assert path.end == (2, 2) assert len(path) == 2 pline.dxf.elevation = (0, 0, 1) path = make_path(pline) assert path.start == (1, 1, 1) assert path.end == (2, 2, 1) def test_polyline_with_bulges(): from ezdxf.entities import Polyline pline = Polyline() pline.close(True) pline.append_formatted_vertices(POINTS, format="xyb") path = make_path(pline) assert path.start == (0, 0) assert path.end == (0, 0) # closed assert any(cmd.type == Command.CURVE4_TO for cmd in path) def test_3d_polyline(): from ezdxf.entities import Polyline pline = Polyline.new(dxfattribs={"flags": Polyline.POLYLINE_3D}) pline.append_vertices([(1, 1, 1), (2, 1, 3), (2, 2, 2)]) path = make_path(pline) assert path.start == (1, 1, 1) assert path.end == (2, 2, 2) assert len(path) == 2 POLYLINE_POINTS = [ # x, y, b (0, 0, 0), (2, 2, -1), (4, 0, 1), (6, 0, 0), ] class TestPathFromBoundaryWithElevationAndFlippedExtrusion: @pytest.fixture def hatch(self): return Hatch.new( dxfattribs={ "elevation": (0, 0, 4), "extrusion": (0, 0, -1), } ) def test_from_hatch_polyline_path(self, hatch): hatch.paths.add_polyline_path(POLYLINE_POINTS) path = make_path(hatch) assert path.has_curves is True assert len(path) > 5 assert 
all(math.isclose(v.z, -4) for v in path.control_vertices()) def test_approximate_lines(): path = Path() path.line_to((1, 1)) path.line_to((2, 0)) vertices = list(path.approximate()) assert len(vertices) == 3 assert vertices[0] == path.start == (0, 0) assert vertices[2] == path.end == (2, 0) def test_approximate_curves(): path = Path() path.curve3_to((2, 0), (1, 1)) path.curve4_to((3, 0), (2, 1), (3, 1)) vertices = list(path.approximate(20)) assert len(vertices) == 41 assert vertices[0] == (0, 0) assert vertices[-1] == (3, 0) def test_path_from_hatch_polyline_path_without_bulge(): polyline_path = PolylinePath() polyline_path.set_vertices( [(0, 0), (0, 1), (1, 1), (1, 0)], is_closed=False ) path = converter.from_hatch_polyline_path(polyline_path) assert len(path) == 3 assert path.start == (0, 0) assert path.end == (1, 0) polyline_path.is_closed = True path = converter.from_hatch_polyline_path(polyline_path) assert len(path) == 4 assert path.start == (0, 0) assert path.end == (0, 0) def test_path_from_hatch_polyline_path_with_bulge(): polyline_path = PolylinePath() polyline_path.set_vertices( [(0, 0), (1, 0, 0.5), (2, 0), (3, 0)], is_closed=False ) path = converter.from_hatch_polyline_path(polyline_path) assert len(path) == 4 assert path.start == (0, 0) assert path.end == (3, 0) assert path[1].type == Command.CURVE4_TO assert path[1].end.isclose((1.5, -0.25)) @pytest.fixture def p1(): path = Path() path.line_to((2, 0)) path.curve4_to((4, 0), (2, 1), (4, 1)) # end, ctrl1, ctrl2 path.curve3_to((6, 0), (5, -1)) # end, ctrl return path def test_path_cloning(p1): p2 = p1.clone() for cmd1, cmd2 in zip(p1, p2): assert cmd1 == cmd2 # but have different command lists: p2.line_to((4, 4)) assert len(p2) == len(p1) + 1 def test_approximate_line_curves(p1): vertices = list(p1.approximate(20)) assert len(vertices) == 42 assert vertices[0].isclose((0, 0)) assert vertices[-1].isclose((6, 0)) def test_transform(p1): p2 = p1.transform(Matrix44.translate(1, 1, 0)) assert p2.start.isclose((1, 1)) assert p2[0].end.isclose((3, 1)) # line to location assert p2[1].end.isclose((5, 1)) # cubic to location assert p2[1].ctrl1.isclose((3, 2)) # cubic ctrl1 assert p2[1].ctrl2.isclose((5, 2)) # cubic ctrl2 assert p2[2].end.isclose((7, 1)) # quadratic to location assert p2[2].ctrl.isclose((6, 0)) # quadratic ctrl assert p2.end.isclose((7, 1)) def test_control_vertices(p1): vertices = list(p1.control_vertices()) assert close_vectors( vertices, [(0, 0), (2, 0), (2, 1), (4, 1), (4, 0), (5, -1), (6, 0)] ) path = Path() assert len(list(path.control_vertices())) == 0 assert list(path.control_vertices()) == list(path.approximate(2)) path = converter.from_vertices([(0, 0), (1, 0)]) assert len(list(path.control_vertices())) == 2 def test_has_clockwise_orientation(): # basic has_clockwise_orientation() function is tested in: # test_617_clockwise_orientation path = converter.from_vertices([(0, 0), (1, 0), (1, 1), (0, 1)]) assert path.has_clockwise_orientation() is False path = Path() path.line_to((2, 0)) path.curve4_to((4, 0), (2, 1), (4, 1)) # end, ctrl1, ctrl2 assert path.has_clockwise_orientation() is True def test_reversing_empty_path(): p = Path() assert len(p.reversed()) == 0 def test_reversing_one_line(): p = Path() p.line_to((1, 0)) p2 = list(p.reversed().control_vertices()) assert close_vectors(p2, [(1, 0), (0, 0)]) def test_reversing_one_curve3(): p = Path() p.curve3_to((3, 0), (1.5, 1)) p2 = list(p.reversed().control_vertices()) assert close_vectors(p2, [(3, 0), (1.5, 1), (0, 0)]) def test_reversing_one_curve4(): p = Path() 
p.curve4_to((3, 0), (1, 1), (2, 1)) p2 = list(p.reversed().control_vertices()) assert close_vectors(p2, [(3, 0), (2, 1), (1, 1), (0, 0)]) def test_reversing_path_ctrl_vertices(p1): p2 = p1.reversed() assert close_vectors( p2.control_vertices(), reversed(list(p1.control_vertices())) ) def test_reversing_path_approx(p1): p2 = p1.reversed() v1 = list(p1.approximate()) v2 = list(p2.approximate()) assert close_vectors(v1, reversed(v2)) def test_reversing_multi_path(): p = Path() p.line_to((1, 0, 0)) p.move_to((2, 0, 0)) p.line_to((3, 0, 0)) r = p.reversed() assert r.has_sub_paths is True assert len(r) == 3 assert r.start == (3, 0, 0) assert r.end == (0, 0, 0) r0, r1 = r.sub_paths() assert r0.start == (3, 0, 0) assert r0.end == (2, 0, 0) assert r1.start == (1, 0, 0) assert r1.end == (0, 0, 0) def test_reversing_multi_path_with_a_move_to_cmd_at_the_end(): p = Path() p.line_to((1, 0, 0)) p.move_to((2, 0, 0)) # The last move_to will become the first move_to. # A move_to as first command just moves the start point. r = p.reversed() assert len(r) == 1 assert r.start == (1, 0, 0) assert r.end == (0, 0, 0) assert r.has_sub_paths is False def test_clockwise(p1): from ezdxf.math import has_clockwise_orientation cw_path = p1.clockwise() ccw_path = p1.counter_clockwise() assert has_clockwise_orientation(cw_path.control_vertices()) is True assert has_clockwise_orientation(ccw_path.control_vertices()) is False @pytest.fixture def edge_path(): ep = EdgePath() ep.add_line( (70.79594401862802, 38.81021154906707), (61.49705431814723, 38.81021154906707), ) ep.add_ellipse( center=(49.64089977339618, 36.43095770602131), major_axis=(16.69099826506408, 6.96203799241026), ratio=0.173450304570581, start_angle=348.7055398636587, end_angle=472.8737032507014, ccw=True, ) ep.add_line( (47.21845383585098, 38.81021154906707), (32.00406637283394, 38.81021154906707), ) ep.add_arc( center=(27.23255482392775, 37.32841621274949), radius=4.996302620946588, start_angle=17.25220809399113, end_angle=162.7477919060089, ccw=True, ) ep.add_line( (22.46104327502155, 38.81021154906707), (15.94617981131185, 38.81021154906707), ) ep.add_line( (15.94617981131185, 38.81021154906707), (15.94617981131185, 17.88970141145027), ) ep.add_line( (15.94617981131185, 17.88970141145027), (22.07965616927404, 17.88970141145026), ) ep.add_spline( control_points=[ (22.07965616927404, 17.88970141145027), (23.44151487263461, 19.56130038573538), (28.24116384863678, 24.26061858002495), (35.32501805918895, 14.41241846270862), (46.6153937930182, 11.75667640124574), (47.53794331191931, 23.11460620899234), (51.8076764251228, 12.06821526039212), (60.37405963053161, 14.60131364832752), (63.71393926002737, 20.24075830571701), (67.36423789268184, 19.07462271006858), (68.72358721334537, 17.88970141145026), ], knot_values=[ 2.825276861104652, 2.825276861104652, 2.825276861104652, 2.825276861104652, 8.585563484895022, 22.93271064560279, 29.77376253023298, 35.89697937194972, 41.26107011625705, 51.23489795733507, 54.82267350174899, 59.57512798605262, 59.57512798605262, 59.57512798605262, 59.57512798605262, ], degree=3, periodic=0, ) ep.add_line( (68.72358721334535, 17.88970141145027), (70.79594401862802, 17.88970141145027), ) ep.add_line( (70.79594401862802, 17.88970141145027), (70.79594401862802, 38.81021154906707), ) return ep def test_from_edge_path(edge_path): path = converter.from_hatch_edge_path(edge_path) assert path.has_sub_paths is False assert len(path) == 19 def test_from_edge_path_with_two_closed_loops(): ep = EdgePath() # 1st loop: closed segments 
ep.add_line((0, 0), (0, 1)) ep.add_line((0, 1), (1, 1)) ep.add_line((1, 1), (0, 1)) ep.add_line((0, 1), (0, 0)) # 2nd loop: closed segments ep.add_line((2, 0), (3, 0)) ep.add_line((3, 0), (3, 1)) ep.add_line((3, 1), (2, 1)) ep.add_line((2, 1), (2, 0)) path = converter.from_hatch_edge_path(ep) assert path.has_sub_paths is True, "should return a multi-path" assert len(list(path.sub_paths())) == 2, "expected two sub paths" LOOP = Vec2(0, 0), Vec2(1, 0), Vec2(1, 1), Vec2(0, 1) A, B, C, D = LOOP def test_edge_path_open_loop(): ep = EdgePath() # open segments do not create a path ep.add_line(A, B) ep.add_line(B, C) ep.add_line(C, D) path = converter.from_hatch_edge_path(ep) assert bool(path) is False, "expected an open loop" @pytest.mark.parametrize( "e0,e1,e2,e3", [ [(A, B), (B, C), (C, D), (D, A)], # case 0: consecutive order # 0----> 1----> 2----> # end - start # <--------------------------3 [(D, C), (C, B), (B, A), (A, D)], # case 1: reversed order [(A, B), (C, B), (C, D), (D, A)], # case 2 # 0-------> 2----> # 0: end - end, reversing (C, B) # 1-------> # 1: end - start # <--------------------------3 [(A, B), (C, B), (D, C), (D, A)], # case 3 # 0-------> 2-------> # 0: end - end, reversing (C, B) # 1----------> # 1: end - end # <--------------------------3 [(A, B), (D, A), (D, C), (B, C)], # case 4 # 0----------> 2-------> # 0: start - end; 2: end - end, rev: B, C # 1-------> # 1: start - start # <--------------------3 [(A, B), (D, A), (D, C), (C, B)], # case 5 # 0----------> 2----> # 0: start - end # 1-------> # 1: start - start # <-----------------------3 [(A, B), (D, A), (C, D), (B, C)], # case 6 # 0----------> 2----------> # 0: start - end # 1----------> # 1: start - end # <--------------------3 [(A, B), (B, C), (A, D), (C, D)], # case 7 # 0----> 2-------> # 0: start - end # <---------------1 # 1: start - start # <------------3 ], ) def test_edge_path_closed_loop(e0, e1, e2, e3): ep = EdgePath() ep.add_line(e0[0], e0[1]) ep.add_line(e1[0], e1[1]) ep.add_line(e2[0], e2[1]) ep.add_line(e3[0], e3[1]) path = converter.from_hatch_edge_path(ep) assert len(list(path.sub_paths())) == 1, "expected one closed loop" assert len(list(path.control_vertices())) == 5 assert path.is_closed is True, "expected a closed loop" class TestPathFromEdgePathWithElevationAndFlippedExtrusion: def test_line_edge(self): ep = EdgePath() ep.add_line(A, B) ep.add_line(B, C) ep.add_line(C, D) ep.add_line(D, A) path = converter.from_hatch_edge_path( ep, ocs=OCS((0, 0, -1)), elevation=4 ) assert len(list(path.sub_paths())) == 1, "expected one closed loop" assert len(list(path.control_vertices())) == 5 assert all(math.isclose(v.z, -4) for v in path.control_vertices()) assert path.is_closed is True, "expected a closed loop" def test_arc_edge(self): ep = EdgePath() ep.add_arc( center=(5.0, 5.0), radius=5.0, start_angle=0, end_angle=90, ccw=True, ) ep.add_line((5, 10), (10, 5)) path = converter.from_hatch_edge_path( ep, ocs=OCS((0, 0, -1)), elevation=4 ) assert len(path) == 2 assert all(math.isclose(v.z, -4) for v in path.control_vertices()) def test_ellipse_edge(self): ep = EdgePath() ep.add_ellipse( center=(5.0, 5.0), major_axis=(5.0, 0.0), ratio=1, start_angle=0, end_angle=90, ccw=True, ) ep.add_line((5, 10), (10, 5)) path = converter.from_hatch_edge_path( ep, ocs=OCS((0, 0, -1)), elevation=4 ) assert len(path) == 2 assert all(math.isclose(v.z, -4) for v in path.control_vertices()) def test_spline_edge(self): ep = EdgePath() ep.add_spline(fit_points=[(10, 5), (8, 5), (6, 8), (5, 10)]) ep.add_line((5, 10), (10, 5)) path = 
converter.from_hatch_edge_path( ep, ocs=OCS((0, 0, -1)), elevation=4 ) assert len(path) > 2 assert all(math.isclose(v.z, -4) for v in path.control_vertices()) def test_from_complex_edge_path(self, edge_path): path = converter.from_hatch_edge_path( edge_path, ocs=OCS((0, 0, -1)), elevation=4 ) assert path.has_sub_paths is False assert len(path) == 19 assert all(math.isclose(v.z, -4) for v in path.control_vertices()) def test_extend_path_by_another_none_empty_path(): p0 = Path((1, 0, 0)) p0.line_to((2, 0, 0)) p1 = Path((3, 0, 0)) p1.line_to((3, 0, 0)) p0.extend_multi_path(p1) assert p0.has_sub_paths is True assert p0.start == (1, 0, 0) assert p0.end == (3, 0, 0) def test_extend_path_by_another_single_path(): path = Path((1, 0, 0)) path.line_to((2, 0, 0)) p1 = Path((3, 0, 0)) p1.line_to((4, 0, 0)) path.extend_multi_path(p1) assert path.has_sub_paths is True assert path.start == (1, 0, 0) assert path.end == (4, 0, 0) def test_extend_path_by_another_multi_path(): path = Path((1, 0, 0)) path.line_to((2, 0, 0)) p1 = Path((3, 0, 0)) p1.line_to((4, 0, 0)) p1.move_to((5, 0, 0)) path.extend_multi_path(p1) assert path.has_sub_paths is True assert path.start == (1, 0, 0) assert path.end == (5, 0, 0) def test_append_empty_path(): path = Path((1, 0, 0)) path.line_to((2, 0, 0)) start = path.start end = path.end path.append_path(Path()) assert start == path.start and end == path.end, "path should be unchanged" def test_append_path_without_a_gap(): p1 = Path((1, 0, 0)) p1.line_to((2, 0, 0)) p2 = Path((2, 0, 0)) p2.line_to((3, 0, 0)) p1.append_path(p2) assert p1.start == (1, 0) assert p1.end == (3, 0) assert len(p1) == 2 def test_append_path_with_a_gap(): p1 = Path((1, 0, 0)) p1.line_to((2, 0, 0)) p2 = Path((3, 0, 0)) p2.line_to((4, 0, 0)) p1.append_path(p2) assert p1.start == (1, 0) assert p1.end == (4, 0) assert len(p1) == 3 class TestCloseSubPath: def test_close_last_sub_path(self): p = Path() p.line_to((1, 0, 0)) p.move_to((2, 0, 0)) p.line_to((3, 0, 0)) p.close_sub_path() assert p.end == (2, 0, 0) def test_does_nothing_if_last_sub_path_is_closed(self): p = Path() p.line_to((1, 0, 0)) p.move_to((2, 0, 0)) p.line_to((3, 0, 0)) p.line_to((2, 0, 0)) assert len(p) == 4 p.close_sub_path() assert len(p) == 4 assert p.end == (2, 0, 0) def test_does_nothing_if_last_sub_path_is_empty(self): p = Path() p.line_to((1, 0, 0)) p.move_to((2, 0, 0)) assert len(p) == 2 p.close_sub_path() assert len(p) == 2 assert p.end == (2, 0, 0) def test_close_single_path(self): p = Path((1, 0, 0)) p.line_to((3, 0, 0)) p.close_sub_path() assert p.end == (1, 0, 0) if __name__ == "__main__": pytest.main([__file__])
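
# Illustrative sketch (not part of the test module above): build a multi-path from
# two sub-paths and flatten it, using only the Path API exercised by these tests.
# All coordinates are arbitrary example values.
def _demo_multi_path():
    from ezdxf.path import Path  # already imported at module top; repeated so the sketch stands alone

    p = Path(start=(0, 0))
    p.line_to((2, 0))
    p.curve4_to((4, 0), (2, 1), (4, 1))  # end, ctrl1, ctrl2
    p.move_to((10, 0))  # starts a second sub-path
    p.line_to((12, 0))

    assert p.has_sub_paths is True
    assert len(list(p.sub_paths())) == 2
    # flattening() yields approximation vertices for all sub-paths
    return list(p.flattening(0.1))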
# game.py # ------- # Licensing Information: Please do not distribute or publish solutions to this # project. You are free to use and extend these projects for educational # purposes. The Pacman AI projects were developed at UC Berkeley, primarily by # John DeNero ([email protected]) and Dan Klein ([email protected]). # For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html from util import * from util import raiseNotDefined import time import os import traceback try: import boinc _BOINC_ENABLED = True except: _BOINC_ENABLED = False ####################### # Parts worth reading # ####################### class Agent: """ An agent must define a getAction method, but may also define the following methods which will be called if they exist: def registerInitialState(self, state): # inspects the starting state """ def __init__(self, index=0): self.index = index def getAction(self, state): """ The Agent will receive a GameState (from either {pacman, capture, sonar}.py) and must return an action from Directions.{North, South, East, West, Stop} """ raiseNotDefined() class Directions: NORTH = 'North' SOUTH = 'South' EAST = 'East' WEST = 'West' STOP = 'Stop' LEFT = {NORTH: WEST, SOUTH: EAST, EAST: NORTH, WEST: SOUTH, STOP: STOP} RIGHT = dict([(y, x) for x, y in list(LEFT.items())]) REVERSE = {NORTH: SOUTH, SOUTH: NORTH, EAST: WEST, WEST: EAST, STOP: STOP} class Configuration: """ A Configuration holds the (x,y) coordinate of a character, along with its traveling direction. The convention for positions, like a graph, is that (0,0) is the lower left corner, x increases horizontally and y increases vertically. Therefore, north is the direction of increasing y, or (0,1). """ def __init__(self, pos, direction): self.pos = pos self.direction = direction def getPosition(self): return (self.pos) def getDirection(self): return self.direction def isInteger(self): x, y = self.pos return x == int(x) and y == int(y) def __eq__(self, other): if other is None: return False return (self.pos == other.pos and self.direction == other.direction) def __hash__(self): x = hash(self.pos) y = hash(self.direction) return hash(x + 13 * y) def __str__(self): return "(x,y)="+str(self.pos)+", "+str(self.direction) def generateSuccessor(self, vector): """ Generates a new configuration reached by translating the current configuration by the action vector. This is a low-level call and does not attempt to respect the legality of the movement. Actions are movement vectors. """ x, y = self.pos dx, dy = vector direction = Actions.vectorToDirection(vector) if direction == Directions.STOP: direction = self.direction # There is no stop direction return Configuration((x + dx, y+dy), direction) class AgentState: """ AgentStates hold the state of an agent (configuration, speed, scared, etc). 
""" def __init__(self, startConfiguration, isPacman): self.start = startConfiguration self.configuration = startConfiguration self.isPacman = isPacman self.scaredTimer = 0 def __str__(self): if self.isPacman: return "Pacman: " + str(self.configuration) else: return "Ghost: " + str(self.configuration) def __eq__(self, other): if other is None: return False return self.configuration == other.configuration and self.scaredTimer == other.scaredTimer def __hash__(self): return hash(hash(self.configuration) + 13 * hash(self.scaredTimer)) def copy(self): state = AgentState(self.start, self.isPacman) state.configuration = self.configuration state.scaredTimer = self.scaredTimer return state def getPosition(self): if self.configuration is None: return None return self.configuration.getPosition() def getDirection(self): return self.configuration.getDirection() class Grid: """ A 2-dimensional array of objects backed by a list of lists. Data is accessed via grid[x][y] where (x,y) are positions on a Pacman map with x horizontal, y vertical and the origin (0,0) in the bottom left corner. The __str__ method constructs an output that is oriented like a pacman board. """ def __init__(self, width, height, initialValue=False, bitRepresentation=None): if initialValue not in [False, True]: raise Exception('Grids can only contain booleans') self.CELLS_PER_INT = 30 self.width = width self.height = height self.data = [[initialValue for y in range( height)] for x in range(width)] if bitRepresentation: self._unpackBits(bitRepresentation) def __getitem__(self, i): return self.data[i] def __setitem__(self, key, item): self.data[key] = item def __str__(self): out = [[str(self.data[x][y])[0] for x in range(self.width)] for y in range(self.height)] out.reverse() return '\n'.join([''.join(x) for x in out]) def __eq__(self, other): if other is None: return False return self.data == other.data def __hash__(self): # return hash(str(self)) base = 1 h = 0 for l in self.data: for i in l: if i: h += base base *= 2 return hash(h) def copy(self): g = Grid(self.width, self.height) g.data = [x[:] for x in self.data] return g def deepCopy(self): return self.copy() def shallowCopy(self): g = Grid(self.width, self.height) g.data = self.data return g def count(self, item=True): return sum([x.count(item) for x in self.data]) def asList(self, key=True): list = [] for x in range(self.width): for y in range(self.height): if self[x][y] == key: list.append((x, y)) return list def packBits(self): """ Returns an efficient int list representation (width, height, bitPackedInts...) 
""" bits = [self.width, self.height] currentInt = 0 for i in range(self.height * self.width): bit = self.CELLS_PER_INT - (i % self.CELLS_PER_INT) - 1 x, y = self._cellIndexToPosition(i) if self[x][y]: currentInt += 2 ** bit if (i + 1) % self.CELLS_PER_INT == 0: bits.append(currentInt) currentInt = 0 bits.append(currentInt) return tuple(bits) def _cellIndexToPosition(self, index): x = index / self.height y = index % self.height return x, y def _unpackBits(self, bits): """ Fills in data from a bit-level representation """ cell = 0 for packed in bits: for bit in self._unpackInt(packed, self.CELLS_PER_INT): if cell == self.width * self.height: break x, y = self._cellIndexToPosition(cell) self[x][y] = bit cell += 1 def _unpackInt(self, packed, size): bools = [] if packed < 0: raise ValueError("must be a positive integer") for i in range(size): n = 2 ** (self.CELLS_PER_INT - i - 1) if packed >= n: bools.append(True) packed -= n else: bools.append(False) return bools def reconstituteGrid(bitRep): if type(bitRep) is not type((1, 2)): return bitRep width, height = bitRep[:2] return Grid(width, height, bitRepresentation=bitRep[2:]) #################################### # Parts you shouldn't have to read # #################################### class Actions: """ A collection of static methods for manipulating move actions. """ # Directions _directions = {Directions.NORTH: (0, 1), Directions.SOUTH: (0, -1), Directions.EAST: (1, 0), Directions.WEST: (-1, 0), Directions.STOP: (0, 0)} _directionsAsList = list(_directions.items()) TOLERANCE = .001 def reverseDirection(action): if action == Directions.NORTH: return Directions.SOUTH if action == Directions.SOUTH: return Directions.NORTH if action == Directions.EAST: return Directions.WEST if action == Directions.WEST: return Directions.EAST return action reverseDirection = staticmethod(reverseDirection) def vectorToDirection(vector): dx, dy = vector if dy > 0: return Directions.NORTH if dy < 0: return Directions.SOUTH if dx < 0: return Directions.WEST if dx > 0: return Directions.EAST return Directions.STOP vectorToDirection = staticmethod(vectorToDirection) def directionToVector(direction, speed=1.0): dx, dy = Actions._directions[direction] return (dx * speed, dy * speed) directionToVector = staticmethod(directionToVector) def getPossibleActions(config, walls): possible = [] x, y = config.pos x_int, y_int = int(x + 0.5), int(y + 0.5) # In between grid points, all agents must continue straight if (abs(x - x_int) + abs(y - y_int) > Actions.TOLERANCE): return [config.getDirection()] for dir, vec in Actions._directionsAsList: dx, dy = vec next_y = y_int + dy next_x = x_int + dx if not walls[next_x][next_y]: possible.append(dir) return possible getPossibleActions = staticmethod(getPossibleActions) def getLegalNeighbors(position, walls): x, y = position x_int, y_int = int(x + 0.5), int(y + 0.5) neighbors = [] for dir, vec in Actions._directionsAsList: dx, dy = vec next_x = x_int + dx if next_x < 0 or next_x == walls.width: continue next_y = y_int + dy if next_y < 0 or next_y == walls.height: continue if not walls[next_x][next_y]: neighbors.append((next_x, next_y)) return neighbors getLegalNeighbors = staticmethod(getLegalNeighbors) def getSuccessor(position, action): dx, dy = Actions.directionToVector(action) x, y = position return (x + dx, y + dy) getSuccessor = staticmethod(getSuccessor) class GameStateData: """ """ def __init__(self, prevState=None): """ Generates a new data packet by copying information from its predecessor. 
""" if prevState is not None: self.food = prevState.food.shallowCopy() self.capsules = prevState.capsules[:] self.agentStates = self.copyAgentStates(prevState.agentStates) self.layout = prevState.layout self._eaten = prevState._eaten self.score = prevState.score self._foodEaten = None self._capsuleEaten = None self._agentMoved = None self._lose = False self._win = False self.scoreChange = 0 def deepCopy(self): state = GameStateData(self) state.food = self.food.deepCopy() state.layout = self.layout.deepCopy() state._agentMoved = self._agentMoved state._foodEaten = self._foodEaten state._capsuleEaten = self._capsuleEaten return state def copyAgentStates(self, agentStates): copiedStates = [] for agentState in agentStates: copiedStates.append(agentState.copy()) return copiedStates def __eq__(self, other): """ Allows two states to be compared. """ if other is None: return False # TODO Check for type of other if not self.agentStates == other.agentStates: return False if not self.food == other.food: return False if not self.capsules == other.capsules: return False if not self.score == other.score: return False return True def __hash__(self): """ Allows states to be keys of dictionaries. """ for i, state in enumerate(self.agentStates): try: int(hash(state)) except TypeError as e: print(e) # hash(state) return int((hash(tuple(self.agentStates)) + 13*hash(self.food) + 113 * hash(tuple(self.capsules)) + 7 * hash(self.score)) % 1048575) def __str__(self): width, height = self.layout.width, self.layout.height map = Grid(width, height) if type(self.food) == type((1, 2)): self.food = reconstituteGrid(self.food) for x in range(width): for y in range(height): food, walls = self.food, self.layout.walls map[x][y] = self._foodWallStr(food[x][y], walls[x][y]) for agentState in self.agentStates: if agentState is None: continue if agentState.configuration is None: continue x, y = [int(i) for i in nearestPoint(agentState.configuration.pos)] agent_dir = agentState.configuration.direction if agentState.isPacman: map[x][y] = self._pacStr(agent_dir) else: map[x][y] = self._ghostStr(agent_dir) for x, y in self.capsules: map[x][y] = 'o' return str(map) + ("\nScore: %d\n" % self.score) def _foodWallStr(self, hasFood, hasWall): if hasFood: return '.' elif hasWall: return '%' else: return ' ' def _pacStr(self, dir): if dir == Directions.NORTH: return 'v' if dir == Directions.SOUTH: return '^' if dir == Directions.WEST: return '>' return '<' def _ghostStr(self, dir): return 'G' if dir == Directions.NORTH: return 'M' if dir == Directions.SOUTH: return 'W' if dir == Directions.WEST: return '3' return 'E' def initialize(self, layout, numGhostAgents): """ Creates an initial game state from a layout array (see layout.py). """ self.food = layout.food.copy() self.capsules = layout.capsules[:] self.layout = layout self.score = 0 self.scoreChange = 0 self.agentStates = [] numGhosts = 0 for isPacman, pos in layout.agentPositions: if not isPacman: if numGhosts == numGhostAgents: continue # Max ghosts reached already else: numGhosts += 1 self.agentStates.append(AgentState( Configuration(pos, Directions.STOP), isPacman)) self._eaten = [False for a in self.agentStates] class Game: """ The Game manages the control flow, soliciting actions from agents. 
""" def __init__(self, agents, display, rules, startingIndex=0, muteAgents=False, catchExceptions=False): self.agentCrashed = False self.agents = agents self.display = display self.rules = rules self.startingIndex = startingIndex self.gameOver = False self.muteAgents = muteAgents self.catchExceptions = catchExceptions self.moveHistory = [] self.totalAgentTimes = [0 for agent in agents] self.totalAgentTimeWarnings = [0 for agent in agents] self.agentTimeout = False def getProgress(self): if self.gameOver: return 1.0 else: return self.rules.getProgress(self) def _agentCrash(self, agentIndex, quiet=False): "Helper method for handling agent crashes" if not quiet: traceback.print_exc() self.gameOver = True self.agentCrashed = True self.rules.agentCrash(self, agentIndex) OLD_STDOUT = None OLD_STDERR = None def mute(self): if not self.muteAgents: return global OLD_STDOUT, OLD_STDERR import io OLD_STDOUT = sys.stdout OLD_STDERR = sys.stderr sys.stdout = io.StringIO() sys.stderr = io.StringIO() def unmute(self): if not self.muteAgents: return global OLD_STDOUT, OLD_STDERR sys.stdout.close() sys.stderr.close() # Revert stdout/stderr to originals sys.stdout = OLD_STDOUT sys.stderr = OLD_STDERR def run(self): """ Main control loop for game play. """ self.display.initialize(self.state.data) self.numMoves = 0 # self.display.initialize(self.state.makeObservation(1).data) # inform learning agents of the game start for i in range(len(self.agents)): agent = self.agents[i] if not agent: # this is a null agent, meaning it failed to load # the other team wins self._agentCrash(i, quiet=True) return if ("registerInitialState" in dir(agent)): self.mute() if self.catchExceptions: try: timed_func = TimeoutFunction( agent.registerInitialState, int(self.rules.getMaxStartupTime(i))) try: start_time = time.time() timed_func(self.state.deepCopy()) time_taken = time.time() - start_time self.totalAgentTimes[i] += time_taken except TimeoutFunctionException: print("Agent %d ran out of time on startup!" % i) self.unmute() self.agentTimeout = True self._agentCrash(i, quiet=True) return except Exception as data: self.unmute() self._agentCrash(i, quiet=True) return else: agent.registerInitialState(self.state.deepCopy()) # TODO: could this exceed the total time self.unmute() agentIndex = self.startingIndex numAgents = len(self.agents) while not self.gameOver: # Fetch the next agent agent = self.agents[agentIndex] move_time = 0 skip_action = False # Generate an observation of the state if 'observationFunction' in dir(agent): self.mute() if self.catchExceptions: try: timed_func = TimeoutFunction(agent.observationFunction, int( self.rules.getMoveTimeout(agentIndex))) try: start_time = time.time() observation = timed_func(self.state.deepCopy()) except TimeoutFunctionException: skip_action = True move_time += time.time() - start_time self.unmute() except Exception as data: self.unmute() self._agentCrash(agentIndex, quiet=True) return else: observation = agent.observationFunction( self.state.deepCopy()) self.unmute() else: observation = self.state.deepCopy() # Solicit an action action = None self.mute() if self.catchExceptions: try: timed_func = TimeoutFunction(agent.getAction, int( self.rules.getMoveTimeout(agentIndex)) - int(move_time)) try: start_time = time.time() if skip_action: raise TimeoutFunctionException() action = timed_func(observation) except TimeoutFunctionException: print("Agent %d timed out on a single move!" 
% agentIndex) self.agentTimeout = True self.unmute() self._agentCrash(agentIndex, quiet=True) return move_time += time.time() - start_time if move_time > self.rules.getMoveWarningTime(agentIndex): self.totalAgentTimeWarnings[agentIndex] += 1 print("Agent %d took too long to make a move! This is warning %d" % ( agentIndex, self.totalAgentTimeWarnings[agentIndex])) if self.totalAgentTimeWarnings[agentIndex] > self.rules.getMaxTimeWarnings(agentIndex): print("Agent %d exceeded the maximum number of warnings: %d" % ( agentIndex, self.totalAgentTimeWarnings[agentIndex])) self.agentTimeout = True self.unmute() self._agentCrash(agentIndex, quiet=True) self.totalAgentTimes[agentIndex] += move_time # print "Agent: %d, time: %f, total: %f" % (agentIndex, move_time, self.totalAgentTimes[agentIndex]) if self.totalAgentTimes[agentIndex] > self.rules.getMaxTotalTime(agentIndex): print("Agent %d ran out of time! (time: %1.2f)" % (agentIndex, self.totalAgentTimes[agentIndex])) self.agentTimeout = True self.unmute() self._agentCrash(agentIndex, quiet=True) return self.unmute() except Exception as data: self.unmute() self._agentCrash(agentIndex) return else: action = agent.getAction(observation) self.unmute() # Execute the action self.moveHistory.append((agentIndex, action)) if self.catchExceptions: try: self.state = self.state.generateSuccessor( agentIndex, action) except Exception as data: self._agentCrash(agentIndex) return else: self.state = self.state.generateSuccessor(agentIndex, action) # Change the display self.display.update(self.state.data) ###idx = agentIndex - agentIndex % 2 + 1 ###self.display.update( self.state.makeObservation(idx).data ) # Allow for game specific conditions (winning, losing, etc.) self.rules.process(self.state, self) # Track progress if agentIndex == numAgents + 1: self.numMoves += 1 # Next agent agentIndex = (agentIndex + 1) % numAgents if _BOINC_ENABLED: boinc.set_fraction_done(self.getProgress()) # inform a learning agent of the game result for agent in self.agents: if "final" in dir(agent): try: self.mute() agent.final(self.state) self.unmute() except Exception as data: if not self.catchExceptions: raise self.unmute() print("Exception", data) self._agentCrash(agent.index) return self.display.finish()
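
# Illustrative sketch (not part of the framework above): a minimal Agent subclass
# showing the getAction() contract. The name GoEastAgent is made up, and the call
# to state.getLegalActions() assumes the GameState implementation used by the
# pacman/capture front ends, which exposes that method.
class GoEastAgent(Agent):
    "Example agent that moves East whenever that action is legal."

    def getAction(self, state):
        legal = state.getLegalActions(self.index)
        if Directions.EAST in legal:
            return Directions.EAST
        return Directions.STOP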
#!/usr/bin/env python3 # Copyright (c) 2016-2017 Bitcoin Core Developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # This script will locally construct a merge commit for a pull request on a # github repository, inspect it, sign it and optionally push it. # The following temporary branches are created/overwritten and deleted: # * pull/$PULL/base (the current master we're merging onto) # * pull/$PULL/head (the current state of the remote pull request) # * pull/$PULL/merge (github's merge) # * pull/$PULL/local-merge (our merge) # In case of a clean merge that is accepted by the user, the local branch with # name $BRANCH is overwritten with the merged result, and optionally pushed. from __future__ import division,print_function,unicode_literals import os from sys import stdin,stdout,stderr import argparse import hashlib import subprocess import json,codecs try: from urllib.request import Request,urlopen except: from urllib2 import Request,urlopen # External tools (can be overridden using environment) GIT = os.getenv('GIT','git') BASH = os.getenv('BASH','bash') # OS specific configuration for terminal attributes ATTR_RESET = '' ATTR_PR = '' COMMIT_FORMAT = '%h %s (%an)%d' if os.name == 'posix': # if posix, assume we can use basic terminal escapes ATTR_RESET = '\033[0m' ATTR_PR = '\033[1;36m' COMMIT_FORMAT = '%C(bold blue)%h%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset' def git_config_get(option, default=None): ''' Get named configuration option from git repository. ''' try: return subprocess.check_output([GIT,'config','--get',option]).rstrip().decode('utf-8') except subprocess.CalledProcessError as e: return default def retrieve_pr_info(repo,pull): ''' Retrieve pull request information from github. Return None if no title can be found, or an error happens. 
''' try: req = Request("https://api.github.com/repos/"+repo+"/pulls/"+pull) result = urlopen(req) reader = codecs.getreader('utf-8') obj = json.load(reader(result)) return obj except Exception as e: print('Warning: unable to retrieve pull information from github: %s' % e) return None def ask_prompt(text): print(text,end=" ",file=stderr) stderr.flush() reply = stdin.readline().rstrip() print("",file=stderr) return reply def get_symlink_files(): files = sorted(subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', 'HEAD']).splitlines()) ret = [] for f in files: if (int(f.decode('utf-8').split(" ")[0], 8) & 0o170000) == 0o120000: ret.append(f.decode('utf-8').split("\t")[1]) return ret def tree_sha512sum(commit='HEAD'): # request metadata for entire tree, recursively files = [] blob_by_name = {} for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines(): name_sep = line.index(b'\t') metadata = line[:name_sep].split() # perms, 'blob', blobid assert(metadata[1] == b'blob') name = line[name_sep+1:] files.append(name) blob_by_name[name] = metadata[2] files.sort() # open connection to git-cat-file in batch mode to request data for all blobs # this is much faster than launching it per file p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE) overall = hashlib.sha512() for f in files: blob = blob_by_name[f] # request blob p.stdin.write(blob + b'\n') p.stdin.flush() # read header: blob, "blob", size reply = p.stdout.readline().split() assert(reply[0] == blob and reply[1] == b'blob') size = int(reply[2]) # hash the blob data intern = hashlib.sha512() ptr = 0 while ptr < size: bs = min(65536, size - ptr) piece = p.stdout.read(bs) if len(piece) == bs: intern.update(piece) else: raise IOError('Premature EOF reading git cat-file output') ptr += bs dig = intern.hexdigest() assert(p.stdout.read(1) == b'\n') # ignore LF that follows blob data # update overall hash with file hash overall.update(dig.encode("utf-8")) overall.update(" ".encode("utf-8")) overall.update(f) overall.update("\n".encode("utf-8")) p.stdin.close() if p.wait(): raise IOError('Non-zero return value executing git cat-file') return overall.hexdigest() def print_merge_details(pull, title, branch, base_branch, head_branch): print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET)) subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch]) def parse_arguments(): epilog = ''' In addition, you can set the following git configuration variables: githubmerge.repository (mandatory), user.signingkey (mandatory), githubmerge.host (default: [email protected]), githubmerge.branch (no default), githubmerge.testcmd (default: none). 
''' parser = argparse.ArgumentParser(description='Utility to merge, sign and push github pull requests', epilog=epilog) parser.add_argument('pull', metavar='PULL', type=int, nargs=1, help='Pull request ID to merge') parser.add_argument('branch', metavar='BRANCH', type=str, nargs='?', default=None, help='Branch to merge against (default: githubmerge.branch setting, or base branch for pull, or \'master\')') return parser.parse_args() def main(): # Extract settings from git repo repo = git_config_get('githubmerge.repository') host = git_config_get('githubmerge.host','[email protected]') opt_branch = git_config_get('githubmerge.branch',None) testcmd = git_config_get('githubmerge.testcmd') signingkey = git_config_get('user.signingkey') if repo is None: print("ERROR: No repository configured. Use this command to set:", file=stderr) print("git config githubmerge.repository <owner>/<repo>", file=stderr) exit(1) if signingkey is None: print("ERROR: No GPG signing key set. Set one using:",file=stderr) print("git config --global user.signingkey <key>",file=stderr) exit(1) host_repo = host+":"+repo # shortcut for push/pull target # Extract settings from command line args = parse_arguments() pull = str(args.pull[0]) # Receive pull information from github info = retrieve_pr_info(repo,pull) if info is None: exit(1) title = info['title'].strip() # precedence order for destination branch argument: # - command line argument # - githubmerge.branch setting # - base branch for pull (as retrieved from github) # - 'master' branch = args.branch or opt_branch or info['base']['ref'] or 'master' # Initialize source branches head_branch = 'pull/'+pull+'/head' base_branch = 'pull/'+pull+'/base' merge_branch = 'pull/'+pull+'/merge' local_merge_branch = 'pull/'+pull+'/local-merge' devnull = open(os.devnull,'w') try: subprocess.check_call([GIT,'checkout','-q',branch]) except subprocess.CalledProcessError as e: print("ERROR: Cannot check out branch %s." % (branch), file=stderr) exit(3) try: subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*']) except subprocess.CalledProcessError as e: print("ERROR: Cannot find pull request #%s on %s." % (pull,host_repo), file=stderr) exit(3) try: subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout) except subprocess.CalledProcessError as e: print("ERROR: Cannot find head of pull request #%s on %s." % (pull,host_repo), file=stderr) exit(3) try: subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout) except subprocess.CalledProcessError as e: print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr) exit(3) try: subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/heads/'+branch+':refs/heads/'+base_branch]) except subprocess.CalledProcessError as e: print("ERROR: Cannot find branch %s on %s." % (branch,host_repo), file=stderr) exit(3) subprocess.check_call([GIT,'checkout','-q',base_branch]) subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull) subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch]) try: # Go up to the repository's root. toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip() os.chdir(toplevel) # Create unsigned merge commit. 
        if title:
            firstline = 'Merge #%s: %s' % (pull,title)
        else:
            firstline = 'Merge #%s' % (pull,)
        message = firstline + '\n\n'
        message += subprocess.check_output([GIT,'log','--no-merges','--topo-order','--pretty=format:%h %s (%an)',base_branch+'..'+head_branch]).decode('utf-8')
        try:
            subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','-m',message.encode('utf-8'),head_branch])
        except subprocess.CalledProcessError as e:
            print("ERROR: Cannot be merged cleanly.",file=stderr)
            subprocess.check_call([GIT,'merge','--abort'])
            exit(4)
        logmsg = subprocess.check_output([GIT,'log','--pretty=format:%s','-n','1']).decode('utf-8')
        if logmsg.rstrip() != firstline.rstrip():
            print("ERROR: Creating merge failed (already merged?).",file=stderr)
            exit(4)

        symlink_files = get_symlink_files()
        for f in symlink_files:
            print("ERROR: File %s was a symlink" % f)
        if len(symlink_files) > 0:
            exit(4)

        # Put tree SHA512 into the message
        try:
            first_sha512 = tree_sha512sum()
            message += '\n\nTree-SHA512: ' + first_sha512
        except subprocess.CalledProcessError as e:
            print("ERROR: Unable to compute tree hash")
            exit(4)
        try:
            subprocess.check_call([GIT,'commit','--amend','-m',message.encode('utf-8')])
        except subprocess.CalledProcessError as e:
            print("ERROR: Cannot update message.",file=stderr)
            exit(4)

        print_merge_details(pull, title, branch, base_branch, head_branch)
        print()

        # Run test command if configured.
        if testcmd:
            if subprocess.call(testcmd,shell=True):
                print("ERROR: Running %s failed." % testcmd,file=stderr)
                exit(5)

            # Show the created merge.
            diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch])
            subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch])
            if diff:
                print("WARNING: merge differs from github!",file=stderr)
                reply = ask_prompt("Type 'ignore' to continue.")
                if reply.lower() == 'ignore':
                    print("Difference with github ignored.",file=stderr)
                else:
                    exit(6)
        else:
            # Verify the result manually.
            print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr)
            print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr)
            print("Type 'exit' when done.",file=stderr)
            if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt
                os.putenv('debian_chroot',pull)
            subprocess.call([BASH,'-i'])

        second_sha512 = tree_sha512sum()
        if first_sha512 != second_sha512:
            print("ERROR: Tree hash changed unexpectedly",file=stderr)
            exit(8)

        # Sign the merge commit.
        print_merge_details(pull, title, branch, base_branch, head_branch)
        while True:
            reply = ask_prompt("Type 's' to sign off on the above merge, or 'x' to reject and exit.").lower()
            if reply == 's':
                try:
                    subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
                    break
                except subprocess.CalledProcessError as e:
                    print("Error signing, exiting.",file=stderr)
                    exit(1)
            elif reply == 'x':
                print("Not signing off on merge, exiting.",file=stderr)
                exit(1)

        # Put the result in branch.
        subprocess.check_call([GIT,'checkout','-q',branch])
        subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch])
    finally:
        # Clean up temporary branches.
        subprocess.call([GIT,'checkout','-q',branch])
        subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull)

    # Push the result.
while True: reply = ask_prompt("Type 'push' to push the result to %s, branch %s, or 'x' to exit without pushing." % (host_repo,branch)).lower() if reply == 'push': subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch]) break elif reply == 'x': exit(1) if __name__ == '__main__': main()
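
# Usage notes (hypothetical example values; the configuration keys themselves are
# the ones read via git_config_get() above):
#
#   git config githubmerge.repository <owner>/<repo>   # mandatory
#   git config --global user.signingkey <gpg-key-id>   # mandatory
#   git config githubmerge.host [email protected]         # optional (default shown)
#   git config githubmerge.testcmd "make check"        # optional test command
#
# then run this script (commonly saved as github-merge.py) with a pull request ID
# and an optional destination branch:
#
#   ./github-merge.py 1234 [branch]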
#!/usr/bin/env python """ map.py State Estimation and Analysis for PYthon Utilities for dealing with basemap plotting. These routnes are simply abstractions over the existing basemap to make it quicker for generating basemap plots and figures. Examples ------- Assume you have longitude, latitude, and sst values: >>> m=seapy.mapping.map(llcrnrlon=lon[0,0],llcrnrlat=lat[0,0], >>> urcrnrlon=lon[-1,-1],urcrnrlat=lat[-1,-1],dlat=2,dlon=2) >>> m.pcolormesh(lon,lat,sst,vmin=22,vmax=26,cmap=plt.cm.bwr) >>> m.land() >>> m.colorbar(label="Sea Surface Temp [$^\circ$C]",cticks=[22,23,24,25,26]) >>> m.ax.patch.set_facecolor("aqua") >>> m.ax.patch.set_alpha(1) >>> m.fig.patch.set_alpha(0.0) >>> m.fig.savefig("sst.png",dpi=100) Written by Brian Powell on 9/4/14 Copyright (c)2017 University of Hawaii under the BSD-License. """ import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap from seapy.model import asgrid def gen_coastline(lon, lat, bathy, depth=0): """ Given lon, lat, and bathymetry, generate vectors of line segments of the coastline. This can be exported to matlab (via savemat) to be used with the 'editmask' routine for creating grid masks. Input ----- lon : array, longitudes of bathymetry locations lat : array, latitudes of bathymetry locations bathy : array, bathymetry (negative for ocean, positive for land) values depth : float, depth to use as the definition of the coast Returns ------- lon : ndarray, vector of coastlines, separated by nan (matlab-style) lat : ndarray, vector of coastlines, separated by nan (matlab-style) """ CS = plt.contour(lon, lat, bathy, [depth - 0.25, depth + 0.25]) lon = list() lat = list() for col in CS.collections: for path in col.get_paths(): lon.append(path.vertices[:, 0]) lon.append(np.nan) lat.append(path.vertices[:, 1]) lat.append(np.nan) return (np.hstack(lon), np.hstack(lat)) class map(object): def __init__(self, grid=None, llcrnrlon=-180, llcrnrlat=-40, urcrnrlon=180, urcrnrlat=40, proj='lcc', resolution='c', figsize=(8., 6.), dlat=1, dlon=2, fig=None, ax=None, fill_color="aqua"): """ map class for abstracting the basemap methods for quick and easy creation of geographically referenced data figures Parameters ---------- grid: seapy.model.grid or string, optional: grid to use to define boundaries llcrnrlon: float, optional longitude of lower, left corner llcrnrlat: float, optional latitude of lower, left corner urcrnrlon: float, optional longitude of upper, right corner urcrnrlat: float, optional latitude of upper, right corner proj: string, optional projection to use for map resolution: character resolution to use for coastline, etc. From Basemap: 'c' (crude), 'l' (low), 'i' (intermediate), 'h' (high), 'f' (full), or None figsize: list, optional dimensions to use for creation of figure dlat: float, optional interval to mark latitude lines (e.g., if dlat=0.5 every 0.5deg mark) dlon: float, optional interval to mark longitude lines (e.g., if dlon=0.5 every 0.5deg mark) fig: matplotlib.pyplot.figure object, optional If you want to plot on a pre-configured figure, pass the figure object along with the axis object. ax: matplotlib.pyplot.axis object, optional If you want to plot on a pre-configured figure, pass the axis object along with the figure object. 
fill_color: string, optional The color to use for the axis background Returns ------- None """ if grid is not None: grid = asgrid(grid) llcrnrlat = np.min(grid.lat_rho) urcrnrlat = np.max(grid.lat_rho) llcrnrlon = np.min(grid.lon_rho) urcrnrlon = np.max(grid.lon_rho) self.basemap = Basemap(llcrnrlon=llcrnrlon, llcrnrlat=llcrnrlat, urcrnrlon=urcrnrlon, urcrnrlat=urcrnrlat, projection=proj, lat_0=urcrnrlat - (urcrnrlat - llcrnrlat) / 2., lon_0=urcrnrlon - (urcrnrlon - llcrnrlon) / 2., resolution=resolution, area_thresh=0.0, ax=ax) self.figsize = figsize self.dlon = dlon self.dlat = dlat self.fig = fig self.ax = ax self.fill_color = fill_color reset = True if fig is None else False self.new_figure(reset=reset) def new_figure(self, fill_color=None, reset=False): """ Create or update a figure for plotting Parameters ---------- fill_color: string, optional Color to fill the background of the axes with reset: bool, optional Reset the figure """ if reset: if self.ax: self.ax.set_axis_off() if self.fig: plt.close(self.fig) if self.fig is None or self.ax is None: self.fig = plt.figure(figsize=self.figsize) self.ax = self.fig.add_axes([-0.01, 0.25, 1.01, 0.7]) if fill_color is None: fill_color = self.fill_color self.basemap.drawmapboundary(fill_color=fill_color) # Create the longitude lines nticks = int((self.basemap.urcrnrlon - self.basemap.llcrnrlon) / self.dlon) md = np.mod(self.basemap.llcrnrlon, self.dlon) if md: slon = self.basemap.llcrnrlon + self.dlon - md else: slon = self.basemap.llcrnrlon nticks += 1 lon_lines = np.arange(nticks) * self.dlon + slon self.basemap.drawmeridians(lon_lines, color="0.5", linewidth=0.25, dashes=[1, 1, 0.1, 1], labels=[0, 0, 0, 1], fontsize=12) # Create the latitude lines nticks = int((self.basemap.urcrnrlat - self.basemap.llcrnrlat) / self.dlat) md = np.mod(self.basemap.llcrnrlat, self.dlat) if md: slat = self.basemap.llcrnrlat + self.dlat - md else: slat = self.basemap.llcrnrlat nticks += 1 lat_lines = np.arange(nticks) * self.dlat + slat self.basemap.drawparallels(lat_lines, color="0.5", linewidth=0.25, dashes=[1, 1, 0.1, 1], labels=[1, 0, 0, 0], fontsize=12) def land(self, color="black"): """ Draw the land mask Parameters ---------- color: string, optional color to draw the mask with """ self.basemap.drawcoastlines() self.basemap.drawcountries() self.basemap.fillcontinents(color=color) def zoom(self, xrange, yrange): """ zoom the figure to a specified lat, lon range Parameters ---------- xrange: array minimum and maximum longitudes to display yrange: array minimum and maximum latitudes to display """ x, y = self.basemap(xrange, yrange) self.ax.set_xlim(x) self.ax.set_ylim(y) self.fig.canvas.draw() def pcolormesh(self, lon, lat, data, **kwargs): """ pcolormesh field data onto our geographic plot Parameters ---------- lon: array Longitude field for data lat: array Latitude field for data data: array data to pcolor **kwargs: arguments, optional additional arguments to pass to pcolor """ # Pcolor requires a modification to the locations to line up with # the geography dlon = lon * 0 dlat = lat * 0 dlon[:, 0:-1] = lon[:, 1:] - lon[:, 0:-1] dlat[0:-1, :] = lat[1:, :] - lat[0:-1, :] x, y = self.basemap(lon - dlon * 0.5, lat - dlat * 0.5) self.pc = self.ax.pcolormesh(x, y, data, **kwargs) def scatter(self, lon, lat, data, **kwargs): """ scatter plot data onto our geographic plot Parameters ---------- lon: array Longitude field for data lat: array Latitude field for data data: array data to pcolor **kwargs: arguments, optional additional arguments to pass to pcolor """ 
x, y = self.basemap(lon, lat) self.pc = self.ax.scatter(x, y, c=data, **kwargs) def colorbar(self, label=None, cticks=None, **kwargs): """ Display a colorbar on the figure Parameters ---------- label: string, optional Colorbar label title cticks: array, optional Where to place the tick marks and values for the colorbar **kwargs: arguments, optional additional arguments to pass to colorbar """ self.cax = self.fig.add_axes([0.25, 0.16, 0.5, 0.03]) self.cb = plt.colorbar(self.pc, cax=self.cax, orientation="horizontal", ticks=cticks, **kwargs) self.basemap.set_axes_limits(ax=self.ax) if label is not None: self.cb.set_label(label)
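

# Illustrative usage sketch (not from the original module), modeled on the
# example in the module docstring but with a synthetic SST field so it is
# self-contained. The array values and output filename are placeholders;
# matplotlib and Basemap must be installed for this to run.
if __name__ == "__main__":
    lon, lat = np.meshgrid(np.arange(-170.0, -150.0, 0.25),
                           np.arange(15.0, 25.0, 0.25))
    sst = 24.0 + 2.0 * np.sin(np.radians(lon)) * np.cos(np.radians(lat))
    m = map(llcrnrlon=lon[0, 0], llcrnrlat=lat[0, 0],
            urcrnrlon=lon[-1, -1], urcrnrlat=lat[-1, -1], dlat=2, dlon=5)
    m.pcolormesh(lon, lat, sst, vmin=22, vmax=26, cmap=plt.cm.bwr)
    m.land()
    m.colorbar(label="Sea Surface Temp [$^\circ$C]", cticks=[22, 23, 24, 25, 26])
    m.fig.savefig("sst_example.png", dpi=100)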
#!/usr/bin/env python """Operations on a series of points, indexed by time. """ import copy from grr.lib import rdfvalue NORMALIZE_MODE_GAUGE = 1 NORMALIZE_MODE_COUNTER = 2 class Timeseries(object): """Timeseries contains a sequence of points, each with a timestamp.""" def __init__(self, initializer=None): """Create a timeseries with an optional initializer. Args: initializer: An optional Timeseries to clone. Raises: RuntimeError: If initializer is not understood. """ if initializer is None: self.data = [] return if isinstance(initializer, Timeseries): self.data = copy.deepcopy(initializer.data) return raise RuntimeError("Unrecognized initializer.") def _NormalizeTime(self, time): """Normalize a time to be an int measured in microseconds.""" if isinstance(time, rdfvalue.RDFDatetime): return time.AsMicroSecondsFromEpoch() if isinstance(time, rdfvalue.Duration): return time.microseconds return int(time) def Append(self, value, timestamp): """Adds value at timestamp. Values must be added in order of increasing timestamp. Args: value: An observed value. timestamp: The timestamp at which value was observed. Raises: RuntimeError: If timestamp is smaller than the previous timstamp. """ timestamp = self._NormalizeTime(timestamp) if self.data and timestamp < self.data[-1][1]: raise RuntimeError("Next timestamp must be larger.") self.data.append([value, timestamp]) def MultiAppend(self, value_timestamp_pairs): """Adds multiple value<->timestamp pairs. Args: value_timestamp_pairs: Tuples of (value, timestamp). """ for value, timestamp in value_timestamp_pairs: self.Append(value, timestamp) def FilterRange(self, start_time=None, stop_time=None): """Filter the series to lie between start_time and stop_time. Removes all values of the series which are outside of some time range. Args: start_time: If set, timestamps before start_time will be dropped. stop_time: If set, timestamps at or past stop_time will be dropped. """ start_time = self._NormalizeTime(start_time) stop_time = self._NormalizeTime(stop_time) self.data = [ p for p in self.data if (start_time is None or p[1] >= start_time ) and (stop_time is None or p[1] < stop_time) ] def Normalize(self, period, start_time, stop_time, mode=NORMALIZE_MODE_GAUGE): """Normalize the series to have a fixed period over a fixed time range. Supports two modes, depending on the type of data: NORMALIZE_MODE_GAUGE: support gauge values. If multiple original data points lie within an output interval, the output value is an average of the original data point. if no original data points lie within an output interval, the output value is None. NORMALIZE_MODE_COUNTER: supports counter values. Assumes that the sequence is already increasing (typically, MakeIncreasing will have been called). Each output value is the largest value seen during or before the corresponding output interval. Args: period: The desired time between points. Should be an rdfvalue.Duration or a count of microseconds. start_time: The first timestamp will be at start_time. Should be an rdfvalue.RDFDatetime or a count of microseconds since epoch. stop_time: The last timestamp will be at stop_time - period. Should be an rdfvalue.RDFDatetime or a count of microseconds since epoch. mode: The type of normalization to perform. May be NORMALIZE_MODE_GAUGE or NORMALIZE_MODE_COUNTER. Raises: RuntimeError: In case the sequence timestamps are misordered. 
""" period = self._NormalizeTime(period) start_time = self._NormalizeTime(start_time) stop_time = self._NormalizeTime(stop_time) if not self.data: return self.FilterRange(start_time, stop_time) grouped = {} for value, timestamp in self.data: offset = timestamp - start_time shifted_offset = offset - (offset % period) grouped.setdefault(shifted_offset, []).append(value) self.data = [] last_value = None for offset in range(0, stop_time - start_time, period): g = grouped.get(offset) if mode == NORMALIZE_MODE_GAUGE: v = None if g: v = float(sum(g)) / float(len(g)) self.data.append([v, offset + start_time]) else: if g: for v in g: if v < last_value: raise RuntimeError("Next value must not be smaller.") last_value = v self.data.append([last_value, offset + start_time]) def MakeIncreasing(self): """Makes the time series increasing. Assumes that series is based on a counter which is occasionally reset, and using this assumption converts the sequence to estimate the total number of counts which occurred. NOTE: Could give inacurate numbers in either of the following cases: 1) Multiple resets occur between samples. 2) A reset is followed by a spike larger than the previous level. """ offset = 0 last_value = None for p in self.data: if last_value and last_value > p[0]: # Assume that it was only reset once. offset += last_value last_value = p[0] if offset: p[0] += offset def ToDeltas(self): """Convert the sequence to the sequence of differences between points. The value of each point v[i] is replaced by v[i+1] - v[i], except for the last point which is dropped. """ if len(self.data) < 2: self.data = [] return for i in range(0, len(self.data) - 1): if self.data[i][0] is None or self.data[i + 1][0] is None: self.data[i][0] = None else: self.data[i][0] = self.data[i + 1][0] - self.data[i][0] del self.data[-1] def Add(self, other): """Add other to self pointwise. Requires that both self and other are of the same length, and contain identical timestamps. Typically this means that Normalize has been called on both with identical time parameters. Args: other: The sequence to add to self. Raises: RuntimeError: other does not contain the same timestamps as self. """ if len(self.data) != len(other.data): raise RuntimeError("Can only add series of identical lengths.") for i in range(len(self.data)): if self.data[i][1] != other.data[i][1]: raise RuntimeError("Timestamp mismatch.") if self.data[i][0] is None and other.data[i][0] is None: continue self.data[i][0] = (self.data[i][0] or 0) + (other.data[i][0] or 0) def Rescale(self, multiplier): """Multiply pointwise by multiplier.""" for p in self.data: if p[0] is not None: p[0] *= multiplier def Mean(self): """Return the arithmatic mean of all values.""" values = [v for v, _ in self.data if v is not None] if not values: return None return sum(values) / len(values)
import cookielib import urllib import urllib2 import os import pickle import pymongo import pygal import locale from bson.son import SON from bs4 import BeautifulSoup from datetime import datetime, timedelta from flask import Flask, session, render_template, request from pprint import pprint app = Flask(__name__) user_agent = u"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; " + \ u"rv:1.9.2.11) Gecko/20101012 Firefox/3.6.11" def getCases(html, name, names): for row in html.find(class_="nameList").find_all('tr'): cols = row.find_all('td') if len(cols) > 4: if name not in cols[1].string: return True names.append({ 'caseNumber': cols[0].span.a.string.strip(), 'name': cols[1].string.strip(), 'charge': cols[2].string.strip(), 'date': cols[3].string.strip(), 'status': cols[4].string.strip() }) elif len(cols) > 3: if name not in cols[1].get_text() and name not in \ cols[2].get_text(): return True names.append({ 'caseNumber': cols[0].span.a.string.strip(), 'name': cols[1].get_text(), 'otherName': cols[2].get_text(), 'status': cols[3].string.strip() }) return False def lookupCases(opener, name, court, division): cases = [] data = urllib.urlencode({ 'category': division, 'lastName': name, 'courtId': court, 'submitValue': 'N'}) cases_url = u"http://ewsocis1.courts.state.va.us/CJISWeb/Search.do" searchResults = opener.open(cases_url, data) html = searchResults.read() done = getCases(BeautifulSoup(html), name, cases) data = urllib.urlencode({ 'courtId': court, 'pagelink': 'Next', 'lastCaseProcessed': '', 'firstCaseProcessed': '', 'lastNameProcessed': '', 'firstNameProcessed': '', 'category': division, 'firstCaseSerialNumber': 0, 'lastCaseSerialNumber': 0, 'searchType': '', 'emptyList': ''}) count = 1 search_url = u"http://ewsocis1.courts.state.va.us/CJISWeb/Search.do" while(not done and count < 6): search_results = opener.open(search_url, data) html = search_results.read() done = getCases(BeautifulSoup(html), name, cases) count += 1 return cases def getCasesInVirginiaBeach(html, name, names): resultsTable = html.find(class_="tablesorter") if resultsTable is None: return True for row in resultsTable.find('tbody').find_all('tr'): cols = row.find_all('td') if len(cols) > 5: names.append({ 'caseNumber': cols[0].a.string or '', 'link': 'https://vbcircuitcourt.com' + cols[0].a['href'], 'otherName': cols[1].string or '', 'caseStyle': ''.join(cols[2].findAll(text=True)) .replace('\r\n', ' ') or '', 'name': ''.join(cols[3].findAll(text=True)) .replace('\r\n', ' ') or '', 'partyType': cols[4].string.capitalize() + ':', 'status': cols[5].string or '' }) return False def lookupCasesInVirginiaBeach(name, division): cases = [] url = u'https://vbcircuitcourt.com/public/search.do?searchType=1' + \ u'&indexName=publiccasesearch&q=' + name.replace(' ', '+') + \ u'%20FilterByCourtType:"' + division + u'"' searchResults = urllib2.urlopen(url) html = searchResults.read() done = getCasesInVirginiaBeach(BeautifulSoup(html), name, cases) count = 1 while(not done and count < 6): searchResults = urllib2.urlopen(url + '&start=' + str(count * 30)) html = searchResults.read() done = getCasesInVirginiaBeach(BeautifulSoup(html), name, cases) count += 1 return cases @app.route("/search/<name>/court/<path:court>") def searchCourt(name, court): if 'cookies' not in session: return "Error. Please reload the page." 
courtId = court[:3] courtSearch = {'name': court[5:], 'id': courtId} db = pymongo.MongoClient(os.environ['MONGO_URI'])['va-circuit-court-search'] cases = db['cases'].find_one({'name': name, 'court': court}) if cases is not None: print 'Found cached search' courtSearch['criminalCases'] = cases['criminalCases'] courtSearch['civilCases'] = cases['civilCases'] elif 'Virginia Beach' in court: courtSearch['criminalCases'] = lookupCasesInVirginiaBeach(name, 'CRIMINAL') courtSearch['civilCases'] = lookupCasesInVirginiaBeach(name, 'CIVIL') else: cookieJar = cookielib.CookieJar() opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar)) opener.addheaders = [('User-Agent', user_agent)] for cookie in pickle.loads(session['cookies']): cookieJar.set_cookie(cookie) data = urllib.urlencode({ 'courtId': courtId, 'courtType': 'C', 'caseType': 'ALL', 'testdos': False, 'sessionCreate': 'NEW', 'whichsystem': court}) place_url = u"http://ewsocis1.courts.state.va.us/CJISWeb/MainMenu.do" opener.open(place_url, data) courtSearch['criminalCases'] = lookupCases(opener, name.upper(), courtId, 'R') courtSearch['civilCases'] = lookupCases(opener, name.upper(), courtId, 'CIVIL') if cases is None: print 'Caching search' db['cases'].insert({ 'name': name, 'court': court, 'criminalCases': courtSearch['criminalCases'], 'civilCases': courtSearch['civilCases'], 'dateSaved': datetime.utcnow() }) return render_template('court.html', court=courtSearch) @app.route("/search/<name>") def search(name): db = pymongo.MongoClient(os.environ['MONGO_URI'])['va-circuit-court-search'] db['cases'].remove({ 'dateSaved': {'$lt': datetime.utcnow() + timedelta(days=-3)} }) cookieJar = cookielib.CookieJar() opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookieJar)) opener.addheaders = [('User-Agent', user_agent)] home = opener.open('http://ewsocis1.courts.state.va.us/CJISWeb/circuit.jsp') session['cookies'] = pickle.dumps(list(cookieJar)) courts = [] html = BeautifulSoup(home.read()) for option in html.find_all('option'): courts.append({ 'fullName': option['value'], 'id': option['value'][:3], 'name': option['value'][5:] }) data = {'name': name.upper(), 'courts': courts} cases = db['cases'].find({'name': name.upper()}) for case in cases: for court in data['courts']: if case['court'] == court['fullName']: court['criminalCases'] = case['criminalCases'] court['civilCases'] = case['civilCases'] return render_template('search.html', data=data) @app.route("/") def index(): return render_template('index.html') @app.route("/charges") def charges(): client = pymongo.MongoClient(os.environ['MONGO_URI']) db = client.va_circuit_court charges = db.criminal_cases.aggregate([ {'$group':{ '_id': { 'CodeSection': '$CodeSection', 'Race': '$Race' }, 'charge': {'$first': '$Charge'}, 'court': {'$first': '$Court'}, 'caseNumber': {'$first': '$CaseNumber'}, 'avgSentence': {'$avg': '$SentenceTimeDays'}, 'avgSentenceSuspended': {'$avg': '$SentenceSuspendedDays'}, 'count': {'$sum': 1} }}, {'$group':{ '_id': { 'CodeSection': '$_id.CodeSection' }, 'races': {'$push': { 'race': '$_id.Race', 'avgSentence': '$avgSentence', 'avgSentenceSuspended': '$avgSentenceSuspended', 'count': '$count' }}, 'count': {'$sum': '$count'}, 'avgSentence': {'$avg': '$avgSentence'}, 'avgSentenceSuspended': {'$avg': '$avgSentenceSuspended'}, 'charge': {'$first': '$charge'}, 'court': {'$first': '$court'}, 'caseNumber': {'$first': '$caseNumber'} }}, {'$match' : { 'count' : {'$gt' : 50} }}, {'$sort': SON([ ('_id.CodeSection', 1) ])} ])['result'] charges_amended = 
db.criminal_cases.aggregate([ {'$match': {'AmendedCharge': {'$ne': None}}}, {'$group':{ '_id': { 'CodeSection': '$CodeSection', 'Race': '$Race' }, 'charge': {'$first': '$Charge'}, 'court': {'$first': '$Court'}, 'caseNumber': {'$first': '$CaseNumber'}, 'avgSentence': {'$avg': '$SentenceTimeDays'}, 'avgSentenceSuspended': {'$avg': '$SentenceSuspendedDays'}, 'count': {'$sum': 1} }}, {'$group':{ '_id': { 'CodeSection': '$_id.CodeSection' }, 'races': {'$push': { 'race': '$_id.Race', 'avgSentence': '$avgSentence', 'avgSentenceSuspended': '$avgSentenceSuspended', 'count': '$count' }}, 'count': {'$sum': '$count'}, 'avgSentence': {'$avg': '$avgSentence'}, 'avgSentenceSuspended': {'$avg': '$avgSentenceSuspended'}, 'charge': {'$first': '$charge'}, 'court': {'$first': '$court'}, 'caseNumber': {'$first': '$caseNumber'} }}, {'$sort': SON([ ('_id.CodeSection', 1) ])} ])['result'] for charge in charges: charge['amended'] = { 'count': 0, 'avgSentence': 0, 'avgSentenceSuspended': 0, 'races': [] } for charge_amended in charges_amended: if charge_amended['_id']['CodeSection'] == charge['_id']['CodeSection']: charge['amended'] = charge_amended break charge['races_dict'] = { 'White Caucasian (Non-Hispanic)': { 'count': 0, 'avgSentence': 0, 'avgSentenceSuspended': 0 }, 'Black (Non-Hispanic)': { 'count': 0, 'avgSentence': 0, 'avgSentenceSuspended': 0 } } charge['amended']['races_dict'] = { 'White Caucasian (Non-Hispanic)': { 'count': 0, 'avgSentence': 0, 'avgSentenceSuspended': 0 }, 'Black (Non-Hispanic)': { 'count': 0, 'avgSentence': 0, 'avgSentenceSuspended': 0 } } for race in charge['races']: if 'race' in race: charge['races_dict'][race['race']] = race for race in charge['amended']['races']: if 'race' in race: charge['amended']['races_dict'][race['race']] = race return render_template('charges.html', charges=charges, charges_amended=charges_amended) @app.route("/opendata") def open_data(): client = pymongo.MongoClient(os.environ['MONGO_CASES_URI']) db = client.va_circuit_court_cases locale.resetlocale() case_number_count = locale.format("%d", db.case_numbers.count(), grouping=True) data = { 'case_number_count': case_number_count } return render_template('open_data.html', data=data) @app.route("/opendata/progress") def open_data_progress(): client = pymongo.MongoClient(os.environ['MONGO_CASES_URI']) db = client.va_circuit_court_cases data = db.case_numbers.aggregate([ {'$sort': SON([ ('court', 1), ('name', 1) ])}, {'$group':{ '_id': { 'court': '$court' }, 'firstName': {'$first': '$name'}, 'lastName': {'$last': '$name'}, 'count': {'$sum': 1} }}, {'$sort': SON([ ('_id.court', 1) ])} ])['result'] chart = pygal.HorizontalBar(style=pygal.style.RedBlueStyle, show_legend=False, height=1500) chart.x_labels = [x['_id']['court'] for x in data][::-1] chart.add('', [x['count'] for x in data][::-1]) return chart.render() + str(render_template('open_data_progress.html', data=data)) @app.route("/sampleLetter") def sample_letter(): return render_template('sample_letter.html') @app.route("/stats") def stats(): return render_template('stats.html') @app.route("/stats/graph", methods=['POST']) def graph(): categories = request.get_json(force=True)['categories'] print categories category = categories[0]['category'] sub_category = categories[1]['category'] sort_by = categories[0]['sort'] if sort_by == 'alpha': sort_by = '_id.' 
+ category sort_direction = int(categories[0]['sortDirection']) sort = (sort_by, sort_direction) filters = categories[0]['filter'] first_group_stage = {'$group':{ '_id': { category: '$' + category }, 'count': {'$sum': 1} }} second_group_stage = None if sub_category != '': first_group_stage['$group']['_id'][sub_category] = '$' + sub_category second_group_stage = {'$group':{ '_id': { category: '$_id.' + category, }, 'data': {'$push': { sub_category: '$_id.' + sub_category, 'count': '$count' }}, 'count': {'$sum': '$count'} }} sort_stage = {'$sort': SON([ sort ])} client = pymongo.MongoClient(os.environ['MONGO_URI']) db = client.va_circuit_court data = None if second_group_stage is None: data = db.criminal_cases.aggregate([ first_group_stage, sort_stage ])['result'] else: data = db.criminal_cases.aggregate([ first_group_stage, second_group_stage, sort_stage ])['result'] sub_category_names = [] if sub_category != '': for group in data: for sub_category_group in group['data']: sub_category_name = 'None' if sub_category in sub_category_group: sub_category_name = sub_category_group[sub_category] if sub_category_name not in sub_category_names: sub_category_names.append(sub_category_name) group[sub_category_name] = sub_category_group['count'] print pprint(data) pprint(sub_category_names) values = [str(x['_id'][category]) for x in data] labels = [v for v in values if v not in filters][:20] bar_chart = pygal.Bar(height=450, style=LightStyle, x_label_rotation=70) bar_chart.title = 'VA Circuit Court Cases in 2014' bar_chart.x_labels = labels if sub_category == '': bar_chart.add(category, [x['count'] for x in data if str(x['_id'][category]) not in filters][:20]) else: for item in sub_category_names[:10]: item_counts = [] for x in data: if str(x['_id'][category]) in filters: continue if item in x: item_counts.append(x[item]) else: item_counts.append(0) bar_chart.add(item, item_counts[:20]) return str(render_template('stats_filters.html', category=category, filter_values=sorted(values), filters_unchecked=filters)) + \ bar_chart.render() if __name__ == "__main__": # Bind to PORT if defined, otherwise default to 5000. port = int(os.environ.get('PORT', 5000)) app.secret_key = 'doesnt-need-to-be-secret' app.run(host='0.0.0.0', port=port, debug=True)
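
# Local run sketch (assumptions, not from the original source): the app reads
# MongoDB connection strings from the environment and binds to $PORT
# (default 5000). The URIs and the module filename below are placeholders.
#
#   export MONGO_URI="mongodb://localhost:27017/"
#   export MONGO_CASES_URI="mongodb://localhost:27017/"
#   python app.py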
#!/usr/bin/env python # Copyright (c) 2012 Cloudera, Inc. All rights reserved. import os from tests.beeswax.impala_beeswax import ImpalaBeeswaxException from tests.common.test_vector import * from tests.common.impala_test_suite import * from tests.common.impala_cluster import ImpalaCluster from tests.common.skip import SkipIfS3 from tests.util.filesystem_utils import get_fs_path, IS_S3 from subprocess import call class TestUdfs(ImpalaTestSuite): @classmethod def get_workload(cls): return 'functional-query' @classmethod def add_test_dimensions(cls): super(TestUdfs, cls).add_test_dimensions() # Without limiting the test suite to a single exec option, the tests will fail # because the same test case may be executed in parallel with different exec option # values leading to conflicting DDL ops. cls.TestMatrix.add_dimension(create_single_exec_option_dimension()) # There is no reason to run these tests using all dimensions. cls.TestMatrix.add_dimension(create_uncompressed_text_dimension(cls.get_workload())) def test_native_functions(self, vector): database = 'native_function_test' self.__load_functions( self.create_udfs_template, vector, database, get_fs_path('/test-warehouse/libTestUdfs.so')) self.__load_functions( self.create_udas_template, vector, database, get_fs_path('/test-warehouse/libudasample.so')) self.run_test_case('QueryTest/udf', vector, use_db=database) if not IS_S3: # S3 doesn't support INSERT self.run_test_case('QueryTest/udf-init-close', vector, use_db=database) self.run_test_case('QueryTest/uda', vector, use_db=database) def test_ir_functions(self, vector): database = 'ir_function_test' self.__load_functions( self.create_udfs_template, vector, database, get_fs_path('/test-warehouse/test-udfs.ll')) self.run_test_case('QueryTest/udf', vector, use_db=database) if not IS_S3: # S3 doesn't support INSERT self.run_test_case('QueryTest/udf-init-close', vector, use_db=database) def test_udf_errors(self, vector): self.run_test_case('QueryTest/udf-errors', vector) def test_udf_invalid_symbol(self, vector): """ IMPALA-1642: Impala crashes if the symbol for a Hive UDF doesn't exist Crashing is non-deterministic so we run the UDF several times.""" drop_fn_stmt = "drop function if exists default.fn_invalid_symbol(STRING)" create_fn_stmt = ("create function default.fn_invalid_symbol(STRING) returns " "STRING LOCATION '%s' SYMBOL='not.a.Symbol'" % get_fs_path('/test-warehouse/impala-hive-udfs.jar')) query = "select default.fn_invalid_symbol('test')" self.client.execute(drop_fn_stmt) try: self.client.execute(create_fn_stmt) for _ in xrange(5): ex = self.execute_query_expect_failure(self.client, query) assert "Unable to find class" in str(ex) finally: self.client.execute(drop_fn_stmt) def test_hive_udfs(self, vector): #self.client.execute('create database if not exists udf_test') #self.client.execute('create database if not exists uda_test') self.run_test_case('QueryTest/load-hive-udfs', vector) self.run_test_case('QueryTest/hive-udf', vector) def test_libs_with_same_filenames(self, vector): self.run_test_case('QueryTest/libs_with_same_filenames', vector) def test_udf_update_via_drop(self, vector): """Test updating the UDF binary without restarting Impala. Dropping the function should remove the binary from the local cache.""" # Run with sync_ddl to guarantee the drop is processed by all impalads. 
exec_options = vector.get_value('exec_option') exec_options['sync_ddl'] = 1 old_udf = os.path.join(os.environ['IMPALA_HOME'], 'testdata/udfs/impala-hive-udfs.jar') new_udf = os.path.join(os.environ['IMPALA_HOME'], 'tests/test-hive-udfs/target/test-hive-udfs-1.0.jar') udf_dst = get_fs_path('/test-warehouse/impala-hive-udfs2.jar') drop_fn_stmt = 'drop function if exists default.udf_update_test_drop()' create_fn_stmt = "create function default.udf_update_test_drop() returns string "\ "LOCATION '" + udf_dst + "' SYMBOL='com.cloudera.impala.TestUpdateUdf'" query_stmt = "select default.udf_update_test_drop()" # Put the old UDF binary on HDFS, make the UDF in Impala and run it. call(["hadoop", "fs", "-put", "-f", old_udf, udf_dst]) self.execute_query_expect_success(self.client, drop_fn_stmt, exec_options) self.execute_query_expect_success(self.client, create_fn_stmt, exec_options) self.__run_query_all_impalads(exec_options, query_stmt, ["Old UDF"]) # Update the binary, drop and create the function again. The new binary should # be running. call(["hadoop", "fs", "-put", "-f", new_udf, udf_dst]) self.execute_query_expect_success(self.client, drop_fn_stmt, exec_options) self.execute_query_expect_success(self.client, create_fn_stmt, exec_options) self.__run_query_all_impalads(exec_options, query_stmt, ["New UDF"]) def test_udf_update_via_create(self, vector): """Test updating the UDF binary without restarting Impala. Creating a new function from the library should refresh the cache.""" # Run with sync_ddl to guarantee the create is processed by all impalads. exec_options = vector.get_value('exec_option') exec_options['sync_ddl'] = 1 old_udf = os.path.join(os.environ['IMPALA_HOME'], 'testdata/udfs/impala-hive-udfs.jar') new_udf = os.path.join(os.environ['IMPALA_HOME'], 'tests/test-hive-udfs/target/test-hive-udfs-1.0.jar') udf_dst = get_fs_path('/test-warehouse/impala-hive-udfs3.jar') old_function_name = "udf_update_test_create1" new_function_name = "udf_update_test_create2" drop_fn_template = 'drop function if exists default.%s()' self.execute_query_expect_success( self.client, drop_fn_template % old_function_name, exec_options) self.execute_query_expect_success( self.client, drop_fn_template % new_function_name, exec_options) create_fn_template = "create function default.%s() returns string "\ "LOCATION '" + udf_dst + "' SYMBOL='com.cloudera.impala.TestUpdateUdf'" query_template = "select default.%s()" # Put the old UDF binary on HDFS, make the UDF in Impala and run it. call(["hadoop", "fs", "-put", "-f", old_udf, udf_dst]) self.execute_query_expect_success( self.client, create_fn_template % old_function_name, exec_options) self.__run_query_all_impalads( exec_options, query_template % old_function_name, ["Old UDF"]) # Update the binary, and create a new function using the binary. The new binary # should be running. 
call(["hadoop", "fs", "-put", "-f", new_udf, udf_dst]) self.execute_query_expect_success( self.client, create_fn_template % new_function_name, exec_options) self.__run_query_all_impalads( exec_options, query_template % new_function_name, ["New UDF"]) # The old function should use the new library now self.__run_query_all_impalads( exec_options, query_template % old_function_name, ["New UDF"]) def test_drop_function_while_running(self, vector): self.client.execute("drop function if exists default.drop_while_running(BIGINT)") self.client.execute("create function default.drop_while_running(BIGINT) returns "\ "BIGINT LOCATION '%s' SYMBOL='Identity'" % get_fs_path('/test-warehouse/libTestUdfs.so')) query = \ "select default.drop_while_running(l_orderkey) from tpch.lineitem limit 10000"; # Run this query asynchronously. handle = self.execute_query_async(query, vector.get_value('exec_option'), table_format=vector.get_value('table_format')) # Fetch some rows from the async query to make sure the UDF is being used results = self.client.fetch(query, handle, 1) assert results.success assert len(results.data) == 1 # Drop the function while the original query is running. self.client.execute("drop function default.drop_while_running(BIGINT)") # Fetch the rest of the rows, this should still be able to run the UDF results = self.client.fetch(query, handle, -1) assert results.success assert len(results.data) == 9999 # Run serially because this will blow the process limit, potentially causing other # queries to fail @pytest.mark.execute_serially def test_mem_limits(self, vector): # Set the mem limit high enough that a simple scan can run mem_limit = 1024 * 1024 vector.get_value('exec_option')['mem_limit'] = mem_limit try: self.run_test_case('QueryTest/udf-mem-limit', vector) assert False, "Query was expected to fail" except ImpalaBeeswaxException, e: self.__check_exception(e) try: self.run_test_case('QueryTest/uda-mem-limit', vector) assert False, "Query was expected to fail" except ImpalaBeeswaxException, e: self.__check_exception(e) def __check_exception(self, e): # The interesting exception message may be in 'e' or in its inner_exception # depending on the point of query failure. 
if 'Memory limit exceeded' in str(e) or 'Cancelled' in str(e): return if e.inner_exception is not None\ and ('Memory limit exceeded' in e.inner_exception.message or 'Cancelled' not in e.inner_exception.message): return raise e def __run_query_all_impalads(self, exec_options, query, expected): impala_cluster = ImpalaCluster() for impalad in impala_cluster.impalads: client = impalad.service.create_beeswax_client() result = self.execute_query_expect_success(client, query, exec_options) assert result.data == expected def __load_functions(self, template, vector, database, location): queries = template.format(database=database, location=location) # Split queries and remove empty lines queries = [q for q in queries.split(';') if q.strip()] exec_options = vector.get_value('exec_option') for query in queries: if query.strip() == '': continue result = self.execute_query_expect_success(self.client, query, exec_options) assert result is not None # Create test UDA functions in {database} from library {location} create_udas_template = """ drop function if exists {database}.test_count(int); drop function if exists {database}.hll(int); drop function if exists {database}.sum_small_decimal(decimal(9,2)); create database if not exists {database}; create aggregate function {database}.test_count(int) returns bigint location '{location}' update_fn='CountUpdate'; create aggregate function {database}.hll(int) returns string location '{location}' update_fn='HllUpdate'; create aggregate function {database}.sum_small_decimal(decimal(9,2)) returns decimal(9,2) location '{location}' update_fn='SumSmallDecimalUpdate'; """ # Create test UDF functions in {database} from library {location} create_udfs_template = """ drop function if exists {database}.identity(boolean); drop function if exists {database}.identity(tinyint); drop function if exists {database}.identity(smallint); drop function if exists {database}.identity(int); drop function if exists {database}.identity(bigint); drop function if exists {database}.identity(float); drop function if exists {database}.identity(double); drop function if exists {database}.identity(string); drop function if exists {database}.identity(timestamp); drop function if exists {database}.identity(decimal(9,0)); drop function if exists {database}.identity(decimal(18,1)); drop function if exists {database}.identity(decimal(38,10)); drop function if exists {database}.all_types_fn( string, boolean, tinyint, smallint, int, bigint, float, double, decimal(2,0)); drop function if exists {database}.no_args(); drop function if exists {database}.var_and(boolean...); drop function if exists {database}.var_sum(int...); drop function if exists {database}.var_sum(double...); drop function if exists {database}.var_sum(string...); drop function if exists {database}.var_sum(decimal(4,2)...); drop function if exists {database}.var_sum_multiply(double, int...); drop function if exists {database}.constant_timestamp(); drop function if exists {database}.validate_arg_type(string); drop function if exists {database}.count_rows(); drop function if exists {database}.constant_arg(int); drop function if exists {database}.validate_open(int); drop function if exists {database}.mem_test(bigint); drop function if exists {database}.mem_test_leaks(bigint); drop function if exists {database}.unmangled_symbol(); drop function if exists {database}.four_args(int, int, int, int); drop function if exists {database}.five_args(int, int, int, int, int); drop function if exists {database}.six_args(int, int, int, int, int, int); 
drop function if exists {database}.seven_args(int, int, int, int, int, int, int); drop function if exists {database}.eight_args(int, int, int, int, int, int, int, int); create database if not exists {database}; create function {database}.identity(boolean) returns boolean location '{location}' symbol='Identity'; create function {database}.identity(tinyint) returns tinyint location '{location}' symbol='Identity'; create function {database}.identity(smallint) returns smallint location '{location}' symbol='Identity'; create function {database}.identity(int) returns int location '{location}' symbol='Identity'; create function {database}.identity(bigint) returns bigint location '{location}' symbol='Identity'; create function {database}.identity(float) returns float location '{location}' symbol='Identity'; create function {database}.identity(double) returns double location '{location}' symbol='Identity'; create function {database}.identity(string) returns string location '{location}' symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_9StringValE'; create function {database}.identity(timestamp) returns timestamp location '{location}' symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_12TimestampValE'; create function {database}.identity(decimal(9,0)) returns decimal(9,0) location '{location}' symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_10DecimalValE'; create function {database}.identity(decimal(18,1)) returns decimal(18,1) location '{location}' symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_10DecimalValE'; create function {database}.identity(decimal(38,10)) returns decimal(38,10) location '{location}' symbol='_Z8IdentityPN10impala_udf15FunctionContextERKNS_10DecimalValE'; create function {database}.all_types_fn( string, boolean, tinyint, smallint, int, bigint, float, double, decimal(2,0)) returns int location '{location}' symbol='AllTypes'; create function {database}.no_args() returns string location '{location}' symbol='_Z6NoArgsPN10impala_udf15FunctionContextE'; create function {database}.var_and(boolean...) returns boolean location '{location}' symbol='VarAnd'; create function {database}.var_sum(int...) returns int location '{location}' symbol='VarSum'; create function {database}.var_sum(double...) returns double location '{location}' symbol='VarSum'; create function {database}.var_sum(string...) returns int location '{location}' symbol='VarSum'; create function {database}.var_sum(decimal(4,2)...) returns decimal(18,2) location '{location}' symbol='VarSum'; create function {database}.var_sum_multiply(double, int...) 
returns double location '{location}' symbol='_Z14VarSumMultiplyPN10impala_udf15FunctionContextERKNS_9DoubleValEiPKNS_6IntValE'; create function {database}.constant_timestamp() returns timestamp location '{location}' symbol='ConstantTimestamp'; create function {database}.validate_arg_type(string) returns boolean location '{location}' symbol='ValidateArgType'; create function {database}.count_rows() returns bigint location '{location}' symbol='Count' prepare_fn='CountPrepare' close_fn='CountClose'; create function {database}.constant_arg(int) returns int location '{location}' symbol='ConstantArg' prepare_fn='ConstantArgPrepare' close_fn='ConstantArgClose'; create function {database}.validate_open(int) returns boolean location '{location}' symbol='ValidateOpen' prepare_fn='ValidateOpenPrepare' close_fn='ValidateOpenClose'; create function {database}.mem_test(bigint) returns bigint location '{location}' symbol='MemTest' prepare_fn='MemTestPrepare' close_fn='MemTestClose'; create function {database}.mem_test_leaks(bigint) returns bigint location '{location}' symbol='MemTest' prepare_fn='MemTestPrepare'; -- Regression test for IMPALA-1475 create function {database}.unmangled_symbol() returns bigint location '{location}' symbol='UnmangledSymbol'; create function {database}.four_args(int, int, int, int) returns int location '{location}' symbol='FourArgs'; create function {database}.five_args(int, int, int, int, int) returns int location '{location}' symbol='FiveArgs'; create function {database}.six_args(int, int, int, int, int, int) returns int location '{location}' symbol='SixArgs'; create function {database}.seven_args(int, int, int, int, int, int, int) returns int location '{location}' symbol='SevenArgs'; create function {database}.eight_args(int, int, int, int, int, int, int, int) returns int location '{location}' symbol='EightArgs'; """
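
# Illustrative sketch (not part of the test suite) of how the DDL templates
# above are expanded before execution, mirroring what __load_functions() does.
# The database name matches test_native_functions(); the library path is shown
# without any filesystem prefix that get_fs_path() might add.
#
#   queries = TestUdfs.create_udas_template.format(
#       database='native_function_test',
#       location='/test-warehouse/libudasample.so')
#   statements = [q for q in queries.split(';') if q.strip()]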
#!/usr/bin/python import sys, os, re, platform from os.path import exists, abspath, dirname, join, isdir try: # Allow use of setuptools so eggs can be built. from setuptools import setup, Command except ImportError: from distutils.core import setup, Command from distutils.extension import Extension from distutils.errors import * OFFICIAL_BUILD = 9999 def _print(s): # Python 2/3 compatibility sys.stdout.write(s + '\n') class VersionCommand(Command): description = "prints the pyodbc version, determined from git" user_options = [] def initialize_options(self): self.verbose = 0 def finalize_options(self): pass def run(self): version_str, version = get_version() sys.stdout.write(version_str + '\n') class TagsCommand(Command): description = 'runs etags' user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): # Windows versions of etag do not seem to expand wildcards (which Unix shells normally do for Unix utilities), # so find all of the files ourselves. files = [ join('src', f) for f in os.listdir('src') if f.endswith(('.h', '.cpp')) ] cmd = 'etags %s' % ' '.join(files) return os.system(cmd) def main(): version_str, version = get_version() settings = get_compiler_settings(version_str) files = [ abspath(join('src', f)) for f in os.listdir('src') if f.endswith('.cpp') ] if exists('MANIFEST'): os.remove('MANIFEST') kwargs = { 'name': "pyodbc", 'version': version_str, 'description': "DB API Module for ODBC", 'long_description': ('A Python DB API 2 module for ODBC. This project provides an up-to-date, ' 'convenient interface to ODBC using native data types like datetime and decimal.'), 'maintainer': "Michael Kleehammer", 'maintainer_email': "[email protected]", 'ext_modules': [Extension('pyodbc', files, **settings)], 'license': 'MIT', 'classifiers': ['Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', 'License :: OSI Approved :: MIT License', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 3', 'Topic :: Database', ], 'url': 'http://code.google.com/p/pyodbc', 'download_url': 'http://code.google.com/p/pyodbc/downloads/list', 'cmdclass': { 'version' : VersionCommand, 'tags' : TagsCommand } } if sys.hexversion >= 0x02060000: kwargs['options'] = { 'bdist_wininst': {'user_access_control' : 'auto'} } setup(**kwargs) def get_compiler_settings(version_str): settings = { 'libraries': [], 'define_macros' : [ ('PYODBC_VERSION', version_str) ] } # This isn't the best or right way to do this, but I don't see how someone is supposed to sanely subclass the build # command. 
for option in ['assert', 'trace', 'leak-check']: try: sys.argv.remove('--%s' % option) settings['define_macros'].append(('PYODBC_%s' % option.replace('-', '_').upper(), 1)) except ValueError: pass if os.name == 'nt': settings['extra_compile_args'] = ['/Wall', '/wd4668', '/wd4820', '/wd4711', # function selected for automatic inline expansion '/wd4100', # unreferenced formal parameter '/wd4127', # "conditional expression is constant" testing compilation constants '/wd4191', # casts to PYCFunction which doesn't have the keywords parameter ] settings['libraries'].append('odbc32') settings['libraries'].append('advapi32') if '--debug' in sys.argv: sys.argv.remove('--debug') settings['extra_compile_args'].extend('/Od /Ge /GS /GZ /RTC1 /Wp64 /Yd'.split()) elif os.environ.get("OS", '').lower().startswith('windows'): # Windows Cygwin (posix on windows) # OS name not windows, but still on Windows settings['libraries'].append('odbc32') elif sys.platform == 'darwin': # OS/X now ships with iODBC. settings['libraries'].append('iodbc') # Apple has decided they won't maintain the iODBC system in OS/X and has added deprecation warnings in 10.8. # For now target 10.7 to eliminate the warnings. # Python functions take a lot of 'char *' that really should be const. gcc complains about this *a lot* settings['extra_compile_args'] = ['-Wno-write-strings', '-Wno-deprecated-declarations'] settings['define_macros'].append( ('MAC_OS_X_VERSION_10_7',) ) else: # Other posix-like: Linux, Solaris, etc. # Python functions take a lot of 'char *' that really should be const. gcc complains about this *a lot* settings['extra_compile_args'] = ['-Wno-write-strings'] # What is the proper way to detect iODBC, MyODBC, unixODBC, etc.? settings['libraries'].append('odbc') return settings def add_to_path(): """ Prepends the build directory to the path so pyodbcconf can be imported without installing it. """ # Now run the utility import imp library_exts = [ t[0] for t in imp.get_suffixes() if t[-1] == imp.C_EXTENSION ] library_names = [ 'pyodbcconf%s' % ext for ext in library_exts ] # Only go into directories that match our version number. dir_suffix = '-%s.%s' % (sys.version_info[0], sys.version_info[1]) build = join(dirname(abspath(__file__)), 'build') for top, dirs, files in os.walk(build): dirs = [ d for d in dirs if d.endswith(dir_suffix) ] for name in library_names: if name in files: sys.path.insert(0, top) return raise SystemExit('Did not find pyodbcconf') def get_version(): """ Returns the version of the product as (description, [major,minor,micro,beta]). If the release is official, `beta` will be 9999 (OFFICIAL_BUILD). 1. If in a git repository, use the latest tag (git describe). 2. If in an unzipped source directory (from setup.py sdist), read the version from the PKG-INFO file. 3. Use 3.0.0.0 and complain a lot. """ # My goal is to (1) provide accurate tags for official releases but (2) not have to manage tags for every test # release. # # Official versions are tagged using 3 numbers: major, minor, micro. A build of a tagged version should produce # the version using just these pieces, such as 2.1.4. # # Unofficial versions are "working towards" the next version. So the next unofficial build after 2.1.4 would be a # beta for 2.1.5. Using 'git describe' we can find out how many changes have been made after 2.1.4 and we'll use # this count as the beta id (beta1, beta2, etc.) 
# # Since the 4 numbers are put into the Windows DLL, we want to make sure the beta versions sort *before* the # official, so we set the official build number to 9999, but we don't show it. name = None # branch/feature name. Should be None for official builds. numbers = None # The 4 integers that make up the version. # If this is a source release the version will have already been assigned and be in the PKG-INFO file. name, numbers = _get_version_pkginfo() # If not a source release, we should be in a git repository. Look for the latest tag. if not numbers: name, numbers = _get_version_git() if not numbers: _print('WARNING: Unable to determine version. Using 3.0.0.0') name, numbers = '3.0.0-unsupported', [3,0,0,0] return name, numbers def _get_version_pkginfo(): filename = join(dirname(abspath(__file__)), 'PKG-INFO') if exists(filename): re_ver = re.compile(r'^Version: \s+ (\d+)\.(\d+)\.(\d+) (?: -beta(\d+))?', re.VERBOSE) for line in open(filename): match = re_ver.search(line) if match: name = line.split(':', 1)[1].strip() numbers = [int(n or 0) for n in match.groups()[:3]] numbers.append(int(match.group(4) or OFFICIAL_BUILD)) # don't use 0 as a default for build return name, numbers return None, None def _get_version_git(): n, result = getoutput('git describe --tags --match 3.*') if n: _print('WARNING: git describe failed with: %s %s' % (n, result)) return None, None match = re.match(r'(\d+).(\d+).(\d+) (?: -(\d+)-g[0-9a-z]+)?', result, re.VERBOSE) if not match: return None, None numbers = [int(n or OFFICIAL_BUILD) for n in match.groups()] if numbers[-1] == OFFICIAL_BUILD: name = '%s.%s.%s' % tuple(numbers[:3]) if numbers[-1] != OFFICIAL_BUILD: # This is a beta of the next micro release, so increment the micro number to reflect this. numbers[-2] += 1 name = '%s.%s.%s-beta%02d' % tuple(numbers) n, result = getoutput('git branch') branch = re.search(r'\* (\w+)', result).group(1) if branch != 'master' and not re.match('^v\d+$', branch): name = branch + '-' + name return name, numbers def getoutput(cmd): pipe = os.popen(cmd, 'r') text = pipe.read().rstrip('\n') status = pipe.close() or 0 return status, text if __name__ == '__main__': main()
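
# Worked example (illustrative) of the version derivation above, assuming
# 'git describe --tags --match 3.*' reports '3.0.7-12-gabcdef0' and the
# current branch is master (so no branch prefix is added):
#   regex groups -> ('3', '0', '7', '12')
#   numbers      -> [3, 0, 7, 12]; the build is a beta, so micro is bumped
#   result       -> name '3.0.8-beta12', numbers [3, 0, 8, 12]
# A clean tag such as '3.0.7' yields name '3.0.7' and numbers [3, 0, 7, 9999].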
# sshrepo.py - ssh repository proxy class for mercurial # # Copyright 2005, 2006 Matt Mackall <[email protected]> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. from node import bin, hex from i18n import _ import repo, util, error, encoding import re, urllib class remotelock(object): def __init__(self, repo): self.repo = repo def release(self): self.repo.unlock() self.repo = None def __del__(self): if self.repo: self.release() class sshrepository(repo.repository): def __init__(self, ui, path, create=0): self._url = path self.ui = ui m = re.match(r'^ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))?$', path) if not m: self.abort(error.RepoError(_("couldn't parse location %s") % path)) self.user = m.group(2) self.host = m.group(3) self.port = m.group(5) self.path = m.group(7) or "." sshcmd = self.ui.config("ui", "ssh", "ssh") remotecmd = self.ui.config("ui", "remotecmd", "hg") args = util.sshargs(sshcmd, self.host, self.user, self.port) if create: cmd = '%s %s "%s init %s"' cmd = cmd % (sshcmd, args, remotecmd, self.path) ui.note(_('running %s\n') % cmd) res = util.system(cmd) if res != 0: self.abort(error.RepoError(_("could not create remote repo"))) self.validate_repo(ui, sshcmd, args, remotecmd) def url(self): return self._url def validate_repo(self, ui, sshcmd, args, remotecmd): # cleanup up previous run self.cleanup() cmd = '%s %s "%s -R %s serve --stdio"' cmd = cmd % (sshcmd, args, remotecmd, self.path) cmd = util.quotecommand(cmd) ui.note(_('running %s\n') % cmd) self.pipeo, self.pipei, self.pipee = util.popen3(cmd) # skip any noise generated by remote shell self.do_cmd("hello") r = self.do_cmd("between", pairs=("%s-%s" % ("0"*40, "0"*40))) lines = ["", "dummy"] max_noise = 500 while lines[-1] and max_noise: l = r.readline() self.readerr() if lines[-1] == "1\n" and l == "\n": break if l: ui.debug("remote: ", l) lines.append(l) max_noise -= 1 else: self.abort(error.RepoError(_("no suitable response from remote hg"))) self.capabilities = set() for l in reversed(lines): if l.startswith("capabilities:"): self.capabilities.update(l[:-1].split(":")[1].split()) break def readerr(self): while 1: size = util.fstat(self.pipee).st_size if size == 0: break l = self.pipee.readline() if not l: break self.ui.status(_("remote: "), l) def abort(self, exception): self.cleanup() raise exception def cleanup(self): try: self.pipeo.close() self.pipei.close() # read the error descriptor until EOF for l in self.pipee: self.ui.status(_("remote: "), l) self.pipee.close() except: pass __del__ = cleanup def do_cmd(self, cmd, **args): self.ui.debug("sending %s command\n" % cmd) self.pipeo.write("%s\n" % cmd) for k, v in args.iteritems(): self.pipeo.write("%s %d\n" % (k, len(v))) self.pipeo.write(v) self.pipeo.flush() return self.pipei def call(self, cmd, **args): self.do_cmd(cmd, **args) return self._recv() def _recv(self): l = self.pipei.readline() self.readerr() try: l = int(l) except: self.abort(error.ResponseError(_("unexpected response:"), l)) return self.pipei.read(l) def _send(self, data, flush=False): self.pipeo.write("%d\n" % len(data)) if data: self.pipeo.write(data) if flush: self.pipeo.flush() self.readerr() def lock(self): self.call("lock") return remotelock(self) def unlock(self): self.call("unlock") def lookup(self, key): self.requirecap('lookup', _('look up remote revision')) d = self.call("lookup", key=key) success, data = d[:-1].split(" ", 1) if int(success): return bin(data) else: self.abort(error.RepoError(data)) 
def heads(self): d = self.call("heads") try: return map(bin, d[:-1].split(" ")) except: self.abort(error.ResponseError(_("unexpected response:"), d)) def branchmap(self): d = self.call("branchmap") try: branchmap = {} for branchpart in d.splitlines(): branchheads = branchpart.split(' ') branchname = urllib.unquote(branchheads[0]) # Earlier servers (1.3.x) send branch names in (their) local # charset. The best we can do is assume it's identical to our # own local charset, in case it's not utf-8. try: branchname.decode('utf-8') except UnicodeDecodeError: branchname = encoding.fromlocal(branchname) branchheads = [bin(x) for x in branchheads[1:]] branchmap[branchname] = branchheads return branchmap except: raise error.ResponseError(_("unexpected response:"), d) def branches(self, nodes): n = " ".join(map(hex, nodes)) d = self.call("branches", nodes=n) try: br = [tuple(map(bin, b.split(" "))) for b in d.splitlines()] return br except: self.abort(error.ResponseError(_("unexpected response:"), d)) def between(self, pairs): n = " ".join(["-".join(map(hex, p)) for p in pairs]) d = self.call("between", pairs=n) try: p = [l and map(bin, l.split(" ")) or [] for l in d.splitlines()] return p except: self.abort(error.ResponseError(_("unexpected response:"), d)) def changegroup(self, nodes, kind): n = " ".join(map(hex, nodes)) return self.do_cmd("changegroup", roots=n) def changegroupsubset(self, bases, heads, kind): self.requirecap('changegroupsubset', _('look up remote changes')) bases = " ".join(map(hex, bases)) heads = " ".join(map(hex, heads)) return self.do_cmd("changegroupsubset", bases=bases, heads=heads) def unbundle(self, cg, heads, source): d = self.call("unbundle", heads=' '.join(map(hex, heads))) if d: # remote may send "unsynced changes" self.abort(error.RepoError(_("push refused: %s") % d)) while 1: d = cg.read(4096) if not d: break self._send(d) self._send("", flush=True) r = self._recv() if r: # remote may send "unsynced changes" self.abort(error.RepoError(_("push failed: %s") % r)) r = self._recv() try: return int(r) except: self.abort(error.ResponseError(_("unexpected response:"), r)) def addchangegroup(self, cg, source, url): d = self.call("addchangegroup") if d: self.abort(error.RepoError(_("push refused: %s") % d)) while 1: d = cg.read(4096) if not d: break self.pipeo.write(d) self.readerr() self.pipeo.flush() self.readerr() r = self._recv() if not r: return 1 try: return int(r) except: self.abort(error.ResponseError(_("unexpected response:"), r)) def stream_out(self): return self.do_cmd('stream_out') instance = sshrepository
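
# Protocol sketch (illustrative; schematic bytes, not captured output): what
# do_cmd() writes for a command such as "between", and the framing _recv()
# expects back on the other pipe.
#
#   -> "between\n"
#   -> "pairs 81\n"                       # "<key> <value length>\n"
#   -> 81 bytes: "<40-hex-node>-<40-hex-node>"
#   <- "<decimal byte count>\n"
#   <- that many bytes of response payload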
import sys if sys.version_info < (3,): range = xrange import numpy as np import pandas as pd import scipy.stats as ss from patsy import dmatrices, dmatrix, demo_data from .. import families as fam from .. import output as op from .. import tests as tst from .. import tsm as tsm from .. import data_check as dc from .nn_architecture import neural_network_tanh, neural_network_tanh_mb class NNARX(tsm.TSM): """ Inherits time series methods from TSM parent class. **** NEURAL NETWORK AUTOREGRESSIVE (NNAR) MODELS **** Parameters ---------- data : pd.DataFrame or np.ndarray Field to specify the univariate time series data that will be used. ar : int Field to specify how many AR lags the model will have. units : int How many units for the neural network layers : int How many layers for the neural networ activation : function E.g. np.tanh, np.array (linear) family: family object E.g. pf.Normal() """ def __init__(self, data, formula, ar, units, layers, family=fam.Normal()): # Initialize TSM object super(NNARX, self).__init__('NNARX') # Latent Variable information self.ar = ar self.units = units self.layers = layers self.activation = np.tanh self.model_name = "NNARX(" + str(self.ar) + ")" self.z_no = self.ar + 2 self.max_lag = self.ar self._z_hide = 0 # Whether to cutoff latent variables from results table self.supported_methods = ["BBVI"] self.default_method = "BBVI" self.multivariate_model = False # Format the data self.is_pandas = True # This is compulsory for this model type self.data_original = data.copy() self.formula = formula self.y, self.X = dmatrices(formula, data) self.y_name = self.y.design_info.describe() self.X_names = self.X.design_info.describe().split(" + ") self.y = self.y.astype(np.float) self.X = self.X.astype(np.float) self.z_no = self.X.shape[1] self.data_name = self.y_name self.y = np.array([self.y]).ravel() self.data = self.y.copy() self.X = np.array([self.X])[0] self.index = data.index self.data_length = self.data.shape[0] self.X = self.X[self.ar:, :] self.X = np.concatenate([self._ar_matrix().T, self.X], axis=1).T self._create_latent_variables() self.family = family self.model_name2, self.link, self.scale, self.shape, self.skewness, self.mean_transform, self.cythonized = self.family.setup() self.model_name = self.model_name2 + " NNARX(" + str(self.ar) + ")" # Build any remaining latent variables that are specific to the family chosen for no, i in enumerate(self.family.build_latent_variables()): self.latent_variables.add_z(i[0], i[1], i[2]) self.latent_variables.z_list[-1].start = i[3] self.z_no = len(self.latent_variables.z_list) self.family_z_no = len(self.family.build_latent_variables()) # Initialize with random weights for var_no in range(len(self.latent_variables.z_list)-self.family_z_no): self.latent_variables.z_list[var_no].start = np.random.normal() if isinstance(self.family, fam.Normal): self.neg_loglik = self.normal_neg_loglik else: self.neg_loglik = self.general_neg_loglik def _ar_matrix(self): """ Creates Autoregressive matrix Returns ---------- X : np.ndarray Autoregressive Matrix """ if self.ar != 0: X = self.data[(self.max_lag-1):-1] for i in range(1, self.ar): X = np.vstack((X, self.data[(self.max_lag-i-1):-i-1])) return X else: return np.zeros(self.data_length-self.max_lag) def _create_latent_variables(self): """ Creates the model's latent variables Returns ---------- None (changes model attributes) """ # Input layer for unit in range(self.units): self.latent_variables.add_z('Constant | Layer ' + str(1) + ' | Unit ' + str(unit+1), 
fam.Cauchy(0,1,transform=None), fam.Normal(0, 3)) for ar_term in range(self.ar): self.latent_variables.add_z('AR' + str(ar_term+1) + ' | Layer ' + str(1) + ' | Unit ' + str(unit+1), fam.Cauchy(0,1,transform=None), fam.Normal(0, 3)) for z in range(len(self.X_names)): self.latent_variables.add_z('Weight ' + self.X_names[z], fam.Cauchy(0, 1, transform=None), fam.Normal(0, 3)) # Hidden layers for layer in range(1, self.layers): for unit in range(self.units): for weight in range(self.units): self.latent_variables.add_z('Weight ' + str(weight+1) + ' | Layer ' + str(layer+1) + ' | Unit ' + str(unit+1), fam.Cauchy(0,1,transform=None), fam.Normal(0, 3)) # Output layer for weight in range(self.units): self.latent_variables.add_z('Output Weight ' + str(weight+1), fam.Cauchy(0,1,transform=None), fam.Normal(0, 3)) def _get_scale_and_shape(self,parm): """ Obtains appropriate model scale and shape latent variables Parameters ---------- parm : np.array Transformed latent variable vector Returns ---------- None (changes model attributes) """ if self.scale is True: if self.shape is True: model_shape = parm[-1] model_scale = parm[-2] else: model_shape = 0 model_scale = parm[-1] else: model_scale = 0 model_shape = 0 if self.skewness is True: model_skewness = parm[-3] else: model_skewness = 0 return model_scale, model_shape, model_skewness def _get_scale_and_shape_sim(self, transformed_lvs): """ Obtains model scale, shape, skewness latent variables for a 2d array of simulations. Parameters ---------- transformed_lvs : np.array Transformed latent variable vector (2d - with draws of each variable) Returns ---------- - Tuple of np.arrays (each being scale, shape and skewness draws) """ if self.scale is True: if self.shape is True: model_shape = self.latent_variables.z_list[-1].prior.transform(transformed_lvs[-1, :]) model_scale = self.latent_variables.z_list[-2].prior.transform(transformed_lvs[-2, :]) else: model_shape = np.zeros(transformed_lvs.shape[1]) model_scale = self.latent_variables.z_list[-1].prior.transform(transformed_lvs[-1, :]) else: model_scale = np.zeros(transformed_lvs.shape[1]) model_shape = np.zeros(transformed_lvs.shape[1]) if self.skewness is True: model_skewness = self.latent_variables.z_list[-3].prior.transform(transformed_lvs[-3, :]) else: model_skewness = np.zeros(transformed_lvs.shape[1]) return model_scale, model_shape, model_skewness def _model(self, beta): """ Creates the structure of the model (model matrices etc) Parameters ---------- beta : np.ndarray Contains untransformed starting values for the latent variables Returns ---------- mu : np.ndarray Contains the predicted values (location) for the time series Y : np.ndarray Contains the length-adjusted time series (accounting for lags) """ Y = np.array(self.data[self.max_lag:]) # Transform latent variables z = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])]) return neural_network_tanh(Y, self.X, z, self.units, self.layers, self.ar+len(self.X_names)), Y def _mb_model(self, beta, mini_batch): """ Creates the structure of the model (model matrices etc) for mini batch model Parameters ---------- beta : np.ndarray Contains untransformed starting values for the latent variables mini_batch : int Mini batch size for the data sampling Returns ---------- mu : np.ndarray Contains the predicted values (location) for the time series Y : np.ndarray Contains the length-adjusted time series (accounting for lags) """ Y = np.array(self.data[self.max_lag:]) sample = np.random.choice(len(Y), mini_batch, 
replace=False) Y = Y[sample] X = self.X[:, sample] # Transform latent variables z = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])]) return neural_network_tanh_mb(Y, X, z, self.units, self.layers, self.ar+len(self.X_names)), Y def predict_new(self, X, z): first_layer_output = np.zeros(self.units) for unit in range(self.units): first_layer_output[unit] = self.activation(np.matmul(np.transpose(X), z[unit*(self.ar+len(self.X_names)+1):((unit+1)*(self.ar+len(self.X_names)+1))])) params_used = ((self.units)*(self.ar+len(self.X_names)+1)) # Hidden layers hidden_layer_output = np.zeros((self.units, self.layers-1)) for layer in range(1, self.layers): for unit in range(self.units): if layer == 1: hidden_layer_output[unit,layer-1] = self.activation(np.matmul(first_layer_output, z[params_used+unit*(self.units)+((layer-1)*self.units**2):((params_used+(unit+1)*self.units)+((layer-1)*self.units**2))])) else: hidden_layer_output[unit,layer-1] = self.activation(np.matmul(hidden_layer_output[:,layer-1], z[params_used+unit*(self.units)+((layer-1)*self.units**2):((params_used+(unit+1)*self.units)+((layer-1)*self.units**2))])) params_used = params_used + (self.layers-1)*self.units**2 # Output layer if self.layers == 1: mu = np.matmul(first_layer_output, z[params_used:params_used+self.units]) else: mu = np.matmul(hidden_layer_output[:,-1], z[params_used:params_used+self.units]) return mu def _mean_prediction(self, mu, Y, h, t_z): """ Creates a h-step ahead mean prediction Parameters ---------- mu : np.ndarray The past predicted values Y : np.ndarray The past data h : int How many steps ahead for the prediction t_z : np.ndarray A vector of (transformed) latent variables Returns ---------- h-length vector of mean predictions """ # Create arrays to iteratre over Y_exp = Y.copy() # Loop over h time periods for t in range(0,h): if self.ar != 0: Y_exp_normalized = (Y_exp[-self.ar:][::-1] - self._norm_mean) / self._norm_std new_value = self.predict_new(np.append(1.0, Y_exp_normalized), self.latent_variables.get_z_values()) else: new_value = self.predict_new(np.array([1.0]), self.latent_variables.get_z_values()) Y_exp = np.append(Y_exp, [self.link(new_value)]) return Y_exp def _sim_prediction(self, mu, Y, h, t_z, simulations): """ Simulates a h-step ahead mean prediction Parameters ---------- mu : np.ndarray The past predicted values Y : np.ndarray The past data h : int How many steps ahead for the prediction t_z : np.ndarray A vector of (transformed) latent variables simulations : int How many simulations to perform Returns ---------- Matrix of simulations """ model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_z) sim_vector = np.zeros([simulations,h]) for n in range(0, simulations): # Create arrays to iteratre over Y_exp = Y.copy() # Loop over h time periods for t in range(0,h): if self.ar != 0: Y_exp_normalized = (Y_exp[-self.ar:][::-1] - self._norm_mean) / self._norm_std new_value = self.predict_new(np.append(1.0, Y_exp_normalized), self.latent_variables.get_z_values()) else: new_value = self.predict_new(np.array([1.0]), self.latent_variables.get_z_values()) new_value += np.random.randn(1)*t_z[-1] if self.model_name2 == "Exponential": rnd_value = self.family.draw_variable(1.0/self.link(new_value), model_scale, model_shape, model_skewness, 1)[0] else: rnd_value = self.family.draw_variable(self.link(new_value), model_scale, model_shape, model_skewness, 1)[0] Y_exp = np.append(Y_exp, [rnd_value]) sim_vector[n] = Y_exp[-h:] return 
np.transpose(sim_vector) def _summarize_simulations(self, mean_values, sim_vector, date_index, h, past_values): """ Produces simulation forecasted values and prediction intervals Parameters ---------- mean_values : np.ndarray Mean predictions for h-step ahead forecasts sim_vector : np.ndarray N simulated predictions for h-step ahead forecasts date_index : pd.DateIndex or np.ndarray Date index for the simulations h : int How many steps ahead are forecast past_values : int How many past observations to include in the forecast plot intervals : boolean Would you like to show prediction intervals for the forecast? """ error_bars = [] for pre in range(5,100,5): error_bars.append(np.insert([np.percentile(i,pre) for i in sim_vector] - mean_values[-h:],0,0)) forecasted_values = mean_values[-h-1:] plot_values = mean_values[-h-past_values:] plot_index = date_index[-h-past_values:] return error_bars, forecasted_values, plot_values, plot_index def general_neg_loglik(self, beta): """ Calculates the negative log-likelihood of the model Parameters ---------- beta : np.ndarray Contains untransformed starting values for latent variables Returns ---------- The negative logliklihood of the model """ mu, Y = self._model(beta) parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])]) #TODO: Replace above with transformation that only acts on scale, shape, skewness in future (speed-up) model_scale, model_shape, model_skewness = self._get_scale_and_shape(parm) return self.family.neg_loglikelihood(Y, self.link(mu), model_scale, model_shape, model_skewness) def mb_neg_loglik(self, beta, mini_batch): """ Calculates the negative log-likelihood of the model for a minibatch Parameters ---------- beta : np.ndarray Contains untransformed starting values for latent variables mini_batch : int Size of each mini batch of data Returns ---------- The negative logliklihood of the model """ mu, Y = self._mb_model(beta, mini_batch) parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])]) #TODO: Replace above with transformation that only acts on scale, shape, skewness in future (speed-up) model_scale, model_shape, model_skewness = self._get_scale_and_shape(parm) return self.family.neg_loglikelihood(Y, self.link(mu), model_scale, model_shape, model_skewness) def normal_neg_loglik(self, beta): """ Creates the negative log-likelihood of the model Parameters ---------- beta : np.ndarray Contains untransformed starting values for latent variables Returns ---------- The negative logliklihood of the model """ mu, Y = self._model(beta) return -np.sum(ss.norm.logpdf(Y, loc=mu, scale=self.latent_variables.z_list[-1].prior.transform(beta[-1]))) def plot_fit(self, **kwargs): """ Plots the fit of the model against the data """ import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize',(10,7)) plt.figure(figsize=figsize) date_index = self.index[self.ar:self.data.shape[0]] mu, Y = self._model(self.latent_variables.get_z_values()) plt.plot(date_index,Y,label='Data') plt.plot(date_index,mu,label='Filter',c='black') plt.title(self.data_name) plt.legend(loc=2) plt.show() def plot_predict(self, h=5, past_values=20, intervals=True, **kwargs): """ Plots forecasts with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? past_values : int (default : 20) How many past observations to show on the forecast graph? 
        intervals : boolean
            Would you like to show prediction intervals for the forecast?

        Returns
        ----------
        - Plot of the forecast
        """
        figsize = kwargs.get('figsize', (10, 7))

        if self.latent_variables.estimated is False:
            raise Exception("No latent variables estimated!")
        else:
            import matplotlib.pyplot as plt
            import seaborn as sns

            # Retrieve data, dates and (transformed) latent variables
            mu, Y = self._model(self.latent_variables.get_z_values())
            date_index = self.shift_dates(h)
            t_z = self.transform_z()

            # Get mean prediction and simulations (for errors)
            mean_values = self._mean_prediction(mu, Y, h, t_z)

            if intervals is True:
                sim_values = self._sim_prediction(mu, Y, h, t_z, 15000)
            else:
                sim_values = self._sim_prediction(mu, Y, h, t_z, 2)

            error_bars, forecasted_values, plot_values, plot_index = self._summarize_simulations(mean_values,
                sim_values, date_index, h, past_values)

            plt.figure(figsize=figsize)

            if intervals is True:
                alpha = [0.15 * i / float(100) for i in range(50, 12, -2)]
                for count, pre in enumerate(error_bars):
                    plt.fill_between(date_index[-h-1:], forecasted_values - pre, forecasted_values + pre,
                        alpha=alpha[count])

            plt.plot(plot_index, plot_values)
            plt.title("Forecast for " + self.data_name)
            plt.xlabel("Time")
            plt.ylabel(self.data_name)
            plt.show()

    def predict_is(self, h=5, fit_once=True, fit_method='MLE', intervals=False, **kwargs):
        """ Makes dynamic out-of-sample predictions with the estimated model on in-sample data

        Parameters
        ----------
        h : int (default : 5)
            How many steps would you like to forecast?

        fit_once : boolean (default: True)
            Fits only once before the in-sample prediction; if False, fits after every new datapoint

        fit_method : string
            Which method to fit the model with

        intervals : boolean
            Whether to return prediction intervals

        Returns
        ----------
        - pd.DataFrame with predicted values
        """
        predictions = []

        for t in range(0, h):
            # Refit this NNARX model on the expanding in-sample window
            x = NNARX(formula=self.formula, ar=self.ar, units=self.units, layers=self.layers,
                      data=self.data_original[:-h+t], family=self.family)

            if fit_once is False:
                x.fit(method=fit_method, printer=False)

            if t == 0:
                if fit_once is True:
                    x.fit(method=fit_method, printer=False)
                    saved_lvs = x.latent_variables
                predictions = x.predict(1, intervals=intervals)
            else:
                if fit_once is True:
                    x.latent_variables = saved_lvs
                predictions = pd.concat([predictions, x.predict(1, intervals=intervals)])

        if intervals is True:
            predictions.rename(columns={0: self.data_name, 1: "1% Prediction Interval",
                2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"},
                inplace=True)
        else:
            predictions.rename(columns={0: self.data_name}, inplace=True)

        predictions.index = self.index[-h:]

        return predictions

    def plot_predict_is(self, h=5, fit_once=True, fit_method='MLE', **kwargs):
        """ Plots forecasts with the estimated model against data (Simulated prediction with data)

        Parameters
        ----------
        h : int (default : 5)
            How many steps to forecast

        fit_once : boolean (default: True)
            Fits only once before the in-sample prediction; if False, fits after every new datapoint

        fit_method : string
            Which method to fit the model with

        Returns
        ----------
        - Plot of the forecast against data
        """
        import matplotlib.pyplot as plt
        import seaborn as sns

        figsize = kwargs.get('figsize', (10, 7))
        plt.figure(figsize=figsize)
        predictions = self.predict_is(h, fit_method=fit_method, fit_once=fit_once)
        data = self.data[-h:]

        plt.plot(predictions.index, data, label='Data')
        plt.plot(predictions.index, predictions, label='Predictions', c='black')
        plt.title(self.data_name)
        plt.legend(loc=2)
        plt.show()

    def predict(self, h=5, intervals=False):
        """ Makes forecast
with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? intervals : boolean (default: False) Whether to return prediction intervals Returns ---------- - pd.DataFrame with predicted values """ if self.latent_variables.estimated is False: raise Exception("No latent variables estimated!") else: mu, Y = self._model(self.latent_variables.get_z_values()) date_index = self.shift_dates(h) if self.latent_variables.estimation_method in ['M-H']: sim_vector = self._sim_prediction_bayes(h, 15000) forecasted_values = np.array([np.mean(i) for i in sim_vector]) prediction_01 = np.array([np.percentile(i, 1) for i in sim_vector]) prediction_05 = np.array([np.percentile(i, 5) for i in sim_vector]) prediction_95 = np.array([np.percentile(i, 95) for i in sim_vector]) prediction_99 = np.array([np.percentile(i, 99) for i in sim_vector]) else: t_z = self.transform_z() mean_values = self._mean_prediction(mu, Y, h, t_z) if intervals is True: sim_values = self._sim_prediction(mu, Y, h, t_z, 15000) else: sim_values = self._sim_prediction(mu, Y, h, t_z, 2) if self.model_name2 == "Skewt": model_scale, model_shape, model_skewness = self._get_scale_and_shape(t_z) m1 = (np.sqrt(model_shape)*sp.gamma((model_shape-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(model_shape/2.0)) forecasted_values = mean_values[-h:] + (model_skewness - (1.0/model_skewness))*model_scale*m1 else: forecasted_values = mean_values[-h:] if intervals is False: result = pd.DataFrame(forecasted_values) result.rename(columns={0:self.data_name}, inplace=True) else: # Get mean prediction and simulations (for errors) if self.latent_variables.estimation_method not in ['M-H']: sim_values = self._sim_prediction(mu, Y, h, t_z, 15000) prediction_01 = np.array([np.percentile(i, 1) for i in sim_values]) prediction_05 = np.array([np.percentile(i, 5) for i in sim_values]) prediction_95 = np.array([np.percentile(i, 95) for i in sim_values]) prediction_99 = np.array([np.percentile(i, 99) for i in sim_values]) result = pd.DataFrame([forecasted_values, prediction_01, prediction_05, prediction_95, prediction_99]).T result.rename(columns={0:self.data_name, 1: "1% Prediction Interval", 2: "5% Prediction Interval", 3: "95% Prediction Interval", 4: "99% Prediction Interval"}, inplace=True) result.index = date_index[-h:] return result def sample(self, nsims=1000): """ Samples from the posterior predictive distribution Parameters ---------- nsims : int (default : 1000) How many draws from the posterior predictive distribution Returns ---------- - np.ndarray of draws from the data """ if self.latent_variables.estimation_method not in ['BBVI', 'M-H']: raise Exception("No latent variables estimated!") else: lv_draws = self.draw_latent_variables(nsims=nsims) mus = [self._model(lv_draws[:,i])[0] for i in range(nsims)] model_scale, model_shape, model_skewness = self._get_scale_and_shape_sim(lv_draws) data_draws = np.array([self.family.draw_variable(self.link(mus[i]), np.repeat(model_scale[i], mus[i].shape[0]), np.repeat(model_shape[i], mus[i].shape[0]), np.repeat(model_skewness[i], mus[i].shape[0]), mus[i].shape[0]) for i in range(nsims)]) return data_draws def plot_sample(self, nsims=10, plot_data=True, **kwargs): """ Plots draws from the posterior predictive density against the data Parameters ---------- nsims : int (default : 1000) How many draws from the posterior predictive distribution plot_data boolean Whether to plot the data or not """ if self.latent_variables.estimation_method not in ['BBVI', 'M-H']: raise Exception("No 
latent variables estimated!") else: import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize',(10,7)) plt.figure(figsize=figsize) date_index = self.index[self.ar:self.data_length] mu, Y = self._model(self.latent_variables.get_z_values()) draws = self.sample(nsims).T plt.plot(date_index, draws, label='Posterior Draws', alpha=1.0) if plot_data is True: plt.plot(date_index, Y, label='Data', c='black', alpha=0.5, linestyle='', marker='s') plt.title(self.data_name) plt.show() def ppc(self, nsims=1000, T=np.mean): """ Computes posterior predictive p-value Parameters ---------- nsims : int (default : 1000) How many draws for the PPC T : function A discrepancy measure - e.g. np.mean, np.std, np.max Returns ---------- - float (posterior predictive p-value) """ if self.latent_variables.estimation_method not in ['BBVI', 'M-H']: raise Exception("No latent variables estimated!") else: lv_draws = self.draw_latent_variables(nsims=nsims) mus = [self._model(lv_draws[:,i])[0] for i in range(nsims)] model_scale, model_shape, model_skewness = self._get_scale_and_shape_sim(lv_draws) data_draws = np.array([self.family.draw_variable(self.link(mus[i]), np.repeat(model_scale[i], mus[i].shape[0]), np.repeat(model_shape[i], mus[i].shape[0]), np.repeat(model_skewness[i], mus[i].shape[0]), mus[i].shape[0]) for i in range(nsims)]) T_sims = T(self.sample(nsims=nsims), axis=1) T_actual = T(self.data) return len(T_sims[T_sims>T_actual])/nsims def plot_ppc(self, nsims=1000, T=np.mean, **kwargs): """ Plots histogram of the discrepancy from draws of the posterior Parameters ---------- nsims : int (default : 1000) How many draws for the PPC T : function A discrepancy measure - e.g. np.mean, np.std, np.max """ if self.latent_variables.estimation_method not in ['BBVI', 'M-H']: raise Exception("No latent variables estimated!") else: import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize',(10,7)) lv_draws = self.draw_latent_variables(nsims=nsims) mus = [self._model(lv_draws[:,i])[0] for i in range(nsims)] model_scale, model_shape, model_skewness = self._get_scale_and_shape_sim(lv_draws) data_draws = np.array([self.family.draw_variable(self.link(mus[i]), np.repeat(model_scale[i], mus[i].shape[0]), np.repeat(model_shape[i], mus[i].shape[0]), np.repeat(model_skewness[i], mus[i].shape[0]), mus[i].shape[0]) for i in range(nsims)]) T_sim = T(self.sample(nsims=nsims), axis=1) T_actual = T(self.data) if T == np.mean: description = " of the mean" elif T == np.max: description = " of the maximum" elif T == np.min: description = " of the minimum" elif T == np.median: description = " of the median" else: description = "" plt.figure(figsize=figsize) ax = plt.subplot() ax.axvline(T_actual) sns.distplot(T_sim, kde=False, ax=ax) ax.set(title='Posterior predictive' + description, xlabel='T(x)', ylabel='Frequency'); plt.show()
# BSD 3-Clause License # # Copyright (c) 2019, Elasticsearch BV # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import pytest # isort:skip pytest.importorskip("flask") # isort:skip import logging import os import mock from elasticapm.conf import constants from elasticapm.conf.constants import ERROR, TRANSACTION from elasticapm.contrib.flask import ElasticAPM from elasticapm.utils import compat from elasticapm.utils.disttracing import TraceParent from tests.contrib.flask.utils import captured_templates try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen pytestmark = pytest.mark.flask def test_error_handler(flask_apm_client): client = flask_apm_client.app.test_client() response = client.get("/an-error/") assert response.status_code == 500 assert len(flask_apm_client.client.events[ERROR]) == 1 event = flask_apm_client.client.events[ERROR][0] assert "exception" in event exc = event["exception"] assert exc["type"] == "ValueError" assert exc["message"] == "ValueError: hello world" assert exc["handled"] is False assert event["culprit"] == "tests.contrib.flask.fixtures.an_error" transaction = flask_apm_client.client.events[TRANSACTION][0] assert transaction["result"] == "HTTP 5xx" assert transaction["name"] == "GET /an-error/" def test_get(flask_apm_client): client = flask_apm_client.app.test_client() response = client.get("/an-error/?foo=bar") assert response.status_code == 500 assert len(flask_apm_client.client.events[ERROR]) == 1 event = flask_apm_client.client.events[ERROR][0] assert "request" in event["context"] request = event["context"]["request"] assert request["url"]["full"] == "http://localhost/an-error/?foo=bar" assert request["url"]["search"] == "?foo=bar" assert request["method"] == "GET" assert "body" not in request assert "headers" in request headers = request["headers"] assert "host" in headers, headers.keys() assert headers["host"] == "localhost" env = request["env"] assert "SERVER_NAME" in env, env.keys() assert env["SERVER_NAME"] == "localhost" assert "SERVER_PORT" in env, env.keys() assert env["SERVER_PORT"] == "80" def 
test_get_debug(flask_apm_client): app = flask_apm_client.app app.debug = True app.config["TESTING"] = False with pytest.raises(ValueError): app.test_client().get("/an-error/?foo=bar") assert len(flask_apm_client.client.events) == 0 def test_get_debug_elasticapm(flask_apm_client): app = flask_apm_client.app app.debug = True app.config["TESTING"] = True flask_apm_client.client.config.debug = True with pytest.raises(ValueError): app.test_client().get("/an-error/?foo=bar") assert len(flask_apm_client.client.events[ERROR]) == 1 @pytest.mark.parametrize( "elasticapm_client", [{"capture_body": "errors"}, {"capture_body": "all"}, {"capture_body": "off"}], indirect=True ) def test_post(flask_apm_client): client = flask_apm_client.app.test_client() response = client.post("/an-error/?biz=baz", data={"foo": "bar"}) assert response.status_code == 500 assert len(flask_apm_client.client.events[ERROR]) == 1 event = flask_apm_client.client.events[ERROR][0] assert "request" in event["context"] request = event["context"]["request"] assert request["url"]["full"] == "http://localhost/an-error/?biz=baz" assert request["url"]["search"] == "?biz=baz" assert request["method"] == "POST" if flask_apm_client.client.config.capture_body in (constants.ERROR, "all"): assert request["body"] == {"foo": "bar"} else: assert request["body"] == "[REDACTED]" assert "headers" in request headers = request["headers"] assert "content-length" in headers, headers.keys() assert headers["content-length"] == "7" assert "content-type" in headers, headers.keys() assert headers["content-type"] == "application/x-www-form-urlencoded" assert "host" in headers, headers.keys() assert headers["host"] == "localhost" env = request["env"] assert "SERVER_NAME" in env, env.keys() assert env["SERVER_NAME"] == "localhost" assert "SERVER_PORT" in env, env.keys() assert env["SERVER_PORT"] == "80" @pytest.mark.parametrize( "elasticapm_client", [{"capture_body": "transactions"}, {"capture_body": "all"}, {"capture_body": "off"}], indirect=True, ) def test_instrumentation(flask_apm_client): resp = flask_apm_client.app.test_client().post("/users/", data={"foo": "bar"}) resp.close() assert resp.status_code == 200, resp.response transactions = flask_apm_client.client.events[TRANSACTION] assert len(transactions) == 1 transaction = transactions[0] assert transaction["type"] == "request" assert transaction["result"] == "HTTP 2xx" assert transaction["outcome"] == "success" assert "request" in transaction["context"] assert transaction["context"]["request"]["url"]["full"] == "http://localhost/users/" assert transaction["context"]["request"]["method"] == "POST" if flask_apm_client.client.config.capture_body in (constants.TRANSACTION, "all"): assert transaction["context"]["request"]["body"] == {"foo": "bar"} else: assert transaction["context"]["request"]["body"] == "[REDACTED]" assert transaction["context"]["response"]["status_code"] == 200 assert transaction["context"]["response"]["headers"] == { "foo": "bar;baz", "bar": "bazzinga", "Content-Length": "78", "Content-Type": "text/html; charset=utf-8", } spans = flask_apm_client.client.spans_for_transaction(transactions[0]) assert len(spans) == 1, [t["name"] for t in spans] expected_signatures = {"users.html"} assert {t["name"] for t in spans} == expected_signatures assert spans[0]["name"] == "users.html" assert spans[0]["type"] == "template" assert spans[0]["subtype"] == "jinja2" assert spans[0]["action"] == "render" def test_instrumentation_debug(flask_apm_client): flask_apm_client.app.debug = True assert 
len(flask_apm_client.client.events[TRANSACTION]) == 0 resp = flask_apm_client.app.test_client().post("/users/", data={"foo": "bar"}) resp.close() assert len(flask_apm_client.client.events[TRANSACTION]) == 0 @pytest.mark.parametrize("elasticapm_client", [{"debug": True}], indirect=True) def test_instrumentation_debug_client_debug(flask_apm_client): flask_apm_client.app.debug = True assert len(flask_apm_client.client.events[TRANSACTION]) == 0 resp = flask_apm_client.app.test_client().post("/users/", data={"foo": "bar"}) resp.close() assert len(flask_apm_client.client.events[TRANSACTION]) == 1 def test_instrumentation_404(flask_apm_client): resp = flask_apm_client.app.test_client().post("/no-such-page/") resp.close() assert resp.status_code == 404, resp.response transactions = flask_apm_client.client.events[TRANSACTION] assert len(transactions) == 1 spans = flask_apm_client.client.spans_for_transaction(transactions[0]) assert transactions[0]["result"] == "HTTP 4xx" assert transactions[0]["context"]["response"]["status_code"] == 404 assert len(spans) == 0, [t["signature"] for t in spans] @pytest.mark.parametrize("header_name", [constants.TRACEPARENT_HEADER_NAME, constants.TRACEPARENT_LEGACY_HEADER_NAME]) def test_traceparent_handling(flask_apm_client, header_name): with mock.patch( "elasticapm.contrib.flask.TraceParent.from_string", wraps=TraceParent.from_string ) as wrapped_from_string: resp = flask_apm_client.app.test_client().post( "/users/", data={"foo": "bar"}, headers={ header_name: "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-03", constants.TRACESTATE_HEADER_NAME: "foo=bar,baz=bazzinga", }, ) resp.close() assert resp.status_code == 200, resp.response transaction = flask_apm_client.client.events[TRANSACTION][0] assert transaction["trace_id"] == "0af7651916cd43dd8448eb211c80319c" assert transaction["parent_id"] == "b7ad6b7169203331" assert "foo=bar,baz=bazzinga" in wrapped_from_string.call_args[0] def test_non_standard_http_status(flask_apm_client): resp = flask_apm_client.app.test_client().get("/non-standard-status/") resp.close() assert resp.status == "0 fail", resp.response assert resp.status_code == 0, resp.response transactions = flask_apm_client.client.events[TRANSACTION] assert transactions[0]["result"] == "0 fail" # "0" is prepended by Werkzeug BaseResponse assert transactions[0]["context"]["response"]["status_code"] == 0 def test_framework_name(flask_app): elasticapm = ElasticAPM(app=flask_app, metrics_interval="0ms") assert elasticapm.client.config.framework_name == "flask" app_info = elasticapm.client.get_service_info() assert app_info["framework"]["name"] == "flask" elasticapm.client.close() @pytest.mark.parametrize( "elasticapm_client", [{"capture_body": "errors"}, {"capture_body": "all"}, {"capture_body": "off"}], indirect=True ) def test_post_files(flask_apm_client): with open(os.path.abspath(__file__), mode="rb") as f: response = flask_apm_client.app.test_client().post( "/an-error/", data={ "foo": ["bar", "baz"], "f1": (compat.BytesIO(compat.b("1")), "bla"), "f2": [(f, "flask_tests.py"), (compat.BytesIO(compat.b("1")), "blub")], }, ) assert response.status_code == 500 assert len(flask_apm_client.client.events[ERROR]) == 1 event = flask_apm_client.client.events[ERROR][0] if flask_apm_client.client.config.capture_body in (constants.ERROR, "all"): assert event["context"]["request"]["body"] == { "foo": ["bar", "baz"], "_files": {"f1": "bla", "f2": ["flask_tests.py", "blub"]}, } else: assert event["context"]["request"]["body"] == "[REDACTED]" def 
test_capture_body_config_is_dynamic_for_errors(flask_apm_client): flask_apm_client.client.config.update(version="1", capture_body="all") resp = flask_apm_client.app.test_client().post("/an-error/", data={"foo": "bar"}) resp.close() error = flask_apm_client.client.events[ERROR][0] assert error["context"]["request"]["body"] == {"foo": "bar"} flask_apm_client.client.config.update(version="2", capture_body="off") resp = flask_apm_client.app.test_client().post("/an-error/", data={"foo": "bar"}) resp.close() error = flask_apm_client.client.events[ERROR][1] assert error["context"]["request"]["body"] == "[REDACTED]" def test_capture_body_config_is_dynamic_for_transactions(flask_apm_client): flask_apm_client.client.config.update(version="1", capture_body="all") resp = flask_apm_client.app.test_client().post("/users/", data={"foo": "bar"}) resp.close() transaction = flask_apm_client.client.events[TRANSACTION][0] assert transaction["context"]["request"]["body"] == {"foo": "bar"} flask_apm_client.client.config.update(version="2", capture_body="off") resp = flask_apm_client.app.test_client().post("/users/", data={"foo": "bar"}) resp.close() transaction = flask_apm_client.client.events[TRANSACTION][1] assert transaction["context"]["request"]["body"] == "[REDACTED]" def test_capture_headers_config_is_dynamic_for_errors(flask_apm_client): flask_apm_client.client.config.update(version="1", capture_headers=True) resp = flask_apm_client.app.test_client().post("/an-error/", data={"foo": "bar"}) resp.close() error = flask_apm_client.client.events[ERROR][0] assert error["context"]["request"]["headers"] flask_apm_client.client.config.update(version="2", capture_headers=False) resp = flask_apm_client.app.test_client().post("/an-error/", data={"foo": "bar"}) resp.close() error = flask_apm_client.client.events[ERROR][1] assert "headers" not in error["context"]["request"] def test_capture_headers_config_is_dynamic_for_transactions(flask_apm_client): flask_apm_client.client.config.update(version="1", capture_headers=True) resp = flask_apm_client.app.test_client().post("/users/", data={"foo": "bar"}) resp.close() transaction = flask_apm_client.client.events[TRANSACTION][0] assert transaction["context"]["request"]["headers"] flask_apm_client.client.config.update(version="2", capture_headers=False) resp = flask_apm_client.app.test_client().post("/users/", data={"foo": "bar"}) resp.close() transaction = flask_apm_client.client.events[TRANSACTION][1] assert "headers" not in transaction["context"]["request"] @pytest.mark.parametrize("elasticapm_client", [{"capture_body": "transactions"}], indirect=True) def test_options_request(flask_apm_client): resp = flask_apm_client.app.test_client().options("/") resp.close() transactions = flask_apm_client.client.events[TRANSACTION] assert transactions[0]["context"]["request"]["method"] == "OPTIONS" @pytest.mark.parametrize( "elasticapm_client", [{"capture_headers": "true"}, {"capture_headers": "false"}], indirect=True ) def test_capture_headers_errors(flask_apm_client): resp = flask_apm_client.app.test_client().post("/an-error/", headers={"some-header": "foo"}) resp.close() error = flask_apm_client.client.events[ERROR][0] if flask_apm_client.client.config.capture_headers: assert error["context"]["request"]["headers"]["some-header"] == "foo" else: assert "headers" not in error["context"]["request"] @pytest.mark.parametrize( "elasticapm_client", [{"capture_headers": "true"}, {"capture_headers": "false"}], indirect=True ) def test_capture_headers_transactions(flask_apm_client): resp = 
flask_apm_client.app.test_client().post("/users/", headers={"some-header": "foo"}) resp.close() transaction = flask_apm_client.client.events[TRANSACTION][0] if flask_apm_client.client.config.capture_headers: assert transaction["context"]["request"]["headers"]["some-header"] == "foo" assert transaction["context"]["response"]["headers"]["foo"] == "bar;baz" else: assert "headers" not in transaction["context"]["request"] assert "headers" not in transaction["context"]["response"] def test_streaming_response(flask_apm_client): resp = flask_apm_client.app.test_client().get("/streaming/") assert resp.data == b"01234" resp.close() transaction = flask_apm_client.client.events[TRANSACTION][0] spans = flask_apm_client.client.spans_for_transaction(transaction) assert transaction["duration"] > 50 assert len(spans) == 5 def test_response_close_wsgi(flask_wsgi_server): # this tests the response-close behavior using a real WSGI server elasticapm_client = flask_wsgi_server.app.apm_client.client url = flask_wsgi_server.url + "/streaming/" response = urlopen(url) response.read() transaction = elasticapm_client.events[TRANSACTION][0] spans = elasticapm_client.spans_for_transaction(transaction) assert transaction["duration"] > 50 assert len(spans) == 5 def test_set_transaction_name(flask_apm_client): resp = flask_apm_client.app.test_client().get("/transaction-name/") resp.close() transaction = flask_apm_client.client.events[TRANSACTION][0] assert transaction["name"] == "foo" assert transaction["result"] == "okydoky" def test_rum_tracing_context_processor(flask_apm_client): with captured_templates(flask_apm_client.app) as templates: resp = flask_apm_client.app.test_client().post("/users/", data={"foo": "bar"}) resp.close() transaction = flask_apm_client.client.events[TRANSACTION][0] template, context = templates[0] assert context["apm"]["trace_id"] == transaction["trace_id"] assert context["apm"]["is_sampled"] assert context["apm"]["is_sampled_js"] == "true" assert callable(context["apm"]["span_id"]) @pytest.mark.parametrize("flask_apm_client", [{"logging": True}], indirect=True) def test_logging_enabled(flask_apm_client): logger = logging.getLogger() logger.error("test") error = flask_apm_client.client.events[ERROR][0] assert error["log"]["level"] == "error" assert error["log"]["message"] == "test" @pytest.mark.parametrize("flask_apm_client", [{"logging": False}], indirect=True) def test_logging_disabled(flask_apm_client): logger = logging.getLogger() logger.error("test") assert len(flask_apm_client.client.events[ERROR]) == 0 @pytest.mark.parametrize("flask_apm_client", [{"logging": logging.ERROR}], indirect=True) def test_logging_by_level(flask_apm_client): logger = logging.getLogger() logger.warning("test") logger.error("test") assert len(flask_apm_client.client.events[ERROR]) == 1 error = flask_apm_client.client.events[ERROR][0] assert error["log"]["level"] == "error" def test_flask_transaction_ignore_urls(flask_apm_client): resp = flask_apm_client.app.test_client().get("/users/") resp.close() assert len(flask_apm_client.client.events[TRANSACTION]) == 1 flask_apm_client.client.config.update(version=1, transaction_ignore_urls="/user*") resp = flask_apm_client.app.test_client().get("/users/") resp.close() assert len(flask_apm_client.client.events[TRANSACTION]) == 1
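

# ----------------------------------------------------------------------------
# Hedged wiring sketch (added for illustration; not one of the original
# tests).  It shows the minimal Flask + ElasticAPM setup that the fixtures
# above exercise; the service name and the /boom route are assumptions,
# while ElasticAPM(app=..., metrics_interval="0ms") mirrors the call used in
# test_framework_name.
def _example_flask_apm_setup():
    import flask

    app = flask.Flask(__name__)
    app.config["ELASTIC_APM"] = {"SERVICE_NAME": "example-app"}
    apm = ElasticAPM(app=app, metrics_interval="0ms")

    @app.route("/boom/")
    def boom():
        # Unhandled exceptions raised in a view are captured as error events
        raise ValueError("boom")

    return app, apm
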
import tensorflow as tf import settings #hy: collection of global variables settings.set_global() ####################################################### Header begin ################################################ dropout = [0.3, 0.3, 0.5, 0.5] #3,4,5,5 dropout_1s = [1]*len(dropout) n_hidden = 360 #162*6 # 128 n_classes = len(settings.LABELS) #hy: adapt to lego composed of 6 classes. Cifar10 total classes (0-9 digits) n_input = settings.h_resize * settings.w_resize #hy #300: horizontal 20% #360: until 1200 step good, after that test acc remains #200: start to increase early, 200, but does not increase lot any more #150, 250, 300, 330, 400: until 70 iter 17% optimizer_type = 'GD' #'adam' #GD-'gradient.descent',#'ProximalGradientDescent', #'SGD', #'RMSprop' trained_model = "./testbench/" + "model_GD720_h184_w184_c6_3conv_L0.75_R0.65_V1.0_8_0.81-6361.meta" ###################### #GD learning_rate = 0.045 #0.03549 #0.04049 #0.03049 #0.015 #0.07297 #0.09568# TODO 0.05 0.005 better, 0.001 good \0.02, 0.13799 to 0.14 good for 6 classes, ###################### #adam beta1 = 0.9 beta2 = 0.999 epsilon = 0.009 ###################### #RMSprop decay=0.00009 momentum=0 epsilon_R=0.009 ###################### #SGD lr_decay = 0.01 decay_step = 100 ###################### ####################################################### Header End#### ################################################ # Create model def conv2d(img, w, b, k): return tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(img, w, strides=[1, k, k, 1], padding='SAME'), b)) def max_pool(img, k): return tf.nn.max_pool(img, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME') ################################################################### def define_model(): # General input for tensorflow #hy: Graph input, same placeholders for various architectures x = tf.placeholder(tf.float32, [None, n_input, 1], name="x") y = tf.placeholder(tf.float32, [None, n_classes], name="y") tensor_h = settings.h_resize tensor_w = settings.w_resize ################################################ Graph 3conv begin # tf Graph input keep_prob = tf.placeholder(tf.float32, len(dropout), name="keep_prob") filter_size_1 = 11 filter_size_2 = 5 filter_size_3 = 3 SEED = 8 # hy: 8, 16, 64, number of filters, feature map size: input(42) - filter_size_1 + 1 = 38 conv2_out = 16 # hy: 16, 32, 64 outputs of final conv layer, feature map size: input(21) - filter_size_2 + 1 = 19 conv3_out = 32 # hy: 16, 32, 64 outputs of final conv layer, feature map size: input(21) - filter_size_2 + 1 = 19 def conv_net(_X, _weights, _biases, _dropout): # - INPUT Layer # Reshape input picture _X = tf.reshape(_X, shape=[-1, settings.h_resize, settings.w_resize, 1]) # hy: use updated proper values for shape print('\nArchitecture\ninput tensor', _X.get_shape()) # _X = tf.reshape(_X, shape=[-1, 32, 32, 3]) # TODO num channnels change # a = np.array(_X[0]) # print(a.shape) # Image._show(Image.fromarray(a, 'RGB')) ################################ # - Convolution Layer 1 k = 4 conv1 = conv2d(_X, _weights['wc1'], _biases['bc1'], k) # 4 print('conv1 ( f=', filter_size_1, 'k=', k, ')', conv1.get_shape()) # Max Pooling (down-sampling) k = 2 conv1 = max_pool(conv1, k) # TODO return it to K=2 print('conv1 max pooling ( k=', k, ')', conv1.get_shape()) # Apply Dropout conv1 = tf.nn.dropout(conv1, _dropout[0]) # TODO comment it later print('- dropout ( keep rate', dropout[0], ')', conv1.get_shape()) ################################ # - Convolution Layer 2 k = 1 conv2 = conv2d(conv1, _weights['wc2'], _biases['bc2'], k) 
print('\nconv2 ( f=', filter_size_2, 'k=', k, ')', conv2.get_shape()) # # Max Pooling (down-sampling) k = 2 conv2 = max_pool(conv2, k) print('conv2 - max pooling (k=', k, ')', conv2.get_shape()) # # Apply Dropout conv2 = tf.nn.dropout(conv2, _dropout[1]) # TODO comment it later! print('- dropout ( keep rate', dropout[1], ')', conv2.get_shape()) ################################ # - Convolution Layer 3 k = 1 conv3 = conv2d(conv2, _weights['wc3'], _biases['bc3'], k) print('\nconv3 ( f=', filter_size_3, 'k=', k, ')', conv3.get_shape()) k = 2 conv3 = max_pool(conv3, k) print('conv3 - max pooling ( k=', k, ')', conv3.get_shape()) conv3 = tf.nn.dropout(conv3, _dropout[2]) print('- dropout ( keep rate', dropout[2], ')', conv3.get_shape(), '<--') # Fully connected layer dense1 = tf.reshape(conv3, [-1, _weights['wd1'].get_shape().as_list()[0]]) # Reshape conv2 output to fit dense layer input print('\ndensel reshape:', dense1.get_shape(), 'n_hidden', n_hidden) dense1 = tf.nn.relu(tf.add(tf.matmul(dense1, _weights['wd1']), _biases['bd1'])) # Relu activation print('densel - relu:', dense1.get_shape()) dense1 = tf.nn.dropout(dense1, _dropout[3]) # Apply Dropout print('- dropout ( keep rate', dropout[3], ')', dense1.get_shape()) # Output, class prediction out = tf.add(tf.matmul(dense1, _weights['out']), _biases['out']) print('out:', out.get_shape()) return out # Store layers weight & bias #Graph_3conv # Store layers weight & bias #Graph_3conv weights = { 'wc1': tf.Variable(tf.random_normal([filter_size_1, filter_size_1, 1, SEED], stddev=0.1, seed=SEED), name="wc1"), # 5x5 conv, 1 input, 8 outputs 'wc2': tf.Variable(tf.random_normal([filter_size_2, filter_size_2, SEED, conv2_out], stddev=0.1, seed=SEED), name="wc2"), # 5x5 conv, 8 inputs, 16 outputs 'wc3': tf.Variable(tf.random_normal([filter_size_3, filter_size_3, conv2_out, conv3_out], stddev=0.1, seed=SEED), name="wc3"), # 5x5 conv, 8 inputs, 16 outputs # 'wc4': tf.Variable(tf.random_normal([filter_size_4, filter_size_4, conv3_out, conv4_out], stddev=0.1, seed=SEED), name="wc4"), # 5x5 conv, 8 inputs, 16 outputs # 'wd1': tf.Variable(tf.random_normal([16 * 24 / 2 * 42 / 2, n_hidden], stddev=0.1, seed=SEED)), # fully connected, 8*8*64 inputs, 1024 outputs # 'wd1': tf.Variable(tf.random_normal([8 * 8 * 64, 1024], stddev=0.1)), # fully connected, 8*8*64 inputs, 1024 outputs 'wd1': tf.Variable(tf.random_normal([6 * 6 * conv3_out, n_hidden], stddev=0.1, seed=SEED), name="wd1"), # hy: fully connected, 8*8*64 inputs, 1024 outputs 'out': tf.Variable(tf.random_normal([n_hidden, n_classes], stddev=0.1, seed=SEED), name="w_out") # 1024 inputs, 10 outputs (class prediction) } biases = { 'bc1': tf.Variable(tf.random_normal([SEED]), name="bc1"), 'bc2': tf.Variable(tf.random_normal([conv2_out]), name="bc2"), # hy: use variable, instead fixed number 'bc3': tf.Variable(tf.random_normal([conv3_out]), name="bc3"), # hy: use variable, instead fixed number 'bd1': tf.Variable(tf.random_normal([n_hidden]), name="bd1"), 'out': tf.Variable(tf.random_normal([n_classes]), name="b_out") # hy: } pred = conv_net(x, weights, biases, keep_prob) pred = tf.add(pred, 0, name="pred") # Define loss and optimizer cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y), name="cost") # learning_rate = tf.train.exponential_decay( # 0.01, # Base learning rate. # batch * BATCH_SIZE, # Current index into the dataset. # train_size, # Decay step. # 0.95, # Decay rate. 
    # staircase=True)

    if optimizer_type == 'adam':
        # hy: with the default Adam parameters (beta1=0.9, beta2=0.999, epsilon=1e-08) the training
        # accuracy is not stable; epsilon = 0.01 works better for these data
        print('\noptimizer', optimizer_type, 'learning_rate', learning_rate,
              '\nbeta1:', beta1, '\tbeta2:', beta2, '\tepsilon:', epsilon)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=beta1, beta2=beta2,
                                           epsilon=epsilon, use_locking=False, name='Adam').minimize(cost)
        # hy: Adam with only the learning rate as parameter can also be used to continue a training
        # that was previously done with the beta/epsilon setup:
        # optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    if optimizer_type == 'GD':
        # hy: GradientDescentOptimizer
        print('\noptimizer', optimizer_type, '\tlearning_rate', learning_rate)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)

    if optimizer_type == 'ProximalGradientDescent':
        optimizer = tf.train.ProximalGradientDescentOptimizer(learning_rate,
                                                              l1_regularization_strength=0.01,
                                                              l2_regularization_strength=0.01,
                                                              use_locking=False,
                                                              name='ProximalGradientDescent').minimize(cost)

    if optimizer_type == 'RMSprop':
        # hy: RMSPropOptimizer ('RMSprop' matches the spelling used in the header options)
        optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate, decay=decay, momentum=momentum,
                                              epsilon=epsilon_R, use_locking=False,
                                              name='RMSProp').minimize(cost)
        # optimizer = tflearn.optimizers.SGD(learning_rate=0.001, lr_decay=lr_decay, decay_step=decay_step,
        #                                    staircase=False, use_locking=False, name='SGD')
        print('\noptimizer', optimizer_type, '\tlearning_rate', learning_rate,
              'lr_decay', lr_decay, 'decay_step', decay_step)

    # Evaluate model
    amaxpred = tf.argmax(pred, 1)  # kept for debugging the prediction tensor
    amaxy = tf.argmax(y, 1)        # kept for debugging the label tensor
    correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name="accuracy")

    # Build the summary operation based on the TF collection of Summaries.
# Adding variables to be visualized # hy:add diagrams summary = tf.scalar_summary('Accuracy', accuracy) tf.scalar_summary('Loss', cost) # tf.image_summary('Images Original',tf.reshape(x, shape=[-1, 24, 42, 1]),max_images=4) tf.image_summary('Original', tf.reshape(x, shape=[-1, tensor_h, tensor_w, 1]), max_images=1) # hy:images_view # images after conv1 before max pool # _X = tf.reshape(x, shape=[-1, 24, 42, 1]) _X = tf.reshape(x, shape=[-1, tensor_h, tensor_w, 1]) # hy for display # conv1 = tf.placeholder(tf.float32, name="conv1") #hy added conv1 = conv2d(_X, weights['wc1'], biases['bc1'], 4) conv1 = tf.add(conv1, 0, name="conv1") print('for conv1 view', conv1.get_shape()) conv_view_size = 46 # tf.image_summary('Output of First Convolution', tf.reshape(x, shape=[-1, 24, 42, 1]), max_images=4) # tf.image_summary('Output of First Convolution', tf.reshape(conv1, shape=[-1, 24, 42, 1]), max_images=8) tf.image_summary('1.Conv', tf.reshape(conv1, shape=[-1, conv_view_size, conv_view_size, 1]), max_images=SEED) # hy # Max Pooling (down-sampling) # conv2 = tf.placeholder(tf.float32, name="conv2") #hy # hy: conv2 iview conv2 = conv2d(conv1, weights['wc2'], biases['bc2'], 1) conv2 = tf.add(conv2, 0, name="conv2") print('for conv2 view', conv2.get_shape()) # tf.image_summary('Output of Second Convolution',tf.reshape(conv2, shape=[-1, 24, 42, 1]), max_images=16) tf.image_summary('2.Conv', tf.reshape(conv2, shape=[-1, conv_view_size, conv_view_size, 1]), max_images=conv2_out) # hy # hy: conv3 iview conv3 = conv2d(conv2, weights['wc3'], biases['bc3'], 1) conv3 = tf.add(conv3, 0, name="conv3") print('for conv3 view', conv3.get_shape()) tf.image_summary('3.Conv', tf.reshape(conv3, shape=[-1, conv_view_size, conv_view_size, 1]), max_images=conv3_out) # hy # tf.image_summary('Weights 1.Conv', tf.reshape(weights['wc1'], [-1, filter_size_1, filter_size_1, 1]), max_images=images_view) #hy: use defined var patch # tf.image_summary('Weights Second Conv', tf.reshape(weights['wc2'], [-1, filter_size_1, filter_size_1, 1]), max_images=8) #hy: use defined var patch tf.histogram_summary('Histogram 1.Conv', weights['wc1']) # tf.histogram_summary('Histogram 2.Conv', weights['wc2']) #hy: added tf.histogram_summary('Histogram pred', pred) # hy: added summary_op = tf.merge_all_summaries() ################################################ Graph 3conv end return ( n_hidden, learning_rate, dropout, dropout_1s, optimizer_type, pred, x, y, keep_prob, optimizer, accuracy, cost, summary_op)
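

# ----------------------------------------------------------------------------
# Hedged training-loop sketch (added for illustration; not part of the
# original script).  The random batch stands in for the project's real image
# loader, and the shapes only work out if settings.h_resize/w_resize match
# the hard-coded 6*6*conv3_out fully-connected input above; the session and
# variable-initialization calls follow the same TF 0.x-era API used in
# define_model().
def _example_training_loop(num_steps=5, batch_size=8):
    import numpy as np

    (_, _, keep_rates, keep_rates_eval, _, pred, x, y, keep_prob,
     optimizer, accuracy, cost, summary_op) = define_model()

    batch_x = np.random.rand(batch_size, n_input, 1).astype(np.float32)
    batch_y = np.eye(n_classes)[np.random.randint(0, n_classes, batch_size)]

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        for step in range(num_steps):
            _, loss = sess.run([optimizer, cost],
                               feed_dict={x: batch_x, y: batch_y, keep_prob: keep_rates})
            acc = sess.run(accuracy,
                           feed_dict={x: batch_x, y: batch_y, keep_prob: keep_rates_eval})
            print('step', step, 'loss', loss, 'accuracy', acc)
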
# log_gabor filter and __frequency_butterworth_filter are derived from Matlab # scripts written by Peter Kovesi. We maintain his copyright notice below. # # Copyright (c) 1999 Peter Kovesi # School of Computer Science & Software Engineering # The University of Western Australia # http://www.csse.uwa.edu.au/ # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # The Software is provided "as is", without warranty of any kind. import numpy as np def __adjusted_meshgrid(shape): """ Creates an adjusted meshgrid that accounts for odd image sizes. Linearly interpolates the values. This meshgrid assumes 'ij' indexing - which is due to the 1st dimension of an image being the y-dimension. Parameters ---------- shape: tuple Size of meshgrid, (M, N, ...). The dimensionality should not be swapped due to using images. Therefore, for a 2D image, the expected tuple is ``(HEIGHT, WIDTH)``. Returns ------- meshgrid : list of (M, N, ...) ndarrays The meshgrid over each dimension given by the shape. """ adjust_range = [] for dim in shape: adjust_range.append(np.linspace(-0.5, 0.5, dim)) return np.meshgrid(*adjust_range, indexing='ij') def __frequency_butterworth_filter(shape, cutoff, order): r""" Builds an N-D butterworth filter ..math:: f = \frac{1.0}{1.0 + (w / cutoff)^{2n}} The frequency origin of the returned filter is at the corners. Parameters ---------- shape : tuple The size of the filter (M, N, ...) cutoff : double Cutoff frequency of the filter in the range ``[0, 0.5]`` order : positive int Order of the filter. The higher it is the sharper the transition Returns ------- butterworth_filter : (M, N, ...) ndarray The butterworth filter for the given parameters. Will be the same shape as was requested. """ # Dimension-free sum of squares grid = __adjusted_meshgrid(shape) grid_sq = [g ** 2 for g in grid] grid_sq = sum(grid_sq) radius = np.sqrt(grid_sq) return np.fft.ifftshift(1.0 / ((radius / cutoff) ** (2 * order) + 1.0)) # TODO: merge the 2D and 3D versions if possible def log_gabor(image, **kwargs): r""" Creates a log-gabor filter bank, including smoothing the images via a low-pass filter at the edges. To create a 2D filter bank, simply specify the number of phi orientations (orientations in the xy-plane). To create a 3D filter bank, you must specify both the number of phi (azimuth) and theta (elevation) orientations. This algorithm is directly derived from work by Peter Kovesi. Parameters ---------- image : (M, N, ...) ndarray Image to be convolved num_scales : int, optional Number of wavelet scales. ========== == Default 2D 4 Default 3D 4 ========== == num_phi_orientations : int, optional Number of filter orientations in the xy-plane ========== == Default 2D 6 Default 3D 6 ========== == num_theta_orientations : int, optional **Only required for 3D**. Number of filter orientations in the z-plane ========== == Default 2D N/A Default 3D 4 ========== == min_wavelength : int, optional Wavelength of smallest scale filter. ========== == Default 2D 3 Default 3D 3 ========== == scaling_constant : int, optional Scaling factor between successive filters. 
========== == Default 2D 2 Default 3D 2 ========== == center_sigma : double, optional Ratio of the standard deviation of the Gaussian describing the Log Gabor filter's transfer function in the frequency domain to the filter center frequency. ========== == Default 2D 0.65 Default 3D 0.65 ========== == d_phi_sigma : double, optional Angular bandwidth in xy-plane ========== == Default 2D 1.3 Default 3D 1.5 ========== == d_theta_sigma : double, optional **Only required for 3D**. Angular bandwidth in z-plane ========== == Default 2D N/A Default 3D 1.5 ========== == Returns ------- complex_conv : (num_scales x num_orientations x image.shape) ndarray Complex valued convolution results. The real part is the result of convolving with the even symmetric filter, the imaginary part is the result from convolution with the odd symmetric filter. bandpass : (num_scales x image.shape) ndarray Bandpass images corresponding to each scale ``s`` S: (image.shape) ndarray Convolved image Examples -------- Return the magnitude of the convolution over the image at scale ``s`` and orientation ``o``:: >>> np.abs(complex_conv[s, o, :, :]) Return the phase angles:: >>> np.angle(complex_conv[s, o, :, :]) References ---------- .. [1] D. J. Field, "Relations Between the Statistics of Natural Images and the Response Properties of Cortical Cells", Journal of The Optical Society of America A, Vol 4, No. 12, December 1987. pp 2379-2394 """ if len(image.shape) == 2: # 2D filter return __log_gabor_2d(image, **kwargs) elif len(image.shape) == 3: # 3D filter return __log_gabor_3d(image, **kwargs) else: raise ValueError("Image must be either 2D or 3D") def __log_gabor_3d(image, num_scales=4, num_phi_orientations=6, num_theta_orientations=4, min_wavelength=3, scaling_constant=2, center_sigma=0.65, d_theta_sigma=1.5, d_phi_sigma=1.5): # Pre-compute sigma values theta_sigma = np.pi / num_theta_orientations / d_theta_sigma phi_sigma = (2 * np.pi) / num_phi_orientations / d_phi_sigma # Allocate space for return structures bandpass = np.empty([num_scales, image.shape[0], image.shape[1], image.shape[2]], dtype=np.complex) log_gabor = np.empty([num_scales, image.shape[0], image.shape[1], image.shape[2]]) S = np.zeros(image.shape) complex_conv = np.empty([num_scales, num_theta_orientations, num_phi_orientations, image.shape[0], image.shape[1], image.shape[2]], dtype=np.complex) tmp_complex_conv = np.empty([num_scales, image.shape[0], image.shape[1], image.shape[2]], dtype=np.complex) # Pre-compute fourier values image_fft = np.fft.fftn(image) axis0, axis1, axis2 = __adjusted_meshgrid(image.shape) radius = np.sqrt(axis0 ** 2 + axis1 ** 2 + axis2 ** 2) theta = np.arctan2(axis0, axis1) # TODO: Is adding the mean REALLY a good idea? 
    m_ab = np.abs(np.mean(radius))
    phi = np.arccos(axis2 / (radius + m_ab))

    radius = np.fft.ifftshift(radius)
    radius[0, 0, 0] = 1.0
    theta = np.fft.ifftshift(theta)
    phi = np.fft.ifftshift(phi)

    sin_theta = np.sin(theta)
    cos_theta = np.cos(theta)
    sin_phi = np.sin(phi)
    cos_phi = np.cos(phi)

    # Compute the lowpass filter
    butterworth_filter = __frequency_butterworth_filter(image.shape, 0.45, 15)

    # Compute radial component of filter
    for s in range(num_scales):
        wavelength = min_wavelength * scaling_constant ** s
        fo = 1.0 / wavelength
        l = np.exp((-np.log(radius / fo) ** 2) /
                   (2.0 * np.log(center_sigma) ** 2))
        l = l * butterworth_filter
        l[0, 0, 0] = 0.0
        log_gabor[s, :, :, :] = l
        bandpass[s, :, :, :] = np.fft.ifft2(image_fft * l)

    # Compute angular component of filter
    for e in range(num_theta_orientations):
        # Pre-compute filter data specific to this orientation
        elevation_angle = e * np.pi / num_theta_orientations
        d_theta_sin = (sin_theta * np.cos(elevation_angle) -
                       cos_theta * np.sin(elevation_angle))
        d_theta_cos = (cos_theta * np.cos(elevation_angle) +
                       sin_theta * np.sin(elevation_angle))
        d_theta = np.abs(np.arctan2(d_theta_sin, d_theta_cos))

        for a in range(num_phi_orientations):
            azimuth_angle = a * 2 * np.pi / num_phi_orientations
            d_phi_sin = (sin_phi * np.cos(azimuth_angle) -
                         cos_phi * np.sin(azimuth_angle))
            d_phi_cos = (cos_phi * np.cos(azimuth_angle) +
                         sin_phi * np.sin(azimuth_angle))
            d_phi = np.abs(np.arctan2(d_phi_sin, d_phi_cos))

            phi_spread = (-d_phi ** 2) / (2 * phi_sigma ** 2)
            theta_spread = (-d_theta ** 2) / (2 * theta_sigma ** 2)
            spread = np.exp(phi_spread + theta_spread)

            # For each scale, multiply by the angular spread
            for s in range(0, num_scales):
                filter_bank = log_gabor[s] * spread
                shifted_filter = np.fft.fftshift(filter_bank)
                S += shifted_filter * np.conjugate(shifted_filter)
                tmp_complex_conv[s, :, :] = np.fft.ifft2(image_fft *
                                                         filter_bank)

            complex_conv[:, e, a, :, :] = tmp_complex_conv[None, None, ...]

    # TODO: Do we need to flip S as in the 2D version?
    return complex_conv, bandpass, S


def __log_gabor_2d(image, num_scales=4, num_orientations=6,
                   min_wavelength=3, scaling_constant=2, center_sigma=0.65,
                   d_phi_sigma=1.3):
    # Allocate space for return structures
    bandpass = np.empty([num_scales, image.shape[0], image.shape[1]],
                        dtype=np.complex)
    log_gabor = np.empty([num_scales, image.shape[0], image.shape[1]])
    S = np.zeros(image.shape)
    complex_conv = np.empty([num_scales, num_orientations, image.shape[0],
                             image.shape[1]], dtype=np.complex)
    tmp_complex_conv = np.empty([num_scales, image.shape[0], image.shape[1]],
                                dtype=np.complex)

    # Pre-compute phi sigma
    phi_sigma = np.pi / num_orientations / d_phi_sigma

    # Pre-compute fourier values
    image_fft = np.fft.fft2(image)

    axis0, axis1 = __adjusted_meshgrid(image.shape)
    radius = np.sqrt(axis0 ** 2 + axis1 ** 2)
    phi = np.arctan2(axis0, axis1)

    radius = np.fft.ifftshift(radius)
    radius[0][0] = 1.0
    phi = np.fft.ifftshift(phi)

    sin_phi = np.sin(phi)
    cos_phi = np.cos(phi)

    # Compute the lowpass filter
    butterworth_filter = __frequency_butterworth_filter(image.shape, 0.45, 15)

    # Compute radial component of filter
    for s in range(num_scales):
        wavelength = min_wavelength * scaling_constant ** s
        fo = 1.0 / wavelength
        l = np.exp((-(np.log(radius / fo)) ** 2) /
                   (2.0 * np.log(center_sigma) ** 2))
        l = l * butterworth_filter
        l[0][0] = 0.0
        log_gabor[s, :, :] = l
        bandpass[s, :, :] = np.fft.ifft2(image_fft * l)

    # Compute angular component of filter
    for o in range(num_orientations):
        # Pre-compute filter data specific to this orientation
        filter_angle = o * np.pi / num_orientations
        ds = (sin_phi * np.cos(filter_angle) -
              cos_phi * np.sin(filter_angle))
        dc = (cos_phi * np.cos(filter_angle) +
              sin_phi * np.sin(filter_angle))
        d_phi = np.abs(np.arctan2(ds, dc))

        # Calculate the standard deviation of the angular Gaussian
        # function used to construct filters in the freq. plane.
        spread = np.exp((-d_phi ** 2.0) / (2.0 * phi_sigma ** 2))

        # For each scale, multiply by the angular spread
        for s in range(0, num_scales):
            filter_bank = log_gabor[s] * spread
            shifted_filter = np.fft.fftshift(filter_bank)
            S += shifted_filter * np.conjugate(shifted_filter)
            tmp_complex_conv[s, :, :] = np.fft.ifft2(image_fft * filter_bank)

        complex_conv[:, o, :, :] = tmp_complex_conv[None, ...]

    # TODO: Why is this done??
    return complex_conv, bandpass, np.flipud(S)
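
# -----------------------------------------------------------------------------
# Usage sketch -- not part of the original module. A minimal, illustrative run
# of the 2D filter bank on a small random image (the image below is a
# placeholder assumption), mirroring the access patterns shown in the
# log_gabor docstring examples.
if __name__ == '__main__':
    example_image = np.random.rand(64, 64)
    complex_conv, bandpass, S = log_gabor(example_image)
    # Magnitude and phase of the response at scale 0, orientation 0
    magnitude = np.abs(complex_conv[0, 0, :, :])
    phase = np.angle(complex_conv[0, 0, :, :])
    print(magnitude.shape)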
import os
import mimetypes

from ems.converter.inputreader import InputReader
from ems.converter.outputwriter import OutputWriter
from ems.converter.tag import Tag
from ems.converter.modifier import Modifier, ModifierException
from ems.converter.plugin import Plugin
from ems.converter.preprocessor import PreProcessor
from ems.xml.xml2dict import xml2obj


class Converter(object):

    reader = 1
    writer = 2
    tag = 3
    modifier = 4
    preprocessor = 5

    replace = 1
    update = 2
    sync = 3

    breakOnErrors = False

    def __init__(self):
        self.plugins = {}
        self.plugins[self.reader] = {}
        self.plugins[self.writer] = {}
        self.plugins[self.tag] = {}
        self.plugins[self.modifier] = {}
        self.plugins[self.preprocessor] = []
        self._variables = {}
        self._ignoredTopLevelTags = {}
        self.writeMode = Converter.replace

    def getReaderForFileName(self, filename):
        mimeTypeFound = False
        mimeType = mimetypes.guess_type(filename)
        # print mimeType
        if mimeType[0] is not None and mimeType[1] is not None:
            mimeTypeFound = True
            #print mimetypes.guess_extension(mimeType[0])
        if mimeTypeFound:
            # print "mimeType found"
            for reader in self.plugins[self.reader]:
                mimeTypes = self.plugins[self.reader][reader].getSupportedMimeTypes()
                for mType in mimeTypes:
                    #print "%s %s" % (mType, mType.suffixes)
                    if mimeType[0] == str(mType):
                        return self.plugins[self.reader][reader]
        try:
            extension = ".%s" % unicode(filename).split('.')[-1:][0]
            # print "File: %s %s" % (filename, extension)
            for reader in self.plugins[self.reader]:
                mimeTypes = self.plugins[self.reader][reader].getSupportedMimeTypes()
                for mType in mimeTypes:
                    # print mType
                    if extension.lower() in mType.suffixes:
                        # print extension
                        # print self.plugins[self.reader][reader]
                        return self.plugins[self.reader][reader]
        except StopIteration, e:
            pass
        raise NotImplementedError("No suitable importer found for file %s"
                                  % filename)

    def setVar(self, n, v):
        self._variables[n] = v

    def getVar(self, n):
        try:
            return self._variables[n]
        except LookupError:
            return None

    def getReaders(self):
        return self.plugins[self.reader]

    readers = property(getReaders, None, None, "The registered InputReader plugins")

    def getWriters(self):
        return self.plugins[self.writer]

    writers = property(getWriters, None, None, "The registered OutputWriter plugins")

    def getTags(self):
        return self.plugins[self.tag]

    tags = property(getTags, None, None, "The registered Tag plugins")

    def getModifiers(self):
        return self.plugins[self.modifier]

    modifiers = property(getModifiers, None, None, "The registered Modifier plugins")

    def addPlugin(self, type, plugin):
        if not isinstance(plugin, Plugin):
            raise TypeError("The plugin has to be an Instance of ems.converter.plugin.Plugin")
        plugin.converter = self
        if type == self.reader:
            if isinstance(plugin, InputReader):
                self.plugins[self.reader][self._getPluginName(plugin)] = plugin
            else:
                raise TypeError("The reader has to be an Instance of InputReader")
        elif type == self.writer:
            if isinstance(plugin, OutputWriter):
                self.plugins[self.writer][self._getPluginName(plugin)] = plugin
            else:
                raise TypeError("The writer has to be an Instance of OutputWriter")
        elif type == self.tag:
            if isinstance(plugin, Tag):
                self.plugins[self.tag][self._getPluginName(plugin)] = plugin
            else:
                raise TypeError("The tag has to be an Instance of Tag")
        elif type == self.modifier:
            if isinstance(plugin, Modifier):
                self.plugins[self.modifier][self._getPluginName(plugin)] = plugin
            else:
                raise TypeError("The modifier has to be an Instance of Modifier")
        elif type == self.preprocessor:
            if isinstance(plugin, PreProcessor):
                self.plugins[self.preprocessor].append(plugin)
            else:
                raise TypeError("The preprocessor has to be an Instance of 
Preprocessor") else: raise TypeError("Unknown type \"%s\" of plugin" % type) def _getPluginName(self,plugin): if isinstance(plugin, Tag) or isinstance(plugin, Modifier): return "%s" % plugin return type(plugin).__name__ def getMimeTypes(self,type): mimeTypes = [] for reader in self.plugins[type]: for mimeType in self.plugins[type][reader].getSupportedMimeTypes(): try: mimeTypes.append(mimeType) except TypeError: pass return mimeTypes def getExtensions(self, type): mimeTypes = self.getMimeTypes(type) extensions = [] for mimeType in mimeTypes: for ext in mimetypes.guess_all_extensions(str(mimeType)): try: extensions.append(ext) except TypeError: pass return extensions def setInputUri(self,uri): self.__inputUri = uri def ignoreToplevelTag(self,name,ignore=True): self._ignoredTopLevelTags[name] = ignore def _applyPreProcessors(self,xmlDict): for preprocessor in self.plugins[self.preprocessor]: preprocessor.interpret(xmlDict) def convert(self,reader,writer,mappingFile,writeMode=1): ''' Starts conversion. You have to configure the reader and writer manually @param reader: ems.converter.reader.InputReader @type reader: ems.converter.reader.InputReader @param writer: ems.converter.writer.OutputWriter @type writer: ems.converter.writer.OutputWriter @param mappingFile: string The path to mapping xml File @type mappingFile: string ''' self.writeMode = writeMode if isinstance(reader, InputReader) and isinstance(writer, OutputWriter): # print self.plugins[self.tag]['value-of'].parsedXPaths if os.path.exists(mappingFile): xmlDict = self.getDictionaryOfMapping(open(mappingFile).read()) for preprocessor in self.plugins[self.preprocessor]: preprocessor.notify(Plugin.startProcess) for tag in self.plugins[self.tag]: self.plugins[self.tag][tag].notify(Plugin.startProcess) for modifierName in self.plugins[self.modifier]: self.plugins[self.modifier][modifierName].notify(Plugin.startProcess) self._applyPreProcessors(xmlDict) reader.notify(Plugin.startProcess) writer.notify(Plugin.startProcess) self._parse( reader, writer, xmlDict ) reader.notify(Plugin.endProcess) writer.notify(Plugin.endProcess) else: raise IOError("Mappingfile \"%s\" not found" % mappingFile) else: raise TypeError("First Param has to be InputReader, second OutputWriter") pass def _parse(self,reader,writer,mappingDict): if len(mappingDict['children']): if mappingDict['children'][0]['tag'] == 'mapping': if len(mappingDict['children'][0]['children']): for child in mappingDict['children'][0]['children']: if not self._ignoredTopLevelTags.has_key(child['tag']): self.interpretTag(child, reader, writer) else: raise SyntaxError("Mapping is empty") else: raise SyntaxError("Xml Data not well-formed or has no mapping root") else: raise SyntaxError("Xml Data is empty or not well-formed") def interpretTag(self,xmlDict,reader,writer): try: return self.plugins[self.tag][xmlDict['tag']].interpret(xmlDict,reader,writer) except LookupError,e: if not self.plugins[self.tag].has_key(xmlDict['tag']): raise SyntaxError("Tag \"%s\"is not supported or loaded" % xmlDict['tag']) if self.breakOnErrors: raise e except ModifierException,e: if self.breakOnErrors: raise e except ValueError, e: if self.breakOnErrors: raise e def getDictionaryOfMapping(self,mappingString): return xml2obj(mappingString) def getUsedPaths(self, mappingFile): mappingDict = self.getDictionaryOfMapping(open(mappingFile).read()) results = [] Converter._searchValueOfs(mappingDict, results) return results def getUsedFieldNames(self, mappingFile): valueOfs = self.getUsedPaths(mappingFile) result = [] for 
valueOf in valueOfs: select = valueOf['attributes']['select'] if self.plugins[self.tag]['value-of'].isFieldExpression(select): result.append(select) return result @staticmethod def _searchValueOfs(mappingDict, result): if mappingDict['tag'] == 'value-of': result.append(mappingDict) if mappingDict['children']: for child in mappingDict['children']: Converter._searchValueOfs(child, result)
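
# -----------------------------------------------------------------------------
# Usage sketch -- not part of the original module. The concrete reader/writer
# classes named below are assumptions for illustration only; the Converter
# calls mirror the API defined above (plugin registration, reader lookup by
# file name, and conversion driven by a mapping XML file).
#
#     converter = Converter()
#     reader = MyXmlReader()      # hypothetical InputReader subclass
#     writer = MyCsvWriter()      # hypothetical OutputWriter subclass
#     converter.addPlugin(Converter.reader, reader)
#     converter.addPlugin(Converter.writer, writer)
#
#     # Pick a reader by file name (mime type first, extension as fallback)
#     reader = converter.getReaderForFileName('customers.xml')
#     converter.convert(reader, writer, 'mapping.xml',
#                       writeMode=Converter.replace)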
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved. # # Powered by the Bokeh Development Team. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function, unicode_literals import pytest ; pytest #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports from collections import deque import datetime as dt import decimal # External imports import dateutil.relativedelta as rd import numpy as np from six import string_types # Bokeh imports from bokeh.colors import RGB from bokeh.core.has_props import HasProps from bokeh.core.properties import Int, String from bokeh.models import Range1d # Module under test #----------------------------------------------------------------------------- # Setup #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- class HP(HasProps): foo = Int(default=10) bar = String() class TestBokehJSONEncoder(object): def setup_method(self, test_method): from bokeh.core.json_encoder import BokehJSONEncoder self.encoder = BokehJSONEncoder() def test_fail(self): with pytest.raises(TypeError): self.encoder.default({'testing': 1}) def test_panda_series(self, pd): s = pd.Series([1, 3, 5, 6, 8]) assert self.encoder.default(s) == [1, 3, 5, 6, 8] def test_numpyarray(self): a = np.arange(5) assert self.encoder.default(a) == [0, 1, 2, 3, 4] def test_numpyint(self): npint = np.asscalar(np.int64(1)) assert self.encoder.default(npint) == 1 assert isinstance(self.encoder.default(npint), int) def test_numpyfloat(self): npfloat = np.float64(1.33) assert self.encoder.default(npfloat) == 1.33 assert isinstance(self.encoder.default(npfloat), float) def test_numpybool_(self): nptrue = np.bool_(True) assert self.encoder.default(nptrue) == True assert isinstance(self.encoder.default(nptrue), bool) def test_numpydatetime64(self): npdt64 = np.datetime64('2017-01-01') assert self.encoder.default(npdt64) == 1483228800000.0 assert isinstance(self.encoder.default(npdt64), float) def test_time(self): dttime = dt.time(12, 32, 15) assert self.encoder.default(dttime) == 45135000.0 assert isinstance(self.encoder.default(dttime), float) def test_relativedelta(self): rdelt = rd.relativedelta() assert isinstance(self.encoder.default(rdelt), dict) def test_decimal(self): dec = decimal.Decimal(20.3) assert self.encoder.default(dec) == 20.3 assert isinstance(self.encoder.default(dec), float) def test_model(self): m = Range1d(start=10, end=20) assert self.encoder.default(m) == m.ref assert isinstance(self.encoder.default(m), dict) def test_hasprops(self): hp = HP() assert self.encoder.default(hp) == {} assert isinstance(self.encoder.default(hp), dict) hp.foo = 15 assert self.encoder.default(hp) == 
{'foo': 15} assert isinstance(self.encoder.default(hp), dict) hp.bar = "test" assert self.encoder.default(hp) == {'foo': 15, 'bar': 'test'} assert isinstance(self.encoder.default(hp), dict) def test_color(self): c = RGB(16, 32, 64) assert self.encoder.default(c) == "rgb(16, 32, 64)" assert isinstance(self.encoder.default(c), string_types) c = RGB(16, 32, 64, 0.1) assert self.encoder.default(c) == "rgba(16, 32, 64, 0.1)" assert isinstance(self.encoder.default(c), string_types) def test_slice(self): c = slice(2) assert self.encoder.default(c) == dict(start=None, stop=2, step=None) assert isinstance(self.encoder.default(c), dict) c = slice(0,2) assert self.encoder.default(c) == dict(start=0, stop=2, step=None) assert isinstance(self.encoder.default(c), dict) c = slice(0, 10, 2) assert self.encoder.default(c) == dict(start=0, stop=10, step=2) assert isinstance(self.encoder.default(c), dict) c = slice(0, None, 2) assert self.encoder.default(c) == dict(start=0, stop=None, step=2) assert isinstance(self.encoder.default(c), dict) c = slice(None, None, None) assert self.encoder.default(c) == dict(start=None, stop=None, step=None) assert isinstance(self.encoder.default(c), dict) def test_pd_timestamp(self, pd): ts = pd.Timestamp('April 28, 1948') assert self.encoder.default(ts) == -684115200000 class TestSerializeJson(object): def setup_method(self, test_method): from bokeh.core.json_encoder import serialize_json from json import loads self.serialize = serialize_json self.deserialize = loads def test_with_basic(self): assert self.serialize({'test': [1, 2, 3]}) == '{"test":[1,2,3]}' def test_pretty(self): assert self.serialize({'test': [1, 2, 3]}, pretty=True) == '{\n "test": [\n 1,\n 2,\n 3\n ]\n}' def test_with_np_array(self): a = np.arange(5) assert self.serialize(a) == '[0,1,2,3,4]' def test_with_pd_series(self, pd): s = pd.Series([0, 1, 2, 3, 4]) assert self.serialize(s) == '[0,1,2,3,4]' def test_nans_and_infs(self): arr = np.array([np.nan, np.inf, -np.inf, 0]) serialized = self.serialize(arr) deserialized = self.deserialize(serialized) assert deserialized[0] == 'NaN' assert deserialized[1] == 'Infinity' assert deserialized[2] == '-Infinity' assert deserialized[3] == 0 def test_nans_and_infs_pandas(self, pd): arr = pd.Series(np.array([np.nan, np.inf, -np.inf, 0])) serialized = self.serialize(arr) deserialized = self.deserialize(serialized) assert deserialized[0] == 'NaN' assert deserialized[1] == 'Infinity' assert deserialized[2] == '-Infinity' assert deserialized[3] == 0 def test_pandas_datetime_types(self, pd): """ should convert to millis """ idx = pd.date_range('2001-1-1', '2001-1-5') df = pd.DataFrame({'vals' :idx}, index=idx) serialized = self.serialize({'vals' : df.vals, 'idx' : df.index}) deserialized = self.deserialize(serialized) baseline = {u'vals': [978307200000, 978393600000, 978480000000, 978566400000, 978652800000], u'idx': [978307200000, 978393600000, 978480000000, 978566400000, 978652800000] } assert deserialized == baseline def test_builtin_datetime_types(self): """ should convert to millis as-is """ DT_EPOCH = dt.datetime.utcfromtimestamp(0) a = dt.date(2016, 4, 28) b = dt.datetime(2016, 4, 28, 2, 20, 50) serialized = self.serialize({'a' : [a], 'b' : [b]}) deserialized = self.deserialize(serialized) baseline = {u'a': [(dt.datetime(*a.timetuple()[:6]) - DT_EPOCH).total_seconds() * 1000], u'b': [(b - DT_EPOCH).total_seconds() * 1000. 
+ b.microsecond / 1000.], } assert deserialized == baseline # test pre-computed values too assert deserialized == { u'a': [1461801600000.0], u'b': [1461810050000.0] } def test_builtin_timedelta_types(self): """ should convert time delta to a dictionary """ delta = dt.timedelta(days=42, seconds=1138, microseconds=1337) serialized = self.serialize(delta) deserialized = self.deserialize(serialized) assert deserialized == delta.total_seconds() * 1000 def test_numpy_timedelta_types(self): delta = np.timedelta64(3000, 'ms') serialized = self.serialize(delta) deserialized = self.deserialize(serialized) assert deserialized == 3000 delta = np.timedelta64(3000, 's') serialized = self.serialize(delta) deserialized = self.deserialize(serialized) assert deserialized == 3000000 def test_pandas_timedelta_types(self, pd): delta = pd.Timedelta("3000ms") serialized = self.serialize(delta) deserialized = self.deserialize(serialized) assert deserialized == 3000 def test_deque(self): """Test that a deque is deserialized as a list.""" assert self.serialize(deque([0, 1, 2])) == '[0,1,2]' def test_slice(self): """Test that a slice is deserialized as a list.""" assert self.serialize(slice(2)) == '{"start":null,"step":null,"stop":2}' assert self.serialize(slice(0, 2)) == '{"start":0,"step":null,"stop":2}' assert self.serialize(slice(0, 10, 2)) == '{"start":0,"step":2,"stop":10}' assert self.serialize(slice(0, None, 2)) == '{"start":0,"step":2,"stop":null}' assert self.serialize(slice(None, None, None)) == '{"start":null,"step":null,"stop":null}' def test_bad_kwargs(self): with pytest.raises(ValueError): self.serialize([1], allow_nan=True) with pytest.raises(ValueError): self.serialize([1], separators=("a", "b")) with pytest.raises(ValueError): self.serialize([1], sort_keys=False) #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
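
# Usage sketch -- not part of the test suite. A minimal illustration of the
# behaviour asserted above: serialize_json emits compact JSON and encodes
# NaN/Infinity as strings, so the output stays parseable by a strict JSON
# loader.
#
#     from json import loads
#     from bokeh.core.json_encoder import serialize_json
#
#     assert serialize_json({'test': [1, 2, 3]}) == '{"test":[1,2,3]}'
#     assert loads(serialize_json(np.array([np.nan, 0.0])))[0] == 'NaN'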
"""Purge old data helper.""" from __future__ import annotations from datetime import datetime import logging from typing import TYPE_CHECKING, Callable from sqlalchemy.orm.session import Session from sqlalchemy.sql.expression import distinct from .const import MAX_ROWS_TO_PURGE from .models import Events, RecorderRuns, States from .repack import repack_database from .util import retryable_database_job, session_scope if TYPE_CHECKING: from . import Recorder _LOGGER = logging.getLogger(__name__) @retryable_database_job("purge") def purge_old_data( instance: Recorder, purge_before: datetime, repack: bool, apply_filter: bool = False ) -> bool: """Purge events and states older than purge_before. Cleans up an timeframe of an hour, based on the oldest record. """ _LOGGER.debug( "Purging states and events before target %s", purge_before.isoformat(sep=" ", timespec="seconds"), ) with session_scope(session=instance.get_session()) as session: # type: ignore # Purge a max of MAX_ROWS_TO_PURGE, based on the oldest states or events record event_ids = _select_event_ids_to_purge(session, purge_before) state_ids = _select_state_ids_to_purge(session, purge_before, event_ids) if state_ids: _purge_state_ids(session, state_ids) if event_ids: _purge_event_ids(session, event_ids) # If states or events purging isn't processing the purge_before yet, # return false, as we are not done yet. _LOGGER.debug("Purging hasn't fully completed yet") return False if apply_filter and _purge_filtered_data(instance, session) is False: _LOGGER.debug("Cleanup filtered data hasn't fully completed yet") return False _purge_old_recorder_runs(instance, session, purge_before) if repack: repack_database(instance) return True def _select_event_ids_to_purge(session: Session, purge_before: datetime) -> list[int]: """Return a list of event ids to purge.""" events = ( session.query(Events.event_id) .filter(Events.time_fired < purge_before) .limit(MAX_ROWS_TO_PURGE) .all() ) _LOGGER.debug("Selected %s event ids to remove", len(events)) return [event.event_id for event in events] def _select_state_ids_to_purge( session: Session, purge_before: datetime, event_ids: list[int] ) -> list[int]: """Return a list of state ids to purge.""" if not event_ids: return [] states = ( session.query(States.state_id) .filter(States.last_updated < purge_before) .filter(States.event_id.in_(event_ids)) .all() ) _LOGGER.debug("Selected %s state ids to remove", len(states)) return [state.state_id for state in states] def _purge_state_ids(session: Session, state_ids: list[int]) -> None: """Disconnect states and delete by state id.""" # Update old_state_id to NULL before deleting to ensure # the delete does not fail due to a foreign key constraint # since some databases (MSSQL) cannot do the ON DELETE SET NULL # for us. 
disconnected_rows = ( session.query(States) .filter(States.old_state_id.in_(state_ids)) .update({"old_state_id": None}, synchronize_session=False) ) _LOGGER.debug("Updated %s states to remove old_state_id", disconnected_rows) deleted_rows = ( session.query(States) .filter(States.state_id.in_(state_ids)) .delete(synchronize_session=False) ) _LOGGER.debug("Deleted %s states", deleted_rows) def _purge_event_ids(session: Session, event_ids: list[int]) -> None: """Delete by event id.""" deleted_rows = ( session.query(Events) .filter(Events.event_id.in_(event_ids)) .delete(synchronize_session=False) ) _LOGGER.debug("Deleted %s events", deleted_rows) def _purge_old_recorder_runs( instance: Recorder, session: Session, purge_before: datetime ) -> None: """Purge all old recorder runs.""" # Recorder runs is small, no need to batch run it deleted_rows = ( session.query(RecorderRuns) .filter(RecorderRuns.start < purge_before) .filter(RecorderRuns.run_id != instance.run_info.run_id) .delete(synchronize_session=False) ) _LOGGER.debug("Deleted %s recorder_runs", deleted_rows) def _purge_filtered_data(instance: Recorder, session: Session) -> bool: """Remove filtered states and events that shouldn't be in the database.""" _LOGGER.debug("Cleanup filtered data") # Check if excluded entity_ids are in database excluded_entity_ids: list[str] = [ entity_id for (entity_id,) in session.query(distinct(States.entity_id)).all() if not instance.entity_filter(entity_id) ] if len(excluded_entity_ids) > 0: _purge_filtered_states(session, excluded_entity_ids) return False # Check if excluded event_types are in database excluded_event_types: list[str] = [ event_type for (event_type,) in session.query(distinct(Events.event_type)).all() if event_type in instance.exclude_t ] if len(excluded_event_types) > 0: _purge_filtered_events(session, excluded_event_types) return False return True def _purge_filtered_states(session: Session, excluded_entity_ids: list[str]) -> None: """Remove filtered states and linked events.""" state_ids: list[int] event_ids: list[int | None] state_ids, event_ids = zip( *( session.query(States.state_id, States.event_id) .filter(States.entity_id.in_(excluded_entity_ids)) .limit(MAX_ROWS_TO_PURGE) .all() ) ) event_ids = [id_ for id_ in event_ids if id_ is not None] _LOGGER.debug( "Selected %s state_ids to remove that should be filtered", len(state_ids) ) _purge_state_ids(session, state_ids) _purge_event_ids(session, event_ids) # type: ignore # type of event_ids already narrowed to 'list[int]' def _purge_filtered_events(session: Session, excluded_event_types: list[str]) -> None: """Remove filtered events and linked states.""" events: list[Events] = ( session.query(Events.event_id) .filter(Events.event_type.in_(excluded_event_types)) .limit(MAX_ROWS_TO_PURGE) .all() ) event_ids: list[int] = [event.event_id for event in events] _LOGGER.debug( "Selected %s event_ids to remove that should be filtered", len(event_ids) ) states: list[States] = ( session.query(States.state_id).filter(States.event_id.in_(event_ids)).all() ) state_ids: list[int] = [state.state_id for state in states] _purge_state_ids(session, state_ids) _purge_event_ids(session, event_ids) @retryable_database_job("purge") def purge_entity_data(instance: Recorder, entity_filter: Callable[[str], bool]) -> bool: """Purge states and events of specified entities.""" with session_scope(session=instance.get_session()) as session: # type: ignore selected_entity_ids: list[str] = [ entity_id for (entity_id,) in session.query(distinct(States.entity_id)).all() 
if entity_filter(entity_id) ] _LOGGER.debug("Purging entity data for %s", selected_entity_ids) if len(selected_entity_ids) > 0: # Purge a max of MAX_ROWS_TO_PURGE, based on the oldest states or events record _purge_filtered_states(session, selected_entity_ids) _LOGGER.debug("Purging entity data hasn't fully completed yet") return False return True
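
# -----------------------------------------------------------------------------
# Usage sketch -- not part of this module. Both purge helpers delete at most
# MAX_ROWS_TO_PURGE rows per call and return False until everything matching
# the cutoff has been removed, so a caller is expected to invoke them
# repeatedly. The `recorder` instance and the cutoff below are illustrative
# assumptions.
#
#     from datetime import datetime, timedelta
#
#     purge_before = datetime.utcnow() - timedelta(days=10)
#     while not purge_old_data(recorder, purge_before, repack=False):
#         pass  # each pass removes another batch of old rows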
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. ''' TraceEventImporter imports TraceEvent-formatted data into the provided model. This is a port of the trace event importer from https://code.google.com/p/trace-viewer/ ''' import copy import json import re from telemetry.timeline import importer from telemetry.timeline import tracing_timeline_data import telemetry.timeline.async_slice as tracing_async_slice import telemetry.timeline.flow_event as tracing_flow_event class TraceEventTimelineImporter(importer.TimelineImporter): def __init__(self, model, timeline_data): super(TraceEventTimelineImporter, self).__init__( model, timeline_data, import_priority=1) event_data = timeline_data.EventData() self._events_were_from_string = False self._all_async_events = [] self._all_object_events = [] self._all_flow_events = [] if type(event_data) is str: # If the event data begins with a [, then we know it should end with a ]. # The reason we check for this is because some tracing implementations # cannot guarantee that a ']' gets written to the trace file. So, we are # forgiving and if this is obviously the case, we fix it up before # throwing the string at JSON.parse. if event_data[0] == '[': event_data = re.sub(r'[\r|\n]*$', '', event_data) event_data = re.sub(r'\s*,\s*$', '', event_data) if event_data[-1] != ']': event_data = event_data + ']' self._events = json.loads(event_data) self._events_were_from_string = True else: self._events = event_data # Some trace_event implementations put the actual trace events # inside a container. E.g { ... , traceEvents: [ ] } # If we see that, just pull out the trace events. if 'traceEvents' in self._events: container = self._events self._events = self._events['traceEvents'] for field_name in container: if field_name == 'traceEvents': continue # Any other fields in the container should be treated as metadata. self._model.metadata.append({ 'name' : field_name, 'value' : container[field_name]}) @staticmethod def CanImport(timeline_data): ''' Returns whether obj is a TraceEvent array. ''' if not isinstance(timeline_data, tracing_timeline_data.TracingTimelineData): return False event_data = timeline_data.EventData() # May be encoded JSON. But we dont want to parse it fully yet. # Use a simple heuristic: # - event_data that starts with [ are probably trace_event # - event_data that starts with { are probably trace_event # May be encoded JSON. Treat files that start with { as importable by us. if isinstance(event_data, str): return len(event_data) > 0 and (event_data[0] == '{' or event_data[0] == '[') # Might just be an array of events if (isinstance(event_data, list) and len(event_data) and 'ph' in event_data[0]): return True # Might be an object with a traceEvents field in it. if 'traceEvents' in event_data: trace_events = event_data.get('traceEvents', None) return (type(trace_events) is list and len(trace_events) > 0 and 'ph' in trace_events[0]) return False def _GetOrCreateProcess(self, pid): return self._model.GetOrCreateProcess(pid) def _DeepCopyIfNeeded(self, obj): if self._events_were_from_string: return obj return copy.deepcopy(obj) def _ProcessAsyncEvent(self, event): '''Helper to process an 'async finish' event, which will close an open slice. 
''' thread = (self._GetOrCreateProcess(event['pid']) .GetOrCreateThread(event['tid'])) self._all_async_events.append({ 'event': event, 'thread': thread}) def _ProcessCounterEvent(self, event): '''Helper that creates and adds samples to a Counter object based on 'C' phase events. ''' if 'id' in event: ctr_name = event['name'] + '[' + str(event['id']) + ']' else: ctr_name = event['name'] ctr = (self._GetOrCreateProcess(event['pid']) .GetOrCreateCounter(event['cat'], ctr_name)) # Initialize the counter's series fields if needed. if len(ctr.series_names) == 0: #TODO: implement counter object for series_name in event['args']: ctr.series_names.append(series_name) if len(ctr.series_names) == 0: self._model.import_errors.append('Expected counter ' + event['name'] + ' to have at least one argument to use as a value.') # Drop the counter. del ctr.parent.counters[ctr.full_name] return # Add the sample values. ctr.timestamps.append(event['ts'] / 1000.0) for series_name in ctr.series_names: if series_name not in event['args']: ctr.samples.append(0) continue ctr.samples.append(event['args'][series_name]) def _ProcessObjectEvent(self, event): thread = (self._GetOrCreateProcess(event['pid']) .GetOrCreateThread(event['tid'])) self._all_object_events.append({ 'event': event, 'thread': thread}) def _ProcessDurationEvent(self, event): thread = (self._GetOrCreateProcess(event['pid']) .GetOrCreateThread(event['tid'])) if not thread.IsTimestampValidForBeginOrEnd(event['ts'] / 1000.0): self._model.import_errors.append( 'Timestamps are moving backward.') return if event['ph'] == 'B': thread.BeginSlice(event['cat'], event['name'], event['ts'] / 1000.0, event['tts'] / 1000.0 if 'tts' in event else None, event['args']) elif event['ph'] == 'E': thread = (self._GetOrCreateProcess(event['pid']) .GetOrCreateThread(event['tid'])) if not thread.IsTimestampValidForBeginOrEnd(event['ts'] / 1000.0): self._model.import_errors.append( 'Timestamps are moving backward.') return if not thread.open_slice_count: self._model.import_errors.append( 'E phase event without a matching B phase event.') return new_slice = thread.EndSlice( event['ts'] / 1000.0, event['tts'] / 1000.0 if 'tts' in event else None) for arg_name, arg_value in event.get('args', {}).iteritems(): if arg_name in new_slice.args: self._model.import_errors.append( 'Both the B and E phases of ' + new_slice.name + ' provided values for argument ' + arg_name + '. ' + 'The value of the E phase event will be used.') new_slice.args[arg_name] = arg_value def _ProcessCompleteEvent(self, event): thread = (self._GetOrCreateProcess(event['pid']) .GetOrCreateThread(event['tid'])) thread.PushCompleteSlice( event['cat'], event['name'], event['ts'] / 1000.0, event['dur'] / 1000.0 if 'dur' in event else None, event['tts'] / 1000.0 if 'tts' in event else None, event['tdur'] / 1000.0 if 'tdur' in event else None, event['args']) def _ProcessMetadataEvent(self, event): if event['name'] == 'thread_name': thread = (self._GetOrCreateProcess(event['pid']) .GetOrCreateThread(event['tid'])) thread.name = event['args']['name'] if event['name'] == 'process_name': process = self._GetOrCreateProcess(event['pid']) process.name = event['args']['name'] else: self._model.import_errors.append( 'Unrecognized metadata name: ' + event['name']) def _ProcessInstantEvent(self, event): # Treat an Instant event as a duration 0 slice. # SliceTrack's redraw() knows how to handle this. 
thread = (self._GetOrCreateProcess(event['pid']) .GetOrCreateThread(event['tid'])) thread.BeginSlice(event['cat'], event['name'], event['ts'] / 1000.0, args=event.get('args')) thread.EndSlice(event['ts'] / 1000.0) def _ProcessSampleEvent(self, event): thread = (self._GetOrCreateProcess(event['pid']) .GetOrCreateThread(event['tid'])) thread.AddSample(event['cat'], event['name'], event['ts'] / 1000.0, event.get('args')) def _ProcessFlowEvent(self, event): thread = (self._GetOrCreateProcess(event['pid']) .GetOrCreateThread(event['tid'])) self._all_flow_events.append({ 'event': event, 'thread': thread}) def ImportEvents(self): ''' Walks through the events_ list and outputs the structures discovered to model_. ''' for event in self._events: phase = event.get('ph', None) if phase == 'B' or phase == 'E': self._ProcessDurationEvent(event) elif phase == 'X': self._ProcessCompleteEvent(event) elif phase == 'S' or phase == 'F' or phase == 'T': self._ProcessAsyncEvent(event) # Note, I is historic. The instant event marker got changed, but we # want to support loading old trace files so we have both I and i. elif phase == 'I' or phase == 'i': self._ProcessInstantEvent(event) elif phase == 'P': self._ProcessSampleEvent(event) elif phase == 'C': self._ProcessCounterEvent(event) elif phase == 'M': self._ProcessMetadataEvent(event) elif phase == 'N' or phase == 'D' or phase == 'O': self._ProcessObjectEvent(event) elif phase == 's' or phase == 't' or phase == 'f': self._ProcessFlowEvent(event) else: self._model.import_errors.append('Unrecognized event phase: ' + phase + '(' + event['name'] + ')') return self._model def FinalizeImport(self): '''Called by the Model after all other importers have imported their events.''' self._model.UpdateBounds() # We need to reupdate the bounds in case the minimum start time changes self._model.UpdateBounds() self._CreateAsyncSlices() self._CreateFlowSlices() self._SetBrowserProcess() self._CreateExplicitObjects() self._CreateImplicitObjects() self._CreateTabIdsToThreadsMap() def _CreateAsyncSlices(self): if len(self._all_async_events) == 0: return self._all_async_events.sort( cmp=lambda x, y: int(x['event']['ts'] - y['event']['ts'])) async_event_states_by_name_then_id = {} all_async_events = self._all_async_events for async_event_state in all_async_events: event = async_event_state['event'] name = event.get('name', None) if name is None: self._model.import_errors.append( 'Async events (ph: S, T or F) require an name parameter.') continue event_id = event.get('id') if event_id is None: self._model.import_errors.append( 'Async events (ph: S, T or F) require an id parameter.') continue # TODO(simonjam): Add a synchronous tick on the appropriate thread. if event['ph'] == 'S': if not name in async_event_states_by_name_then_id: async_event_states_by_name_then_id[name] = {} if event_id in async_event_states_by_name_then_id[name]: self._model.import_errors.append( 'At %d, a slice of the same id %s was already open.' % ( event['ts'], event_id)) continue async_event_states_by_name_then_id[name][event_id] = [] async_event_states_by_name_then_id[name][event_id].append( async_event_state) else: if name not in async_event_states_by_name_then_id: self._model.import_errors.append( 'At %d, no slice named %s was open.' % (event['ts'], name,)) continue if event_id not in async_event_states_by_name_then_id[name]: self._model.import_errors.append( 'At %d, no slice named %s with id=%s was open.' 
% ( event['ts'], name, event_id)) continue events = async_event_states_by_name_then_id[name][event_id] events.append(async_event_state) if event['ph'] == 'F': # Create a slice from start to end. async_slice = tracing_async_slice.AsyncSlice( events[0]['event']['cat'], name, events[0]['event']['ts'] / 1000.0) async_slice.duration = ((event['ts'] / 1000.0) - (events[0]['event']['ts'] / 1000.0)) async_slice.start_thread = events[0]['thread'] async_slice.end_thread = async_event_state['thread'] if async_slice.start_thread == async_slice.end_thread: if 'tts' in event and 'tts' in events[0]['event']: async_slice.thread_start = events[0]['event']['tts'] / 1000.0 async_slice.thread_duration = ((event['tts'] / 1000.0) - (events[0]['event']['tts'] / 1000.0)) async_slice.id = event_id async_slice.args = events[0]['event']['args'] # Create sub_slices for each step. for j in xrange(1, len(events)): sub_name = name if events[j - 1]['event']['ph'] == 'T': sub_name = name + ':' + events[j - 1]['event']['args']['step'] sub_slice = tracing_async_slice.AsyncSlice( events[0]['event']['cat'], sub_name, events[j - 1]['event']['ts'] / 1000.0) sub_slice.parent_slice = async_slice sub_slice.duration = ((events[j]['event']['ts'] / 1000.0) - (events[j - 1]['event']['ts'] / 1000.0)) sub_slice.start_thread = events[j - 1]['thread'] sub_slice.end_thread = events[j]['thread'] if sub_slice.start_thread == sub_slice.end_thread: if 'tts' in events[j]['event'] and \ 'tts' in events[j - 1]['event']: sub_slice.thread_duration = \ ((events[j]['event']['tts'] / 1000.0) - (events[j - 1]['event']['tts'] / 1000.0)) sub_slice.id = event_id sub_slice.args = events[j - 1]['event']['args'] async_slice.AddSubSlice(sub_slice) # The args for the finish event go in the last sub_slice. last_slice = async_slice.sub_slices[-1] for arg_name, arg_value in event['args'].iteritems(): last_slice.args[arg_name] = arg_value # Add |async_slice| to the start-thread's async_slices. async_slice.start_thread.AddAsyncSlice(async_slice) del async_event_states_by_name_then_id[name][event_id] def _CreateExplicitObjects(self): # TODO(tengs): Implement object instance parsing pass def _CreateImplicitObjects(self): # TODO(tengs): Implement object instance parsing pass def _CreateFlowSlices(self): if len(self._all_flow_events) == 0: return self._all_flow_events.sort( cmp=lambda x, y: int(x['event']['ts'] - y['event']['ts'])) flow_id_to_event = {} for data in self._all_flow_events: event = data['event'] thread = data['thread'] if 'name' not in event: self._model.import_errors.append( 'Flow events (ph: s, t or f) require a name parameter.') continue if 'id' not in event: self._model.import_errors.append( 'Flow events (ph: s, t or f) require an id parameter.') continue flow_event = tracing_flow_event.FlowEvent( event['cat'], event['id'], event['name'], event['ts'] / 1000.0, event['args']) thread.AddFlowEvent(flow_event) if event['ph'] == 's': if event['id'] in flow_id_to_event: self._model.import_errors.append( 'event id %s already seen when encountering start of' 'flow event.' % event['id']) continue flow_id_to_event[event['id']] = flow_event elif event['ph'] == 't' or event['ph'] == 'f': if not event['id'] in flow_id_to_event: self._model.import_errors.append( 'Found flow phase %s for id: %s but no flow start found.' 
% ( event['ph'], event['id'])) continue flow_position = flow_id_to_event[event['id']] self._model.flow_events.append([flow_position, flow_event]) if event['ph'] == 'f': del flow_id_to_event[event['id']] else: # Make this event the next start event in this flow. flow_id_to_event[event['id']] = flow_event def _SetBrowserProcess(self): for thread in self._model.GetAllThreads(): if thread.name == 'CrBrowserMain': self._model.browser_process = thread.parent def _CreateTabIdsToThreadsMap(self): tab_ids_list = [] for metadata in self._model.metadata: if metadata['name'] == 'tabIds': tab_ids_list = metadata['value'] break for tab_id in tab_ids_list: timeline_markers = self._model.FindTimelineMarkers(tab_id) assert(len(timeline_markers) == 1) assert(timeline_markers[0].start_thread == timeline_markers[0].end_thread) self._model.AddMappingFromTabIdToRendererThread( tab_id, timeline_markers[0].start_thread)
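
# Usage sketch -- not part of the importer. It shows the shape of data
# CanImport accepts: events may arrive as a bare JSON array or wrapped in an
# object with a 'traceEvents' field, in which case any other top-level fields
# become model metadata. The TracingTimelineData constructor call is an
# assumption for illustration.
#
#     _SAMPLE_TRACE = '''{"otherData": {"version": "demo"},
#                         "traceEvents": [
#                           {"ph": "B", "cat": "demo", "name": "work",
#                            "pid": 1, "tid": 1, "ts": 100, "args": {}},
#                           {"ph": "E", "cat": "demo", "name": "work",
#                            "pid": 1, "tid": 1, "ts": 250, "args": {}}
#                         ]}'''
#     timeline_data = tracing_timeline_data.TracingTimelineData(_SAMPLE_TRACE)
#     assert TraceEventTimelineImporter.CanImport(timeline_data)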
from __future__ import print_function from __future__ import unicode_literals from __future__ import division import vim import re import xml.etree.ElementTree as ET import coqtop as CT import project_file from collections import deque import vimbufsync vimbufsync.check_version([0,1,0], who="coquille") # Define unicode in python 3 if isinstance(__builtins__, dict): unicode = __builtins__.get('unicode', str) else: unicode = getattr(__builtins__, 'unicode', str) # Cache whether vim has a bool type vim_has_bool = vim.eval("exists('v:false')") def vim_repr(value): "Converts a python value into a vim value" if isinstance(value, bool): if value: if vim_has_bool: return "v:true" else: return "1" else: if vim_has_bool: return "v:false" else: return "0" if isinstance(value, int) or isinstance(value, long): return str(value) if isinstance(value, bytes): value = value.decode("utf-8") if isinstance(value, unicode): return value.replace("'", "''") return "unknown" # Convert 0-based (line, col, byte) tuples into 1-based lists in the form # [line, byte] def make_vim_range(start, stop): return [[start[0] + 1, start[2] + 1], [stop[0] + 1, stop[2] + 1]] # Return a list of all windows that are displaying the buffer, along with their # current cursor positions. def get_cursors_for_buffer(vim_buffer): result = [] for win in vim.windows: if win.buffer is vim_buffer: result.append((win, win.cursor)) return result # Takes the list of window cursor positions from get_cursor_for_buffer. If the # cursor position is now lower for any of the windows, they are entered to # rescroll the window. def fix_scroll(cursors): refresh_now = None for win, (row, col) in cursors: if win.cursor[0] < row or win.cursor[1] < col: win.vars['coquille_needs_scroll_fix'] = 1 if win.tabpage is vim.current.tabpage: vim.command("call coquille#FixWindowScrollTabWin(%d, %d)" % (win.tabpage.number, win.number)) # All the python side state associated with the vim source buffer class BufferState(object): # Dict mapping source buffer id to BufferState source_mapping = {} @classmethod def lookup_bufid(cls, bufid): # For convenience, the vim script passes vim.eval("l:bufid") to this # function, and vim.eval() returns a string. 
bufid = int(bufid) if bufid in cls.source_mapping: state = cls.source_mapping[bufid] else: state = BufferState(vim.buffers[bufid]) cls.source_mapping[bufid] = state if state.sync_vars(): return state else: del cls.source_mapping[bufid] return None def __init__(self, source_buffer): self.source_buffer = source_buffer self.info_buffer = None self.goal_buffer = None #: See vimbufsync ( https://github.com/def-lkb/vimbufsync ) self.saved_sync = None self.coq_top = CT.CoqTop() def sync_vars(self): "Updates python member variables based on the vim variables" if not self.source_buffer.valid: return False if self.source_buffer.options["filetype"] != b"coq": return False goal_bufid = self.source_buffer.vars.get("coquille_goal_bufid", -1) if goal_bufid != -1: self.goal_buffer = vim.buffers[goal_bufid] else: self.goal_buffer = None info_bufid = self.source_buffer.vars.get("coquille_info_bufid", -1) if info_bufid != -1: self.info_buffer = vim.buffers[info_bufid] else: self.info_buffer = None return True ################### # synchronization # ################### def sync(self): curr_sync = vimbufsync.sync(self.source_buffer) if not self.saved_sync or curr_sync.buf() != self.saved_sync.buf(): if self.coq_top.get_active_command_count() > 1: self._reset() else: (line, col) = self.saved_sync.pos() # vim indexes from lines 1, coquille from 0 self.rewind_to(line - 1, col - 1) self.saved_sync = curr_sync def _reset(self): self.coq_top.kill_coqtop() self.saved_sync = None self.reset_color() ##################### # exported commands # ##################### def kill_coqtop(self): if self is None: return self._reset() def goto_last_sent_dot(self): last = self.coq_top.get_last_active_command() (line, col) = ((0,1) if not last else last.end) vim.current.window.cursor = (line + 1, col) def coq_rewind(self, steps=1): self.clear_info() # Do not allow the root state to be rewound if steps < 1 or self.coq_top.get_active_command_count() < 2: return if self.coq_top.coqtop is None: print("Error: Coqtop isn't running. Are you sure you called :CoqLaunch?") return response = self.coq_top.rewind(steps) if response is None: vim.command("call coquille#KillSession()") print('ERROR: the Coq process died') return self.refresh() # steps != 1 means that either the user called "CoqToCursor" or just started # editing in the "locked" zone. In both these cases we don't want to move # the cursor. if (steps == 1 and vim.eval('g:coquille_auto_move') == 'true'): self.goto_last_sent_dot() def coq_to_cursor(self): if self.coq_top.coqtop is None: print("Error: Coqtop isn't running. Are you sure you called :CoqLaunch?") return self.sync() (cline, ccol) = vim.current.window.cursor cline -= 1 last = self.coq_top.get_last_active_command() last_sent = ((0,0,0) if not last else last.end) (line, col, byte) = last_sent if cline < line or (cline == line and ccol < col): # Add 1 to the column to leave whatever is at the # cursor as sent. self.rewind_to(cline, ccol + 1) else: send_queue = deque([]) while True: r = self._get_message_range(last_sent) if (r is not None and (r[1][0], r[1][1]) <= (cline, ccol + 1)): last_sent = r[1] send_queue.append(r) else: break self.send_until_fail(send_queue) def coq_next(self): if self.coq_top.coqtop is None: print("Error: Coqtop isn't running. 
Are you sure you called :CoqLaunch?") return self.sync() last = self.coq_top.get_last_active_command() last_sent = ((0,0,0) if not last else last.end) message_range = self._get_message_range(last_sent) if message_range is None: return send_queue = deque([]) send_queue.append(message_range) self.send_until_fail(send_queue) if (vim.eval('g:coquille_auto_move') == 'true'): self.goto_last_sent_dot() def coq_raw_query(self, *args): self.clear_info() if self.coq_top.coqtop is None: print("Error: Coqtop isn't running. Are you sure you called :CoqLaunch?") return raw_query = ' '.join(args) response = self.coq_top.query(raw_query) if response is None: vim.command("call coquille#KillSession()") print('ERROR: the Coq process died') return info_msg = self.coq_top.get_messages() self.show_info(info_msg) def launch_coq(self, *args): use_project_args = self.source_buffer.vars.get( "coquille_append_project_args", vim.vars.get("coquille_append_project_args", 0)) if use_project_args: # Vim passes the args as a tuple args = list(args) args.extend(project_file.find_and_parse_file( self.source_buffer.name)) return self.coq_top.restart_coq(*args) def debug(self): commands = self.coq_top.get_active_commands() print("encountered dots = [") for (line, col) in commands: print(" (%d, %d) ; " % (line, col)) print("]") ##################################### # IDE tools: Goal, Infos and colors # ##################################### def refresh(self): last_info = [None] def update(): self.reset_color() vim.command('redraw') new_info = self.coq_top.get_messages() if last_info[0] != new_info: self.show_info(new_info) last_info[0] = new_info # It seems that coqtop needs some kind of call like Status or Goal to # trigger it to start processing all the commands that have been added. # So show_goal needs to be called before waiting for all the unchecked # commands finished. response = self.coq_top.goals(update) if self.show_goal(response): while self.coq_top.has_unchecked_commands(): self.coq_top.process_response() update() update() def show_goal(self, response): # Temporarily make the goal buffer modifiable modifiable = self.goal_buffer.options["modifiable"] self.goal_buffer.options["modifiable"] = True try: cursors = get_cursors_for_buffer(self.goal_buffer) del self.goal_buffer[:] if response is None: return False goals = response.val if goals is None: self.goal_buffer[0] = 'No goals.' return True sub_goals = goals.fg msg_format = '{0} subgoal{1}' show_hyps = True if not sub_goals: show_hyps = False sub_goals = [] for (before, after) in goals.bg: sub_goals.extend(reversed(before)) sub_goals.extend(after) if sub_goals: msg_format = ('This subproof is complete, but there {2} {0}' ' unfocused goal{1}') if not sub_goals: msg_format = 'No more subgoals.' 
nb_subgoals = len(sub_goals) self.goal_buffer[0] = msg_format.format(nb_subgoals, '' if nb_subgoals == 1 else 's', 'is' if nb_subgoals == 1 else 'are') self.goal_buffer.append(['']) for idx, sub_goal in enumerate(sub_goals): _id = sub_goal.id hyps = sub_goal.hyp ccl = sub_goal.ccl if show_hyps: # we print the environment only for the current subgoal for hyp in hyps: lst = map(lambda s: s.encode('utf-8'), hyp.split('\n')) self.goal_buffer.append(list(lst)) show_hyps = False self.goal_buffer.append('') self.goal_buffer.append('======================== ( %d / %d )' % (idx+1 , nb_subgoals)) lines = map(lambda s: s.encode('utf-8'), ccl.split('\n')) self.goal_buffer.append(list(lines)) self.goal_buffer.append('') fix_scroll(cursors) finally: self.goal_buffer.options["modifiable"] = modifiable return True def show_info(self, message): # Temporarily make the info buffer modifiable modifiable = self.info_buffer.options["modifiable"] self.info_buffer.options["modifiable"] = True try: cursors = get_cursors_for_buffer(self.info_buffer) del self.info_buffer[:] lst = [] if message is not None: lst = list(map(lambda s: s.encode('utf-8'), message.split('\n'))) if len(lst) >= 1: # If self.info_buffers was a regular list, the del statement # above would have deleted all the lines. However with a vim # buffer, that actually leaves 1 blank line. So now for setting # the new contents, the very first line has to be overwritten, # then the rest can be appended. # # Also note that if info_buffer was a list, extend would be the # appropriate function. However info_buffer does not have an # extend function, and its append mostly behaves like extend. self.info_buffer[0] = lst[0] self.info_buffer.append(lst[1:]) fix_scroll(cursors) finally: self.info_buffer.options["modifiable"] = modifiable def clear_info(self): self.coq_top.clear_messages() self.show_info(None) def convert_offset(self, range_start, offset, range_end): message = self._between(range_start, range_end) (line, col, byte) = _pos_from_offset(range_start[1], range_start[2], message, offset) return (line + range_start[0], col, byte) def reset_color(self): sent = [] checked = [] warnings = [] errors = [] prev_end = None sent_start = None checked_start = None commands = self.coq_top.get_commands() for c in commands: if c.state in (CT.Command.REVERTED, CT.Command.ABANDONED): break if c.state == CT.Command.SENT: if sent_start is None: # Start a sent range sent_start = prev_end elif sent_start is not None: # Finish a sent range sent.append(make_vim_range(sent_start, prev_end)) sent_start = None # Include all the processed commands as checked, even if they # produced a warning or error message. A subrange will also be # marked as a warning or error, but that will override the checked # group. if c.state == CT.Command.PROCESSED: if checked_start is None: # Start a checked range checked_start = prev_end elif checked_start is not None: # Finish a checked range checked.append(make_vim_range(checked_start, prev_end)) checked_start = None prev_end = c.end if sent_start is not None: # Finish a sent range sent.append(make_vim_range(sent_start, prev_end)) if checked_start is not None: # Finish a checked range checked.append(make_vim_range(checked_start, prev_end)) prev_end = None for c in commands: if c.msg_type != CT.Command.NONE: # Normalize the start and stop positions, if it hasn't been done yet. 
if c.msg_start_offset is not None and c.msg_start is None: c.msg_start = self.convert_offset(prev_end, c.msg_start_offset, c.end) if c.msg_stop_offset is not None and c.msg_stop is None: c.msg_stop = self.convert_offset(prev_end, c.msg_stop_offset, c.end) start = c.msg_start stop = c.msg_stop if start == stop: start = prev_end stop = c.end if c.msg_type == CT.Command.WARNING: warnings.append(make_vim_range(start, stop)) else: errors.append(make_vim_range(start, stop)) prev_end = c.end self.source_buffer.vars['coquille_sent'] = sent self.source_buffer.vars['coquille_checked'] = checked self.source_buffer.vars['coquille_warnings'] = warnings self.source_buffer.vars['coquille_errors'] = errors vim.command("call coquille#SyncBufferColors(%d)" % self.source_buffer.number) def rewind_to(self, line, col): """ Go backwards to the specified position line and col are 0-based and point to the first position to remove from the sent region. """ if self.coq_top.coqtop is None: print('Internal error: vimbufsync is still being called but coqtop\ appears to be down.') print('Please report.') return last = self.coq_top.get_last_active_command() if (last and (last.end[0], last.end[1]) <= (line, col)): # The caller asked to rewind to a position after what has been # processed. This quick path exits without having to search the # state list. return predicate = lambda x: (x.end[0], x.end[1]) <= (line, col) commands = self.coq_top.get_active_commands() lst = filter(predicate, commands) steps = len(commands) - len(list(lst)) if steps != 0: self.coq_rewind(steps) ############################# # Communication with Coqtop # ############################# def send_until_fail(self, send_queue): """ Tries to send every message in [send_queue] to Coq, stops at the first error. When this function returns, [send_queue] is empty. """ self.clear_info() # Start sending on a background thread self.coq_top.send_async(send_queue) # Redraw the screen when the background thread makes progress while True: result = self.coq_top.wait_for_result() if result & CT.CoqTop.COMMAND_CHANGED: self.reset_color() vim.command('redraw') if result & CT.CoqTop.MESSAGE_RECEIVED: new_info = self.coq_top.get_messages() self.show_info(new_info) if result & CT.CoqTop.SEND_DONE: break self.coq_top.finish_send() self.refresh() ################# # Miscellaneous # ################# # col_offset is a character offset, not byte offset def _get_remaining_line(self, line, col_offset): s = self.source_buffer[line] if not isinstance(s, unicode): s = s.decode("utf-8") return s[col_offset:] def _between(self, begin, end): """ Returns a string corresponding to the portion of the buffer between the [begin] and [end] positions. """ (bline, bcol, bbyte) = begin (eline, ecol, ebyte) = end acc = "" for line, str in enumerate(self.source_buffer[bline:eline + 1]): if not isinstance(str, unicode): str = str.decode("utf-8") start = bcol if line == 0 else 0 stop = ecol + 1 if line == eline - bline else len(str) acc += str[start:stop] + '\n' return acc # Convert a pos from (line, col) to (line, col, byte_offset) # # The byte_offset is relative to the start of the line. It is the same as # col, unless there are non-ascii characters. # # line, col, and byte_offset are all 0-indexed. 
def _add_byte_offset(self, pos): (line, col) = pos s = self.source_buffer[line] if not isinstance(s, unicode): s = s.decode("utf-8") return (line, col, len(s[:col].encode("utf-8"))) def _get_message_range(self, after): """ See [_find_next_chunk] """ (line, col, byte) = after end_pos = self._find_next_chunk(line, col) if end_pos is None: return None else: end_pos = self._add_byte_offset(end_pos) (eline, ecol, ebyte) = end_pos message = self._between(after, (eline, ecol - 1, ebyte - 1)) return (message, end_pos) # A bullet is: # - One or more '-' # - One or more '+' # - One or more '*' # - Exactly 1 '{' (additional ones are parsed as separate statements) # - Exactly 1 '}' (additional ones are parsed as separate statements) bullets = re.compile("-+|\++|\*+|{|}") def _find_next_chunk(self, line, col): """ Returns the position of the next chunk dot after a certain position. That can either be a bullet if we are in a proof, or "a string" terminated by a dot (outside of a comment, and not denoting a path). """ blen = len(self.source_buffer) # We start by striping all whitespaces (including \n) from the beginning of # the chunk. while line < blen: line_val = self.source_buffer[line] if not isinstance(line_val, unicode): line_val = line_val.decode("utf-8") while col < len(line_val) and line_val[col] in (' ', '\t'): col += 1 if col < len(line_val) and line_val[col] not in (' ', '\t'): break line += 1 col = 0 if line >= blen: return # Then we check if the first character of the chunk is a bullet. # Intially I did that only when I was sure to be in a proof (by looking in # [encountered_dots] whether I was after a "collapsable" chunk or not), but # 1/ that didn't play well with coq_to_cursor (as the "collapsable chunk" # might not have been sent/detected yet). # 2/ The bullet chars can never be used at the *beginning* of a chunk # outside of a proof. So the check was unecessary. bullet_match = self.bullets.match(line_val, col) if bullet_match: return (line, bullet_match.end()) # We might have a commentary before the bullet, we should be skiping it and # keep on looking. tail_len = len(line_val) - col if ((tail_len - 1 > 0) and line_val[col] == '(' and line_val[col + 1] == '*'): com_end = self._skip_comment(line, col + 2, 1) if not com_end: return (line, col) = com_end return self._find_next_chunk(line, col) # If the chunk doesn't start with a bullet, we look for a dot. dot = self._find_dot_after(line, col) if dot: # Return the position one after the dot return (dot[0], dot[1] + 1) else: return None def _find_dot_after(self, line, col): """ Returns the position of the next "valid" dot after a certain position. Valid here means: recognized by Coq as terminating an input, so dots in comments, strings or ident paths are not valid. 
""" if line >= len(self.source_buffer): return s = self._get_remaining_line(line, col) dot_pos = s.find('.') com_pos = s.find('(*') str_pos = s.find('"') if com_pos == -1 and dot_pos == -1 and str_pos == -1: # Nothing on this line return self._find_dot_after(line + 1, 0) elif dot_pos == -1 or (com_pos > - 1 and dot_pos > com_pos) or (str_pos > - 1 and dot_pos > str_pos): if str_pos == -1 or (com_pos > -1 and str_pos > com_pos): # We see a comment opening before the next dot com_end = self._skip_comment(line, com_pos + 2 + col, 1) if not com_end: return (line, col) = com_end return self._find_dot_after(line, col) else: # We see a string starting before the next dot str_end = self._skip_str(line, str_pos + col + 1) if not str_end: return (line, col) = str_end return self._find_dot_after(line, col) elif dot_pos < len(s) - 1 and s[dot_pos + 1] != ' ': # Sometimes dot are used to access module fields, we don't want to stop # just after the module name. # Example: [Require Import Coq.Arith] return self._find_dot_after(line, col + dot_pos + 1) elif dot_pos + col > 0 and self._get_remaining_line(line, col + dot_pos - 1)[0] == '.': # FIXME? There might be a cleaner way to express this. # We don't want to capture ".." if dot_pos + col > 1 and self._get_remaining_line(line, col + dot_pos - 2)[0] == '.': # But we want to capture "..." return (line, dot_pos + col) else: return self._find_dot_after(line, col + dot_pos + 1) else: return (line, dot_pos + col) # TODO? factorize [_skip_str] and [_skip_comment] def _skip_str(self, line, col): """ Used when we encountered the start of a string before a valid dot (see [_find_dot_after]). Returns the position of the end of the string. """ while True: if line >= len(self.source_buffer): return s = self._get_remaining_line(line, col) str_end = s.find('"') if str_end > -1: return (line, col + str_end + 1) line += 1 col = 0 def _skip_comment(self, line, col, nb_left): """ Used when we encountered the start of a comment before a valid dot (see [_find_dot_after]). Returns the position of the end of the comment. """ while nb_left > 0: if line >= len(self.source_buffer): return None s = self._get_remaining_line(line, col) com_start = s.find('(*') com_end = s.find('*)') if com_end > -1 and (com_end < com_start or com_start == -1): col += com_end + 2 nb_left -= 1 elif com_start > -1: col += com_start + 2 nb_left += 1 else: line += 1 col = 0 return (line, col) def _empty_range(): return [ { 'line': 0, 'col': 0}, { 'line': 0, 'col': 0} ] # Converts a byte offset into a message into a (line, col, byte) tuple # # msg is a unicode string the offset is relative to. col is the column where # msg starts, and byte is the byte offset where it starts. # # All indecies are 0 based. def _pos_from_offset(col, byte, msg, offset): str = msg.encode("utf-8")[:offset].decode("utf-8") lst = str.split('\n') line = len(lst) - 1 col = len(lst[-1]) + (col if line == 0 else 0) byte = len(lst[-1].encode("utf-8")) + (byte if line == 0 else 0) return (line, col, byte)
import matplotlib.pyplot as plt def plot_surf_stat_map(coords, faces, stat_map=None, elev=0, azim=0, cmap='jet', threshold=None, bg_map=None, mask=None, bg_on_stat=False, alpha='auto', vmax=None, symmetric_cbar="auto", returnAx=False, figsize=(14,11), label=None, lenient=None, **kwargs): ''' Visualize results on cortical surface using matplotlib''' import numpy as np import matplotlib.pyplot as plt import matplotlib.tri as tri from mpl_toolkits.mplot3d import Axes3D # load mesh and derive axes limits faces = np.array(faces, dtype=int) limits = [coords.min(), coords.max()] # set alpha if in auto mode if alpha == 'auto': if bg_map is None: alpha = .5 else: alpha = 1 # if cmap is given as string, translate to matplotlib cmap if type(cmap) == str: cmap = plt.cm.get_cmap(cmap) # initiate figure and 3d axes if figsize is not None: fig = plt.figure(figsize=figsize) else: fig = plt.figure() fig.patch.set_facecolor('white') ax1 = fig.add_subplot(111, projection='3d', xlim=limits, ylim=limits) # ax1._axis3don = False ax1.grid(False) ax1.set_axis_off() ax1.w_zaxis.line.set_lw(0.) ax1.set_zticks([]) ax1.view_init(elev=elev, azim=azim) # plot mesh without data p3dcollec = ax1.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2], triangles=faces, linewidth=0., antialiased=False, color='white') if mask is not None: cmask = np.zeros(len(coords)) cmask[mask] = 1 cutoff = 2 if lenient: cutoff = 0 fmask = np.where(cmask[faces].sum(axis=1) > cutoff)[0] # If depth_map and/or stat_map are provided, map these onto the surface # set_facecolors function of Poly3DCollection is used as passing the # facecolors argument to plot_trisurf does not seem to work if bg_map is not None or stat_map is not None: face_colors = np.ones((faces.shape[0], 4)) face_colors[:, :3] = .5*face_colors[:, :3] if bg_map is not None: bg_data = bg_map if bg_data.shape[0] != coords.shape[0]: raise ValueError('The bg_map does not have the same number ' 'of vertices as the mesh.') bg_faces = np.mean(bg_data[faces], axis=1) bg_faces = bg_faces - bg_faces.min() bg_faces = bg_faces / bg_faces.max() face_colors = plt.cm.gray_r(bg_faces) # modify alpha values of background face_colors[:, 3] = alpha*face_colors[:, 3] if stat_map is not None: stat_map_data = stat_map stat_map_faces = np.mean(stat_map_data[faces], axis=1) if label: stat_map_faces = np.median(stat_map_data[faces], axis=1) # Call _get_plot_stat_map_params to derive symmetric vmin and vmax # And colorbar limits depending on symmetric_cbar settings cbar_vmin, cbar_vmax, vmin, vmax = \ _get_plot_stat_map_params(stat_map_faces, vmax, symmetric_cbar, kwargs) if threshold is not None: kept_indices = np.where(abs(stat_map_faces) >= threshold)[0] stat_map_faces = stat_map_faces - vmin stat_map_faces = stat_map_faces / (vmax-vmin) if bg_on_stat: face_colors[kept_indices] = cmap(stat_map_faces[kept_indices]) * face_colors[kept_indices] else: face_colors[kept_indices] = cmap(stat_map_faces[kept_indices]) else: stat_map_faces = stat_map_faces - vmin stat_map_faces = stat_map_faces / (vmax-vmin) if bg_on_stat: if mask is not None: face_colors[fmask,:] = cmap(stat_map_faces)[fmask,:] * face_colors[fmask,:] else: face_colors = cmap(stat_map_faces) * face_colors else: face_colors = cmap(stat_map_faces) p3dcollec.set_facecolors(face_colors) if returnAx == True: return fig, ax1 else: return fig def _get_plot_stat_map_params(stat_map_data, vmax, symmetric_cbar, kwargs, force_min_stat_map_value=None): import numpy as np ''' Helper function copied from nilearn to force symmetric colormaps 
https://github.com/nilearn/nilearn/blob/master/nilearn/plotting/img_plotting.py#L52 ''' # make sure that the color range is symmetrical if vmax is None or symmetric_cbar in ['auto', False]: # Avoid dealing with masked_array: if hasattr(stat_map_data, '_mask'): stat_map_data = np.asarray( stat_map_data[np.logical_not(stat_map_data._mask)]) stat_map_max = np.nanmax(stat_map_data) if force_min_stat_map_value == None: stat_map_min = np.nanmin(stat_map_data) else: stat_map_min = force_min_stat_map_value if symmetric_cbar == 'auto': symmetric_cbar = stat_map_min < 0 and stat_map_max > 0 if vmax is None: vmax = max(-stat_map_min, stat_map_max) if 'vmin' in kwargs: raise ValueError('this function does not accept a "vmin" ' 'argument, as it uses a symmetrical range ' 'defined via the vmax argument. To threshold ' 'the map, use the "threshold" argument') vmin = -vmax if not symmetric_cbar: negative_range = stat_map_max <= 0 positive_range = stat_map_min >= 0 if positive_range: cbar_vmin = 0 cbar_vmax = None elif negative_range: cbar_vmax = 0 cbar_vmin = None else: cbar_vmin = stat_map_min cbar_vmax = stat_map_max else: cbar_vmin, cbar_vmax = None, None return cbar_vmin, cbar_vmax, vmin, vmax def showSurf(input_data, surf, sulc, cort, showall=None, output_file=None): import matplotlib.pyplot as plt f = plot_surf_stat_map(surf[0], surf[1], bg_map=sulc, mask=cort, stat_map=input_data, bg_on_stat=True, azim=0) plt.show() if output_file: count = 0 f.savefig((output_file + '.%s.png') % str(count)) count += 1 f = plot_surf_stat_map(surf[0], surf[1], bg_map=sulc, mask=cort, stat_map=input_data, bg_on_stat=True, azim=180) plt.show() if output_file: f.savefig((output_file + '.%s.png') % str(count)) count += 1 if showall: f = plot_surf_stat_map(surf[0], surf[1], bg_map=sulc, mask=cort, stat_map=input_data, bg_on_stat=True, azim=90) plt.show() if output_file: f.savefig((output_file + '.%s.png') % str(count)) count += 1 f = plot_surf_stat_map(surf[0], surf[1], bg_map=sulc, mask=cort, stat_map=input_data, bg_on_stat=True, azim=270) plt.show() if output_file: f.savefig((output_file + '.%s.png') % str(count)) count += 1 f = plot_surf_stat_map(surf[0], surf[1], bg_map=sulc, mask=cort, stat_map=input_data, bg_on_stat=True, elev=90) plt.show() if output_file: f.savefig((output_file + '.%s.png') % str(count)) count += 1 f = plot_surf_stat_map(surf[0], surf[1], bg_map=sulc, mask=cort, stat_map=input_data, bg_on_stat=True, elev=270) plt.show() if output_file: f.savefig((output_file + '.%s.png') % str(count)) count += 1
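# --- Illustrative usage sketch ----------------------------------------------
# A minimal, self-contained call of plot_surf_stat_map() on a toy mesh, to
# show the expected argument shapes. The tetrahedron and statistic values
# below are invented for illustration; real callers would pass surface vertex
# and face arrays (e.g. from FreeSurfer/GIFTI files) plus a per-vertex map.

if __name__ == '__main__':
    import numpy as np

    coords = np.array([[0., 0., 0.],
                       [1., 0., 0.],
                       [0., 1., 0.],
                       [0., 0., 1.]])
    faces = np.array([[0, 1, 2],
                      [0, 1, 3],
                      [0, 2, 3],
                      [1, 2, 3]])
    stat = np.array([-1.0, 0.5, 2.0, -0.25])   # one value per vertex

    fig = plot_surf_stat_map(coords, faces, stat_map=stat,
                             azim=45, elev=30, threshold=0.4)
    fig.savefig('toy_surface.png')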
""" Global variables. WARNING: Never use `from globals import *`! Since these global variables are modified during runtime, using `import *` would lead to unpredictable consequences. """ # Imports, sorted alphabetically. # Python packages from ConfigParser import ConfigParser, NoSectionError, NoOptionError import argparse import getpass from math import pi import os # Third-party packages import pyglet from pyglet.resource import get_settings_path # Modules from this project # Nothing for now... APP_NAME = 'pyCraft' # should I stay or should I go? APP_VERSION = 0.1 DEBUG = False LOG_DEBUG, LOG_INFO, LOG_WARNING, LOG_ERROR, LOG_FATAL = range(5) LOG_LEVEL = LOG_INFO IP_ADDRESS = "neb.nebtown.info" # The IP Address to connect to USERNAME = getpass.getuser() # Default to system username CLIENT = None # Becomes the instance of PacketReceiver if running the client SERVER = None # Becomes the instance of Server if running the server # Game modes SURVIVAL_MODE = 'survival' CREATIVE_MODE = 'creative' GAME_MODE_CHOICES = (SURVIVAL_MODE, CREATIVE_MODE) GAME_MODE = CREATIVE_MODE SINGLEPLAYER = False # User input # Movement MOVE_FORWARD_KEY = 'W' MOVE_BACKWARD_KEY = 'S' MOVE_LEFT_KEY = 'A' MOVE_RIGHT_KEY = 'D' JUMP_KEY = 'SPACE' CROUCH_KEY = 'LSHIFT' FLY_KEY = 'TAB' # Action INVENTORY_KEY = 'E' INVENTORY_SORT_KEY = 'M' INVENTORY_1_KEY = '1' INVENTORY_2_KEY = '2' INVENTORY_3_KEY = '3' INVENTORY_4_KEY = '4' INVENTORY_5_KEY = '5' INVENTORY_6_KEY = '6' INVENTORY_7_KEY = '7' INVENTORY_8_KEY = '8' INVENTORY_9_KEY = '9' INVENTORY_10_KEY = '0' TALK_KEY = 'T' VALIDATE_KEY = 'ENTER' # Settings SOUND_UP_KEY = 'PAGEUP' SOUND_DOWN_KEY = 'PAGEDOWN' TOGGLE_HUD_KEY = 'F1' SCREENCAP_KEY = 'F2' TOGGLE_DEBUG_TEXT_KEY = 'F3' SHOWMAP_KEY = 'F4' # Various ESCAPE_KEY = 'ESCAPE' KEY_BINDINGS = dict( (k.lower()[:-4], v) for k, v in locals().items() if k[-4:] == '_KEY' ) # Saves DISABLE_SAVE = True SAVE_FILENAME = None # Game engine SECTOR_SIZE = 8 TILESET_SIZE = 16 # The tileset therefore contains TILESET_SIZE ** 2 tiles. # Game logic BLOCKS_DIR = {} # Block ID => block object ITEMS_DIR = {} # Item ID => item object VERTEX_CUBE = 'cube' VERTEX_CROSS = 'cross' VERTEX_GRID = 'grid' VERTEX_MODES = ( VERTEX_CUBE, VERTEX_CROSS, VERTEX_GRID, ) # items and blocks share a common id table # ids of items should be >= ITEM_ID_MIN ITEM_ID_MIN = 256 TIME_RATE = 240 * 10 # Rate of change (steps per hour). SPREADING_MUTATION_DELAY = 4 # in seconds # Terrain generation TERRAIN_CHOICES = { # hill_height & max_trees mandatory for the moment. 'plains': { 'hill_height': 2, 'max_trees': 700, }, 'desert': { 'hill_height': 5, 'max_trees': 50, }, 'island': { 'hill_height': 8, 'max_trees': 700, }, 'mountains': { 'hill_height': 12, 'max_trees': 4000, }, 'snow': { 'hill_height': 4, 'max_trees': 1500, }, 'nether': { 'hill_height': 1, 'max_trees': 0, } } SEED = None TREE_CHANCE = 0.006 WILDFOOD_CHANCE = 0.0005 GRASS_CHANCE = 0.05 # Biome DESERT, PLAINS, MOUNTAINS, SNOW, FOREST, ISLAND, NETHER = range(7) # Direction EAST, SOUTH, WEST, NORTH = range(4) # Graphical rendering FULLSCREEN = False WINDOW_WIDTH = 850 # Screen width (in pixels) WINDOW_HEIGHT = 480 # Screen height (in pixels) MAX_FPS = 60 # Maximum frames per second. 
#Maximum time to process the queue QUEUE_PROCESS_SPEED = 0.1 / MAX_FPS #Try shrinking this if chunk loading is laggy, higher loads chunks faster VISIBLE_SECTORS_RADIUS = 8 DELOAD_SECTORS_RADIUS = 12 DRAW_DISTANCE_CHOICES = { 'short': 60.0, 'medium': 60.0 * 1.5, 'long': 60.0 * 2.0 } DEFAULT_DRAW_DISTANCE_CHOICE = 'short' DRAW_DISTANCE_CHOICE = DEFAULT_DRAW_DISTANCE_CHOICE DRAW_DISTANCE = DRAW_DISTANCE_CHOICES[DRAW_DISTANCE_CHOICE] FOV = 65.0 # TODO: add menu option to change FOV NEAR_CLIP_DISTANCE = 0.1 # TODO: make min and max clip distance dynamic FAR_CLIP_DISTANCE = 200.0 # Maximum render distance, # ignoring effects of sector_size MOTION_BLUR = False FOG_ENABLED = False TEXTURE_PACK = 'default' texture_pack_list = None HUD_ENABLED = True DEBUG_TEXT_ENABLED = True # Sound EFFECT_VOLUME = 1 # Tool types WOODEN_TOOL, STONE_TOOL, IRON_TOOL, DIAMOND_TOOL, GOLDEN_TOOL = range(5) PICKAXE, AXE, SHOVEL, HOE, SWORD = range(5) HELMET, CHESTPLATE, LEGGINGS, BOOTS = range(4) # Static aliases DEG_RAD = pi / 180.0 HALF_PI = pi / 2.0 # 90 degrees # Recipes recipes = None smelting_recipes = None # Timer TIMER_INTERVAL = 1 main_timer = None CHAT_FADE_TIME = 8 # Localization LANGUAGE = 'default' _ = lambda x:x # Global files & directories game_dir = get_settings_path(APP_NAME) if not os.path.exists(game_dir): os.makedirs(game_dir) config = ConfigParser() config_file = os.path.join(game_dir, 'game.cfg') config.read(config_file) LAUNCH_OPTIONS = argparse.Namespace() ANCHOR_NONE = 0 ANCHOR_LEFT = 1 ANCHOR_TOP = 1 << 1 ANCHOR_RIGHT = 1 << 2 ANCHOR_BOTTOM = 1 << 3 ICONS_PATH = os.path.join('resources', 'textures', 'icons') TEXTURES_PATH = os.path.join('resources', 'textures') DEFAULT_FONT = 'ChunkFive Roman' CHAT_FONT = 'Silkscreen' class InvalidChoice(Exception): pass class InvalidKey(Exception): pass def get_key(key_name): key_code = getattr(pyglet.window.key, key_name, None) if key_code is None: # Handles cases like pyglet.window.key._1 key_code = getattr(pyglet.window.key, '_' + key_name, None) if key_code is None: raise InvalidKey('%s is not a valid key.' 
% key_name) return key_code def get_or_update_config(section, option, default_value, conv=str, choices=()): user_value = False try: if conv is bool: user_value = config.getboolean(section, option) else: user_value = conv(config.get(section, option)) except NoSectionError: config.add_section(section) except NoOptionError: pass if not user_value: user_value = default_value # If the option is already set: if choices and user_value not in choices: raise InvalidChoice('"%s" %s.%s must be in %s' % (user_value, section, option, repr(tuple(choices)))) config.set(section, option, str(user_value)) return user_value def save_config(): config.set("General","username", USERNAME.encode('utf-8')) config.set("General","ip_address", IP_ADDRESS) with open(config_file, 'wb') as handle: config.write(handle) def initialize_config(): # # General # global DEBUG, FULLSCREEN, WINDOW_WIDTH, WINDOW_HEIGHT, DRAW_DISTANCE_CHOICE, DRAW_DISTANCE_CHOICES, DRAW_DISTANCE, MOTION_BLUR, FOG_ENABLED, TEXTURE_PACK, USERNAME, IP_ADDRESS, LANGUAGE general = 'General' DEBUG = get_or_update_config( general, 'debug', DEBUG, conv=bool) USERNAME = get_or_update_config( general, 'username', USERNAME, conv=str) USERNAME = USERNAME.decode('utf-8') IP_ADDRESS = get_or_update_config( general, 'ip_address', IP_ADDRESS, conv=str) # # Graphics # graphics = 'Graphics' FULLSCREEN = get_or_update_config( graphics, 'fullscreen', FULLSCREEN, conv=bool) WINDOW_WIDTH = get_or_update_config( graphics, 'width', WINDOW_WIDTH, conv=int) WINDOW_HEIGHT = get_or_update_config( graphics, 'height', WINDOW_HEIGHT, conv=int) DRAW_DISTANCE_CHOICE = get_or_update_config( graphics, 'draw_distance', DRAW_DISTANCE_CHOICE, choices=DRAW_DISTANCE_CHOICES) DRAW_DISTANCE = DRAW_DISTANCE_CHOICES[DRAW_DISTANCE_CHOICE] MOTION_BLUR = get_or_update_config( graphics, 'motion_blur', MOTION_BLUR, conv=bool) TEXTURE_PACK = get_or_update_config( graphics, 'texture_pack', TEXTURE_PACK, conv=str) # # World # world = 'World' # TODO: This setting must be removed when terrain generation will improve. get_or_update_config(world, 'size', 64, conv=int) # # Controls # controls = 'Controls' # Adds missing keys to configuration file and converts to pyglet keys. for control, default_key_name in KEY_BINDINGS.items(): key_name = get_or_update_config(controls, control, default_key_name) try: pyglet_key = get_key(key_name) except InvalidKey: pyglet_key = get_key(default_key_name) config.set(controls, control, default_key_name) globals()[control.upper() + '_KEY'] = pyglet_key # # Localization # localization = 'Localization' LANGUAGE = get_or_update_config( localization, 'language', LANGUAGE, conv=str) save_config() initialize_config()
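# --- Illustrative usage sketch ----------------------------------------------
# How extra values can be read through the helpers above; run this module
# directly to try it. The 'Sound'/'effect_volume' option is a made-up name
# used purely to show the call pattern and is not defined elsewhere.
if __name__ == '__main__':
    # Falls back to EFFECT_VOLUME when game.cfg has no [Sound] section and
    # registers the value on the in-memory parser (persisted by save_config()).
    volume = get_or_update_config('Sound', 'effect_volume', EFFECT_VOLUME,
                                  conv=float)
    # Translate a configured key name into a pyglet key constant.
    toggle_hud = get_key(get_or_update_config('Controls', 'toggle_hud', 'F1'))
    print('effect_volume=%s, toggle_hud=%s' % (volume, toggle_hud))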
###################### # MEZZANINE SETTINGS # ###################### # The following settings are already defined with default values in # the ``defaults.py`` module within each of Mezzanine's apps, but are # common enough to be put here, commented out, for convenient # overriding. Please consult the settings documentation for a full list # of settings Mezzanine implements: # http://mezzanine.jupo.org/docs/configuration.html#default-settings # Controls the ordering and grouping of the admin menu. # # ADMIN_MENU_ORDER = ( # ("Content", ("pages.Page", "blog.BlogPost", # "generic.ThreadedComment", ("Media Library", "fb_browse"),)), # ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")), # ("Users", ("auth.User", "auth.Group",)), # ) # A three item sequence, each containing a sequence of template tags # used to render the admin dashboard. # # DASHBOARD_TAGS = ( # ("blog_tags.quick_blog", "mezzanine_tags.app_list"), # ("comment_tags.recent_comments",), # ("mezzanine_tags.recent_actions",), # ) # A sequence of templates used by the ``page_menu`` template tag. Each # item in the sequence is a three item sequence, containing a unique ID # for the template, a label for the template, and the template path. # These templates are then available for selection when editing which # menus a page should appear in. Note that if a menu template is used # that doesn't appear in this setting, all pages will appear in it. # PAGE_MENU_TEMPLATES = ( # (1, "Top navigation bar", "pages/menus/dropdown.html"), # (2, "Left-hand tree", "pages/menus/tree.html"), # (3, "Footer", "pages/menus/footer.html"), # ) # A sequence of fields that will be injected into Mezzanine's (or any # library's) models. Each item in the sequence is a four item sequence. # The first two items are the dotted path to the model and its field # name to be added, and the dotted path to the field class to use for # the field. The third and fourth items are a sequence of positional # args and a dictionary of keyword args, to use when creating the # field instance. When specifying the field class, the path # ``django.models.db.`` can be omitted for regular Django model fields. # # EXTRA_MODEL_FIELDS = ( # ( # # Dotted path to field. # "mezzanine.blog.models.BlogPost.image", # # Dotted path to field class. # "somelib.fields.ImageField", # # Positional args for field class. # ("Image",), # # Keyword args for field class. # {"blank": True, "upload_to": "blog"}, # ), # # Example of adding a field to *all* of Mezzanine's content types: # ( # "mezzanine.pages.models.Page.another_field", # "IntegerField", # 'django.db.models.' is implied if path is omitted. # ("Another name",), # {"blank": True, "default": 1}, # ), # ) # Setting to turn on featured images for blog posts. Defaults to False. # # BLOG_USE_FEATURED_IMAGE = True # If True, the south application will be automatically added to the # INSTALLED_APPS setting. USE_SOUTH = True ######################## # MAIN DJANGO SETTINGS # ######################## # People who get code error notifications. # In the format (('Full Name', '[email protected]'), # ('Full Name', '[email protected]')) ADMINS = ( # ('Your Name', '[email protected]'), ) MANAGERS = ADMINS # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. 
# If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = None # If you set this to True, Django will use timezone-aware datetimes. USE_TZ = True # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = "en" # A boolean that turns on/off debug mode. When set to ``True``, stack traces # are displayed for error pages. Should always be set to ``False`` in # production. Best set to ``True`` in local_settings.py DEBUG = False # Whether a user's session cookie expires when the Web browser is closed. SESSION_EXPIRE_AT_BROWSER_CLOSE = True SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = False # Make this unique, and don't share it with anybody. SECRET_KEY = "%(SECRET_KEY)s" # Tuple of IP addresses, as strings, that: # * See debug comments, when DEBUG is true # * Receive x-headers INTERNAL_IPS = ("127.0.0.1",) # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( "django.template.loaders.filesystem.Loader", "django.template.loaders.app_directories.Loader", ) AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder", # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) ############# # DATABASES # ############# DATABASES = { "default": { # Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle". "ENGINE": "django.db.backends.", # DB name or path to database file if using sqlite3. "NAME": "", # Not used with sqlite3. "USER": "", # Not used with sqlite3. "PASSWORD": "", # Set to empty string for localhost. Not used with sqlite3. "HOST": "", # Set to empty string for default. Not used with sqlite3. "PORT": "", } } ######### # PATHS # ######### import os # Full filesystem path to the project. PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__)) # Name of the directory for the project. PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1] # Every cache key will get prefixed with this value - here we set it to # the name of the directory the project is in to try and use something # project specific. CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = "/static/" # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/")) # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = STATIC_URL + "media/" # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/")) # Package/module name to import the root urlpatterns from for the project. ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME # Put strings here, like "/home/html/django_templates" # or "C:/www/django/templates". 
# Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),) ################ # APPLICATIONS # ################ INSTALLED_APPS = ( "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.redirects", "django.contrib.sessions", "django.contrib.sites", "django.contrib.sitemaps", "django.contrib.staticfiles", "mezzanine.boot", "mezzanine.conf", "mezzanine.core", "mezzanine.generic", "mezzanine.blog", "mezzanine.forms", "mezzanine.pages", "mezzanine.galleries", "mezzanine.twitter", #"mezzanine.accounts", #"mezzanine.mobile", ) # List of processors used by RequestContext to populate the context. # Each one should be a callable that takes the request object as its # only parameter and returns a dictionary to add to the context. TEMPLATE_CONTEXT_PROCESSORS = ( "django.contrib.auth.context_processors.auth", "django.contrib.messages.context_processors.messages", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.static", "django.core.context_processors.media", "django.core.context_processors.request", "django.core.context_processors.tz", "mezzanine.conf.context_processors.settings", ) # List of middleware classes to use. Order is important; in the request phase, # these middleware classes will be applied in the order given, and in the # response phase the middleware will be applied in reverse order. MIDDLEWARE_CLASSES = ( "mezzanine.core.middleware.UpdateCacheMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.contrib.messages.middleware.MessageMiddleware", "mezzanine.core.request.CurrentRequestMiddleware", "mezzanine.core.middleware.RedirectFallbackMiddleware", "mezzanine.core.middleware.TemplateForDeviceMiddleware", "mezzanine.core.middleware.TemplateForHostMiddleware", "mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware", "mezzanine.core.middleware.SitePermissionMiddleware", # Uncomment the following if using any of the SSL settings: # "mezzanine.core.middleware.SSLRedirectMiddleware", "mezzanine.pages.middleware.PageMiddleware", "mezzanine.core.middleware.FetchFromCacheMiddleware", ) # Store these package names here as they may change in the future since # at the moment we are using custom forks of them. PACKAGE_NAME_FILEBROWSER = "filebrowser_safe" PACKAGE_NAME_GRAPPELLI = "grappelli_safe" ######################### # OPTIONAL APPLICATIONS # ######################### # These will be added to ``INSTALLED_APPS``, only if available. OPTIONAL_APPS = ( "debug_toolbar", "django_extensions", "compressor", PACKAGE_NAME_FILEBROWSER, PACKAGE_NAME_GRAPPELLI, ) DEBUG_TOOLBAR_CONFIG = {"INTERCEPT_REDIRECTS": False} ################### # DEPLOY SETTINGS # ################### # These settings are used by the default fabfile.py provided. # Check fabfile.py for defaults. 
# FABRIC = { # "SSH_USER": "", # SSH username # "SSH_PASS": "", # SSH password (consider key-based authentication) # "SSH_KEY_PATH": "", # Local path to SSH key file, for key-based auth # "HOSTS": [], # List of hosts to deploy to # "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs # "PROJECT_NAME": "", # Unique identifier for project # "REQUIREMENTS_PATH": "", # Path to pip requirements, relative to project # "GUNICORN_PORT": 8000, # Port gunicorn will listen on # "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8" # "LIVE_HOSTNAME": "www.example.com", # Host for public site. # "REPO_URL": "", # Git or Mercurial remote repo URL for the project # "DB_PASS": "", # Live database password # "ADMIN_PASS": "", # Live admin user password # } ################## # LOCAL SETTINGS # ################## # Allow any settings to be defined in local_settings.py which should be # ignored in your version control system allowing for settings to be # defined per machine. try: from local_settings import * except ImportError: pass #################### # DYNAMIC SETTINGS # #################### # set_dynamic_settings() will rewrite globals based on what has been # defined so far, in order to provide some better defaults where # applicable. We also allow this settings module to be imported # without Mezzanine installed, as the case may be when using the # fabfile, where setting the dynamic settings below isn't strictly # required. try: from mezzanine.utils.conf import set_dynamic_settings except ImportError: pass else: set_dynamic_settings(globals())
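# A hypothetical local_settings.py that the import above would pick up; every
# value below is a per-machine placeholder, not a project default.
#
# DEBUG = True
# SECRET_KEY = "a-long-random-string-unique-to-this-machine"
#
# DATABASES = {
#     "default": {
#         "ENGINE": "django.db.backends.sqlite3",
#         "NAME": "dev.db",
#         "USER": "",
#         "PASSWORD": "",
#         "HOST": "",
#         "PORT": "",
#     }
# }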
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os, sys, platform, cgi, socket import datetime HEADER_TEMPLATE = \ "HTTP/1.1 200 OK\r\n" \ "Server: webhttpd/2.0\r\n" \ "Cache-Control: no-cache, no-store, must-revalidate\r\n" \ "Connection: close\r\n" \ "Content-Type: text/html; charset=utf-8\r\n" \ "Date: {}\r\n" \ "\r\n" def pyinfo(): output = '<!DOCTYPE html>\n' output += '<html>' output += '<head>' output += '<title>pyinfo()</title>' output += '<meta name="robots" content="noindex,nofollow,noarchive,nosnippet">' output += styles() output += '</head>' output += '<body>' output += '<div class="center">' output += section_title() output += '<h2>System</h2>' output += section_system() output += '<h2>Python Internals</h2>' output += section_py_internals() output += '<h2>OS Internals</h2>' output += section_os_internals() output += '<h2>WSGI Environment</h2>' output += section_environ() output += '<h2>Database support</h2>' output += section_database() output += '<h2>Compression and archiving</h2>' output += section_compression() if 'ldap' in sys.modules: output += '<h2>LDAP support</h2>' output += section_ldap() if 'socket' in sys.modules: output += '<h2>Socket</h2>' output += section_socket() output += '<h2>Multimedia support</h2>' output += section_multimedia() output += '<h2>Copyright</h2>' output += section_copyright() output += '</div>' output += '</body>' output += '</html>' return output def styles(): css = '<style type="text/css">' css += 'body{background-color:#fff;color:#000}' css += 'body,td,th,h1,h2{font-family:sans-serif}' css += 'pre{margin:0px;font-family:monospace}' css += 'a:link{color:#009;text-decoration:none;background-color:#fff}' css += 'a:hover{text-decoration:underline}' css += 'table{border-collapse:collapse}' css += '.center{text-align:center}' css += '.center table{margin-left:auto;margin-right:auto;text-align:left}' css += '.center th{text-align:center !important}' css += 'td,th{border:1px solid #999999;font-size:75%;vertical-align:baseline}' css += 'h1{font-size:150%}' css += 'h2{font-size:125%}' css += '.p{text-align:left}' css += '.e{width:30%;background-color:#ffffcc;font-weight:bold;color:#000}' css += '.h{background:url(\'http://python.org/images/header-bg2.png\') repeat-x;font-weight:bold;color:#000}' css += '.v{background-color:#f2f2f2;color:#000}' css += '.vr{background-color:#cccccc;text-align:right;color:#000}' css += 'img{float:right;border:0px;}' css += 'hr{width:600px;background-color:#ccc;border:0px;height:1px;color:#000}' css += '</style>' return css def table(html): return '<table border="0" cellpadding="3" width="600">%s</table><br>' % html def makecells(data): html = '' while data: html += '<tr><td class="e">%s</td><td class="v">%s</td></tr>' % (data.pop(0), data.pop(0)) return table(html) def imported(module): if module in sys.modules: return 'enabled' return 'disabled' def section_title(): html = '<tr class="h"><td>' html += '<a href="http://python.org/"><img border="0" src="http://python.org/images/python-logo.gif"></a>' html += '<h1 class="p">Python %s</h1>' % platform.python_version() html += '</td></tr>' return table(html) def section_system(): data = [] if hasattr(sys, 'subversion'): data += 'Python Subversion', ', '.join(sys.subversion) if platform.dist()[0] != '' and platform.dist()[1] != '': data += 'OS Version', '%s %s (%s %s)' % (platform.system(), platform.release(), platform.dist()[0].capitalize(), platform.dist()[1]) else: data += 'OS Version', '%s %s' % (platform.system(), platform.release()) if hasattr(sys, 'executable'): 
        data += 'Executable', sys.executable
    data += 'Build Date', platform.python_build()[1]
    data += 'Compiler', platform.python_compiler()
    if hasattr(sys, 'api_version'):
        data += 'Python API', sys.api_version
    return makecells(data)

def section_py_internals():
    data = []
    if hasattr(sys, 'builtin_module_names'):
        data += 'Built-in Modules', ', '.join(sys.builtin_module_names)
    data += 'Byte Order', sys.byteorder + ' endian'
    if hasattr(sys, 'getcheckinterval'):
        data += 'Check Interval', sys.getcheckinterval()
    if hasattr(sys, 'getfilesystemencoding'):
        data += 'File System Encoding', sys.getfilesystemencoding()
    if hasattr(sys, 'getrecursionlimit'):
        data += 'Maximum Recursion Depth', sys.getrecursionlimit()
    if hasattr(sys, 'tracebacklimit'):
        data += 'Maximum Traceback Limit', sys.tracebacklimit
    else:
        data += 'Maximum Traceback Limit', '1000'
    data += 'Maximum Unicode Code Point', sys.maxunicode
    return makecells(data)

def section_os_internals():
    data = []
    if hasattr(os, 'getcwd'):
        data += 'Current Working Directory', os.getcwd()
    if hasattr(os, 'getegid'):
        data += 'Effective Group ID', os.getegid()
    if hasattr(os, 'geteuid'):
        data += 'Effective User ID', os.geteuid()
    if hasattr(os, 'getgid'):
        data += 'Group ID', os.getgid()
    if hasattr(os, 'getgroups'):
        data += 'Group Membership', ', '.join(map(str, os.getgroups()))
    if hasattr(os, 'linesep'):
        data += 'Line Separator', repr(os.linesep)[1:-1]
    if hasattr(os, 'getloadavg'):
        data += 'Load Average', ', '.join(map(str, map(lambda x: round(x, 2), os.getloadavg())))
    if hasattr(os, 'pathsep'):
        data += 'Path Separator', os.pathsep
    try:
        if hasattr(os, 'getpid') and hasattr(os, 'getppid'):
            data += 'Process ID', ('%s (parent: %s)' % (os.getpid(), os.getppid()))
    except:
        pass
    if hasattr(os, 'getuid'):
        data += 'User ID', os.getuid()
    return makecells(data)

def section_environ():
    # Sort the variable names so the table is alphabetical.
    envvars = sorted(os.environ.keys())
    data = []
    for envvar in envvars:
        data += envvar, cgi.escape(str(os.environ[envvar]))
    return makecells(data)

def section_database():
    data = []
    data += 'DB2/Informix (ibm_db)', imported('ibm_db')
    data += 'MSSQL (adodbapi)', imported('adodbapi')
    data += 'MySQL (MySQL-Python)', imported('MySQLdb')
    data += 'ODBC (mxODBC)', imported('mxODBC')
    data += 'Oracle (cx_Oracle)', imported('cx_Oracle')
    data += 'PostgreSQL (PyGreSQL)', imported('pgdb')
    data += 'Python Data Objects (PyDO)', imported('PyDO')
    data += 'SAP DB (sapdbapi)', imported('sapdbapi')
    data += 'SQLite3', imported('sqlite3')
    return makecells(data)

def section_compression():
    data = []
    data += 'Bzip2 Support', imported('bz2')
    data += 'Gzip Support', imported('gzip')
    data += 'Tar Support', imported('tarfile')
    data += 'Zip Support', imported('zipfile')
    data += 'Zlib Support', imported('zlib')
    return makecells(data)

def section_ldap():
    data = []
    data += 'Python-LDAP Version', ldap.__version__
    data += 'API Version', ldap.API_VERSION
    data += 'Default Protocol Version', ldap.VERSION
    data += 'Minimum Protocol Version', ldap.VERSION_MIN
    data += 'Maximum Protocol Version', ldap.VERSION_MAX
    data += 'SASL Support (Cyrus-SASL)', ldap.SASL_AVAIL
    data += 'TLS Support (OpenSSL)', ldap.TLS_AVAIL
    data += 'Vendor Version', ldap.VENDOR_VERSION
    return makecells(data)

def section_socket():
    data = []
    data += 'Hostname', socket.gethostname()
    data += 'Hostname (fully qualified)', socket.gethostbyaddr(socket.gethostname())[0]
    try:
        data += 'IP Address', socket.gethostbyname(socket.gethostname())
    except:
        pass
    data += 'IPv6 Support', getattr(socket, 'has_ipv6', False)
    data += 'SSL Support', hasattr(socket, 'ssl')
    return makecells(data)

def section_multimedia():
    data = []
    data += 'AIFF Support', imported('aifc')
    data += 'Color System Conversion Support', imported('colorsys')
    data += 'curses Support', imported('curses')
    data += 'IFF Chunk Support', imported('chunk')
    data += 'Image Header Support', imported('imghdr')
    data += 'OSS Audio Device Support', imported('ossaudiodev')
    data += 'Raw Audio Support', imported('audioop')
    data += 'Raw Image Support', imported('imageop')
    data += 'SGI RGB Support', imported('rgbimg')
    data += 'Sound Header Support', imported('sndhdr')
    data += 'Sun Audio Device Support', imported('sunaudiodev')
    data += 'Sun AU Support', imported('sunau')
    data += 'Wave Support', imported('wave')
    return makecells(data)

def section_copyright():
    html = '<tr class="v"><td>%s</td></tr>' % sys.copyright.replace('\n\n', '<br>').replace('\r\n', '<br />').replace('(c)', '&copy;')
    return table(html)

optional_modules_list = [
    'Cookie', 'zlib', 'gzip', 'bz2', 'zipfile', 'tarfile', 'ldap', 'socket',
    'audioop', 'curses', 'imageop', 'aifc', 'sunau', 'wave', 'chunk',
    'colorsys', 'rgbimg', 'imghdr', 'sndhdr', 'ossaudiodev', 'sunaudiodev',
    'adodbapi', 'cx_Oracle', 'ibm_db', 'mxODBC', 'MySQLdb', 'pgdb', 'PyDO',
    'sapdbapi', 'sqlite3'
]

for i in optional_modules_list:
    try:
        module = __import__(i)
        sys.modules[i] = module
        globals()[i] = module
    except:
        pass

# The Date header is declared as GMT, so build it from UTC time.
sys.stdout.write(HEADER_TEMPLATE.format(
    datetime.datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S GMT")))
sys.stdout.write(pyinfo())
sys.stdout.write("\r\n\r\n")
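# --- Illustrative serving sketch --------------------------------------------
# The script prints a complete HTTP/1.1 response to stdout, so one simple way
# to expose it is a tiny TCP wrapper (a separate file) that pipes the output
# back to the client. The file name 'pyinfo.py' and port 8080 are assumptions
# made for this sketch, not something the script itself defines.
#
# import socketserver
# import subprocess
#
# class PyinfoHandler(socketserver.StreamRequestHandler):
#     def handle(self):
#         self.rfile.readline()  # consume the request line; headers ignored
#         out = subprocess.run(['python3', 'pyinfo.py'],
#                              capture_output=True).stdout
#         self.wfile.write(out)
#
# if __name__ == '__main__':
#     with socketserver.TCPServer(('127.0.0.1', 8080), PyinfoHandler) as srv:
#         srv.serve_forever()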
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import fixtures from oslo_config import cfg from oslo_log import log as logging from nova import exception CONF = cfg.CONF LOG = logging.getLogger(__name__) def _get_device_profile(dp_name, trait): dp = { 'fakedev-dp': [ { 'name': 'fakedev-dp', 'uuid': 'cbec22f3-ac29-444e-b4bb-98509f32faae', 'groups': [ { 'resources:FPGA': 1, 'trait:' + trait: 'required', }, ], # Skipping links key in Cyborg API return value }, ], 'fakedev-dp-port': [ { 'name': 'fakedev-dp', 'uuid': 'cbec22f3-ac29-444e-b4bb-98509f32faae', 'groups': [ { 'resources:FPGA': 1, 'trait:' + trait: 'required', }, ], # Skipping links key in Cyborg API return value }, ], 'fakedev-dp-multi': [ { 'name': 'fakedev-dp-multi', 'uuid': 'cbec22f3-ac29-444e-b4bb-98509f32faae', 'groups': [ { 'resources:FPGA': 2, 'resources:FPGA2': 1, 'trait:' + trait: 'required', }, ], # Skipping links key in Cyborg API return value }, ], } return dp[dp_name] def get_arqs(dp_name): # prepare fixture arqs and bound info arqs = [ { 'uuid': 'b59d34d3-787b-4fb0-a6b9-019cd81172f8', 'device_profile_name': dp_name, 'device_profile_group_id': 0, 'state': 'Initial', 'device_rp_uuid': None, 'hostname': None, 'instance_uuid': None, 'attach_handle_info': {}, 'attach_handle_type': '', }, {'uuid': '73d5f9f3-23e9-4b45-909a-e8a1db4cf24c', 'device_profile_name': dp_name, 'device_profile_group_id': 0, 'state': 'Initial', 'device_rp_uuid': None, 'hostname': None, 'instance_uuid': None, 'attach_handle_info': {}, 'attach_handle_type': '', }, {'uuid': '69b83caf-dd1c-493d-8796-40af5a16e3f6', 'device_profile_name': dp_name, 'device_profile_group_id': 0, 'state': 'Initial', 'device_rp_uuid': None, 'hostname': None, 'instance_uuid': None, 'attach_handle_info': {}, 'attach_handle_type': '', }, {'uuid': 'e5fc1da7-216b-4102-a50d-43ba77bcacf7', 'device_profile_name': dp_name, 'device_profile_group_id': 0, 'state': 'Initial', 'device_rp_uuid': None, 'hostname': None, 'instance_uuid': None, 'attach_handle_info': {}, 'attach_handle_type': '', } ] # arqs bound info attach_handle_list = [ { 'bus': '0c', 'device': '0', 'domain': '0000', 'function': '1', 'physical_network': 'PHYNET1' }, { 'bus': '0c', 'device': '0', 'domain': '0000', 'function': '2', 'physical_network': 'PHYNET1' }, { 'bus': '0c', 'device': '0', 'domain': '0000', 'function': '3', 'physical_network': 'PHYNET1' }, { 'bus': '0c', 'device': '0', 'domain': '0000', 'function': '4', 'physical_network': 'PHYNET1' } ] bound_arqs = [] # combine bond info to arq generating a bonded arqs list for idx, arq in enumerate(arqs): bound_arq = copy.deepcopy(arq) bound_arq.update( {'state': 'Bound', 'attach_handle_type': 'TEST_PCI', 'attach_handle_info': attach_handle_list[idx]}, ) bound_arqs.append(bound_arq) return arqs, bound_arqs class CyborgFixture(fixtures.Fixture): """Fixture that mocks Cyborg APIs used by nova/accelerator/cyborg.py""" dp_name = 'fakedev-dp' trait = 'CUSTOM_FAKE_DEVICE' arq_list, bound_arq_list = copy.deepcopy(get_arqs(dp_name)) arq_uuids = [] for arq in arq_list: 
arq_uuids.append(arq["uuid"]) call_create_arq_count = 0 # NOTE(Sundar): The bindings passed to the fake_bind_arqs() from the # conductor are indexed by ARQ UUID and include the host name, device # RP UUID and instance UUID. (See params to fake_bind_arqs below.) # # Later, when the compute manager calls fake_get_arqs_for_instance() with # the instance UUID, the returned ARQs must contain the host name and # device RP UUID. But these can vary from test to test. # # So, fake_bind_arqs() below takes bindings indexed by ARQ UUID and # converts them to bindings indexed by instance UUID, which are then # stored in the dict below. This dict looks like: # { $instance_uuid: [ # {'hostname': $hostname, # 'device_rp_uuid': $device_rp_uuid, # 'arq_uuid': $arq_uuid # } # ] # } # Since it is indexed by instance UUID, and that is presumably unique # across concurrently executing tests, this should be safe for # concurrent access. def setUp(self): super().setUp() self.mock_get_dp = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient._get_device_profile_list', side_effect=self.fake_get_device_profile_list)).mock self.mock_create_arqs = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.create_arqs', side_effect=self.fake_create_arqs)).mock self.mock_bind_arqs = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.bind_arqs', side_effect=self.fake_bind_arqs)).mock self.mock_get_arqs = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.' 'get_arqs_for_instance', side_effect=self.fake_get_arqs_for_instance)).mock self.mock_del_arqs = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.' 'delete_arqs_for_instance', side_effect=self.fake_delete_arqs_for_instance)).mock self.mock_get_arq_by_uuid = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.' 'get_arq_by_uuid', side_effect=self.fake_get_arq_by_uuid)).mock self.mock_get_arq_device_rp_uuid = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.' 'get_arq_device_rp_uuid', side_effect=self.fake_get_arq_device_rp_uuid)).mock self.mock_create_arq_and_match_rp = self.useFixture(fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.' 'create_arqs_and_match_resource_providers', side_effect=self.fake_create_arq_and_match_rp)).mock self.mock_fake_delete_arqs_by_uuid = self.useFixture( fixtures.MockPatch( 'nova.accelerator.cyborg._CyborgClient.' 'delete_arqs_by_uuid', side_effect=self.fake_delete_arqs_by_uuid)).mock def fake_get_device_profile_list(self, dp_name): return _get_device_profile(dp_name, self.trait) @staticmethod def fake_bind_arqs(bindings): """Simulate Cyborg ARQ bindings. Since Nova calls Cyborg for binding on per-instance basis, the instance UUIDs would be the same for all ARQs in a single call. This function converts bindings indexed by ARQ UUID to bindings indexed by instance UUID, so that fake_get_arqs_for_instance can retrieve them later. :param bindings: { "$arq_uuid": { "hostname": STRING "device_rp_uuid": UUID "instance_uuid": UUID }, ... 
} :returns: None """ if bindings.keys() and CyborgFixture.arq_uuids is None: LOG.error("ARQ not found") raise exception.AcceleratorRequestOpFailed() for arq_uuid, binding in bindings.items(): for bound_arq in CyborgFixture.bound_arq_list: if arq_uuid == bound_arq["uuid"]: bound_arq["hostname"] = binding["hostname"] bound_arq["instance_uuid"] = binding["instance_uuid"] bound_arq["device_rp_uuid"] = binding["device_rp_uuid"] break @staticmethod def fake_get_arqs_for_instance(instance_uuid, only_resolved=False): """Get list of bound ARQs for this instance. This function uses bindings indexed by instance UUID to populate the bound ARQ templates in CyborgFixture.bound_arq_list. """ bound_arq_list = copy.deepcopy(CyborgFixture.bound_arq_list) instance_bound_arqs = [] for arq in bound_arq_list: if arq["instance_uuid"] == instance_uuid: instance_bound_arqs.append(arq) return instance_bound_arqs def fake_get_arq_by_uuid(self, uuid): for arq in self.arq_list: if uuid == arq['uuid']: return arq return None def fake_delete_arqs_for_instance(self, instance_uuid): # clean up arq binding info while delete arqs for arq in self.bound_arq_list: if arq["instance_uuid"] == instance_uuid: arq["instance_uuid"] = None arq["hostname"] = None arq["device_rp_uuid"] = None def fake_create_arq_and_match_rp(self, dp_name, rg_rp_map=None, owner=None): # sync the device_rp_uuid to fake arq arqs = self.fake_create_arqs(dp_name) for arq in arqs: dp_group_id = arq['device_profile_group_id'] requester_id = ("device_profile_" + str(dp_group_id) + (str(owner) if owner else "")) arq["device_rp_uuid"] = rg_rp_map[requester_id][0] return arqs def fake_create_arqs(self, dp_name): index = self.call_create_arq_count self.call_create_arq_count += 1 if index < len(self.arq_list): return [self.arq_list[index]] else: return None def fake_get_arq_device_rp_uuid(self, arq_arg, rg_rp_map=None, port_id=None): # sync the device_rp_uuid to fake arq for arq in self.arq_list: if arq["uuid"] == arq_arg['uuid']: dp_group_id = arq['device_profile_group_id'] requester_id = ("device_profile_" + str(dp_group_id) + str(port_id)) arq["device_rp_uuid"] = rg_rp_map[requester_id][0] return arq["device_rp_uuid"] return None def fake_delete_arqs_by_uuid(self, arq_uuids): # clean up arq binding info while delete arqs for arq_uuid in arq_uuids: for arq in self.bound_arq_list: if arq["uuid"] == arq_uuid: arq["instance_uuid"] = None arq["hostname"] = None arq["device_rp_uuid"] = None
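# --- Illustrative usage sketch ----------------------------------------------
# How a test might exercise the fixture above; the test class, host name,
# RP UUID and instance UUID are invented for illustration and assume this
# module is importable as-is inside the Nova test tree.
#
# import testtools
#
# class TestCyborgFixtureFlow(testtools.TestCase):
#     def test_bind_then_lookup(self):
#         cyborg = self.useFixture(CyborgFixture())
#         arq_uuid = cyborg.arq_uuids[0]
#         inst_uuid = '11111111-2222-3333-4444-555555555555'
#         cyborg.fake_bind_arqs({
#             arq_uuid: {'hostname': 'fake-host',
#                        'device_rp_uuid': 'fake-rp-uuid',
#                        'instance_uuid': inst_uuid},
#         })
#         bound = cyborg.fake_get_arqs_for_instance(inst_uuid)
#         self.assertEqual(1, len(bound))
#         self.assertEqual('fake-host', bound[0]['hostname'])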
# plugin/plugin_base.py # Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: https://www.opensource.org/licenses/mit-license.php """Testing extensions. this module is designed to work as a testing-framework-agnostic library, created so that multiple test frameworks can be supported at once (mostly so that we can migrate to new ones). The current target is pytest. """ from __future__ import absolute_import import abc import logging import re import sys # flag which indicates we are in the SQLAlchemy testing suite, # and not that of Alembic or a third party dialect. bootstrapped_as_sqlalchemy = False log = logging.getLogger("sqlalchemy.testing.plugin_base") py3k = sys.version_info >= (3, 0) if py3k: import configparser ABC = abc.ABC else: import ConfigParser as configparser import collections as collections_abc # noqa class ABC(object): __metaclass__ = abc.ABCMeta # late imports fixtures = None engines = None exclusions = None warnings = None profiling = None provision = None assertions = None requirements = None config = None testing = None util = None file_config = None logging = None include_tags = set() exclude_tags = set() options = None def setup_options(make_option): make_option( "--log-info", action="callback", type=str, callback=_log, help="turn on info logging for <LOG> (multiple OK)", ) make_option( "--log-debug", action="callback", type=str, callback=_log, help="turn on debug logging for <LOG> (multiple OK)", ) make_option( "--db", action="append", type=str, dest="db", help="Use prefab database uri. Multiple OK, " "first one is run by default.", ) make_option( "--dbs", action="callback", zeroarg_callback=_list_dbs, help="List available prefab dbs", ) make_option( "--dburi", action="append", type=str, dest="dburi", help="Database uri. Multiple OK, " "first one is run by default.", ) make_option( "--dbdriver", action="append", type="string", dest="dbdriver", help="Additional database drivers to include in tests. " "These are linked to the existing database URLs by the " "provisioning system.", ) make_option( "--dropfirst", action="store_true", dest="dropfirst", help="Drop all tables in the target database first", ) make_option( "--disable-asyncio", action="store_true", help="disable test / fixtures / provisoning running in asyncio", ) make_option( "--backend-only", action="store_true", dest="backend_only", help="Run only tests marked with __backend__ or __sparse_backend__", ) make_option( "--nomemory", action="store_true", dest="nomemory", help="Don't run memory profiling tests", ) make_option( "--notimingintensive", action="store_true", dest="notimingintensive", help="Don't run timing intensive tests", ) make_option( "--profile-sort", type=str, default="cumulative", dest="profilesort", help="Type of sort for profiling standard output", ) make_option( "--profile-dump", type=str, dest="profiledump", help="Filename where a single profile run will be dumped", ) make_option( "--postgresql-templatedb", type=str, help="name of template database to use for PostgreSQL " "CREATE DATABASE (defaults to current database)", ) make_option( "--low-connections", action="store_true", dest="low_connections", help="Use a low number of distinct connections - " "i.e. 
for Oracle TNS", ) make_option( "--write-idents", type=str, dest="write_idents", help="write out generated follower idents to <file>, " "when -n<num> is used", ) make_option( "--reversetop", action="store_true", dest="reversetop", default=False, help="Use a random-ordering set implementation in the ORM " "(helps reveal dependency issues)", ) make_option( "--requirements", action="callback", type=str, callback=_requirements_opt, help="requirements class for testing, overrides setup.cfg", ) make_option( "--with-cdecimal", action="store_true", dest="cdecimal", default=False, help="Monkeypatch the cdecimal library into Python 'decimal' " "for all tests", ) make_option( "--include-tag", action="callback", callback=_include_tag, type=str, help="Include tests with tag <tag>", ) make_option( "--exclude-tag", action="callback", callback=_exclude_tag, type=str, help="Exclude tests with tag <tag>", ) make_option( "--write-profiles", action="store_true", dest="write_profiles", default=False, help="Write/update failing profiling data.", ) make_option( "--force-write-profiles", action="store_true", dest="force_write_profiles", default=False, help="Unconditionally write/update profiling data.", ) make_option( "--dump-pyannotate", type=str, dest="dump_pyannotate", help="Run pyannotate and dump json info to given file", ) make_option( "--mypy-extra-test-path", type=str, action="append", default=[], dest="mypy_extra_test_paths", help="Additional test directories to add to the mypy tests. " "This is used only when running mypy tests. Multiple OK", ) def configure_follower(follower_ident): """Configure required state for a follower. This invokes in the parent process and typically includes database creation. """ from sqlalchemy.testing import provision provision.FOLLOWER_IDENT = follower_ident def memoize_important_follower_config(dict_): """Store important configuration we will need to send to a follower. This invokes in the parent process after normal config is set up. This is necessary as pytest seems to not be using forking, so we start with nothing in memory, *but* it isn't running our argparse callables, so we have to just copy all of that over. """ dict_["memoized_config"] = { "include_tags": include_tags, "exclude_tags": exclude_tags, } def restore_important_follower_config(dict_): """Restore important configuration needed by a follower. This invokes in the follower process. """ global include_tags, exclude_tags include_tags.update(dict_["memoized_config"]["include_tags"]) exclude_tags.update(dict_["memoized_config"]["exclude_tags"]) def read_config(): global file_config file_config = configparser.ConfigParser() file_config.read(["setup.cfg", "test.cfg"]) def pre_begin(opt): """things to set up early, before coverage might be setup.""" global options options = opt for fn in pre_configure: fn(options, file_config) def set_coverage_flag(value): options.has_coverage = value def post_begin(): """things to set up later, once we know coverage is running.""" # Lazy setup of other options (post coverage) for fn in post_configure: fn(options, file_config) # late imports, has to happen after config. 
global util, fixtures, engines, exclusions, assertions, provision global warnings, profiling, config, testing from sqlalchemy import testing # noqa from sqlalchemy.testing import fixtures, engines, exclusions # noqa from sqlalchemy.testing import assertions, warnings, profiling # noqa from sqlalchemy.testing import config, provision # noqa from sqlalchemy import util # noqa warnings.setup_filters() def _log(opt_str, value, parser): global logging if not logging: import logging logging.basicConfig() if opt_str.endswith("-info"): logging.getLogger(value).setLevel(logging.INFO) elif opt_str.endswith("-debug"): logging.getLogger(value).setLevel(logging.DEBUG) def _list_dbs(*args): print("Available --db options (use --dburi to override)") for macro in sorted(file_config.options("db")): print("%20s\t%s" % (macro, file_config.get("db", macro))) sys.exit(0) def _requirements_opt(opt_str, value, parser): _setup_requirements(value) def _exclude_tag(opt_str, value, parser): exclude_tags.add(value.replace("-", "_")) def _include_tag(opt_str, value, parser): include_tags.add(value.replace("-", "_")) pre_configure = [] post_configure = [] def pre(fn): pre_configure.append(fn) return fn def post(fn): post_configure.append(fn) return fn @pre def _setup_options(opt, file_config): global options options = opt @pre def _set_nomemory(opt, file_config): if opt.nomemory: exclude_tags.add("memory_intensive") @pre def _set_notimingintensive(opt, file_config): if opt.notimingintensive: exclude_tags.add("timing_intensive") @pre def _monkeypatch_cdecimal(options, file_config): if options.cdecimal: import cdecimal sys.modules["decimal"] = cdecimal @post def _init_symbols(options, file_config): from sqlalchemy.testing import config config._fixture_functions = _fixture_fn_class() @post def _set_disable_asyncio(opt, file_config): if opt.disable_asyncio or not py3k: from sqlalchemy.testing import asyncio asyncio.ENABLE_ASYNCIO = False @post def _engine_uri(options, file_config): from sqlalchemy import testing from sqlalchemy.testing import config from sqlalchemy.testing import provision if options.dburi: db_urls = list(options.dburi) else: db_urls = [] extra_drivers = options.dbdriver or [] if options.db: for db_token in options.db: for db in re.split(r"[,\s]+", db_token): if db not in file_config.options("db"): raise RuntimeError( "Unknown URI specifier '%s'. " "Specify --dbs for known uris." 
% db ) else: db_urls.append(file_config.get("db", db)) if not db_urls: db_urls.append(file_config.get("db", "default")) config._current = None expanded_urls = list(provision.generate_db_urls(db_urls, extra_drivers)) for db_url in expanded_urls: log.info("Adding database URL: %s", db_url) if options.write_idents and provision.FOLLOWER_IDENT: with open(options.write_idents, "a") as file_: file_.write(provision.FOLLOWER_IDENT + " " + db_url + "\n") cfg = provision.setup_config( db_url, options, file_config, provision.FOLLOWER_IDENT ) if not config._current: cfg.set_as_current(cfg, testing) @post def _requirements(options, file_config): requirement_cls = file_config.get("sqla_testing", "requirement_cls") _setup_requirements(requirement_cls) def _setup_requirements(argument): from sqlalchemy.testing import config from sqlalchemy import testing if config.requirements is not None: return modname, clsname = argument.split(":") # importlib.import_module() only introduced in 2.7, a little # late mod = __import__(modname) for component in modname.split(".")[1:]: mod = getattr(mod, component) req_cls = getattr(mod, clsname) config.requirements = testing.requires = req_cls() config.bootstrapped_as_sqlalchemy = bootstrapped_as_sqlalchemy @post def _prep_testing_database(options, file_config): from sqlalchemy.testing import config if options.dropfirst: from sqlalchemy.testing import provision for cfg in config.Config.all_configs(): provision.drop_all_schema_objects(cfg, cfg.db) @post def _reverse_topological(options, file_config): if options.reversetop: from sqlalchemy.orm.util import randomize_unitofwork randomize_unitofwork() @post def _post_setup_options(opt, file_config): from sqlalchemy.testing import config config.options = options config.file_config = file_config @post def _setup_profiling(options, file_config): from sqlalchemy.testing import profiling profiling._profile_stats = profiling.ProfileStatsFile( file_config.get("sqla_testing", "profile_file"), sort=options.profilesort, dump=options.profiledump, ) def want_class(name, cls): if not issubclass(cls, fixtures.TestBase): return False elif name.startswith("_"): return False elif ( config.options.backend_only and not getattr(cls, "__backend__", False) and not getattr(cls, "__sparse_backend__", False) and not getattr(cls, "__only_on__", False) ): return False else: return True def want_method(cls, fn): if not fn.__name__.startswith("test_"): return False elif fn.__module__ is None: return False elif include_tags: return ( hasattr(cls, "__tags__") and exclusions.tags(cls.__tags__).include_test( include_tags, exclude_tags ) ) or ( hasattr(fn, "_sa_exclusion_extend") and fn._sa_exclusion_extend.include_test( include_tags, exclude_tags ) ) elif exclude_tags and hasattr(cls, "__tags__"): return exclusions.tags(cls.__tags__).include_test( include_tags, exclude_tags ) elif exclude_tags and hasattr(fn, "_sa_exclusion_extend"): return fn._sa_exclusion_extend.include_test(include_tags, exclude_tags) else: return True def generate_sub_tests(cls, module): if getattr(cls, "__backend__", False) or getattr( cls, "__sparse_backend__", False ): sparse = getattr(cls, "__sparse_backend__", False) for cfg in _possible_configs_for_cls(cls, sparse=sparse): orig_name = cls.__name__ # we can have special chars in these names except for the # pytest junit plugin, which is tripped up by the brackets # and periods, so sanitize alpha_name = re.sub(r"[_\[\]\.]+", "_", cfg.name) alpha_name = re.sub(r"_+$", "", alpha_name) name = "%s_%s" % (cls.__name__, alpha_name) subcls = 
type( name, (cls,), {"_sa_orig_cls_name": orig_name, "__only_on_config__": cfg}, ) setattr(module, name, subcls) yield subcls else: yield cls def start_test_class_outside_fixtures(cls): _do_skips(cls) _setup_engine(cls) def stop_test_class(cls): # close sessions, immediate connections, etc. fixtures.stop_test_class_inside_fixtures(cls) # close outstanding connection pool connections, dispose of # additional engines engines.testing_reaper.stop_test_class_inside_fixtures() def stop_test_class_outside_fixtures(cls): engines.testing_reaper.stop_test_class_outside_fixtures() provision.stop_test_class_outside_fixtures(config, config.db, cls) try: if not options.low_connections: assertions.global_cleanup_assertions() finally: _restore_engine() def _restore_engine(): if config._current: config._current.reset(testing) def final_process_cleanup(): engines.testing_reaper.final_cleanup() assertions.global_cleanup_assertions() _restore_engine() def _setup_engine(cls): if getattr(cls, "__engine_options__", None): opts = dict(cls.__engine_options__) opts["scope"] = "class" eng = engines.testing_engine(options=opts) config._current.push_engine(eng, testing) def before_test(test, test_module_name, test_class, test_name): # format looks like: # "test.aaa_profiling.test_compiler.CompileTest.test_update_whereclause" name = getattr(test_class, "_sa_orig_cls_name", test_class.__name__) id_ = "%s.%s.%s" % (test_module_name, name, test_name) profiling._start_current_test(id_) def after_test(test): fixtures.after_test() engines.testing_reaper.after_test() def after_test_fixtures(test): engines.testing_reaper.after_test_outside_fixtures(test) def _possible_configs_for_cls(cls, reasons=None, sparse=False): all_configs = set(config.Config.all_configs()) if cls.__unsupported_on__: spec = exclusions.db_spec(*cls.__unsupported_on__) for config_obj in list(all_configs): if spec(config_obj): all_configs.remove(config_obj) if getattr(cls, "__only_on__", None): spec = exclusions.db_spec(*util.to_list(cls.__only_on__)) for config_obj in list(all_configs): if not spec(config_obj): all_configs.remove(config_obj) if getattr(cls, "__only_on_config__", None): all_configs.intersection_update([cls.__only_on_config__]) if hasattr(cls, "__requires__"): requirements = config.requirements for config_obj in list(all_configs): for requirement in cls.__requires__: check = getattr(requirements, requirement) skip_reasons = check.matching_config_reasons(config_obj) if skip_reasons: all_configs.remove(config_obj) if reasons is not None: reasons.extend(skip_reasons) break if hasattr(cls, "__prefer_requires__"): non_preferred = set() requirements = config.requirements for config_obj in list(all_configs): for requirement in cls.__prefer_requires__: check = getattr(requirements, requirement) if not check.enabled_for_config(config_obj): non_preferred.add(config_obj) if all_configs.difference(non_preferred): all_configs.difference_update(non_preferred) if sparse: # pick only one config from each base dialect # sorted so we get the same backend each time selecting the highest # server version info. 
per_dialect = {} for cfg in reversed( sorted( all_configs, key=lambda cfg: ( cfg.db.name, cfg.db.driver, cfg.db.dialect.server_version_info, ), ) ): db = cfg.db.name if db not in per_dialect: per_dialect[db] = cfg return per_dialect.values() return all_configs def _do_skips(cls): reasons = [] all_configs = _possible_configs_for_cls(cls, reasons) if getattr(cls, "__skip_if__", False): for c in getattr(cls, "__skip_if__"): if c(): config.skip_test( "'%s' skipped by %s" % (cls.__name__, c.__name__) ) if not all_configs: msg = "'%s' unsupported on any DB implementation %s%s" % ( cls.__name__, ", ".join( "'%s(%s)+%s'" % ( config_obj.db.name, ".".join( str(dig) for dig in exclusions._server_version(config_obj.db) ), config_obj.db.driver, ) for config_obj in config.Config.all_configs() ), ", ".join(reasons), ) config.skip_test(msg) elif hasattr(cls, "__prefer_backends__"): non_preferred = set() spec = exclusions.db_spec(*util.to_list(cls.__prefer_backends__)) for config_obj in all_configs: if not spec(config_obj): non_preferred.add(config_obj) if all_configs.difference(non_preferred): all_configs.difference_update(non_preferred) if config._current not in all_configs: _setup_config(all_configs.pop(), cls) def _setup_config(config_obj, ctx): config._current.push(config_obj, testing) class FixtureFunctions(ABC): @abc.abstractmethod def skip_test_exception(self, *arg, **kw): raise NotImplementedError() @abc.abstractmethod def combinations(self, *args, **kw): raise NotImplementedError() @abc.abstractmethod def param_ident(self, *args, **kw): raise NotImplementedError() @abc.abstractmethod def fixture(self, *arg, **kw): raise NotImplementedError() def get_current_test_name(self): raise NotImplementedError() @abc.abstractmethod def mark_base_test_class(self): raise NotImplementedError() _fixture_fn_class = None def set_fixture_functions(fixture_fn_class): global _fixture_fn_class _fixture_fn_class = fixture_fn_class
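

# Illustrative, standalone sketch (not part of the SQLAlchemy testing plugin
# above): the --include-tag / --exclude-tag options normalise dashes to
# underscores and feed two module-level sets that want_method() consults.
# matches_tags() below is a hypothetical, simplified stand-in for
# exclusions.tags(...).include_test(); it only demonstrates the decision shape.

def _normalize_tag(value):
    # mirrors _include_tag / _exclude_tag: "timing-intensive" -> "timing_intensive"
    return value.replace("-", "_")


def matches_tags(test_tags, include_tags, exclude_tags):
    test_tags = set(test_tags)
    if exclude_tags & test_tags:
        return False
    if include_tags:
        return bool(include_tags & test_tags)
    return True


if __name__ == "__main__":
    include = {_normalize_tag("timing-intensive")}
    exclude = {_normalize_tag("memory-intensive")}
    assert matches_tags({"timing_intensive"}, include, exclude)
    assert not matches_tags({"memory_intensive"}, include, exclude)
    assert not matches_tags({"backend"}, include, exclude)
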
"""Support for monitoring a Sense energy sensor.""" from homeassistant.components.sensor import ( STATE_CLASS_MEASUREMENT, STATE_CLASS_TOTAL_INCREASING, SensorEntity, ) from homeassistant.const import ( ATTR_ATTRIBUTION, DEVICE_CLASS_ENERGY, DEVICE_CLASS_POWER, ELECTRIC_POTENTIAL_VOLT, ENERGY_KILO_WATT_HOUR, PERCENTAGE, POWER_WATT, ) from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from .const import ( ACTIVE_NAME, ACTIVE_TYPE, ATTRIBUTION, CONSUMPTION_ID, CONSUMPTION_NAME, DOMAIN, FROM_GRID_ID, FROM_GRID_NAME, ICON, MDI_ICONS, NET_PRODUCTION_ID, NET_PRODUCTION_NAME, PRODUCTION_ID, PRODUCTION_NAME, PRODUCTION_PCT_ID, PRODUCTION_PCT_NAME, SENSE_DATA, SENSE_DEVICE_UPDATE, SENSE_DEVICES_DATA, SENSE_DISCOVERED_DEVICES_DATA, SENSE_TRENDS_COORDINATOR, SOLAR_POWERED_ID, SOLAR_POWERED_NAME, TO_GRID_ID, TO_GRID_NAME, ) class SensorConfig: """Data structure holding sensor configuration.""" def __init__(self, name, sensor_type): """Sensor name and type to pass to API.""" self.name = name self.sensor_type = sensor_type # Sensor types/ranges ACTIVE_SENSOR_TYPE = SensorConfig(ACTIVE_NAME, ACTIVE_TYPE) # Sensor types/ranges TRENDS_SENSOR_TYPES = { "daily": SensorConfig("Daily", "DAY"), "weekly": SensorConfig("Weekly", "WEEK"), "monthly": SensorConfig("Monthly", "MONTH"), "yearly": SensorConfig("Yearly", "YEAR"), } # Production/consumption variants SENSOR_VARIANTS = [(PRODUCTION_ID, PRODUCTION_NAME), (CONSUMPTION_ID, CONSUMPTION_NAME)] # Trend production/consumption variants TREND_SENSOR_VARIANTS = SENSOR_VARIANTS + [ (PRODUCTION_PCT_ID, PRODUCTION_PCT_NAME), (NET_PRODUCTION_ID, NET_PRODUCTION_NAME), (FROM_GRID_ID, FROM_GRID_NAME), (TO_GRID_ID, TO_GRID_NAME), (SOLAR_POWERED_ID, SOLAR_POWERED_NAME), ] def sense_to_mdi(sense_icon): """Convert sense icon to mdi icon.""" return "mdi:{}".format(MDI_ICONS.get(sense_icon, "power-plug")) async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the Sense sensor.""" data = hass.data[DOMAIN][config_entry.entry_id][SENSE_DATA] sense_devices_data = hass.data[DOMAIN][config_entry.entry_id][SENSE_DEVICES_DATA] trends_coordinator = hass.data[DOMAIN][config_entry.entry_id][ SENSE_TRENDS_COORDINATOR ] # Request only in case it takes longer # than 60s await trends_coordinator.async_request_refresh() sense_monitor_id = data.sense_monitor_id sense_devices = hass.data[DOMAIN][config_entry.entry_id][ SENSE_DISCOVERED_DEVICES_DATA ] devices = [ SenseEnergyDevice(sense_devices_data, device, sense_monitor_id) for device in sense_devices if device["tags"]["DeviceListAllowed"] == "true" ] for variant_id, variant_name in SENSOR_VARIANTS: name = ACTIVE_SENSOR_TYPE.name sensor_type = ACTIVE_SENSOR_TYPE.sensor_type unique_id = f"{sense_monitor_id}-active-{variant_id}" devices.append( SenseActiveSensor( data, name, sensor_type, sense_monitor_id, variant_id, variant_name, unique_id, ) ) for i in range(len(data.active_voltage)): devices.append(SenseVoltageSensor(data, i, sense_monitor_id)) for type_id, typ in TRENDS_SENSOR_TYPES.items(): for variant_id, variant_name in TREND_SENSOR_VARIANTS: name = typ.name sensor_type = typ.sensor_type unique_id = f"{sense_monitor_id}-{type_id}-{variant_id}" devices.append( SenseTrendsSensor( data, name, sensor_type, variant_id, variant_name, trends_coordinator, unique_id, ) ) async_add_entities(devices) class SenseActiveSensor(SensorEntity): """Implementation of a Sense energy sensor.""" _attr_icon = ICON _attr_native_unit_of_measurement = POWER_WATT 
_attr_extra_state_attributes = {ATTR_ATTRIBUTION: ATTRIBUTION} _attr_should_poll = False _attr_available = False _attr_state_class = STATE_CLASS_MEASUREMENT def __init__( self, data, name, sensor_type, sense_monitor_id, variant_id, variant_name, unique_id, ): """Initialize the Sense sensor.""" self._attr_name = f"{name} {variant_name}" self._attr_unique_id = unique_id self._data = data self._sense_monitor_id = sense_monitor_id self._sensor_type = sensor_type self._variant_id = variant_id self._variant_name = variant_name async def async_added_to_hass(self): """Register callbacks.""" self.async_on_remove( async_dispatcher_connect( self.hass, f"{SENSE_DEVICE_UPDATE}-{self._sense_monitor_id}", self._async_update_from_data, ) ) @callback def _async_update_from_data(self): """Update the sensor from the data. Must not do I/O.""" new_state = round( self._data.active_solar_power if self._variant_id == PRODUCTION_ID else self._data.active_power ) if self._attr_available and self._attr_native_value == new_state: return self._attr_native_value = new_state self._attr_available = True self.async_write_ha_state() class SenseVoltageSensor(SensorEntity): """Implementation of a Sense energy voltage sensor.""" _attr_native_unit_of_measurement = ELECTRIC_POTENTIAL_VOLT _attr_extra_state_attributes = {ATTR_ATTRIBUTION: ATTRIBUTION} _attr_icon = ICON _attr_should_poll = False _attr_available = False def __init__( self, data, index, sense_monitor_id, ): """Initialize the Sense sensor.""" line_num = index + 1 self._attr_name = f"L{line_num} Voltage" self._attr_unique_id = f"{sense_monitor_id}-L{line_num}" self._data = data self._sense_monitor_id = sense_monitor_id self._voltage_index = index async def async_added_to_hass(self): """Register callbacks.""" self.async_on_remove( async_dispatcher_connect( self.hass, f"{SENSE_DEVICE_UPDATE}-{self._sense_monitor_id}", self._async_update_from_data, ) ) @callback def _async_update_from_data(self): """Update the sensor from the data. 
Must not do I/O.""" new_state = round(self._data.active_voltage[self._voltage_index], 1) if self._attr_available and self._attr_native_value == new_state: return self._attr_available = True self._attr_native_value = new_state self.async_write_ha_state() class SenseTrendsSensor(SensorEntity): """Implementation of a Sense energy sensor.""" _attr_device_class = DEVICE_CLASS_ENERGY _attr_state_class = STATE_CLASS_TOTAL_INCREASING _attr_native_unit_of_measurement = ENERGY_KILO_WATT_HOUR _attr_extra_state_attributes = {ATTR_ATTRIBUTION: ATTRIBUTION} _attr_icon = ICON _attr_should_poll = False def __init__( self, data, name, sensor_type, variant_id, variant_name, trends_coordinator, unique_id, ): """Initialize the Sense sensor.""" self._attr_name = f"{name} {variant_name}" self._attr_unique_id = unique_id self._data = data self._sensor_type = sensor_type self._coordinator = trends_coordinator self._variant_id = variant_id self._had_any_update = False if variant_id in [PRODUCTION_PCT_ID, SOLAR_POWERED_ID]: self._attr_native_unit_of_measurement = PERCENTAGE self._attr_entity_registry_enabled_default = False self._attr_state_class = None self._attr_device_class = None @property def native_value(self): """Return the state of the sensor.""" return round(self._data.get_trend(self._sensor_type, self._variant_id), 1) @property def available(self): """Return if entity is available.""" return self._had_any_update and self._coordinator.last_update_success @callback def _async_update(self): """Track if we had an update so we do not report zero data.""" self._had_any_update = True self.async_write_ha_state() async def async_update(self): """Update the entity. Only used by the generic entity update service. """ await self._coordinator.async_request_refresh() async def async_added_to_hass(self): """When entity is added to hass.""" self.async_on_remove(self._coordinator.async_add_listener(self._async_update)) class SenseEnergyDevice(SensorEntity): """Implementation of a Sense energy device.""" _attr_available = False _attr_state_class = STATE_CLASS_MEASUREMENT _attr_native_unit_of_measurement = POWER_WATT _attr_extra_state_attributes = {ATTR_ATTRIBUTION: ATTRIBUTION} _attr_device_class = DEVICE_CLASS_POWER _attr_should_poll = False def __init__(self, sense_devices_data, device, sense_monitor_id): """Initialize the Sense binary sensor.""" self._attr_name = f"{device['name']} {CONSUMPTION_NAME}" self._id = device["id"] self._sense_monitor_id = sense_monitor_id self._attr_unique_id = f"{sense_monitor_id}-{self._id}-{CONSUMPTION_ID}" self._attr_icon = sense_to_mdi(device["icon"]) self._sense_devices_data = sense_devices_data async def async_added_to_hass(self): """Register callbacks.""" self.async_on_remove( async_dispatcher_connect( self.hass, f"{SENSE_DEVICE_UPDATE}-{self._sense_monitor_id}", self._async_update_from_data, ) ) @callback def _async_update_from_data(self): """Get the latest data, update state. Must not do I/O.""" device_data = self._sense_devices_data.get_device_by_id(self._id) if not device_data or "w" not in device_data: new_state = 0 else: new_state = int(device_data["w"]) if self._attr_available and self._attr_native_value == new_state: return self._attr_native_value = new_state self._attr_available = True self.async_write_ha_state()
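

# Hypothetical standalone sketch of how the icons and unique IDs above are
# composed. MDI_ICONS_EXAMPLE is a made-up stand-in for the mapping imported
# from .const, and the literal IDs below are illustrative values only.

MDI_ICONS_EXAMPLE = {"fridge": "fridge", "stove": "stove"}


def sense_to_mdi_example(sense_icon):
    # same fallback behaviour as sense_to_mdi() above
    return "mdi:{}".format(MDI_ICONS_EXAMPLE.get(sense_icon, "power-plug"))


def trend_unique_id(sense_monitor_id, type_id, variant_id):
    # mirrors the f-string used when building SenseTrendsSensor unique IDs
    return f"{sense_monitor_id}-{type_id}-{variant_id}"


if __name__ == "__main__":
    assert sense_to_mdi_example("fridge") == "mdi:fridge"
    assert sense_to_mdi_example("unknown") == "mdi:power-plug"
    assert trend_unique_id("12345", "daily", "production") == "12345-daily-production"
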
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from azure.mgmt.core.exceptions import ARMErrorFormat from msrest import Serializer from .. import models as _models from .._vendor import _convert_request, _format_url_section T = TypeVar('T') JSONType = Any ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() _SERIALIZER.client_side_validation = False def build_get_request( subscription_id: str, provider_namespace: str, feature_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2021-07-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Features/featureProviders/{providerNamespace}/subscriptionFeatureRegistrations/{featureName}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "providerNamespace": _SERIALIZER.url("provider_namespace", provider_namespace, 'str'), "featureName": _SERIALIZER.url("feature_name", feature_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_create_or_update_request( subscription_id: str, provider_namespace: str, feature_name: str, *, json: JSONType = None, content: Any = None, **kwargs: Any ) -> HttpRequest: content_type = kwargs.pop('content_type', None) # type: Optional[str] api_version = "2021-07-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Features/featureProviders/{providerNamespace}/subscriptionFeatureRegistrations/{featureName}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "providerNamespace": _SERIALIZER.url("provider_namespace", provider_namespace, 'str'), "featureName": _SERIALIZER.url("feature_name", feature_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, 
Any] if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="PUT", url=url, params=query_parameters, headers=header_parameters, json=json, content=content, **kwargs ) def build_delete_request( subscription_id: str, provider_namespace: str, feature_name: str, **kwargs: Any ) -> HttpRequest: api_version = "2021-07-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Features/featureProviders/{providerNamespace}/subscriptionFeatureRegistrations/{featureName}') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "providerNamespace": _SERIALIZER.url("provider_namespace", provider_namespace, 'str'), "featureName": _SERIALIZER.url("feature_name", feature_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="DELETE", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_list_by_subscription_request( subscription_id: str, provider_namespace: str, **kwargs: Any ) -> HttpRequest: api_version = "2021-07-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Features/featureProviders/{providerNamespace}/subscriptionFeatureRegistrations') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), "providerNamespace": _SERIALIZER.url("provider_namespace", provider_namespace, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_list_all_by_subscription_request( subscription_id: str, **kwargs: Any ) -> HttpRequest: api_version = "2021-07-01" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Features/subscriptionFeatureRegistrations') path_format_arguments = { "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) class SubscriptionFeatureRegistrationsOperations(object): """SubscriptionFeatureRegistrationsOperations operations. 
You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.resource.features.v2021_07_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace def get( self, provider_namespace: str, feature_name: str, **kwargs: Any ) -> "_models.SubscriptionFeatureRegistration": """Returns a feature registration. :param provider_namespace: The provider namespace. :type provider_namespace: str :param feature_name: The feature name. :type feature_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: SubscriptionFeatureRegistration, or the result of cls(response) :rtype: ~azure.mgmt.resource.features.v2021_07_01.models.SubscriptionFeatureRegistration :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SubscriptionFeatureRegistration"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_request( subscription_id=self._config.subscription_id, provider_namespace=provider_namespace, feature_name=feature_name, template_url=self.get.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('SubscriptionFeatureRegistration', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Features/featureProviders/{providerNamespace}/subscriptionFeatureRegistrations/{featureName}'} # type: ignore @distributed_trace def create_or_update( self, provider_namespace: str, feature_name: str, subscription_feature_registration_type: Optional["_models.SubscriptionFeatureRegistration"] = None, **kwargs: Any ) -> "_models.SubscriptionFeatureRegistration": """Create or update a feature registration. :param provider_namespace: The provider namespace. :type provider_namespace: str :param feature_name: The feature name. :type feature_name: str :param subscription_feature_registration_type: Subscription Feature Registration Type details. 
:type subscription_feature_registration_type: ~azure.mgmt.resource.features.v2021_07_01.models.SubscriptionFeatureRegistration :keyword callable cls: A custom type or function that will be passed the direct response :return: SubscriptionFeatureRegistration, or the result of cls(response) :rtype: ~azure.mgmt.resource.features.v2021_07_01.models.SubscriptionFeatureRegistration :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SubscriptionFeatureRegistration"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] if subscription_feature_registration_type is not None: _json = self._serialize.body(subscription_feature_registration_type, 'SubscriptionFeatureRegistration') else: _json = None request = build_create_or_update_request( subscription_id=self._config.subscription_id, provider_namespace=provider_namespace, feature_name=feature_name, content_type=content_type, json=_json, template_url=self.create_or_update.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('SubscriptionFeatureRegistration', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Features/featureProviders/{providerNamespace}/subscriptionFeatureRegistrations/{featureName}'} # type: ignore @distributed_trace def delete( self, provider_namespace: str, feature_name: str, **kwargs: Any ) -> None: """Deletes a feature registration. :param provider_namespace: The provider namespace. :type provider_namespace: str :param feature_name: The feature name. 
:type feature_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_delete_request( subscription_id=self._config.subscription_id, provider_namespace=provider_namespace, feature_name=feature_name, template_url=self.delete.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) delete.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Features/featureProviders/{providerNamespace}/subscriptionFeatureRegistrations/{featureName}'} # type: ignore @distributed_trace def list_by_subscription( self, provider_namespace: str, **kwargs: Any ) -> Iterable["_models.SubscriptionFeatureRegistrationList"]: """Returns subscription feature registrations for given subscription and provider namespace. :param provider_namespace: The provider namespace. :type provider_namespace: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either SubscriptionFeatureRegistrationList or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.features.v2021_07_01.models.SubscriptionFeatureRegistrationList] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SubscriptionFeatureRegistrationList"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_by_subscription_request( subscription_id=self._config.subscription_id, provider_namespace=provider_namespace, template_url=self.list_by_subscription.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_by_subscription_request( subscription_id=self._config.subscription_id, provider_namespace=provider_namespace, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("SubscriptionFeatureRegistrationList", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = 
self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Features/featureProviders/{providerNamespace}/subscriptionFeatureRegistrations'} # type: ignore @distributed_trace def list_all_by_subscription( self, **kwargs: Any ) -> Iterable["_models.SubscriptionFeatureRegistrationList"]: """Returns subscription feature registrations for given subscription. :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either SubscriptionFeatureRegistrationList or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.features.v2021_07_01.models.SubscriptionFeatureRegistrationList] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SubscriptionFeatureRegistrationList"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_list_all_by_subscription_request( subscription_id=self._config.subscription_id, template_url=self.list_all_by_subscription.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) else: request = build_list_all_by_subscription_request( subscription_id=self._config.subscription_id, template_url=next_link, ) request = _convert_request(request) request.url = self._client.format_url(request.url) request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("SubscriptionFeatureRegistrationList", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_all_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Features/subscriptionFeatureRegistrations'} # type: ignore
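

# Simplified, self-contained sketch of the URL templating that the generated
# build_*_request helpers above perform. str.format() stands in for the SDK's
# internal _format_url_section(); the subscription, namespace, and feature
# values are illustrative only.

def format_feature_url(subscription_id, provider_namespace, feature_name):
    template = (
        "/subscriptions/{subscriptionId}/providers/Microsoft.Features"
        "/featureProviders/{providerNamespace}"
        "/subscriptionFeatureRegistrations/{featureName}"
    )
    return template.format(
        subscriptionId=subscription_id,
        providerNamespace=provider_namespace,
        featureName=feature_name,
    )


if __name__ == "__main__":
    url = format_feature_url("00000000-1111", "Microsoft.Compute", "MyFeature")
    # the helpers then append the query string, e.g. api-version=2021-07-01
    print(url + "?api-version=2021-07-01")
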
import sys from unittest import SkipTest import numpy as np from numpy.testing import assert_array_almost_equal from vispy.testing import run_tests_if_main from vispy.geometry.triangulation import Triangulation as T def assert_array_eq(a, b): assert a.shape == b.shape assert a.dtype == b.dtype mask = np.isnan(a) assert np.all(np.isnan(b[mask])) assert np.all(a[~mask] == b[~mask]) def test_intersect_edge_arrays(): global t pts = np.array([ [0., 0.], [0., 10.], [5., 0.], [-5., 0.], [-1., 11.], [1., 9.], ]) edges = np.array([ [0, 1], [2, 3], [0, 3], [4, 5], [4, 1], [0, 1], ]) lines = pts[edges] t = T(pts, edges) # intersect array of one edge with a array of many edges intercepts = t._intersect_edge_arrays(lines[0:1], lines[1:]) expect = np.array([0.5, 0.0, 0.5, 1.0, np.nan]) assert_array_eq(intercepts, expect) # intersect every line with every line intercepts = t._intersect_edge_arrays(lines[:, np.newaxis, ...], lines[np.newaxis, ...]) for i in range(lines.shape[0]): int2 = t._intersect_edge_arrays(lines[i], lines) assert_array_eq(intercepts[i], int2) def test_edge_intersections(): global t pts = np.array([ [0, 0], [1, 0], [1, 1], [0, 1], [0, 0.5], # three edges intersect here [2, 0.5], [-1, 0.2], [2, 0.8], [-1, 1], [0, 0.5], ]) edges = np.array([ [0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [6, 7], [8, 9], ]) t = T(pts, edges) # first test find_edge_intersections cuts = t._find_edge_intersections() expect = { 0: [], 1: [(0.5, [1., 0.5]), (0.6, [1., 0.6])], 2: [], 3: [(0.5, [0., 0.5]), (0.6, [0., 0.4])], 4: [(0.25, [0.5, 0.5]), (0.5, [1., 0.5])], 5: [(1./3., [0., 0.4]), (0.5, [0.5, 0.5]), (2./3., [1., 0.6])], } assert len(expect) == len(cuts) for k, v in expect.items(): assert len(v) == len(cuts[k]) for i, ecut in enumerate(v): vcut = cuts[k][i] assert len(vcut) == len(ecut) for j in range(len(vcut)): assert_array_almost_equal(np.array(ecut[j]), np.array(vcut[j])) # next test that we can split the edges correctly t._split_intersecting_edges() pts = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0., 0.5], [2., 0.5], [-1., 0.2], [2., 0.8], [-1., 1.], [0., 0.5], [1., 0.5], [1., 0.6], [0., 0.5], [0., 0.4], [0.5, 0.5], [1., 0.5], [0., 0.4], [0.5, 0.5], [1., 0.6]]) edges = np.array([[0, 1], [1, 10], [2, 3], [3, 12], [4, 14], [6, 16], [8, 9], [10, 11], [11, 2], [12, 13], [13, 0], [14, 15], [15, 5], [16, 17], [17, 18], [18, 7]]) if sys.version[0] == '3': raise SkipTest('Triangulation differences on Py3k') assert_array_almost_equal(pts, t.pts) assert np.all(edges == t.edges) # Test _nearly_ parallel lines. 
pts = np.array([[0., 0.], [1.62434542, 0.], [1.62434542, -0.61175638], [1.09617364, -0.61175638]]) edges = np.array([[0, 1], [1, 2], [2, 3], [3, 0]]) t = T(pts, edges) for edge, cuts in t._find_edge_intersections().items(): assert len(cuts) == 0 def test_merge_duplicate_points(): global t pts = np.array([ [0, 0], [1, 1], [0.1, 0.7], [2, 3], [0, 0], [0.1, 0.7], [5, 6], ]) edges = np.array([ [0, 6], [1, 5], [2, 4], [3, 6], [4, 5], ]) t = T(pts, edges) t._merge_duplicate_points() pts = np.array([ [0, 0], [1, 1], [0.1, 0.7], [2, 3], [5, 6], ]) edges = np.array([ [0, 4], [1, 2], [2, 0], [3, 4], [0, 2], ]) assert np.allclose(t.pts, pts) assert np.all(t.edges == edges) def test_initialize(): # check points are correctly sorted # check artificial points are outside bounds of all others # check tops / bottoms pass def test_utility_methods(): global t pts = np.array([ [0, 0], [1, 0], [2, 0], [3, 0], [1.5, 2], [1.5, -2], ]) edges = np.array([ [4, 5], # edge cuts through triangle (1, 2, 4) ]) t = T(pts, edges) # skip initialization and just simulate being part-way through # triangulation for tri in [[0, 1, 4], [1, 2, 4], [2, 3, 4]]: t._add_tri(*tri) # find_cut_triangle assert t._find_cut_triangle((4, 5)) == (4, 1, 2) # orientation assert t._orientation((4, 5), 0) == 1 assert t._orientation((4, 5), 1) == 1 assert t._orientation((4, 5), 2) == -1 assert t._orientation((4, 5), 3) == -1 assert t._orientation((4, 5), 4) == 0 assert t._orientation((4, 5), 5) == 0 # adjacent_tri assert t._adjacent_tri((1, 4), 0) == (4, 1, 2) assert t._adjacent_tri((0, 4), 1) is None assert t._adjacent_tri((1, 4), (1, 4, 0)) == (4, 1, 2) assert t._adjacent_tri((0, 4), (1, 4, 0)) is None try: t._adjacent_tri((1, 4), 5) except RuntimeError: pass else: raise Exception("Expected RuntimeError.") # edges_intersect assert not t._edges_intersect((0, 1), (1, 2)) assert not t._edges_intersect((0, 2), (1, 2)) assert t._edges_intersect((4, 5), (1, 2)) # is_constraining_edge assert t._is_constraining_edge((4, 5)) assert t._is_constraining_edge((5, 4)) assert not t._is_constraining_edge((3, 5)) assert not t._is_constraining_edge((3, 2)) def test_projection(): pts = np.array([[0, 0], [5, 0], [1, 2], [3, 4]]) t = T(pts, np.zeros((0, 2))) a, b, c, d = pts assert np.allclose(t._projection(a, c, b), [1, 0]) assert np.allclose(t._projection(b, c, a), [1, 0]) assert np.allclose(t._projection(a, d, b), [3, 0]) assert np.allclose(t._projection(b, d, a), [3, 0]) assert np.allclose(t._projection(a, b, c), [1, 2]) assert np.allclose(t._projection(c, b, a), [1, 2]) def test_random(): # Just test that these triangulate without exception. # TODO: later on, we can turn this same test into an image comparison # with Polygon. N = 10 np.random.seed(0) for i in range(4): pts = np.random.normal(size=(N, 2)) edges = np.zeros((N, 2), dtype=int) edges[:, 0] = np.arange(N) edges[:, 1] = np.arange(1, N+1) % N t = T(pts, edges) t.triangulate() theta = np.linspace(0, 2*np.pi, 11)[:-1] pts = np.hstack([np.cos(theta)[:, np.newaxis], np.sin(theta)[:, np.newaxis]]) pts[::2] *= 0.4 edges = np.empty((pts.shape[0], 2), dtype=np.uint) edges[:, 0] = np.arange(pts.shape[0]) edges[:, 1] = edges[:, 0] + 1 edges[-1, 1] = 0 t = T(pts, edges) t.triangulate() # much larger test # this should pass, but takes forever.. 
#N = 4000 #pts = np.random.normal(size=(N, 2)) #pts = np.cumsum(pts, axis=0) #edges = np.zeros((N, 2), dtype=int) #edges[:,0] = np.arange(N) #edges[:,1] = np.arange(1,N+1) % N #t = T(pts, edges) #t.triangulate() def test_orthogonal(): # make lines that are entirely vertical / horizontal np.random.seed(1) N = 100 pts = [[0, 0]] for i in range(N - 1): p = pts[-1][:] p[i % 2] += np.random.normal() pts.append(p) pts = np.array(pts) edges = np.zeros((N, 2), dtype=int) edges[:, 0] = np.arange(N) edges[:, 1] = np.arange(1, N + 1) % N t = T(pts, edges) t.triangulate() def test_edge_event(): # mode 1 pts = np.array([[0, 0], [5, -10], [10, 0], [6, -5], [5, 5], ]) inds = np.arange(pts.shape[0])[:, np.newaxis] edges = np.hstack([inds, np.roll(inds, -1)]) t = T(pts, edges) t.triangulate() t = T(pts * [-1, 1], edges) t.triangulate() # mode 2 pts = np.array([[0, 0], [10, 0], [20, 0], [5, 11], ]) inds = np.arange(pts.shape[0])[:, np.newaxis] edges = np.hstack([inds, np.roll(inds, -1)]) t = T(pts, edges) t.triangulate() t = T(pts * [-1, 1], edges) t.triangulate() # mode 1, 2 pts = np.array([[0, 0], [10, 0], [20, 0], [5, 11], [9, 10], [0, 20], ]) inds = np.arange(pts.shape[0])[:, np.newaxis] edges = np.hstack([inds, np.roll(inds, -1)]) t = T(pts, edges) t.triangulate() t = T(pts * [-1, 1], edges) t.triangulate() # mode 2, 1 pts = np.array([[0, 0], [10, 0], [20, 0], [15, 8], [15, 1], [-5, 10], ]) inds = np.arange(pts.shape[0])[:, np.newaxis] edges = np.hstack([inds, np.roll(inds, -1)]) t = T(pts, edges) t.triangulate() t = T(pts * [-1, 1], edges) t.triangulate() # mode 2, 1 with many triangles pts = np.array([[0, 10], [2, 8], [4, 6], [6, 4], [8, 2], [10, 0], [20, 5], [20, 20], [2, 13], [4, 11], [6, 9], [8, 7], [10, 5], [10, 1], [0, 15], ]) inds = np.arange(pts.shape[0])[:, np.newaxis] edges = np.hstack([inds, np.roll(inds, -1)]) t = T(pts, edges) t.triangulate() t = T(pts * [-1, 1], edges) t.triangulate() # mode 1, 2, 1, 2, 1 pts = np.array([[0, 10], [2, 9], [4, 8], [6, 7], [8, 6], [10, 5], [20, 5], [20, 20], [2, 11], [19, 19], [6, 9], [19, 18], [10, 7], [11, 5.1], [0, 11.1], ]) inds = np.arange(pts.shape[0])[:, np.newaxis] edges = np.hstack([inds, np.roll(inds, -1)]) t = T(pts, edges) t.triangulate() t = T(pts * [-1, 1], edges) t.triangulate() # mode 2, 1, 2, 1 pts = np.array([[0, 10], [2, 9], [4, 8], [6, 7], [8, 6], [10, 5], [20, 5], [20, 20], [6, 9], [19, 18], [10, 7], [11, 5.1], [0, 11.1], ]) inds = np.arange(pts.shape[0])[:, np.newaxis] edges = np.hstack([inds, np.roll(inds, -1)]) t = T(pts, edges) t.triangulate() t = T(pts * [-1, 1], edges) t.triangulate() # 1, 2 upper/lower polygon order check pts = np.array([[-5, 0], [-3, 0], [10, 0], [15, 15], [4, 9], [6, 8.8], [9, 10], ]) inds = np.arange(pts.shape[0])[:, np.newaxis] edges = np.hstack([inds, np.roll(inds, -1)]) t = T(pts, edges) t.triangulate() t = T(pts * [-1, 1], edges) t.triangulate() run_tests_if_main()
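

# Standalone sketch of the closed-polygon edge-list idiom the tests above use
# repeatedly: vertex i is joined to vertex (i + 1) % N. Only numpy is needed;
# with vispy installed the result would be fed to Triangulation as in the tests.

import numpy as np


def polygon_edges(n_points):
    inds = np.arange(n_points)[:, np.newaxis]
    # pairs (0, 1), (1, 2), ..., (n-1, 0), same as np.hstack([inds, np.roll(inds, -1)])
    return np.hstack([inds, np.roll(inds, -1)])


if __name__ == "__main__":
    pts = np.array([[0, 0], [10, 0], [10, 10], [0, 10]], dtype=float)
    edges = polygon_edges(pts.shape[0])
    assert edges.tolist() == [[0, 1], [1, 2], [2, 3], [3, 0]]
    # with vispy available, the tests then do:
    #   t = T(pts, edges); t.triangulate()
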
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import mock from oslo_config import cfg from oslo_service import threadgroup from heat.common import context from heat.common import service_utils from heat.engine import service from heat.engine import worker from heat.objects import service as service_objects from heat.rpc import worker_api from heat.tests import common from heat.tests.engine import tools from heat.tests import utils class ServiceEngineTest(common.HeatTestCase): def setUp(self): super(ServiceEngineTest, self).setUp() self.ctx = utils.dummy_context(tenant_id='stack_service_test_tenant') self.eng = service.EngineService('a-host', 'a-topic') self.eng.engine_id = 'engine-fake-uuid' def test_make_sure_rpc_version(self): self.assertEqual( '1.13', service.EngineService.RPC_API_VERSION, ('RPC version is changed, please update this test to new version ' 'and make sure additional test cases are added for RPC APIs ' 'added in new version')) @mock.patch.object(service_objects.Service, 'get_all') @mock.patch.object(service_utils, 'format_service') def test_service_get_all(self, mock_format_service, mock_get_all): mock_get_all.return_value = [mock.Mock()] mock_format_service.return_value = mock.Mock() self.assertEqual(1, len(self.eng.list_services(self.ctx))) self.assertTrue(mock_get_all.called) mock_format_service.assert_called_once_with(mock.ANY) @mock.patch.object(service_objects.Service, 'update_by_id') @mock.patch.object(service_objects.Service, 'create') @mock.patch.object(context, 'get_admin_context') def test_service_manage_report_start(self, mock_admin_context, mock_service_create, mock_service_update): self.eng.service_id = None mock_admin_context.return_value = self.ctx srv = dict(id='mock_id') mock_service_create.return_value = srv self.eng.service_manage_report() mock_admin_context.assert_called_once_with() mock_service_create.assert_called_once_with( self.ctx, dict(host=self.eng.host, hostname=self.eng.hostname, binary=self.eng.binary, engine_id=self.eng.engine_id, topic=self.eng.topic, report_interval=cfg.CONF.periodic_interval)) self.assertEqual(srv['id'], self.eng.service_id) mock_service_update.assert_called_once_with( self.ctx, self.eng.service_id, dict(deleted_at=None)) @mock.patch.object(service_objects.Service, 'get_all_by_args') @mock.patch.object(service_objects.Service, 'delete') @mock.patch.object(context, 'get_admin_context') def test_service_manage_report_cleanup(self, mock_admin_context, mock_service_delete, mock_get_all): mock_admin_context.return_value = self.ctx ages_a_go = datetime.datetime.utcnow() - datetime.timedelta( seconds=4000) mock_get_all.return_value = [{'id': 'foo', 'deleted_at': None, 'updated_at': ages_a_go}] self.eng.service_manage_cleanup() mock_admin_context.assert_called_once_with() mock_get_all.assert_called_once_with(self.ctx, self.eng.host, self.eng.binary, self.eng.hostname) mock_service_delete.assert_called_once_with( self.ctx, 'foo') @mock.patch.object(service_objects.Service, 'update_by_id') @mock.patch.object(context, 
'get_admin_context') def test_service_manage_report_update(self, mock_admin_context, mock_service_update): self.eng.service_id = 'mock_id' mock_admin_context.return_value = self.ctx self.eng.service_manage_report() mock_admin_context.assert_called_once_with() mock_service_update.assert_called_once_with( self.ctx, 'mock_id', dict(deleted_at=None)) @mock.patch.object(service_objects.Service, 'update_by_id') @mock.patch.object(context, 'get_admin_context') def test_service_manage_report_update_fail(self, mock_admin_context, mock_service_update): self.eng.service_id = 'mock_id' mock_admin_context.return_value = self.ctx mock_service_update.side_effect = Exception() self.eng.service_manage_report() msg = 'Service %s update failed' % self.eng.service_id self.assertIn(msg, self.LOG.output) def test_stop_rpc_server(self): with mock.patch.object(self.eng, '_rpc_server') as mock_rpc_server: self.eng._stop_rpc_server() mock_rpc_server.stop.assert_called_once_with() mock_rpc_server.wait.assert_called_once_with() def _test_engine_service_start( self, thread_group_class, worker_service_class, engine_listener_class, thread_group_manager_class, sample_uuid_method, rpc_client_class, target_class, rpc_server_method): self.patchobject(self.eng, 'service_manage_cleanup') self.patchobject(self.eng, 'reset_stack_status') self.eng.start() # engine id sample_uuid_method.assert_called_once_with() sampe_uuid = sample_uuid_method.return_value self.assertEqual(sampe_uuid, self.eng.engine_id, 'Failed to generated engine_id') # Thread group manager thread_group_manager_class.assert_called_once_with() thread_group_manager = thread_group_manager_class.return_value self.assertEqual(thread_group_manager, self.eng.thread_group_mgr, 'Failed to create Thread Group Manager') # Engine Listener engine_listener_class.assert_called_once_with( self.eng.host, self.eng.engine_id, self.eng.thread_group_mgr ) engine_lister = engine_listener_class.return_value engine_lister.start.assert_called_once_with() # Worker Service if cfg.CONF.convergence_engine: worker_service_class.assert_called_once_with( host=self.eng.host, topic=worker_api.TOPIC, engine_id=self.eng.engine_id, thread_group_mgr=self.eng.thread_group_mgr ) worker_service = worker_service_class.return_value worker_service.start.assert_called_once_with() # RPC Target target_class.assert_called_once_with( version=service.EngineService.RPC_API_VERSION, server=self.eng.host, topic=self.eng.topic) # RPC server target = target_class.return_value rpc_server_method.assert_called_once_with(target, self.eng) rpc_server = rpc_server_method.return_value self.assertEqual(rpc_server, self.eng._rpc_server, "Failed to create RPC server") rpc_server.start.assert_called_once_with() # RPC client rpc_client = rpc_client_class.return_value rpc_client_class.assert_called_once_with( version=service.EngineService.RPC_API_VERSION) self.assertEqual(rpc_client, self.eng._client, "Failed to create RPC client") # Manage Thread group thread_group_class.assert_called_once_with() manage_thread_group = thread_group_class.return_value manage_thread_group.add_timer.assert_called_once_with( cfg.CONF.periodic_interval, self.eng.service_manage_report ) @mock.patch('heat.common.messaging.get_rpc_server', return_value=mock.Mock()) @mock.patch('oslo_messaging.Target', return_value=mock.Mock()) @mock.patch('heat.common.messaging.get_rpc_client', return_value=mock.Mock()) @mock.patch('heat.engine.stack_lock.StackLock.generate_engine_id', return_value='sample-uuid') @mock.patch('heat.engine.service.ThreadGroupManager', 
return_value=mock.Mock()) @mock.patch('heat.engine.service.EngineListener', return_value=mock.Mock()) @mock.patch('oslo_service.threadgroup.ThreadGroup', return_value=mock.Mock()) def test_engine_service_start_in_non_convergence_mode( self, thread_group_class, engine_listener_class, thread_group_manager_class, sample_uuid_method, rpc_client_class, target_class, rpc_server_method): cfg.CONF.set_default('convergence_engine', False) self._test_engine_service_start( thread_group_class, None, engine_listener_class, thread_group_manager_class, sample_uuid_method, rpc_client_class, target_class, rpc_server_method ) @mock.patch('heat.common.messaging.get_rpc_server', return_value=mock.Mock()) @mock.patch('oslo_messaging.Target', return_value=mock.Mock()) @mock.patch('heat.common.messaging.get_rpc_client', return_value=mock.Mock()) @mock.patch('heat.engine.stack_lock.StackLock.generate_engine_id', return_value=mock.Mock()) @mock.patch('heat.engine.service.ThreadGroupManager', return_value=mock.Mock()) @mock.patch('heat.engine.service.EngineListener', return_value=mock.Mock()) @mock.patch('heat.engine.worker.WorkerService', return_value=mock.Mock()) @mock.patch('oslo_service.threadgroup.ThreadGroup', return_value=mock.Mock()) def test_engine_service_start_in_convergence_mode( self, thread_group_class, worker_service_class, engine_listener_class, thread_group_manager_class, sample_uuid_method, rpc_client_class, target_class, rpc_server_method): cfg.CONF.set_default('convergence_engine', True) self._test_engine_service_start( thread_group_class, worker_service_class, engine_listener_class, thread_group_manager_class, sample_uuid_method, rpc_client_class, target_class, rpc_server_method ) def _test_engine_service_stop( self, service_delete_method, admin_context_method): cfg.CONF.set_default('periodic_interval', 60) self.patchobject(self.eng, 'service_manage_cleanup') self.patchobject(self.eng, 'reset_stack_status') self.eng.start() # Add dummy thread group to test thread_group_mgr.stop() is executed? 
dtg1 = tools.DummyThreadGroup() dtg2 = tools.DummyThreadGroup() self.eng.thread_group_mgr.groups['sample-uuid1'] = dtg1 self.eng.thread_group_mgr.groups['sample-uuid2'] = dtg2 self.eng.service_id = 'sample-service-uuid' orig_stop = self.eng.thread_group_mgr.stop with mock.patch.object(self.eng.thread_group_mgr, 'stop') as stop: stop.side_effect = orig_stop self.eng.stop() # RPC server self.eng._stop_rpc_server.assert_called_once_with() if cfg.CONF.convergence_engine: # WorkerService self.eng.worker_service.stop.assert_called_once_with() # Wait for all active threads to be finished calls = [mock.call('sample-uuid1', True), mock.call('sample-uuid2', True)] self.eng.thread_group_mgr.stop.assert_has_calls(calls, True) # # Manage Thread group self.eng.manage_thread_grp.stop.assert_called_with(False) # Service delete admin_context_method.assert_called_once_with() ctxt = admin_context_method.return_value service_delete_method.assert_called_once_with( ctxt, self.eng.service_id ) @mock.patch.object(service.EngineService, '_stop_rpc_server') @mock.patch.object(worker.WorkerService, 'stop') @mock.patch.object(threadgroup.ThreadGroup, 'stop') @mock.patch('heat.common.context.get_admin_context', return_value=mock.Mock()) @mock.patch('heat.objects.service.Service.delete', return_value=mock.Mock()) def test_engine_service_stop_in_convergence_mode( self, service_delete_method, admin_context_method, thread_group_stop, worker_service_stop, rpc_server_stop): cfg.CONF.set_default('convergence_engine', True) self._test_engine_service_stop( service_delete_method, admin_context_method ) @mock.patch.object(service.EngineService, '_stop_rpc_server') @mock.patch.object(threadgroup.ThreadGroup, 'stop') @mock.patch('heat.common.context.get_admin_context', return_value=mock.Mock()) @mock.patch('heat.objects.service.Service.delete', return_value=mock.Mock()) def test_engine_service_stop_in_non_convergence_mode( self, service_delete_method, admin_context_method, thread_group_stop, rpc_server_stop): cfg.CONF.set_default('convergence_engine', False) self._test_engine_service_stop( service_delete_method, admin_context_method ) @mock.patch('oslo_log.log.setup') def test_engine_service_reset(self, setup_logging_mock): self.eng.reset() setup_logging_mock.assert_called_once_with(cfg.CONF, 'heat')
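

# Minimal, generic illustration (unittest.mock only) of the patch-and-assert
# pattern the engine-service tests above rely on. ExampleService is a
# hypothetical class, not part of Heat; it only shows how a collaborator is
# replaced and its call later verified.

from unittest import mock


class ExampleService(object):
    def stop(self):
        self._stop_rpc_server()

    def _stop_rpc_server(self):
        raise RuntimeError("real RPC server not available in this sketch")


if __name__ == "__main__":
    svc = ExampleService()
    with mock.patch.object(svc, "_stop_rpc_server") as stop_rpc:
        svc.stop()
    stop_rpc.assert_called_once_with()
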
''' cache.py Cache which manages all of the data storage and access as needed ''' import sqlite3 import os import uuid import json import time class Cache: ''' SQLite 3 wrapper to cache the data associated with hitting the NYTimes API ''' # static name for db file # by default, it uses an in-memory database __DB_FILE_NAME = ':memory:' # cache version __ARTICLE_VERSION = 12 # this table stores all of the cached articles results __ARTICLES_TABLE_SQL = ''' CREATE TABLE articles ( id text, response string, PRIMARY KEY (id) ) ''' # we perform searches on this table to see if we previous have searched # for the same results before __SEARCH_TABLE_SQL = ''' CREATE VIRTUAL TABLE search using fts4(phrase, id) ''' # store a reference to to our search metadata __SEARCH_RESULTS_TABLE_SQL = ''' CREATE TABLE search_results ( id text, date int, ids text ); ''' # execute each drop statement 1 by 1 __DROP_ARTICLES_TABLE_SQL= [ 'DROP TABLE articles', 'DROP TABLE search', 'DROP TABLE search_results' ] # database version table __VERSIONS_TABLE_SQL = ''' CREATE TABLE versions ( version int, PRIMARY KEY (version) ) ''' # insert the cache table value __ARTICLES_VERSION_INSERT_SQL= ''' INSERT INTO versions (version) values (?) ''' # hold onto the connection here __connection = None def __init__(self, db_file=__DB_FILE_NAME, create=True): ''' Constructor db_file - path of database file, defaults to __DB_FILE_NAME, or actual database file create - boolean whether or not to blow away the cache, default to true ''' self.__connection = sqlite3.connect(db_file) self.__connection.row_factory = sqlite3.Row # auto execute self.__connection.isolation_level = None # try to create if (create): self.__create_database() def __create_database(self): ''' Create all of the tables that go in the database, should only be called fromt he constructor ''' cursor = self.__connection.cursor() result = None try: # check the database version and update if there's difference result = cursor.execute('SELECT * FROM versions').fetchone() except sqlite3.OperationalError: cursor.execute(self.__VERSIONS_TABLE_SQL) update = False # remove the version if necessary if (result and result['version'] < self.__ARTICLE_VERSION): cursor.execute('DELETE FROM versions') # add the version if necessary if (not result or result['version'] < self.__ARTICLE_VERSION): update = True cursor.execute(self.__ARTICLES_VERSION_INSERT_SQL, (self.__ARTICLE_VERSION,)) # handle the cache table here if (update): for drop_statement in self.__DROP_ARTICLES_TABLE_SQL: try: # fails, doesn't exist cursor.execute(drop_statement) except sqlite3.OperationalError: pass # make each of our results table cursor.execute(self.__ARTICLES_TABLE_SQL) cursor.execute(self.__SEARCH_TABLE_SQL) cursor.execute(self.__SEARCH_RESULTS_TABLE_SQL) # cleanup cursor.close() def get_connection(self): ''' Get a handle to the low level connection ''' return self.__connection def empty_cache(self): ''' Remove the entire database ''' # delete the old database if necessary if (os.path.exists(self.__DB_FILE_NAME)): os.remove(self.__DB_FILE_NAME) def insert_articles(self, phrase, articles): ''' Insert a list of NYTimes Articles into the cache phrase - the phrase which the search was performed under articles - a list of articles to insert ''' cursor = self.__connection.cursor() ids = [] # insert each article for article in articles: id = article['_id'] cursor.execute('INSERT OR IGNORE INTO articles (id, response) VALUES (?, ?)', (id, json.dumps(article))) ids.append(id) # insert into search search_id = 
str(uuid.uuid4()) cursor.execute('INSERT INTO search (phrase, id) VALUES (?, ?)', (phrase, search_id)) # insert the set of articles, always override, assume meta value inserted # earlier never changes cursor.execute('INSERT INTO search_results (id, date, ids) VALUES (?, ?, ?)', (search_id, int(time.time()), json.dumps(ids))) cursor.close() def fetch_articles(self, phrase, limit=1000): ''' Try and get as many articles from the cache as possible phrase - the phrase to search into the articles_virtual_table limit - how many articles to limit by returns a search_result row and articles row list if not empty ''' cursor = self.__connection.cursor() article_list = [] search_result = None article_ids = None # try and get a match match = cursor.execute('SELECT * FROM search WHERE phrase MATCH :phrase', { 'phrase': '*%s*' % (phrase,) }) result = match.fetchone() # I should be able to pull something out if (result): search_id = result['id'] search_result = cursor.execute('SELECT * from search_results WHERE id = ?', (search_id,)).fetchone() # there's something now! if (search_result): try: article_ids = json.loads(search_result['ids']) except Exception as e: article_ids = None if (article_ids): article_results = cursor.execute('SELECT * FROM articles WHERE id IN (\'%s\')' % (('\',\'').join(article_ids),)) for article in article_results: article_list.append(article) cursor.close() return search_result, article_list def close(self): ''' Cleanup the database connection ''' self.__connection.close()
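
# --- Illustrative usage sketch (not part of cache.py above; added for clarity) ---
# A minimal, hedged example of driving the Cache class: the article dicts
# below are made up and only carry the `_id` field that insert_articles()
# actually reads. The default in-memory database is used, so nothing touches
# disk.

if __name__ == '__main__':
    cache = Cache()
    sample_articles = [
        {'_id': 'article-1', 'headline': 'First sample article'},
        {'_id': 'article-2', 'headline': 'Second sample article'},
    ]
    cache.insert_articles('sample phrase', sample_articles)

    # Peek at the raw tables through the low level connection.
    cursor = cache.get_connection().cursor()
    article_count = cursor.execute('SELECT COUNT(*) FROM articles').fetchone()[0]
    search_count = cursor.execute('SELECT COUNT(*) FROM search_results').fetchone()[0]
    print('%d articles cached across %d search(es)' % (article_count, search_count))
    cursor.close()
    cache.close()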
import pytest import pandas._testing as tm from pandas.io.formats.css import CSSResolver, CSSWarning def assert_resolves(css, props, inherited=None): resolve = CSSResolver() actual = resolve(css, inherited=inherited) assert props == actual def assert_same_resolution(css1, css2, inherited=None): resolve = CSSResolver() resolved1 = resolve(css1, inherited=inherited) resolved2 = resolve(css2, inherited=inherited) assert resolved1 == resolved2 @pytest.mark.parametrize( "name,norm,abnorm", [ ( "whitespace", "hello: world; foo: bar", " \t hello \t :\n world \n ; \n foo: \tbar\n\n", ), ("case", "hello: world; foo: bar", "Hello: WORLD; foO: bar"), ("empty-decl", "hello: world; foo: bar", "; hello: world;; foo: bar;\n; ;"), ("empty-list", "", ";"), ], ) def test_css_parse_normalisation(name, norm, abnorm): assert_same_resolution(norm, abnorm) @pytest.mark.parametrize( "invalid_css,remainder", [ # No colon ("hello-world", ""), ("border-style: solid; hello-world", "border-style: solid"), ( "border-style: solid; hello-world; font-weight: bold", "border-style: solid; font-weight: bold", ), # Unclosed string fail # Invalid size ("font-size: blah", "font-size: 1em"), ("font-size: 1a2b", "font-size: 1em"), ("font-size: 1e5pt", "font-size: 1em"), ("font-size: 1+6pt", "font-size: 1em"), ("font-size: 1unknownunit", "font-size: 1em"), ("font-size: 10", "font-size: 1em"), ("font-size: 10 pt", "font-size: 1em"), ], ) def test_css_parse_invalid(invalid_css, remainder): with tm.assert_produces_warning(CSSWarning): assert_same_resolution(invalid_css, remainder) @pytest.mark.parametrize( "shorthand,expansions", [ ("margin", ["margin-top", "margin-right", "margin-bottom", "margin-left"]), ("padding", ["padding-top", "padding-right", "padding-bottom", "padding-left"]), ( "border-width", [ "border-top-width", "border-right-width", "border-bottom-width", "border-left-width", ], ), ( "border-color", [ "border-top-color", "border-right-color", "border-bottom-color", "border-left-color", ], ), ( "border-style", [ "border-top-style", "border-right-style", "border-bottom-style", "border-left-style", ], ), ], ) def test_css_side_shorthands(shorthand, expansions): top, right, bottom, left = expansions assert_resolves( f"{shorthand}: 1pt", {top: "1pt", right: "1pt", bottom: "1pt", left: "1pt"} ) assert_resolves( f"{shorthand}: 1pt 4pt", {top: "1pt", right: "4pt", bottom: "1pt", left: "4pt"} ) assert_resolves( f"{shorthand}: 1pt 4pt 2pt", {top: "1pt", right: "4pt", bottom: "2pt", left: "4pt"}, ) assert_resolves( f"{shorthand}: 1pt 4pt 2pt 0pt", {top: "1pt", right: "4pt", bottom: "2pt", left: "0pt"}, ) with tm.assert_produces_warning(CSSWarning): assert_resolves(f"{shorthand}: 1pt 1pt 1pt 1pt 1pt", {}) @pytest.mark.parametrize( "style,inherited,equiv", [ ("margin: 1px; margin: 2px", "", "margin: 2px"), ("margin: 1px", "margin: 2px", "margin: 1px"), ("margin: 1px; margin: inherit", "margin: 2px", "margin: 2px"), ( "margin: 1px; margin-top: 2px", "", "margin-left: 1px; margin-right: 1px; " + "margin-bottom: 1px; margin-top: 2px", ), ("margin-top: 2px", "margin: 1px", "margin: 1px; margin-top: 2px"), ("margin: 1px", "margin-top: 2px", "margin: 1px"), ( "margin: 1px; margin-top: inherit", "margin: 2px", "margin: 1px; margin-top: 2px", ), ], ) def test_css_precedence(style, inherited, equiv): resolve = CSSResolver() inherited_props = resolve(inherited) style_props = resolve(style, inherited=inherited_props) equiv_props = resolve(equiv) assert style_props == equiv_props @pytest.mark.parametrize( "style,equiv", [ ( "margin: 1px; 
margin-top: inherit", "margin-bottom: 1px; margin-right: 1px; margin-left: 1px", ), ("margin-top: inherit", ""), ("margin-top: initial", ""), ], ) def test_css_none_absent(style, equiv): assert_same_resolution(style, equiv) @pytest.mark.parametrize( "size,resolved", [ ("xx-small", "6pt"), ("x-small", f"{7.5:f}pt"), ("small", f"{9.6:f}pt"), ("medium", "12pt"), ("large", f"{13.5:f}pt"), ("x-large", "18pt"), ("xx-large", "24pt"), ("8px", "6pt"), ("1.25pc", "15pt"), (".25in", "18pt"), ("02.54cm", "72pt"), ("25.4mm", "72pt"), ("101.6q", "72pt"), ("101.6q", "72pt"), ], ) @pytest.mark.parametrize("relative_to", [None, "16pt"]) # invariant to inherited size def test_css_absolute_font_size(size, relative_to, resolved): if relative_to is None: inherited = None else: inherited = {"font-size": relative_to} assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited) @pytest.mark.parametrize( "size,relative_to,resolved", [ ("1em", None, "12pt"), ("1.0em", None, "12pt"), ("1.25em", None, "15pt"), ("1em", "16pt", "16pt"), ("1.0em", "16pt", "16pt"), ("1.25em", "16pt", "20pt"), ("1rem", "16pt", "12pt"), ("1.0rem", "16pt", "12pt"), ("1.25rem", "16pt", "15pt"), ("100%", None, "12pt"), ("125%", None, "15pt"), ("100%", "16pt", "16pt"), ("125%", "16pt", "20pt"), ("2ex", None, "12pt"), ("2.0ex", None, "12pt"), ("2.50ex", None, "15pt"), ("inherit", "16pt", "16pt"), ("smaller", None, "10pt"), ("smaller", "18pt", "15pt"), ("larger", None, f"{14.4:f}pt"), ("larger", "15pt", "18pt"), ], ) def test_css_relative_font_size(size, relative_to, resolved): if relative_to is None: inherited = None else: inherited = {"font-size": relative_to} assert_resolves(f"font-size: {size}", {"font-size": resolved}, inherited=inherited)
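
# --- Illustrative sketch (not part of the test module above; added for clarity) ---
# The helpers above all funnel through CSSResolver; calling it directly makes
# the shorthand expansion and inheritance behaviour easier to see. The
# expected values simply restate what the parametrized tests above assert.

def _demo_css_resolver():
    resolve = CSSResolver()
    props = resolve(
        "margin: 1pt 2pt; font-size: 125%",
        inherited={"font-size": "16pt"},
    )
    # The two-value margin shorthand expands to top/right/bottom/left.
    assert props["margin-top"] == "1pt"
    assert props["margin-right"] == "2pt"
    assert props["margin-bottom"] == "1pt"
    assert props["margin-left"] == "2pt"
    # A relative font-size resolves against the inherited size: 125% of 16pt.
    assert props["font-size"] == "20pt"
    return props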
class Selecter(object): pass class Parent(Selecter): def __call__(self, parent_data_object, child_data_object): return parent_data_object class Child(Selecter): def __call__(self, parent_data_object, child_data_object): return child_data_object class Accessor(object): def __init__(self, selecter): self.selecter = selecter class NameAccessor(Accessor): def __call__(self, parent_data_object, child_data_object): return self.selecter(parent_data_object, child_data_object).name class NodeNameAccessor(Accessor): def __call__(self, parent_data_object, child_data_object): return self.selecter(parent_data_object, child_data_object).node_name class NodeTypeAccessor(Accessor): def __call__(self, parent_data_object, child_data_object): data_object = self.selecter(parent_data_object, child_data_object) try: return data_object.node_type except AttributeError, e: # Horrible hack to silence errors on filtering unicode objects # until we fix the parsing if type(data_object) == unicode: return "unicode" else: raise e class KindAccessor(Accessor): def __call__(self, parent_data_object, child_data_object): return self.selecter(parent_data_object, child_data_object).kind class LambdaAccessor(Accessor): def __init__(self, selecter, func): Accessor.__init__(self, selecter) self.func = func def __call__(self, parent_data_object, child_data_object): return self.func(self.selecter(parent_data_object, child_data_object)) class NamespaceAccessor(Accessor): def __call__(self, parent_data_object, child_data_object): return self.selecter(parent_data_object, child_data_object).namespaces class NameFilter(object): def __init__(self, accessor, members): self.accessor = accessor self.members = members def allow(self, parent_data_object, child_data_object): name = self.accessor(parent_data_object, child_data_object) return name in self.members class GlobFilter(object): def __init__(self, accessor, glob): self.accessor = accessor self.glob = glob def allow(self, parent_data_object, child_data_object): text = self.accessor(parent_data_object, child_data_object) return self.glob.match(text) class FilePathFilter(object): def __init__(self, accessor, target_file, path_handler): self.accessor = accessor self.target_file = target_file self.path_handler = path_handler def allow(self, parent_data_object, child_data_object): location = self.accessor(parent_data_object, child_data_object).file if self.path_handler.includes_directory(self.target_file): # If the target_file contains directory separators then # match against the same length at the ned of the location # location_match = location[-len(self.target_file):] return location_match == self.target_file else: # If there are not separators, match against the whole filename # at the end of the location # # This is to prevent "Util.cpp" matching "PathUtil.cpp" # location_basename = self.path_handler.basename(location) return location_basename == self.target_file class NamespaceFilter(object): def __init__(self, namespace_accessor, name_accessor): self.namespace_accessor = namespace_accessor self.name_accessor = name_accessor def allow(self, parent_data_object, child_data_object): namespaces = self.namespace_accessor(parent_data_object, child_data_object) name = self.name_accessor(parent_data_object, child_data_object) try: namespace, name = name.rsplit("::", 1) except ValueError: namespace, name = "", name return namespace in namespaces class OpenFilter(object): def allow(self, parent_data_object, child_data_object): return True class ClosedFilter(object): def allow(self, 
parent_data_object, child_data_object): return False class NotFilter(object): def __init__(self, child_filter): self.child_filter = child_filter def allow(self, parent_data_object, child_data_object): return not self.child_filter.allow(parent_data_object, child_data_object) class AndFilter(object): def __init__(self, first_filter, second_filter): self.first_filter = first_filter self.second_filter = second_filter def allow(self, parent_data_object, child_data_object): return self.first_filter.allow(parent_data_object, child_data_object) \ and self.second_filter.allow(parent_data_object, child_data_object) class OrFilter(object): def __init__(self, first_filter, second_filter): self.first_filter = first_filter self.second_filter = second_filter def allow(self, parent_data_object, child_data_object): return self.first_filter.allow(parent_data_object, child_data_object) \ or self.second_filter.allow(parent_data_object, child_data_object) class Glob(object): def __init__(self, method, pattern): self.method = method self.pattern = pattern def match(self, name): return self.method(name, self.pattern) class GlobFactory(object): def __init__(self, method): self.method = method def create(self, pattern): return Glob(self.method, pattern) class Gather(object): def __init__(self, accessor, names): self.accessor = accessor self.names = names def allow(self, parent_data_object, child_data_object): self.names.extend( self.accessor(parent_data_object, child_data_object) ) return False class FilterFactory(object): def __init__(self, globber_factory, path_handler): self.globber_factory = globber_factory self.path_handler = path_handler def create_class_filter(self, options): return AndFilter( self.create_members_filter(options), AndFilter( self.create_outline_filter(options), self.create_show_filter(options), ) ) def create_show_filter(self, options): """ Currently only handles the header-file entry """ try: text = options["show"] except KeyError: # Allow through everything except the header-file includes nodes return OrFilter( NotFilter(NameFilter(NodeTypeAccessor(Parent()), ["compounddef"])), NotFilter(NameFilter(NodeTypeAccessor(Child()), ["inc"])) ) if text == "header-file": # Allow through everything, including header-file includes return OpenFilter() # Allow through everything except the header-file includes nodes return OrFilter( NotFilter(NameFilter(NodeTypeAccessor(Parent()), ["compounddef"])), NotFilter(NameFilter(NodeTypeAccessor(Child()), ["inc"])) ) def create_members_filter(self, options): try: text = options["members"] except KeyError: return OrFilter( NotFilter(NameFilter(NodeTypeAccessor(Parent()), ["sectiondef"])), NotFilter(NameFilter(NodeTypeAccessor(Child()), ["memberdef"])) ) if not text.strip(): return OrFilter( NotFilter(NameFilter(NodeTypeAccessor(Child()), ["sectiondef"])), OrFilter( GlobFilter(KindAccessor(Child()), self.globber_factory.create("public*")), NameFilter(KindAccessor(Child()), ["user-defined"]) ) ) # Matches sphinx-autodoc behaviour of comma separated values members = set([x.strip() for x in text.split(",")]) return OrFilter( NotFilter(NameFilter(NodeTypeAccessor(Parent()),["sectiondef"])), NameFilter(NameAccessor(Child()), members) ) def create_outline_filter(self, options): if options.has_key("outline"): return NotFilter(NameFilter(NodeTypeAccessor(Child()), ["description"])) else: return OpenFilter() def create_file_filter(self, filename, options): valid_names = [] filter_ = AndFilter( AndFilter( AndFilter( NotFilter( # Gather the "namespaces" attribute from the # 
compounddef for the file we're rendering and # store the information in the "valid_names" list # # Gather always returns false, so, combined with # the NotFilter this chunk always returns true and # so does not affect the result of the filtering AndFilter( AndFilter( AndFilter( NameFilter(NodeTypeAccessor(Child()), ["compounddef"]), NameFilter(KindAccessor(Child()), ["file"]) ), FilePathFilter( LambdaAccessor(Child(), lambda x: x.location), filename, self.path_handler ) ), Gather(LambdaAccessor(Child(), lambda x: x.namespaces), valid_names) ) ), NotFilter( # Take the valid_names and everytime we handle an # innerclass or innernamespace, check that its name # was one of those initial valid names so that we # never end up rendering a namespace or class that # wasn't in the initial file. Notably this is # required as the location attribute for the # namespace in the xml is unreliable. AndFilter( NameFilter(NodeTypeAccessor(Parent()), ["compounddef"]), AndFilter( AndFilter( NameFilter(NodeTypeAccessor(Child()),["ref"]), NameFilter(NodeNameAccessor(Child()),["innerclass", "innernamespace"]) ), NotFilter(NameFilter( LambdaAccessor(Child(), lambda x: x.content_[0].getValue()), valid_names )) ) ) ) ), NotFilter( # Ignore innerclasses and innernamespaces that are inside a # namespace that is going to be rendered as they will be # rendered with that namespace and we don't want them twice AndFilter( NameFilter(NodeTypeAccessor(Parent()), ["compounddef"]), AndFilter( AndFilter( NameFilter(NodeTypeAccessor(Child()),["ref"]), NameFilter(NodeNameAccessor(Child()),["innerclass", "innernamespace"]) ), NamespaceFilter( NamespaceAccessor(Parent()), LambdaAccessor(Child(), lambda x: x.content_[0].getValue()) ) ) ) ), ), AndFilter( NotFilter( # Ignore memberdefs from files which are different to # the one we're rendering. 
This happens when we have to # cross into a namespace xml file which has entries # from multiple files in it AndFilter( NameFilter(NodeTypeAccessor(Child()), ["memberdef"]), NotFilter( FilePathFilter(LambdaAccessor(Child(), lambda x: x.location), filename, self.path_handler) ) ) ), NotFilter( # Ignore compounddefs which are from another file # (normally means classes and structs which are in a # namespace that we have other interests in) but only # check it if the compounddef is not a namespace # itself, as for some reason compounddefs for # namespaces are registered with just a single file # location even if they namespace is spread over # multiple files AndFilter( AndFilter( NameFilter(NodeTypeAccessor(Child()), ["compounddef"]), NotFilter(NameFilter(KindAccessor(Child()), ["namespace"])) ), NotFilter( FilePathFilter(LambdaAccessor(Child(), lambda x: x.location), filename, self.path_handler) ) ) ) ) ) return AndFilter( self.create_outline_filter(options), filter_ ) def create_index_filter(self, options): filter_ = AndFilter( NotFilter( AndFilter( NameFilter(NodeTypeAccessor(Parent()), ["compounddef"]), AndFilter( NameFilter(NodeTypeAccessor(Child()),["ref"]), NameFilter(NodeNameAccessor(Child()),["innerclass", "innernamespace"]) ) ) ), NotFilter( AndFilter( AndFilter( NameFilter(NodeTypeAccessor(Parent()), ["compounddef"]), NameFilter(KindAccessor(Parent()), ["group"]) ), AndFilter( NameFilter(NodeTypeAccessor(Child()),["sectiondef"]), NameFilter(KindAccessor(Child()),["func"]) ) ) ) ) return AndFilter( self.create_outline_filter(options), filter_ ) def create_open_filter(self): return OpenFilter() def create_file_finder_filter(self, filename): filter_ = AndFilter( AndFilter( NameFilter(NodeTypeAccessor(Child()), ["compounddef"]), NameFilter(KindAccessor(Child()), ["file"]), ), FilePathFilter(LambdaAccessor(Child(), lambda x: x.location), filename, self.path_handler) ) return filter_
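
# --- Illustrative sketch (not part of the module above; added for clarity) ---
# FilterFactory is normally wired up by Breathe itself; a minimal, hedged
# example of composing the primitives directly, using fnmatch as the glob
# method. The _FakeNode objects are hypothetical stand-ins for the parsed
# doxygen data objects that the real accessors operate on.

import fnmatch


class _FakeNode(object):
    def __init__(self, node_type, kind):
        self.node_type = node_type
        self.kind = kind


def _demo_member_filter():
    globber_factory = GlobFactory(fnmatch.fnmatch)
    # Allow only memberdef children whose kind starts with "public".
    public_members = AndFilter(
        NameFilter(NodeTypeAccessor(Child()), ["memberdef"]),
        GlobFilter(KindAccessor(Child()), globber_factory.create("public*")),
    )
    parent = _FakeNode("sectiondef", "public-func")
    assert public_members.allow(parent, _FakeNode("memberdef", "public-func"))
    assert not public_members.allow(parent, _FakeNode("memberdef", "private-func"))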
# coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for methods in the interaction registry.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules import json import os from core.domain import exp_services from core.domain import interaction_registry from core.tests import test_utils from extensions.interactions import base import feconf import python_utils import schema_utils EXPECTED_TERMINAL_INTERACTIONS_COUNT = 1 class InteractionDependencyTests(test_utils.GenericTestBase): """Tests for the calculation of dependencies for interactions.""" def setUp(self): super(InteractionDependencyTests, self).setUp() # Register and login as an editor. self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME) self.login(self.EDITOR_EMAIL) def test_deduplication_of_dependency_ids(self): self.assertItemsEqual( interaction_registry.Registry.get_deduplicated_dependency_ids( ['CodeRepl']), ['skulpt', 'codemirror']) self.assertItemsEqual( interaction_registry.Registry.get_deduplicated_dependency_ids( ['CodeRepl', 'CodeRepl', 'CodeRepl']), ['skulpt', 'codemirror']) self.assertItemsEqual( interaction_registry.Registry.get_deduplicated_dependency_ids( ['CodeRepl', 'LogicProof']), ['skulpt', 'codemirror']) def test_dependency_loads_in_exploration_player_page(self): exp_id = '0' exp_services.load_demo(exp_id) # Ensure that dependencies are added in the exploration reader page. response = self.get_html_response('/explore/%s' % exp_id) response.mustcontain('dependency_html.html') def test_no_dependencies_in_non_exploration_pages(self): response = self.get_html_response(feconf.LIBRARY_INDEX_URL) response.mustcontain(no=['dependency_html.html']) def test_dependencies_loaded_in_exploration_editor(self): exp_services.load_demo('0') # Ensure that dependencies are added in the exploration editor page. 
response = self.get_html_response('/create/0') response.mustcontain('dependency_html.html') self.logout() class InteractionRegistryUnitTests(test_utils.GenericTestBase): """Test for the interaction registry.""" def test_interaction_registry(self): """Do some sanity checks on the interaction registry.""" self.assertEqual( len(interaction_registry.Registry.get_all_interactions()), len(interaction_registry.Registry.get_all_interaction_ids())) def test_get_all_specs(self): """Test the get_all_specs() method.""" specs_dict = interaction_registry.Registry.get_all_specs() self.assertEqual( len(list(specs_dict.keys())), len(interaction_registry.Registry.get_all_interaction_ids())) terminal_interactions_count = 0 for item in specs_dict.values(): self.assertIn(item['display_mode'], base.ALLOWED_DISPLAY_MODES) self.assertTrue(isinstance(item['is_terminal'], bool)) if item['is_terminal']: terminal_interactions_count += 1 self.assertEqual( terminal_interactions_count, EXPECTED_TERMINAL_INTERACTIONS_COUNT) def test_interaction_specs_json_sync_all_specs(self): """Test to ensure that the interaction_specs.json file is upto date with additions in the individual interaction files. """ all_specs = interaction_registry.Registry.get_all_specs() spec_file = os.path.join( 'extensions', 'interactions', 'interaction_specs.json') with python_utils.open_file(spec_file, 'r') as f: specs_from_json = json.loads(f.read()) self.assertDictEqual(all_specs, specs_from_json) def test_interaction_specs_customization_arg_specs_names_are_valid(self): """Test to ensure that all customization argument names in interaction specs only include alphabetic letters and are lowerCamelCase. This is because these properties are involved in the generation of content_ids for customization arguments. """ all_specs = interaction_registry.Registry.get_all_specs() ca_names_in_schema = [] def traverse_schema_to_find_names(schema): """Recursively traverses the schema to find all name fields. Recursion is required because names can be nested within 'type: dict' inside a schema. Args: schema: dict. The schema to traverse. """ if 'name' in schema: ca_names_in_schema.append(schema['name']) schema_type = schema['type'] if schema_type == schema_utils.SCHEMA_TYPE_LIST: traverse_schema_to_find_names(schema['items']) elif schema_type == schema_utils.SCHEMA_TYPE_DICT: for schema_property in schema['properties']: ca_names_in_schema.append(schema_property['name']) traverse_schema_to_find_names(schema_property['schema']) for interaction_id in all_specs: for ca_spec in all_specs[interaction_id]['customization_arg_specs']: ca_names_in_schema.append(ca_spec['name']) traverse_schema_to_find_names(ca_spec['schema']) for name in ca_names_in_schema: self.assertTrue(name.isalpha()) self.assertTrue(name[0].islower()) def test_interaction_specs_customization_arg_default_values_are_valid(self): """Test to ensure that all customization argument default values that contain content_ids are properly set to None. """ all_specs = interaction_registry.Registry.get_all_specs() def traverse_schema_to_find_and_validate_subtitled_content( value, schema): """Recursively traverse the schema to find SubtitledHtml or SubtitledUnicode contained or nested in value. Args: value: *. The value of the customization argument. schema: dict. The customization argument schema. 
""" is_subtitled_html_spec = ( schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and schema['obj_type'] == schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_HTML) is_subtitled_unicode_spec = ( schema['type'] == schema_utils.SCHEMA_TYPE_CUSTOM and schema['obj_type'] == schema_utils.SCHEMA_OBJ_TYPE_SUBTITLED_UNICODE) if is_subtitled_html_spec or is_subtitled_unicode_spec: self.assertIsNone(value['content_id']) elif schema['type'] == schema_utils.SCHEMA_TYPE_LIST: for x in value: traverse_schema_to_find_and_validate_subtitled_content( x, schema['items']) elif schema['type'] == schema_utils.SCHEMA_TYPE_DICT: for schema_property in schema['properties']: traverse_schema_to_find_and_validate_subtitled_content( x[schema_property.name], schema_property['schema'] ) for interaction_id in all_specs: for ca_spec in all_specs[interaction_id]['customization_arg_specs']: traverse_schema_to_find_and_validate_subtitled_content( ca_spec['default_value'], ca_spec['schema']) def test_get_all_specs_for_state_schema_version_for_unsaved_version(self): with self.assertRaisesRegexp( Exception, 'No specs json file found for state schema' ): ( interaction_registry.Registry .get_all_specs_for_state_schema_version(10) )
"""OpenAPI core validation request validators module""" import warnings from openapi_core.casting.schemas.exceptions import CastError from openapi_core.deserializing.exceptions import DeserializeError from openapi_core.exceptions import MissingParameter from openapi_core.exceptions import MissingRequestBody from openapi_core.exceptions import MissingRequiredParameter from openapi_core.exceptions import MissingRequiredRequestBody from openapi_core.schema.parameters import iter_params from openapi_core.security.exceptions import SecurityError from openapi_core.security.factories import SecurityProviderFactory from openapi_core.templating.media_types.exceptions import MediaTypeFinderError from openapi_core.templating.paths.exceptions import PathError from openapi_core.unmarshalling.schemas.enums import UnmarshalContext from openapi_core.unmarshalling.schemas.exceptions import UnmarshalError from openapi_core.unmarshalling.schemas.exceptions import ValidateError from openapi_core.unmarshalling.schemas.factories import ( SchemaUnmarshallersFactory, ) from openapi_core.validation.exceptions import InvalidSecurity from openapi_core.validation.request.datatypes import Parameters from openapi_core.validation.request.datatypes import RequestValidationResult from openapi_core.validation.request.exceptions import ParametersError from openapi_core.validation.validators import BaseValidator class BaseRequestValidator(BaseValidator): @property def schema_unmarshallers_factory(self): spec_resolver = ( self.spec.accessor.dereferencer.resolver_manager.resolver ) return SchemaUnmarshallersFactory( spec_resolver, self.format_checker, self.custom_formatters, context=UnmarshalContext.REQUEST, ) @property def security_provider_factory(self): return SecurityProviderFactory() def _get_parameters(self, request, path, operation): operation_params = operation.get("parameters", []) path_params = path.get("parameters", []) errors = [] seen = set() parameters = Parameters() params_iter = iter_params(operation_params, path_params) for param in params_iter: param_name = param["name"] param_location = param["in"] if (param_name, param_location) in seen: # skip parameter already seen # e.g. 
overriden path item paremeter on operation continue seen.add((param_name, param_location)) try: value = self._get_parameter(param, request) except MissingParameter: continue except ( MissingRequiredParameter, DeserializeError, CastError, ValidateError, UnmarshalError, ) as exc: errors.append(exc) continue else: location = getattr(parameters, param_location) location[param_name] = value if errors: raise ParametersError(context=errors, parameters=parameters) return parameters def _get_parameter(self, param, request): name = param["name"] deprecated = param.getkey("deprecated", False) if deprecated: warnings.warn( f"{name} parameter is deprecated", DeprecationWarning, ) param_location = param["in"] location = request.parameters[param_location] try: return self._get_param_or_header_value(param, location) except KeyError: required = param.getkey("required", False) if required: raise MissingRequiredParameter(name) raise MissingParameter(name) def _get_security(self, request, operation): security = None if "security" in self.spec: security = self.spec / "security" if "security" in operation: security = operation / "security" if not security: return {} for security_requirement in security: try: return { scheme_name: self._get_security_value(scheme_name, request) for scheme_name in list(security_requirement.keys()) } except SecurityError: continue raise InvalidSecurity def _get_security_value(self, scheme_name, request): security_schemes = self.spec / "components#securitySchemes" if scheme_name not in security_schemes: return scheme = security_schemes[scheme_name] security_provider = self.security_provider_factory.create(scheme) return security_provider(request) def _get_body(self, request, operation): if "requestBody" not in operation: return None request_body = operation / "requestBody" raw_body = self._get_body_value(request_body, request) media_type, mimetype = self._get_media_type( request_body / "content", request.mimetype ) deserialised = self._deserialise_data(mimetype, raw_body) casted = self._cast(media_type, deserialised) if "schema" not in media_type: return casted schema = media_type / "schema" body = self._unmarshal(schema, casted) return body def _get_body_value(self, request_body, request): if not request.body: if request_body.getkey("required", False): raise MissingRequiredRequestBody(request) raise MissingRequestBody(request) return request.body class RequestParametersValidator(BaseRequestValidator): def validate(self, request): try: path, operation, _, path_result, _ = self._find_path( request.method, request.full_url_pattern ) except PathError as exc: return RequestValidationResult(errors=[exc]) request.parameters.path = ( request.parameters.path or path_result.variables ) try: params = self._get_parameters(request, path, operation) except ParametersError as exc: params = exc.parameters params_errors = exc.context else: params_errors = [] return RequestValidationResult( errors=params_errors, parameters=params, ) class RequestBodyValidator(BaseRequestValidator): def validate(self, request): try: _, operation, _, _, _ = self._find_path( request.method, request.full_url_pattern ) except PathError as exc: return RequestValidationResult(errors=[exc]) try: body = self._get_body(request, operation) except ( MissingRequiredRequestBody, MediaTypeFinderError, DeserializeError, CastError, ValidateError, UnmarshalError, ) as exc: body = None errors = [exc] except MissingRequestBody: body = None errors = [] else: errors = [] return RequestValidationResult( errors=errors, body=body, ) class 
RequestSecurityValidator(BaseRequestValidator): def validate(self, request): try: _, operation, _, _, _ = self._find_path( request.method, request.full_url_pattern ) except PathError as exc: return RequestValidationResult(errors=[exc]) try: security = self._get_security(request, operation) except InvalidSecurity as exc: return RequestValidationResult(errors=[exc]) return RequestValidationResult( errors=[], security=security, ) class RequestValidator(BaseRequestValidator): def validate(self, request): try: path, operation, _, path_result, _ = self._find_path( request.method, request.full_url_pattern ) # don't process if operation errors except PathError as exc: return RequestValidationResult(errors=[exc]) try: security = self._get_security(request, operation) except InvalidSecurity as exc: return RequestValidationResult(errors=[exc]) request.parameters.path = ( request.parameters.path or path_result.variables ) try: params = self._get_parameters(request, path, operation) except ParametersError as exc: params = exc.parameters params_errors = exc.context else: params_errors = [] try: body = self._get_body(request, operation) except ( MissingRequiredRequestBody, MediaTypeFinderError, DeserializeError, CastError, ValidateError, UnmarshalError, ) as exc: body = None body_errors = [exc] except MissingRequestBody: body = None body_errors = [] else: body_errors = [] errors = params_errors + body_errors return RequestValidationResult( errors=errors, body=body, parameters=params, security=security, )
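
# --- Illustrative sketch (not part of the module above; added for clarity) ---
# _get_parameters() above deduplicates parameters by (name, location) so that
# an operation-level parameter overrides a path-item one. A self-contained
# miniature of that rule; the plain dicts below are hypothetical parameter
# objects, not openapi-core spec types, and the example assumes operation
# parameters are yielded before path-item parameters (as the override comment
# in _get_parameters suggests).

def _dedupe_params(operation_params, path_params):
    seen = set()
    winners = []
    for param in list(operation_params) + list(path_params):
        key = (param["name"], param["in"])
        if key in seen:
            continue  # an earlier (operation-level) definition already won
        seen.add(key)
        winners.append(param)
    return winners


_result = _dedupe_params(
    [{"name": "limit", "in": "query", "schema": {"type": "integer"}}],
    [{"name": "limit", "in": "query"}, {"name": "petId", "in": "path"}],
)
assert [p["name"] for p in _result] == ["limit", "petId"]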
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Read and write for the RecordIO data format.""" from __future__ import absolute_import from collections import namedtuple import ctypes import struct import numbers import numpy as np from .base import _LIB from .base import RecordIOHandle from .base import check_call from .base import c_str try: import cv2 except ImportError: cv2 = None class MXRecordIO(object): """Reads/writes `RecordIO` data format, supporting sequential read and write. Examples --------- >>> record = mx.recordio.MXRecordIO('tmp.rec', 'w') <mxnet.recordio.MXRecordIO object at 0x10ef40ed0> >>> for i in range(5): ... record.write('record_%d'%i) >>> record.close() >>> record = mx.recordio.MXRecordIO('tmp.rec', 'r') >>> for i in range(5): ... item = record.read() ... print(item) record_0 record_1 record_2 record_3 record_4 >>> record.close() Parameters ---------- uri : string Path to the record file. flag : string 'w' for write or 'r' for read. """ def __init__(self, uri, flag): self.uri = c_str(uri) self.handle = RecordIOHandle() self.flag = flag self.is_open = False self.open() def open(self): """Opens the record file.""" if self.flag == "w": check_call(_LIB.MXRecordIOWriterCreate(self.uri, ctypes.byref(self.handle))) self.writable = True elif self.flag == "r": check_call(_LIB.MXRecordIOReaderCreate(self.uri, ctypes.byref(self.handle))) self.writable = False else: raise ValueError("Invalid flag %s"%self.flag) self.is_open = True def __del__(self): self.close() def __getstate__(self): """Override pickling behavior.""" # pickling pointer is not allowed is_open = self.is_open self.close() d = dict(self.__dict__) d['is_open'] = is_open uri = self.uri.value try: uri = uri.decode('utf-8') except AttributeError: pass del d['handle'] d['uri'] = uri return d def __setstate__(self, d): """Restore from pickled.""" self.__dict__ = d is_open = d['is_open'] self.is_open = False self.handle = RecordIOHandle() self.uri = c_str(self.uri) if is_open: self.open() def close(self): """Closes the record file.""" if not self.is_open: return if self.writable: check_call(_LIB.MXRecordIOWriterFree(self.handle)) else: check_call(_LIB.MXRecordIOReaderFree(self.handle)) self.is_open = False def reset(self): """Resets the pointer to first item. If the record is opened with 'w', this function will truncate the file to empty. Examples --------- >>> record = mx.recordio.MXRecordIO('tmp.rec', 'r') >>> for i in range(2): ... item = record.read() ... print(item) record_0 record_1 >>> record.reset() # Pointer is reset. >>> print(record.read()) # Started reading from start again. record_0 >>> record.close() """ self.close() self.open() def write(self, buf): """Inserts a string buffer as a record. Examples --------- >>> record = mx.recordio.MXRecordIO('tmp.rec', 'w') >>> for i in range(5): ... 
record.write('record_%d'%i) >>> record.close() Parameters ---------- buf : string (python2), bytes (python3) Buffer to write. """ assert self.writable check_call(_LIB.MXRecordIOWriterWriteRecord(self.handle, ctypes.c_char_p(buf), ctypes.c_size_t(len(buf)))) def read(self): """Returns record as a string. Examples --------- >>> record = mx.recordio.MXRecordIO('tmp.rec', 'r') >>> for i in range(5): ... item = record.read() ... print(item) record_0 record_1 record_2 record_3 record_4 >>> record.close() Returns ---------- buf : string Buffer read. """ assert not self.writable buf = ctypes.c_char_p() size = ctypes.c_size_t() check_call(_LIB.MXRecordIOReaderReadRecord(self.handle, ctypes.byref(buf), ctypes.byref(size))) if buf: buf = ctypes.cast(buf, ctypes.POINTER(ctypes.c_char*size.value)) return buf.contents.raw else: return None class MXIndexedRecordIO(MXRecordIO): """Reads/writes `RecordIO` data format, supporting random access. Examples --------- >>> for i in range(5): ... record.write_idx(i, 'record_%d'%i) >>> record.close() >>> record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'r') >>> record.read_idx(3) record_3 Parameters ---------- idx_path : str Path to the index file. uri : str Path to the record file. Only supports seekable file types. flag : str 'w' for write or 'r' for read. key_type : type Data type for keys. """ def __init__(self, idx_path, uri, flag, key_type=int): self.idx_path = idx_path self.idx = {} self.keys = [] self.key_type = key_type self.fidx = None super(MXIndexedRecordIO, self).__init__(uri, flag) def open(self): super(MXIndexedRecordIO, self).open() self.idx = {} self.keys = [] self.fidx = open(self.idx_path, self.flag) if not self.writable: for line in iter(self.fidx.readline, ''): line = line.strip().split('\t') key = self.key_type(line[0]) self.idx[key] = int(line[1]) self.keys.append(key) def close(self): """Closes the record file.""" if not self.is_open: return super(MXIndexedRecordIO, self).close() self.fidx.close() def __getstate__(self): """Override pickling behavior.""" d = super(MXIndexedRecordIO, self).__getstate__() d['fidx'] = None return d def seek(self, idx): """Sets the current read pointer position. This function is internally called by `read_idx(idx)` to find the current reader pointer position. It doesn't return anything.""" assert not self.writable pos = ctypes.c_size_t(self.idx[idx]) check_call(_LIB.MXRecordIOReaderSeek(self.handle, pos)) def tell(self): """Returns the current position of write head. Examples --------- >>> record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'w') >>> print(record.tell()) 0 >>> for i in range(5): ... record.write_idx(i, 'record_%d'%i) ... print(record.tell()) 16 32 48 64 80 """ assert self.writable pos = ctypes.c_size_t() check_call(_LIB.MXRecordIOWriterTell(self.handle, ctypes.byref(pos))) return pos.value def read_idx(self, idx): """Returns the record at given index. Examples --------- >>> record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'w') >>> for i in range(5): ... record.write_idx(i, 'record_%d'%i) >>> record.close() >>> record = mx.recordio.MXIndexedRecordIO('tmp.idx', 'tmp.rec', 'r') >>> record.read_idx(3) record_3 """ self.seek(idx) return self.read() def write_idx(self, idx, buf): """Inserts input record at given index. Examples --------- >>> for i in range(5): ... record.write_idx(i, 'record_%d'%i) >>> record.close() Parameters ---------- idx : int Index of a file. buf : Record to write. 
""" key = self.key_type(idx) pos = self.tell() self.write(buf) self.fidx.write('%s\t%d\n'%(str(key), pos)) self.idx[key] = pos self.keys.append(key) IRHeader = namedtuple('HEADER', ['flag', 'label', 'id', 'id2']) """An alias for HEADER. Used to store metadata (e.g. labels) accompanying a record. See mxnet.recordio.pack and mxnet.recordio.pack_img for example uses. Parameters ---------- flag : int Available for convenience, can be set arbitrarily. label : float or an array of float Typically used to store label(s) for a record. id: int Usually a unique id representing record. id2: int Higher order bits of the unique id, should be set to 0 (in most cases). """ _IR_FORMAT = 'IfQQ' _IR_SIZE = struct.calcsize(_IR_FORMAT) def pack(header, s): """Pack a string into MXImageRecord. Parameters ---------- header : IRHeader Header of the image record. ``header.label`` can be a number or an array. See more detail in ``IRHeader``. s : str Raw image string to be packed. Returns ------- s : str The packed string. Examples -------- >>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3] >>> id = 2574 >>> header = mx.recordio.IRHeader(0, label, id, 0) >>> with open(path, 'r') as file: ... s = file.read() >>> packed_s = mx.recordio.pack(header, s) """ header = IRHeader(*header) if isinstance(header.label, numbers.Number): header = header._replace(flag=0) else: label = np.asarray(header.label, dtype=np.float32) header = header._replace(flag=label.size, label=0) s = label.tostring() + s s = struct.pack(_IR_FORMAT, *header) + s return s def unpack(s): """Unpack a MXImageRecord to string. Parameters ---------- s : str String buffer from ``MXRecordIO.read``. Returns ------- header : IRHeader Header of the image record. s : str Unpacked string. Examples -------- >>> record = mx.recordio.MXRecordIO('test.rec', 'r') >>> item = record.read() >>> header, s = mx.recordio.unpack(item) >>> header HEADER(flag=0, label=14.0, id=20129312, id2=0) """ header = IRHeader(*struct.unpack(_IR_FORMAT, s[:_IR_SIZE])) s = s[_IR_SIZE:] if header.flag > 0: header = header._replace(label=np.frombuffer(s, np.float32, header.flag)) s = s[header.flag*4:] return header, s def unpack_img(s, iscolor=-1): """Unpack a MXImageRecord to image. Parameters ---------- s : str String buffer from ``MXRecordIO.read``. iscolor : int Image format option for ``cv2.imdecode``. Returns ------- header : IRHeader Header of the image record. img : numpy.ndarray Unpacked image. Examples -------- >>> record = mx.recordio.MXRecordIO('test.rec', 'r') >>> item = record.read() >>> header, img = mx.recordio.unpack_img(item) >>> header HEADER(flag=0, label=14.0, id=20129312, id2=0) >>> img array([[[ 23, 27, 45], [ 28, 32, 50], ..., [ 36, 40, 59], [ 35, 39, 58]], ..., [[ 91, 92, 113], [ 97, 98, 119], ..., [168, 169, 167], [166, 167, 165]]], dtype=uint8) """ header, s = unpack(s) img = np.frombuffer(s, dtype=np.uint8) assert cv2 is not None img = cv2.imdecode(img, iscolor) return header, img def pack_img(header, img, quality=95, img_fmt='.jpg'): """Pack an image into ``MXImageRecord``. Parameters ---------- header : IRHeader Header of the image record. ``header.label`` can be a number or an array. See more detail in ``IRHeader``. img : numpy.ndarray Image to be packed. quality : int Quality for JPEG encoding in range 1-100, or compression for PNG encoding in range 1-9. img_fmt : str Encoding of the image (.jpg for JPEG, .png for PNG). Returns ------- s : str The packed string. 
Examples -------- >>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3] >>> id = 2574 >>> header = mx.recordio.IRHeader(0, label, id, 0) >>> img = cv2.imread('test.jpg') >>> packed_s = mx.recordio.pack_img(header, img) """ assert cv2 is not None jpg_formats = ['.JPG', '.JPEG'] png_formats = ['.PNG'] encode_params = None if img_fmt.upper() in jpg_formats: encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality] elif img_fmt.upper() in png_formats: encode_params = [cv2.IMWRITE_PNG_COMPRESSION, quality] ret, buf = cv2.imencode(img_fmt, img, encode_params) assert ret, 'failed to encode image' return pack(header, buf.tostring())
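
# --- Illustrative usage sketch (not part of the module above; added for clarity) ---
# A minimal, hedged round trip through pack()/unpack() without touching any
# record file: the label/id values are arbitrary and the payload is a plain
# byte string rather than an encoded image.

if __name__ == '__main__':
    _header = IRHeader(flag=0, label=14.0, id=20129312, id2=0)
    _packed = pack(_header, b'raw-image-bytes')
    _unpacked_header, _payload = unpack(_packed)
    assert _payload == b'raw-image-bytes'
    assert _unpacked_header.id == 20129312
    assert _unpacked_header.label == 14.0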
# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """A collection of ResourceGraphs. Processes multiple ResourceGraphs, all presumably from requests to the same site. Common urls are collected in Bags and different statistics on the relationship between bags are collected. """ import collections import json import sys import urlparse from collections import defaultdict import content_classification_lens import graph import user_satisfied_lens class GraphSack(object): """Aggreate of RequestDependencyGraphs. Collects RequestDependencyGraph nodes into bags, where each bag contains the nodes with common urls. Dependency edges are tracked between bags (so that each bag may be considered as a node of a graph). This graph of bags is referred to as a sack. Each bag is associated with a dag.Node, even though the bag graph may not be a DAG. The edges are annotated with list of graphs and nodes that generated them. """ # See CoreSet(). CORE_THRESHOLD = 0.8 _GraphInfo = collections.namedtuple('_GraphInfo', ( 'cost', # The graph cost (aka critical path length). )) def __init__(self): # Each bag in our sack is named as indicated by this map. self._name_to_bag = {} # List our edges by bag pairs: (from_bag, to_bag) -> graph.Edge. self._edges = {} # Maps graph -> _GraphInfo structures for each graph we've consumed. self._graph_info = {} # How we generate names. self._name_generator = lambda n: n.request.url # Our graph, updated after each ConsumeGraph. self._graph = None def SetNameGenerator(self, generator): """Set the generator we use for names. This will define the equivalence class of requests we use to define sacks. Args: generator: a function taking a RequestDependencyGraph node and returning a string. """ self._name_generator = generator def ConsumeGraph(self, request_graph): """Add a graph and process. Args: graph: (RequestDependencyGraph) the graph to add. """ assert graph not in self._graph_info cost = request_graph.Cost() self._graph_info[request_graph] = self._GraphInfo(cost=cost) for n in request_graph.graph.Nodes(): self.AddNode(request_graph, n) # TODO(mattcary): this is inefficient but our current API doesn't require an # explicit graph creation from the client. self._graph = graph.DirectedGraph(self.bags, self._edges.itervalues()) def GetBag(self, node): """Find the bag for a node, or None if not found.""" return self._name_to_bag.get(self._name_generator(node), None) def AddNode(self, request_graph, node): """Add a node to our collection. Args: graph: (RequestDependencyGraph) the graph in which the node lives. node: (RequestDependencyGraph node) the node to add. Returns: The Bag containing the node. """ sack_name = self._name_generator(node) if sack_name not in self._name_to_bag: self._name_to_bag[sack_name] = Bag(self, sack_name) bag = self._name_to_bag[sack_name] bag.AddNode(request_graph, node) return bag def AddEdge(self, from_bag, to_bag): """Add an edge between two bags.""" if (from_bag, to_bag) not in self._edges: self._edges[(from_bag, to_bag)] = graph.Edge(from_bag, to_bag) def CoreSet(self, *graph_sets): """Compute the core set of this sack. The core set of a sack is the set of resource that are common to most of the graphs in the sack. A core set of a set of graphs are the resources that appear with frequency at least CORE_THRESHOLD. 
For a collection of graph sets, for instance pulling the same page under different network connections, we intersect the core sets to produce a page core set that describes the key resources used by the page. See https://goo.gl/LmqQRS for context and discussion. Args: graph_sets: one or more collection of graphs to compute core sets. If one graph set is given, its core set is computed. If more than one set is given, the page core set of all sets is computed (the intersection of core sets). If no graph set is given, the core of all graphs is computed. Returns: A set of bags in the core set. """ if not graph_sets: graph_sets = [self._graph_info.keys()] return reduce(lambda a, b: a & b, (self._SingleCore(s) for s in graph_sets)) @classmethod def CoreSimilarity(cls, a, b): """Compute the similarity of two core sets. We use the Jaccard index. See https://goo.gl/LmqQRS for discussion. Args: a: The first core set, as a set of strings. b: The second core set, as a set of strings. Returns: A similarity score between zero and one. If both sets are empty the similarity is zero. """ if not a and not b: return 0 return float(len(a & b)) / len(a | b) @property def num_graphs(self): return len(self.graph_info) @property def graph_info(self): return self._graph_info @property def bags(self): return self._name_to_bag.values() def _SingleCore(self, graph_set): core = set() graph_set = set(graph_set) num_graphs = len(graph_set) for b in self.bags: count = sum([g in graph_set for g in b.graphs]) if float(count) / num_graphs > self.CORE_THRESHOLD: core.add(b) return core @classmethod def _MakeShortname(cls, url): # TODO(lizeb): Move this method to a convenient common location. parsed = urlparse.urlparse(url) if parsed.scheme == 'data': if ';' in parsed.path: kind, _ = parsed.path.split(';', 1) else: kind, _ = parsed.path.split(',', 1) return 'data:' + kind path = parsed.path[:10] hostname = parsed.hostname if parsed.hostname else '?.?.?' return hostname + '/' + path class Bag(graph.Node): def __init__(self, sack, name): super(Bag, self).__init__() self._sack = sack self._name = name self._label = GraphSack._MakeShortname(name) # Maps a ResourceGraph to its Nodes contained in this Bag. self._graphs = defaultdict(set) @property def name(self): return self._name @property def label(self): return self._label @property def graphs(self): return self._graphs.iterkeys() @property def num_nodes(self): return sum(len(g) for g in self._graphs.itervalues()) def GraphNodes(self, g): return self._graphs.get(g, set()) def AddNode(self, request_graph, node): if node in self._graphs[request_graph]: return # Already added. self._graphs[request_graph].add(node) for edge in request_graph.graph.OutEdges(node): out_bag = self._sack.AddNode(request_graph, edge.to_node) self._sack.AddEdge(self, out_bag)
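
# --- Illustrative sketch (not part of the module above; added for clarity) ---
# CoreSimilarity() is just the Jaccard index over two core sets; a tiny
# example with made-up resource names (two of the four distinct resources are
# shared, so the similarity is 0.5).

def _demo_core_similarity():
  core_wifi = set(['index.html', 'app.js', 'style.css'])
  core_3g = set(['index.html', 'app.js', 'hero.jpg'])
  assert GraphSack.CoreSimilarity(core_wifi, core_3g) == 0.5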
# -*- coding: utf-8 -*- import json import base64 from functools import partial from pytest import fixture from flask import url_for from flask_perm.services import SuperAdminService class Client(object): def __init__(self, app, current_user=None): self.app = app self.current_user = current_user @property def client(self): return self.app.test_client() def request(self, method, url, **kwargs): headers = kwargs.get('headers', {}) code = '%s:%s' % ( '[email protected]', 'test' ) headers['Authorization'] = 'Basic ' + base64.b64encode(code) kwargs['headers'] = headers return self.client.open(url, method=method, **kwargs) def __getattr__(self, method): return partial(self.request, method) @fixture def super_admin(app, perm): return SuperAdminService.create('[email protected]', 'test') @fixture def client(app, perm, super_admin): return Client(app, super_admin) @fixture def permission(request, client): code = '%s.%s' % (request.module.__name__, request.function.__name__) resp = client.post( url_for('flask_perm_api.add_permission'), data=json.dumps(dict(title='Test Permission', code=code)), content_type='application/json', ) return json.loads(resp.data)['data'] @fixture def user_group(request, client): code = 'code.%s.%s' % (request.module.__name__, request.function.__name__) resp = client.post( url_for('flask_perm_api.add_user_group'), data=json.dumps(dict(title='Test UserGroup', code=code)), content_type='application/json', ) return json.loads(resp.data)['data'] def test_add_permission(permission): assert permission['id'] assert permission['title'] == 'Test Permission' assert permission['code'] == 'tests.test_blueprint.test_add_permission' def test_get_permissions(client, permission): resp = client.get(url_for('flask_perm_api.get_permissions')) assert resp.status_code == 200 assert permission in json.loads(resp.data)['data'] def test_filter_permission_by_id0(client, permission): resp = client.get(url_for('flask_perm_api.get_permissions'), query_string={ '_filters': '{"id": 0}', }) assert resp.status_code == 200 assert not json.loads(resp.data)['data'] def test_filter_permission_by_permission_id(client, permission): resp = client.get(url_for('flask_perm_api.get_permissions'), query_string={ '_filters': '{"id": %s}' % permission['id'], }) assert resp.status_code == 200 assert permission in json.loads(resp.data)['data'] def test_get_permission(client, permission): resp = client.get( url_for( 'flask_perm_api.get_permission', permission_id=permission['id'] ) ) assert resp.status_code == 200 assert permission == json.loads(resp.data)['data'] def test_update_permission(client, permission): resp = client.put( url_for( 'flask_perm_api.update_permission', permission_id=permission['id'], ), data=json.dumps(dict(title='Test Permission!', code='test_blueprint.test_update_permission!')), content_type='application/json', ) assert resp.status_code == 200 data = json.loads(resp.data) assert data['data']['id'] assert data['data']['title'] == 'Test Permission!' assert data['data']['code'] == 'test_blueprint.test_update_permission!' 
def test_delete_permission(client, permission): resp = client.delete( url_for( 'flask_perm_api.delete_permission', permission_id=permission['id'] ) ) assert resp.status_code == 200 resp = client.get( url_for( 'flask_perm_api.get_permission', permission_id=permission['id'] ), ) assert resp.status_code == 404 def add_user_permission(client, user_id, permission_id): return client.post( url_for( 'flask_perm_api.add_user_permission', ), data=json.dumps(dict( user_id=user_id, permission_id=permission_id )), content_type='application/json', ) def add_user_group_member(client, user_id, user_group_id): return client.post( url_for( 'flask_perm_api.add_user_group_member', ), data=json.dumps(dict( user_id=user_id, user_group_id=user_group_id, )), content_type='application/json', ) def add_user_group_permission(client, user_group_id, permission_id): return client.post( url_for( 'flask_perm_api.add_user_group_permission', ), data=json.dumps(dict( user_group_id=user_group_id, permission_id=permission_id )), content_type='application/json', ) def test_add_user_permission(client, permission, perm): resp = add_user_permission(client, 1, permission['id']) assert resp.status_code == 200 assert perm.has_permission(1, 'tests.test_blueprint.test_add_user_permission') def test_revoke_user_permission(client, perm, permission): resp = add_user_permission(client, 1, permission['id']) id = json.loads(resp.data)['data']['id'] resp = client.delete( url_for( 'flask_perm_api.revoke_user_permission', user_permission_id=id, ) ) assert resp.status_code == 200 assert not perm.has_permission(1, 'tests.test_blueprint.test_revoke_user_permission') def test_add_user_group_permissions(client, permission, user_group): resp = add_user_group_permission(client, user_group['id'], permission['id']) assert resp.status_code == 200 def test_revoke_user_group_permissions(client, permission, user_group): resp = add_user_group_permission(client, user_group['id'], permission['id']) id = json.loads(resp.data)['data']['id'] resp = client.delete( url_for( 'flask_perm_api.revoke_user_group_permission', user_group_permission_id=id, ) ) assert resp.status_code == 200 def test_get_user_permissions_by_user_id(client, permission): resp = add_user_permission(client, 1, permission['id']) resp = client.get( url_for( 'flask_perm_api.get_user_permissions', ), query_string={'_filters': '{"user_id":1}'} ) assert resp.status_code ==200 assert json.loads(resp.data)['data'] def test_get_user_permissions_by_permission_id(client, permission): resp = add_user_permission(client, 1, permission['id']) resp = client.get( url_for( 'flask_perm_api.get_user_permissions', ), query_string={'_filters': '{"permission_id":%s}' % permission['id']} ) assert resp.status_code ==200 assert json.loads(resp.data)['data'] def test_get_user_group_permissions_by_user_id(client, permission): resp = add_user_group_permission(client, 1, permission['id']) resp = client.get( url_for( 'flask_perm_api.get_user_group_permissions', ), query_string={'_filters': '{"user_group_id":1}'} ) assert resp.status_code ==200 assert json.loads(resp.data)['data'] def test_get_user_group_permissions_by_permission_id(client, permission): resp = add_user_group_permission(client, 1, permission['id']) resp = client.get( url_for( 'flask_perm_api.get_user_group_permissions', ), query_string={'_filters': '{"permission_id":%s}' % permission['id']} ) assert resp.status_code ==200 assert json.loads(resp.data)['data'] def test_add_user_group(client, user_group): assert user_group['id'] def test_get_user_groups(client, 
user_group): resp = client.get( url_for( 'flask_perm_api.get_user_groups' ) ) assert resp.status_code == 200 assert json.loads(resp.data)['data'] def test_update_user_group(client, user_group): resp = client.put( url_for( 'flask_perm_api.update_user_group', user_group_id=user_group['id'], ), data=json.dumps(dict(title='updated')), content_type='application/json' ) assert resp.status_code == 200 assert json.loads(resp.data)['data']['title'] == 'updated' def test_delete_user_group(client, user_group): resp = client.delete( url_for( 'flask_perm_api.delete_user_group', user_group_id=user_group['id'], ), ) assert resp.status_code == 200 def test_add_user_to_user_group(client, user_group): resp = add_user_group_member(client, 1, user_group['id']) assert resp.status_code == 200 def test_delete_user_from_user_group(client, user_group): resp = add_user_group_member(client, 1, user_group['id']) id = json.loads(resp.data)['data']['id'] resp = client.delete( url_for( 'flask_perm_api.delete_user_from_user_group', user_group_member_id=id ) ) assert resp.status_code == 200 def test_get_user_group_members(client, user_group): add_user_group_member(client, 1, user_group['id']) resp = client.get( url_for( 'flask_perm_api.get_user_group_members', ), query_string={ '_filters': '{"user_group_id":%s}' % user_group['id'], } ) assert resp.status_code == 200 assert json.loads(resp.data)['data'] def test_get_users(client): resp = client.get(url_for('flask_perm_api.get_users')) assert resp.status_code == 200 assert isinstance(json.loads(resp.data)['data'], list) def test_get_user(client): resp = client.get(url_for('flask_perm_api.get_user', user_id=1)) assert resp.status_code == 200 assert json.loads(resp.data)['data']['id'] == 1
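
# --- Illustrative sketch (not part of the test module above; added for clarity) ---
# Client.__getattr__ above turns attribute access into HTTP verbs by binding
# the verb name with functools.partial; a self-contained miniature of that
# dispatch trick (the _VerbDispatcher class is hypothetical and only echoes
# its arguments back):

from functools import partial as _partial


class _VerbDispatcher(object):
    def request(self, method, url, **kwargs):
        return (method.upper(), url, kwargs)

    def __getattr__(self, method):
        # Only called for attributes that don't exist, e.g. 'get' or 'post'.
        return _partial(self.request, method)


_dispatcher = _VerbDispatcher()
assert _dispatcher.get('/ping') == ('GET', '/ping', {})
assert _dispatcher.post('/items', json={'a': 1}) == ('POST', '/items', {'json': {'a': 1}})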
#!/usr/bin/env python # Copyright (c) 2016 - 2017, Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """``test_tool_generate.py`` `ToolGeneral Unittests` """ from unittest.mock import Mock, MagicMock import pytest import itertools from testlib import clissh from testlib import dev_linux_host from testlib.dev_linux_host import GenericLinuxHost from testlib.cli_template import CmdStatus from testlib.custom_exceptions import UICmdException from testlib.linux import service_lib from testlib.linux.tool_general import GenericTool, RC_SERVICE_INACTIVE class CmdExecSimul(object): """Simulate clissh (blackbox). Specify commands behavior - an (sequence of) input command(s) to output(s) including side effect(s). """ MAKE_ITER_MAP = [ iter, itertools.cycle, ] def __init__(self, cmd_exec_sim, cycle=False): super(CmdExecSimul, self).__init__() self.sim_cmd_iter = None if cmd_exec_sim: self.set_simul(cmd_exec_sim, cycle=cycle) def set_simul(self, cmd_exec_sim, cycle=False): make_iterable = self.MAKE_ITER_MAP[cycle] if isinstance(cmd_exec_sim, list): pass else: cmd_exec_sim = [cmd_exec_sim] self.sim_cmd_iter = make_iterable(cmd_exec_sim) def __call__(self, *args, **kwargs): """Any exception raised is considered an expected behavior (side_effect). The mocked signature: exec_command(command, timeout=None) """ if args: command = args[0] mock_obj = self._find_match(command) if not mock_obj: raise self.InputCommandNoMatch(command) return mock_obj() else: raise self.InputCommandNoMatch(None) def _find_match(self, command): sim_cmd = None try: sim_cmd = next(self.sim_cmd_iter) except StopIteration: assert False if isinstance(sim_cmd, MagicMock): mock_obj = sim_cmd elif isinstance(sim_cmd, dict): mock_obj = sim_cmd.get(command) # TODO; implement other match representatinos if isinstance(mock_obj, Mock): return mock_obj if isinstance(mock_obj, dict): return MagicMock(**mock_obj) # TODO; implement other mock representatinos class InputCommandNoMatch(Exception): def __init__(self, command): super(CmdExecSimul.InputCommandNoMatch, self).__init__() class FakeSSH(clissh.CLISSH): login_status = True def __init__(self, *args, **kwargs): pass def shell_read(self, *args, **kwargs): pass def exec_command(self, *args, **kwargs): pass class MockSSH(FakeSSH): def __init__(self, *args, **kwargs): self.exec_command = MagicMock(wraps=self._exec_cmd_wrappee, **kwargs) _exec_cmd_wrappee = FakeSSH.exec_command class SimulatedSSH(MockSSH, CmdExecSimul): def __init__(self, *args, **kwargs): MockSSH.__init__(self, *args, **kwargs) CmdExecSimul.__init__(self, None) _exec_cmd_wrappee = CmdExecSimul.__call__ class FakeLinuxHost(GenericLinuxHost): FAKE_CFG = { 'name': 'FakeHost', 'id': 'FakeID', 'instance_type': 'generic_linux_host', 'ipaddr': 'localhost', 'ssh_user': 'fake_user', 'ssh_pass': 'fake_pass', } def __init__(self, config=None, opts=None): if not config: config = self.FAKE_CFG if not opts: opts = self.FakeOpts() super(FakeLinuxHost, self).__init__(config, opts) class FakeOpts(object): setup = 'fake.setup.json' env = 
'fake.env.json' gen_only = False lhost_ui = 'linux_bash' @pytest.fixture def patch_clissh_sim(monkeypatch): monkeypatch.setattr(dev_linux_host.clissh, 'CLISSH', SimulatedSSH) @pytest.fixture def lh(request, patch_clissh_sim): lh = FakeLinuxHost() request.addfinalizer(lh.destroy) return lh SERVICE_NAME = 'generic_tool' @pytest.fixture def gen_tool(request, lh): gen_tool = GenericTool(lh.ui.cli_send_command, SERVICE_NAME) request.addfinalizer(gen_tool.cleanup) # destroy? return gen_tool @pytest.fixture def systemctl(gen_tool): service_factory = service_lib.SpecificServiceManager systemctl = service_factory(SERVICE_NAME, gen_tool.run_command) return systemctl @pytest.fixture def tool(gen_tool, systemctl): tool_iid = gen_tool.next_id() tool = { 'instance_id': tool_iid, 'service_name': SERVICE_NAME, 'service_manager': systemctl, } gen_tool.instances[tool_iid] = tool return tool class TestToolGeneral(object): ARG_SYSTEMCTL_STOP = 'systemctl stop {0}.service'.format(SERVICE_NAME) RET_VAL_INACTIVE = { 'return_value': CmdStatus('stdout', 'stderr', RC_SERVICE_INACTIVE), } def test_stop_raises_when_ignore_false(self, lh, gen_tool, tool): cmd_exec_simul = [ { self.ARG_SYSTEMCTL_STOP: MagicMock(**self.RET_VAL_INACTIVE), }, ] lh.ssh.set_simul(cmd_exec_simul) with pytest.raises(UICmdException): gen_tool.stop(tool['instance_id'], ignore_inactive=False) def test_stop_doesnt_raises_when_ignore_true(self, lh, gen_tool, tool): cmd_exec_simul = [ { self.ARG_SYSTEMCTL_STOP: MagicMock(**self.RET_VAL_INACTIVE), }, ] lh.ssh.set_simul(cmd_exec_simul) gen_tool.stop(tool['instance_id'], ignore_inactive=True) def test_stop_succeds_when_no_exception(self, gen_tool): instance_id = 1 gen_tool.instances[instance_id] = {'service_manager': MagicMock()} gen_tool.stop(instance_id) gen_tool.stop(instance_id) def test_start_with_prefix(self, lh, gen_tool, tool): cmd_exec_simul = [ MagicMock(return_value=CmdStatus("active", "", 0)), # systemd-run MagicMock(return_value=CmdStatus("active", "", 0)), # systemctl is-active ] lh.ssh.set_simul(cmd_exec_simul) gen_tool.start(command='a b c', prefix='Foo ') args_list = lh.ssh.exec_command.call_args_list[0][0][0].split() assert args_list[-3:] == ['a', 'b', 'c'] assert args_list[0] == 'Foo'
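# --- Illustrative sketch (not part of the original test module above) ---
# CmdExecSimul is the heart of the fake SSH layer: set_simul() takes a list
# where each entry scripts one exec_command() call, either as a dict mapping
# an exact command string to a prepared MagicMock, or as a bare MagicMock
# that matches anything.  The demo below restates that contract in isolation;
# it assumes it sits next to the classes above (so CmdExecSimul is in scope),
# and the command strings are invented for illustration.
from unittest.mock import MagicMock


def _demo_cmd_exec_simul():
    simul = CmdExecSimul([
        # 1st call: only this exact command string is accepted
        {'systemctl start demo.service': MagicMock(return_value='started')},
        # 2nd call: a bare MagicMock answers whatever command comes in
        MagicMock(return_value='anything goes'),
    ])
    assert simul('systemctl start demo.service') == 'started'
    assert simul('some other command') == 'anything goes'
    # A command that matches no dict entry raises CmdExecSimul.InputCommandNoMatch.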
""" HTML Widget classes """ import copy import datetime import re from itertools import chain from django.conf import settings from django.forms.utils import to_current_timezone from django.templatetags.static import static from django.utils import datetime_safe, formats from django.utils.dates import MONTHS from django.utils.formats import get_format from django.utils.html import format_html, html_safe from django.utils.safestring import mark_safe from django.utils.translation import gettext_lazy as _ from .renderers import get_default_renderer __all__ = ( 'Media', 'MediaDefiningClass', 'Widget', 'TextInput', 'NumberInput', 'EmailInput', 'URLInput', 'PasswordInput', 'HiddenInput', 'MultipleHiddenInput', 'FileInput', 'ClearableFileInput', 'Textarea', 'DateInput', 'DateTimeInput', 'TimeInput', 'CheckboxInput', 'Select', 'NullBooleanSelect', 'SelectMultiple', 'RadioSelect', 'CheckboxSelectMultiple', 'MultiWidget', 'SplitDateTimeWidget', 'SplitHiddenDateTimeWidget', 'SelectDateWidget', ) MEDIA_TYPES = ('css', 'js') @html_safe class Media: def __init__(self, media=None, **kwargs): if media: media_attrs = media.__dict__ else: media_attrs = kwargs self._css = {} self._js = [] for name in MEDIA_TYPES: getattr(self, 'add_' + name)(media_attrs.get(name)) def __str__(self): return self.render() def render(self): return mark_safe('\n'.join(chain.from_iterable(getattr(self, 'render_' + name)() for name in MEDIA_TYPES))) def render_js(self): return [ format_html( '<script type="text/javascript" src="{}"></script>', self.absolute_path(path) ) for path in self._js ] def render_css(self): # To keep rendering order consistent, we can't just iterate over items(). # We need to sort the keys, and iterate over the sorted list. media = sorted(self._css.keys()) return chain.from_iterable([ format_html( '<link href="{}" type="text/css" media="{}" rel="stylesheet" />', self.absolute_path(path), medium ) for path in self._css[medium] ] for medium in media) def absolute_path(self, path): """ Given a relative or absolute path to a static asset, return an absolute path. An absolute path will be returned unchanged while a relative path will be passed to django.templatetags.static.static(). 
""" if path.startswith(('http://', 'https://', '/')): return path return static(path) def __getitem__(self, name): """Return a Media object that only contains media of the given type.""" if name in MEDIA_TYPES: return Media(**{str(name): getattr(self, '_' + name)}) raise KeyError('Unknown media type "%s"' % name) def add_js(self, data): if data: for path in data: if path not in self._js: self._js.append(path) def add_css(self, data): if data: for medium, paths in data.items(): for path in paths: if not self._css.get(medium) or path not in self._css[medium]: self._css.setdefault(medium, []).append(path) def __add__(self, other): combined = Media() for name in MEDIA_TYPES: getattr(combined, 'add_' + name)(getattr(self, '_' + name, None)) getattr(combined, 'add_' + name)(getattr(other, '_' + name, None)) return combined def media_property(cls): def _media(self): # Get the media property of the superclass, if it exists sup_cls = super(cls, self) try: base = sup_cls.media except AttributeError: base = Media() # Get the media definition for this class definition = getattr(cls, 'Media', None) if definition: extend = getattr(definition, 'extend', True) if extend: if extend is True: m = base else: m = Media() for medium in extend: m = m + base[medium] return m + Media(definition) else: return Media(definition) else: return base return property(_media) class MediaDefiningClass(type): """ Metaclass for classes that can have media definitions. """ def __new__(mcs, name, bases, attrs): new_class = super(MediaDefiningClass, mcs).__new__(mcs, name, bases, attrs) if 'media' not in attrs: new_class.media = media_property(new_class) return new_class class Widget(metaclass=MediaDefiningClass): needs_multipart_form = False # Determines does this widget need multipart form is_localized = False is_required = False supports_microseconds = True def __init__(self, attrs=None): if attrs is not None: self.attrs = attrs.copy() else: self.attrs = {} def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() memo[id(self)] = obj return obj @property def is_hidden(self): return self.input_type == 'hidden' if hasattr(self, 'input_type') else False def subwidgets(self, name, value, attrs=None): context = self.get_context(name, value, attrs) yield context['widget'] def format_value(self, value): """ Return a value as it should appear when rendered in a template. """ if value == '' or value is None: return None if self.is_localized: return formats.localize_input(value) return str(value) def get_context(self, name, value, attrs): context = {} context['widget'] = { 'name': name, 'is_hidden': self.is_hidden, 'required': self.is_required, 'value': self.format_value(value), 'attrs': self.build_attrs(self.attrs, attrs), 'template_name': self.template_name, } return context def render(self, name, value, attrs=None, renderer=None): """Render the widget as an HTML string.""" context = self.get_context(name, value, attrs) return self._render(self.template_name, context, renderer) def _render(self, template_name, context, renderer=None): if renderer is None: renderer = get_default_renderer() return mark_safe(renderer.render(template_name, context)) def build_attrs(self, base_attrs, extra_attrs=None): """Build an attribute dictionary.""" attrs = base_attrs.copy() if extra_attrs is not None: attrs.update(extra_attrs) return attrs def value_from_datadict(self, data, files, name): """ Given a dictionary of data and this widget's name, return the value of this widget or None if it's not provided. 
""" return data.get(name) def value_omitted_from_data(self, data, files, name): return name not in data def id_for_label(self, id_): """ Return the HTML ID attribute of this Widget for use by a <label>, given the ID of the field. Return None if no ID is available. This hook is necessary because some widgets have multiple HTML elements and, thus, multiple IDs. In that case, this method should return an ID value that corresponds to the first ID in the widget's tags. """ return id_ def use_required_attribute(self, initial): return not self.is_hidden class Input(Widget): """ Base class for all <input> widgets. """ input_type = None # Subclasses must define this. template_name = 'django/forms/widgets/input.html' def __init__(self, attrs=None): if attrs is not None: attrs = attrs.copy() self.input_type = attrs.pop('type', self.input_type) super().__init__(attrs) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context['widget']['type'] = self.input_type return context class TextInput(Input): input_type = 'text' template_name = 'django/forms/widgets/text.html' class NumberInput(Input): input_type = 'number' template_name = 'django/forms/widgets/number.html' class EmailInput(Input): input_type = 'email' template_name = 'django/forms/widgets/email.html' class URLInput(Input): input_type = 'url' template_name = 'django/forms/widgets/url.html' class PasswordInput(Input): input_type = 'password' template_name = 'django/forms/widgets/password.html' def __init__(self, attrs=None, render_value=False): super().__init__(attrs) self.render_value = render_value def get_context(self, name, value, attrs): if not self.render_value: value = None return super().get_context(name, value, attrs) class HiddenInput(Input): input_type = 'hidden' template_name = 'django/forms/widgets/hidden.html' class MultipleHiddenInput(HiddenInput): """ Handle <input type="hidden"> for fields that have a list of values. """ template_name = 'django/forms/widgets/multiple_hidden.html' def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) final_attrs = context['widget']['attrs'] id_ = context['widget']['attrs'].get('id') subwidgets = [] for index, value_ in enumerate(context['widget']['value']): widget_attrs = final_attrs.copy() if id_: # An ID attribute was given. Add a numeric index as a suffix # so that the inputs don't all have the same ID attribute. 
widget_attrs['id'] = '%s_%s' % (id_, index) widget = HiddenInput() widget.is_required = self.is_required subwidgets.append(widget.get_context(name, value_, widget_attrs)['widget']) context['widget']['subwidgets'] = subwidgets return context def value_from_datadict(self, data, files, name): try: getter = data.getlist except AttributeError: getter = data.get return getter(name) def format_value(self, value): return [] if value is None else value class FileInput(Input): input_type = 'file' needs_multipart_form = True template_name = 'django/forms/widgets/file.html' def format_value(self, value): """File input never renders a value.""" return def value_from_datadict(self, data, files, name): "File widgets take data from FILES, not POST" return files.get(name) def value_omitted_from_data(self, data, files, name): return name not in files FILE_INPUT_CONTRADICTION = object() class ClearableFileInput(FileInput): clear_checkbox_label = _('Clear') initial_text = _('Currently') input_text = _('Change') template_name = 'django/forms/widgets/clearable_file_input.html' def clear_checkbox_name(self, name): """ Given the name of the file input, return the name of the clear checkbox input. """ return name + '-clear' def clear_checkbox_id(self, name): """ Given the name of the clear checkbox input, return the HTML id for it. """ return name + '_id' def is_initial(self, value): """ Return whether value is considered to be initial value. """ return bool(value and getattr(value, 'url', False)) def format_value(self, value): """ Return the file object if it has a defined url attribute. """ if self.is_initial(value): return value def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) checkbox_name = self.clear_checkbox_name(name) checkbox_id = self.clear_checkbox_id(checkbox_name) context.update({ 'checkbox_name': checkbox_name, 'checkbox_id': checkbox_id, 'is_initial': self.is_initial(value), 'input_text': self.input_text, 'initial_text': self.initial_text, 'clear_checkbox_label': self.clear_checkbox_label, }) return context def value_from_datadict(self, data, files, name): upload = super().value_from_datadict(data, files, name) if not self.is_required and CheckboxInput().value_from_datadict( data, files, self.clear_checkbox_name(name)): if upload: # If the user contradicts themselves (uploads a new file AND # checks the "clear" checkbox), we return a unique marker # object that FileField will turn into a ValidationError. 
return FILE_INPUT_CONTRADICTION # False signals to clear any existing value, as opposed to just None return False return upload def use_required_attribute(self, initial): return super().use_required_attribute(initial) and not initial def value_omitted_from_data(self, data, files, name): return ( super().value_omitted_from_data(data, files, name) and self.clear_checkbox_name(name) not in data ) class Textarea(Widget): template_name = 'django/forms/widgets/textarea.html' def __init__(self, attrs=None): # Use slightly better defaults than HTML's 20x2 box default_attrs = {'cols': '40', 'rows': '10'} if attrs: default_attrs.update(attrs) super().__init__(default_attrs) class DateTimeBaseInput(TextInput): format_key = '' supports_microseconds = False def __init__(self, attrs=None, format=None): super().__init__(attrs) self.format = format if format else None def format_value(self, value): return formats.localize_input(value, self.format or formats.get_format(self.format_key)[0]) class DateInput(DateTimeBaseInput): format_key = 'DATE_INPUT_FORMATS' template_name = 'django/forms/widgets/date.html' class DateTimeInput(DateTimeBaseInput): format_key = 'DATETIME_INPUT_FORMATS' template_name = 'django/forms/widgets/datetime.html' class TimeInput(DateTimeBaseInput): format_key = 'TIME_INPUT_FORMATS' template_name = 'django/forms/widgets/time.html' # Defined at module level so that CheckboxInput is picklable (#17976) def boolean_check(v): return not (v is False or v is None or v == '') class CheckboxInput(Input): input_type = 'checkbox' template_name = 'django/forms/widgets/checkbox.html' def __init__(self, attrs=None, check_test=None): super().__init__(attrs) # check_test is a callable that takes a value and returns True # if the checkbox should be checked for that value. self.check_test = boolean_check if check_test is None else check_test def format_value(self, value): """Only return the 'value' attribute if value isn't empty.""" if value is True or value is False or value is None or value == '': return return str(value) def get_context(self, name, value, attrs): if self.check_test(value): if attrs is None: attrs = {} attrs['checked'] = True return super().get_context(name, value, attrs) def value_from_datadict(self, data, files, name): if name not in data: # A missing value means False because HTML form submission does not # send results for unselected checkboxes. return False value = data.get(name) # Translate true and false strings to boolean values. values = {'true': True, 'false': False} if isinstance(value, str): value = values.get(value.lower(), value) return bool(value) def value_omitted_from_data(self, data, files, name): # HTML checkboxes don't appear in POST data if not checked, so it's # never known if the value is actually omitted. return False class ChoiceWidget(Widget): allow_multiple_selected = False input_type = None template_name = None option_template_name = None add_id_index = True checked_attribute = {'checked': True} option_inherits_attrs = True def __init__(self, attrs=None, choices=()): super().__init__(attrs) # choices can be any iterable, but we may need to render this widget # multiple times. Thus, collapse it into a list so it can be consumed # more than once. self.choices = list(choices) def __deepcopy__(self, memo): obj = copy.copy(self) obj.attrs = self.attrs.copy() obj.choices = copy.copy(self.choices) memo[id(self)] = obj return obj def subwidgets(self, name, value, attrs=None): """ Yield all "subwidgets" of this widget. 
Used to enable iterating options from a BoundField for choice widgets. """ value = self.format_value(value) yield from self.options(name, value, attrs) def options(self, name, value, attrs=None): """Yield a flat list of options for this widgets.""" for group in self.optgroups(name, value, attrs): yield from group[1] def optgroups(self, name, value, attrs=None): """Return a list of optgroups for this widget.""" default = (None, [], 0) groups = [default] has_selected = False for option_value, option_label in chain(self.choices): if option_value is None: option_value = '' if isinstance(option_label, (list, tuple)): index = groups[-1][2] + 1 subindex = 0 subgroup = [] groups.append((option_value, subgroup, index)) choices = option_label else: index = len(default[1]) subgroup = default[1] subindex = None choices = [(option_value, option_label)] for subvalue, sublabel in choices: selected = ( str(subvalue) in value and (not has_selected or self.allow_multiple_selected) ) if selected and not has_selected: has_selected = True subgroup.append(self.create_option( name, subvalue, sublabel, selected, index, subindex=subindex, attrs=attrs, )) if subindex is not None: subindex += 1 return groups def create_option(self, name, value, label, selected, index, subindex=None, attrs=None): index = str(index) if subindex is None else "%s_%s" % (index, subindex) if attrs is None: attrs = {} option_attrs = self.build_attrs(self.attrs, attrs) if self.option_inherits_attrs else {} if selected: option_attrs.update(self.checked_attribute) if 'id' in option_attrs: option_attrs['id'] = self.id_for_label(option_attrs['id'], index) return { 'name': name, 'value': str(value), 'label': label, 'selected': selected, 'index': index, 'attrs': option_attrs, 'type': self.input_type, 'template_name': self.option_template_name, } def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) context['widget']['optgroups'] = self.optgroups(name, context['widget']['value'], attrs) context['wrap_label'] = True return context def id_for_label(self, id_, index='0'): """ Use an incremented id for each option where the main widget references the zero index. """ if id_ and self.add_id_index: id_ = '%s_%s' % (id_, index) return id_ def value_from_datadict(self, data, files, name): getter = data.get if self.allow_multiple_selected: try: getter = data.getlist except AttributeError: pass return getter(name) def format_value(self, value): """Return selected values as a list.""" if not isinstance(value, (tuple, list)): value = [value] return [str(v) if v is not None else '' for v in value] class Select(ChoiceWidget): input_type = 'select' template_name = 'django/forms/widgets/select.html' option_template_name = 'django/forms/widgets/select_option.html' add_id_index = False checked_attribute = {'selected': True} option_inherits_attrs = False def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) if self.allow_multiple_selected: context['widget']['attrs']['multiple'] = 'multiple' return context @staticmethod def _choice_has_empty_value(choice): """Return True if the choice's value is empty string or None.""" value, _ = choice return (isinstance(value, str) and not bool(value)) or value is None def use_required_attribute(self, initial): """ Don't render 'required' if the first <option> has a value, as that's invalid HTML. """ use_required_attribute = super().use_required_attribute(initial) # 'required' is always okay for <select multiple>. 
if self.allow_multiple_selected: return use_required_attribute first_choice = next(iter(self.choices), None) return use_required_attribute and first_choice is not None and self._choice_has_empty_value(first_choice) class NullBooleanSelect(Select): """ A Select Widget intended to be used with NullBooleanField. """ def __init__(self, attrs=None): choices = ( ('1', _('Unknown')), ('2', _('Yes')), ('3', _('No')), ) super().__init__(attrs, choices) def format_value(self, value): try: return {True: '2', False: '3', '2': '2', '3': '3'}[value] except KeyError: return '1' def value_from_datadict(self, data, files, name): value = data.get(name) return { '2': True, True: True, 'True': True, '3': False, 'False': False, False: False, }.get(value) class SelectMultiple(Select): allow_multiple_selected = True def value_from_datadict(self, data, files, name): try: getter = data.getlist except AttributeError: getter = data.get return getter(name) def value_omitted_from_data(self, data, files, name): # An unselected <select multiple> doesn't appear in POST data, so it's # never known if the value is actually omitted. return False class RadioSelect(ChoiceWidget): input_type = 'radio' template_name = 'django/forms/widgets/radio.html' option_template_name = 'django/forms/widgets/radio_option.html' class CheckboxSelectMultiple(ChoiceWidget): allow_multiple_selected = True input_type = 'checkbox' template_name = 'django/forms/widgets/checkbox_select.html' option_template_name = 'django/forms/widgets/checkbox_option.html' def use_required_attribute(self, initial): # Don't use the 'required' attribute because browser validation would # require all checkboxes to be checked instead of at least one. return False def value_omitted_from_data(self, data, files, name): # HTML checkboxes don't appear in POST data if not checked, so it's # never known if the value is actually omitted. return False def id_for_label(self, id_, index=None): """" Don't include for="field_0" in <label> because clicking such a label would toggle the first checkbox. """ if index is None: return '' return super().id_for_label(id_, index) class MultiWidget(Widget): """ A widget that is composed of multiple widgets. In addition to the values added by Widget.get_context(), this widget adds a list of subwidgets to the context as widget['subwidgets']. These can be looped over and rendered like normal widgets. You'll probably want to use this class with MultiValueField. """ template_name = 'django/forms/widgets/multiwidget.html' def __init__(self, widgets, attrs=None): self.widgets = [w() if isinstance(w, type) else w for w in widgets] super().__init__(attrs) @property def is_hidden(self): return all(w.is_hidden for w in self.widgets) def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) if self.is_localized: for widget in self.widgets: widget.is_localized = self.is_localized # value is a list of values, each corresponding to a widget # in self.widgets. 
if not isinstance(value, list): value = self.decompress(value) final_attrs = context['widget']['attrs'] input_type = final_attrs.pop('type', None) id_ = final_attrs.get('id') subwidgets = [] for i, widget in enumerate(self.widgets): if input_type is not None: widget.input_type = input_type widget_name = '%s_%s' % (name, i) try: widget_value = value[i] except IndexError: widget_value = None if id_: widget_attrs = final_attrs.copy() widget_attrs['id'] = '%s_%s' % (id_, i) else: widget_attrs = final_attrs subwidgets.append(widget.get_context(widget_name, widget_value, widget_attrs)['widget']) context['widget']['subwidgets'] = subwidgets return context def id_for_label(self, id_): if id_: id_ += '_0' return id_ def value_from_datadict(self, data, files, name): return [widget.value_from_datadict(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets)] def value_omitted_from_data(self, data, files, name): return all( widget.value_omitted_from_data(data, files, name + '_%s' % i) for i, widget in enumerate(self.widgets) ) def decompress(self, value): """ Return a list of decompressed values for the given compressed value. The given value can be assumed to be valid, but not necessarily non-empty. """ raise NotImplementedError('Subclasses must implement this method.') def _get_media(self): """ Media for a multiwidget is the combination of all media of the subwidgets. """ media = Media() for w in self.widgets: media = media + w.media return media media = property(_get_media) def __deepcopy__(self, memo): obj = super().__deepcopy__(memo) obj.widgets = copy.deepcopy(self.widgets) return obj @property def needs_multipart_form(self): return any(w.needs_multipart_form for w in self.widgets) class SplitDateTimeWidget(MultiWidget): """ A widget that splits datetime input into two <input type="text"> boxes. """ supports_microseconds = False template_name = 'django/forms/widgets/splitdatetime.html' def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None): widgets = ( DateInput( attrs=attrs if date_attrs is None else date_attrs, format=date_format, ), TimeInput( attrs=attrs if time_attrs is None else time_attrs, format=time_format, ), ) super().__init__(widgets) def decompress(self, value): if value: value = to_current_timezone(value) return [value.date(), value.time().replace(microsecond=0)] return [None, None] class SplitHiddenDateTimeWidget(SplitDateTimeWidget): """ A widget that splits datetime input into two <input type="hidden"> inputs. """ template_name = 'django/forms/widgets/splithiddendatetime.html' def __init__(self, attrs=None, date_format=None, time_format=None, date_attrs=None, time_attrs=None): super().__init__(attrs, date_format, time_format, date_attrs, time_attrs) for widget in self.widgets: widget.input_type = 'hidden' class SelectDateWidget(Widget): """ A widget that splits date input into three <select> boxes. This also serves as an example of a Widget that has more than one HTML element and hence implements value_from_datadict. """ none_value = (0, '---') month_field = '%s_month' day_field = '%s_day' year_field = '%s_year' template_name = 'django/forms/widgets/select_date.html' input_type = 'select' select_widget = Select date_re = re.compile(r'(\d{4}|0)-(\d\d?)-(\d\d?)$') def __init__(self, attrs=None, years=None, months=None, empty_label=None): self.attrs = attrs or {} # Optional list or tuple of years to use in the "year" select box. 
if years: self.years = years else: this_year = datetime.date.today().year self.years = range(this_year, this_year + 10) # Optional dict of months to use in the "month" select box. if months: self.months = months else: self.months = MONTHS # Optional string, list, or tuple to use as empty_label. if isinstance(empty_label, (list, tuple)): if not len(empty_label) == 3: raise ValueError('empty_label list/tuple must have 3 elements.') self.year_none_value = (0, empty_label[0]) self.month_none_value = (0, empty_label[1]) self.day_none_value = (0, empty_label[2]) else: if empty_label is not None: self.none_value = (0, empty_label) self.year_none_value = self.none_value self.month_none_value = self.none_value self.day_none_value = self.none_value def get_context(self, name, value, attrs): context = super().get_context(name, value, attrs) date_context = {} year_choices = [(i, i) for i in self.years] if not self.is_required: year_choices.insert(0, self.year_none_value) year_attrs = context['widget']['attrs'].copy() year_name = self.year_field % name year_attrs['id'] = 'id_%s' % year_name date_context['year'] = self.select_widget(attrs, choices=year_choices).get_context( name=year_name, value=context['widget']['value']['year'], attrs=year_attrs, ) month_choices = list(self.months.items()) if not self.is_required: month_choices.insert(0, self.month_none_value) month_attrs = context['widget']['attrs'].copy() month_name = self.month_field % name month_attrs['id'] = 'id_%s' % month_name date_context['month'] = self.select_widget(attrs, choices=month_choices).get_context( name=month_name, value=context['widget']['value']['month'], attrs=month_attrs, ) day_choices = [(i, i) for i in range(1, 32)] if not self.is_required: day_choices.insert(0, self.day_none_value) day_attrs = context['widget']['attrs'].copy() day_name = self.day_field % name day_attrs['id'] = 'id_%s' % day_name date_context['day'] = self.select_widget(attrs, choices=day_choices,).get_context( name=day_name, value=context['widget']['value']['day'], attrs=day_attrs, ) subwidgets = [] for field in self._parse_date_fmt(): subwidgets.append(date_context[field]['widget']) context['widget']['subwidgets'] = subwidgets return context def format_value(self, value): """ Return a dict containing the year, month, and day of the current value. Use dict instead of a datetime to allow invalid dates such as February 31 to display correctly. 
""" year, month, day = None, None, None if isinstance(value, (datetime.date, datetime.datetime)): year, month, day = value.year, value.month, value.day elif isinstance(value, str): if settings.USE_L10N: try: input_format = get_format('DATE_INPUT_FORMATS')[0] d = datetime.datetime.strptime(value, input_format) year, month, day = d.year, d.month, d.day except ValueError: pass match = self.date_re.match(value) if match: year, month, day = [int(val) for val in match.groups()] return {'year': year, 'month': month, 'day': day} @staticmethod def _parse_date_fmt(): fmt = get_format('DATE_FORMAT') escaped = False for char in fmt: if escaped: escaped = False elif char == '\\': escaped = True elif char in 'Yy': yield 'year' elif char in 'bEFMmNn': yield 'month' elif char in 'dj': yield 'day' def id_for_label(self, id_): for first_select in self._parse_date_fmt(): return '%s_%s' % (id_, first_select) else: return '%s_month' % id_ def value_from_datadict(self, data, files, name): y = data.get(self.year_field % name) m = data.get(self.month_field % name) d = data.get(self.day_field % name) if y == m == d == "0": return None if y and m and d: if settings.USE_L10N: input_format = get_format('DATE_INPUT_FORMATS')[0] try: date_value = datetime.date(int(y), int(m), int(d)) except ValueError: return '%s-%s-%s' % (y, m, d) else: date_value = datetime_safe.new_date(date_value) return date_value.strftime(input_format) else: return '%s-%s-%s' % (y, m, d) return data.get(name) def value_omitted_from_data(self, data, files, name): return not any( ('{}_{}'.format(name, interval) in data) for interval in ('year', 'month', 'day') )
from __future__ import absolute_import, division, print_function import logging import os import signal import subprocess import sys from tornado.httpclient import HTTPClient, HTTPError from tornado.httpserver import HTTPServer from tornado.ioloop import IOLoop from tornado.log import gen_log from tornado.process import fork_processes, task_id, Subprocess from tornado.simple_httpclient import SimpleAsyncHTTPClient from tornado.testing import bind_unused_port, ExpectLog, AsyncTestCase, gen_test from tornado.test.util import unittest, skipIfNonUnix from tornado.web import RequestHandler, Application try: import asyncio except ImportError: asyncio = None def skip_if_twisted(): if IOLoop.configured_class().__name__.endswith('TwistedIOLoop'): raise unittest.SkipTest("Process tests not compatible with TwistedIOLoop") # Not using AsyncHTTPTestCase because we need control over the IOLoop. @skipIfNonUnix class ProcessTest(unittest.TestCase): def get_app(self): class ProcessHandler(RequestHandler): def get(self): if self.get_argument("exit", None): # must use os._exit instead of sys.exit so unittest's # exception handler doesn't catch it os._exit(int(self.get_argument("exit"))) if self.get_argument("signal", None): os.kill(os.getpid(), int(self.get_argument("signal"))) self.write(str(os.getpid())) return Application([("/", ProcessHandler)]) def tearDown(self): if task_id() is not None: # We're in a child process, and probably got to this point # via an uncaught exception. If we return now, both # processes will continue with the rest of the test suite. # Exit now so the parent process will restart the child # (since we don't have a clean way to signal failure to # the parent that won't restart) logging.error("aborting child process from tearDown") logging.shutdown() os._exit(1) # In the surviving process, clear the alarm we set earlier signal.alarm(0) super(ProcessTest, self).tearDown() def test_multi_process(self): # This test doesn't work on twisted because we use the global # reactor and don't restore it to a sane state after the fork # (asyncio has the same issue, but we have a special case in # place for it). skip_if_twisted() with ExpectLog(gen_log, "(Starting .* processes|child .* exited|uncaught exception)"): sock, port = bind_unused_port() def get_url(path): return "http://127.0.0.1:%d%s" % (port, path) # ensure that none of these processes live too long signal.alarm(5) # master process try: id = fork_processes(3, max_restarts=3) self.assertTrue(id is not None) signal.alarm(5) # child processes except SystemExit as e: # if we exit cleanly from fork_processes, all the child processes # finished with status 0 self.assertEqual(e.code, 0) self.assertTrue(task_id() is None) sock.close() return try: if asyncio is not None: # Reset the global asyncio event loop, which was put into # a broken state by the fork. asyncio.set_event_loop(asyncio.new_event_loop()) if id in (0, 1): self.assertEqual(id, task_id()) server = HTTPServer(self.get_app()) server.add_sockets([sock]) IOLoop.current().start() elif id == 2: self.assertEqual(id, task_id()) sock.close() # Always use SimpleAsyncHTTPClient here; the curl # version appears to get confused sometimes if the # connection gets closed before it's had a chance to # switch from writing mode to reading mode. 
client = HTTPClient(SimpleAsyncHTTPClient) def fetch(url, fail_ok=False): try: return client.fetch(get_url(url)) except HTTPError as e: if not (fail_ok and e.code == 599): raise # Make two processes exit abnormally fetch("/?exit=2", fail_ok=True) fetch("/?exit=3", fail_ok=True) # They've been restarted, so a new fetch will work int(fetch("/").body) # Now the same with signals # Disabled because on the mac a process dying with a signal # can trigger an "Application exited abnormally; send error # report to Apple?" prompt. # fetch("/?signal=%d" % signal.SIGTERM, fail_ok=True) # fetch("/?signal=%d" % signal.SIGABRT, fail_ok=True) # int(fetch("/").body) # Now kill them normally so they won't be restarted fetch("/?exit=0", fail_ok=True) # One process left; watch it's pid change pid = int(fetch("/").body) fetch("/?exit=4", fail_ok=True) pid2 = int(fetch("/").body) self.assertNotEqual(pid, pid2) # Kill the last one so we shut down cleanly fetch("/?exit=0", fail_ok=True) os._exit(0) except Exception: logging.error("exception in child process %d", id, exc_info=True) raise @skipIfNonUnix class SubprocessTest(AsyncTestCase): @gen_test def test_subprocess(self): if IOLoop.configured_class().__name__.endswith('LayeredTwistedIOLoop'): # This test fails non-deterministically with LayeredTwistedIOLoop. # (the read_until('\n') returns '\n' instead of 'hello\n') # This probably indicates a problem with either TornadoReactor # or TwistedIOLoop, but I haven't been able to track it down # and for now this is just causing spurious travis-ci failures. raise unittest.SkipTest("Subprocess tests not compatible with " "LayeredTwistedIOLoop") subproc = Subprocess([sys.executable, '-u', '-i'], stdin=Subprocess.STREAM, stdout=Subprocess.STREAM, stderr=subprocess.STDOUT) self.addCleanup(lambda: (subproc.proc.terminate(), subproc.proc.wait())) self.addCleanup(subproc.stdout.close) self.addCleanup(subproc.stdin.close) yield subproc.stdout.read_until(b'>>> ') subproc.stdin.write(b"print('hello')\n") data = yield subproc.stdout.read_until(b'\n') self.assertEqual(data, b"hello\n") yield subproc.stdout.read_until(b">>> ") subproc.stdin.write(b"raise SystemExit\n") data = yield subproc.stdout.read_until_close() self.assertEqual(data, b"") @gen_test def test_close_stdin(self): # Close the parent's stdin handle and see that the child recognizes it. subproc = Subprocess([sys.executable, '-u', '-i'], stdin=Subprocess.STREAM, stdout=Subprocess.STREAM, stderr=subprocess.STDOUT) self.addCleanup(lambda: (subproc.proc.terminate(), subproc.proc.wait())) yield subproc.stdout.read_until(b'>>> ') subproc.stdin.close() data = yield subproc.stdout.read_until_close() self.assertEqual(data, b"\n") @gen_test def test_stderr(self): # This test is mysteriously flaky on twisted: it succeeds, but logs # an error of EBADF on closing a file descriptor. skip_if_twisted() subproc = Subprocess([sys.executable, '-u', '-c', r"import sys; sys.stderr.write('hello\n')"], stderr=Subprocess.STREAM) self.addCleanup(lambda: (subproc.proc.terminate(), subproc.proc.wait())) data = yield subproc.stderr.read_until(b'\n') self.assertEqual(data, b'hello\n') # More mysterious EBADF: This fails if done with self.addCleanup instead of here. subproc.stderr.close() def test_sigchild(self): # Twisted's SIGCHLD handler and Subprocess's conflict with each other. 
        skip_if_twisted()
        Subprocess.initialize()
        self.addCleanup(Subprocess.uninitialize)
        subproc = Subprocess([sys.executable, '-c', 'pass'])
        subproc.set_exit_callback(self.stop)
        ret = self.wait()
        self.assertEqual(ret, 0)
        self.assertEqual(subproc.returncode, ret)

    @gen_test
    def test_sigchild_future(self):
        skip_if_twisted()
        Subprocess.initialize()
        self.addCleanup(Subprocess.uninitialize)
        subproc = Subprocess([sys.executable, '-c', 'pass'])
        ret = yield subproc.wait_for_exit()
        self.assertEqual(ret, 0)
        self.assertEqual(subproc.returncode, ret)

    def test_sigchild_signal(self):
        skip_if_twisted()
        Subprocess.initialize()
        self.addCleanup(Subprocess.uninitialize)
        subproc = Subprocess([sys.executable, '-c',
                              'import time; time.sleep(30)'],
                             stdout=Subprocess.STREAM)
        self.addCleanup(subproc.stdout.close)
        subproc.set_exit_callback(self.stop)
        os.kill(subproc.pid, signal.SIGTERM)
        try:
            ret = self.wait(timeout=1.0)
        except AssertionError:
            # We failed to get the termination signal. This test is
            # occasionally flaky on pypy, so try to get a little more
            # information: did the process close its stdout
            # (indicating that the problem is in the parent process's
            # signal handling) or did the child process somehow fail
            # to terminate?
            subproc.stdout.read_until_close(callback=self.stop)
            try:
                self.wait(timeout=1.0)
            except AssertionError:
                raise AssertionError("subprocess failed to terminate")
            else:
                raise AssertionError("subprocess closed stdout but failed to "
                                     "get termination signal")
        self.assertEqual(subproc.returncode, ret)
        self.assertEqual(ret, -signal.SIGTERM)

    @gen_test
    def test_wait_for_exit_raise(self):
        skip_if_twisted()
        Subprocess.initialize()
        self.addCleanup(Subprocess.uninitialize)
        subproc = Subprocess([sys.executable, '-c', 'import sys; sys.exit(1)'])
        with self.assertRaises(subprocess.CalledProcessError) as cm:
            yield subproc.wait_for_exit()
        self.assertEqual(cm.exception.returncode, 1)

    @gen_test
    def test_wait_for_exit_raise_disabled(self):
        skip_if_twisted()
        Subprocess.initialize()
        self.addCleanup(Subprocess.uninitialize)
        subproc = Subprocess([sys.executable, '-c', 'import sys; sys.exit(1)'])
        ret = yield subproc.wait_for_exit(raise_error=False)
        self.assertEqual(ret, 1)
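# --- Illustrative sketch (not part of the test module above) ---
# The SubprocessTest cases above all follow the same basic shape: spawn
# `sys.executable` through tornado.process.Subprocess with STREAM pipes, talk
# to it via subproc.stdout/stdin, then collect the exit code.  The standalone
# coroutine below restates that pattern; the name `run_echo` and the command
# string are invented for illustration.
import sys

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.process import Subprocess


@gen.coroutine
def run_echo():
    Subprocess.initialize()      # install the SIGCHLD handler, as the tests do
    subproc = Subprocess([sys.executable, '-u', '-c', "print('hello')"],
                         stdout=Subprocess.STREAM)
    output = yield subproc.stdout.read_until_close()
    returncode = yield subproc.wait_for_exit()
    raise gen.Return((returncode, output))


if __name__ == '__main__':
    returncode, output = IOLoop.current().run_sync(run_echo)
    print(returncode, output)    # expected: 0 b'hello\n'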
# -*- coding: utf-8 -*- # # Copyright 2014 Thomas Amland <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.observers.fsevents2 :synopsis: FSEvents based emitter implementation. :platforms: Mac OS X """ import os import logging import unicodedata from threading import Thread from watchdog.utils.compat import queue from watchdog.events import ( FileDeletedEvent, FileModifiedEvent, FileCreatedEvent, FileMovedEvent, DirDeletedEvent, DirModifiedEvent, DirCreatedEvent, DirMovedEvent ) from watchdog.observers.api import ( BaseObserver, EventEmitter, DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, ) # pyobjc import AppKit from FSEvents import ( FSEventStreamCreate, CFRunLoopGetCurrent, FSEventStreamScheduleWithRunLoop, FSEventStreamStart, CFRunLoopRun, CFRunLoopStop, FSEventStreamStop, FSEventStreamInvalidate, FSEventStreamRelease, ) from FSEvents import ( kCFAllocatorDefault, kCFRunLoopDefaultMode, kFSEventStreamEventIdSinceNow, kFSEventStreamCreateFlagNoDefer, kFSEventStreamCreateFlagFileEvents, kFSEventStreamEventFlagItemCreated, kFSEventStreamEventFlagItemRemoved, kFSEventStreamEventFlagItemInodeMetaMod, kFSEventStreamEventFlagItemRenamed, kFSEventStreamEventFlagItemModified, kFSEventStreamEventFlagItemFinderInfoMod, kFSEventStreamEventFlagItemChangeOwner, kFSEventStreamEventFlagItemXattrMod, kFSEventStreamEventFlagItemIsFile, kFSEventStreamEventFlagItemIsDir, kFSEventStreamEventFlagItemIsSymlink, ) logger = logging.getLogger(__name__) class FSEventsQueue(Thread): """ Low level FSEvents client. """ def __init__(self, path): Thread.__init__(self) self._queue = queue.Queue() self._run_loop = None if isinstance(path, bytes): self._path = path.decode('utf-8') self._path = unicodedata.normalize('NFC', self._path) context = None latency = 1.0 self._stream_ref = FSEventStreamCreate( kCFAllocatorDefault, self._callback, context, [self._path], kFSEventStreamEventIdSinceNow, latency, kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents) if self._stream_ref is None: raise IOError("FSEvents. Could not create stream.") def run(self): pool = AppKit.NSAutoreleasePool.alloc().init() self._run_loop = CFRunLoopGetCurrent() FSEventStreamScheduleWithRunLoop( self._stream_ref, self._run_loop, kCFRunLoopDefaultMode) if not FSEventStreamStart(self._stream_ref): FSEventStreamInvalidate(self._stream_ref) FSEventStreamRelease(self._stream_ref) raise IOError("FSEvents. Could not start stream.") CFRunLoopRun() FSEventStreamStop(self._stream_ref) FSEventStreamInvalidate(self._stream_ref) FSEventStreamRelease(self._stream_ref) del pool # Make sure waiting thread is notified self._queue.put(None) def stop(self): if self._run_loop is not None: CFRunLoopStop(self._run_loop) def _callback(self, streamRef, clientCallBackInfo, numEvents, eventPaths, eventFlags, eventIDs): events = [NativeEvent(path, flags, _id) for path, flags, _id in zip(eventPaths, eventFlags, eventIDs)] logger.debug("FSEvents callback. 
Got %d events:" % numEvents) for e in events: logger.debug(e) self._queue.put(events) def read_events(self): """ Returns a list or one or more events, or None if there are no more events to be read. """ if not self.is_alive(): return None return self._queue.get() class NativeEvent(object): def __init__(self, path, flags, event_id): self.path = path self.flags = flags self.event_id = event_id self.is_created = bool(flags & kFSEventStreamEventFlagItemCreated) self.is_removed = bool(flags & kFSEventStreamEventFlagItemRemoved) self.is_renamed = bool(flags & kFSEventStreamEventFlagItemRenamed) self.is_modified = bool(flags & kFSEventStreamEventFlagItemModified) self.is_change_owner = bool(flags & kFSEventStreamEventFlagItemChangeOwner) self.is_inode_meta_mod = bool(flags & kFSEventStreamEventFlagItemInodeMetaMod) self.is_finder_info_mod = bool(flags & kFSEventStreamEventFlagItemFinderInfoMod) self.is_xattr_mod = bool(flags & kFSEventStreamEventFlagItemXattrMod) self.is_symlink = bool(flags & kFSEventStreamEventFlagItemIsSymlink) self.is_directory = bool(flags & kFSEventStreamEventFlagItemIsDir) @property def _event_type(self): if self.is_created: return "Created" if self.is_removed: return "Removed" if self.is_renamed: return "Renamed" if self.is_modified: return "Modified" if self.is_inode_meta_mod: return "InodeMetaMod" if self.is_xattr_mod: return "XattrMod" return "Unknown" def __repr__(self): s ="<NativeEvent: path=%s, type=%s, is_dir=%s, flags=%s, id=%s>" return s % (repr(self.path), self._event_type, self.is_directory, hex(self.flags), self.event_id) class FSEventsEmitter(EventEmitter): """ FSEvents based event emitter. Handles conversion of native events. """ def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): EventEmitter.__init__(self, event_queue, watch, timeout) self._fsevents = FSEventsQueue(watch.path) self._fsevents.start() def on_thread_stop(self): self._fsevents.stop() def queue_events(self, timeout): events = self._fsevents.read_events() if events is None: return i = 0 while i < len(events): event = events[i] # For some reason the create and remove flags are sometimes also # set for rename and modify type events, so let those take # precedence. if event.is_renamed: # Internal moves appears to always be consecutive in the same # buffer and have IDs differ by exactly one (while others # don't) making it possible to pair up the two events coming # from a singe move operation. (None of this is documented!) # Otherwise, guess whether file was moved in or out. 
                #TODO: handle id wrapping
                if (i+1 < len(events) and events[i+1].is_renamed and
                        events[i+1].event_id == event.event_id + 1):
                    cls = DirMovedEvent if event.is_directory else FileMovedEvent
                    self.queue_event(cls(event.path, events[i+1].path))
                    self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
                    self.queue_event(DirModifiedEvent(os.path.dirname(events[i+1].path)))
                    i += 1
                elif os.path.exists(event.path):
                    cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
                    self.queue_event(cls(event.path))
                    self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
                else:
                    cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
                    self.queue_event(cls(event.path))
                    self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))
                #TODO: generate events for tree

            elif event.is_modified or event.is_inode_meta_mod or event.is_xattr_mod:
                cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
                self.queue_event(cls(event.path))

            elif event.is_created:
                cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
                self.queue_event(cls(event.path))
                self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))

            elif event.is_removed:
                cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
                self.queue_event(cls(event.path))
                self.queue_event(DirModifiedEvent(os.path.dirname(event.path)))

            i += 1


class FSEventsObserver2(BaseObserver):

    def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
        BaseObserver.__init__(self, emitter_class=FSEventsEmitter, timeout=timeout)
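# --- Illustrative usage sketch (not part of the module above) ---
# FSEventsObserver2 is a drop-in BaseObserver, so it is driven through the
# standard watchdog schedule()/start()/stop() API.  The snippet below is a
# minimal example of wiring it to the stock LoggingEventHandler; it only runs
# on macOS with pyobjc installed, and the watched path '/tmp' is arbitrary.
import time

from watchdog.events import LoggingEventHandler

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    observer = FSEventsObserver2()
    observer.schedule(LoggingEventHandler(), '/tmp', recursive=True)
    observer.start()
    try:
        time.sleep(10)           # let a few native events flow through
    finally:
        observer.stop()
        observer.join()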
from spasm.web.core import Web from spasm.web.util import * import re import urllib2 import time from datetime import datetime class Google(Web): INTERVAL = 6.0 SEARCH_URL = "http://ajax.googleapis.com/ajax/services/search/web?v=1.0&q=%s&rsz=large&start=%s" INTERVIEW_QUERY = '"%s" interview' REVIEW_QUERY = '"%s" "%s" review' def search(self, query, validate=True, max_results=32): results = [] for n in range(0,max_results,8): url = self.SEARCH_URL % (urllib2.quote(query), n) results += self.grabJSON(url, {}).get('responseData',{}).get('results',[]) if validate: for result in results: if not self.is_valid(result, query): results.remove(result) return results def search_reviews(self, artist, album): query = self.REVIEW_QUERY % (artist['name'], album['name']) results = self.search(query, max_results=8) reviews = [{ 'url' : result['url'], 'title' : result['title'], 'text' : result['content'], 'album_id' : album['id'], 'album' : album['name'], 'artist' : artist['name'], 'artist_id' : artist['id'] } for result in results] return reviews def search_interviews(self, artist): query = self.INTERVIEW_QUERY % (artist['name']) results = self.search(query, max_results=8) interviews = [{ 'url' : result['url'], 'title' : result['title'], 'text' : result['content'], 'artist_id' : artist['id'] } for result in results] return interviews def is_valid(self, result, query): valid_title = True valid_content = True p = re.compile("['\"](.*?)['\"]", re.I|re.DOTALL|re.M) quoted = p.findall(query) tokens = p.sub("", query).strip().split(" ") for match in quoted + tokens: valid_title = valid_title and match.lower() in result['title'].lower() valid_content = valid_content and match.lower() in result['content'].lower() valid = valid_title or valid_content return valid class Twitter(Web): SEARCH_URL = "http://search.twitter.com/search.json?q=%s&rpp=100" TWEETS_URL = "http://twitter.com/statuses/user_timeline/%s.json?count=100" USER_URL = "http://twitter.com/users/show.json?screen_name=%s" INTERVAL = 2.0 REST_INTERVAL = 30.0 def search_tweets(self, query, filter_terms=[]): url = self.SEARCH_URL % urllib2.quote(query) data = self.grabJSON(url, {}).get('results', []) return data def search_news(self, artist): results = self.search_artist_tweets(artist) news = self._tweet2news(results, artist, official=True) return news def get_news(self, artist): results = self.get_artist_tweets(artist) news = self._tweet2news(results, artist, official=True) return news def _tweet2news(self, tweets, artist, official=False): username = self._username(artist['twitter_url']) news = [{ 'title' : tweet['text'], 'url' : "http://twitter.com/%s/statuses/%s" % (username, tweet['id']), 'artist_id' : artist['id'], 'official' : official } for tweet in tweets] return news def get_stats(self, artist): _stats = self.get_artist(artist) stats = { 'followers' : _stats.get('followers_count'), 'friends' : _stats.get('friends_count'), 'tweets' : _stats.get('statuses_count'), 'artist_id' : artist['id'], } return [stats] def search_artist_tweets(self, artist, query="", filter_terms=[]): username = self._username(artist['twitter_url']) query = "from:%s %s" % (username, query) return self.search_tweets(query, filter_terms) def get_artist(self, artist): username = self._username(artist['twitter_url']) url = self.USER_URL % urllib2.quote(username) data = self.grabJSON(url, {}, interval=self.REST_INTERVAL) return data def get_artist_tweets(self, artist): username = self._username(artist['twitter_url']) url = self.TWEETS_URL % urllib2.quote(username) data = 
self.grabJSON(url, [], interval=self.REST_INTERVAL) return data def _username(self, url): return url.split("/")[-1] """ p = re.compile("http://(?:www\.)?twitter.com/(.*?)(?:/|$)",re.I|re.M) m = p.findall(url) username = m[0] if m else None return username """ class MySpace(Web): STATS_REGEX = r'%s\s*</span>.*?Plays:.*?([0-9,]+).*?Views:.*?([0-9,]+).*?Fans:.*?([0-9,]+)' STATS_URL = 'http://searchservice.myspace.com/index.cfm?fuseaction=sitesearch.results&qry=%s&type=Music&musictype=2' SHOWS_REGEX = re.compile(r'(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) (\d{1,2}) (\d{4}).*?(\d:\d+)(A|P).*?<a.*?href="(http://music.myspace.com.*?)".*?>(.*?)</a>.*?<td.*?>(<.*?>)*(.*?)<', re.DOTALL) NEWS_REGEX = re.compile(r'<span class="text">(.*?)</span>.*?<a.*?href="(http://blogs.myspace.com.*?)"', re.DOTALL) INTERVAL = 5.0 def get_stats(self, artist): '''plays, views, fans''' url = self.STATS_URL % urllib2.quote(artist['name']) html = self.grabHTML(url) artist_url = artist['myspace_url'].replace('http://','') artist_url = artist_url.replace(artist['name'].lower(), '(<b>)?%s(</b>)?' % artist['name'].lower()) p = re.compile(self.STATS_REGEX % artist_url, re.DOTALL) m = p.findall(html) if m: stats = [val.replace(',','') for val in m[0]] results = dict(zip(['plays','views','fans'], stats[2:5]\ if len(stats) == 5 else stats[0:3])) results['artist_id'] = artist['id'] return [results] else: return [] def get_shows(self, artist): html = self.grabHTML(artist['myspace_url']) return [{ 'date' : datetime.strptime('%sM' % ' '.join(show[0:5]), '%b %d %Y %H:%M %p'), 'url' : show[5], 'title' : show[6], 'location' : show[8], 'artist_id' : artist['id'], 'official' : True, } for show in self.SHOWS_REGEX.findall(html)] def get_news(self, artist): html = self.grabHTML(artist['myspace_url']) return [{ 'title' : news[0], 'url' : news[1], 'artist_id' : artist['id'], 'official' : True } for news in self.NEWS_REGEX.findall(html)] class BTJunkie(Web): DEFAULT_TERMS = ['discography'] SEARCH_URL = 'http://btjunkie.org/search?q=%s' SEARCH_REGEX = re.compile(r'(http://dl.btjunkie.org/.*?torrent)".*?class="BlckUnd">(.*?)</a>.*?(\d+MB).*?\d{2}/\d{2}.*?(?:<font.*?>)+(.*?)</font>.*?(?:<font.*?>)+(.*?)</font>', re.DOTALL) INTERVAL = 10.0 def search_torrents(self, artist, filter_terms=[]): url = self.SEARCH_URL % urllib2.quote(artist['name']) html = self.grabHTML(url) torrents = [] urls = [] for torrent in self.SEARCH_REGEX.findall(html): title = re.sub('<.*?>','',torrent[1]) url = torrent[0] valid = not bool(filter_terms) for term in filter_terms + self.DEFAULT_TERMS: if title.lower().count(term.lower()) and url not in urls: valid = True break if valid: urls.append(url) torrents.append({ 'url' : url, 'title' : title, 'size' : torrent[2], 'seeds' : torrent[3] if torrent[3] != 'X' else '0', 'leechers' : torrent[4] if torrent[4] != 'X' else '0', 'artist_id' : artist['id'] }) return torrents class YouTube(Web): SEARCH_URL = 'http://www.youtube.com/results?search_query=%s&page=%s' SEARCH_REGEX = re.compile(r'<div class="video-entry">.*?href="(.*?)".*?<img.*?title="(.*?)".*?src="(.*?)".*?class="video-time">.*?(\d+:\d{2}).*?class="video-view-count">([0-9,]+)', re.DOTALL) SCRAPE_INTERVAL = 7.5 JSON_DETAIL_URL = 'http://gdata.youtube.com/feeds/api/videos/%s?alt=json' JSON_SEARCH_URL = 'http://gdata.youtube.com/feeds/api/videos?q=%s&v=1&alt=json&start-index=%s' JSON_USER_URL = 'http://gdata.youtube.com/feeds/api/users/%s/uploads?alt=json' INTERVAL = 5.0 PAGES = 3 def get_videos(self, artist): username = artist['youtube_url'].split("/")[-1] url = 
self.JSON_USER_URL % urllib2.quote(username) results = self.grabJSON(url, {}).get('feed', {}).get('entry',[]) videos = [self._convert_result(result, artist, official=True) for result in results] return videos def search_videos(self, artist, filter_terms=[]): videos = [] artist_name = urllib2.quote(artist['name']) for n in [1, 26, 51]: url = self.JSON_SEARCH_URL % (artist_name, n) results = self.grabJSON(url, {}).get('feed',{}).get('entry',[]) for result in results: valid = not bool(filter_terms) title = result.get('title',{}).get('$t') for term in filter_terms: if title.lower().count(term.lower()): valid = True break if valid: video = self._convert_result(result, artist) videos.append(video) return videos def get_thumbnails(self, video): youtube_id = self._youtube_id(video['url']) json = self.grabJSON(self.JSON_DETAIL_URL % youtube_id) if not json: return [] thumbs = [thumb for thumb in json.get('entry',{} ).get('media$group',{}).get('media$thumbnail') if thumb] return thumbs def _convert_result(self, result, artist, official=False): title = result.get('title',{}).get('$t') views = result.get('yt$statistics',{}).get('viewCount') video = { 'url' : result.get('link', [{}])[0].get('href'), 'title' : title, 'thumbnail' : result.get('media$group',{} ).get('media$thumbnail',[{}])[0].get('url'), 'duration' : result.get('media$group',{} ).get('yt$duration',{}).get('seconds'), 'views' : views if views else 0, 'artist_id' : artist['id'], 'official' : official } return video def _youtube_id(self, url): p = re.compile("http://(?:www\.)youtube\.com/watch\?v=(.*?)(?:&|/|$)",re.I|re.M) m = p.findall(url) yid = m[0] if m else None return yid class LastFM(Web): API_KEY = '8d50b262d08bc4b406b65aabada39907' API_SECRET = '9b18cdfa75db57e9fed963f95d07dd26' ARTIST_URL = 'http://ws.audioscrobbler.com/2.0/?method=artist.getinfo&format=json&artist=%s&api_key=%s' TOPALBUMS_URL = 'http://ws.audioscrobbler.com/2.0/?method=artist.gettopalbums&format=json&artist=%s&api_key=%s' TOPTRACKS_URL = 'http://ws.audioscrobbler.com/2.0/?method=artist.gettoptracks&format=json&artist=%s&api_key=%s' TOPTAGS_URL = 'http://ws.audioscrobbler.com/2.0/?method=artist.gettoptags&format=json&artist=%s&api_key=%s' SIMILAR_URL = 'http://ws.audioscrobbler.com/2.0/?method=artist.getsimilar&format=json&artist=%s&api_key=%s' INTERVAL = 5.0 def get_artist(self, artist): url = self.ARTIST_URL % (urllib2.quote(artist['name']), self.API_KEY) result = self.grabJSON(url, {}).get('artist') if result: data = { 'mbid' : result['mbid'], 'image_small' : result['image'][0]['#text'] if len(result['image']) > 0 else None, 'image_medium' : result['image'][1]['#text'] if len(result['image']) > 1 else None, 'image_large' : result['image'][2]['#text'] if len(result['image']) > 2 else None, 'listeners' : result['stats']['listeners'], 'playcount' : result['stats']['playcount'], 'artist_id' : artist['id'] } return data else: return None def get_top_albums(self, artist): url = self.TOPALBUMS_URL % (urllib2.quote(artist['name']), self.API_KEY) results = self.grabJSON(url, {}).get('topalbums',{}).get('album', []) if isinstance(results, dict): # WTF LastFM? 
results = [results] albums = [{ 'name' : result['name'], 'url' : result['url'], 'playcount' : result['playcount'], 'mbid' : result['mbid'], 'image_small' : result['image'][0]['#text'] if len(result['image']) > 0 else None, 'image_medium' : result['image'][1]['#text'] if len(result['image']) > 1 else None, 'image_large' : result['image'][2]['#text'] if len(result['image']) > 2 else None, 'artist_id' : artist['id'] } for result in results] return albums def get_top_tracks(self, artist): url = self.TOPTRACKS_URL % (urllib2.quote(artist['name']), self.API_KEY) results = self.grabJSON(url, {}).get('toptracks',{}).get('track', []) if isinstance(results, dict): # WTF LastFM? results = [results] tracks = [{ 'name' : result['name'], 'url' : result['url'], 'listeners' : result['listeners'], 'mbid' : result['mbid'], 'artist_id' : artist['id'] } for result in results] return tracks def get_top_tags(self, artist): '''count, url, name''' url = self.TOPTAGS_URL % (urllib2.quote(artist['name']), self.API_KEY) results = self.grabJSON(url, {}).get('toptags',{}).get('tag', []) if isinstance(results, dict): # WTF LastFM? results = [results] tags = [{ 'count' : result['count'], 'url' : result['url'], 'name' : result['name'], 'artist_id' : artist['id'] } for result in results] return tags def get_similar_artists(self, artist): url = self.SIMILAR_URL % (urllib2.quote(artist['name']), self.API_KEY) results = self.grabJSON(url, {}).get('similarartists',{}).get('artist', []) if isinstance(results, dict): # WTF LastFM? results = [results] artists = [{ 'name' : result['name'], 'url' : result['url'], 'mbid' : result['mbid'], 'matchval' : result['match'], 'artist_id' : artist['id'] } for result in results] return artists class Flickr(Web): API_KEY = "e6f7bba2ed350cdad2a5e0a1fdd7a6cd" API_SECRET_KEY = "93652087da42b7e5" SEARCH_URL = "http://api.flickr.com/services/rest/?method=flickr.photos.search&format=json&nojsoncallback=1&api_key=%s" INTERVAL = 10.0 def search_photos(self, artist, group_id=None, mode="text"): url = self.SEARCH_URL % self.API_KEY if group_id: url += "&group_id=%s" % group_id if mode == "text": url += '&text="%s"&sort=relevance' % urllib2.quote(artist['name']) else: url += "&tags=%s,%s" % (urllib2.quote(artist['name'].replace(" ","")), "concert") results = self.grabJSON(url, {}).get('photos',{}).get('photo',[]) photos = [{ 'title' : result['title'], 'url' : 'http://www.flickr.com/photos/%(owner)s/%(id)s' % result, 'square' : 'http://farm%(farm)s.static.flickr.com/%(server)s/%(id)s_%(secret)s_s.jpg' % result, 'thumbnail' : 'http://farm%(farm)s.static.flickr.com/%(server)s/%(id)s_%(secret)s_t.jpg' % result, 'medium' : 'http://farm%(farm)s.static.flickr.com/%(server)s/%(id)s_%(secret)s_m.jpg' % result, 'large' : 'http://farm%(farm)s.static.flickr.com/%(server)s/%(id)s_%(secret)s_b.jpg' % result, 'artist_id' : artist['id'] } for result in results] return photos class TinySong(Web): SEARCH_URL = "http://tinysong.com/s/%s?limit=32" INTERVAL = 10.0 def search(self, query, filter_terms=[]): url = self.SEARCH_URL % urllib2.quote(query) results = self.grabCSV(url, [], delimiter=";") for result in results: if len(result) > 7: valid = not bool(filter_terms) title = result[2] for term in filter_terms: if title.lower().count(term.lower()): valid = True break if not valid: results.remove(result) return results def search_audio(self, artist, validate=True, filter_terms=[]): results = self.search(artist['name'], filter_terms=filter_terms) if validate: for result in results: if not result or len(result) < 4 or 
                        len(result) < 5 or result[4].lower() != artist['name'].lower():
                    results.remove(result)
        audio = [{
            'url' : result[7],
            'tiny_url' : result[0],
            'title' : result[2],
            'artist_id' : artist['id']
        } for result in results if len(result) > 7]
        return audio

    def search_audio_tracks(self, artist, track, validate=True):
        results = self.search("%s %s" % (artist['name'], track['name']))
        if validate:
            # Field 4 of the CSV row holds the artist name; keep only rows
            # long enough to carry it and whose artist matches, without
            # mutating the list while iterating over it.
            results = [result for result in results
                       if result and len(result) > 4
                       and result[4].lower() == artist['name'].lower()]
        audio = [{
            'url' : result[7],
            'tiny_url' : result[0],
            'title' : result[2],
            'artist_id' : artist['id']
        } for result in results if len(result) > 7]
        return audio
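# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It illustrates the
# calling convention shared by the scraper classes above: each method takes an
# ``artist`` dict (assumed shape: 'id', 'name' plus the service-specific URL
# keys used above) and returns a list of plain dicts tagged with 'artist_id'.
# Assumes the Web base class can be instantiated without arguments and that
# grabJSON/grabHTML/grabCSV handle HTTP fetching and rate limiting (INTERVAL).
# The artist values below are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    demo_artist = {
        'id': 1,
        'name': 'Some Band',                                    # hypothetical
        'myspace_url': 'http://www.myspace.com/someband',       # hypothetical
        'youtube_url': 'http://www.youtube.com/user/someband',  # hypothetical
    }
    lastfm = LastFM()
    info = lastfm.get_artist(demo_artist)
    if info:
        print 'listeners: %s, playcount: %s' % (info['listeners'],
                                                info['playcount'])
    for tag in lastfm.get_top_tags(demo_artist)[:5]:
        print 'tag: %(name)s (%(count)s)' % tag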
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Cloudscaling Group, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ The MatchMaker classes should except a Topic or Fanout exchange key and return keys for direct exchanges, per (approximate) AMQP parlance. """ import contextlib import itertools import json import eventlet from oslo.config import cfg from manila.openstack.common.gettextutils import _ from manila.openstack.common import log as logging matchmaker_opts = [ # Matchmaker ring file cfg.StrOpt('matchmaker_ringfile', default='/etc/nova/matchmaker_ring.json', help='Matchmaker ring file (JSON)'), cfg.IntOpt('matchmaker_heartbeat_freq', default=300, help='Heartbeat frequency'), cfg.IntOpt('matchmaker_heartbeat_ttl', default=600, help='Heartbeat time-to-live.'), ] CONF = cfg.CONF CONF.register_opts(matchmaker_opts) LOG = logging.getLogger(__name__) contextmanager = contextlib.contextmanager class MatchMakerException(Exception): """Signified a match could not be found.""" message = _("Match not found by MatchMaker.") class Exchange(object): """ Implements lookups. Subclass this to support hashtables, dns, etc. """ def __init__(self): pass def run(self, key): raise NotImplementedError() class Binding(object): """ A binding on which to perform a lookup. """ def __init__(self): pass def test(self, key): raise NotImplementedError() class MatchMakerBase(object): """ Match Maker Base Class. Build off HeartbeatMatchMakerBase if building a heartbeat-capable MatchMaker. """ def __init__(self): # Array of tuples. Index [2] toggles negation, [3] is last-if-true self.bindings = [] self.no_heartbeat_msg = _('Matchmaker does not implement ' 'registration or heartbeat.') def register(self, key, host): """ Register a host on a backend. Heartbeats, if applicable, may keepalive registration. """ pass def ack_alive(self, key, host): """ Acknowledge that a key.host is alive. Used internally for updating heartbeats, but may also be used publically to acknowledge a system is alive (i.e. rpc message successfully sent to host) """ pass def is_alive(self, topic, host): """ Checks if a host is alive. """ pass def expire(self, topic, host): """ Explicitly expire a host's registration. """ pass def send_heartbeats(self): """ Send all heartbeats. Use start_heartbeat to spawn a heartbeat greenthread, which loops this method. """ pass def unregister(self, key, host): """ Unregister a topic. """ pass def start_heartbeat(self): """ Spawn heartbeat greenthread. """ pass def stop_heartbeat(self): """ Destroys the heartbeat greenthread. """ pass def add_binding(self, binding, rule, last=True): self.bindings.append((binding, rule, False, last)) #NOTE(ewindisch): kept the following method in case we implement the # underlying support. #def add_negate_binding(self, binding, rule, last=True): # self.bindings.append((binding, rule, True, last)) def queues(self, key): workers = [] # bit is for negate bindings - if we choose to implement it. # last stops processing rules if this matches. 
for (binding, exchange, bit, last) in self.bindings: if binding.test(key): workers.extend(exchange.run(key)) # Support last. if last: return workers return workers class HeartbeatMatchMakerBase(MatchMakerBase): """ Base for a heart-beat capable MatchMaker. Provides common methods for registering, unregistering, and maintaining heartbeats. """ def __init__(self): self.hosts = set() self._heart = None self.host_topic = {} super(HeartbeatMatchMakerBase, self).__init__() def send_heartbeats(self): """ Send all heartbeats. Use start_heartbeat to spawn a heartbeat greenthread, which loops this method. """ for key, host in self.host_topic: self.ack_alive(key, host) def ack_alive(self, key, host): """ Acknowledge that a host.topic is alive. Used internally for updating heartbeats, but may also be used publically to acknowledge a system is alive (i.e. rpc message successfully sent to host) """ raise NotImplementedError("Must implement ack_alive") def backend_register(self, key, host): """ Implements registration logic. Called by register(self,key,host) """ raise NotImplementedError("Must implement backend_register") def backend_unregister(self, key, key_host): """ Implements de-registration logic. Called by unregister(self,key,host) """ raise NotImplementedError("Must implement backend_unregister") def register(self, key, host): """ Register a host on a backend. Heartbeats, if applicable, may keepalive registration. """ self.hosts.add(host) self.host_topic[(key, host)] = host key_host = '.'.join((key, host)) self.backend_register(key, key_host) self.ack_alive(key, host) def unregister(self, key, host): """ Unregister a topic. """ if (key, host) in self.host_topic: del self.host_topic[(key, host)] self.hosts.discard(host) self.backend_unregister(key, '.'.join((key, host))) LOG.info(_("Matchmaker unregistered: %s, %s" % (key, host))) def start_heartbeat(self): """ Implementation of MatchMakerBase.start_heartbeat Launches greenthread looping send_heartbeats(), yielding for CONF.matchmaker_heartbeat_freq seconds between iterations. """ if len(self.hosts) == 0: raise MatchMakerException( _("Register before starting heartbeat.")) def do_heartbeat(): while True: self.send_heartbeats() eventlet.sleep(CONF.matchmaker_heartbeat_freq) self._heart = eventlet.spawn(do_heartbeat) def stop_heartbeat(self): """ Destroys the heartbeat greenthread. """ if self._heart: self._heart.kill() class DirectBinding(Binding): """ Specifies a host in the key via a '.' character Although dots are used in the key, the behavior here is that it maps directly to a host, thus direct. """ def test(self, key): if '.' in key: return True return False class TopicBinding(Binding): """ Where a 'bare' key without dots. AMQP generally considers topic exchanges to be those *with* dots, but we deviate here in terminology as the behavior here matches that of a topic exchange (whereas where there are dots, behavior matches that of a direct exchange. """ def test(self, key): if '.' not in key: return True return False class FanoutBinding(Binding): """Match on fanout keys, where key starts with 'fanout.' string.""" def test(self, key): if key.startswith('fanout~'): return True return False class StubExchange(Exchange): """Exchange that does nothing.""" def run(self, key): return [(key, None)] class RingExchange(Exchange): """ Match Maker where hosts are loaded from a static file containing a hashmap (JSON formatted). __init__ takes optional ring dictionary argument, otherwise loads the ringfile from CONF.mathcmaker_ringfile. 
""" def __init__(self, ring=None): super(RingExchange, self).__init__() if ring: self.ring = ring else: fh = open(CONF.matchmaker_ringfile, 'r') self.ring = json.load(fh) fh.close() self.ring0 = {} for k in self.ring.keys(): self.ring0[k] = itertools.cycle(self.ring[k]) def _ring_has(self, key): if key in self.ring0: return True return False class RoundRobinRingExchange(RingExchange): """A Topic Exchange based on a hashmap.""" def __init__(self, ring=None): super(RoundRobinRingExchange, self).__init__(ring) def run(self, key): if not self._ring_has(key): LOG.warn( _("No key defining hosts for topic '%s', " "see ringfile") % (key, ) ) return [] host = next(self.ring0[key]) return [(key + '.' + host, host)] class FanoutRingExchange(RingExchange): """Fanout Exchange based on a hashmap.""" def __init__(self, ring=None): super(FanoutRingExchange, self).__init__(ring) def run(self, key): # Assume starts with "fanout~", strip it for lookup. nkey = key.split('fanout~')[1:][0] if not self._ring_has(nkey): LOG.warn( _("No key defining hosts for topic '%s', " "see ringfile") % (nkey, ) ) return [] return map(lambda x: (key + '.' + x, x), self.ring[nkey]) class LocalhostExchange(Exchange): """Exchange where all direct topics are local.""" def __init__(self, host='localhost'): self.host = host super(Exchange, self).__init__() def run(self, key): return [('.'.join((key.split('.')[0], self.host)), self.host)] class DirectExchange(Exchange): """ Exchange where all topic keys are split, sending to second half. i.e. "compute.host" sends a message to "compute.host" running on "host" """ def __init__(self): super(Exchange, self).__init__() def run(self, key): e = key.split('.', 1)[1] return [(key, e)] class MatchMakerRing(MatchMakerBase): """ Match Maker where hosts are loaded from a static hashmap. """ def __init__(self, ring=None): super(MatchMakerRing, self).__init__() self.add_binding(FanoutBinding(), FanoutRingExchange(ring)) self.add_binding(DirectBinding(), DirectExchange()) self.add_binding(TopicBinding(), RoundRobinRingExchange(ring)) class MatchMakerLocalhost(MatchMakerBase): """ Match Maker where all bare topics resolve to localhost. Useful for testing. """ def __init__(self, host='localhost'): super(MatchMakerLocalhost, self).__init__() self.add_binding(FanoutBinding(), LocalhostExchange(host)) self.add_binding(DirectBinding(), DirectExchange()) self.add_binding(TopicBinding(), LocalhostExchange(host)) class MatchMakerStub(MatchMakerBase): """ Match Maker where topics are untouched. Useful for testing, or for AMQP/brokered queues. Will not work where knowledge of hosts is known (i.e. zeromq) """ def __init__(self): super(MatchMakerLocalhost, self).__init__() self.add_binding(FanoutBinding(), StubExchange()) self.add_binding(DirectBinding(), StubExchange()) self.add_binding(TopicBinding(), StubExchange())
# Copyright 2017 Nokia. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log as logging from neutron.objects import trunk as trunk_objects from neutron.services.trunk.drivers import base as trunk_base from neutron.services.trunk import exceptions as t_exc from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import context as n_ctx from neutron_lib.db import api as db_api from neutron_lib.plugins import directory from neutron_lib.services.trunk import constants as t_consts from nuage_neutron.plugins.common import constants as p_consts from nuage_neutron.plugins.common import exceptions as nuage_exc from nuage_neutron.plugins.common import nuagedb as db LOG = logging.getLogger(__name__) SUPPORTED_INTERFACES = ( portbindings.VIF_TYPE_OTHER ) SUPPORTED_SEGMENTATION_TYPES = ( t_consts.SEGMENTATION_TYPE_VLAN, ) class NuageTrunkHandler(object): _core_plugin = None def __init__(self, plugin_driver): self.plugin_driver = plugin_driver @property def core_plugin(self): if not self._core_plugin: self._core_plugin = directory.get_plugin() return self._core_plugin def set_trunk_status(self, context, trunk_id, status): with db_api.CONTEXT_WRITER.using(context): trunk = trunk_objects.Trunk.get_object(context, id=trunk_id) if trunk: trunk.update(status=status) def _trunk_status_change(self, resource, event, trigger, **kwargs): updated_port = kwargs['port'] trunk_details = updated_port.get('trunk_details') # If no trunk details port is not parent of a trunk if not trunk_details: return if (updated_port.get(portbindings.VNIC_TYPE) not in self.plugin_driver._supported_vnic_types()): LOG.debug("Ignoring trunk status change for port" " %s due to unsupported VNIC type", updated_port.get('id')) return original_port = kwargs['original_port'] updated_host_id = (original_port['binding:host_id'] and not updated_port['binding:host_id'] or not original_port['binding:host_id'] and updated_port['binding:host_id']) if not updated_host_id: return context = kwargs['context'] if trunk_details.get('trunk_id'): trunk = trunk_objects.Trunk.get_object( context, id=trunk_details.get('trunk_id')) if trunk: self.wire_trunk(context, trunk) return def wire_trunk(self, context, trunk): updated_ports = self._update_subport_bindings(context, trunk.id, trunk.sub_ports) if len(trunk.sub_ports) != len(updated_ports): LOG.error("Updated: %(up)s, subports: %(sub)s", {'up': len(updated_ports), 'sub': len(trunk.sub_ports)}) self.set_trunk_status(context, trunk.id, t_consts.TRUNK_DEGRADED_STATUS) def _update_subport_bindings(self, context, trunk_id, subports): # Assumption: all subports belong to trunk_id el = context.elevated() trunk = trunk_objects.Trunk.get_object(el, id=trunk_id) if not trunk: LOG.debug("Trunk not found. 
id: %s", trunk_id) trunk_updated_ports = self._process_binding(el, trunk, subports) return trunk_updated_ports def _process_binding(self, context, trunk, subports): updated_ports = [] trunk_port_id = trunk.port_id trunk_port = self.core_plugin.get_port(context, trunk_port_id) trunk_host = trunk_port.get(portbindings.HOST_ID) trunk_profile = trunk_port.get(portbindings.PROFILE) trunk.update(status=t_consts.TRUNK_BUILD_STATUS) trunk_target_state = ( t_consts.TRUNK_ACTIVE_STATUS if trunk_profile else t_consts.TRUNK_DOWN_STATUS) for port in subports: try: if trunk_profile: trunk_profile['vlan'] = port.segmentation_id updated_port = self.core_plugin.update_port( context, port.port_id, {'port': {portbindings.HOST_ID: trunk_host, portbindings.PROFILE: trunk_profile, 'device_owner': t_consts.TRUNK_SUBPORT_OWNER}}) vif_type = updated_port.get(portbindings.VIF_TYPE) if vif_type == portbindings.VIF_TYPE_BINDING_FAILED: raise t_exc.SubPortBindingError(port_id=port.port_id, trunk_id=trunk.id) updated_ports.append(updated_port) except t_exc.SubPortBindingError as e: LOG.error("Failed to bind subport: %s", e) trunk.update(status=t_consts.TRUNK_ERROR_STATUS) return [] except Exception as e: LOG.error("Failed to bind subport: %s", e) if len(subports) != len(updated_ports): LOG.debug("Trunk: %s is degraded", trunk.id) trunk.update(status=t_consts.TRUNK_DEGRADED_STATUS) else: trunk.update(status=trunk_target_state) return updated_ports def _validate_port_fixedip(self, port): if not port.get('fixed_ips'): msg = ("Port %s requires a FixedIP in order to be used" % port.get('id')) raise nuage_exc.NuageBadRequest(msg=msg) def _validate_no_transparent_network(self, context, trunk_port): network = self.core_plugin.get_network(context, trunk_port.get('network_id')) is_vlan_transparant = (network.get('vlan_transparent') if network is not None else False) if is_vlan_transparant: msg = ("Network {} is vlan_transparent. 
It is not" "allowed to create a trunk on a " "vlan_transparent network").format(network['id']) raise nuage_exc.NuageBadRequest(msg=msg) def _validate_same_netpartition(self, context, trunk_port, trunk_subports): parent_netpart = db.get_subnet_l2dom_by_port_id( context.session, trunk_port['id']).get('net_partition_id') bad_subport = next((port for port in trunk_subports if db.get_subnet_l2dom_by_port_id( context.session, port.port_id).get('net_partition_id') != parent_netpart), None) if bad_subport: raise nuage_exc.SubPortNetpartitionConflict( subport=bad_subport['id']) def _validate_subports_not_trunk_net(self, context, trunk_port, trunk_subports): """Validates if a subport is not in the trunk network""" neutron_subports = [self.core_plugin.get_port(context, port['port_id']) for port in trunk_subports] bad_port = next((port for port in neutron_subports if port['network_id'] == trunk_port['network_id']), None) if bad_port: raise nuage_exc.SubPortParentPortConflict(subport=bad_port['id']) def _validate_subports_vnic_type(self, context, trunk, trunk_port, subports): LOG.debug("validating vnic types for %(sub)s added subports in " "trunk %(trunk)s", {'sub': len(subports), 'trunk': trunk.id}) port_ids = [subport.port_id for subport in subports] port_bindings = db.get_port_bindings(context.session, port_ids) parent_vnic = trunk_port.get(portbindings.VNIC_TYPE) bad_port = next((binding for binding in port_bindings if binding.vnic_type != parent_vnic), None) if bad_port: raise nuage_exc.TrunkVnicTypeConflict( subport=bad_port.port_id, vnic_type_sub=bad_port.vnic_type, parent=trunk_port['id'], vnic_type_parent=parent_vnic) def _set_sub_ports(self, trunk_id, subports): ctx = n_ctx.get_admin_context() LOG.debug("updating subport bindings for trunk %s", trunk_id) updated_ports = self._update_subport_bindings(ctx, trunk_id, subports) if len(subports) != len(updated_ports): LOG.error("Updated: %(up)s, subports: %(sub)s", {'up': len(updated_ports), 'sub': len(subports)}) self.set_trunk_status(ctx, trunk_id, t_consts.TRUNK_DEGRADED_STATUS) def _unset_sub_ports(self, trunk_id, subports): ctx = n_ctx.get_admin_context() updated_ports = [] for port in subports: LOG.debug('unset port id : %(id)s', {'id': port.port_id}) try: updated_port = self.core_plugin.update_port( ctx, port.port_id, {'port': {portbindings.HOST_ID: None, portbindings.PROFILE: None, 'device_owner': '', 'device_id': ''}}) vif_type = updated_port.get(portbindings.VIF_TYPE) if vif_type != portbindings.VIF_TYPE_UNBOUND: raise t_exc.SubPortBindingError(port_id=port.port_id, trunk_id=trunk_id) updated_ports.append(updated_port) except t_exc.SubPortBindingError as e: LOG.error("Failed to clear binding for subport: %s", e) self.set_trunk_status(ctx, trunk_id, t_consts.TRUNK_DEGRADED_STATUS) except Exception as e: LOG.error("Failed to clear binding for subport: %s", e) if len(subports) != len(updated_ports): self.set_trunk_status(ctx, trunk_id, t_consts.TRUNK_DEGRADED_STATUS) def trunk_created(self, trunk): ctx = n_ctx.get_admin_context() # handle trunk with parent port supported by # mech driver only trunk_port = self.core_plugin.get_port(ctx, trunk.port_id) if (trunk_port.get(portbindings.VNIC_TYPE) in self.plugin_driver._supported_vnic_types()): LOG.debug('trunk_created: %(trunk)s', {'trunk': trunk}) self._set_sub_ports(trunk.id, trunk.sub_ports) def trunk_deleted(self, trunk): ctx = n_ctx.get_admin_context() # handle trunk with parent port supported by # mech driver only trunk_port = self.core_plugin.get_port(ctx, trunk.port_id) if 
(trunk_port.get(portbindings.VNIC_TYPE) in self.plugin_driver._supported_vnic_types()): LOG.debug('trunk_deleted: %(trunk)s', {'trunk': trunk}) self._unset_sub_ports(trunk.id, trunk.sub_ports) def subports_pre_create(self, context, trunk, subports): LOG.debug('subport_pre_create: %(trunk)s subports : %(sp)s', {'trunk': trunk, 'sp': subports}) ctx = n_ctx.get_admin_context() trunk_port = self.core_plugin.get_port(ctx, trunk.port_id) if (trunk_port.get(portbindings.VNIC_TYPE) in self.plugin_driver._supported_vnic_types()): self._validate_subports_vnic_type(context, trunk, trunk_port, subports) self._validate_subports_not_trunk_net(context, trunk_port, subports) self._validate_same_netpartition(context, trunk_port, subports) for subport in subports: neutron_port = self.core_plugin.get_port(ctx, subport["port_id"]) self._validate_port_fixedip(neutron_port) def trunk_pre_create(self, context, trunk): ctx = n_ctx.get_admin_context() trunk_port = self.core_plugin.get_port(ctx, trunk.port_id) if (trunk_port.get(portbindings.VNIC_TYPE) in self.plugin_driver._supported_vnic_types()): self._validate_port_fixedip(trunk_port) self._validate_no_transparent_network(context, trunk_port) if trunk.sub_ports: self.subports_pre_create(context, trunk, trunk.sub_ports) def subports_added(self, trunk, subports): LOG.debug('subport_added: %(trunk)s subports : %(sp)s', {'trunk': trunk, 'sp': subports}) ctx = n_ctx.get_admin_context() trunk_port = self.core_plugin.get_port(ctx, trunk.port_id) if (trunk_port.get(portbindings.VNIC_TYPE) in self.plugin_driver._supported_vnic_types()): self._set_sub_ports(trunk.id, subports) def subports_deleted(self, trunk, subports): LOG.debug('subport_deleted: %(trunk)s subports : %(sp)s', {'trunk': trunk, 'sp': subports}) ctx = n_ctx.get_admin_context() trunk_port = self.core_plugin.get_port(ctx, trunk.port_id) if (trunk_port.get(portbindings.VNIC_TYPE) in self.plugin_driver._supported_vnic_types()): self._unset_sub_ports(trunk.id, subports) def trunk_event(self, resource, event, trunk_plugin, payload): if event == events.PRECOMMIT_CREATE: self.trunk_pre_create(payload.context, payload.desired_state) elif event == events.AFTER_CREATE: self.trunk_created(payload.states[0]) elif event == events.AFTER_DELETE: self.trunk_deleted(payload.states[0]) def subport_event(self, resource, event, trunk_plugin, payload): if event == events.PRECOMMIT_CREATE: self.subports_pre_create(payload.context, payload.states[0], payload.metadata['subports']) if event == events.AFTER_CREATE: self.subports_added(payload.states[0], payload.metadata['subports']) elif event == events.AFTER_DELETE: self.subports_deleted(payload.states[0], payload.metadata['subports']) class NuageTrunkDriver(trunk_base.DriverBase): @property def is_loaded(self): try: return (p_consts.NUAGE_ML2_BM_DRIVER_NAME in cfg.CONF.ml2.mechanism_drivers) except cfg.NoSuchOptError: return False @registry.receives(resources.TRUNK_PLUGIN, [events.AFTER_INIT]) def register(self, resource, event, trigger, **kwargs): super(NuageTrunkDriver, self).register( resource, event, trigger, **kwargs) self._handler = NuageTrunkHandler(self.plugin_driver) for event in (events.AFTER_CREATE, events.AFTER_DELETE, events.PRECOMMIT_CREATE): registry.subscribe(self._handler.trunk_event, resources.TRUNK, event) registry.subscribe(self._handler.subport_event, resources.SUBPORTS, event) registry.subscribe(self._handler._trunk_status_change, resources.PORT, events.AFTER_UPDATE) @classmethod def create(cls, plugin_driver): cls.plugin_driver = plugin_driver return 
cls(p_consts.NUAGE_ML2_BM_DRIVER_NAME, SUPPORTED_INTERFACES, SUPPORTED_SEGMENTATION_TYPES, None, can_trunk_bound_port=True)
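# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module). It spells out the two
# port-update bodies used above: _process_binding() copies the trunk parent's
# host and binding profile onto each subport, pushes the subport's
# segmentation_id into the profile under 'vlan', and marks the port as a trunk
# subport; _unset_sub_ports() reverses all of that. The concrete values are
# made up for illustration only.
# ---------------------------------------------------------------------------
_EXAMPLE_SUBPORT_BIND_BODY = {
    'port': {
        portbindings.HOST_ID: 'compute-node-1',   # hypothetical trunk parent host
        portbindings.PROFILE: {'vlan': 100},      # subport segmentation_id
        'device_owner': t_consts.TRUNK_SUBPORT_OWNER,
    }
}

_EXAMPLE_SUBPORT_UNBIND_BODY = {
    'port': {
        portbindings.HOST_ID: None,
        portbindings.PROFILE: None,
        'device_owner': '',
        'device_id': '',
    }
}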
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO(mberlin): Remove this file when SplitClone supports merge-sorting
# primary key columns based on the MySQL collation.

"""This test covers the vtworker LegacySplitClone command.

The vtworker LegacySplitClone should only be used when it is necessary to
reshard a table that has textual primary key columns (e.g. VARCHAR). This is
the case for the "timestamps" table in this end-to-end test.

Only LegacySplitClone supports this use case because the new resharding clone
code (as of https://github.com/vitessio/vitess/pull/1796) requires sorting
rows by their primary key. Whereas LegacySplitClone does a simple copy and
always assumes that the tables on the destination are empty, the SplitClone
command can diff the source and destination tables. In the case of a
horizontal resharding this requires merge-sorting multiple destination shards.
Since we currently do not support sorting VARCHAR primary key columns in
SplitClone (due to missing support for MySQL collations), you have to resort
to LegacySplitClone for this use case.

Note that this file was copied from the original resharding.py file.

We start with shards -80 and 80-. We then split 80- into 80-c0 and c0-.

This test is the main resharding test. It not only tests the regular
resharding workflow for a horizontal split, but also a lot of error cases and
side effects, like:
- migrating the traffic one cell at a time.
- migrating rdonly traffic back and forth.
- making sure we can't migrate the master until replica and rdonly are
  migrated.
- has a background thread to insert data during migration.
- tests a destination shard master failover while replication is running.
- tests a filtered replication source replacement while filtered replication
  is running.
- tests 'vtctl SourceShardAdd' and 'vtctl SourceShardDelete'.
- makes sure the key range rules are properly enforced on masters.
""" import struct import logging import unittest import base_sharding import environment import tablet import utils from vtproto import topodata_pb2 from vtdb import keyrange_constants keyspace_id_type = keyrange_constants.KIT_UINT64 pack_keyspace_id = struct.Struct('!Q').pack # initial shards # range '' - 80 shard_0_master = tablet.Tablet() shard_0_replica = tablet.Tablet() shard_0_ny_rdonly = tablet.Tablet(cell='ny') # range 80 - '' shard_1_master = tablet.Tablet() shard_1_slave1 = tablet.Tablet() shard_1_slave2 = tablet.Tablet() shard_1_ny_rdonly = tablet.Tablet(cell='ny') shard_1_rdonly1 = tablet.Tablet() # split shards # range 80 - c0 shard_2_master = tablet.Tablet() shard_2_replica1 = tablet.Tablet() shard_2_replica2 = tablet.Tablet() # range c0 - '' shard_3_master = tablet.Tablet() shard_3_replica = tablet.Tablet() shard_3_rdonly1 = tablet.Tablet() all_tablets = [shard_0_master, shard_0_replica, shard_0_ny_rdonly, shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly, shard_1_rdonly1, shard_2_master, shard_2_replica1, shard_2_replica2, shard_3_master, shard_3_replica, shard_3_rdonly1] def setUpModule(): try: environment.topo_server().setup() setup_procs = [t.init_mysql() for t in all_tablets] utils.Vtctld().start() utils.wait_procs(setup_procs) except: tearDownModule() raise def tearDownModule(): utils.required_teardown() if utils.options.skip_teardown: return teardown_procs = [t.teardown_mysql() for t in all_tablets] utils.wait_procs(teardown_procs, raise_on_error=False) environment.topo_server().teardown() utils.kill_sub_processes() utils.remove_tmp_files() for t in all_tablets: t.remove_tree() class TestResharding(unittest.TestCase, base_sharding.BaseShardingTest): # create_schema will create the same schema on the keyspace # then insert some values def _create_schema(self): if keyspace_id_type == keyrange_constants.KIT_BYTES: t = 'varbinary(64)' else: t = 'bigint(20) unsigned' # Note that the primary key columns are not defined first on purpose to test # that a reordered column list is correctly used everywhere in vtworker. 
create_table_template = '''create table %s( msg varchar(64), custom_ksid_col ''' + t + ''' not null, id bigint not null, parent_id bigint not null, primary key (parent_id, id), index by_msg (msg) ) Engine=InnoDB''' create_view_template = ( 'create view %s' '(id, msg, custom_ksid_col) as select id, msg, custom_ksid_col ' 'from %s') create_timestamp_table = '''create table timestamps( name varchar(64), time_milli bigint(20) unsigned not null, custom_ksid_col ''' + t + ''' not null, primary key (name) ) Engine=InnoDB''' create_unrelated_table = '''create table unrelated( name varchar(64), primary key (name) ) Engine=InnoDB''' utils.run_vtctl(['ApplySchema', '-sql=' + create_table_template % ('resharding1'), 'test_keyspace'], auto_log=True) utils.run_vtctl(['ApplySchema', '-sql=' + create_table_template % ('resharding2'), 'test_keyspace'], auto_log=True) utils.run_vtctl(['ApplySchema', '-sql=' + create_view_template % ('view1', 'resharding1'), 'test_keyspace'], auto_log=True) utils.run_vtctl(['ApplySchema', '-sql=' + create_timestamp_table, 'test_keyspace'], auto_log=True) utils.run_vtctl(['ApplySchema', '-sql=' + create_unrelated_table, 'test_keyspace'], auto_log=True) def _insert_startup_values(self): self._insert_value(shard_0_master, 'resharding1', 1, 'msg1', 0x1000000000000000) self._insert_value(shard_1_master, 'resharding1', 2, 'msg2', 0x9000000000000000) self._insert_value(shard_1_master, 'resharding1', 3, 'msg3', 0xD000000000000000) def _check_startup_values(self): # check first value is in the right shard self._check_value(shard_2_master, 'resharding1', 2, 'msg2', 0x9000000000000000) self._check_value(shard_2_replica1, 'resharding1', 2, 'msg2', 0x9000000000000000) self._check_value(shard_2_replica2, 'resharding1', 2, 'msg2', 0x9000000000000000) self._check_value(shard_3_master, 'resharding1', 2, 'msg2', 0x9000000000000000, should_be_here=False) self._check_value(shard_3_replica, 'resharding1', 2, 'msg2', 0x9000000000000000, should_be_here=False) self._check_value(shard_3_rdonly1, 'resharding1', 2, 'msg2', 0x9000000000000000, should_be_here=False) # check second value is in the right shard too self._check_value(shard_2_master, 'resharding1', 3, 'msg3', 0xD000000000000000, should_be_here=False) self._check_value(shard_2_replica1, 'resharding1', 3, 'msg3', 0xD000000000000000, should_be_here=False) self._check_value(shard_2_replica2, 'resharding1', 3, 'msg3', 0xD000000000000000, should_be_here=False) self._check_value(shard_3_master, 'resharding1', 3, 'msg3', 0xD000000000000000) self._check_value(shard_3_replica, 'resharding1', 3, 'msg3', 0xD000000000000000) self._check_value(shard_3_rdonly1, 'resharding1', 3, 'msg3', 0xD000000000000000) def _insert_lots(self, count, base=0): for i in xrange(count): self._insert_value(shard_1_master, 'resharding1', 10000 + base + i, 'msg-range1-%d' % i, 0xA000000000000000 + base + i) self._insert_value(shard_1_master, 'resharding1', 20000 + base + i, 'msg-range2-%d' % i, 0xE000000000000000 + base + i) # _check_lots returns how many of the values we have, in percents. 
def _check_lots(self, count, base=0): found = 0 for i in xrange(count): if self._is_value_present_and_correct(shard_2_replica2, 'resharding1', 10000 + base + i, 'msg-range1-%d' % i, 0xA000000000000000 + base + i): found += 1 if self._is_value_present_and_correct(shard_3_replica, 'resharding1', 20000 + base + i, 'msg-range2-%d' % i, 0xE000000000000000 + base + i): found += 1 percent = found * 100 / count / 2 logging.debug('I have %d%% of the data', percent) return percent def _check_lots_timeout(self, count, threshold, timeout, base=0): while True: value = self._check_lots(count, base=base) if value >= threshold: return value timeout = utils.wait_step('waiting for %d%% of the data' % threshold, timeout, sleep_time=1) # _check_lots_not_present makes sure no data is in the wrong shard def _check_lots_not_present(self, count, base=0): for i in xrange(count): self._check_value(shard_3_replica, 'resharding1', 10000 + base + i, 'msg-range1-%d' % i, 0xA000000000000000 + base + i, should_be_here=False) self._check_value(shard_2_replica2, 'resharding1', 20000 + base + i, 'msg-range2-%d' % i, 0xE000000000000000 + base + i, should_be_here=False) def test_resharding(self): utils.run_vtctl(['CreateKeyspace', '--sharding_column_name', 'bad_column', '--sharding_column_type', 'bytes', 'test_keyspace']) utils.run_vtctl(['SetKeyspaceShardingInfo', 'test_keyspace', 'custom_ksid_col', 'uint64'], expect_fail=True) utils.run_vtctl(['SetKeyspaceShardingInfo', '-force', 'test_keyspace', 'custom_ksid_col', keyspace_id_type]) shard_0_master.init_tablet('replica', 'test_keyspace', '-80') shard_0_replica.init_tablet('replica', 'test_keyspace', '-80') shard_0_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '-80') shard_1_master.init_tablet('replica', 'test_keyspace', '80-') shard_1_slave1.init_tablet('replica', 'test_keyspace', '80-') shard_1_slave2.init_tablet('replica', 'test_keyspace', '80-') shard_1_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '80-') shard_1_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-') utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True) ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace']) self.assertEqual(ks['sharding_column_name'], 'custom_ksid_col') # we set full_mycnf_args to True as a test in the KIT_BYTES case full_mycnf_args = keyspace_id_type == keyrange_constants.KIT_BYTES # create databases so vttablet can start behaving somewhat normally for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly, shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly, shard_1_rdonly1]: t.create_db('vt_test_keyspace') t.start_vttablet(wait_for_state=None, full_mycnf_args=full_mycnf_args, binlog_use_v3_resharding_mode=False) # wait for the tablets (replication is not setup, they won't be healthy) for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly, shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly, shard_1_rdonly1]: t.wait_for_vttablet_state('NOT_SERVING') # reparent to make the tablets work utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80', shard_0_master.tablet_alias], auto_log=True) utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-', shard_1_master.tablet_alias], auto_log=True) # check the shards shards = utils.run_vtctl_json(['FindAllShardsInKeyspace', 'test_keyspace']) self.assertIn('-80', shards, 'unexpected shards: %s' % str(shards)) self.assertIn('80-', shards, 'unexpected shards: %s' % str(shards)) self.assertEqual(len(shards), 2, 'unexpected shards: %s' % str(shards)) # 
create the tables self._create_schema() self._insert_startup_values() # run a health check on source replicas so they respond to discovery # (for binlog players) and on the source rdonlys (for workers) for t in [shard_0_replica, shard_1_slave1]: utils.run_vtctl(['RunHealthCheck', t.tablet_alias]) for t in [shard_0_ny_rdonly, shard_1_ny_rdonly, shard_1_rdonly1]: utils.run_vtctl(['RunHealthCheck', t.tablet_alias]) # create the split shards shard_2_master.init_tablet('replica', 'test_keyspace', '80-c0') shard_2_replica1.init_tablet('replica', 'test_keyspace', '80-c0') shard_2_replica2.init_tablet('replica', 'test_keyspace', '80-c0') shard_3_master.init_tablet('replica', 'test_keyspace', 'c0-') shard_3_replica.init_tablet('replica', 'test_keyspace', 'c0-') shard_3_rdonly1.init_tablet('rdonly', 'test_keyspace', 'c0-') # start vttablet on the split shards (no db created, # so they're all not serving) shard_2_master.start_vttablet(wait_for_state=None, binlog_use_v3_resharding_mode=False) shard_3_master.start_vttablet(wait_for_state=None, binlog_use_v3_resharding_mode=False) for t in [shard_2_replica1, shard_2_replica2, shard_3_replica, shard_3_rdonly1]: t.start_vttablet(wait_for_state=None, binlog_use_v3_resharding_mode=False) for t in [shard_2_master, shard_2_replica1, shard_2_replica2, shard_3_master, shard_3_replica, shard_3_rdonly1]: t.wait_for_vttablet_state('NOT_SERVING') utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-c0', shard_2_master.tablet_alias], auto_log=True) utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/c0-', shard_3_master.tablet_alias], auto_log=True) # check the shards shards = utils.run_vtctl_json(['FindAllShardsInKeyspace', 'test_keyspace']) for s in ['-80', '80-', '80-c0', 'c0-']: self.assertIn(s, shards, 'unexpected shards: %s' % str(shards)) self.assertEqual(len(shards), 4, 'unexpected shards: %s' % str(shards)) utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True) utils.check_srv_keyspace( 'test_nj', 'test_keyspace', 'Partitions(master): -80 80-\n' 'Partitions(rdonly): -80 80-\n' 'Partitions(replica): -80 80-\n', keyspace_id_type=keyspace_id_type, sharding_column_name='custom_ksid_col') # disable shard_1_slave2, so we're sure filtered replication will go # from shard_1_slave1 utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'spare']) shard_1_slave2.wait_for_vttablet_state('NOT_SERVING') # we need to create the schema, and the worker will do data copying for keyspace_shard in ('test_keyspace/80-c0', 'test_keyspace/c0-'): utils.run_vtctl(['CopySchemaShard', '--exclude_tables', 'unrelated', shard_1_rdonly1.tablet_alias, keyspace_shard], auto_log=True) # --max_tps is only specified to enable the throttler and ensure that the # code is executed. But the intent here is not to throttle the test, hence # the rate limit is set very high. 
utils.run_vtworker(['--cell', 'test_nj', '--command_display_interval', '10ms', '--use_v3_resharding_mode=false', 'LegacySplitClone', '--exclude_tables', 'unrelated', '--min_healthy_rdonly_tablets', '1', '--max_tps', '9999', 'test_keyspace/80-'], auto_log=True) utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'], auto_log=True) # check the startup values are in the right place self._check_startup_values() # check the schema too utils.run_vtctl(['ValidateSchemaKeyspace', '--exclude_tables=unrelated', 'test_keyspace'], auto_log=True) # check the binlog players are running and exporting vars self.check_destination_master(shard_2_master, ['test_keyspace/80-']) self.check_destination_master(shard_3_master, ['test_keyspace/80-']) # check that binlog server exported the stats vars self.check_binlog_server_vars(shard_1_slave1, horizontal=True) # Check that the throttler was enabled. # The stream id is hard-coded as 1, which is the first id generated # through auto-inc. self.check_throttler_service(shard_2_master.rpc_endpoint(), ['BinlogPlayer/1'], 9999) self.check_throttler_service(shard_3_master.rpc_endpoint(), ['BinlogPlayer/1'], 9999) # testing filtered replication: insert a bunch of data on shard 1, # check we get most of it after a few seconds, wait for binlog server # timeout, check we get all of it. logging.debug('Inserting lots of data on source shard') self._insert_lots(1000) logging.debug('Checking 80 percent of data is sent quickly') v = self._check_lots_timeout(1000, 80, 5) if v != 100: # small optimization: only do this check if we don't have all the data # already anyway. logging.debug('Checking all data goes through eventually') self._check_lots_timeout(1000, 100, 20) logging.debug('Checking no data was sent the wrong way') self._check_lots_not_present(1000) self.check_binlog_player_vars(shard_2_master, ['test_keyspace/80-'], seconds_behind_master_max=30) self.check_binlog_player_vars(shard_3_master, ['test_keyspace/80-'], seconds_behind_master_max=30) self.check_binlog_server_vars(shard_1_slave1, horizontal=True, min_statements=1000, min_transactions=1000) # use vtworker to compare the data (after health-checking the destination # rdonly tablets so discovery works) utils.run_vtctl(['RunHealthCheck', shard_3_rdonly1.tablet_alias]) logging.debug('Running vtworker SplitDiff') utils.run_vtworker(['-cell', 'test_nj', '--use_v3_resharding_mode=false', 'SplitDiff', '--exclude_tables', 'unrelated', '--min_healthy_rdonly_tablets', '1', 'test_keyspace/c0-'], auto_log=True) utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'], auto_log=True) utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'], auto_log=True) utils.pause('Good time to test vtworker for diffs') # get status for destination master tablets, make sure we have it all self.check_running_binlog_player(shard_2_master, 4000, 2000) self.check_running_binlog_player(shard_3_master, 4000, 2000) # tests a failover switching serving to a different replica utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'replica']) utils.run_vtctl(['ChangeSlaveType', shard_1_slave1.tablet_alias, 'spare']) shard_1_slave2.wait_for_vttablet_state('SERVING') shard_1_slave1.wait_for_vttablet_state('NOT_SERVING') utils.run_vtctl(['RunHealthCheck', shard_1_slave2.tablet_alias]) # test data goes through again logging.debug('Inserting lots of data on source shard') self._insert_lots(1000, base=1000) logging.debug('Checking 80 percent of data was sent quickly') 
self._check_lots_timeout(1000, 80, 5, base=1000) self.check_binlog_server_vars(shard_1_slave2, horizontal=True, min_statements=800, min_transactions=800) # check we can't migrate the master just yet utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'], expect_fail=True) # check query service is off on master 2 and master 3, as filtered # replication is enabled. Even health check that is enabled on # master 3 should not interfere (we run it to be sure). utils.run_vtctl(['RunHealthCheck', shard_3_master.tablet_alias], auto_log=True) for master in [shard_2_master, shard_3_master]: utils.check_tablet_query_service(self, master, False, False) stream_health = utils.run_vtctl_json(['VtTabletStreamHealth', '-count', '1', master.tablet_alias]) logging.debug('Got health: %s', str(stream_health)) self.assertIn('realtime_stats', stream_health) self.assertNotIn('serving', stream_health) # check the destination master 3 is healthy, even though its query # service is not running (if not healthy this would exception out) shard_3_master.get_healthz() # now serve rdonly from the split shards, in test_nj only utils.run_vtctl(['MigrateServedTypes', '--cells=test_nj', 'test_keyspace/80-', 'rdonly'], auto_log=True) utils.check_srv_keyspace('test_nj', 'test_keyspace', 'Partitions(master): -80 80-\n' 'Partitions(rdonly): -80 80-c0 c0-\n' 'Partitions(replica): -80 80-\n', keyspace_id_type=keyspace_id_type, sharding_column_name='custom_ksid_col') utils.check_srv_keyspace('test_ny', 'test_keyspace', 'Partitions(master): -80 80-\n' 'Partitions(rdonly): -80 80-\n' 'Partitions(replica): -80 80-\n', keyspace_id_type=keyspace_id_type, sharding_column_name='custom_ksid_col') utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False) utils.check_tablet_query_service(self, shard_1_ny_rdonly, True, False) utils.check_tablet_query_service(self, shard_1_rdonly1, False, True) # now serve rdonly from the split shards, everywhere utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'rdonly'], auto_log=True) utils.check_srv_keyspace('test_nj', 'test_keyspace', 'Partitions(master): -80 80-\n' 'Partitions(rdonly): -80 80-c0 c0-\n' 'Partitions(replica): -80 80-\n', keyspace_id_type=keyspace_id_type, sharding_column_name='custom_ksid_col') utils.check_srv_keyspace('test_ny', 'test_keyspace', 'Partitions(master): -80 80-\n' 'Partitions(rdonly): -80 80-c0 c0-\n' 'Partitions(replica): -80 80-\n', keyspace_id_type=keyspace_id_type, sharding_column_name='custom_ksid_col') utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False) utils.check_tablet_query_service(self, shard_1_ny_rdonly, False, True) utils.check_tablet_query_service(self, shard_1_rdonly1, False, True) # then serve replica from the split shards destination_shards = ['80-c0', 'c0-'] utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'], auto_log=True) utils.check_srv_keyspace('test_nj', 'test_keyspace', 'Partitions(master): -80 80-\n' 'Partitions(rdonly): -80 80-c0 c0-\n' 'Partitions(replica): -80 80-c0 c0-\n', keyspace_id_type=keyspace_id_type, sharding_column_name='custom_ksid_col') utils.check_tablet_query_service(self, shard_1_slave2, False, True) # move replica back and forth utils.run_vtctl( ['MigrateServedTypes', '-reverse', 'test_keyspace/80-', 'replica'], auto_log=True) # After a backwards migration, queryservice should be enabled on # source and disabled on destinations utils.check_tablet_query_service(self, shard_1_slave2, True, False) # Destination tablets would have query service disabled for other 
# reasons than the migration, so check the shard record instead of # the tablets directly. utils.check_shard_query_services(self, 'test_nj', 'test_keyspace', destination_shards, topodata_pb2.REPLICA, False) utils.check_shard_query_services(self, 'test_ny', 'test_keyspace', destination_shards, topodata_pb2.REPLICA, False) utils.check_srv_keyspace('test_nj', 'test_keyspace', 'Partitions(master): -80 80-\n' 'Partitions(rdonly): -80 80-c0 c0-\n' 'Partitions(replica): -80 80-\n', keyspace_id_type=keyspace_id_type, sharding_column_name='custom_ksid_col') utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'], auto_log=True) # After a forwards migration, queryservice should be disabled on # source and enabled on destinations utils.check_tablet_query_service(self, shard_1_slave2, False, True) # Destination tablets would have query service disabled for other # reasons than the migration, so check the shard record instead of # the tablets directly utils.check_shard_query_services(self, 'test_nj', 'test_keyspace', destination_shards, topodata_pb2.REPLICA, True) utils.check_shard_query_services(self, 'test_ny', 'test_keyspace', destination_shards, topodata_pb2.REPLICA, True) utils.check_srv_keyspace('test_nj', 'test_keyspace', 'Partitions(master): -80 80-\n' 'Partitions(rdonly): -80 80-c0 c0-\n' 'Partitions(replica): -80 80-c0 c0-\n', keyspace_id_type=keyspace_id_type, sharding_column_name='custom_ksid_col') # use vtworker to compare the data again logging.debug('Running vtworker SplitDiff') utils.run_vtworker(['-cell', 'test_nj', '--use_v3_resharding_mode=false', 'SplitDiff', '--exclude_tables', 'unrelated', '--min_healthy_rdonly_tablets', '1', 'test_keyspace/c0-'], auto_log=True) utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'], auto_log=True) utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'], auto_log=True) # mock with the SourceShard records to test 'vtctl SourceShardDelete' # and 'vtctl SourceShardAdd' utils.run_vtctl(['SourceShardDelete', 'test_keyspace/c0-', '1'], auto_log=True) utils.run_vtctl(['SourceShardAdd', '--key_range=80-', 'test_keyspace/c0-', '1', 'test_keyspace/80-'], auto_log=True) # then serve master from the split shards, make sure the source master's # query service is now turned off utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'], auto_log=True) utils.check_srv_keyspace('test_nj', 'test_keyspace', 'Partitions(master): -80 80-c0 c0-\n' 'Partitions(rdonly): -80 80-c0 c0-\n' 'Partitions(replica): -80 80-c0 c0-\n', keyspace_id_type=keyspace_id_type, sharding_column_name='custom_ksid_col') utils.check_tablet_query_service(self, shard_1_master, False, True) # check the binlog players are gone now self.check_no_binlog_player(shard_2_master) self.check_no_binlog_player(shard_3_master) # delete the original tablets in the original shard tablet.kill_tablets([shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly, shard_1_rdonly1]) for t in [shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly, shard_1_rdonly1]: utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True) utils.run_vtctl(['DeleteTablet', '-allow_master', shard_1_master.tablet_alias], auto_log=True) # rebuild the serving graph, all mentions of the old shards should be gone utils.run_vtctl( ['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True) # test RemoveShardCell utils.run_vtctl( ['RemoveShardCell', 'test_keyspace/-80', 'test_nj'], auto_log=True, expect_fail=True) utils.run_vtctl( ['RemoveShardCell', 
'test_keyspace/80-', 'test_nj'], auto_log=True) utils.run_vtctl( ['RemoveShardCell', 'test_keyspace/80-', 'test_ny'], auto_log=True) shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/80-']) self.assertTrue('cells' not in shard or not shard['cells']) # delete the original shard utils.run_vtctl(['DeleteShard', 'test_keyspace/80-'], auto_log=True) # kill everything tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_ny_rdonly, shard_2_master, shard_2_replica1, shard_2_replica2, shard_3_master, shard_3_replica, shard_3_rdonly1]) if __name__ == '__main__': utils.main()
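# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original test). It shows why the
# startup rows above land where _check_startup_values() expects them: with a
# uint64 keyspace id, the destination shard is picked by comparing the id
# against the hex-named key ranges, so 0x90... falls in 80-c0 and 0xD0... in
# c0-. The helper is made up for illustration; real routing is done by Vitess.
# ---------------------------------------------------------------------------
def _example_shard_for_keyspace_id(ksid):
    """Return the destination shard name for a uint64 keyspace id."""
    if ksid < 0x8000000000000000:
        return '-80'
    elif ksid < 0xC000000000000000:
        return '80-c0'
    return 'c0-'

assert _example_shard_for_keyspace_id(0x1000000000000000) == '-80'    # msg1
assert _example_shard_for_keyspace_id(0x9000000000000000) == '80-c0'  # msg2
assert _example_shard_for_keyspace_id(0xD000000000000000) == 'c0-'    # msg3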
# -*- coding: utf-8 -*- # (c) 2009-2022 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license.php """Unit test for lock_manager.py""" import os import sys import unittest from tempfile import gettempdir from time import sleep from wsgidav.dav_error import DAVError from wsgidav.lock_man import lock_manager, lock_storage from wsgidav.lock_man.lock_storage_redis import LockStorageRedis # ======================================================================== # BasicTest # ======================================================================== class BasicTest(unittest.TestCase): """Test lock_manager.LockManager().""" principal = "Joe Tester" owner = b"[email protected]" root = "/dav/res" timeout = 10 * 60 # Default lock timeout 10 minutes # @classmethod # def suite(cls): # """Return test case suite (so we can control the order).""" # suite = TestSuite() # suite.addTest(cls("testPreconditions")) # # suite.addTest(cls("testOpen")) # suite.addTest(cls("testValidation")) # suite.addTest(cls("testLock")) # suite.addTest(cls("testTimeout")) # suite.addTest(cls("testConflict")) # return suite def setUp(self): storage = lock_storage.LockStorageDict() self.lm = lock_manager.LockManager(storage) self.lm._verbose = 1 def tearDown(self): del self.lm def _acquire( self, url, lock_type, lock_scope, lock_depth, lock_owner, timeout, principal, token_list, ): """Wrapper for lm.acquire, that returns None instead of raising DAVError.""" try: return self.lm.acquire( url=url, lock_type=lock_type, lock_scope=lock_scope, lock_depth=lock_depth, lock_owner=lock_owner, timeout=timeout, principal=principal, token_list=token_list, ) except DAVError: return None def _isLockDict(self, o): try: _ = o["root"] # noqa F841 except Exception: return False return True def _isLockResultOK(self, resultTupleList): """Return True, if result is [ (lock_dict, None) ].""" try: return ( len(resultTupleList) == 1 and len(resultTupleList) == 2 and self._isLockDict(resultTupleList[0][0]) and resultTupleList[0][1] is None ) except Exception: return False def _isLockResultFault(self, lock, conflictList, status=None): """Return True, if it is a valid result tuple containing a DAVError.""" try: if lock is not None: return False if len(conflictList) < 1: return False resultTuple = conflictList[0] if ( len(resultTuple) != 2 or not self._isLockDict(resultTuple[0]) or not isinstance(resultTuple[1], DAVError) ): return False elif status and status != DAVError.value: return False return True except Exception: return False def testPreconditions(self): """Environment must be set.""" self.assertTrue( __debug__, "__debug__ must be True, otherwise asserts are ignored" ) # def testOpen(self): # """Lock manager should be lazy opening on first access.""" # lm = self.lm # # assert not lm._loaded, "LM must only be opened after first access" # lm._generate_lock(self.principal, "write", "exclusive", "infinity", # self.owner, # "/dav", # 10) # assert lm._loaded, "LM must be opened after first access" def testValidation(self): """Lock manager should raise errors on bad args.""" lm = self.lm self.assertRaises( AssertionError, lm._generate_lock, lm, "writeX", "exclusive", "infinity", self.owner, self.root, self.timeout, ) self.assertRaises( AssertionError, lm._generate_lock, lm, "write", "exclusiveX", "infinity", self.owner, self.root, self.timeout, ) self.assertRaises( AssertionError, lm._generate_lock, lm, "write", "exclusive", "infinityX", self.owner, 
self.root, self.timeout, ) self.assertRaises( AssertionError, lm._generate_lock, lm, "write", "exclusive", "infinity", None, self.root, self.timeout, ) self.assertRaises( AssertionError, lm._generate_lock, lm, "write", "exclusive", "infinity", self.owner, None, self.timeout, ) # assert lm._dict is None, "No locks should have been created by this test" def testLock(self): """Lock manager should create and find locks.""" lm = self.lm url = "/dav/res" # Create a new lock lock_dict = lm._generate_lock( self.principal, "write", "exclusive", "infinity", self.owner, url, self.timeout, ) # Check returned dictionary assert lock_dict is not None assert lock_dict["root"] == url assert lock_dict["type"] == "write" assert lock_dict["scope"] == "exclusive" assert lock_dict["depth"] == "infinity" assert lock_dict["owner"] == self.owner assert lock_dict["principal"] == self.principal # Test lookup tok = lock_dict.get("token") assert lm.get_lock(tok, key="root") == url lock_dict = lm.get_lock(tok) assert lock_dict is not None assert lock_dict["root"] == url assert lock_dict["type"] == "write" assert lock_dict["scope"] == "exclusive" assert lock_dict["depth"] == "infinity" assert lock_dict["owner"] == self.owner assert lock_dict["principal"] == self.principal # We locked "/dav/res", did we? assert lm.is_token_locked_by_user(tok, self.principal) # res = lm.get_url_lock_list(url, self.principal) res = lm.get_url_lock_list(url) self.assertEqual(len(res), 1) # res = lm.get_url_lock_list(url, "another user") # assert len(res) == 0 assert lm.is_url_locked_by_token( "/dav/res", tok ), "url not directly locked by lock_token." assert lm.is_url_locked_by_token( "/dav/res/", tok ), "url not directly locked by lock_token." assert lm.is_url_locked_by_token( "/dav/res/sub", tok ), "child url not indirectly locked" assert not lm.is_url_locked_by_token( "/dav/ressub", tok ), "non-child url reported as locked" assert not lm.is_url_locked_by_token( "/dav", tok ), "parent url reported as locked" assert not lm.is_url_locked_by_token( "/dav/", tok ), "parent url reported as locked" def testTimeout(self): """Locks should be purged after expiration date.""" lm = self.lm timeout = 1 lock_dict = lm._generate_lock( self.principal, "write", "exclusive", "infinity", self.owner, self.root, timeout, ) assert lock_dict is not None tok = lock_dict.get("token") assert lm.get_lock(tok, key="root") == self.root sleep(timeout - 0.5) lock_dict = lm.get_lock(tok) assert lock_dict is not None, "Lock expired too early" sleep(1) lock_dict = lm.get_lock(tok) assert lock_dict is None, "Lock has not expired" def testConflict(self): """Locks should prevent conflicts.""" token_list = [] # Create a lock for '/dav/res/' lock = self._acquire( "/dav/res/", "write", "exclusive", "infinity", self.owner, self.timeout, self.principal, token_list, ) assert lock, "Could not acquire lock" # Try to lock with a slightly different URL (without trailing '/') lock = self._acquire( "/dav/res", "write", "exclusive", "infinity", self.owner, self.timeout, "another principal", token_list, ) assert lock is None, "Could acquire a conflicting lock" # Try to lock with another principal lock = self._acquire( "/dav/res/", "write", "exclusive", "infinity", self.owner, self.timeout, "another principal", token_list, ) assert lock is None, "Could acquire a conflicting lock" # Try to lock child with another principal lock = self._acquire( "/dav/res/sub", "write", "exclusive", "infinity", self.owner, self.timeout, "another principal", token_list, ) assert lock is None, "Could acquire 
a conflicting child lock" # Try to lock parent with same principal lock = self._acquire( "/dav/", "write", "exclusive", "infinity", self.owner, self.timeout, self.principal, token_list, ) assert lock is None, "Could acquire a conflicting parent lock" # Try to lock child with same principal lock = self._acquire( "/dav/res/sub", "write", "exclusive", "infinity", self.owner, self.timeout, self.principal, token_list, ) assert lock is None, "Could acquire a conflicting child lock (same principal)" # ======================================================================== # ShelveTest # ======================================================================== class ShelveTest(BasicTest): """Test lock_manager.ShelveLockManager().""" def setUp(self): if sys.version_info < (3, 0): modifier = "-py2" # shelve formats are incompatible else: modifier = "-py3" self.path = os.path.join( gettempdir(), "wsgidav-locks{}.shelve".format(modifier) ) storage = lock_storage.LockStorageShelve(self.path) self.lm = lock_manager.LockManager(storage) self.lm._verbose = 2 def tearDown(self): self.lm.storage.clear() self.lm = None # Note: os.remove(self.path) does not work, because Shelve may append # a file extension. # if os.path.exists(self.path): # os.remove(self.path) class RedisTest(BasicTest): _redis_connect_failed = None def setUp(self): if RedisTest._redis_connect_failed: raise unittest.SkipTest("Test requires a running redis instance (again)") try: import redis r = redis.Redis() r.ping() except redis.exceptions.ConnectionError: RedisTest._redis_connect_failed = True raise unittest.SkipTest("Test requires a running redis instance") storage = LockStorageRedis() self.lm = lock_manager.LockManager(storage) self.lm._verbose = 2 def tearDown(self): self.lm.storage.clear() self.lm = None # ======================================================================== # suite # ======================================================================== # def suite(): # """Return suites of all test cases.""" # return TestSuite([BasicTest.suite(), # ShelveTest.suite(), # ]) if __name__ == "__main__": unittest.main() # suite = suite() # TextTestRunner(descriptions=0, verbosity=2).run(suite)
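# Minimal usage sketch of the LockManager API exercised by the tests above.
# Hedged illustration only: it assumes acquire() returns the new lock dict on
# success and raises DAVError on conflict (as the _acquire() wrapper implies),
# and that lock dicts carry the "token" key checked in testLock().
from wsgidav.dav_error import DAVError
from wsgidav.lock_man import lock_manager, lock_storage

lm = lock_manager.LockManager(lock_storage.LockStorageDict())
try:
    lock = lm.acquire(
        url="/dav/res",
        lock_type="write",
        lock_scope="exclusive",
        lock_depth="infinity",
        lock_owner=b"[email protected]",
        timeout=600,
        principal="Joe Tester",
        token_list=[],
    )
except DAVError:
    lock = None

if lock:
    token = lock["token"]
    # A depth-infinity write lock on /dav/res also covers its children ...
    assert lm.is_url_locked_by_token("/dav/res/sub", token)
    # ... but not siblings such as /dav/ressub, nor the parent /dav.
    assert not lm.is_url_locked_by_token("/dav/ressub", token)
    assert not lm.is_url_locked_by_token("/dav", token)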
''' Copyright (C) 2014 Parrot SA Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Parrot nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ''' from ARFuncs import * from Common_CheckBootstrap import * from Common_CheckConfigure import * from Common_RemoveVersionsFromSo import * def Common_GetConfigureDir(lib): # Find library configure dir if Common_ValidAutotoolsDirectory(lib.path + '/Build'): LibConfigureDir = lib.path + '/Build' elif Common_ValidAutotoolsDirectory(lib.path): LibConfigureDir = lib.path elif lib.customBuild is not None: LibConfigureDir = lib.path else: LibConfigureDir = None return LibConfigureDir def Common_IsConfigureLibrary(lib): return Common_GetConfigureDir(lib) is not None def Common_MergeConfigureArgs(args): variables = {} newArgs = [] for arg in args: match = re.match(r'[A-Z]+=', arg) if match is not None: var = match.group(0).strip('=') val = arg.strip(var).strip('=').strip('"') if var in variables: gval = variables[var] + ' ' + val variables[var] = gval else: variables[var] = val else: newArgs.append(arg) for var in variables: newArg = var + '="' + variables[var] + '"' newArgs.append(newArg) return newArgs def Common_BuildConfigureLibrary(target, lib, extraArgs=[], clean=False, debug=False, inhouse=False, confdirSuffix='', installSubDir='', isLib=True, stripVersionNumber=False, noSharedObjects=False): args = dict(locals()) prefix = 'lib' if (isLib and (not lib.name.startswith('lib')) ) else '' suffix = '_dbg' if debug else '' if not target.needsToBuild(lib): ARLog('Skipping %(prefix)s%(lib)s build : already built for target %(target)s' % locals()) return True StartDumpArgs(**args) # Build actual library # Sanity checks if not os.path.exists(lib.path): ARLog('Unable to build ' + lib.name + ' : directory ' + lib.path + ' does not exists') return EndDumpArgs(res=False, **args) # Generate directory names TargetDir = ARPathFromHere('Targets/%(target)s' % locals()) ConfigureDir = '%(TargetDir)s/Build/%(prefix)s%(lib)s' % locals() if confdirSuffix: ConfigureDir += '_%(confdirSuffix)s' % locals() ConfigureDirDbg = '%(ConfigureDir)s_dbg' % locals() InstallDir = '%(TargetDir)s/Install' % locals() if installSubDir: InstallDir += '/%(installSubDir)s' % locals() # Generate configure args ConfigureArgs 
= ['--prefix=%(InstallDir)s' % locals()] ConfigureArgs.extend(lib.extraConfFlags) ConfigureArgs.extend(extraArgs) # TEMP ALWAYS USE -g !!! ForceDebugFlags = [ 'CFLAGS=" -g"' ] ConfigureArgs.extend(ForceDebugFlags) # END OF TEMP ALWAYS USE -g !!! if inhouse: InHouseFlags = [ 'CFLAGS=" -D_IN_HOUSE"', 'CPPFLAGS=" -D_IN_HOUSE"', 'OBJCFLAGS=" -D_IN_HOUSE"' ] ConfigureArgs.extend(InHouseFlags) ConfigureArgs = Common_MergeConfigureArgs(ConfigureArgs) ConfigureArgsDbg = ConfigureArgs + ['--enable-debug'] # Get path for install program InstallBinPath = os.popen('which install').read().strip() if InstallBinPath is not None: ConfigureArgs.append('INSTALL="%(InstallBinPath)s -C"' % locals()) ConfigureArgsDbg.append('INSTALL="%(InstallBinPath)s -C"' % locals()) # Find library configure dir LibConfigureDir = Common_GetConfigureDir(lib) if LibConfigureDir is None: ARLog('Don\'t know how to build %(prefix)s%(lib)s for %(target)s' % locals()) return EndDumpArgs(res=False, **args) # Find library custom script if lib.customBuild is not None: CustomBuildPath = lib.customBuild CustomBuildScript = '%(LibConfigureDir)s/../%(CustomBuildPath)s' % locals() ARLog('Custom build %(CustomBuildScript)s' % locals()) if not os.path.exists(CustomBuildScript): ARLog('Failed to customBuild check %(prefix)s%(lib)s' % locals()) return EndDumpArgs(res=False, **args) # Check bootstrap status of the directory if lib.customBuild is None: if not Common_CheckBootstrap(LibConfigureDir) or not os.path.exists('%(LibConfigureDir)s/configure' % locals()): ARLog('Failed to bootstrap %(prefix)s%(lib)s' % locals()) return EndDumpArgs(res=False, **args) # Replace %{ARSDK_INSTALL_DIR}% Argn = len(ConfigureArgs) index = 0 while index < Argn: arg = ConfigureArgs[index] #print 'ARG ' + arg match = re.search('%\{[a-zA-Z_]*\}%', arg) if match is not None: print 'MATCH ' + match.group(0) print 'REPLACE ' + re.sub('%\{.*\}%', InstallDir, arg) ConfigureArgs[index] = re.sub('%\{[a-zA-Z_]*\}%', InstallDir, arg) index = index + 1 if not clean: mdir = None #Custom Build if lib.customBuild is not None: CustomBuildArg = ConfigureArgs if debug: CustomBuildArg = ConfigureArgsDbg if not ARExecute(CustomBuildScript + ' ' + ARListAsBashArg(CustomBuildArg), failOnError=False): ARLog('Failed to build %(prefix)s%(lib)s' % locals()) return EndDumpArgs(res=False, **args) else: return EndDumpArgs(res=True, **args) if not debug: # Check configure(release) if not Common_CheckConfigure(lib, LibConfigureDir, ConfigureDir, ConfigureArgs, lib.confdeps): return EndDumpArgs(res=False, **args) mdir = Chdir(ConfigureDir) else: if not Common_CheckConfigure(lib, LibConfigureDir, ConfigureDirDbg, ConfigureArgsDbg, lib.confdeps): return EndDumpArgs(res=False, **args) mdir = Chdir(ConfigureDirDbg) # Make if not ARExecute(os.environ.get('ARMAKE') + ' install', failOnError=False): ARLog('Failed to build %(prefix)s%(lib)s' % locals()) mdir.exit() return EndDumpArgs(res=False, **args) # Strip version number if requested if not noSharedObjects: # Get all .so name InstallOut = ARExecuteGetStdout(['make', 'install']).replace('\n', ' ') soregex = r'lib[a-z]*' + suffix + '\.' 
+ target.soext + r'\ ' for somatch in re.finditer(soregex, InstallOut): soname = somatch.group().strip() if soname not in lib.soLibs: lib.soLibs.append(soname) if stripVersionNumber: extLibDir='%(InstallDir)s/lib' % locals() for soname in lib.soLibs: sopath = '%(extLibDir)s/%(soname)s' % locals() if not Common_RemoveVersionsFromSo(sopath, target.soext, lib.soLibs): ARLog('Error while removing versioning informations of %(sopath)s' % locals()) return EndDumpArgs(res=False, **args) # Rename lib to _dbg if not already done (ext libraries in debug mode) if lib.ext and debug: extLibDir='%(InstallDir)s/lib' % locals() for soname in lib.soLibs: if not soname.endswith('_dbg.' + target.soext): soname_dbg = re.sub('\.' + target.soext + '$', '_dbg.' + target.soext, soname) extLibDbg='%(extLibDir)s/%(soname_dbg)s' % locals() extLibNDbg='%(extLibDir)s/%(soname)s' % locals() ARCopyAndReplaceFile(extLibNDbg, extLibDbg) mdir.exit() else: if os.path.exists ('%(ConfigureDirDbg)s/Makefile' % locals()): cdir = Chdir(ConfigureDirDbg) ARExecute(os.environ.get('ARMAKE') + ' uninstall') ARExecute(os.environ.get('ARMAKE') + ' clean') cdir.exit () if os.path.exists ('%(ConfigureDir)s/Makefile' % locals()): cdir = Chdir(ConfigureDir) ARExecute(os.environ.get('ARMAKE') + ' uninstall') ARExecute(os.environ.get('ARMAKE') + ' clean') cdir.exit () return EndDumpArgs(res=True, **args)
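# Common_MergeConfigureArgs above folds repeated VAR="..." assignments (for
# example the forced CFLAGS=" -g" plus the in-house CFLAGS=" -D_IN_HOUSE")
# into one combined assignment, so ./configure sees a single CFLAGS value.
# Simplified standalone re-implementation for demonstration only; the function
# name and sample values below are illustrative.
import re

def merge_configure_args(args):
    variables, new_args = {}, []
    for arg in args:
        match = re.match(r'[A-Z]+=', arg)
        if match:
            var = match.group(0).rstrip('=')
            val = arg[len(match.group(0)):].strip('"')
            variables[var] = (variables[var] + ' ' + val) if var in variables else val
        else:
            new_args.append(arg)
    new_args.extend('%s="%s"' % (var, val) for var, val in variables.items())
    return new_args

print(merge_configure_args([
    '--prefix=/tmp/install',
    'CFLAGS=" -g"',
    'CFLAGS=" -D_IN_HOUSE"',
]))
# -> ['--prefix=/tmp/install', 'CFLAGS=" -g  -D_IN_HOUSE"']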
#!/usr/bin/python #============================ adjust path ===================================== import sys import os if __name__ == "__main__": here = sys.path[0] sys.path.insert(0, os.path.join(here, '..', '..')) #============================ verify installation ============================= from SmartMeshSDK import SmsdkInstallVerifier (goodToGo,reason) = SmsdkInstallVerifier.verifyComponents( [ SmsdkInstallVerifier.PYTHON, SmsdkInstallVerifier.PYSERIAL, ] ) if not goodToGo: print "Your installation does not allow this application to run:\n" print reason raw_input("Press any button to exit") sys.exit(1) #============================ imports ========================================= import threading from optparse import OptionParser from SmartMeshSDK import AppUtils, \ FormatUtils from SmartMeshSDK.ApiDefinition import IpMgrDefinition from SmartMeshSDK.IpMgrConnectorMux import IpMgrConnectorMux, \ IpMgrSubscribe from dustUI import dustWindow, \ dustFrameConnection, \ dustFrameSensorData #============================ logging ========================================= # local import logging class NullHandler(logging.Handler): def emit(self, record): pass log = logging.getLogger('App') log.setLevel(logging.ERROR) log.addHandler(NullHandler()) # global AppUtils.configureLogging() #============================ defines ========================================= UPDATEPERIOD = 500 # in ms DEFAULT_HOST = '127.0.0.1' DEFAULT_PORT = 9900 #============================ body ============================================ ## # \addtogroup SensorDataReceiver # \{ # class notifClient(object): def __init__(self, connector, disconnectedCallback): # store params self.connector = connector self.disconnectedCallback = disconnectedCallback # variables self.data = None self.dataLock = threading.Lock() # subscriber self.subscriber = IpMgrSubscribe.IpMgrSubscribe(self.connector) self.subscriber.start() self.subscriber.subscribe( notifTypes = [ IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA, ], fun = self._notifDataCallback, isRlbl = False, ) self.subscriber.subscribe( notifTypes = [ IpMgrSubscribe.IpMgrSubscribe.ERROR, IpMgrSubscribe.IpMgrSubscribe.FINISH, ], fun = self.disconnectedCallback, isRlbl = True, ) #======================== public ========================================== def getSensorData(self): self.dataLock.acquire() if self.data: returnVal = self.data.copy() self.data = None else: returnVal = None self.dataLock.release() return returnVal def disconnect(self): self.connector.disconnect() #======================== private ========================================= def _notifDataCallback(self,notifName,notifParams): self.dataLock.acquire() self.data = {} self.data['srcMac'] = notifParams.macAddress self.data['srcPort'] = notifParams.srcPort self.data['destPort'] = notifParams.dstPort self.data['payload'] = notifParams.data self.data['ts_sec'] = notifParams.utcSecs self.data['ts_usec'] = notifParams.utcUsecs self.dataLock.release() class dataGui(object): def __init__(self): # local variables self.guiLock = threading.Lock() self.apiDef = IpMgrDefinition.IpMgrDefinition() self.notifClientHandler = None # create window self.window = dustWindow.dustWindow('SensorDataReceiver', self._windowCb_close) # add a connection frame self.connectionFrame = dustFrameConnection.dustFrameConnection( self.window, self.guiLock, self._connectionFrameCb_connected, frameName="manager connection", row=0,column=0) self.connectionFrame.apiLoaded(self.apiDef) self.connectionFrame.show() # add a sensor data frame self.sensorDataFrame = 
dustFrameSensorData.dustFrameSensorData(self.window, self.guiLock, frameName="received sensor data", row=1,column=0) self.sensorDataFrame.show() #======================== public ========================================== def start(self, connect_params): ''' This command instructs the GUI to start executing and reacting to user interactions. It never returns and should therefore be the last command called. ''' try: self.window.mainloop() except SystemExit: sys.exit() #======================== private ========================================= def _windowCb_close(self): if self.notifClientHandler: self.notifClientHandler.disconnect() def _connectionFrameCb_connected(self,connector): ''' \brief Called when the connectionFrame has connected. ''' # store the connector self.connector = connector # schedule the GUI to update itself in UPDATEPERIOD ms self.sensorDataFrame.after(UPDATEPERIOD,self._updateSensorData) # start a notification client self.notifClientHandler = notifClient( self.connector, self._connectionFrameCb_disconnected ) def _connectionFrameCb_disconnected(self,notifName,notifParams): ''' \brief Called when the connectionFrame has disconnected. ''' # update the GUI self.connectionFrame.updateGuiDisconnected() # delete the connector if self.connector: self.connector.disconnect() self.connector = None def _updateSensorData(self): # get the data sensorDataToDisplay = self.notifClientHandler.getSensorData() # update the frame if sensorDataToDisplay: self.sensorDataFrame.update(sensorDataToDisplay) # schedule the next update self.sensorDataFrame.after(UPDATEPERIOD,self._updateSensorData) #============================ main ============================================ def main(connect_params): dataGuiHandler = dataGui() dataGuiHandler.start(connect_params) if __name__ == '__main__': # Parse the command line parser = OptionParser("usage: %prog [options]", version="%prog 1.0") parser.add_option("--host", dest="host", default=DEFAULT_HOST, help="Mux host to connect to") parser.add_option("-p", "--port", dest="port", default=DEFAULT_PORT, help="Mux port to connect to") (options, args) = parser.parse_args() connect_params = { 'host': options.host, 'port': int(options.port), } main(connect_params) ## # end of SensorDataReceiver # \} #
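# The notifClient above uses a "latest value mailbox": the notification
# callback overwrites self.data under a lock, while the GUI polls
# getSensorData() every UPDATEPERIOD ms, which returns the sample and clears
# the slot. A minimal self-contained sketch of that pattern; class and field
# names are illustrative and not part of the SmartMesh SDK.
import threading

class LatestValueMailbox(object):
    def __init__(self):
        self._lock = threading.Lock()
        self._value = None

    def put(self, value):
        # called from the notification thread; older unread samples are overwritten
        with self._lock:
            self._value = value

    def take(self):
        # called from the GUI polling timer; returns the sample once, then None
        with self._lock:
            value, self._value = self._value, None
            return value

mailbox = LatestValueMailbox()
mailbox.put({'srcMac': [0x00] * 8, 'payload': [0x01, 0x02]})
print(mailbox.take())   # the sample
print(mailbox.take())   # None, already consumed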
# Natural Language Toolkit: SVM-based classifier # # Copyright (C) 2001-2013 NLTK Project # Author: Leon Derczynski <[email protected]> # # URL: <http://www.nltk.org/> # For license information, see LICENSE.TXT """ A classifier based on a support vector machine. This code uses Thorsten Joachims' SVM^light implementation (http://svmlight.joachims.org/), wrapped using PySVMLight (https://bitbucket.org/wcauchois/pysvmlight). The default settings are to train a linear classification kernel, though through minor modification, full SVMlight capabilities should be accessible if needed. Only binary classification is possible at present. """ from __future__ import print_function from nltk import compat from nltk.probability import DictionaryProbDist from nltk.classify.api import ClassifierI # # Interface to Support Vector Machine # try: import svmlight except ImportError: svmlight = None def _raise_if_svmlight_is_missing(): if svmlight is None: raise LookupError( "\n\n===========================================================================\n " "NLTK was unable to import SVMlight!\n\n " "For more information, see <https://bitbucket.org/wcauchois/pysvmlight>\n" "===========================================================================" ) # create a boolean feature name for the SVM from a feature/value pair, # that'll take on a 1.0 value if the original feature:value is asserted. def featurename(feature, value): """ :param feature: a string denoting a feature name :param value: the value of the feature """ return '|'.join([feature, type(value).__name__, str(value)]) # convert a set of NLTK classifier features to SVMlight format def map_features_to_svm(features, svmfeatureindex): """ :param features: a dict of features in the format {'feature':value} :param svmfeatureindex: a mapping from feature:value pairs to integer SVMlight feature labels """ instancefeatures = [] # svmlight supports sparse feature sets and so we simply omit features that we don't include for k,v in compat.iteritems(features): # each feature is represented as an (int, float) tuple where the int is the SVMlight feature label and the float is the value; as we either have or have not a feature, this is 1.0 # this does not support scalar features - rather, each value that a feature may take on is a discrete independent label # use 1.0 as the feature value to specify the presence of a feature:value couple svmfeaturename = featurename(k, v) if svmfeaturename not in svmfeatureindex: # skip over feature:value pairs that were not in the training data and so not included in our mappings continue instancefeatures.append( (svmfeatureindex[svmfeaturename], 1.0) ) return instancefeatures # convert a whole instance (including label) from NLTK to SVMlight format def map_instance_to_svm(instance, labelmapping, svmfeatureindex): """ :param instance: an NLTK format instance, which is in the tuple format (dict(), label), where the dict contains feature:value pairs, and the label signifies the target attribute's value for this instance (e.g. its class) :param labelmapping: a previously-defined dict mapping from text labels in the NLTK instance format to SVMlight labels of either +1 or -1 @svmfeatureindex: a mapping from feature:value pairs to integer SVMlight feature labels """ (features, label) = instance instancefeatures = map_features_to_svm(features, svmfeatureindex) return (labelmapping[label], instancefeatures) class SvmClassifier(ClassifierI): """ A Support Vector Machine classifier. 
To explain briefly, support vector machines (SVM) treat each feature as a dimension, and position features in n-dimensional feature space. An optimal hyperplane is then determined that best divides feature space into classes, and future instances classified based on which side of the hyperplane they lie on, and their proximity to it. This implementation is for a binary SVM - that is, only two classes are supported. You may achieve perform classification with more classes by training an SVM per class and then picking a best option for new instances given results from each binary class-SVM. """ def __init__(self, labels, labelmapping, svmfeatures, model=None): """ :param labels: a list of text labels for classes :param labelmapping: a mapping from labels to SVM classes (-1,+1) :param svmfeatures: a list of svm features, where the index is the integer feature number and the value an feature/value pair :param model: the model generated by svmlight.learn() """ _raise_if_svmlight_is_missing() self._labels = labels self._model = model self._labelmapping = labelmapping self._svmfeatures = svmfeatures # _svmfeatureindex is the inverse of svmfeatures, allowing us # to find an SVM feature name (int) given a feature/value self._svmfeatureindex = dict(zip(svmfeatures, range(len(svmfeatures)))) self._verbose = False def labels(self): """ Return the list of class labels. """ return self._labels def svm_label_name(self, label): """ searches values of _labelmapping to resolve +1 or -1 to a string :param label: the string label to look up """ labelname = [k for k, v in compat.iteritems(self._labelmapping) if v == label][0] return labelname def resolve_prediction(self, prediction): """ resolve a float (in this case, probably from svmlight.learn().classify()) to either -1 or +1, and then look up the label for that class in _labelmapping, and return the text label :param prediction: a signed float describing classifier confidence """ classification = cmp(prediction, 0) return self.svm_label_name(classification) def _get_svm_classification(self, featureset): """ given a set of features, classify them with our trained model and return a signed float :param featureset: a dict of feature/value pairs in NLTK format, representing a single instance """ instance_to_classify = (0, map_features_to_svm(featureset, self._svmfeatureindex)) if self._verbose: print('instance', instance_to_classify) # svmlight.classify expects a list; this should be taken advantage of when writing SvmClassifier.batch_classify / .batch_prob_classify. # it returns a list of floats, too. [prediction] = svmlight.classify(self._model, [instance_to_classify]) return prediction def prob_classify(self, featureset): """ Return a probability distribution of classifications :param featureset: a dict of feature/value pairs in NLTK format, representing a single instance """ if self._model is None: raise Exception('This classifier is not yet trained') return None # do the classification prediction = self._get_svm_classification(featureset) if self._verbose: print('prediction', prediction) # lump it into a boolean class, -1 or +1 predicted_label = cmp(prediction, 0) # sometimes the result is not within -1 ... +1; clip it so # that it is, and we get a sane-looking probability # distribution. 
this will upset some results with non-linear # partitioning where instance-hyperplane distance can be many # orders of magnitude larger; I don't have a fix for that if prediction < -1.0: prediction = -1.0 if prediction > 1.0: prediction = 1.0 # if the prediction is negative, then we will maximise the # value of the -1 class; otherwise, that of the 1 class will # be greater. if predicted_label == 1: distribution = {str(self.resolve_prediction(1)): prediction, str(self.resolve_prediction(-1)): 1 - prediction} else: distribution = {str(self.resolve_prediction(1)): prediction + 1, str(self.resolve_prediction(-1)): -prediction} return DictionaryProbDist(distribution) def classify(self, featureset): """ Use a trained SVM to predict a label given for an unlabelled instance :param featureset: a dict of feature/value pairs in NLTK format, representing a single instance """ prediction = self._get_svm_classification(featureset) if self._verbose: print('prediction', prediction) return self.resolve_prediction(prediction) @staticmethod def train(featuresets): """ given a set of training instances in nltk format: [ ( {feature:value, ..}, str(label) ) ] train a support vector machine :param featuresets: training instances """ _raise_if_svmlight_is_missing() # build a unique list of labels labels = set() for (features, label) in featuresets: labels.add(label) # this is a binary classifier only if len(labels) > 2: raise ValueError('Can only do boolean classification (labels: '+ str(labels) + ')') return False # we need ordering, so a set's no good labels = list(labels) # next, assign -1 and 1 labelmapping = {labels[0]:-1, labels[1]:1} # now for feature conversion # iter through instances, building a set of feature:type:str(value) triples svmfeatures = set() for (features, label) in featuresets: for k,v in compat.iteritems(features): svmfeatures.add(featurename(k, v)) # svmfeatures is indexable by integer svm feature number # svmfeatureindex is the inverse (svm feature name -> number) svmfeatures = list(svmfeatures) svmfeatureindex = dict(zip(svmfeatures, range(len(svmfeatures)))) # build svm feature set case by case svmfeatureset = [] for instance in featuresets: svmfeatureset.append(map_instance_to_svm(instance, labelmapping, svmfeatureindex)) # train the svm # TODO: implement passing of SVMlight parameters from train() to learn() return SvmClassifier(labels, labelmapping, svmfeatures, svmlight.learn(svmfeatureset, type='classification')) def demo(): import random from nltk.classify import accuracy from nltk.corpus import names def gender_features(word): return {'last_letter': word[-1], 'penultimate_letter': word[-2]} names = ([(str(name), 'male') for name in names.words('male.txt')] + [(str(name), 'female') for name in names.words('female.txt')]) random.seed(60221023) random.shuffle(names) featuresets = [(gender_features(n), g) for (n,g) in names] train_set, test_set = featuresets[500:], featuresets[:500] print('--- nltk.classify.svm demo ---') print('Number of training examples:', len(train_set)) classifier = SvmClassifier.train(train_set) print('Total SVM dimensions:', len(classifier._svmfeatureindex)) print('Label mapping:', classifier._labelmapping) print('--- Processing an example instance ---') print('Reference instance:', names[0]) print('NLTK-format features:\n ' + str(test_set[0])) print('SVMlight-format features:\n ' + str(map_instance_to_svm(test_set[0], classifier._labelmapping, classifier._svmfeatureindex))) distr = classifier.prob_classify(test_set[0][0]) print('Instance classification and 
          'confidence:', distr.max(), distr.prob(distr.max()))
    print('--- Measuring classifier performance ---')
    print('Overall accuracy:', accuracy(classifier, test_set))


if __name__ == '__main__':
    demo()
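# One-vs-rest sketch for more than two classes, following the suggestion in
# the SvmClassifier docstring above: train one binary SvmClassifier per label
# (that label vs. a pooled 'other' class) and pick the label whose classifier
# reports the highest confidence. Illustration only, not part of
# nltk.classify.svm; it assumes no real label is literally named 'other' and
# that svmlight is installed so SvmClassifier.train() works as defined above.
from nltk.classify.svm import SvmClassifier

def train_one_vs_rest(featuresets):
    labels = sorted(set(label for (_, label) in featuresets))
    classifiers = {}
    for target in labels:
        # relabel: instances of `target` keep their label, everything else
        # becomes 'other', so each classifier stays binary
        binary = [(feats, label if label == target else 'other')
                  for (feats, label) in featuresets]
        classifiers[target] = SvmClassifier.train(binary)
    return classifiers

def classify_one_vs_rest(classifiers, featureset):
    # score each candidate label by its own classifier's probability
    scores = dict((target, clf.prob_classify(featureset).prob(target))
                  for (target, clf) in classifiers.items())
    return max(scores, key=scores.get)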
# ============================================================================= # Copyright (c) 2016, Cisco Systems, Inc # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. # ============================================================================= from sqlalchemy import exc from handlers.loader import get_inventory_handler_class from handlers.loader import get_install_handler_class from handlers.doc_central import handle_doc_central_logging from context import InstallContext from utils import create_log_directory from utils import get_log_directory from utils import get_file_list from filters import get_datetime_string from mailer import create_email_job from constants import InstallAction from constants import JobStatus from multi_process import WorkUnit from models import Host from models import InstallJob from models import InstallJobHistory from models import SystemOption import traceback import datetime import urllib import os class InstallWorkUnit(WorkUnit): def __init__(self, host_id, job_id): WorkUnit.__init__(self) self.host_id = host_id self.job_id = job_id def get_unique_key(self): return self.host_id def get_inventory(self, ctx, logger): handler_class = get_inventory_handler_class(ctx) if handler_class is None: logger.error('SoftwareManager: Unable to get handler for %s', ctx.host.software_platform) handler_class().get_inventory(ctx) def start(self, db_session, logger, process_name): ctx = None host = None try: install_job = db_session.query(InstallJob).filter(InstallJob.id == self.job_id).first() if install_job is None: # This is normal because of race condition. It means the job is already deleted (completed). 
return if not db_session.query(SystemOption).first().can_install: # This will halt this host that has already been queued return host = db_session.query(Host).filter(Host.id == self.host_id).first() if host is None: logger.error('Unable to retrieve host %s', self.host_id) return install_job.session_log = create_log_directory(host.connection_param[0].host_or_ip, install_job.id) ctx = InstallContext(db_session, host, install_job) handler_class = get_install_handler_class(ctx) if handler_class is None: logger.error('Unable to get handler for %s, install job %s', host.software_platform, self.job_id) install_job.start_time = datetime.datetime.utcnow() install_job.set_status(JobStatus.IN_PROGRESS) # Reset the job_info field especially for a re-submitted job. install_job.save_data('job_info', []) db_session.commit() handler = handler_class() handler.execute(ctx) if ctx.success: try: # Update the software self.get_inventory(ctx, logger) except Exception: pass # Support Doc Central feature for SIT team if install_job.install_action == InstallAction.PRE_UPGRADE or \ install_job.install_action == InstallAction.INSTALL_ADD: install_job.save_data("from_release", ctx.host.software_version) self.archive_install_job(db_session, logger, ctx, host, install_job, JobStatus.COMPLETED, process_name) # Support Doc Central feature for SIT team - must be done after archive_install_job. handle_doc_central_logging(ctx, logger) else: self.archive_install_job(db_session, logger, ctx, host, install_job, JobStatus.FAILED, process_name) except (Exception, exc.InvalidRequestError, exc.SQLAlchemyError): db_session.rollback() try: self.log_exception(logger, host) self.archive_install_job(db_session, logger, ctx, host, install_job, JobStatus.FAILED, process_name, trace=traceback.format_exc()) except Exception: self.log_exception(logger, host) finally: db_session.close() def log_exception(self, logger, host): logger.exception('InstallManager hit exception - hostname = %s, install job = %s', host.hostname if host is not None else 'Unknown', self.job_id) def archive_install_job(self, db_session, logger, ctx, host, install_job, job_status, process_name, trace=None): install_job_history = InstallJobHistory() install_job_history.install_job_id = install_job.id install_job_history.host_id = install_job.host_id install_job_history.install_action = install_job.install_action install_job_history.packages = install_job.packages install_job_history.scheduled_time = install_job.scheduled_time install_job_history.start_time = install_job.start_time install_job_history.set_status(job_status) install_job_history.dependency = install_job.dependency install_job_history.session_log = install_job.session_log install_job_history.created_by = install_job.created_by install_job_history.data = install_job.data install_job_history.trace = trace # Only delete the install job if it is completed successfully. # Failed job should still be retained in the InstallJob table. 
if job_status == JobStatus.COMPLETED: db_session.delete(install_job) else: install_job.set_status(job_status) if trace is not None: install_job.trace = trace db_session.add(install_job_history) db_session.commit() # Send notification error self.create_email_notification(db_session, logger, host, install_job_history) def get_job_datetime_string(self, job_time): datetime_string = get_datetime_string(job_time) return 'Unknown' if datetime_string is None else datetime_string def create_email_notification(self, db_session, logger, host, install_job): try: session_log_link = "log/hosts/{}/install_job_history/session_log/{}?file_path={}".format( urllib.quote(host.hostname), install_job.id, install_job.session_log) message = '<html><head></head><body>' if install_job.status == JobStatus.COMPLETED: message += 'The scheduled installation for host "' + host.hostname + '" has COMPLETED.<br><br>' elif install_job.status == JobStatus.FAILED: message += 'The scheduled installation for host "' + host.hostname + '" has FAILED.<br><br>' message += 'Scheduled Time: ' + \ self.get_job_datetime_string(install_job.scheduled_time) + \ ' UTC<br>' message += 'Start Time: ' + \ self.get_job_datetime_string(install_job.start_time) + \ ' UTC<br>' message += 'Install Action: ' + install_job.install_action + '<br><br>' message = self.check_command_file_diff(install_job, message) session_log_url = SystemOption.get(db_session).base_url + '/' + session_log_link message += 'For more information, click the link below<br><br>' message += session_log_url + '<br><br>' if install_job.packages is not None and len(install_job.packages) > 0: message += 'Followings are the software packages: <br><br>' + install_job.packages.replace(',','<br>') message += '</body></html>' create_email_job(db_session, logger, message, install_job.created_by) except Exception: logger.exception('create_email_notification() hit exception') def check_command_file_diff(self, install_job, message): if install_job.session_log is not None: file_suffix = '.diff.html' file_list = get_file_list(os.path.join(get_log_directory(), install_job.session_log)) diff_file_list = [file for file in file_list if file_suffix in file] if len(diff_file_list) > 0: message += 'The following command outputs have changed between different installation phases<br><br>' for file in diff_file_list: message += file.replace(file_suffix, '') + '<br>' message += '<br>' return message
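# The get_unique_key() override above returns the host id, which suggests the
# surrounding job framework uses it to keep at most one install job in flight
# per host. A minimal sketch of that dedup idea; illustrative only, since the
# real multi_process.WorkUnit dispatcher is not shown in this module.

def dispatch(work_units, in_flight_keys, start_fn):
    """Start only work units whose unique key is not already in flight."""
    started = []
    for unit in work_units:
        key = unit.get_unique_key()
        if key in in_flight_keys:
            continue  # an install is already running against this host
        in_flight_keys.add(key)
        start_fn(unit)
        started.append(unit)
    return started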
''' Spatial Two Stages Least Squares ''' __author__ = "Luc Anselin [email protected], David C. Folch [email protected]" import copy import numpy as np import pysal import numpy.linalg as la import twosls as TSLS import robust as ROBUST import user_output as USER import summary_output as SUMMARY from utils import get_lags, set_endog, sp_att, set_warn __all__ = ["GM_Lag"] class BaseGM_Lag(TSLS.BaseTSLS): """ Spatial two stage least squares (S2SLS) (note: no consistency checks, diagnostics or constant added); Anselin (1988) [1]_ Parameters ---------- y : array nx1 array for dependent variable x : array Two dimensional array with n rows and one column for each independent (exogenous) variable, excluding the constant yend : array Two dimensional array with n rows and one column for each endogenous variable q : array Two dimensional array with n rows and one column for each external exogenous variable to use as instruments (note: this should not contain any variables from x); cannot be used in combination with h w : Sparse matrix Spatial weights sparse matrix w_lags : integer Orders of W to include as instruments for the spatially lagged dependent variable. For example, w_lags=1, then instruments are WX; if w_lags=2, then WX, WWX; and so on. lag_q : boolean If True, then include spatial lags of the additional instruments (q). robust : string If 'white', then a White consistent estimator of the variance-covariance matrix is given. If 'hac', then a HAC consistent estimator of the variance-covariance matrix is given. Default set to None. gwk : pysal W object Kernel spatial weights needed for HAC estimation. Note: matrix must have ones along the main diagonal. sig2n_k : boolean If True, then use n-k to estimate sigma^2. If False, use n. Attributes ---------- betas : array kx1 array of estimated coefficients u : array nx1 array of residuals predy : array nx1 array of predicted y values n : integer Number of observations k : integer Number of variables for which coefficients are estimated (including the constant) kstar : integer Number of endogenous variables. y : array nx1 array for dependent variable x : array Two dimensional array with n rows and one column for each independent (exogenous) variable, including the constant yend : array Two dimensional array with n rows and one column for each endogenous variable q : array Two dimensional array with n rows and one column for each external exogenous variable used as instruments z : array nxk array of variables (combination of x and yend) h : array nxl array of instruments (combination of x and q) mean_y : float Mean of dependent variable std_y : float Standard deviation of dependent variable vm : array Variance covariance matrix (kxk) utu : float Sum of squared residuals sig2 : float Sigma squared used in computations sig2n : float Sigma squared (computed with n in the denominator) sig2n_k : float Sigma squared (computed with n-k in the denominator) hth : float H'H hthi : float (H'H)^-1 varb : array (Z'H (H'H)^-1 H'Z)^-1 zthhthi : array Z'H(H'H)^-1 pfora1a2 : array n(zthhthi)'varb References ---------- .. [1] Anselin, L. (1988) "Spatial Econometrics: Methods and Models". Kluwer Academic Publishers. Dordrecht. 
Examples -------- >>> import numpy as np >>> import pysal >>> import pysal.spreg.diagnostics as D >>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp")) >>> w.transform = 'r' >>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r') >>> y = np.array(db.by_col("HOVAL")) >>> y = np.reshape(y, (49,1)) >>> # no non-spatial endogenous variables >>> X = [] >>> X.append(db.by_col("INC")) >>> X.append(db.by_col("CRIME")) >>> X = np.array(X).T >>> w_lags = 2 >>> yd2, q2 = pysal.spreg.utils.set_endog(y, X, w, None, None, w_lags, True) >>> X = np.hstack((np.ones(y.shape),X)) >>> reg=BaseGM_Lag(y, X, yend=yd2, q=q2, w=w.sparse, w_lags=w_lags) >>> reg.betas array([[ 45.30170561], [ 0.62088862], [ -0.48072345], [ 0.02836221]]) >>> D.se_betas(reg) array([ 17.91278862, 0.52486082, 0.1822815 , 0.31740089]) >>> reg=BaseGM_Lag(y, X, yend=yd2, q=q2, w=w.sparse, w_lags=w_lags, robust='white') >>> reg.betas array([[ 45.30170561], [ 0.62088862], [ -0.48072345], [ 0.02836221]]) >>> D.se_betas(reg) array([ 20.47077481, 0.50613931, 0.20138425, 0.38028295]) >>> # instrument for HOVAL with DISCBD >>> X = np.array(db.by_col("INC")) >>> X = np.reshape(X, (49,1)) >>> yd = np.array(db.by_col("CRIME")) >>> yd = np.reshape(yd, (49,1)) >>> q = np.array(db.by_col("DISCBD")) >>> q = np.reshape(q, (49,1)) >>> yd2, q2 = pysal.spreg.utils.set_endog(y, X, w, yd, q, w_lags, True) >>> X = np.hstack((np.ones(y.shape),X)) >>> reg=BaseGM_Lag(y, X, w=w.sparse, yend=yd2, q=q2, w_lags=w_lags) >>> reg.betas array([[ 100.79359082], [ -0.50215501], [ -1.14881711], [ -0.38235022]]) >>> D.se_betas(reg) array([ 53.0829123 , 1.02511494, 0.57589064, 0.59891744]) """ def __init__(self, y, x, yend=None, q=None, w=None, w_lags=1, lag_q=True, robust=None, gwk=None, sig2n_k=False): TSLS.BaseTSLS.__init__(self, y=y, x=x, yend=yend, q=q, robust=robust, gwk=gwk, sig2n_k=sig2n_k) class GM_Lag(BaseGM_Lag): """ Spatial two stage least squares (S2SLS) with results and diagnostics; Anselin (1988) [1]_ Parameters ---------- y : array nx1 array for dependent variable x : array Two dimensional array with n rows and one column for each independent (exogenous) variable, excluding the constant yend : array Two dimensional array with n rows and one column for each endogenous variable q : array Two dimensional array with n rows and one column for each external exogenous variable to use as instruments (note: this should not contain any variables from x); cannot be used in combination with h w : pysal W object Spatial weights object w_lags : integer Orders of W to include as instruments for the spatially lagged dependent variable. For example, w_lags=1, then instruments are WX; if w_lags=2, then WX, WWX; and so on. lag_q : boolean If True, then include spatial lags of the additional instruments (q). robust : string If 'white', then a White consistent estimator of the variance-covariance matrix is given. If 'hac', then a HAC consistent estimator of the variance-covariance matrix is given. Default set to None. gwk : pysal W object Kernel spatial weights needed for HAC estimation. Note: matrix must have ones along the main diagonal. sig2n_k : boolean If True, then use n-k to estimate sigma^2. If False, use n. 
spat_diag : boolean If True, then compute Anselin-Kelejian test vm : boolean If True, include variance-covariance matrix in summary results name_y : string Name of dependent variable for use in output name_x : list of strings Names of independent variables for use in output name_yend : list of strings Names of endogenous variables for use in output name_q : list of strings Names of instruments for use in output name_w : string Name of weights matrix for use in output name_gwk : string Name of kernel weights matrix for use in output name_ds : string Name of dataset for use in output Attributes ---------- summary : string Summary of regression results and diagnostics (note: use in conjunction with the print command) betas : array kx1 array of estimated coefficients u : array nx1 array of residuals e_pred : array nx1 array of residuals (using reduced form) predy : array nx1 array of predicted y values predy_e : array nx1 array of predicted y values (using reduced form) n : integer Number of observations k : integer Number of variables for which coefficients are estimated (including the constant) kstar : integer Number of endogenous variables. y : array nx1 array for dependent variable x : array Two dimensional array with n rows and one column for each independent (exogenous) variable, including the constant yend : array Two dimensional array with n rows and one column for each endogenous variable q : array Two dimensional array with n rows and one column for each external exogenous variable used as instruments z : array nxk array of variables (combination of x and yend) h : array nxl array of instruments (combination of x and q) robust : string Adjustment for robust standard errors mean_y : float Mean of dependent variable std_y : float Standard deviation of dependent variable vm : array Variance covariance matrix (kxk) pr2 : float Pseudo R squared (squared correlation between y and ypred) pr2_e : float Pseudo R squared (squared correlation between y and ypred_e (using reduced form)) utu : float Sum of squared residuals sig2 : float Sigma squared used in computations std_err : array 1xk array of standard errors of the betas z_stat : list of tuples z statistic; each tuple contains the pair (statistic, p-value), where each is a float ak_test : tuple Anselin-Kelejian test; tuple contains the pair (statistic, p-value) name_y : string Name of dependent variable for use in output name_x : list of strings Names of independent variables for use in output name_yend : list of strings Names of endogenous variables for use in output name_z : list of strings Names of exogenous and endogenous variables for use in output name_q : list of strings Names of external instruments name_h : list of strings Names of all instruments used in ouput name_w : string Name of weights matrix for use in output name_gwk : string Name of kernel weights matrix for use in output name_ds : string Name of dataset for use in output title : string Name of the regression method used sig2n : float Sigma squared (computed with n in the denominator) sig2n_k : float Sigma squared (computed with n-k in the denominator) hth : float H'H hthi : float (H'H)^-1 varb : array (Z'H (H'H)^-1 H'Z)^-1 zthhthi : array Z'H(H'H)^-1 pfora1a2 : array n(zthhthi)'varb References ---------- .. [1] Anselin, L. (1988) "Spatial Econometrics: Methods and Models". Kluwer Academic Publishers. Dordrecht. 
Examples -------- We first need to import the needed modules, namely numpy to convert the data we read into arrays that ``spreg`` understands and ``pysal`` to perform all the analysis. Since we will need some tests for our model, we also import the diagnostics module. >>> import numpy as np >>> import pysal >>> import pysal.spreg.diagnostics as D Open data on Columbus neighborhood crime (49 areas) using pysal.open(). This is the DBF associated with the Columbus shapefile. Note that pysal.open() also reads data in CSV format; since the actual class requires data to be passed in as numpy arrays, the user can read their data in using any method. >>> db = pysal.open(pysal.examples.get_path("columbus.dbf"),'r') Extract the HOVAL column (home value) from the DBF file and make it the dependent variable for the regression. Note that PySAL requires this to be an numpy array of shape (n, 1) as opposed to the also common shape of (n, ) that other packages accept. >>> y = np.array(db.by_col("HOVAL")) >>> y = np.reshape(y, (49,1)) Extract INC (income) and CRIME (crime rates) vectors from the DBF to be used as independent variables in the regression. Note that PySAL requires this to be an nxj numpy array, where j is the number of independent variables (not including a constant). By default this model adds a vector of ones to the independent variables passed in, but this can be overridden by passing constant=False. >>> X = [] >>> X.append(db.by_col("INC")) >>> X.append(db.by_col("CRIME")) >>> X = np.array(X).T Since we want to run a spatial error model, we need to specify the spatial weights matrix that includes the spatial configuration of the observations into the error component of the model. To do that, we can open an already existing gal file or create a new one. In this case, we will create one from ``columbus.shp``. >>> w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp")) Unless there is a good reason not to do it, the weights have to be row-standardized so every row of the matrix sums to one. Among other things, this allows to interpret the spatial lag of a variable as the average value of the neighboring observations. In PySAL, this can be easily performed in the following way: >>> w.transform = 'r' This class runs a lag model, which means that includes the spatial lag of the dependent variable on the right-hand side of the equation. If we want to have the names of the variables printed in the output summary, we will have to pass them in as well, although this is optional. The default most basic model to be run would be: >>> reg=GM_Lag(y, X, w=w, w_lags=2, name_x=['inc', 'crime'], name_y='hoval', name_ds='columbus') >>> reg.betas array([[ 45.30170561], [ 0.62088862], [ -0.48072345], [ 0.02836221]]) Once the model is run, we can obtain the standard error of the coefficient estimates by calling the diagnostics module: >>> D.se_betas(reg) array([ 17.91278862, 0.52486082, 0.1822815 , 0.31740089]) But we can also run models that incorporates corrected standard errors following the White procedure. 
For that, we will have to include the optional parameter ``robust='white'``: >>> reg=GM_Lag(y, X, w=w, w_lags=2, robust='white', name_x=['inc', 'crime'], name_y='hoval', name_ds='columbus') >>> reg.betas array([[ 45.30170561], [ 0.62088862], [ -0.48072345], [ 0.02836221]]) And we can access the standard errors from the model object: >>> reg.std_err array([ 20.47077481, 0.50613931, 0.20138425, 0.38028295]) The class is flexible enough to accomodate a spatial lag model that, besides the spatial lag of the dependent variable, includes other non-spatial endogenous regressors. As an example, we will assume that CRIME is actually endogenous and we decide to instrument for it with DISCBD (distance to the CBD). We reload the X including INC only and define CRIME as endogenous and DISCBD as instrument: >>> X = np.array(db.by_col("INC")) >>> X = np.reshape(X, (49,1)) >>> yd = np.array(db.by_col("CRIME")) >>> yd = np.reshape(yd, (49,1)) >>> q = np.array(db.by_col("DISCBD")) >>> q = np.reshape(q, (49,1)) And we can run the model again: >>> reg=GM_Lag(y, X, w=w, yend=yd, q=q, w_lags=2, name_x=['inc'], name_y='hoval', name_yend=['crime'], name_q=['discbd'], name_ds='columbus') >>> reg.betas array([[ 100.79359082], [ -0.50215501], [ -1.14881711], [ -0.38235022]]) Once the model is run, we can obtain the standard error of the coefficient estimates by calling the diagnostics module: >>> D.se_betas(reg) array([ 53.0829123 , 1.02511494, 0.57589064, 0.59891744]) """ def __init__(self, y, x, yend=None, q=None, w=None, w_lags=1, lag_q=True, robust=None, gwk=None, sig2n_k=False, spat_diag=False, vm=False, name_y=None, name_x=None, name_yend=None, name_q=None, name_w=None, name_gwk=None, name_ds=None): n = USER.check_arrays(x, yend, q) USER.check_y(y, n) USER.check_weights(w, y, w_required=True) USER.check_robust(robust, gwk) yend2, q2 = set_endog(y, x, w, yend, q, w_lags, lag_q) x_constant = USER.check_constant(x) BaseGM_Lag.__init__( self, y=y, x=x_constant, w=w.sparse, yend=yend2, q=q2, w_lags=w_lags, robust=robust, gwk=gwk, lag_q=lag_q, sig2n_k=sig2n_k) self.rho = self.betas[-1] self.predy_e, self.e_pred, warn = sp_att(w, self.y, self.predy, yend2[:, -1].reshape(self.n, 1), self.rho) set_warn(self, warn) self.title = "SPATIAL TWO STAGE LEAST SQUARES" self.name_ds = USER.set_name_ds(name_ds) self.name_y = USER.set_name_y(name_y) self.name_x = USER.set_name_x(name_x, x) self.name_yend = USER.set_name_yend(name_yend, yend) self.name_yend.append(USER.set_name_yend_sp(self.name_y)) self.name_z = self.name_x + self.name_yend self.name_q = USER.set_name_q(name_q, q) self.name_q.extend( USER.set_name_q_sp(self.name_x, w_lags, self.name_q, lag_q)) self.name_h = USER.set_name_h(self.name_x, self.name_q) self.robust = USER.set_robust(robust) self.name_w = USER.set_name_w(name_w, w) self.name_gwk = USER.set_name_w(name_gwk, gwk) SUMMARY.GM_Lag(reg=self, w=w, vm=vm, spat_diag=spat_diag) def _test(): import doctest start_suppress = np.get_printoptions()['suppress'] np.set_printoptions(suppress=True) doctest.testmod() np.set_printoptions(suppress=start_suppress) if __name__ == '__main__': _test() import numpy as np import pysal db = pysal.open(pysal.examples.get_path("columbus.dbf"), 'r') y_var = 'CRIME' y = np.array([db.by_col(y_var)]).reshape(49, 1) x_var = ['INC'] x = np.array([db.by_col(name) for name in x_var]).T yd_var = ['HOVAL'] yd = np.array([db.by_col(name) for name in yd_var]).T q_var = ['DISCBD'] q = np.array([db.by_col(name) for name in q_var]).T w = 
pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
    w.transform = 'r'

    model = GM_Lag(
        y, x, yd, q, w=w, spat_diag=True, name_y=y_var, name_x=x_var,
        name_yend=yd_var, name_q=q_var, name_ds='columbus',
        name_w='columbus.gal')
    print model.summary
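# The w_lags parameter documented above controls how many spatial lags of the
# exogenous variables are built as instruments for the spatially lagged
# dependent variable (w_lags=1 -> WX; w_lags=2 -> WX and WWX). A small
# illustration of that idea with pysal.lag_spatial, reusing the columbus
# example data from the doctests; it sketches the concept only and is not a
# re-implementation of set_endog().
import numpy as np
import pysal

db = pysal.open(pysal.examples.get_path("columbus.dbf"), 'r')
X = np.array([db.by_col("INC"), db.by_col("CRIME")]).T
w = pysal.rook_from_shapefile(pysal.examples.get_path("columbus.shp"))
w.transform = 'r'

wx = pysal.lag_spatial(w, X)    # WX: first-order spatial lag of each column
wwx = pysal.lag_spatial(w, wx)  # WWX: second-order lag, added when w_lags=2
instruments = np.hstack((wx, wwx))
print instruments.shape         # (49, 4): two lag orders of two columns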