Dataset columns:

    repo_name  string (lengths 5-100)
    path       string (lengths 4-375)
    copies     string (991 distinct values)
    size       string (lengths 4-7)
    content    string (lengths 666-1M)
    license    string (15 distinct values)

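Below is a minimal sketch of how rows with this schema might be loaded and inspected, assuming the dump comes from a Hugging Face `datasets`-style source; the dataset path "example/python-code-dump" and the split name are hypothetical placeholders, not values taken from this dump.

# Minimal sketch, assuming a Hugging Face `datasets`-style source.
# "example/python-code-dump" is a hypothetical placeholder name.
from datasets import load_dataset

ds = load_dataset("example/python-code-dump", split="train")
for row in ds.select(range(3)):
    # Each row pairs repository metadata with the raw source file text.
    print(row["repo_name"], row["path"], row["copies"], row["size"], row["license"])
    print(row["content"][:200])  # preview the first 200 characters of the file
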
repo_name: kontais/EFI-MIPS
path: ToolKit/cmds/python/Lib/test/skipped/test_mimetypes.py
copies: 15
size: 2570
content:

import mimetypes
import StringIO
import unittest

from sets import Set
from test import test_support

# Tell it we don't know about external files:
mimetypes.knownfiles = []
mimetypes.inited = False


class MimeTypesTestCase(unittest.TestCase):
    def setUp(self):
        self.db = mimetypes.MimeTypes()

    def test_default_data(self):
        eq = self.assertEqual
        eq(self.db.guess_type("foo.html"), ("text/html", None))
        eq(self.db.guess_type("foo.tgz"), ("application/x-tar", "gzip"))
        eq(self.db.guess_type("foo.tar.gz"), ("application/x-tar", "gzip"))
        eq(self.db.guess_type("foo.tar.Z"), ("application/x-tar", "compress"))

    def test_data_urls(self):
        eq = self.assertEqual
        guess_type = self.db.guess_type
        eq(guess_type("data:,thisIsTextPlain"), ("text/plain", None))
        eq(guess_type("data:;base64,thisIsTextPlain"), ("text/plain", None))
        eq(guess_type("data:text/x-foo,thisIsTextXFoo"), ("text/x-foo", None))

    def test_file_parsing(self):
        eq = self.assertEqual
        sio = StringIO.StringIO("x-application/x-unittest pyunit\n")
        self.db.readfp(sio)
        eq(self.db.guess_type("foo.pyunit"), ("x-application/x-unittest", None))
        eq(self.db.guess_extension("x-application/x-unittest"), ".pyunit")

    def test_non_standard_types(self):
        eq = self.assertEqual
        # First try strict
        eq(self.db.guess_type('foo.xul', strict=True), (None, None))
        eq(self.db.guess_extension('image/jpg', strict=True), None)
        # And then non-strict
        eq(self.db.guess_type('foo.xul', strict=False), ('text/xul', None))
        eq(self.db.guess_extension('image/jpg', strict=False), '.jpg')

    def test_guess_all_types(self):
        eq = self.assertEqual
        unless = self.failUnless
        # First try strict.  Use a set here for testing the results because if
        # test_urllib2 is run before test_mimetypes, global state is modified
        # such that the 'all' set will have more items in it.
        all = Set(self.db.guess_all_extensions('text/plain', strict=True))
        unless(all >= Set(['.bat', '.c', '.h', '.ksh', '.pl', '.txt']))
        # And now non-strict
        all = self.db.guess_all_extensions('image/jpg', strict=False)
        all.sort()
        eq(all, ['.jpg'])
        # And now for no hits
        all = self.db.guess_all_extensions('image/jpg', strict=True)
        eq(all, [])


def test_main():
    test_support.run_unittest(MimeTypesTestCase)


if __name__ == "__main__":
    test_main()

license: bsd-3-clause

repo_name: haijieg/SFrame
path: oss_src/unity/python/sframe/test/test_file_util.py
copies: 6
size: 3599
content:

import os
import unittest
import tempfile

from ..util import file_util as fu


class FileUtilTests(unittest.TestCase):
    def setUp(self):
        self.local_path = 'tmp/a/b/c'
        self.s3_path = 's3://a/b/c'
        self.http_path = 'http://a.b.c/d'
        self._get_env()

    def _get_env(self):
        self.run_s3_test = ('FILE_UTIL_TEST_S3_BUCKET' in os.environ) and \
            'AWS_ACCESS_KEY_ID' in os.environ and \
            'AWS_SECRET_ACCESS_KEY' in os.environ
        if self.run_s3_test:
            self.s3_test_path = os.environ['FILE_UTIL_TEST_S3_BUCKET']
        else:
            self.s3_test_path = None

    def test_get_protocol(self):
        self.assertEqual(fu.get_protocol(self.local_path), '')
        self.assertEqual(fu.get_protocol(self.s3_path), 's3')
        self.assertEqual(fu.get_protocol(self.http_path), 'http')

    def test_s3_load_save(self):
        # skip tests if not setup appropriately
        if self.s3_test_path is None:
            return

        # non exist local file
        with self.assertRaises(RuntimeError):
            fu.upload_to_s3('~/tmp/__abc_non_exist', self.s3_test_path, {})

        # s3 path not correct
        with self.assertRaises(RuntimeError):
            fu.download_from_s3('abc', '~/tmp/some_new_file')

        # local path not correct
        with self.assertRaises(RuntimeError):
            fu.download_from_s3('s3:/a/b/', 'http:/a.b.c/d')

        # invalid s3 path
        with self.assertRaises(RuntimeError):
            with tempfile.NamedTemporaryFile(delete=True) as f:
                f.close()
                fu.upload_to_s3(f.name, 'abc', {})

        with tempfile.NamedTemporaryFile(delete=False) as f:
            f.write('abc')
            f.close()
            fu.upload_to_s3(f.name, self.s3_test_path)
            os.remove(f.name)
            self.assertFalse(os.path.exists(f.name))

            fu.download_from_s3(self.s3_test_path, f.name)
            self.assertTrue(os.path.exists(f.name))
            with open(f.name, 'r') as f1:
                s = f1.read()
                self.assertEqual(s, 'abc')

    def test_is_local_path(self):
        self.assertTrue(fu.is_local_path(self.local_path))
        self.assertFalse(fu.is_local_path(self.s3_path))
        self.assertFalse(fu.is_local_path(self.http_path))

    def test_is_s3_path(self):
        self.assertFalse(fu.is_s3_path(self.local_path))
        self.assertTrue(fu.is_s3_path(self.s3_path))
        self.assertFalse(fu.is_s3_path(self.http_path))

    def test_expand_full_path(self):
        if not 'HOME' in os.environ:
            raise RuntimeError('warning: cannot find $HOME key in environment')
        else:
            home = os.environ['HOME']
            self.assertTrue(fu.expand_full_path('~/tmp'), os.path.join(home, 'tmp'))
            self.assertTrue(fu.expand_full_path('tmp'), os.path.join(os.getcwd(), 'tmp'))

    def test_parse_s3_path(self):
        s3_path = 's3://a/b/c'
        (bucket, path) = fu.parse_s3_path(s3_path)
        self.assertEqual(bucket, 'a')
        self.assertEqual(path, 'b/c')

        s3_path = 'S3://a/b/c/'
        (bucket, path) = fu.parse_s3_path(s3_path)
        self.assertEqual(bucket, 'a')
        self.assertEqual(path, 'b/c')

    def test_cert_directories(self):
        import sframe as sf
        import certifi
        self.assertEqual(sf.get_runtime_config()['GRAPHLAB_FILEIO_ALTERNATIVE_SSL_CERT_FILE'],
                         certifi.where())
        self.assertEqual(sf.get_runtime_config()['GRAPHLAB_FILEIO_ALTERNATIVE_SSL_CERT_DIR'], "")

license: bsd-3-clause

repo_name: Aegeaner/spark
path: examples/src/main/python/ml/normalizer_example.py
copies: 123
size: 1807
content:

#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

# $example on$
from pyspark.ml.feature import Normalizer
from pyspark.ml.linalg import Vectors
# $example off$
from pyspark.sql import SparkSession

if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("NormalizerExample")\
        .getOrCreate()

    # $example on$
    dataFrame = spark.createDataFrame([
        (0, Vectors.dense([1.0, 0.5, -1.0]),),
        (1, Vectors.dense([2.0, 1.0, 1.0]),),
        (2, Vectors.dense([4.0, 10.0, 2.0]),)
    ], ["id", "features"])

    # Normalize each Vector using $L^1$ norm.
    normalizer = Normalizer(inputCol="features", outputCol="normFeatures", p=1.0)
    l1NormData = normalizer.transform(dataFrame)
    print("Normalized using L^1 norm")
    l1NormData.show()

    # Normalize each Vector using $L^\infty$ norm.
    lInfNormData = normalizer.transform(dataFrame, {normalizer.p: float("inf")})
    print("Normalized using L^inf norm")
    lInfNormData.show()
    # $example off$

    spark.stop()

license: apache-2.0

repo_name: m-wichmann/dist_brake
path: drm.py
copies: 1
size: 14505
content:
#!/usr/bin/env python3 import argparse import os import sys import tempfile import time import shutil import datetime import json import threading import queue import textwrap import re import pathlib import logging import drm from drm.data import HandbrakeConfig, RipConfig, Disc, Fix, Job import drm.handbrake as handbrake from drm.util import * from drm.master import master_start_server from drm.slave import slave_start logger = logging.getLogger('drm') MIN_DISK_SPACE_LEFT = 15 # in gb # TODO: support subdirs # TODO: detect new files during runtime # TODO: maybe add prefix to tempfile # TODO: Check if file already fully copied (for master) # TODO: Implement state site via flask class InvalidConfigException(Exception): pass class PathIsDirException(Exception): pass def invalid_config_get_text(expected_master, path): # Check if a slave config was given try: if expected_master: parse_cfg_slave(path) else: parse_cfg_master(path) wrong_config_found = True except: wrong_config_found = False ret = 'Config file invalid' if wrong_config_found: if expected_master: ret = 'Config file for master expected, slave config found' else: ret = 'Config file for slave expected, master config found' return ret def parse_cfg_master(cfg_path): try: with open(cfg_path, 'r') as fd: data = json.load(fd) hb_config = HandbrakeConfig(quality=data['hb_config']['quality'], h264_preset=data['hb_config']['h264_preset'], h264_profile=data['hb_config']['h264_profile'], h264_level=data['hb_config']['h264_level']) rip_config = RipConfig(a_lang=data['rip_config']['a_tracks'], s_lang=data['rip_config']['s_tracks'], len_range=(data['rip_config']['min_dur'], data['rip_config']['max_dur'])) fixes = [] for fix in data['fixes']: fixes.append(Fix(fix, data['fixes'][fix])) in_path = data['in_path'] out_path = data['out_path'] except (KeyError, json.decoder.JSONDecodeError): raise InvalidConfigException('Config is invalid') except FileNotFoundError: raise except IsADirectoryError: raise PathIsDirException('Config file expected, directory found') return (hb_config, rip_config, fixes, in_path, out_path) def parse_cfg_slave(cfg_path): try: with open(cfg_path, 'r') as fd: data = json.load(fd) ip = data['ip'] port = data['port'] except (KeyError, json.decoder.JSONDecodeError): raise InvalidConfigException('Config is invalid') except FileNotFoundError: raise except IsADirectoryError: raise PathIsDirException('Config file expected, directory found') return (ip, port) def master(hb_config, rip_config, fixes, in_path, out_path): logger.info('Starting as master...') if len(fixes) > 0: logger.info('Active fixes:') for fix in fixes: logger.info(' %s', fix) job_queue = [] for root, dirs, files in os.walk(in_path): if dirs: logger.error('Subdirs currently not supported!') break for f in files: disc = Disc(os.path.join(root, f)) job = Job(disc, rip_config, hb_config, fixes) job_queue.append(job) logger.debug('Creating job {}'.format(job)) logger.info('Created {} jobs'.format(len(job_queue))) # TODO: ip/port master_start_server('0.0.0.0', 5001, job_queue, out_path) def slave(ip, port): logger.info('Starting as slave...') slave_start(ip, port) def rip(out_dir): # TODO: show progress via shutil.disk_usage('/tmp/tmp.../') if not any([dvdbackup_check(), genisoimage_check(), eject_check()]): logger.error('Some necessary tool not found (dvdbackup, genisoimage, eject)') return while True: # Check if there is still some disk space left (_, _, free_mem) = shutil.disk_usage(out_dir) free_mem_gb = free_mem / 1024 / 1024 / 1024 if free_mem_gb < 
MIN_DISK_SPACE_LEFT: logger.warning('Free space in out dir might not be enough') # Get name for image try: name = input('Please enter disc name (empty to end): ') except KeyboardInterrupt: break # Empty name to exit if name == '': break # Images are expected to use upper case (not necessary, but used here) name = name.upper() temp_dir = tempfile.TemporaryDirectory() out_path = os.path.join(out_dir, name + '.iso') if pathlib.Path(out_path).is_file(): logger.warning('File already exists') continue time_started = datetime.datetime.now() rip_success = True try: dvdbackup(temp_dir.name, name) genisoimage(out_path, os.path.join(temp_dir.name, name)) except KeyboardInterrupt: break time_done = datetime.datetime.now() # delete temp_dir explicitly, so memory gets freed right now del temp_dir # Eject disk eject_retval = eject() if not eject_retval: logger.error('Eject failed!') try: image_size = os.path.getsize(out_path) except FileNotFoundError: rip_success = False image_size = 0 # If image is 0 byte, ripping failed, and there won't be a real image if image_size == 0: rip_success = False if rip_success: # TODO: limit print prec logger.info('Done {} [{} GB, {}]!'.format(name, image_size / (1024 * 1024 * 1024), time_done - time_started)) else: logger.warning('Failed {} [{}]'.format(name, time_done - time_started)) def list_titles(target_dir, rip_config, fixes): # TODO: Maybe try libdvdread if dvdnav returns no titles?! use_libdvdread = False if 'use_libdvdread' in fixes: use_libdvdread = True for root, dirs, files in os.walk(target_dir): for f in files: track_list = handbrake.scan_disc(os.path.join(root, f), use_libdvdread) track_list = handbrake.filter_titles(track_list, *rip_config.len_range, rip_config.a_lang, rip_config.s_lang) logger.info(' => {} matching tracks...'.format(len(track_list))) for track in track_list: logger.info(' {}'.format(track)) def set_properties(target_dir): allowed_pattern = r'''[^a-zA-Z0-9\-() .',_!&äöüÄÖÜ\[\]]''' invalid_file_name_msg = 'File name invalid: ' for root, dirs, files in os.walk(target_dir): for f in files: if os.path.splitext(f)[1] != '.mkv': continue # Check if file seems valid matches = re.finditer(allowed_pattern, f) match_pos = [e.span()[0] for e in matches] if match_pos: marker = ''.join(['^' if i in match_pos else ' ' for i in range(len(f))]) logger.warning('%s%s', invalid_file_name_msg, f) logger.warning('%s%s', ' '*len(invalid_file_name_msg), marker) # Set mkv properties path = os.path.join(root, f) title = os.path.splitext(f)[0] mkvpropedit(path, title) def help_build_epilog(): found_hb = handbrake.check_env() found_dvdbackup = dvdbackup_check() found_geniso = genisoimage_check() found_eject = eject_check() found_mkvprop = mkvpropedit_check() allowed_fixes = '\n' for fix in Fix.allowed_fixes: allowed_fixes += ' ' + fix + '\n' help_text = """ Tools: Handbrake {handbrake} dvdbackup {dvdbackup} genisoimage {genisoimage} eject {eject} mkvpropedit {mkvpropedit} Available fixes:{allowed_fixes} Examples: $ drm --rip isos/ # Rip DVDs to dir isos/ $ drm --rip master.cfg # Rip DVDs to input directory from master.cfg $ drm --list isos/ # List iso titles using default config $ drm --list master.cfg # List iso titles using config from master.cfg $ drm --master master.cfg # Start master $ drm --slave slave.cfg # Start slave $ drm --prop out/ # Set properties of mkv files in directory out/ """ help_text = help_text.format(handbrake='Found' if found_hb else 'Not found! --slave and --list not available', dvdbackup='Found' if found_dvdbackup else 'Not found! 
--rip not available', genisoimage='Found' if found_geniso else 'Not found! --rip not available', eject='Found' if found_eject else 'Not found! --rip not available', mkvpropedit='Found' if found_mkvprop else 'Not found! --prop not available', allowed_fixes=allowed_fixes) return textwrap.dedent(help_text) def drm_main(): parser = argparse.ArgumentParser(description='Distributed video transcoder based on HandBrake.', epilog=help_build_epilog(), formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('--version', action='version', version='%(prog)s ' + drm.__version__) parser.add_argument('-v', '--verbose', action='count', default=0) group = parser.add_mutually_exclusive_group(required=True) group.add_argument('--master', action='store', help='start drm as master to distribute image files to the slaves') group.add_argument('--slave', action='store', help='start drm as slave to process image files provided by the master') group.add_argument('--rip', action='store', help='rip DVD discs to image files') group.add_argument('--list', action='store', help='list tracks for all images in given directory that match given configuration') group.add_argument('--prop', action='store', help='set mkv properties for files') args = parser.parse_args() # set log level if args.verbose == 0: log_level_drm = logging.INFO log_level_werkzeug = logging.WARNING elif args.verbose == 1: log_level_drm = logging.DEBUG log_level_werkzeug = logging.WARNING elif args.verbose == 2: log_level_drm = logging.DEBUG log_level_werkzeug = logging.INFO elif args.verbose == 3: log_level_drm = logging.DEBUG log_level_werkzeug = logging.DEBUG else: log_level_drm = logging.DEBUG log_level_werkzeug = logging.NOTSET # flask/werkzeug logger logging.getLogger('werkzeug').setLevel(log_level_werkzeug) # drm logger logger_drm = logging.getLogger('drm') logger_drm.setLevel(log_level_drm) handler = logging.StreamHandler(sys.stdout) handler.setFormatter(logging.Formatter(fmt='[%(asctime)s][%(levelname)s] %(message)s', datefmt='%H:%M:%S')) logger_drm.addHandler(handler) if args.master: try: (hb_config, rip_config, fixes, in_path, out_path) = parse_cfg_master(args.master) except InvalidConfigException: parser.error(invalid_config_get_text(expected_master=True, path=args.master)) except FileNotFoundError: parser.error('Config file not found') except PathIsDirException: parser.error('File expected, directory found') master(hb_config, rip_config, fixes, in_path, out_path) elif args.slave: if not handbrake.check_env(): parser.error('Handbrake not found! Please install HandBrakeCLI') try: (ip, port) = parse_cfg_slave(args.slave) except InvalidConfigException: parser.error(invalid_config_get_text(expected_master=False, path=args.slave)) except FileNotFoundError: parser.error('Config file not found') except PathIsDirException: parser.error('File expected, directory found') slave(ip, port) elif args.rip: if not all((dvdbackup_check(), genisoimage_check(), eject_check())): parser.error('Necessary tools not found! 
Make sure dvdbackup, genisoimage and eject are installed') # Try if path is config file, if so, use in_path of config try: (hb_config, rip_config, fixes, in_path, out_path) = parse_cfg_master(args.rip) if len(fixes) > 0: logger.info('Active fixes:') for fix in fixes: logger.info(' %s', fix) rip_dir = in_path except InvalidConfigException: parser.error(invalid_config_get_text(expected_master=True, path=args.rip)) except FileNotFoundError: parser.error('Path invalid') except PathIsDirException: rip_dir = args.rip rip(rip_dir) elif args.list: if not handbrake.check_env(): parser.error('Handbrake not found! Please install HandBrakeCLI') # Try if path is config file, if so, use in_path of config try: (hb_config, rip_config, fixes, in_path, out_path) = parse_cfg_master(args.list) list_dir = in_path list_rip_config = rip_config except InvalidConfigException: parser.error(invalid_config_get_text(expected_master=True, path=args.list)) except FileNotFoundError: parser.error('Path invalid') except PathIsDirException: list_dir = args.list list_rip_config = RipConfig(len_range=(10, 200)) fixes = [] list_titles(list_dir, list_rip_config, fixes) elif args.prop: if not mkvpropedit_check(): parser.error('mkvpropedit not found! Please install mkvpropedit') if not os.path.isdir(args.prop): parser.error('Directory expected') set_properties(args.prop) if __name__ == '__main__': drm_main()
license: mit

repo_name: h3biomed/ansible-modules-core
path: network/dellos6/dellos6_config.py
copies: 44
size: 9929
content:
#!/usr/bin/python # # (c) 2015 Peter Sprygada, <[email protected]> # # Copyright (c) 2016 Dell Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'status': ['preview'], 'supported_by': 'community', 'version': '1.0'} DOCUMENTATION = """ --- module: dellos6_config version_added: "2.2" author: "Abirami N(@abirami-n)" short_description: Manage Dell OS6 configuration sections description: - Dell OS6 configurations use a simple block indent file syntax for segmenting configuration into sections. This module provides an implementation for working with Dell OS6 configuration sections in a deterministic way. extends_documentation_fragment: dellos6 options: lines: description: - The ordered set of commands that should be configured in the section. The commands must be the exact same commands as found in the device running-config. Be sure to note the configuration command syntax as some commands are automatically modified by the device config parser. This argument is mutually exclusive with I(src). required: false default: null aliases: ['commands'] parents: description: - The ordered set of parents that uniquely identify the section the commands should be checked against. If the parents argument is omitted, the commands are checked against the set of top level or global commands. required: false default: null src: description: - Specifies the source path to the file that contains the configuration or configuration template to load. The path to the source file can either be the full path on the Ansible control host or a relative path from the playbook or role root dir. This argument is mutually exclusive with I(lines). required: false default: null before: description: - The ordered set of commands to push on to the command stack if a change needs to be made. This allows the playbook designer the opportunity to perform configuration commands prior to pushing any changes without affecting how the set of commands are matched against the system. required: false default: null after: description: - The ordered set of commands to append to the end of the command stack if a change needs to be made. Just like with I(before) this allows the playbook designer to append a set of commands to be executed after the command set. required: false default: null match: description: - Instructs the module on the way to perform the matching of the set of commands against the current device config. If match is set to I(line), commands are matched line by line. If match is set to I(strict), command lines are matched with respect to position. If match is set to I(exact), command lines must be an equal match. Finally, if match is set to I(none), the module will not attempt to compare the source configuration with the running configuration on the remote device. 
required: false default: line choices: ['line', 'strict', 'exact', 'none'] replace: description: - Instructs the module on the way to perform the configuration on the device. If the replace argument is set to I(line) then the modified lines are pushed to the device in configuration mode. If the replace argument is set to I(block) then the entire command block is pushed to the device in configuration mode if any line is not correct. required: false default: line choices: ['line', 'block'] update: description: - The I(update) argument controls how the configuration statements are processed on the remote device. Valid choices for the I(update) argument are I(merge) and I(check). When the argument is set to I(merge), the configuration changes are merged with the current device running configuration. When the argument is set to I(check) the configuration updates are determined but not actually configured on the remote device. required: false default: merge choices: ['merge', 'check'] save: description: - The C(save) argument instructs the module to save the running- config to the startup-config at the conclusion of the module running. If check mode is specified, this argument is ignored. required: false default: no choices: ['yes', 'no'] config: description: - The C(config) argument allows the playbook designer to supply the base configuration to be used to validate configuration changes necessary. If this argument is provided, the module will not download the running-config from the remote node. required: false default: null backup: description: - This argument will cause the module to create a full backup of the current C(running-config) from the remote device before any changes are made. The backup file is written to the C(backup) folder in the playbook root directory. If the directory does not exist, it is created. required: false default: no choices: ['yes', 'no'] """ EXAMPLES = """ - dellos6_config: lines: ['hostname {{ inventory_hostname }}'] provider: "{{ cli }}" - dellos6_config: lines: - 10 permit ip 1.1.1.1 any log - 20 permit ip 2.2.2.2 any log - 30 permit ip 3.3.3.3 any log - 40 permit ip 4.4.4.4 any log - 50 permit ip 5.5.5.5 any log parents: ['ip access-list test'] before: ['no ip access-list test'] match: exact provider: "{{ cli }}" - dellos6_config: lines: - 10 permit ip 1.1.1.1 any log - 20 permit ip 2.2.2.2 any log - 30 permit ip 3.3.3.3 any log - 40 permit ip 4.4.4.4 any log parents: ['ip access-list test'] before: ['no ip access-list test'] replace: block provider: "{{ cli }}" """ RETURN = """ updates: description: The set of commands that will be pushed to the remote device returned: always type: list sample: ['...', '...'] responses: description: The set of responses from issuing the commands on the device returned: when not check_mode type: list sample: ['...', '...'] saved: description: Returns whether the configuration is saved to the startup configuration or not. 
returned: when not check_mode type: bool sample: True """ from ansible.module_utils.netcfg import dumps from ansible.module_utils.network import NetworkModule from ansible.module_utils.dellos6 import get_config, get_sublevel_config, Dellos6NetworkConfig def get_candidate(module): candidate = Dellos6NetworkConfig(indent=0) if module.params['src']: candidate.load(module.params['src']) elif module.params['lines']: parents = module.params['parents'] or list() candidate.add(module.params['lines'], parents=parents) return candidate def main(): argument_spec = dict( lines=dict(aliases=['commands'], type='list'), parents=dict(type='list'), src=dict(type='path'), before=dict(type='list'), after=dict(type='list'), match=dict(default='line', choices=['line', 'strict', 'exact', 'none']), replace=dict(default='line', choices=['line', 'block']), update=dict(choices=['merge', 'check'], default='merge'), save=dict(type='bool', default=False), config=dict(), backup=dict(type='bool', default=False) ) mutually_exclusive = [('lines', 'src')] module = NetworkModule(argument_spec=argument_spec, connect_on_load=False, mutually_exclusive=mutually_exclusive, supports_check_mode=True) parents = module.params['parents'] or list() match = module.params['match'] replace = module.params['replace'] result = dict(changed=False, saved=False) candidate = get_candidate(module) if match != 'none': config = get_config(module) if parents: config = get_sublevel_config(config, module) configobjs = candidate.difference(config, match=match, replace=replace) else: configobjs = candidate.items if module.params['backup']: result['__backup__'] = module.cli('show running-config')[0] commands = list() if configobjs: commands = dumps(configobjs, 'commands') commands = commands.split('\n') if module.params['before']: commands[:0] = module.params['before'] if module.params['after']: commands.extend(module.params['after']) if not module.check_mode and module.params['update'] == 'merge': response = module.config.load_config(commands) result['responses'] = response if module.params['save']: module.config.save_config() result['saved'] = True result['changed'] = True result['updates'] = commands module.exit_json(**result) if __name__ == '__main__': main()
license: gpl-3.0

repo_name: azoft-dev-team/imagrium
path: env/Lib/json/tests/__init__.py
copies: 145
size: 2452
content:

import os
import sys
import json
import doctest
import unittest

from test import test_support

# import json with and without accelerations
cjson = test_support.import_fresh_module('json', fresh=['_json'])
pyjson = test_support.import_fresh_module('json', blocked=['_json'])

# create two base classes that will be used by the other tests
class PyTest(unittest.TestCase):
    json = pyjson
    loads = staticmethod(pyjson.loads)
    dumps = staticmethod(pyjson.dumps)

@unittest.skipUnless(cjson, 'requires _json')
class CTest(unittest.TestCase):
    if cjson is not None:
        json = cjson
        loads = staticmethod(cjson.loads)
        dumps = staticmethod(cjson.dumps)

# test PyTest and CTest checking if the functions come from the right module
class TestPyTest(PyTest):
    def test_pyjson(self):
        self.assertEqual(self.json.scanner.make_scanner.__module__,
                         'json.scanner')
        self.assertEqual(self.json.decoder.scanstring.__module__,
                         'json.decoder')
        self.assertEqual(self.json.encoder.encode_basestring_ascii.__module__,
                         'json.encoder')

class TestCTest(CTest):
    def test_cjson(self):
        self.assertEqual(self.json.scanner.make_scanner.__module__, '_json')
        self.assertEqual(self.json.decoder.scanstring.__module__, '_json')
        self.assertEqual(self.json.encoder.c_make_encoder.__module__, '_json')
        self.assertEqual(self.json.encoder.encode_basestring_ascii.__module__,
                         '_json')


here = os.path.dirname(__file__)


def test_suite():
    suite = additional_tests()
    loader = unittest.TestLoader()
    for fn in os.listdir(here):
        if fn.startswith("test") and fn.endswith(".py"):
            modname = "json.tests." + fn[:-3]
            __import__(modname)
            module = sys.modules[modname]
            suite.addTests(loader.loadTestsFromModule(module))
    return suite


def additional_tests():
    suite = unittest.TestSuite()
    for mod in (json, json.encoder, json.decoder):
        suite.addTest(doctest.DocTestSuite(mod))
    suite.addTest(TestPyTest('test_pyjson'))
    suite.addTest(TestCTest('test_cjson'))
    return suite


def main():
    suite = test_suite()
    runner = unittest.TextTestRunner()
    runner.run(suite)


if __name__ == '__main__':
    sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
    main()

license: mit

repo_name: ivanlyon/exercises
path: test/test_k_solitaire.py
copies: 1
size: 2752
content:

import io
import unittest
from unittest.mock import patch

from kattis import k_solitaire

###############################################################################

class SampleInput(unittest.TestCase):
    '''Problem statement sample inputs and outputs'''

    def test_sample_input(self):
        '''Run and assert problem statement sample input and output.'''
        inputs = []
        inputs.append('3')
        inputs.append('###...###')
        inputs.append('..oo.....')
        inputs.append('.....oo..')
        inputs.append('.........')
        inputs.append('###...###')
        inputs.append('')
        inputs.append('###...###')
        inputs.append('..oo.o...')
        inputs.append('...o.oo..')
        inputs.append('...oo....')
        inputs.append('###...###')
        inputs.append('')
        inputs.append('###o..###')
        inputs.append('.o.oo....')
        inputs.append('o.o......')
        inputs.append('.o.o.....')
        inputs.append('###...###')
        inputs = '\n'.join(inputs) + '\n'

        outputs = []
        outputs.append('1 3')
        outputs.append('1 7')
        outputs.append('1 7')
        outputs = '\n'.join(outputs) + '\n'

        with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
             patch('sys.stdout', new_callable=io.StringIO) as stdout:
            k_solitaire.main()
            self.assertEqual(stdout.getvalue(), outputs)
            self.assertEqual(stdin.read(), '')

    def test_zeroes(self):
        '''Run and assert problem statement sample input and output.'''
        inputs = []
        inputs.append('3')
        inputs.append('###...###')
        inputs.append('.........')
        inputs.append('.........')
        inputs.append('.........')
        inputs.append('###...###')
        inputs.append('')
        inputs.append('###...##')
        inputs.append('..o....')
        inputs.append('.o....')
        inputs.append('.....')
        inputs.append('####')
        inputs.append('')
        inputs.append('###...##')
        inputs.append('..oo...')
        inputs.append('.o..o.')
        inputs.append('.....')
        inputs.append('####')
        inputs = '\n'.join(inputs) + '\n'

        outputs = []
        outputs.append('0 0')
        outputs.append('2 0')
        outputs.append('2 2')
        outputs = '\n'.join(outputs) + '\n'

        with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
             patch('sys.stdout', new_callable=io.StringIO) as stdout:
            k_solitaire.main()
            self.assertEqual(stdout.getvalue(), outputs)
            self.assertEqual(stdin.read(), '')

###############################################################################

if __name__ == '__main__':
    unittest.main()

license: mit

repo_name: vrk-kpa/ckanext-xroad_integration
path: ckanext/xroad_integration/tests/xroad_mock/xroad_rest_mock.py
copies: 1
size: 1242
content:

from flask import Flask
import json
from io import open
from datetime import datetime, timedelta

app = Flask(__name__)


def create_app(input_file):
    app = Flask(__name__)
    mock_data = json.load(open(input_file, 'r', encoding='utf-8'))

    @app.route('/getListOfServices/<int:days>')
    def getListOfServices(days=1):
        now = datetime.now()
        member_data = []
        for i in range(days):
            dt = now - timedelta(days=i)
            date = [dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond]
            member_data.append({'date': date,
                                'memberDataList': mock_data['memberData'][0]['memberDataList']})

        data = {'memberData': member_data,
                'securityServerData': mock_data['securityServerData']}

        return json.dumps(data)

    @app.route('/getServiceStatistics/<int:days>')
    def getServiceStatistics(days=1):
        return json.dumps({'serviceStatisticsList': []})

    return app


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('input_file')
    parser.add_argument('port', nargs='?', type=int, default=8088)
    args = parser.parse_args()

    app = create_app(args.input_file)
    app.run(port=args.port)

license: agpl-3.0

repo_name: titimoby/connected
path: jsserver/node_modules/mosca/node_modules/leveldown/node_modules/prebuild/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py
copies: 1366
size: 120842
content:
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Xcode project file generator. This module is both an Xcode project file generator and a documentation of the Xcode project file format. Knowledge of the project file format was gained based on extensive experience with Xcode, and by making changes to projects in Xcode.app and observing the resultant changes in the associated project files. XCODE PROJECT FILES The generator targets the file format as written by Xcode 3.2 (specifically, 3.2.6), but past experience has taught that the format has not changed significantly in the past several years, and future versions of Xcode are able to read older project files. Xcode project files are "bundled": the project "file" from an end-user's perspective is actually a directory with an ".xcodeproj" extension. The project file from this module's perspective is actually a file inside this directory, always named "project.pbxproj". This file contains a complete description of the project and is all that is needed to use the xcodeproj. Other files contained in the xcodeproj directory are simply used to store per-user settings, such as the state of various UI elements in the Xcode application. The project.pbxproj file is a property list, stored in a format almost identical to the NeXTstep property list format. The file is able to carry Unicode data, and is encoded in UTF-8. The root element in the property list is a dictionary that contains several properties of minimal interest, and two properties of immense interest. The most important property is a dictionary named "objects". The entire structure of the project is represented by the children of this property. The objects dictionary is keyed by unique 96-bit values represented by 24 uppercase hexadecimal characters. Each value in the objects dictionary is itself a dictionary, describing an individual object. Each object in the dictionary is a member of a class, which is identified by the "isa" property of each object. A variety of classes are represented in a project file. Objects can refer to other objects by ID, using the 24-character hexadecimal object key. A project's objects form a tree, with a root object of class PBXProject at the root. As an example, the PBXProject object serves as parent to an XCConfigurationList object defining the build configurations used in the project, a PBXGroup object serving as a container for all files referenced in the project, and a list of target objects, each of which defines a target in the project. There are several different types of target object, such as PBXNativeTarget and PBXAggregateTarget. In this module, this relationship is expressed by having each target type derive from an abstract base named XCTarget. The project.pbxproj file's root dictionary also contains a property, sibling to the "objects" dictionary, named "rootObject". The value of rootObject is a 24-character object key referring to the root PBXProject object in the objects dictionary. In Xcode, every file used as input to a target or produced as a final product of a target must appear somewhere in the hierarchy rooted at the PBXGroup object referenced by the PBXProject's mainGroup property. A PBXGroup is generally represented as a folder in the Xcode application. PBXGroups can contain other PBXGroups as well as PBXFileReferences, which are pointers to actual files. 
Each XCTarget contains a list of build phases, represented in this module by the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the "Compile Sources" and "Link Binary With Libraries" phases displayed in the Xcode application. Files used as input to these phases (for example, source files in the former case and libraries and frameworks in the latter) are represented by PBXBuildFile objects, referenced by elements of "files" lists in XCTarget objects. Each PBXBuildFile object refers to a PBXBuildFile object as a "weak" reference: it does not "own" the PBXBuildFile, which is owned by the root object's mainGroup or a descendant group. In most cases, the layer of indirection between an XCBuildPhase and a PBXFileReference via a PBXBuildFile appears extraneous, but there's actually one reason for this: file-specific compiler flags are added to the PBXBuildFile object so as to allow a single file to be a member of multiple targets while having distinct compiler flags for each. These flags can be modified in the Xcode applciation in the "Build" tab of a File Info window. When a project is open in the Xcode application, Xcode will rewrite it. As such, this module is careful to adhere to the formatting used by Xcode, to avoid insignificant changes appearing in the file when it is used in the Xcode application. This will keep version control repositories happy, and makes it possible to compare a project file used in Xcode to one generated by this module to determine if any significant changes were made in the application. Xcode has its own way of assigning 24-character identifiers to each object, which is not duplicated here. Because the identifier only is only generated once, when an object is created, and is then left unchanged, there is no need to attempt to duplicate Xcode's behavior in this area. The generator is free to select any identifier, even at random, to refer to the objects it creates, and Xcode will retain those identifiers and use them when subsequently rewriting the project file. However, the generator would choose new random identifiers each time the project files are generated, leading to difficulties comparing "used" project files to "pristine" ones produced by this module, and causing the appearance of changes as every object identifier is changed when updated projects are checked in to a version control repository. To mitigate this problem, this module chooses identifiers in a more deterministic way, by hashing a description of each object as well as its parent and ancestor objects. This strategy should result in minimal "shift" in IDs as successive generations of project files are produced. THIS MODULE This module introduces several classes, all derived from the XCObject class. Nearly all of the "brains" are built into the XCObject class, which understands how to create and modify objects, maintain the proper tree structure, compute identifiers, and print objects. For the most part, classes derived from XCObject need only provide a _schema class object, a dictionary that expresses what properties objects of the class may contain. 
Given this structure, it's possible to build a minimal project file by creating objects of the appropriate types and making the proper connections: config_list = XCConfigurationList() group = PBXGroup() project = PBXProject({'buildConfigurationList': config_list, 'mainGroup': group}) With the project object set up, it can be added to an XCProjectFile object. XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject subclass that does not actually correspond to a class type found in a project file. Rather, it is used to represent the project file's root dictionary. Printing an XCProjectFile will print the entire project file, including the full "objects" dictionary. project_file = XCProjectFile({'rootObject': project}) project_file.ComputeIDs() project_file.Print() Xcode project files are always encoded in UTF-8. This module will accept strings of either the str class or the unicode class. Strings of class str are assumed to already be encoded in UTF-8. Obviously, if you're just using ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset. Strings of class unicode are handled properly and encoded in UTF-8 when a project file is output. """ import gyp.common import posixpath import re import struct import sys # hashlib is supplied as of Python 2.5 as the replacement interface for sha # and other secure hashes. In 2.6, sha is deprecated. Import hashlib if # available, avoiding a deprecation warning under 2.6. Import sha otherwise, # preserving 2.4 compatibility. try: import hashlib _new_sha1 = hashlib.sha1 except ImportError: import sha _new_sha1 = sha.new # See XCObject._EncodeString. This pattern is used to determine when a string # can be printed unquoted. Strings that match this pattern may be printed # unquoted. Strings that do not match must be quoted and may be further # transformed to be properly encoded. Note that this expression matches the # characters listed with "+", for 1 or more occurrences: if a string is empty, # it must not match this pattern, because it needs to be encoded as "". _unquoted = re.compile('^[A-Za-z0-9$./_]+$') # Strings that match this pattern are quoted regardless of what _unquoted says. # Oddly, Xcode will quote any string with a run of three or more underscores. _quoted = re.compile('___') # This pattern should match any character that needs to be escaped by # XCObject._EncodeString. See that function. _escaped = re.compile('[\\\\"]|[\x00-\x1f]') # Used by SourceTreeAndPathFromPath _path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$') def SourceTreeAndPathFromPath(input_path): """Given input_path, returns a tuple with sourceTree and path values. Examples: input_path (source_tree, output_path) '$(VAR)/path' ('VAR', 'path') '$(VAR)' ('VAR', None) 'path' (None, 'path') """ source_group_match = _path_leading_variable.match(input_path) if source_group_match: source_tree = source_group_match.group(1) output_path = source_group_match.group(3) # This may be None. else: source_tree = None output_path = input_path return (source_tree, output_path) def ConvertVariablesToShellSyntax(input_string): return re.sub(r'\$\((.*?)\)', '${\\1}', input_string) class XCObject(object): """The abstract base of all class types used in Xcode project files. Class variables: _schema: A dictionary defining the properties of this class. The keys to _schema are string property keys as used in project files. 
Values are a list of four or five elements: [ is_list, property_type, is_strong, is_required, default ] is_list: True if the property described is a list, as opposed to a single element. property_type: The type to use as the value of the property, or if is_list is True, the type to use for each element of the value's list. property_type must be an XCObject subclass, or one of the built-in types str, int, or dict. is_strong: If property_type is an XCObject subclass, is_strong is True to assert that this class "owns," or serves as parent, to the property value (or, if is_list is True, values). is_strong must be False if property_type is not an XCObject subclass. is_required: True if the property is required for the class. Note that is_required being True does not preclude an empty string ("", in the case of property_type str) or list ([], in the case of is_list True) from being set for the property. default: Optional. If is_requried is True, default may be set to provide a default value for objects that do not supply their own value. If is_required is True and default is not provided, users of the class must supply their own value for the property. Note that although the values of the array are expressed in boolean terms, subclasses provide values as integers to conserve horizontal space. _should_print_single_line: False in XCObject. Subclasses whose objects should be written to the project file in the alternate single-line format, such as PBXFileReference and PBXBuildFile, should set this to True. _encode_transforms: Used by _EncodeString to encode unprintable characters. The index into this list is the ordinal of the character to transform; each value is a string used to represent the character in the output. XCObject provides an _encode_transforms list suitable for most XCObject subclasses. _alternate_encode_transforms: Provided for subclasses that wish to use the alternate encoding rules. Xcode seems to use these rules when printing objects in single-line format. Subclasses that desire this behavior should set _encode_transforms to _alternate_encode_transforms. _hashables: A list of XCObject subclasses that can be hashed by ComputeIDs to construct this object's ID. Most classes that need custom hashing behavior should do it by overriding Hashables, but in some cases an object's parent may wish to push a hashable value into its child, and it can do so by appending to _hashables. Attributes: id: The object's identifier, a 24-character uppercase hexadecimal string. Usually, objects being created should not set id until the entire project file structure is built. At that point, UpdateIDs() should be called on the root object to assign deterministic values for id to each object in the tree. parent: The object's parent. This is set by a parent XCObject when a child object is added to it. _properties: The object's property dictionary. An object's properties are described by its class' _schema variable. """ _schema = {} _should_print_single_line = False # See _EncodeString. 
_encode_transforms = [] i = 0 while i < ord(' '): _encode_transforms.append('\\U%04x' % i) i = i + 1 _encode_transforms[7] = '\\a' _encode_transforms[8] = '\\b' _encode_transforms[9] = '\\t' _encode_transforms[10] = '\\n' _encode_transforms[11] = '\\v' _encode_transforms[12] = '\\f' _encode_transforms[13] = '\\n' _alternate_encode_transforms = list(_encode_transforms) _alternate_encode_transforms[9] = chr(9) _alternate_encode_transforms[10] = chr(10) _alternate_encode_transforms[11] = chr(11) def __init__(self, properties=None, id=None, parent=None): self.id = id self.parent = parent self._properties = {} self._hashables = [] self._SetDefaultsFromSchema() self.UpdateProperties(properties) def __repr__(self): try: name = self.Name() except NotImplementedError: return '<%s at 0x%x>' % (self.__class__.__name__, id(self)) return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self)) def Copy(self): """Make a copy of this object. The new object will have its own copy of lists and dicts. Any XCObject objects owned by this object (marked "strong") will be copied in the new object, even those found in lists. If this object has any weak references to other XCObjects, the same references are added to the new object without making a copy. """ that = self.__class__(id=self.id, parent=self.parent) for key, value in self._properties.iteritems(): is_strong = self._schema[key][2] if isinstance(value, XCObject): if is_strong: new_value = value.Copy() new_value.parent = that that._properties[key] = new_value else: that._properties[key] = value elif isinstance(value, str) or isinstance(value, unicode) or \ isinstance(value, int): that._properties[key] = value elif isinstance(value, list): if is_strong: # If is_strong is True, each element is an XCObject, so it's safe to # call Copy. that._properties[key] = [] for item in value: new_item = item.Copy() new_item.parent = that that._properties[key].append(new_item) else: that._properties[key] = value[:] elif isinstance(value, dict): # dicts are never strong. if is_strong: raise TypeError('Strong dict for key ' + key + ' in ' + \ self.__class__.__name__) else: that._properties[key] = value.copy() else: raise TypeError('Unexpected type ' + value.__class__.__name__ + \ ' for key ' + key + ' in ' + self.__class__.__name__) return that def Name(self): """Return the name corresponding to an object. Not all objects necessarily need to be nameable, and not all that do have a "name" property. Override as needed. """ # If the schema indicates that "name" is required, try to access the # property even if it doesn't exist. This will result in a KeyError # being raised for the property that should be present, which seems more # appropriate than NotImplementedError in this case. if 'name' in self._properties or \ ('name' in self._schema and self._schema['name'][3]): return self._properties['name'] raise NotImplementedError(self.__class__.__name__ + ' must implement Name') def Comment(self): """Return a comment string for the object. Most objects just use their name as the comment, but PBXProject uses different values. The returned comment is not escaped and does not have any comment marker strings applied to it. """ return self.Name() def Hashables(self): hashables = [self.__class__.__name__] name = self.Name() if name != None: hashables.append(name) hashables.extend(self._hashables) return hashables def HashablesForChild(self): return None def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None): """Set "id" properties deterministically. 
An object's "id" property is set based on a hash of its class type and name, as well as the class type and name of all ancestor objects. As such, it is only advisable to call ComputeIDs once an entire project file tree is built. If recursive is True, recurse into all descendant objects and update their hashes. If overwrite is True, any existing value set in the "id" property will be replaced. """ def _HashUpdate(hash, data): """Update hash with data's length and contents. If the hash were updated only with the value of data, it would be possible for clowns to induce collisions by manipulating the names of their objects. By adding the length, it's exceedingly less likely that ID collisions will be encountered, intentionally or not. """ hash.update(struct.pack('>i', len(data))) hash.update(data) if seed_hash is None: seed_hash = _new_sha1() hash = seed_hash.copy() hashables = self.Hashables() assert len(hashables) > 0 for hashable in hashables: _HashUpdate(hash, hashable) if recursive: hashables_for_child = self.HashablesForChild() if hashables_for_child is None: child_hash = hash else: assert len(hashables_for_child) > 0 child_hash = seed_hash.copy() for hashable in hashables_for_child: _HashUpdate(child_hash, hashable) for child in self.Children(): child.ComputeIDs(recursive, overwrite, child_hash) if overwrite or self.id is None: # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is # is 160 bits. Instead of throwing out 64 bits of the digest, xor them # into the portion that gets used. assert hash.digest_size % 4 == 0 digest_int_count = hash.digest_size / 4 digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest()) id_ints = [0, 0, 0] for index in xrange(0, digest_int_count): id_ints[index % 3] ^= digest_ints[index] self.id = '%08X%08X%08X' % tuple(id_ints) def EnsureNoIDCollisions(self): """Verifies that no two objects have the same ID. Checks all descendants. """ ids = {} descendants = self.Descendants() for descendant in descendants: if descendant.id in ids: other = ids[descendant.id] raise KeyError( 'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \ (descendant.id, str(descendant._properties), str(other._properties), self._properties['rootObject'].Name())) ids[descendant.id] = descendant def Children(self): """Returns a list of all of this object's owned (strong) children.""" children = [] for property, attributes in self._schema.iteritems(): (is_list, property_type, is_strong) = attributes[0:3] if is_strong and property in self._properties: if not is_list: children.append(self._properties[property]) else: children.extend(self._properties[property]) return children def Descendants(self): """Returns a list of all of this object's descendants, including this object. """ children = self.Children() descendants = [self] for child in children: descendants.extend(child.Descendants()) return descendants def PBXProjectAncestor(self): # The base case for recursion is defined at PBXProject.PBXProjectAncestor. if self.parent: return self.parent.PBXProjectAncestor() return None def _EncodeComment(self, comment): """Encodes a comment to be placed in the project file output, mimicing Xcode behavior. """ # This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If # the string already contains a "*/", it is turned into "(*)/". This keeps # the file writer from outputting something that would be treated as the # end of a comment in the middle of something intended to be entirely a # comment. 
return '/* ' + comment.replace('*/', '(*)/') + ' */' def _EncodeTransform(self, match): # This function works closely with _EncodeString. It will only be called # by re.sub with match.group(0) containing a character matched by the # the _escaped expression. char = match.group(0) # Backslashes (\) and quotation marks (") are always replaced with a # backslash-escaped version of the same. Everything else gets its # replacement from the class' _encode_transforms array. if char == '\\': return '\\\\' if char == '"': return '\\"' return self._encode_transforms[ord(char)] def _EncodeString(self, value): """Encodes a string to be placed in the project file output, mimicing Xcode behavior. """ # Use quotation marks when any character outside of the range A-Z, a-z, 0-9, # $ (dollar sign), . (period), and _ (underscore) is present. Also use # quotation marks to represent empty strings. # # Escape " (double-quote) and \ (backslash) by preceding them with a # backslash. # # Some characters below the printable ASCII range are encoded specially: # 7 ^G BEL is encoded as "\a" # 8 ^H BS is encoded as "\b" # 11 ^K VT is encoded as "\v" # 12 ^L NP is encoded as "\f" # 127 ^? DEL is passed through as-is without escaping # - In PBXFileReference and PBXBuildFile objects: # 9 ^I HT is passed through as-is without escaping # 10 ^J NL is passed through as-is without escaping # 13 ^M CR is passed through as-is without escaping # - In other objects: # 9 ^I HT is encoded as "\t" # 10 ^J NL is encoded as "\n" # 13 ^M CR is encoded as "\n" rendering it indistinguishable from # 10 ^J NL # All other characters within the ASCII control character range (0 through # 31 inclusive) are encoded as "\U001f" referring to the Unicode code point # in hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e". # Characters above the ASCII range are passed through to the output encoded # as UTF-8 without any escaping. These mappings are contained in the # class' _encode_transforms list. if _unquoted.search(value) and not _quoted.search(value): return value return '"' + _escaped.sub(self._EncodeTransform, value) + '"' def _XCPrint(self, file, tabs, line): file.write('\t' * tabs + line) def _XCPrintableValue(self, tabs, value, flatten_list=False): """Returns a representation of value that may be printed in a project file, mimicing Xcode's behavior. _XCPrintableValue can handle str and int values, XCObjects (which are made printable by returning their id property), and list and dict objects composed of any of the above types. When printing a list or dict, and _should_print_single_line is False, the tabs parameter is used to determine how much to indent the lines corresponding to the items in the list or dict. If flatten_list is True, single-element lists will be transformed into strings. 
""" printable = '' comment = None if self._should_print_single_line: sep = ' ' element_tabs = '' end_tabs = '' else: sep = '\n' element_tabs = '\t' * (tabs + 1) end_tabs = '\t' * tabs if isinstance(value, XCObject): printable += value.id comment = value.Comment() elif isinstance(value, str): printable += self._EncodeString(value) elif isinstance(value, unicode): printable += self._EncodeString(value.encode('utf-8')) elif isinstance(value, int): printable += str(value) elif isinstance(value, list): if flatten_list and len(value) <= 1: if len(value) == 0: printable += self._EncodeString('') else: printable += self._EncodeString(value[0]) else: printable = '(' + sep for item in value: printable += element_tabs + \ self._XCPrintableValue(tabs + 1, item, flatten_list) + \ ',' + sep printable += end_tabs + ')' elif isinstance(value, dict): printable = '{' + sep for item_key, item_value in sorted(value.iteritems()): printable += element_tabs + \ self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \ self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \ sep printable += end_tabs + '}' else: raise TypeError("Can't make " + value.__class__.__name__ + ' printable') if comment != None: printable += ' ' + self._EncodeComment(comment) return printable def _XCKVPrint(self, file, tabs, key, value): """Prints a key and value, members of an XCObject's _properties dictionary, to file. tabs is an int identifying the indentation level. If the class' _should_print_single_line variable is True, tabs is ignored and the key-value pair will be followed by a space insead of a newline. """ if self._should_print_single_line: printable = '' after_kv = ' ' else: printable = '\t' * tabs after_kv = '\n' # Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy # objects without comments. Sometimes it prints them with comments, but # the majority of the time, it doesn't. To avoid unnecessary changes to # the project file after Xcode opens it, don't write comments for # remoteGlobalIDString. This is a sucky hack and it would certainly be # cleaner to extend the schema to indicate whether or not a comment should # be printed, but since this is the only case where the problem occurs and # Xcode itself can't seem to make up its mind, the hack will suffice. # # Also see PBXContainerItemProxy._schema['remoteGlobalIDString']. if key == 'remoteGlobalIDString' and isinstance(self, PBXContainerItemProxy): value_to_print = value.id else: value_to_print = value # PBXBuildFile's settings property is represented in the output as a dict, # but a hack here has it represented as a string. Arrange to strip off the # quotes so that it shows up in the output as expected. if key == 'settings' and isinstance(self, PBXBuildFile): strip_value_quotes = True else: strip_value_quotes = False # In another one-off, let's set flatten_list on buildSettings properties # of XCBuildConfiguration objects, because that's how Xcode treats them. 
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration): flatten_list = True else: flatten_list = False try: printable_key = self._XCPrintableValue(tabs, key, flatten_list) printable_value = self._XCPrintableValue(tabs, value_to_print, flatten_list) if strip_value_quotes and len(printable_value) > 1 and \ printable_value[0] == '"' and printable_value[-1] == '"': printable_value = printable_value[1:-1] printable += printable_key + ' = ' + printable_value + ';' + after_kv except TypeError, e: gyp.common.ExceptionAppend(e, 'while printing key "%s"' % key) raise self._XCPrint(file, 0, printable) def Print(self, file=sys.stdout): """Prints a reprentation of this object to file, adhering to Xcode output formatting. """ self.VerifyHasRequiredProperties() if self._should_print_single_line: # When printing an object in a single line, Xcode doesn't put any space # between the beginning of a dictionary (or presumably a list) and the # first contained item, so you wind up with snippets like # ...CDEF = {isa = PBXFileReference; fileRef = 0123... # If it were me, I would have put a space in there after the opening # curly, but I guess this is just another one of those inconsistencies # between how Xcode prints PBXFileReference and PBXBuildFile objects as # compared to other objects. Mimic Xcode's behavior here by using an # empty string for sep. sep = '' end_tabs = 0 else: sep = '\n' end_tabs = 2 # Start the object. For example, '\t\tPBXProject = {\n'. self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep) # "isa" isn't in the _properties dictionary, it's an intrinsic property # of the class which the object belongs to. Xcode always outputs "isa" # as the first element of an object dictionary. self._XCKVPrint(file, 3, 'isa', self.__class__.__name__) # The remaining elements of an object dictionary are sorted alphabetically. for property, value in sorted(self._properties.iteritems()): self._XCKVPrint(file, 3, property, value) # End the object. self._XCPrint(file, end_tabs, '};\n') def UpdateProperties(self, properties, do_copy=False): """Merge the supplied properties into the _properties dictionary. The input properties must adhere to the class schema or a KeyError or TypeError exception will be raised. If adding an object of an XCObject subclass and the schema indicates a strong relationship, the object's parent will be set to this object. If do_copy is True, then lists, dicts, strong-owned XCObjects, and strong-owned XCObjects in lists will be copied instead of having their references added. """ if properties is None: return for property, value in properties.iteritems(): # Make sure the property is in the schema. if not property in self._schema: raise KeyError(property + ' not in ' + self.__class__.__name__) # Make sure the property conforms to the schema. (is_list, property_type, is_strong) = self._schema[property][0:3] if is_list: if value.__class__ != list: raise TypeError( property + ' of ' + self.__class__.__name__ + \ ' must be list, not ' + value.__class__.__name__) for item in value: if not isinstance(item, property_type) and \ not (item.__class__ == unicode and property_type == str): # Accept unicode where str is specified. str is treated as # UTF-8-encoded. raise TypeError( 'item of ' + property + ' of ' + self.__class__.__name__ + \ ' must be ' + property_type.__name__ + ', not ' + \ item.__class__.__name__) elif not isinstance(value, property_type) and \ not (value.__class__ == unicode and property_type == str): # Accept unicode where str is specified. 
str is treated as # UTF-8-encoded. raise TypeError( property + ' of ' + self.__class__.__name__ + ' must be ' + \ property_type.__name__ + ', not ' + value.__class__.__name__) # Checks passed, perform the assignment. if do_copy: if isinstance(value, XCObject): if is_strong: self._properties[property] = value.Copy() else: self._properties[property] = value elif isinstance(value, str) or isinstance(value, unicode) or \ isinstance(value, int): self._properties[property] = value elif isinstance(value, list): if is_strong: # If is_strong is True, each element is an XCObject, so it's safe # to call Copy. self._properties[property] = [] for item in value: self._properties[property].append(item.Copy()) else: self._properties[property] = value[:] elif isinstance(value, dict): self._properties[property] = value.copy() else: raise TypeError("Don't know how to copy a " + \ value.__class__.__name__ + ' object for ' + \ property + ' in ' + self.__class__.__name__) else: self._properties[property] = value # Set up the child's back-reference to this object. Don't use |value| # any more because it may not be right if do_copy is true. if is_strong: if not is_list: self._properties[property].parent = self else: for item in self._properties[property]: item.parent = self def HasProperty(self, key): return key in self._properties def GetProperty(self, key): return self._properties[key] def SetProperty(self, key, value): self.UpdateProperties({key: value}) def DelProperty(self, key): if key in self._properties: del self._properties[key] def AppendProperty(self, key, value): # TODO(mark): Support ExtendProperty too (and make this call that)? # Schema validation. if not key in self._schema: raise KeyError(key + ' not in ' + self.__class__.__name__) (is_list, property_type, is_strong) = self._schema[key][0:3] if not is_list: raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list') if not isinstance(value, property_type): raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \ ' must be ' + property_type.__name__ + ', not ' + \ value.__class__.__name__) # If the property doesn't exist yet, create a new empty list to receive the # item. if not key in self._properties: self._properties[key] = [] # Set up the ownership link. if is_strong: value.parent = self # Store the item. self._properties[key].append(value) def VerifyHasRequiredProperties(self): """Ensure that all properties identified as required by the schema are set. """ # TODO(mark): A stronger verification mechanism is needed. Some # subclasses need to perform validation beyond what the schema can enforce. for property, attributes in self._schema.iteritems(): (is_list, property_type, is_strong, is_required) = attributes[0:4] if is_required and not property in self._properties: raise KeyError(self.__class__.__name__ + ' requires ' + property) def _SetDefaultsFromSchema(self): """Assign object default values according to the schema. This will not overwrite properties that have already been set.""" defaults = {} for property, attributes in self._schema.iteritems(): (is_list, property_type, is_strong, is_required) = attributes[0:4] if is_required and len(attributes) >= 5 and \ not property in self._properties: default = attributes[4] defaults[property] = default if len(defaults) > 0: # Use do_copy=True so that each new object gets its own copy of strong # objects, lists, and dicts. self.UpdateProperties(defaults, do_copy=True) class XCHierarchicalElement(XCObject): """Abstract base for PBXGroup and PBXFileReference. 
Not represented in a project file.""" # TODO(mark): Do name and path belong here? Probably so. # If path is set and name is not, name may have a default value. Name will # be set to the basename of path, if the basename of path is different from # the full value of path. If path is already just a leaf name, name will # not be set. _schema = XCObject._schema.copy() _schema.update({ 'comments': [0, str, 0, 0], 'fileEncoding': [0, str, 0, 0], 'includeInIndex': [0, int, 0, 0], 'indentWidth': [0, int, 0, 0], 'lineEnding': [0, int, 0, 0], 'sourceTree': [0, str, 0, 1, '<group>'], 'tabWidth': [0, int, 0, 0], 'usesTabs': [0, int, 0, 0], 'wrapsLines': [0, int, 0, 0], }) def __init__(self, properties=None, id=None, parent=None): # super XCObject.__init__(self, properties, id, parent) if 'path' in self._properties and not 'name' in self._properties: path = self._properties['path'] name = posixpath.basename(path) if name != '' and path != name: self.SetProperty('name', name) if 'path' in self._properties and \ (not 'sourceTree' in self._properties or \ self._properties['sourceTree'] == '<group>'): # If the pathname begins with an Xcode variable like "$(SDKROOT)/", take # the variable out and make the path be relative to that variable by # assigning the variable name as the sourceTree. (source_tree, path) = SourceTreeAndPathFromPath(self._properties['path']) if source_tree != None: self._properties['sourceTree'] = source_tree if path != None: self._properties['path'] = path if source_tree != None and path is None and \ not 'name' in self._properties: # The path was of the form "$(SDKROOT)" with no path following it. # This object is now relative to that variable, so it has no path # attribute of its own. It does, however, keep a name. del self._properties['path'] self._properties['name'] = source_tree def Name(self): if 'name' in self._properties: return self._properties['name'] elif 'path' in self._properties: return self._properties['path'] else: # This happens in the case of the root PBXGroup. return None def Hashables(self): """Custom hashables for XCHierarchicalElements. XCHierarchicalElements are special. Generally, their hashes shouldn't change if the paths don't change. The normal XCObject implementation of Hashables adds a hashable for each object, which means that if the hierarchical structure changes (possibly due to changes caused when TakeOverOnlyChild runs and encounters slight changes in the hierarchy), the hashes will change. For example, if a project file initially contains a/b/f1 and a/b becomes collapsed into a/b, f1 will have a single parent a/b. If someone later adds a/f2 to the project file, a/b can no longer be collapsed, and f1 winds up with parent b and grandparent a. That would be sufficient to change f1's hash. To counteract this problem, hashables for all XCHierarchicalElements except for the main group (which has neither a name nor a path) are taken to be just the set of path components. Because hashables are inherited from parents, this provides assurance that a/b/f1 has the same set of hashables whether its parent is b or a/b. The main group is a special case. As it is permitted to have no name or path, it is permitted to use the standard XCObject hash mechanism. This is not considered a problem because there can be only one main group. 
""" if self == self.PBXProjectAncestor()._properties['mainGroup']: # super return XCObject.Hashables(self) hashables = [] # Put the name in first, ensuring that if TakeOverOnlyChild collapses # children into a top-level group like "Source", the name always goes # into the list of hashables without interfering with path components. if 'name' in self._properties: # Make it less likely for people to manipulate hashes by following the # pattern of always pushing an object type value onto the list first. hashables.append(self.__class__.__name__ + '.name') hashables.append(self._properties['name']) # NOTE: This still has the problem that if an absolute path is encountered, # including paths with a sourceTree, they'll still inherit their parents' # hashables, even though the paths aren't relative to their parents. This # is not expected to be much of a problem in practice. path = self.PathFromSourceTreeAndPath() if path != None: components = path.split(posixpath.sep) for component in components: hashables.append(self.__class__.__name__ + '.path') hashables.append(component) hashables.extend(self._hashables) return hashables def Compare(self, other): # Allow comparison of these types. PBXGroup has the highest sort rank; # PBXVariantGroup is treated as equal to PBXFileReference. valid_class_types = { PBXFileReference: 'file', PBXGroup: 'group', PBXVariantGroup: 'file', } self_type = valid_class_types[self.__class__] other_type = valid_class_types[other.__class__] if self_type == other_type: # If the two objects are of the same sort rank, compare their names. return cmp(self.Name(), other.Name()) # Otherwise, sort groups before everything else. if self_type == 'group': return -1 return 1 def CompareRootGroup(self, other): # This function should be used only to compare direct children of the # containing PBXProject's mainGroup. These groups should appear in the # listed order. # TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the # generator should have a way of influencing this list rather than having # to hardcode for the generator here. order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products', 'Build'] # If the groups aren't in the listed order, do a name comparison. # Otherwise, groups in the listed order should come before those that # aren't. self_name = self.Name() other_name = other.Name() self_in = isinstance(self, PBXGroup) and self_name in order other_in = isinstance(self, PBXGroup) and other_name in order if not self_in and not other_in: return self.Compare(other) if self_name in order and not other_name in order: return -1 if other_name in order and not self_name in order: return 1 # If both groups are in the listed order, go by the defined order. self_index = order.index(self_name) other_index = order.index(other_name) if self_index < other_index: return -1 if self_index > other_index: return 1 return 0 def PathFromSourceTreeAndPath(self): # Turn the object's sourceTree and path properties into a single flat # string of a form comparable to the path parameter. If there's a # sourceTree property other than "<group>", wrap it in $(...) for the # comparison. components = [] if self._properties['sourceTree'] != '<group>': components.append('$(' + self._properties['sourceTree'] + ')') if 'path' in self._properties: components.append(self._properties['path']) if len(components) > 0: return posixpath.join(*components) return None def FullPath(self): # Returns a full path to self relative to the project file, or relative # to some other source tree. 
Start with self, and walk up the chain of # parents prepending their paths, if any, until no more parents are # available (project-relative path) or until a path relative to some # source tree is found. xche = self path = None while isinstance(xche, XCHierarchicalElement) and \ (path is None or \ (not path.startswith('/') and not path.startswith('$'))): this_path = xche.PathFromSourceTreeAndPath() if this_path != None and path != None: path = posixpath.join(this_path, path) elif this_path != None: path = this_path xche = xche.parent return path class PBXGroup(XCHierarchicalElement): """ Attributes: _children_by_path: Maps pathnames of children of this PBXGroup to the actual child XCHierarchicalElement objects. _variant_children_by_name_and_path: Maps (name, path) tuples of PBXVariantGroup children to the actual child PBXVariantGroup objects. """ _schema = XCHierarchicalElement._schema.copy() _schema.update({ 'children': [1, XCHierarchicalElement, 1, 1, []], 'name': [0, str, 0, 0], 'path': [0, str, 0, 0], }) def __init__(self, properties=None, id=None, parent=None): # super XCHierarchicalElement.__init__(self, properties, id, parent) self._children_by_path = {} self._variant_children_by_name_and_path = {} for child in self._properties.get('children', []): self._AddChildToDicts(child) def Hashables(self): # super hashables = XCHierarchicalElement.Hashables(self) # It is not sufficient to just rely on name and parent to build a unique # hashable : a node could have two child PBXGroup sharing a common name. # To add entropy the hashable is enhanced with the names of all its # children. for child in self._properties.get('children', []): child_name = child.Name() if child_name != None: hashables.append(child_name) return hashables def HashablesForChild(self): # To avoid a circular reference the hashables used to compute a child id do # not include the child names. return XCHierarchicalElement.Hashables(self) def _AddChildToDicts(self, child): # Sets up this PBXGroup object's dicts to reference the child properly. child_path = child.PathFromSourceTreeAndPath() if child_path: if child_path in self._children_by_path: raise ValueError('Found multiple children with path ' + child_path) self._children_by_path[child_path] = child if isinstance(child, PBXVariantGroup): child_name = child._properties.get('name', None) key = (child_name, child_path) if key in self._variant_children_by_name_and_path: raise ValueError('Found multiple PBXVariantGroup children with ' + \ 'name ' + str(child_name) + ' and path ' + \ str(child_path)) self._variant_children_by_name_and_path[key] = child def AppendChild(self, child): # Callers should use this instead of calling # AppendProperty('children', child) directly because this function # maintains the group's dicts. self.AppendProperty('children', child) self._AddChildToDicts(child) def GetChildByName(self, name): # This is not currently optimized with a dict as GetChildByPath is because # it has few callers. Most callers probably want GetChildByPath. This # function is only useful to get children that have names but no paths, # which is rare. The children of the main group ("Source", "Products", # etc.) is pretty much the only case where this likely to come up. # # TODO(mark): Maybe this should raise an error if more than one child is # present with the same name. 
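    # Illustrative usage (hypothetical group layout): for a main group with
    # children named "Source" and "Products", GetChildByName('Products')
    # returns the products group; lookups keyed by pathname should instead go
    # through GetChildByPath below.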
if not 'children' in self._properties: return None for child in self._properties['children']: if child.Name() == name: return child return None def GetChildByPath(self, path): if not path: return None if path in self._children_by_path: return self._children_by_path[path] return None def GetChildByRemoteObject(self, remote_object): # This method is a little bit esoteric. Given a remote_object, which # should be a PBXFileReference in another project file, this method will # return this group's PBXReferenceProxy object serving as a local proxy # for the remote PBXFileReference. # # This function might benefit from a dict optimization as GetChildByPath # for some workloads, but profiling shows that it's not currently a # problem. if not 'children' in self._properties: return None for child in self._properties['children']: if not isinstance(child, PBXReferenceProxy): continue container_proxy = child._properties['remoteRef'] if container_proxy._properties['remoteGlobalIDString'] == remote_object: return child return None def AddOrGetFileByPath(self, path, hierarchical): """Returns an existing or new file reference corresponding to path. If hierarchical is True, this method will create or use the necessary hierarchical group structure corresponding to path. Otherwise, it will look in and create an item in the current group only. If an existing matching reference is found, it is returned, otherwise, a new one will be created, added to the correct group, and returned. If path identifies a directory by virtue of carrying a trailing slash, this method returns a PBXFileReference of "folder" type. If path identifies a variant, by virtue of it identifying a file inside a directory with an ".lproj" extension, this method returns a PBXVariantGroup containing the variant named by path, and possibly other variants. For all other paths, a "normal" PBXFileReference will be returned. """ # Adding or getting a directory? Directories end with a trailing slash. is_dir = False if path.endswith('/'): is_dir = True path = posixpath.normpath(path) if is_dir: path = path + '/' # Adding or getting a variant? Variants are files inside directories # with an ".lproj" extension. Xcode uses variants for localization. For # a variant path/to/Language.lproj/MainMenu.nib, put a variant group named # MainMenu.nib inside path/to, and give it a variant named Language. In # this example, grandparent would be set to path/to and parent_root would # be set to Language. variant_name = None parent = posixpath.dirname(path) grandparent = posixpath.dirname(parent) parent_basename = posixpath.basename(parent) (parent_root, parent_ext) = posixpath.splitext(parent_basename) if parent_ext == '.lproj': variant_name = parent_root if grandparent == '': grandparent = None # Putting a directory inside a variant group is not currently supported. assert not is_dir or variant_name is None path_split = path.split(posixpath.sep) if len(path_split) == 1 or \ ((is_dir or variant_name != None) and len(path_split) == 2) or \ not hierarchical: # The PBXFileReference or PBXVariantGroup will be added to or gotten from # this PBXGroup, no recursion necessary. if variant_name is None: # Add or get a PBXFileReference. file_ref = self.GetChildByPath(path) if file_ref != None: assert file_ref.__class__ == PBXFileReference else: file_ref = PBXFileReference({'path': path}) self.AppendChild(file_ref) else: # Add or get a PBXVariantGroup. The variant group name is the same # as the basename (MainMenu.nib in the example above). 
grandparent # specifies the path to the variant group itself, and path_split[-2:] # is the path of the specific variant relative to its group. variant_group_name = posixpath.basename(path) variant_group_ref = self.AddOrGetVariantGroupByNameAndPath( variant_group_name, grandparent) variant_path = posixpath.sep.join(path_split[-2:]) variant_ref = variant_group_ref.GetChildByPath(variant_path) if variant_ref != None: assert variant_ref.__class__ == PBXFileReference else: variant_ref = PBXFileReference({'name': variant_name, 'path': variant_path}) variant_group_ref.AppendChild(variant_ref) # The caller is interested in the variant group, not the specific # variant file. file_ref = variant_group_ref return file_ref else: # Hierarchical recursion. Add or get a PBXGroup corresponding to the # outermost path component, and then recurse into it, chopping off that # path component. next_dir = path_split[0] group_ref = self.GetChildByPath(next_dir) if group_ref != None: assert group_ref.__class__ == PBXGroup else: group_ref = PBXGroup({'path': next_dir}) self.AppendChild(group_ref) return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]), hierarchical) def AddOrGetVariantGroupByNameAndPath(self, name, path): """Returns an existing or new PBXVariantGroup for name and path. If a PBXVariantGroup identified by the name and path arguments is already present as a child of this object, it is returned. Otherwise, a new PBXVariantGroup with the correct properties is created, added as a child, and returned. This method will generally be called by AddOrGetFileByPath, which knows when to create a variant group based on the structure of the pathnames passed to it. """ key = (name, path) if key in self._variant_children_by_name_and_path: variant_group_ref = self._variant_children_by_name_and_path[key] assert variant_group_ref.__class__ == PBXVariantGroup return variant_group_ref variant_group_properties = {'name': name} if path != None: variant_group_properties['path'] = path variant_group_ref = PBXVariantGroup(variant_group_properties) self.AppendChild(variant_group_ref) return variant_group_ref def TakeOverOnlyChild(self, recurse=False): """If this PBXGroup has only one child and it's also a PBXGroup, take it over by making all of its children this object's children. This function will continue to take over only children when those children are groups. If there are three PBXGroups representing a, b, and c, with c inside b and b inside a, and a and b have no other children, this will result in a taking over both b and c, forming a PBXGroup for a/b/c. If recurse is True, this function will recurse into children and ask them to collapse themselves by taking over only children as well. Assuming an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f (d1, d2, and f are files, the rest are groups), recursion will result in a group for a/b/c containing a group for d3/e. """ # At this stage, check that child class types are PBXGroup exactly, # instead of using isinstance. The only subclass of PBXGroup, # PBXVariantGroup, should not participate in reparenting in the same way: # reparenting by merging different object types would be wrong. while len(self._properties['children']) == 1 and \ self._properties['children'][0].__class__ == PBXGroup: # Loop to take over the innermost only-child group possible. child = self._properties['children'][0] # Assume the child's properties, including its children. Save a copy # of this object's old properties, because they'll still be needed. 
      # This object retains its existing id and parent attributes.
      old_properties = self._properties
      self._properties = child._properties
      self._children_by_path = child._children_by_path

      if not 'sourceTree' in self._properties or \
         self._properties['sourceTree'] == '<group>':
        # The child was relative to its parent. Fix up the path. Note that
        # children with a sourceTree other than "<group>" are not relative to
        # their parents, so no path fix-up is needed in that case.
        if 'path' in old_properties:
          if 'path' in self._properties:
            # Both the original parent and child have paths set.
            self._properties['path'] = posixpath.join(old_properties['path'],
                                                      self._properties['path'])
          else:
            # Only the original parent has a path, use it.
            self._properties['path'] = old_properties['path']
        if 'sourceTree' in old_properties:
          # The original parent had a sourceTree set, use it.
          self._properties['sourceTree'] = old_properties['sourceTree']

      # If the original parent had a name set, keep using it. If the original
      # parent didn't have a name but the child did, let the child's name
      # live on. If the name attribute seems unnecessary now, get rid of it.
      if 'name' in old_properties and old_properties['name'] != None and \
         old_properties['name'] != self.Name():
        self._properties['name'] = old_properties['name']
      if 'name' in self._properties and 'path' in self._properties and \
         self._properties['name'] == self._properties['path']:
        del self._properties['name']

      # Notify all children of their new parent.
      for child in self._properties['children']:
        child.parent = self

    # If asked to recurse, recurse.
    if recurse:
      for child in self._properties['children']:
        if child.__class__ == PBXGroup:
          child.TakeOverOnlyChild(recurse)

  def SortGroup(self):
    self._properties['children'] = \
        sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))

    # Recurse.
    for child in self._properties['children']:
      if isinstance(child, PBXGroup):
        child.SortGroup()


class XCFileLikeElement(XCHierarchicalElement):
  # Abstract base for objects that can be used as the fileRef property of
  # PBXBuildFile.

  def PathHashables(self):
    # A PBXBuildFile that refers to this object will call this method to
    # obtain additional hashables specific to this XCFileLikeElement. Don't
    # just use this object's hashables, they're not specific and unique enough
    # on their own (without access to the parent hashables.) Instead, provide
    # hashables that identify this object by path by getting its hashables as
    # well as the hashables of ancestor XCHierarchicalElement objects.
    hashables = []
    xche = self
    while xche != None and isinstance(xche, XCHierarchicalElement):
      xche_hashables = xche.Hashables()
      for index in xrange(0, len(xche_hashables)):
        hashables.insert(index, xche_hashables[index])
      xche = xche.parent
    return hashables


class XCContainerPortal(XCObject):
  # Abstract base for objects that can be used as the containerPortal property
  # of PBXContainerItemProxy.
  pass


class XCRemoteObject(XCObject):
  # Abstract base for objects that can be used as the remoteGlobalIDString
  # property of PBXContainerItemProxy.
  pass


class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'explicitFileType': [0, str, 0, 0],
    'lastKnownFileType': [0, str, 0, 0],
    'name': [0, str, 0, 0],
    'path': [0, str, 0, 1],
  })

  # Weird output rules for PBXFileReference.
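  # For example (illustrative, hypothetical id and values, abbreviated with
  # "..."), a file reference is emitted on a single line of the project file:
  #   ABCD... /* file.cc */ = {isa = PBXFileReference; path = file.cc; ... };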
_should_print_single_line = True # super _encode_transforms = XCFileLikeElement._alternate_encode_transforms def __init__(self, properties=None, id=None, parent=None): # super XCFileLikeElement.__init__(self, properties, id, parent) if 'path' in self._properties and self._properties['path'].endswith('/'): self._properties['path'] = self._properties['path'][:-1] is_dir = True else: is_dir = False if 'path' in self._properties and \ not 'lastKnownFileType' in self._properties and \ not 'explicitFileType' in self._properties: # TODO(mark): This is the replacement for a replacement for a quick hack. # It is no longer incredibly sucky, but this list needs to be extended. extension_map = { 'a': 'archive.ar', 'app': 'wrapper.application', 'bdic': 'file', 'bundle': 'wrapper.cfbundle', 'c': 'sourcecode.c.c', 'cc': 'sourcecode.cpp.cpp', 'cpp': 'sourcecode.cpp.cpp', 'css': 'text.css', 'cxx': 'sourcecode.cpp.cpp', 'dart': 'sourcecode', 'dylib': 'compiled.mach-o.dylib', 'framework': 'wrapper.framework', 'gyp': 'sourcecode', 'gypi': 'sourcecode', 'h': 'sourcecode.c.h', 'hxx': 'sourcecode.cpp.h', 'icns': 'image.icns', 'java': 'sourcecode.java', 'js': 'sourcecode.javascript', 'kext': 'wrapper.kext', 'm': 'sourcecode.c.objc', 'mm': 'sourcecode.cpp.objcpp', 'nib': 'wrapper.nib', 'o': 'compiled.mach-o.objfile', 'pdf': 'image.pdf', 'pl': 'text.script.perl', 'plist': 'text.plist.xml', 'pm': 'text.script.perl', 'png': 'image.png', 'py': 'text.script.python', 'r': 'sourcecode.rez', 'rez': 'sourcecode.rez', 's': 'sourcecode.asm', 'storyboard': 'file.storyboard', 'strings': 'text.plist.strings', 'swift': 'sourcecode.swift', 'ttf': 'file', 'xcassets': 'folder.assetcatalog', 'xcconfig': 'text.xcconfig', 'xcdatamodel': 'wrapper.xcdatamodel', 'xcdatamodeld':'wrapper.xcdatamodeld', 'xib': 'file.xib', 'y': 'sourcecode.yacc', } prop_map = { 'dart': 'explicitFileType', 'gyp': 'explicitFileType', 'gypi': 'explicitFileType', } if is_dir: file_type = 'folder' prop_name = 'lastKnownFileType' else: basename = posixpath.basename(self._properties['path']) (root, ext) = posixpath.splitext(basename) # Check the map using a lowercase extension. # TODO(mark): Maybe it should try with the original case first and fall # back to lowercase, in case there are any instances where case # matters. There currently aren't. if ext != '': ext = ext[1:].lower() # TODO(mark): "text" is the default value, but "file" is appropriate # for unrecognized files not containing text. Xcode seems to choose # based on content. file_type = extension_map.get(ext, 'text') prop_name = prop_map.get(ext, 'lastKnownFileType') self._properties[prop_name] = file_type class PBXVariantGroup(PBXGroup, XCFileLikeElement): """PBXVariantGroup is used by Xcode to represent localizations.""" # No additions to the schema relative to PBXGroup. pass # PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below # because it uses PBXContainerItemProxy, defined below. class XCBuildConfiguration(XCObject): _schema = XCObject._schema.copy() _schema.update({ 'baseConfigurationReference': [0, PBXFileReference, 0, 0], 'buildSettings': [0, dict, 0, 1, {}], 'name': [0, str, 0, 1], }) def HasBuildSetting(self, key): return key in self._properties['buildSettings'] def GetBuildSetting(self, key): return self._properties['buildSettings'][key] def SetBuildSetting(self, key, value): # TODO(mark): If a list, copy? 
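    # Illustrative usage of the build-setting helpers in this class
    # (hypothetical setting names and values, shown for orientation only):
    #   config = XCBuildConfiguration({'name': 'Debug'})
    #   config.SetBuildSetting('GCC_OPTIMIZATION_LEVEL', '0')
    #   config.AppendBuildSetting('OTHER_CFLAGS', '-Wall')
    #   config.HasBuildSetting('OTHER_CFLAGS')  # True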
self._properties['buildSettings'][key] = value def AppendBuildSetting(self, key, value): if not key in self._properties['buildSettings']: self._properties['buildSettings'][key] = [] self._properties['buildSettings'][key].append(value) def DelBuildSetting(self, key): if key in self._properties['buildSettings']: del self._properties['buildSettings'][key] def SetBaseConfiguration(self, value): self._properties['baseConfigurationReference'] = value class XCConfigurationList(XCObject): # _configs is the default list of configurations. _configs = [ XCBuildConfiguration({'name': 'Debug'}), XCBuildConfiguration({'name': 'Release'}) ] _schema = XCObject._schema.copy() _schema.update({ 'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs], 'defaultConfigurationIsVisible': [0, int, 0, 1, 1], 'defaultConfigurationName': [0, str, 0, 1, 'Release'], }) def Name(self): return 'Build configuration list for ' + \ self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"' def ConfigurationNamed(self, name): """Convenience accessor to obtain an XCBuildConfiguration by name.""" for configuration in self._properties['buildConfigurations']: if configuration._properties['name'] == name: return configuration raise KeyError(name) def DefaultConfiguration(self): """Convenience accessor to obtain the default XCBuildConfiguration.""" return self.ConfigurationNamed(self._properties['defaultConfigurationName']) def HasBuildSetting(self, key): """Determines the state of a build setting in all XCBuildConfiguration child objects. If all child objects have key in their build settings, and the value is the same in all child objects, returns 1. If no child objects have the key in their build settings, returns 0. If some, but not all, child objects have the key in their build settings, or if any children have different values for the key, returns -1. """ has = None value = None for configuration in self._properties['buildConfigurations']: configuration_has = configuration.HasBuildSetting(key) if has is None: has = configuration_has elif has != configuration_has: return -1 if configuration_has: configuration_value = configuration.GetBuildSetting(key) if value is None: value = configuration_value elif value != configuration_value: return -1 if not has: return 0 return 1 def GetBuildSetting(self, key): """Gets the build setting for key. All child XCConfiguration objects must have the same value set for the setting, or a ValueError will be raised. """ # TODO(mark): This is wrong for build settings that are lists. The list # contents should be compared (and a list copy returned?) value = None for configuration in self._properties['buildConfigurations']: configuration_value = configuration.GetBuildSetting(key) if value is None: value = configuration_value else: if value != configuration_value: raise ValueError('Variant values for ' + key) return value def SetBuildSetting(self, key, value): """Sets the build setting for key to value in all child XCBuildConfiguration objects. """ for configuration in self._properties['buildConfigurations']: configuration.SetBuildSetting(key, value) def AppendBuildSetting(self, key, value): """Appends value to the build setting for key, which is treated as a list, in all child XCBuildConfiguration objects. """ for configuration in self._properties['buildConfigurations']: configuration.AppendBuildSetting(key, value) def DelBuildSetting(self, key): """Deletes the build setting key from all child XCBuildConfiguration objects. 
""" for configuration in self._properties['buildConfigurations']: configuration.DelBuildSetting(key) def SetBaseConfiguration(self, value): """Sets the build configuration in all child XCBuildConfiguration objects. """ for configuration in self._properties['buildConfigurations']: configuration.SetBaseConfiguration(value) class PBXBuildFile(XCObject): _schema = XCObject._schema.copy() _schema.update({ 'fileRef': [0, XCFileLikeElement, 0, 1], 'settings': [0, str, 0, 0], # hack, it's a dict }) # Weird output rules for PBXBuildFile. _should_print_single_line = True _encode_transforms = XCObject._alternate_encode_transforms def Name(self): # Example: "main.cc in Sources" return self._properties['fileRef'].Name() + ' in ' + self.parent.Name() def Hashables(self): # super hashables = XCObject.Hashables(self) # It is not sufficient to just rely on Name() to get the # XCFileLikeElement's name, because that is not a complete pathname. # PathHashables returns hashables unique enough that no two # PBXBuildFiles should wind up with the same set of hashables, unless # someone adds the same file multiple times to the same target. That # would be considered invalid anyway. hashables.extend(self._properties['fileRef'].PathHashables()) return hashables class XCBuildPhase(XCObject): """Abstract base for build phase classes. Not represented in a project file. Attributes: _files_by_path: A dict mapping each path of a child in the files list by path (keys) to the corresponding PBXBuildFile children (values). _files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys) to the corresponding PBXBuildFile children (values). """ # TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't # actually have a "files" list. XCBuildPhase should not have "files" but # another abstract subclass of it should provide this, and concrete build # phase types that do have "files" lists should be derived from that new # abstract subclass. XCBuildPhase should only provide buildActionMask and # runOnlyForDeploymentPostprocessing, and not files or the various # file-related methods and attributes. _schema = XCObject._schema.copy() _schema.update({ 'buildActionMask': [0, int, 0, 1, 0x7fffffff], 'files': [1, PBXBuildFile, 1, 1, []], 'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0], }) def __init__(self, properties=None, id=None, parent=None): # super XCObject.__init__(self, properties, id, parent) self._files_by_path = {} self._files_by_xcfilelikeelement = {} for pbxbuildfile in self._properties.get('files', []): self._AddBuildFileToDicts(pbxbuildfile) def FileGroup(self, path): # Subclasses must override this by returning a two-element tuple. The # first item in the tuple should be the PBXGroup to which "path" should be # added, either as a child or deeper descendant. The second item should # be a boolean indicating whether files should be added into hierarchical # groups or one single flat group. raise NotImplementedError( self.__class__.__name__ + ' must implement FileGroup') def _AddPathToDict(self, pbxbuildfile, path): """Adds path to the dict tracking paths belonging to this build phase. If the path is already a member of this build phase, raises an exception. """ if path in self._files_by_path: raise ValueError('Found multiple build files with path ' + path) self._files_by_path[path] = pbxbuildfile def _AddBuildFileToDicts(self, pbxbuildfile, path=None): """Maintains the _files_by_path and _files_by_xcfilelikeelement dicts. 
If path is specified, then it is the path that is being added to the phase, and pbxbuildfile must contain either a PBXFileReference directly referencing that path, or it must contain a PBXVariantGroup that itself contains a PBXFileReference referencing the path. If path is not specified, either the PBXFileReference's path or the paths of all children of the PBXVariantGroup are taken as being added to the phase. If the path is already present in the phase, raises an exception. If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile are already present in the phase, referenced by a different PBXBuildFile object, raises an exception. This does not raise an exception when a PBXFileReference or PBXVariantGroup reappear and are referenced by the same PBXBuildFile that has already introduced them, because in the case of PBXVariantGroup objects, they may correspond to multiple paths that are not all added simultaneously. When this situation occurs, the path needs to be added to _files_by_path, but nothing needs to change in _files_by_xcfilelikeelement, and the caller should have avoided adding the PBXBuildFile if it is already present in the list of children. """ xcfilelikeelement = pbxbuildfile._properties['fileRef'] paths = [] if path != None: # It's best when the caller provides the path. if isinstance(xcfilelikeelement, PBXVariantGroup): paths.append(path) else: # If the caller didn't provide a path, there can be either multiple # paths (PBXVariantGroup) or one. if isinstance(xcfilelikeelement, PBXVariantGroup): for variant in xcfilelikeelement._properties['children']: paths.append(variant.FullPath()) else: paths.append(xcfilelikeelement.FullPath()) # Add the paths first, because if something's going to raise, the # messages provided by _AddPathToDict are more useful owing to its # having access to a real pathname and not just an object's Name(). for a_path in paths: self._AddPathToDict(pbxbuildfile, a_path) # If another PBXBuildFile references this XCFileLikeElement, there's a # problem. if xcfilelikeelement in self._files_by_xcfilelikeelement and \ self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile: raise ValueError('Found multiple build files for ' + \ xcfilelikeelement.Name()) self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile def AppendBuildFile(self, pbxbuildfile, path=None): # Callers should use this instead of calling # AppendProperty('files', pbxbuildfile) directly because this function # maintains the object's dicts. Better yet, callers can just call AddFile # with a pathname and not worry about building their own PBXBuildFile # objects. self.AppendProperty('files', pbxbuildfile) self._AddBuildFileToDicts(pbxbuildfile, path) def AddFile(self, path, settings=None): (file_group, hierarchical) = self.FileGroup(path) file_ref = file_group.AddOrGetFileByPath(path, hierarchical) if file_ref in self._files_by_xcfilelikeelement and \ isinstance(file_ref, PBXVariantGroup): # There's already a PBXBuildFile in this phase corresponding to the # PBXVariantGroup. path just provides a new variant that belongs to # the group. Add the path to the dict. pbxbuildfile = self._files_by_xcfilelikeelement[file_ref] self._AddBuildFileToDicts(pbxbuildfile, path) else: # Add a new PBXBuildFile to get file_ref into the phase. 
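      # Illustrative flow (hypothetical paths): a first call like
      #   phase.AddFile('path/to/English.lproj/MainMenu.nib')
      # reaches this branch and creates the PBXBuildFile; a later call with
      # 'path/to/French.lproj/MainMenu.nib' resolves to the same
      # PBXVariantGroup and is handled by the branch above, which only records
      # the new variant path.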
if settings is None: pbxbuildfile = PBXBuildFile({'fileRef': file_ref}) else: pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings}) self.AppendBuildFile(pbxbuildfile, path) class PBXHeadersBuildPhase(XCBuildPhase): # No additions to the schema relative to XCBuildPhase. def Name(self): return 'Headers' def FileGroup(self, path): return self.PBXProjectAncestor().RootGroupForPath(path) class PBXResourcesBuildPhase(XCBuildPhase): # No additions to the schema relative to XCBuildPhase. def Name(self): return 'Resources' def FileGroup(self, path): return self.PBXProjectAncestor().RootGroupForPath(path) class PBXSourcesBuildPhase(XCBuildPhase): # No additions to the schema relative to XCBuildPhase. def Name(self): return 'Sources' def FileGroup(self, path): return self.PBXProjectAncestor().RootGroupForPath(path) class PBXFrameworksBuildPhase(XCBuildPhase): # No additions to the schema relative to XCBuildPhase. def Name(self): return 'Frameworks' def FileGroup(self, path): (root, ext) = posixpath.splitext(path) if ext != '': ext = ext[1:].lower() if ext == 'o': # .o files are added to Xcode Frameworks phases, but conceptually aren't # frameworks, they're more like sources or intermediates. Redirect them # to show up in one of those other groups. return self.PBXProjectAncestor().RootGroupForPath(path) else: return (self.PBXProjectAncestor().FrameworksGroup(), False) class PBXShellScriptBuildPhase(XCBuildPhase): _schema = XCBuildPhase._schema.copy() _schema.update({ 'inputPaths': [1, str, 0, 1, []], 'name': [0, str, 0, 0], 'outputPaths': [1, str, 0, 1, []], 'shellPath': [0, str, 0, 1, '/bin/sh'], 'shellScript': [0, str, 0, 1], 'showEnvVarsInLog': [0, int, 0, 0], }) def Name(self): if 'name' in self._properties: return self._properties['name'] return 'ShellScript' class PBXCopyFilesBuildPhase(XCBuildPhase): _schema = XCBuildPhase._schema.copy() _schema.update({ 'dstPath': [0, str, 0, 1], 'dstSubfolderSpec': [0, int, 0, 1], 'name': [0, str, 0, 0], }) # path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is # "DIR", match group 3 is "path" or None. path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$') # path_tree_to_subfolder maps names of Xcode variables to the associated # dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object. path_tree_to_subfolder = { 'BUILT_FRAMEWORKS_DIR': 10, # Frameworks Directory 'BUILT_PRODUCTS_DIR': 16, # Products Directory # Other types that can be chosen via the Xcode UI. # TODO(mark): Map Xcode variable names to these. # : 1, # Wrapper # : 6, # Executables: 6 # : 7, # Resources # : 15, # Java Resources # : 11, # Shared Frameworks # : 12, # Shared Support # : 13, # PlugIns } def Name(self): if 'name' in self._properties: return self._properties['name'] return 'CopyFiles' def FileGroup(self, path): return self.PBXProjectAncestor().RootGroupForPath(path) def SetDestination(self, path): """Set the dstSubfolderSpec and dstPath properties from path. path may be specified in the same notation used for XCHierarchicalElements, specifically, "$(DIR)/path". """ path_tree_match = self.path_tree_re.search(path) if path_tree_match: # Everything else needs to be relative to an Xcode variable. path_tree = path_tree_match.group(1) relative_path = path_tree_match.group(3) if path_tree in self.path_tree_to_subfolder: subfolder = self.path_tree_to_subfolder[path_tree] if relative_path is None: relative_path = '' else: # The path starts with an unrecognized Xcode variable # name like $(SRCROOT). 
Xcode will still handle this # as an "absolute path" that starts with the variable. subfolder = 0 relative_path = path elif path.startswith('/'): # Special case. Absolute paths are in dstSubfolderSpec 0. subfolder = 0 relative_path = path[1:] else: raise ValueError('Can\'t use path %s in a %s' % \ (path, self.__class__.__name__)) self._properties['dstPath'] = relative_path self._properties['dstSubfolderSpec'] = subfolder class PBXBuildRule(XCObject): _schema = XCObject._schema.copy() _schema.update({ 'compilerSpec': [0, str, 0, 1], 'filePatterns': [0, str, 0, 0], 'fileType': [0, str, 0, 1], 'isEditable': [0, int, 0, 1, 1], 'outputFiles': [1, str, 0, 1, []], 'script': [0, str, 0, 0], }) def Name(self): # Not very inspired, but it's what Xcode uses. return self.__class__.__name__ def Hashables(self): # super hashables = XCObject.Hashables(self) # Use the hashables of the weak objects that this object refers to. hashables.append(self._properties['fileType']) if 'filePatterns' in self._properties: hashables.append(self._properties['filePatterns']) return hashables class PBXContainerItemProxy(XCObject): # When referencing an item in this project file, containerPortal is the # PBXProject root object of this project file. When referencing an item in # another project file, containerPortal is a PBXFileReference identifying # the other project file. # # When serving as a proxy to an XCTarget (in this project file or another), # proxyType is 1. When serving as a proxy to a PBXFileReference (in another # project file), proxyType is 2. Type 2 is used for references to the # producs of the other project file's targets. # # Xcode is weird about remoteGlobalIDString. Usually, it's printed without # a comment, indicating that it's tracked internally simply as a string, but # sometimes it's printed with a comment (usually when the object is initially # created), indicating that it's tracked as a project file object at least # sometimes. This module always tracks it as an object, but contains a hack # to prevent it from printing the comment in the project file output. See # _XCKVPrint. _schema = XCObject._schema.copy() _schema.update({ 'containerPortal': [0, XCContainerPortal, 0, 1], 'proxyType': [0, int, 0, 1], 'remoteGlobalIDString': [0, XCRemoteObject, 0, 1], 'remoteInfo': [0, str, 0, 1], }) def __repr__(self): props = self._properties name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo']) return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self)) def Name(self): # Admittedly not the best name, but it's what Xcode uses. return self.__class__.__name__ def Hashables(self): # super hashables = XCObject.Hashables(self) # Use the hashables of the weak objects that this object refers to. hashables.extend(self._properties['containerPortal'].Hashables()) hashables.extend(self._properties['remoteGlobalIDString'].Hashables()) return hashables class PBXTargetDependency(XCObject): # The "target" property accepts an XCTarget object, and obviously not # NoneType. But XCTarget is defined below, so it can't be put into the # schema yet. The definition of PBXTargetDependency can't be moved below # XCTarget because XCTarget's own schema references PBXTargetDependency. # Python doesn't deal well with this circular relationship, and doesn't have # a real way to do forward declarations. To work around, the type of # the "target" property is reset below, after XCTarget is defined. # # At least one of "name" and "target" is required. 
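  # Illustrative construction (hypothetical objects), mirroring what
  # XCTarget.AddDependency does below for same-project dependencies:
  #   proxy = PBXContainerItemProxy({'containerPortal': pbxproject,
  #                                  'proxyType': 1,
  #                                  'remoteGlobalIDString': other_target,
  #                                  'remoteInfo': other_target.Name()})
  #   dependency = PBXTargetDependency({'target': other_target,
  #                                     'targetProxy': proxy})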
_schema = XCObject._schema.copy() _schema.update({ 'name': [0, str, 0, 0], 'target': [0, None.__class__, 0, 0], 'targetProxy': [0, PBXContainerItemProxy, 1, 1], }) def __repr__(self): name = self._properties.get('name') or self._properties['target'].Name() return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self)) def Name(self): # Admittedly not the best name, but it's what Xcode uses. return self.__class__.__name__ def Hashables(self): # super hashables = XCObject.Hashables(self) # Use the hashables of the weak objects that this object refers to. hashables.extend(self._properties['targetProxy'].Hashables()) return hashables class PBXReferenceProxy(XCFileLikeElement): _schema = XCFileLikeElement._schema.copy() _schema.update({ 'fileType': [0, str, 0, 1], 'path': [0, str, 0, 1], 'remoteRef': [0, PBXContainerItemProxy, 1, 1], }) class XCTarget(XCRemoteObject): # An XCTarget is really just an XCObject, the XCRemoteObject thing is just # to allow PBXProject to be used in the remoteGlobalIDString property of # PBXContainerItemProxy. # # Setting a "name" property at instantiation may also affect "productName", # which may in turn affect the "PRODUCT_NAME" build setting in children of # "buildConfigurationList". See __init__ below. _schema = XCRemoteObject._schema.copy() _schema.update({ 'buildConfigurationList': [0, XCConfigurationList, 1, 1, XCConfigurationList()], 'buildPhases': [1, XCBuildPhase, 1, 1, []], 'dependencies': [1, PBXTargetDependency, 1, 1, []], 'name': [0, str, 0, 1], 'productName': [0, str, 0, 1], }) def __init__(self, properties=None, id=None, parent=None, force_outdir=None, force_prefix=None, force_extension=None): # super XCRemoteObject.__init__(self, properties, id, parent) # Set up additional defaults not expressed in the schema. If a "name" # property was supplied, set "productName" if it is not present. Also set # the "PRODUCT_NAME" build setting in each configuration, but only if # the setting is not present in any build configuration. if 'name' in self._properties: if not 'productName' in self._properties: self.SetProperty('productName', self._properties['name']) if 'productName' in self._properties: if 'buildConfigurationList' in self._properties: configs = self._properties['buildConfigurationList'] if configs.HasBuildSetting('PRODUCT_NAME') == 0: configs.SetBuildSetting('PRODUCT_NAME', self._properties['productName']) def AddDependency(self, other): pbxproject = self.PBXProjectAncestor() other_pbxproject = other.PBXProjectAncestor() if pbxproject == other_pbxproject: # Add a dependency to another target in the same project file. container = PBXContainerItemProxy({'containerPortal': pbxproject, 'proxyType': 1, 'remoteGlobalIDString': other, 'remoteInfo': other.Name()}) dependency = PBXTargetDependency({'target': other, 'targetProxy': container}) self.AppendProperty('dependencies', dependency) else: # Add a dependency to a target in a different project file. other_project_ref = \ pbxproject.AddOrGetProjectReference(other_pbxproject)[1] container = PBXContainerItemProxy({ 'containerPortal': other_project_ref, 'proxyType': 1, 'remoteGlobalIDString': other, 'remoteInfo': other.Name(), }) dependency = PBXTargetDependency({'name': other.Name(), 'targetProxy': container}) self.AppendProperty('dependencies', dependency) # Proxy all of these through to the build configuration list. 
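  # For example (illustrative): target.SetBuildSetting('PRODUCT_NAME', 'foo')
  # forwards to the target's XCConfigurationList, which applies the value to
  # every XCBuildConfiguration it contains.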
def ConfigurationNamed(self, name): return self._properties['buildConfigurationList'].ConfigurationNamed(name) def DefaultConfiguration(self): return self._properties['buildConfigurationList'].DefaultConfiguration() def HasBuildSetting(self, key): return self._properties['buildConfigurationList'].HasBuildSetting(key) def GetBuildSetting(self, key): return self._properties['buildConfigurationList'].GetBuildSetting(key) def SetBuildSetting(self, key, value): return self._properties['buildConfigurationList'].SetBuildSetting(key, \ value) def AppendBuildSetting(self, key, value): return self._properties['buildConfigurationList'].AppendBuildSetting(key, \ value) def DelBuildSetting(self, key): return self._properties['buildConfigurationList'].DelBuildSetting(key) # Redefine the type of the "target" property. See PBXTargetDependency._schema # above. PBXTargetDependency._schema['target'][1] = XCTarget class PBXNativeTarget(XCTarget): # buildPhases is overridden in the schema to be able to set defaults. # # NOTE: Contrary to most objects, it is advisable to set parent when # constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject # object. A parent reference is required for a PBXNativeTarget during # construction to be able to set up the target defaults for productReference, # because a PBXBuildFile object must be created for the target and it must # be added to the PBXProject's mainGroup hierarchy. _schema = XCTarget._schema.copy() _schema.update({ 'buildPhases': [1, XCBuildPhase, 1, 1, [PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]], 'buildRules': [1, PBXBuildRule, 1, 1, []], 'productReference': [0, PBXFileReference, 0, 1], 'productType': [0, str, 0, 1], }) # Mapping from Xcode product-types to settings. The settings are: # filetype : used for explicitFileType in the project file # prefix : the prefix for the file name # suffix : the suffix for the file name _product_filetypes = { 'com.apple.product-type.application': ['wrapper.application', '', '.app'], 'com.apple.product-type.application.watchapp': ['wrapper.application', '', '.app'], 'com.apple.product-type.watchkit-extension': ['wrapper.app-extension', '', '.appex'], 'com.apple.product-type.app-extension': ['wrapper.app-extension', '', '.appex'], 'com.apple.product-type.bundle': ['wrapper.cfbundle', '', '.bundle'], 'com.apple.product-type.framework': ['wrapper.framework', '', '.framework'], 'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib', 'lib', '.dylib'], 'com.apple.product-type.library.static': ['archive.ar', 'lib', '.a'], 'com.apple.product-type.tool': ['compiled.mach-o.executable', '', ''], 'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle', '', '.xctest'], 'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib', '', '.so'], 'com.apple.product-type.kernel-extension': ['wrapper.kext', '', '.kext'], } def __init__(self, properties=None, id=None, parent=None, force_outdir=None, force_prefix=None, force_extension=None): # super XCTarget.__init__(self, properties, id, parent) if 'productName' in self._properties and \ 'productType' in self._properties and \ not 'productReference' in self._properties and \ self._properties['productType'] in self._product_filetypes: products_group = None pbxproject = self.PBXProjectAncestor() if pbxproject != None: products_group = pbxproject.ProductsGroup() if products_group != None: (filetype, prefix, suffix) = \ self._product_filetypes[self._properties['productType']] # Xcode does not have a distinct type for loadable modules that are # pure BSD 
targets (not in a bundle wrapper). GYP allows such modules # to be specified by setting a target type to loadable_module without # having mac_bundle set. These are mapped to the pseudo-product type # com.googlecode.gyp.xcode.bundle. # # By picking up this special type and converting it to a dynamic # library (com.apple.product-type.library.dynamic) with fix-ups, # single-file loadable modules can be produced. # # MACH_O_TYPE is changed to mh_bundle to produce the proper file type # (as opposed to mh_dylib). In order for linking to succeed, # DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be # cleared. They are meaningless for type mh_bundle. # # Finally, the .so extension is forcibly applied over the default # (.dylib), unless another forced extension is already selected. # .dylib is plainly wrong, and .bundle is used by loadable_modules in # bundle wrappers (com.apple.product-type.bundle). .so seems an odd # choice because it's used as the extension on many other systems that # don't distinguish between linkable shared libraries and non-linkable # loadable modules, but there's precedent: Python loadable modules on # Mac OS X use an .so extension. if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle': self._properties['productType'] = \ 'com.apple.product-type.library.dynamic' self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle') self.SetBuildSetting('DYLIB_CURRENT_VERSION', '') self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '') if force_extension is None: force_extension = suffix[1:] if self._properties['productType'] == \ 'com.apple.product-type-bundle.unit.test': if force_extension is None: force_extension = suffix[1:] if force_extension is not None: # If it's a wrapper (bundle), set WRAPPER_EXTENSION. # Extension override. suffix = '.' + force_extension if filetype.startswith('wrapper.'): self.SetBuildSetting('WRAPPER_EXTENSION', force_extension) else: self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension) if filetype.startswith('compiled.mach-o.executable'): product_name = self._properties['productName'] product_name += suffix suffix = '' self.SetProperty('productName', product_name) self.SetBuildSetting('PRODUCT_NAME', product_name) # Xcode handles most prefixes based on the target type, however there # are exceptions. If a "BSD Dynamic Library" target is added in the # Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that # behavior. if force_prefix is not None: prefix = force_prefix if filetype.startswith('wrapper.'): self.SetBuildSetting('WRAPPER_PREFIX', prefix) else: self.SetBuildSetting('EXECUTABLE_PREFIX', prefix) if force_outdir is not None: self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir) # TODO(tvl): Remove the below hack. # http://code.google.com/p/gyp/issues/detail?id=122 # Some targets include the prefix in the target_name. These targets # really should just add a product_name setting that doesn't include # the prefix. For example: # target_name = 'libevent', product_name = 'event' # This check cleans up for them. 
product_name = self._properties['productName'] prefix_len = len(prefix) if prefix_len and (product_name[:prefix_len] == prefix): product_name = product_name[prefix_len:] self.SetProperty('productName', product_name) self.SetBuildSetting('PRODUCT_NAME', product_name) ref_props = { 'explicitFileType': filetype, 'includeInIndex': 0, 'path': prefix + product_name + suffix, 'sourceTree': 'BUILT_PRODUCTS_DIR', } file_ref = PBXFileReference(ref_props) products_group.AppendChild(file_ref) self.SetProperty('productReference', file_ref) def GetBuildPhaseByType(self, type): if not 'buildPhases' in self._properties: return None the_phase = None for phase in self._properties['buildPhases']: if isinstance(phase, type): # Some phases may be present in multiples in a well-formed project file, # but phases like PBXSourcesBuildPhase may only be present singly, and # this function is intended as an aid to GetBuildPhaseByType. Loop # over the entire list of phases and assert if more than one of the # desired type is found. assert the_phase is None the_phase = phase return the_phase def HeadersPhase(self): headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase) if headers_phase is None: headers_phase = PBXHeadersBuildPhase() # The headers phase should come before the resources, sources, and # frameworks phases, if any. insert_at = len(self._properties['buildPhases']) for index in xrange(0, len(self._properties['buildPhases'])): phase = self._properties['buildPhases'][index] if isinstance(phase, PBXResourcesBuildPhase) or \ isinstance(phase, PBXSourcesBuildPhase) or \ isinstance(phase, PBXFrameworksBuildPhase): insert_at = index break self._properties['buildPhases'].insert(insert_at, headers_phase) headers_phase.parent = self return headers_phase def ResourcesPhase(self): resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase) if resources_phase is None: resources_phase = PBXResourcesBuildPhase() # The resources phase should come before the sources and frameworks # phases, if any. 
insert_at = len(self._properties['buildPhases']) for index in xrange(0, len(self._properties['buildPhases'])): phase = self._properties['buildPhases'][index] if isinstance(phase, PBXSourcesBuildPhase) or \ isinstance(phase, PBXFrameworksBuildPhase): insert_at = index break self._properties['buildPhases'].insert(insert_at, resources_phase) resources_phase.parent = self return resources_phase def SourcesPhase(self): sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase) if sources_phase is None: sources_phase = PBXSourcesBuildPhase() self.AppendProperty('buildPhases', sources_phase) return sources_phase def FrameworksPhase(self): frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase) if frameworks_phase is None: frameworks_phase = PBXFrameworksBuildPhase() self.AppendProperty('buildPhases', frameworks_phase) return frameworks_phase def AddDependency(self, other): # super XCTarget.AddDependency(self, other) static_library_type = 'com.apple.product-type.library.static' shared_library_type = 'com.apple.product-type.library.dynamic' framework_type = 'com.apple.product-type.framework' if isinstance(other, PBXNativeTarget) and \ 'productType' in self._properties and \ self._properties['productType'] != static_library_type and \ 'productType' in other._properties and \ (other._properties['productType'] == static_library_type or \ ((other._properties['productType'] == shared_library_type or \ other._properties['productType'] == framework_type) and \ ((not other.HasBuildSetting('MACH_O_TYPE')) or other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))): file_ref = other.GetProperty('productReference') pbxproject = self.PBXProjectAncestor() other_pbxproject = other.PBXProjectAncestor() if pbxproject != other_pbxproject: other_project_product_group = \ pbxproject.AddOrGetProjectReference(other_pbxproject)[0] file_ref = other_project_product_group.GetChildByRemoteObject(file_ref) self.FrameworksPhase().AppendProperty('files', PBXBuildFile({'fileRef': file_ref})) class PBXAggregateTarget(XCTarget): pass class PBXProject(XCContainerPortal): # A PBXProject is really just an XCObject, the XCContainerPortal thing is # just to allow PBXProject to be used in the containerPortal property of # PBXContainerItemProxy. """ Attributes: path: "sample.xcodeproj". TODO(mark) Document me! _other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each value is a reference to the dict in the projectReferences list associated with the keyed PBXProject. """ _schema = XCContainerPortal._schema.copy() _schema.update({ 'attributes': [0, dict, 0, 0], 'buildConfigurationList': [0, XCConfigurationList, 1, 1, XCConfigurationList()], 'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'], 'hasScannedForEncodings': [0, int, 0, 1, 1], 'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()], 'projectDirPath': [0, str, 0, 1, ''], 'projectReferences': [1, dict, 0, 0], 'projectRoot': [0, str, 0, 1, ''], 'targets': [1, XCTarget, 1, 1, []], }) def __init__(self, properties=None, id=None, parent=None, path=None): self.path = path self._other_pbxprojects = {} # super return XCContainerPortal.__init__(self, properties, id, parent) def Name(self): name = self.path if name[-10:] == '.xcodeproj': name = name[:-10] return posixpath.basename(name) def Path(self): return self.path def Comment(self): return 'Project object' def Children(self): # super children = XCContainerPortal.Children(self) # Add children that the schema doesn't know about. 
Maybe there's a more # elegant way around this, but this is the only case where we need to own # objects in a dictionary (that is itself in a list), and three lines for # a one-off isn't that big a deal. if 'projectReferences' in self._properties: for reference in self._properties['projectReferences']: children.append(reference['ProductGroup']) return children def PBXProjectAncestor(self): return self def _GroupByName(self, name): if not 'mainGroup' in self._properties: self.SetProperty('mainGroup', PBXGroup()) main_group = self._properties['mainGroup'] group = main_group.GetChildByName(name) if group is None: group = PBXGroup({'name': name}) main_group.AppendChild(group) return group # SourceGroup and ProductsGroup are created by default in Xcode's own # templates. def SourceGroup(self): return self._GroupByName('Source') def ProductsGroup(self): return self._GroupByName('Products') # IntermediatesGroup is used to collect source-like files that are generated # by rules or script phases and are placed in intermediate directories such # as DerivedSources. def IntermediatesGroup(self): return self._GroupByName('Intermediates') # FrameworksGroup and ProjectsGroup are top-level groups used to collect # frameworks and projects. def FrameworksGroup(self): return self._GroupByName('Frameworks') def ProjectsGroup(self): return self._GroupByName('Projects') def RootGroupForPath(self, path): """Returns a PBXGroup child of this object to which path should be added. This method is intended to choose between SourceGroup and IntermediatesGroup on the basis of whether path is present in a source directory or an intermediates directory. For the purposes of this determination, any path located within a derived file directory such as PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates directory. The returned value is a two-element tuple. The first element is the PBXGroup, and the second element specifies whether that group should be organized hierarchically (True) or as a single flat list (False). """ # TODO(mark): make this a class variable and bind to self on call? # Also, this list is nowhere near exhaustive. # INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by # gyp.generator.xcode. There should probably be some way for that module # to push the names in, rather than having to hard-code them here. source_tree_groups = { 'DERIVED_FILE_DIR': (self.IntermediatesGroup, True), 'INTERMEDIATE_DIR': (self.IntermediatesGroup, True), 'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True), 'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True), } (source_tree, path) = SourceTreeAndPathFromPath(path) if source_tree != None and source_tree in source_tree_groups: (group_func, hierarchical) = source_tree_groups[source_tree] group = group_func() return (group, hierarchical) # TODO(mark): make additional choices based on file extension. return (self.SourceGroup(), True) def AddOrGetFileInRootGroup(self, path): """Returns a PBXFileReference corresponding to path in the correct group according to RootGroupForPath's heuristics. If an existing PBXFileReference for path exists, it will be returned. Otherwise, one will be created and returned. 
""" (group, hierarchical) = self.RootGroupForPath(path) return group.AddOrGetFileByPath(path, hierarchical) def RootGroupsTakeOverOnlyChildren(self, recurse=False): """Calls TakeOverOnlyChild for all groups in the main group.""" for group in self._properties['mainGroup']._properties['children']: if isinstance(group, PBXGroup): group.TakeOverOnlyChild(recurse) def SortGroups(self): # Sort the children of the mainGroup (like "Source" and "Products") # according to their defined order. self._properties['mainGroup']._properties['children'] = \ sorted(self._properties['mainGroup']._properties['children'], cmp=lambda x,y: x.CompareRootGroup(y)) # Sort everything else by putting group before files, and going # alphabetically by name within sections of groups and files. SortGroup # is recursive. for group in self._properties['mainGroup']._properties['children']: if not isinstance(group, PBXGroup): continue if group.Name() == 'Products': # The Products group is a special case. Instead of sorting # alphabetically, sort things in the order of the targets that # produce the products. To do this, just build up a new list of # products based on the targets. products = [] for target in self._properties['targets']: if not isinstance(target, PBXNativeTarget): continue product = target._properties['productReference'] # Make sure that the product is already in the products group. assert product in group._properties['children'] products.append(product) # Make sure that this process doesn't miss anything that was already # in the products group. assert len(products) == len(group._properties['children']) group._properties['children'] = products else: group.SortGroup() def AddOrGetProjectReference(self, other_pbxproject): """Add a reference to another project file (via PBXProject object) to this one. Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in this project file that contains a PBXReferenceProxy object for each product of each PBXNativeTarget in the other project file. ProjectRef is a PBXFileReference to the other project file. If this project file already references the other project file, the existing ProductGroup and ProjectRef are returned. The ProductGroup will still be updated if necessary. """ if not 'projectReferences' in self._properties: self._properties['projectReferences'] = [] product_group = None project_ref = None if not other_pbxproject in self._other_pbxprojects: # This project file isn't yet linked to the other one. Establish the # link. product_group = PBXGroup({'name': 'Products'}) # ProductGroup is strong. product_group.parent = self # There's nothing unique about this PBXGroup, and if left alone, it will # wind up with the same set of hashables as all other PBXGroup objects # owned by the projectReferences list. Add the hashables of the # remote PBXProject that it's related to. product_group._hashables.extend(other_pbxproject.Hashables()) # The other project reports its path as relative to the same directory # that this project's path is relative to. The other project's path # is not necessarily already relative to this project. Figure out the # pathname that this project needs to use to refer to the other one. 
this_path = posixpath.dirname(self.Path()) projectDirPath = self.GetProperty('projectDirPath') if projectDirPath: if posixpath.isabs(projectDirPath[0]): this_path = projectDirPath else: this_path = posixpath.join(this_path, projectDirPath) other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path) # ProjectRef is weak (it's owned by the mainGroup hierarchy). project_ref = PBXFileReference({ 'lastKnownFileType': 'wrapper.pb-project', 'path': other_path, 'sourceTree': 'SOURCE_ROOT', }) self.ProjectsGroup().AppendChild(project_ref) ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref} self._other_pbxprojects[other_pbxproject] = ref_dict self.AppendProperty('projectReferences', ref_dict) # Xcode seems to sort this list case-insensitively self._properties['projectReferences'] = \ sorted(self._properties['projectReferences'], cmp=lambda x,y: cmp(x['ProjectRef'].Name().lower(), y['ProjectRef'].Name().lower())) else: # The link already exists. Pull out the relevant data. project_ref_dict = self._other_pbxprojects[other_pbxproject] product_group = project_ref_dict['ProductGroup'] project_ref = project_ref_dict['ProjectRef'] self._SetUpProductReferences(other_pbxproject, product_group, project_ref) inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False) targets = other_pbxproject.GetProperty('targets') if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets): dir_path = project_ref._properties['path'] product_group._hashables.extend(dir_path) return [product_group, project_ref] def _AllSymrootsUnique(self, target, inherit_unique_symroot): # Returns True if all configurations have a unique 'SYMROOT' attribute. # The value of inherit_unique_symroot decides whether a configuration that # doesn't define an explicit value for 'SYMROOT' is assumed to inherit a # unique 'SYMROOT' attribute from its parent. symroots = self._DefinedSymroots(target) for s in self._DefinedSymroots(target): if (s is not None and not self._IsUniqueSymrootForTarget(s) or s is None and not inherit_unique_symroot): return False return True if symroots else inherit_unique_symroot def _DefinedSymroots(self, target): # Returns all values for the 'SYMROOT' attribute defined in all # configurations for this target. If any configuration doesn't define the # 'SYMROOT' attribute, None is added to the returned set. If no # configuration defines the 'SYMROOT' attribute, an empty set is # returned. config_list = target.GetProperty('buildConfigurationList') symroots = set() for config in config_list.GetProperty('buildConfigurations'): setting = config.GetProperty('buildSettings') if 'SYMROOT' in setting: symroots.add(setting['SYMROOT']) else: symroots.add(None) if len(symroots) == 1 and None in symroots: return set() return symroots def _IsUniqueSymrootForTarget(self, symroot): # This method returns True if all configurations in target contain a # 'SYMROOT' attribute that is unique for the given target. A value is # unique if the Xcode macro '$SRCROOT' appears in it in any form. uniquifier = ['$SRCROOT', '$(SRCROOT)'] if any(x in symroot for x in uniquifier): return True return False def _SetUpProductReferences(self, other_pbxproject, product_group, project_ref): # TODO(mark): This only adds references to products in other_pbxproject # when they don't exist in this pbxproject. Perhaps it should also # remove references from this pbxproject that are no longer present in # other_pbxproject. Perhaps it should update various properties if they # change.
for target in other_pbxproject._properties['targets']: if not isinstance(target, PBXNativeTarget): continue other_fileref = target._properties['productReference'] if product_group.GetChildByRemoteObject(other_fileref) is None: # Xcode sets remoteInfo to the name of the target and not the name # of its product, despite this proxy being a reference to the product. container_item = PBXContainerItemProxy({ 'containerPortal': project_ref, 'proxyType': 2, 'remoteGlobalIDString': other_fileref, 'remoteInfo': target.Name() }) # TODO(mark): Does sourceTree get copied straight over from the other # project? Can the other project ever have lastKnownFileType here # instead of explicitFileType? (Use it if so?) Can path ever be # unset? (I don't think so.) Can other_fileref have name set, and # does it impact the PBXReferenceProxy if so? These are the questions # that perhaps will be answered one day. reference_proxy = PBXReferenceProxy({ 'fileType': other_fileref._properties['explicitFileType'], 'path': other_fileref._properties['path'], 'sourceTree': other_fileref._properties['sourceTree'], 'remoteRef': container_item, }) product_group.AppendChild(reference_proxy) def SortRemoteProductReferences(self): # For each remote project file, sort the associated ProductGroup in the # same order that the targets are sorted in the remote project file. This # is the sort order used by Xcode. def CompareProducts(x, y, remote_products): # x and y are PBXReferenceProxy objects. Go through their associated # PBXContainerItem to get the remote PBXFileReference, which will be # present in the remote_products list. x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString'] y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString'] x_index = remote_products.index(x_remote) y_index = remote_products.index(y_remote) # Use the order of each remote PBXFileReference in remote_products to # determine the sort order. return cmp(x_index, y_index) for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems(): # Build up a list of products in the remote project file, ordered the # same as the targets that produce them. remote_products = [] for target in other_pbxproject._properties['targets']: if not isinstance(target, PBXNativeTarget): continue remote_products.append(target._properties['productReference']) # Sort the PBXReferenceProxy children according to the list of remote # products. product_group = ref_dict['ProductGroup'] product_group._properties['children'] = sorted( product_group._properties['children'], cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp)) class XCProjectFile(XCObject): _schema = XCObject._schema.copy() _schema.update({ 'archiveVersion': [0, int, 0, 1, 1], 'classes': [0, dict, 0, 1, {}], 'objectVersion': [0, int, 0, 1, 46], 'rootObject': [0, PBXProject, 1, 1], }) def ComputeIDs(self, recursive=True, overwrite=True, hash=None): # Although XCProjectFile is implemented here as an XCObject, it's not a # proper object in the Xcode sense, and it certainly doesn't have its own # ID. Pass through an attempt to update IDs to the real root object. if recursive: self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash) def Print(self, file=sys.stdout): self.VerifyHasRequiredProperties() # Add the special "objects" property, which will be caught and handled # separately during printing. This structure allows a fairly standard # loop do the normal printing. 
self._properties['objects'] = {} self._XCPrint(file, 0, '// !$*UTF8*$!\n') if self._should_print_single_line: self._XCPrint(file, 0, '{ ') else: self._XCPrint(file, 0, '{\n') for property, value in sorted(self._properties.iteritems(), cmp=lambda x, y: cmp(x, y)): if property == 'objects': self._PrintObjects(file) else: self._XCKVPrint(file, 1, property, value) self._XCPrint(file, 0, '}\n') del self._properties['objects'] def _PrintObjects(self, file): if self._should_print_single_line: self._XCPrint(file, 0, 'objects = {') else: self._XCPrint(file, 1, 'objects = {\n') objects_by_class = {} for object in self.Descendants(): if object == self: continue class_name = object.__class__.__name__ if not class_name in objects_by_class: objects_by_class[class_name] = [] objects_by_class[class_name].append(object) for class_name in sorted(objects_by_class): self._XCPrint(file, 0, '\n') self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n') for object in sorted(objects_by_class[class_name], cmp=lambda x, y: cmp(x.id, y.id)): object.Print(file) self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n') if self._should_print_single_line: self._XCPrint(file, 0, '}; ') else: self._XCPrint(file, 1, '};\n')
mit
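Editorial note on the gyp record above: the HeadersPhase and ResourcesPhase helpers share one ordering rule, scan buildPhases for the first phase of a kind that must come later and insert the new phase just before it. A minimal, self-contained sketch of that rule, using plain strings in place of the PBX*BuildPhase classes (PHASE_ORDER and insert_phase are illustrative names, not gyp API):

PHASE_ORDER = ['headers', 'resources', 'sources', 'frameworks']

def insert_phase(phases, new_phase):
    """Insert new_phase before the first phase that must come after it."""
    rank = PHASE_ORDER.index(new_phase)
    insert_at = len(phases)                    # default: append at the end
    for index, phase in enumerate(phases):
        if PHASE_ORDER.index(phase) > rank:    # first "later" phase found
            insert_at = index
            break
    phases.insert(insert_at, new_phase)
    return phases

if __name__ == '__main__':
    # 'headers' lands in front even though it is added last.
    print(insert_phase(['sources', 'frameworks'], 'headers'))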
lbybee/kaggle_contests
reverse_gol/simple_reverse_gol.py
1
15661
from itertools import combinations import pymc as pm import numpy as np import datetime import csv # These are helper functions that are called by various functions # throughout the code def addBorder(in_array): """adds a border of zeros to an array""" array = np.array(in_array) shape = array.shape array = np.c_[np.zeros(shape[0]), array, np.zeros(shape[0])] shape = array.shape array = np.r_[[np.zeros(shape[1])], array, [np.zeros(shape[1])]] return array def removeBorder(in_array): """removes the border of zeros""" array = np.array(in_array) return array[1:-1, 1:-1] # These functions are run to prep the initial data def iterateGoL(in_array): """iterates in_array one step in the GoL""" array = np.array(in_array) neighbors = (array[0:-2, 0:-2] + array[0:-2, 1:-1] + array[0:-2, 2:] + array[1:-1, 0:-2] + array[1:-1, 2:] + array[2:, 0:-2] + array[2:, 1:-1] + array[2:, 2:]) birth = (neighbors == 3) & (array[1:-1, 1:-1] == 0) survive = ((neighbors == 3) | (neighbors == 2)) & (array[1:-1, 1:-1] == 1) array[...] = 0 array[1:-1, 1:-1][survive | birth] = 1 return array def iterateGoLList(array_list): """iterates all the arrays in a list one step forward in the GoL""" output_list = [] for array in array_list: array = addBorder(array) array = iterateGoL(array) array = removeBorder(array) output_list.append(array) return output_list def genCombs(neighbors): """generates all possible combinations of a list of neighbors""" nb_comb = [] for i in range(len(neighbors)): comb = combinations(neighbors, i + 1) nb_comb.extend([c for c in comb]) return nb_comb def genFloat(array_list): """updates all the arrays in a list as floats""" output_list = [] for array in array_list: t_array = np.array(array) t_array = t_array.astype(float) output_list.append(t_array) return output_list # These functions are unique in that they are required for more than just # genPosterior but are not used to prep the data def mapSubsetKeys(nb, array): """maps each neighbor to its corresponding section of the array""" return {nb[0]: array[0:-2, 0:-2], nb[1]: array[0:-2, 1:-1], nb[2]: array[0:-2, 2:], nb[3]: array[1:-1, 0:-2], nb[4]: array[1:-1, 2:], nb[5]: array[2:, 0:-2], nb[6]: array[2:, 1:-1], nb[7]: array[2:, 2:]} def genTruthArray(in_array, nb_comb, nb, nb_key_dict, state): """generates an array of truth values for the given combination and array""" bool_array = np.array(in_array[1:-1, 1:-1]) res_array = np.array(in_array[1:-1, 1:-1]) res_array[...]
= 0 bool_array = True not_nb_comb = list(set(nb) - set(nb_comb)) for c in nb_comb: bool_array = (bool_array & (nb_key_dict[c] == 1)) for c in not_nb_comb: bool_array = (bool_array & (nb_key_dict[c] == 0)) bool_array = (bool_array & (in_array[1:-1, 1:-1] == state)) res_array[bool_array] = 1 return res_array def genTruthArrayList(in_array, comb_list, nb, nb_key_dict): """generates a list of arrays of truth values for each combination""" truth_list_0 = [] truth_list_1 = [] for comb in comb_list: truth_list_0.append(genTruthArray(in_array, comb, nb, nb_key_dict, 0)) truth_list_1.append(genTruthArray(in_array, comb, nb, nb_key_dict, 1)) return truth_list_0, truth_list_1 # These functions are run in the MCMC code to prep the priors def genParams(nb_comb): """returns the parameters to be estimated""" betas_0 = pm.Uniform("betas_0", 0.0, 1.0, size=len(nb_comb)) betas_1 = pm.Uniform("betas_1", 0.0, 1.0, size=len(nb_comb)) return betas_0, betas_1 def genPrior01(array_list): """generates the prior probability of the prev state being 1 or 0, alpha""" ones = 0.0 dim = 0.0 for array in array_list: ones += np.sum(array) dim += array.size alpha = ones / dim return alpha def genPriorComb(array_list, neighbors, comb_list): """generates the probability of each combination""" comb_output_0 = [0.0] * len(comb_list) comb_output_1 = [0.0] * len(comb_list) dim = 0.0 j = 0 for array in array_list: j += 1 print j array = addBorder(array) nb_key_dict = mapSubsetKeys(neighbors, array) truth_list_0, truth_list_1 = genTruthArrayList(array, comb_list, neighbors, nb_key_dict) i = 0 for truth_array_0, truth_array_1 in zip(truth_list_0, truth_list_1): true_0 = np.sum(truth_array_0) true_1 = np.sum(truth_array_1) comb_output_0[i] += true_0 comb_output_1[i] += true_1 i += 1 dim += truth_list_0[0].size comb_output_0 = [num / dim for num in comb_output_0] comb_output_1 = [num / dim for num in comb_output_1] return comb_output_0, comb_output_1 # These functions generate the prior probabilities of each combination # for the posterior def prepPriorCombLikelihood(comb, beta): """generates the prior prob for each comb""" prob_array = np.array(comb) prob_array[prob_array == 1] = beta prob_array[prob_array == 0] = 1 return prob_array def prepPriorLikelihood(in_array, truth_list_0, truth_list_1, modifier_list_0, modifier_list_1): """generates the prior prob for the array using the modifier""" prob_array = np.array(in_array) prob_array[...] 
= 1 for truth, modifier in zip(truth_list_0, modifier_list_0): prior = prepPriorCombLikelihood(truth, modifier) prob_array = prob_array * prior for truth, modifier in zip(truth_list_1, modifier_list_1): prior = prepPriorCombLikelihood(truth, modifier) prob_array = prob_array * prior return prob_array def genPosterior(array, neighbors, comb_list, betas_0, betas_1, comb_priors_0, comb_priors_1, alpha): """generates the posterior probability array""" array = addBorder(array) nb_key_dict = mapSubsetKeys(neighbors, array) truth_list_0, truth_list_1 = genTruthArrayList(array, comb_list, neighbors, nb_key_dict) array = removeBorder(array) prior_cond = prepPriorLikelihood(array, truth_list_0, truth_list_1, betas_0, betas_1) prior_ind = prepPriorLikelihood(array, truth_list_0, truth_list_1, comb_priors_0, comb_priors_1) res = (alpha * prior_cond) / prior_ind return res def genPosteriorList(array_list, neighbors, comb_list, betas_0, betas_1, comb_priors_0, comb_priors_1, alpha): """generates a list of posteriors""" post_list = [] for array in array_list: post_list.append(genPosterior(array, neighbors, comb_list, betas_0, betas_1, comb_priors_0, comb_priors_1, alpha)) return post_list # This is the code for the MCMC, it specifies the betas def chkMCMC(start, finish, comb_list, neighbors, alpha, comb_priors_0, comb_priors_1, iterations, betas_0, betas_1): """runs the MCMC code for a chunk of the data""" t_1 = datetime.datetime.now() @pm.deterministic def p(f=finish, n=neighbors, cl=comb_list, b0=betas_0, b1=betas_1, cp0=comb_priors_0, cp1=comb_priors_1, a=alpha): return genPosteriorList(f, n, cl, b0, b1, cp0, cp1, a) observed = pm.Bernoulli("obs", p, value=start, observed=True) model = pm.Model([betas_0, betas_1, observed]) mcmc = pm.MCMC(model) mcmc.sample(int(iterations)) t_2 = datetime.datetime.now() print "\n" print t_2 - t_1 print "\n" return betas_0, betas_1 def runMCMC(start, finish, comb_list, neighbors, iterations, chk): """runs the MCMC code for all the chunks""" betas_0, betas_1 = genParams(comb_list) alpha = genPrior01(start) comb_priors_0, comb_priors_1 = genPriorComb(finish, neighbors, comb_list) for i in range(0, len(finish), chk): chk_start = start[i:i + chk] chk_finish = finish[i:i + chk] betas_0, betas_1 = chkMCMC(chk_start, chk_finish, comb_list, neighbors, alpha, comb_priors_0, comb_priors_1, iterations, betas_0, betas_1) return (betas_0.value, betas_1.value, alpha, comb_priors_0, comb_priors_1) # This is the code for the estimation def genArrayValues(prob_array, lambd): """returns the array of ones""" array = np.array(prob_array) add_val = (array >= lambd) array[...] 
= 0 array[add_val] = 1 return array def iterateSubArray(in_array, test_values): """iterates all combinations for a subboard""" live = np.nonzero(test_values) live_comb = [] for i in range(len(live)): comb = combinations([(j, k) for j, k in zip(live[0], live[1])], i + 1) live_comb.extend([c for c in comb]) test_values = addBorder(test_values) test_iter = iterateGoL(test_values) test_values = removeBorder(test_values) test_iter = removeBorder(test_iter) curr_score = np.sum(in_array == test_iter) / test_iter.size curr_array = np.array(test_values) for c in live_comb: iter_array = np.array(test_values) iter_array = addBorder(iter_array) iter_array = iterateGoL(iter_array) iter_array = removeBorder(iter_array) iter_array[c] = 0 iter_score = np.sum(in_array == iter_array) / iter_array.size if iter_score > curr_score: curr_array = np.array(iter_array) curr_score = iter_score if (iter_score == curr_score and np.sum(iter_array) < np.sum(curr_array)): curr_array = np.array(iter_array) curr_score = iter_score return curr_array def iterateRem(in_array, test_values): """removes elements until we find the closest match""" # output_array = np.array(in_array) # for i in range(0, 20, 10): # for j in range(0, 20, 10): # chk_array = in_array[i - 9:i, j - 9:j] # chk_test = test_values[i - 9:i, j - 9:j] # output_array[i - 9:i, j - 9:j] = iterateSubArray(chk_array, # chk_test) # return output_array return iterateSubArray(in_array, test_values) def genEstimate(array, delta, comb_list, neighbors, betas_0, betas_1, comb_prior_0, comb_prior_1, lambd, alpha): """generates the actual estimates""" for i in range(delta): prob_array = genPosterior(array, neighbors, comb_list, betas_0, betas_1, comb_prior_0, comb_prior_1, alpha) test_values = genArrayValues(prob_array, lambd) array = iterateRem(array, test_values) return array def chkEstimation(finish, delta_list, comb_list, neighbors, betas_0, betas_1, comb_prior_0, comb_prior_1, lambd, alpha, total, total_time, start_ind, dir_n): """generates the estimates for the chk""" estimates = [] for array, delta in zip(finish, delta_list): t_1 = datetime.datetime.now() total += 1 t_est = genEstimate(array, delta, comb_list, neighbors, betas_0, betas_1, comb_prior_0, comb_prior_1, lambd, alpha) csv_f = open(dir_n + "output_%d.csv" % start_ind, "ab") writer = csv.writer(csv_f) est_l = [str(total + start_ind)] for row in t_est: est_l.extend(row) writer.writerow(est_l) csv_f.close() estimates.append(t_est) t_2 = datetime.datetime.now() total_time += t_2 - t_1 print t_2 - t_1, total, total_time, start_ind return estimates, total, total_time def runEstimation(finish, delta, comb_list, neighbors, betas_0, betas_1, alpha, comb_prior_0, comb_prior_1, lambd, chk, start_ind, dir_n): """generates the estimates""" total = 0 total_time = datetime.timedelta(0) estimates = [] for i in range(0, len(finish), chk): chk_finish = finish[i: i + chk] chk_delta = delta[i: i + chk] est_t, total, total_time = chkEstimation(chk_finish, chk_delta, comb_list, neighbors, betas_0, betas_1, comb_prior_0, comb_prior_1, lambd, alpha, total, total_time, start_ind, dir_n) estimates.extend(est_t) return estimates def checkEstimation(true, test, delta): """tests how accuracte the results are for all arrays""" num = 0.0 den = 0.0 for tr, te, d in zip(true, test, delta): correct = np.sum(tr == te) tot = tr.size ratio = correct / (tot * 1.0) print ratio num += correct den += tot return num / den def checkContainsSingle(start, finish, comb_list, neighbors, betas_0, betas_1, alpha, comb_prior_0, comb_prior_1, lambd): 
"""checks that the estimated values contain the correct value""" num = 0.0 den = 0.0 for s, f in zip(start, finish): prob_array = genPosterior(f, neighbors, comb_list, betas_0, betas_1, comb_prior_0, comb_prior_1, alpha) test_values = genArrayValues(prob_array, lambd) missing = (test_values == 0) & (s == 1) num += np.sum(missing) den += missing.size cont = (num * 1.0) / den print cont return cont def checkContainsFull(start, finish, delta, comb_list, neighbors, betas_0, betas_1, alpha, comb_prior_0, comb_prior_1, lambd): """checks that the estimated values work when run with the delta""" num = 0.0 den = 0.0 for s, f, d in zip(start, finish, delta): for i in range(d): prob_array = genPosterior(f, neighbors, comb_list, betas_0, betas_1, comb_prior_0, comb_prior_1, alpha) test_values = genArrayValues(prob_array, lambd) f = iterateRem(f, test_values) missing = (test_values == 0) & (s == 1) num += np.sum(missing) den += missing.size cont = (num * 1.0) / den print cont return cont def testProbDist(start, finish, comb_list, neighbors, betas_0, betas_1, alpha, comb_prior_0, comb_prior_1, lambd): """gets the distribution of the probabilities in all prob arrays""" prob_list = [] for s, f in zip(start, finish): prob_list.append(genPosterior(f, neighbors, comb_list, betas_0, betas_1, comb_prior_0, comb_prior_1, alpha)) return np.histogram(prob_list, range=(0.0, 1.0))
gpl-2.0
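Editorial note on the reverse Game of Life script above: its iterateGoL routine counts live neighbours by summing eight shifted slices of a zero-padded board. The same trick as a compact, runnable NumPy sketch (Python 3 style; the step function and the blinker demo are illustrative and not part of the Kaggle script):

import numpy as np

def step(board):
    """Advance a 0/1 board one Game of Life generation."""
    padded = np.pad(board, 1, mode='constant')           # zero border, as addBorder does
    rows, cols = board.shape
    neighbors = sum(padded[i:i + rows, j:j + cols]
                    for i in range(3) for j in range(3)
                    if (i, j) != (1, 1))                  # eight shifted views
    birth = (neighbors == 3) & (board == 0)
    survive = ((neighbors == 2) | (neighbors == 3)) & (board == 1)
    return (birth | survive).astype(board.dtype)

if __name__ == '__main__':
    blinker = np.zeros((5, 5), dtype=int)
    blinker[2, 1:4] = 1          # horizontal blinker ...
    print(step(blinker))         # ... becomes vertical after one step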
holmes/intellij-community
plugins/hg4idea/testData/bin/mercurial/scmwindows.py
94
1669
import os import osutil import util import _winreg def systemrcpath(): '''return default os-specific hgrc search path''' rcpath = [] filename = util.executablepath() # Use mercurial.ini found in directory with hg.exe progrc = os.path.join(os.path.dirname(filename), 'mercurial.ini') if os.path.isfile(progrc): rcpath.append(progrc) return rcpath # Use hgrc.d found in directory with hg.exe progrcd = os.path.join(os.path.dirname(filename), 'hgrc.d') if os.path.isdir(progrcd): for f, kind in osutil.listdir(progrcd): if f.endswith('.rc'): rcpath.append(os.path.join(progrcd, f)) return rcpath # else look for a system rcpath in the registry value = util.lookupreg('SOFTWARE\\Mercurial', None, _winreg.HKEY_LOCAL_MACHINE) if not isinstance(value, str) or not value: return rcpath value = util.localpath(value) for p in value.split(os.pathsep): if p.lower().endswith('mercurial.ini'): rcpath.append(p) elif os.path.isdir(p): for f, kind in osutil.listdir(p): if f.endswith('.rc'): rcpath.append(os.path.join(p, f)) return rcpath def userrcpath(): '''return os-specific hgrc search path to the user dir''' home = os.path.expanduser('~') path = [os.path.join(home, 'mercurial.ini'), os.path.join(home, '.hgrc')] userprofile = os.environ.get('USERPROFILE') if userprofile: path.append(os.path.join(userprofile, 'mercurial.ini')) path.append(os.path.join(userprofile, '.hgrc')) return path
apache-2.0
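Editorial note on the mercurial record above: both search-path helpers probe candidate locations and collect every *.rc file found in the directories among them. A tiny, standalone restatement of that scan in plain Python (the directory argument is only an example; mercurial's own code uses its osutil.listdir wrapper):

import os

def collect_rc_files(directory):
    """Return the *.rc files inside directory, if it exists."""
    paths = []
    if os.path.isdir(directory):
        for name in sorted(os.listdir(directory)):
            if name.endswith('.rc'):
                paths.append(os.path.join(directory, name))
    return paths

if __name__ == '__main__':
    print(collect_rc_files('hgrc.d'))   # [] when the directory is absent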
CydarLtd/ansible
lib/ansible/modules/network/bigswitch/bigmon_policy.py
51
6930
#!/usr/bin/python # -*- coding: utf-8 -*- # Ansible module to manage Big Monitoring Fabric service chains # (c) 2016, Ted Elhourani <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: bigmon_policy author: "Ted (@tedelhourani)" short_description: Create and remove a bigmon out-of-band policy. description: - Create and remove a bigmon out-of-band policy. version_added: "2.3" options: name: description: - The name of the policy. required: true policy_description: description: - Description of policy. action: description: - What to do with matching packets: forward them to delivery interfaces, drop them (measure the rate of matching packets without forwarding them), capture them and write them to a PCAP file, or enable NetFlow generation. default: forward choices: ['forward', 'drop', 'capture', 'flow-gen'] priority: description: - A priority associated with this policy. A higher-priority policy takes precedence over a lower-priority one. default: 100 duration: description: - Run the policy for this duration or until delivery_packet_count packets are delivered, whichever comes first. default: 0 start_time: description: - Date the policy becomes active. default: ansible_date_time.iso8601 delivery_packet_count: description: - Run policy until delivery_packet_count packets are delivered. default: 0 state: description: - Whether the policy should be present or absent. default: present choices: ['present', 'absent'] controller: description: - The controller address. required: true validate_certs: description: - If C(false), SSL certificates will not be validated. This should only be used on personally controlled devices using self-signed certificates. required: false default: true choices: [true, false] access_token: description: - Bigmon access token. If this isn't set, the environment variable C(BIGSWITCH_ACCESS_TOKEN) is used.
''' EXAMPLES = ''' - name: policy to aggregate filter and deliver data center (DC) 1 traffic bigmon_policy: name: policy1 policy_description: DC 1 traffic policy action: drop controller: '{{ inventory_hostname }}' state: present validate_certs: false ''' RETURN = ''' # ''' import os import datetime from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.bigswitch_utils import Rest, Response from ansible.module_utils.pycompat24 import get_exception def policy(module): try: access_token = module.params['access_token'] or os.environ['BIGSWITCH_ACCESS_TOKEN'] except KeyError: e = get_exception() module.fail_json(msg='Unable to load %s' % e.message) name = module.params['name'] policy_description = module.params['policy_description'] action = module.params['action'] priority = module.params['priority'] duration = module.params['duration'] start_time = module.params['start_time'] delivery_packet_count = module.params['delivery_packet_count'] state = module.params['state'] controller = module.params['controller'] rest = Rest(module, {'content-type': 'application/json', 'Cookie': 'session_cookie='+access_token}, 'https://'+controller+':8443/api/v1/data/controller/applications/bigtap') if name is None: module.fail_json(msg='parameter `name` is missing') response = rest.get('policy?config=true', data={}) if response.status_code != 200: module.fail_json(msg="failed to obtain existing policy config: {}".format(response.json['description'])) config_present = False matching = [policy for policy in response.json if policy['name'] == name and policy['duration'] == duration and policy['delivery-packet-count'] == delivery_packet_count and policy['policy-description'] == policy_description and policy['action'] == action and policy['priority'] == priority] if matching: config_present = True if state in ('present') and config_present: module.exit_json(changed=False) if state in ('absent') and not config_present: module.exit_json(changed=False) if state in ('present'): data={'name': name, 'action': action, 'policy-description': policy_description, 'priority': priority, 'duration': duration, 'start-time': start_time, 'delivery-packet-count': delivery_packet_count } response = rest.put('policy[name="%s"]' % name, data=data) if response.status_code == 204: module.exit_json(changed=True) else: module.fail_json(msg="error creating policy '{}': {}".format(name, response.json['description'])) if state in ('absent'): response = rest.delete('policy[name="%s"]' % name, data={}) if response.status_code == 204: module.exit_json(changed=True) else: module.fail_json(msg="error deleting policy '{}': {}".format(name, response.json['description'])) def main(): module = AnsibleModule( argument_spec=dict( name=dict(type='str', required=True), policy_description=dict(type='str', default=''), action=dict(choices=['forward', 'drop', 'capture', 'flow-gen'], default='forward'), priority=dict(type='int', default=100), duration=dict(type='int', default=0), start_time=dict(type='str', default=datetime.datetime.now().isoformat()+'+00:00'), delivery_packet_count=dict(type='int', default=0), controller=dict(type='str', required=True), state=dict(choices=['present', 'absent'], default='present'), validate_certs=dict(type='bool', default='True'), access_token=dict(type='str', no_log=True) ) ) try: policy(module) except Exception: e = get_exception() module.fail_json(msg=str(e)) if __name__ == '__main__': main()
gpl-3.0
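Editorial note on the bigmon_policy module above: it is idempotent, it GETs the existing policy config, exits unchanged when a matching policy is already present, and only writes otherwise. A sketch of that check-before-write pattern using the requests library; the URL paths and header names simply mirror the module and are assumptions, not a documented Big Monitoring Fabric API:

import requests

def ensure_policy(base_url, token, desired, verify=True):
    """Create `desired` only if an equivalent policy is not already configured."""
    headers = {'content-type': 'application/json',
               'Cookie': 'session_cookie=' + token}
    current = requests.get(base_url + '/policy?config=true',
                           headers=headers, verify=verify).json()
    if any(p.get('name') == desired['name'] and p.get('action') == desired.get('action')
           for p in current):
        return False                                        # nothing to do
    resp = requests.put(base_url + '/policy[name="%s"]' % desired['name'],
                        json=desired, headers=headers, verify=verify)
    resp.raise_for_status()
    return True                                             # changed

# Example call (needs a reachable controller):
# ensure_policy('https://controller:8443/api/v1/data/controller/applications/bigtap',
#               'TOKEN', {'name': 'policy1', 'action': 'drop'}, verify=False)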
SUSE/lrbd
test/test_acls.py
2
4227
from lrbd import Acls, Common, Runtime, entries from nose.tools import * import unittest, mock import re, tempfile class AclsTestCase(unittest.TestCase): def setUp(self): Common.config['iqns'] = [ "iqn.xyz" ] Common.config['auth'] = [ { "host": "igw1", "authentication": "acls" } ] Common.config['portals'] = [ { "name": "portal1", "addresses": [ "172.16.1.16" ] } ] Common.config['pools'] = [ { "pool": "rbd", "gateways": [ { "host": "igw1", "tpg": [ { "image": "archive", "initiator": "iqn.abc", "portal": "portal1" } ] } ] } ] Runtime.config['addresses'] = [ "172.16.1.16" ] Runtime.config['portals'] = {} Runtime.config['portals']["iqn.xyz"] = {} Runtime.config['portals']["iqn.xyz"]["archive"] = {} Runtime.config['portals']["iqn.xyz"]["archive"]["portal1"] = "1" def test_acls(self): class mock_Acls(Acls): def _find(self): pass def _cmd(self, target, tpg, initiator): self.called = " ".join([ target, str(tpg), initiator ]) self.a = mock_Acls() assert self.a.called == "iqn.xyz 1 iqn.abc" @raises(AttributeError) def test_tpg(self): Common.config['auth'] = [ { "host": "igw1", "authentication": "tpg" } ] class mock_Acls(Acls): def _find(self): pass def _cmd(self, target, tpg, initiator): self.called = True self.a = mock_Acls() self.a.called @mock.patch('glob.glob') def test_find(self, mock_subproc_glob): mock_subproc_glob.return_value = [ "/s/k/c/t/i/t/t_1/a/iqn.abc" ] class mock_Acls(Acls): def _cmd(self, target, tpg, initiator): self.called = " ".join([ target, str(tpg), initiator ]) self.a = mock_Acls() assert self.a.exists == {'iqn.xyz': {'1': ['iqn.abc']}} @mock.patch('glob.glob') def test_find_does_nothing(self, mock_subproc_glob): mock_subproc_glob.return_value = [ ] class mock_Acls(Acls): def _cmd(self, target, tpg, initiator): self.called = " ".join([ target, str(tpg), initiator ]) self.a = mock_Acls() assert not self.a.initiators def test_cmd(self): class mock_Acls(Acls): def _find(self): pass self.a = mock_Acls() print self.a.cmds assert self.a.cmds == [['targetcli', '/iscsi/iqn.xyz/tpg1/acls', 'create', 'iqn.abc']] @mock.patch('lrbd.Popen') def test_create(self, mock_subproc_popen): mock_subproc_popen.return_value.returncode = 0 Common.config['iqns'] = [ "iqn.xyz" ] Common.config['portals'] = [ { "name": "portal1", "addresses": [ "172.16.1.16" ] } ] Common.config['pools'] = [ { "pool": "rbd", "gateways": [ { "host": "igw1", "tpg": [ { "image": "archive", "initiator": "iqn.abc", "portal": "portal1" } ] } ] } ] Runtime.config['addresses'] = [ "172.16.1.16" ] Runtime.config['portals'] = {} Runtime.config['portals']["iqn.xyz"] = {} Runtime.config['portals']["iqn.xyz"]["archive"] = {} Runtime.config['portals']["iqn.xyz"]["archive"]["portal1"] = "1" class mock_Acls(Acls): def _find(self): pass def _cmd(self, target, tpg, initiator): self.called = " ".join([ target, str(tpg), initiator ]) self.a = mock_Acls() self.a.cmds = [[ "targetcli", "hello" ]] self.a.create() assert mock_subproc_popen.called
lgpl-2.1
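Editorial note on the lrbd test record above: the tests lean on mock.patch to stub out glob.glob and Popen so that no iSCSI state or subprocess is touched. The same technique in a self-contained test, written with Python 3's unittest.mock rather than the standalone mock/nose used above (function and path names are illustrative):

import glob
import unittest
from unittest import mock

def rc_files(pattern):
    """Function under test: return matching paths in sorted order."""
    return sorted(glob.glob(pattern))

class RcFilesTest(unittest.TestCase):
    @mock.patch('glob.glob')
    def test_sorted_result(self, mock_glob):
        mock_glob.return_value = ['/t/b', '/t/a']     # pretend filesystem contents
        self.assertEqual(rc_files('/t/*'), ['/t/a', '/t/b'])
        mock_glob.assert_called_once_with('/t/*')

if __name__ == '__main__':
    unittest.main()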
Eric-Zhong/odoo
addons/lunch/report/report_lunch_order.py
341
2771
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import tools from openerp.osv import fields,osv class report_lunch_order(osv.osv): _name = "report.lunch.order.line" _description = "Lunch Orders Statistics" _auto = False _rec_name = 'date' _columns = { 'date': fields.date('Date Order', readonly=True, select=True), 'year': fields.char('Year', size=4, readonly=True), 'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')], 'Month', readonly=True), 'day': fields.char('Day', size=128, readonly=True), 'user_id': fields.many2one('res.users', 'User Name'), 'price_total':fields.float('Total Price', readonly=True), 'note' : fields.text('Note', readonly=True), } _order = 'date desc' def init(self, cr): tools.drop_view_if_exists(cr, 'report_lunch_order_line') cr.execute(""" create or replace view report_lunch_order_line as ( select min(lo.id) as id, lo.user_id as user_id, lo.date as date, to_char(lo.date, 'YYYY') as year, to_char(lo.date, 'MM') as month, to_char(lo.date, 'YYYY-MM-DD') as day, lo.note as note, sum(lp.price) as price_total from lunch_order_line as lo left join lunch_product as lp on (lo.product_id = lp.id) group by lo.date,lo.user_id,lo.note ) """) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
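Editorial note on the lunch report above: the model is just a SQL view, order lines joined to products with price summed per (date, user, note). The same aggregation run against an in-memory SQLite database, with table contents invented purely for illustration:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.executescript("""
    CREATE TABLE lunch_order_line (id INTEGER, user_id INTEGER,
                                   date TEXT, note TEXT, product_id INTEGER);
    CREATE TABLE lunch_product (id INTEGER, price REAL);
    INSERT INTO lunch_product VALUES (1, 4.5), (2, 7.0);
    INSERT INTO lunch_order_line VALUES
        (1, 10, '2014-03-03', NULL, 1),
        (2, 10, '2014-03-03', NULL, 2),
        (3, 11, '2014-03-04', 'no onions', 1);
""")
rows = conn.execute("""
    SELECT lo.date, lo.user_id, SUM(lp.price) AS price_total
      FROM lunch_order_line AS lo
      LEFT JOIN lunch_product AS lp ON lo.product_id = lp.id
     GROUP BY lo.date, lo.user_id, lo.note
""").fetchall()
print(rows)   # one aggregated row per (date, user, note)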
rombie/contrail-controller
src/config/api-server/vnc_cfg_api_server/tests/test_import_server.py
1
6338
# # Copyright (c) 2018 Juniper Networks, Inc. All rights reserved. # """ This file contains implementation of job api handler code """ import gevent from enum import Enum from vnc_api.vnc_api import VncApi import sys import time import base64 class ImportServerStatus(Enum): INIT = 0 IN_PROGRESS = 1 COMPLETE = 2 FAILED = 3 # end class JobStatus class ImportServerJob(object): JOB_STATUS_MAPPING = { 'SUCCESS': ImportServerStatus.COMPLETE, 'FAILURE': ImportServerStatus.FAILED, 'UNKNOWN': ImportServerStatus.FAILED } def __init__(self, job_type, job_input, api_server_config): self._job_type = job_type self._job_input = job_input self._api_server_config = api_server_config self._job_id = None self._job_status = ImportServerStatus.INIT super(ImportServerJob, self).__init__() # end __init__ def push(self, timeout, max_retries): vnc_api = self._get_vnc_api(self._api_server_config) self._job_status = ImportServerStatus.IN_PROGRESS job_execution_id = '123' try: print ("SD handler: executing job for (%s, %s)" % (self._job_id, str(self._job_type))) job_execution_info = vnc_api.execute_job( job_template_fq_name=self._job_type, job_input=self._job_input ) job_execution_id = job_execution_info.get('job_execution_id') print ("SD started with execution id %s" % job_execution_id) self._wait(vnc_api, job_execution_id, timeout, max_retries) except Exception as e: print ("SD handler: push failed for (%s, %s)" " execution id %s: %s" % (self._job_id, str(self._job_type), job_execution_id, repr(e))) self._job_status = ImportServerStatus.FAILED if self._job_status == ImportServerStatus.FAILED: raise Exception("SD handler: push failed for (%s, %s)" " execution id %s" % (self._job_id, str(self._job_type), job_execution_id)) print ("SD handler: push succeeded for (%s, %s)" " execution id %s" % (self._job_id, str(self._job_type), job_execution_id)) # end push def _check_job_status(self, vnc_api, job_execution_id, status): try: job_status = vnc_api.job_status(job_execution_id) return self._verify_job_status(job_status, status) except Exception as e: print ("SD handler: error while querying " "SD status for execution_id %s: %s" % (job_execution_id, repr(e))) return False # end _check_job_status def _get_job_status(self, vnc_api, job_execution_id): if self._check_job_status(vnc_api, job_execution_id, ImportServerStatus.COMPLETE): return ImportServerStatus.COMPLETE if self._check_job_status(vnc_api, job_execution_id, ImportServerStatus.FAILED): return ImportServerStatus.FAILED return ImportServerStatus.IN_PROGRESS # end _get_job_status def _wait(self, vnc_api, job_execution_id, timeout, max_retries): retry_count = 1 while not self.is_job_done(): self._job_status = self._get_job_status(vnc_api, job_execution_id) if not self.is_job_done(): if retry_count >= max_retries: print ( "SD handler: timed out waiting for job %s for device" " %s and job_type %s:" % (job_execution_id, self._job_id, str(self._job_type))) self._job_status = ImportServerStatus.FAILED else: retry_count += 1 gevent.sleep(timeout) # end _wait def get_job_status(self): return self._job_status # end get_job_status def is_job_done(self): if self._job_status == ImportServerStatus.COMPLETE or \ self._job_status == ImportServerStatus.FAILED: return True return False # end is_job_done @staticmethod def _get_vnc_api(api_config): return VncApi( api_server_host=api_config.get('api_server_host'), api_server_port=api_config.get('api_server_port'), username=api_config.get('username'), password=api_config.get('password'), tenant_name=api_config.get('tenant_name'), 
domain_name=api_config.get('domain_name'), api_server_use_ssl=api_config.get('api_server_use_ssl')) # end _get_vnc_api @classmethod def _verify_job_status(cls, job_status, status): return job_status and \ cls.JOB_STATUS_MAPPING.get(job_status.get('job_status')) == \ status # end _verify_job_status # end class JobHandler def main(): """Main entry point for the script.""" yaml_file = open('node.yaml', 'rb') file_read = yaml_file.read() file_base64_encode = base64.encodestring(file_read) fabric_fq_name = [ 'default-global-system-config', "abc"] cc_auth_host = "10.10.10.11" api_server_config = { 'api_server_host': '10.10.10.10', 'api_server_port': '8082', 'username': 'admin', 'password': 'password', 'tenant_name': 'admin', 'domain_name': 'default', 'api_server_use_ssl': False } job_template_fq_name = [ 'default-global-system-config', 'server_import_template'] job_input = { "file_format": "yaml", "encoded_file": file_base64_encode, "contrail_command_host": cc_auth_host, "fabric_fq_name": fabric_fq_name } test_sd_obj = ImportServerJob(job_template_fq_name, job_input, api_server_config=api_server_config) start_time = time.time() test_sd_obj.push(100, 5) end_time = time.time() print "time taken: " + str(end_time - start_time) if __name__ == '__main__': sys.exit(main())
apache-2.0
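Editorial note on the job handler above: its _wait method is a bounded polling loop, query the job status, sleep, and give up after max_retries attempts. Stripped of the VNC API calls, the control flow looks like the sketch below (time.sleep stands in for the gevent.sleep used above; names are illustrative):

import time

def wait_until(check, interval_s=1.0, max_retries=5):
    """Poll check() until it is truthy or max_retries attempts are used up."""
    for attempt in range(1, max_retries + 1):
        if check():
            return True                    # job finished
        if attempt < max_retries:
            time.sleep(interval_s)         # back off before the next poll
    return False                           # timed out

if __name__ == '__main__':
    deadline = time.time() + 2
    print(wait_until(lambda: time.time() > deadline))   # True after a few polls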
soldag/home-assistant
homeassistant/components/insteon/light.py
7
1984
"""Support for Insteon lights via PowerLinc Modem.""" from homeassistant.components.light import ( ATTR_BRIGHTNESS, DOMAIN as LIGHT_DOMAIN, SUPPORT_BRIGHTNESS, LightEntity, ) from homeassistant.helpers.dispatcher import async_dispatcher_connect from .const import SIGNAL_ADD_ENTITIES from .insteon_entity import InsteonEntity from .utils import async_add_insteon_entities MAX_BRIGHTNESS = 255 async def async_setup_entry(hass, config_entry, async_add_entities): """Set up the Insteon lights from a config entry.""" def add_entities(discovery_info=None): """Add the Insteon entities for the platform.""" async_add_insteon_entities( hass, LIGHT_DOMAIN, InsteonDimmerEntity, async_add_entities, discovery_info ) signal = f"{SIGNAL_ADD_ENTITIES}_{LIGHT_DOMAIN}" async_dispatcher_connect(hass, signal, add_entities) add_entities() class InsteonDimmerEntity(InsteonEntity, LightEntity): """A Class for an Insteon light entity.""" @property def brightness(self): """Return the brightness of this light between 0..255.""" return self._insteon_device_group.value @property def is_on(self): """Return the boolean response if the node is on.""" return bool(self.brightness) @property def supported_features(self): """Flag supported features.""" return SUPPORT_BRIGHTNESS async def async_turn_on(self, **kwargs): """Turn light on.""" if ATTR_BRIGHTNESS in kwargs: brightness = int(kwargs[ATTR_BRIGHTNESS]) await self._insteon_device.async_on( on_level=brightness, group=self._insteon_device_group.group ) else: await self._insteon_device.async_on(group=self._insteon_device_group.group) async def async_turn_off(self, **kwargs): """Turn light off.""" await self._insteon_device.async_off(self._insteon_device_group.group)
apache-2.0
jgodwinWX/gefs-plots
htmlbuilder.py
1
8544
#!/usr/bin/env python ''' Creates GEFS plots for a single location. Plots include high temperature, low temperature, dewpoint, and precipitation. Program reads in CSV file created by ensemblemeans.py. Will eventually create some HTML tables. ''' import calendar import datetime import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.dates as mdates import numpy import pandas __author__ = 'Jason Godwin' __license__ = 'GPL' __maintainer__ = 'Jason Godwin' __email__ = '[email protected]' __status__ = 'Production' # function for plotting ensemble members def plotter(dataset,namestr,savestr,season,inittime): fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(1,1,1) plt.plot(dataset) plt.grid() # x axis plt.xlim([dataset.index[0],dataset.index[-1]]) plt.xticks(dataset.index,rotation=90) plt.xlabel('Date/Time (UTC)',fontsize=14) ax.xaxis.set_major_locator(mdates.HourLocator(interval=24)) ax.xaxis.set_major_formatter(mdates.DateFormatter('%a %b-%d')) # y axis and title plt.ylabel('Temperature (degrees Fahrenheit)',fontsize=14) if season == 'warm': plt.ylim([40,110]) plt.yticks(numpy.arange(40,110,5)) elif season == 'cold': plt.ylim([0,90]) plt.yticks(numpy.arange(0,90,5)) elif season == 'dwpt': plt.ylim([20,85]) plt.yticks(numpy.arange(20,85,5)) else: plt.ylim([0,110]) plt.yticks(numpy.arange(0,110,5)) plt.title('GEFS Ensemble Daily %s (init: %s)' % (namestr,inittime),fontsize=16) plt.savefig(savestr,bbox_inches='tight') plt.close(fig) # function for plotting precipitation in ensemble members def precip_plotter(dataset,namestr,savestr,inittime): fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(1,1,1) plt.plot(numpy.cumsum(dataset)) plt.grid() # x axis plt.xlim([dataset.index[0],dataset.index[-1]]) plt.xticks(dataset.index,rotation=90) plt.xlabel('Date/Time (UTC)',fontsize=14) ax.xaxis.set_major_locator(mdates.HourLocator(interval=24)) ax.xaxis.set_major_formatter(mdates.DateFormatter('%a %b-%d')) # y axis and title plt.ylim([0.0,4.0]) plt.yticks([0,0.1,0.25,0.50,1,2,3,4]) plt.ylabel('Precipitation (inches)',fontsize=14) plt.title('GEFS Ensemble Daily %s (init: %s)' % (namestr,inittime),fontsize=16) plt.savefig(savestr,bbox_inches='tight') plt.close(fig) # box and whisker plot function def box_and_whisker(dataset,valid_dates,datatype,unitstr,namestr,savestr,inittime): # reformat the date labels valid_dates = [datetime.datetime.strftime(x,'%a %b-%d') for x in sorted(valid_dates)] fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(1,1,1) plt.boxplot(numpy.transpose(numpy.array(dataset)),whis='range',labels=valid_dates) plt.grid() # x axis plt.xticks(rotation=90) plt.xlabel('Date') # set the y limits for temperatures if 'Temperature' in namestr: plt.ylim([0,110]) plt.yticks(numpy.arange(0,110,5)) elif 'Dewpoint' in namestr: plt.ylim([20,85]) plt.yticks(numpy.arange(20,85,5)) elif 'Precip' in namestr: plt.ylim([0.0,4.0]) plt.yticks([0,0.10,0.25,0.50,1.0,2.0,3.0,4.0]) # y axis and title plt.ylabel('%s (%s)' % (datatype,unitstr),fontsize=14) plt.title('GEFS Ensemble Daily %s (init: %s)' % (namestr,inittime),fontsize=16) plt.savefig(savestr,bbox_inches='tight') plt.close(fig) ### USER EDIT SECTION ### savedir = '/home/jgodwin/Documents/python/python/gefs-plots' # directory to save pngs locname = 'Dallas/Fort Worth, TX' # name of location to appear on plots season = 'warm' # season to set temperature info ### END USER EDIT SECTION ### # open CSVs containing ensemble information max_temp_df = pandas.read_csv('%s/maxtemps.csv' % savedir,index_col=0) min_temp_df = 
pandas.read_csv('%s/mintemps.csv' % savedir,index_col=0) dpt_df = pandas.read_csv('%s/dewpoint.csv' % savedir,index_col=0) precip_df = pandas.read_csv('%s/precip.csv' % savedir,index_col=0) # convert index strings into datetime objects max_temp_df.index = pandas.to_datetime(max_temp_df.index) min_temp_df.index = pandas.to_datetime(min_temp_df.index) dpt_df.index = pandas.to_datetime(dpt_df.index) precip_df.index = pandas.to_datetime(precip_df.index) # get individual dates and group max/min/mean values by date dates = [datetime.datetime.strftime(i,'%m/%d/%Y') for i in max_temp_df.index] highs = max_temp_df.groupby(lambda row: row.date()).max() lows = min_temp_df.groupby(lambda row: row.date()).min() dpts = dpt_df.groupby(lambda row: row.date()).mean() precip = precip_df.groupby(lambda row: row.date()).sum() # create list of valid dates and model run init time valid_dates = [datetime.datetime.strptime(x,'%m/%d/%Y') for x in sorted(set(dates))] inittime = datetime.datetime.strftime(max_temp_df.index[0],'%m/%d %H') + '00 UTC' # truncate highs/lows since we are computing on closed intervals # basically, if we don't do this, the 00Z runs will show a spike in high temperatures at the end # of the run, and the highs will show a drop at the end of a 12Z run if max_temp_df.index[0].hour == 0: lows = lows[0:-2] valid_dates_lo = valid_dates[0:-2] valid_dates_hi = valid_dates elif max_temp_df.index[0].hour == 12: highs = highs[0:-2] valid_dates_hi = valid_dates[0:-2] valid_dates_lo = valid_dates # plot forecasts plotter(highs,'High Temperature at %s' % locname,'%s/highs.png' % savedir,season,inittime) plotter(lows,'Low Temperature at %s' % locname,'%s/lows.png' % savedir,season,inittime) plotter(dpts,'Mean Daily Dewpoint at %s' % locname,'%s/dwpt.png' % savedir,'dwpt',inittime) precip_plotter(precip,'Run-Total Precip. 
at %s' % locname,'%s/precip.png' % savedir,inittime) ### PERCENT OF MEMBERS CONTAINING PRECIPITATION ### precip_members = numpy.zeros(17) for i in range(numpy.shape(precip)[0]): precip_members[i] = numpy.shape((numpy.where(numpy.array(precip)[i,:]>0)))[1] / 20.0 # actual plot routine fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(1,1,1) plt.bar(valid_dates,precip_members,width=0.5,align='center') plt.grid() # add ensmble mean values to top of bars rects = ax.patches labels = ['%.02f' % x for x in numpy.nanmean(numpy.array(precip),axis=1)] for rect,label in zip(rects,labels): height = rect.get_height() ax.text(rect.get_x() + rect.get_width()/2, height + 0.01, label, ha='center', va='bottom',\ fontsize=12) # x axis plt.xticks(rotation=90) plt.xlim(valid_dates[0],valid_dates[-1]) plt.xlabel('Date/Time (UTC)',fontsize=14) ax.xaxis.set_major_locator(mdates.DayLocator(interval=1)) ax.xaxis.set_major_formatter(mdates.DateFormatter('%a %b-%d')) # y axis and title plt.ylabel('Percent of Members',fontsize=14) plt.ylim([0,1]) plt.yticks(numpy.arange(0,1,0.1)) vals = ax.get_yticks() ax.set_yticklabels(['{:.0f}%'.format(x*100) for x in vals]) plt.title('GEFS Members Indicating Precipitation at %s (init: %s)' % (locname,inittime),\ fontsize=16) plt.savefig('precip_percent.png',bbox_inches='tight') plt.close(fig) # create box and whisker plots box_and_whisker(highs,valid_dates_hi,'Temperature','degrees Fahrenheit','High Temperature at %s' % \ locname,'%s/box_highs.png' % savedir,inittime) box_and_whisker(lows,valid_dates_lo,'Temperature','degrees Fahrenheit','Low Temperature at %s' % \ locname,'%s/box_lows.png' % savedir,inittime) box_and_whisker(dpts,valid_dates,'Dewpoint','degrees Fahrenheit','Mean Dewpoint at %s' % locname,\ '%s/box_dwpt.png' % savedir,inittime) box_and_whisker(precip,valid_dates,'Precipitation','inches','Total Precip. at %s' % locname,\ '%s/box_precip.png' % savedir,inittime) ##### vv THIS PART STILL UNDER CONSTRUCTION vv ####### # create the webpage html_file = open('%s/dfw.html' % savedir,'w') # page header html_info = """ <html> <head> <title>GEFS Viewer</title> </head> <body> <h1>GEFS Temperature Plot for DFW</h1> <h2>Initialized: %s UTC</h2> """ % datetime.datetime.strftime(max_temp_df.index[0],'%m/%d/%Y %H:%M') # create table header html_info += ''' <table border=1 cols=22 width=1200px> <tr><th>Valid Time</th> ''' # create columns for each ensemble member for i in range(1,21): html_info += ''' <th>GEP %d</th> ''' % i html_info += '<th>Ensemble Mean</th></tr>' html_file.write(html_info) html_file.close()
gpl-3.0
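Below is a minimal, hedged sketch (synthetic data and invented member names, not the script's real CSVs) of the groupby-by-date reduction the GEFS script above applies to its 6-hourly ensemble data before plotting daily highs and lows.

import numpy
import pandas

# three hypothetical ensemble members at 6-hourly valid times over two days
times = pandas.date_range('2017-06-01 00:00', periods=8, freq='6H')
members = pandas.DataFrame(numpy.random.uniform(70, 100, size=(8, 3)),
                           index=times, columns=['gep01', 'gep02', 'gep03'])

# same pattern as the script: collapse each calendar date to one value per member
daily_highs = members.groupby(lambda row: row.date()).max()
daily_lows = members.groupby(lambda row: row.date()).min()
print(daily_highs)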
zenoss/Community-Zenpacks
ZenPacks.Nova.UCDFileSystemMap/ZenPacks/Nova/UCDFileSystemMap/modeler/plugins/zenoss/snmp/UCDFileSystemMap.py
3
3083
########################################################################### # # This program is part of Zenoss Core, an open source monitoring platform. # Copyright (C) 2007, 2009 Zenoss Inc. # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License version 2 as published by # the Free Software Foundation. # # For complete information please visit: http://www.zenoss.com/oss/ # ########################################################################### __doc__ = """UCDFileSystemMap UCDFileSystemMap maps the filesystems to filesystem objects """ import re from Products.ZenUtils.Utils import unsigned from Products.DataCollector.plugins.DataMaps import ObjectMap from Products.DataCollector.plugins.CollectorPlugin \ import SnmpPlugin, GetTableMap class UCDFileSystemMap(SnmpPlugin): maptype = "FileSystemMap" compname = "os" relname = "filesystems" modname = "Products.ZenModel.FileSystem" deviceProperties = SnmpPlugin.deviceProperties + ( 'zFileSystemMapIgnoreNames',) columns = { '.1': 'snmpindex', '.2': 'mount', '.3': 'storageDevice', '.6': 'totalBlocks', } snmpGetTableMaps = ( GetTableMap('fsTableOid', '.1.3.6.1.4.1.2021.9.1', columns), ) def process(self, device, results, log): """Process SNMP information from this device""" log.info('Modeler %s processing data for device %s', self.name(), device.id) getdata, tabledata = results log.debug("%s tabledata = %s", device.id, tabledata) fstable = tabledata.get("fsTableOid") if fstable is None: log.error("Unable to get data for %s from fsTableOid" " -- skipping model" % device.id) return None skipfsnames = getattr(device, 'zFileSystemMapIgnoreNames', None) maps = [] rm = self.relMap() for fs in fstable.values(): if not self.checkColumns(fs, self.columns, log): continue totalBlocks = fs['totalBlocks'] # This may now be a redundant check. Candidate for removal. # http://dev.zenoss.org/trac/ticket/4556 if totalBlocks < 0: fs['totalBlocks'] = unsigned(totalBlocks) # blockSize is not used by UCD mibs. # UCD mibs display size in kilobytes. # Value has been hardcoded as 1024 to convert to bytes. fs['blockSize'] = 1024 size = fs['totalBlocks'] # UCD-SNMP-MIB does not provide filesystem type info. # Only zFileSystemMapIgnoreNames is checked. if skipfsnames and re.search(skipfsnames, fs['mount']): log.info("Skipping %s as it matches zFileSystemMapIgnoreNames.", fs['mount']) continue om = self.objectMap(fs) om.id = self.prepId(om.mount) rm.append(om) maps.append(rm) return maps
gpl-2.0
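A small, hedged sketch of the zFileSystemMapIgnoreNames check the modeler above applies to each mount point; the regex pattern and mount list here are invented for illustration and are not Zenoss defaults.

import re

skipfsnames = r'^/(proc|dev|sys)'   # hypothetical ignore pattern
mounts = ['/', '/proc', '/dev/shm', '/var', '/sys/kernel/debug']
# mirrors the plugin's test: skip any mount matching the configured regex
kept = [m for m in mounts if not (skipfsnames and re.search(skipfsnames, m))]
print(kept)   # ['/', '/var']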
daniula/python-social-auth
social/backends/mineid.py
72
1257
from social.backends.oauth import BaseOAuth2 class MineIDOAuth2(BaseOAuth2): """MineID OAuth2 authentication backend""" name = 'mineid' _AUTHORIZATION_URL = '%(scheme)s://%(host)s/oauth/authorize' _ACCESS_TOKEN_URL = '%(scheme)s://%(host)s/oauth/access_token' ACCESS_TOKEN_METHOD = 'POST' SCOPE_SEPARATOR = ',' EXTRA_DATA = [ ] def get_user_details(self, response): """Return user details""" return {'email': response.get('email'), 'username': response.get('email')} def user_data(self, access_token, *args, **kwargs): return self._user_data(access_token) def _user_data(self, access_token, path=None): url = '%(scheme)s://%(host)s/api/user' % self.get_mineid_url_params() return self.get_json(url, params={'access_token': access_token}) @property def AUTHORIZATION_URL(self): return self._AUTHORIZATION_URL % self.get_mineid_url_params() @property def ACCESS_TOKEN_URL(self): return self._ACCESS_TOKEN_URL % self.get_mineid_url_params() def get_mineid_url_params(self): return { 'host': self.setting('HOST', 'www.mineid.org'), 'scheme': self.setting('SCHEME', 'https'), }
bsd-3-clause
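For illustration, this is how the backend's URL templates expand with the default HOST and SCHEME settings returned by get_mineid_url_params() above.

params = {'scheme': 'https', 'host': 'www.mineid.org'}        # defaults from the backend above
print('%(scheme)s://%(host)s/oauth/authorize' % params)       # https://www.mineid.org/oauth/authorize
print('%(scheme)s://%(host)s/oauth/access_token' % params)    # https://www.mineid.org/oauth/access_token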
lorin/umdinst
test/testcaptureprofilereporter.py
1
2301
import unittest import sys import os import errno import commands from xml.dom import minidom from umdinst.setup import which sys.path.append('bin') from umdinst import wrap from testsuccessfulcompiledata import getfield, timezonecheck, xmlifystring from testcapturecompile import programcheck ### Not yet in use class TestCaptureProfileReporter(unittest.TestCase): """Test the program which captures profile reports""" def setUp(self): # Uses gprof data, so make sure that the "loop" # program has been instrumented for gprof os.system("gcc -o loop test/testsource/loop.c -pg") # Make sure the gmon.out file is there self.profiledata = 'test/testsource/gmon.out' self.failUnless(os.access(self.profiledata,os.R_OK)) self.runprog = which.which('gprof') self.failUnless(self.runprog is not None) self.args = 'gprof ./loop'.split() # Create a subdirectory to hold the log file dirname = 'testcaptureprofilereporter' self.logfiledir = os.path.abspath(os.path.join('.',dirname)) os.mkdir(self.logfiledir) def tearDown(self): # Remove the subject logfile, if it exists try: os.unlink(wrap.getlogfilepath(self.logfiledir)) except OSError, e: if e.errno!=errno.ENOENT: raise e os.rmdir(self.logfiledir) def testCapture(self): # gprof will output a lot to standard out, which we will redirect # to /dev/null stdout = sys.stdout try: sys.stdout.flush() sys.stdout = open('/dev/null','w') logfile = wrap.getlogfilepath(self.logfiledir) wrap.capture_profile_report(self.runprog, argv=self.args, logex=logfile) finally: sys.stdout = stdout logfile = wrap.getlogfilepath(self.logfiledir) dom = minidom.parseString(wrap.printable(open(logfile).read())) report = dom.getElementsByTagName('profile_report')[0] # Contents should be the same as contents of running gprof gprof_output = commands.getoutput('gprof ./loop') self.assertEquals(getfield(report,'contents'),wrap.printable(gprof_output)) if __name__ == '__main__': unittest.main()
bsd-3-clause
redhat-openstack/rally
tests/unit/plugins/openstack/context/quotas/test_quotas.py
13
12292
# Copyright 2014: Dassault Systemes # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import random import ddt import jsonschema import mock from rally.plugins.openstack.context.quotas import quotas from tests.unit import test QUOTAS_PATH = "rally.plugins.openstack.context.quotas." @ddt.ddt class QuotasTestCase(test.TestCase): def setUp(self): super(QuotasTestCase, self).setUp() self.unlimited = -1 self.context = { "config": { }, "tenants": { "t1": {"endpoint": mock.MagicMock()}, "t2": {"endpoint": mock.MagicMock()}}, "admin": {"endpoint": mock.MagicMock()}, "task": mock.MagicMock() } def test_quotas_schemas(self): ctx = copy.deepcopy(self.context) ctx["config"]["quotas"] = { "cinder": { "volumes": self.unlimited, "snapshots": self.unlimited, "gigabytes": self.unlimited }, "nova": { "instances": self.unlimited, "cores": self.unlimited, "ram": self.unlimited, "floating_ips": self.unlimited, "fixed_ips": self.unlimited, "metadata_items": self.unlimited, "injected_files": self.unlimited, "injected_file_content_bytes": self.unlimited, "injected_file_path_bytes": self.unlimited, "key_pairs": self.unlimited, "security_groups": self.unlimited, "security_group_rules": self.unlimited }, "neutron": { "network": self.unlimited, "subnet": self.unlimited, "port": self.unlimited, "router": self.unlimited, "floatingip": self.unlimited, "security_group": self.unlimited, "security_group_rule": self.unlimited } } for service in ctx["config"]["quotas"]: for key in ctx["config"]["quotas"][service]: # Test invalid values ctx["config"]["quotas"][service][key] = self.unlimited - 1 try: quotas.Quotas.validate(ctx["config"]["quotas"]) except jsonschema.ValidationError: pass else: self.fail("Invalid value %s must raise a validation error" % ctx["config"]["quotas"][service][key]) ctx["config"]["quotas"][service][key] = 2.5 try: quotas.Quotas.validate(ctx["config"]["quotas"]) except jsonschema.ValidationError: pass else: self.fail("Invalid value %s must raise a validation error" % ctx["config"]["quotas"][service][key]) ctx["config"]["quotas"][service][key] = "-1" try: quotas.Quotas.validate(ctx["config"]["quotas"]) except jsonschema.ValidationError: pass else: self.fail("Invalid value %s must raise a validation error" % ctx["config"]["quotas"][service][key]) # Test valid values ctx["config"]["quotas"][service][key] = random.randint(0, 1000000) try: quotas.Quotas.validate(ctx["config"]["quotas"]) except jsonschema.ValidationError: self.fail("Positive integers are valid quota values") ctx["config"]["quotas"][service][key] = self.unlimited try: quotas.Quotas.validate(ctx["config"]["quotas"]) except jsonschema.ValidationError: self.fail("%d is a valid quota value" % self.unlimited) # Test additional keys are refused ctx["config"]["quotas"][service]["additional"] = self.unlimited try: quotas.Quotas.validate(ctx["config"]["quotas"]) except jsonschema.ValidationError: pass else: self.fail("Additional keys must raise a validation error") del ctx["config"]["quotas"][service]["additional"] # Test valid keys 
are optional ctx["config"]["quotas"][service] = {} try: quotas.Quotas.validate(ctx["config"]["quotas"]) except jsonschema.ValidationError: self.fail("Valid quota keys are optional") @mock.patch("rally.plugins.openstack.context." "quotas.quotas.osclients.Clients") @mock.patch("rally.plugins.openstack.context." "quotas.cinder_quotas.CinderQuotas") def test_cinder_quotas(self, mock_cinder_quotas, mock_clients): ctx = copy.deepcopy(self.context) ctx["config"]["quotas"] = { "cinder": { "volumes": self.unlimited, "snapshots": self.unlimited, "gigabytes": self.unlimited } } tenants = ctx["tenants"] cinder_quotas = ctx["config"]["quotas"]["cinder"] with quotas.Quotas(ctx) as quotas_ctx: quotas_ctx.setup() expected_setup_calls = [] for tenant in tenants: expected_setup_calls.append(mock.call() .update(tenant, **cinder_quotas)) mock_cinder_quotas.assert_has_calls( expected_setup_calls, any_order=True) mock_cinder_quotas.reset_mock() expected_cleanup_calls = [] for tenant in tenants: expected_cleanup_calls.append(mock.call().delete(tenant)) mock_cinder_quotas.assert_has_calls( expected_cleanup_calls, any_order=True) @mock.patch("rally.plugins.openstack.context." "quotas.quotas.osclients.Clients") @mock.patch("rally.plugins.openstack.context." "quotas.nova_quotas.NovaQuotas") def test_nova_quotas(self, mock_nova_quotas, mock_clients): ctx = copy.deepcopy(self.context) ctx["config"]["quotas"] = { "nova": { "instances": self.unlimited, "cores": self.unlimited, "ram": self.unlimited, "floating-ips": self.unlimited, "fixed-ips": self.unlimited, "metadata_items": self.unlimited, "injected_files": self.unlimited, "injected_file_content_bytes": self.unlimited, "injected_file_path_bytes": self.unlimited, "key_pairs": self.unlimited, "security_groups": self.unlimited, "security_group_rules": self.unlimited, } } nova_quotas = ctx["config"]["quotas"]["nova"] with quotas.Quotas(ctx) as quotas_ctx: quotas_ctx.setup() expected_setup_calls = [] for tenant in ctx["tenants"]: expected_setup_calls.append(mock.call() .update(tenant, **nova_quotas)) mock_nova_quotas.assert_has_calls( expected_setup_calls, any_order=True) mock_nova_quotas.reset_mock() expected_cleanup_calls = [] for tenant in ctx["tenants"]: expected_cleanup_calls.append(mock.call().delete(tenant)) mock_nova_quotas.assert_has_calls( expected_cleanup_calls, any_order=True) @mock.patch("rally.plugins.openstack.context." "quotas.quotas.osclients.Clients") @mock.patch("rally.plugins.openstack.context." "quotas.neutron_quotas.NeutronQuotas") def test_neutron_quotas(self, mock_neutron_quotas, mock_clients): ctx = copy.deepcopy(self.context) ctx["config"]["quotas"] = { "neutron": { "network": self.unlimited, "subnet": self.unlimited, "port": self.unlimited, "router": self.unlimited, "floatingip": self.unlimited, "security_group": self.unlimited, "security_group_rule": self.unlimited } } neutron_quotas = ctx["config"]["quotas"]["neutron"] with quotas.Quotas(ctx) as quotas_ctx: quotas_ctx.setup() expected_setup_calls = [] for tenant in ctx["tenants"]: expected_setup_calls.append(mock.call() .update(tenant, **neutron_quotas)) mock_neutron_quotas.assert_has_calls( expected_setup_calls, any_order=True) mock_neutron_quotas.reset_mock() expected_cleanup_calls = [] for tenant in ctx["tenants"]: expected_cleanup_calls.append(mock.call().delete(tenant)) mock_neutron_quotas.assert_has_calls( expected_cleanup_calls, any_order=True) @mock.patch("rally.plugins.openstack.context." "quotas.quotas.osclients.Clients") @mock.patch("rally.plugins.openstack.context." 
"quotas.nova_quotas.NovaQuotas") @mock.patch("rally.plugins.openstack.context." "quotas.cinder_quotas.CinderQuotas") @mock.patch("rally.plugins.openstack.context." "quotas.neutron_quotas.NeutronQuotas") def test_no_quotas(self, mock_neutron_quotas, mock_cinder_quotas, mock_nova_quotas, mock_clients): ctx = copy.deepcopy(self.context) if "quotas" in ctx["config"]: del ctx["config"]["quotas"] with quotas.Quotas(ctx) as quotas_ctx: quotas_ctx.setup() self.assertFalse(mock_cinder_quotas.update.called) self.assertFalse(mock_nova_quotas.update.called) self.assertFalse(mock_neutron_quotas.update.called) self.assertFalse(mock_cinder_quotas.delete.called) self.assertFalse(mock_nova_quotas.delete.called) self.assertFalse(mock_neutron_quotas.delete.called) @ddt.data( {"quotas_ctxt": {"nova": {"cpu": 1}}, "quotas_class_path": "nova_quotas.NovaQuotas"}, {"quotas_ctxt": {"neutron": {"network": 2}}, "quotas_class_path": "neutron_quotas.NeutronQuotas"}, {"quotas_ctxt": {"cinder": {"volumes": 3}}, "quotas_class_path": "cinder_quotas.CinderQuotas"}, {"quotas_ctxt": {"manila": {"shares": 4}}, "quotas_class_path": "manila_quotas.ManilaQuotas"}, {"quotas_ctxt": {"designate": {"domains": 5}}, "quotas_class_path": "designate_quotas.DesignateQuotas"}, ) @ddt.unpack def test_exception_during_cleanup(self, quotas_ctxt, quotas_class_path): with mock.patch(QUOTAS_PATH + quotas_class_path) as mock_quotas: mock_quotas.delete.side_effect = type( "ExceptionDuringCleanup", (Exception, ), {}) ctx = copy.deepcopy(self.context) ctx["config"]["quotas"] = quotas_ctxt # NOTE(boris-42): ensure that cleanup didn't raise exceptions. quotas.Quotas(ctx).cleanup() self.assertEqual(mock_quotas.return_value.delete.call_count, len(self.context["tenants"]))
apache-2.0
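A hedged sketch of the constraint those tests exercise: each quota value must be an integer no smaller than -1 (where -1 means unlimited), so floats and strings are rejected. The single-value schema literal below is an assumption for illustration, not Rally's actual context schema.

import jsonschema

quota_value_schema = {"type": "integer", "minimum": -1}   # assumed shape of one quota value
for value in (10, -1, -2, 2.5, "-1"):
    try:
        jsonschema.validate(value, quota_value_schema)
        print("accepted: %r" % (value,))
    except jsonschema.ValidationError:
        print("rejected: %r" % (value,))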
google/containerregistry
client/v2_2/append_.py
1
3602
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This package provides tools for appending layers to docker images.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import json from containerregistry.client import docker_name from containerregistry.client.v2_2 import docker_digest from containerregistry.client.v2_2 import docker_http from containerregistry.client.v2_2 import docker_image from containerregistry.transform.v2_2 import metadata # _EMPTY_LAYER_TAR_ID is the sha256 of an empty tarball. _EMPTY_LAYER_TAR_ID = 'sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4' class Layer(docker_image.DockerImage): """Appends a new layer on top of a base image. This augments a base docker image with new files from a gzipped tarball, adds environment variables and exposes a port. """ def __init__(self, base, tar_gz, diff_id = None, overrides = None): """Creates a new layer on top of a base with optional tar.gz. Args: base: a base DockerImage for a new layer. tar_gz: an optional gzipped tarball passed as a bytes with filesystem changeset. diff_id: an optional string containing the digest of the uncompressed tar_gz. overrides: an optional metadata.Overrides object of properties to override on the base image. """ self._base = base manifest = json.loads(self._base.manifest()) config_file = json.loads(self._base.config_file()) overrides = overrides or metadata.Overrides() overrides = overrides.Override(created_by=docker_name.USER_AGENT) if tar_gz: self._blob = tar_gz self._blob_sum = docker_digest.SHA256(self._blob) manifest['layers'].append({ 'digest': self._blob_sum, 'mediaType': docker_http.LAYER_MIME, 'size': len(self._blob), }) if not diff_id: diff_id = docker_digest.SHA256(self.uncompressed_blob(self._blob_sum)) # Takes naked hex. overrides = overrides.Override(layers=[diff_id[len('sha256:'):]]) else: # The empty layer. overrides = overrides.Override(layers=[docker_digest.SHA256(b'', '')]) config_file = metadata.Override(config_file, overrides) self._config_file = json.dumps(config_file, sort_keys=True) manifest['config']['digest'] = docker_digest.SHA256( self._config_file.encode('utf8')) self._manifest = json.dumps(manifest, sort_keys=True) def manifest(self): """Override.""" return self._manifest def config_file(self): """Override.""" return self._config_file def blob(self, digest): """Override.""" if digest == self._blob_sum: return self._blob return self._base.blob(digest) # __enter__ and __exit__ allow use as a context manager. def __enter__(self): """Override.""" return self def __exit__(self, unused_type, unused_value, unused_traceback): """Override.""" return
apache-2.0
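A toy sketch (made-up bytes rather than a real tarball) of the digest bookkeeping Layer performs above: the manifest layer entry records the sha256 of the compressed blob, while the config's diff_ids record the sha256 of the uncompressed tar.

import gzip
import hashlib
import io

uncompressed = b'example filesystem changeset'   # stand-in for a real tar stream
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode='wb') as gz:
    gz.write(uncompressed)
compressed = buf.getvalue()

blob_digest = 'sha256:' + hashlib.sha256(compressed).hexdigest()   # recorded in manifest['layers']
diff_id = 'sha256:' + hashlib.sha256(uncompressed).hexdigest()     # recorded (as naked hex) in the config
print(blob_digest)
print(diff_id)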
CPonty/PyPlant
editor.py
1
10432
""" PyPlant 1.0 GUI: Seed File Editor Copyright(c) 2010 Chris Ponticello [email protected] DESCRIPTION: A small GUI application used for creating and editing seed properties. The 21 parameters are changed through command-line input, buttons and sliders. Seed files can be created and edited for later use by the grower application. 2 windows/displays: 'Controls' and 'Data' LICENSE: I, Chris Ponticello, hereby grant the rights to copy, redistribute, modify and otherwise edit any source code file included within PyPlant 1.0, provided that this license agreement and my ownership of the code is maintained and acknowledged. I also grant the right to non-commercial use of the source code without my express permission. Commercial users must seek the permission of the author. """ # Get Directories import sys, os sys.path.append(os.getcwd()+'\lib')#adds lib folder to import directories # PyPlant Libraries from visual import * from visual.controls import * from seeds import * #from common import * # Standard Libraries from random import random # ------------------------------------------------------------------------------------- #CONSTANTS & EXTERNAL FUNCTIONS """ Using the Controls window: http://www.vpython.org/contents/docs/visual/controls.html """ #Constants s=0.9 ctrlWinW=650 ctrlWinH=350 ctrlWinAspect=0.7857#0.7857#1.*ctrlWinW/ctrlWinH dataWinW=650 dataWinH=350 dataWinAspect=dataWinW/dataWinH sliderLen=125*s sliderW=15*s slider_txtbuf=22*s labelW=145*s labelH=20*s vspacing=52 red=(1,0,0) green=(0,1,0) blue=(0,0,1) ltgrey=(0.7,0.7,0.7) white=(1,1,1) sliderCol=(0.6,0.6,1) def testReturn(x): pass def cWinPos(x,y): """Convert regular screen-pixel coordinates to a control window position.""" return (-ctrlWinW/2+75 + x*ctrlWinAspect, ctrlWinH/2 - y) def dWinPos(x,y): """Convert regular screen-pixel coordinate to a display window position.""" return (-dataWinW/2 + x*dataWinAspect, dataWinH/2 - y) # ------------------------------------------------------------------------------ #WIDGET ABSTRACTION class text(menu): """A simple wrapper for the menu class, to create a static label on Control windows. """ def __init__(self,x,y,text,color=ltgrey): menu.__init__(self, pos=cWinPos(x,y), text=text, width=labelW, height=labelH, color=color) class dragbar: """A widget combining a label, a slider and associated formatting. Constructor: dragbar(tuple<float,float,float>,string,float,float,float,function, color) Class invariant: action_change takes a float as input """ def _move(self): """Send slider value to be processed""" self._sendValue(self.sliderObj.value) def getValue(self): """Return the slider reading.""" return self.sliderObj.value def setValue(self,val): """Change the slider position""" self.sliderObj.value=val self._move() def __init__(self, x, y, title, minval, maxval, action_change, color=sliderCol): """Create GUI elements""" self._sendValue = action_change self._title=title self.x,self.y=cWinPos(x,y) #Create the components self._title=title self.sliderObj = slider(pos=( self.x-sliderLen/2+5, self.y ), width = sliderW, length = sliderLen, axis = (1,0,0), color = color, min = minval, max = maxval, action = self._move) if self._title<>'': self.titleLbl = text(x,y-slider_txtbuf,title) class colorSlider: """A widget combining three dragbars, a title and color display. 
Constructor: colorSlider(float,float,function,color) Class invariant: action_change takes a color as input """ def getColor(self): """Return the combined color from slider values""" return (self.s1.getValue(),self.s2.getValue(),self.s3.getValue()) def update(self): """Fill the color box; pass the value to the change action""" val=self.getColor() self._updateColor(val) def sliderMove(self, sliderValue): """Slider change event""" self.update() def setColor(self,color): """Overwrite color represented by sliders""" self.s1.setValue(color[0]) self.s2.setValue(color[1]) self.s3.setValue(color[2]) self.update() def __init__(self,x,y,action_change): """Initialise and update GUI elements""" self._updateColor=action_change #Create the components self.s1=dragbar(x,y-15,'',0,1,self.sliderMove,color=red) self.s2=dragbar(x,y ,'',0,1,self.sliderMove,color=green) self.s3=dragbar(x,y+15,'',0,1,self.sliderMove,color=blue) self.update() class colorTribar: """A widget using a Curve object to show 3 color values. Constructor: colorTribar(float,float,float,float,color,color,color) """ def update(self): """Update the color values on the model from memory""" self._model.color[0]=self._col1 self._model.color[1]=self._col1 self._model.color[2]=self._col2 self._model.color[3]=self._col2 self._model.color[4]=self._col3 self._model.color[5]=self._col3 def setColor(self,col1=None,col2=None,col3=None): """Change the color values. setColor(color,color,color) -> void """ if col1: self._col1=col1 if col2: self._col2=col2 if col3: self._col3=col3 self.update() def __init__(self,x,y,w,h,col1,col2,col3): """Initialise and update GUI elements""" #Store colors self._col1=col1 self._col2=col2 self._col3=col3 #Create component pos=dWinPos(x,y) ylist=[pos[1]-h, pos[1]-0.33*h, pos[1]-0.33*h, pos[1]+0.33*h, pos[1]+0.33*h, pos[1]+h] self._model=curve(radius=w,x=pos[0],y=ylist) self.update() # ------------------------------------------------------------------------------ #GUI BUILDING/OPERATION class GUI: """The user interaction handler and main application GUI. """ #functions: initialisation; all buttons; all sliders #do return statements where needed: load/changename button; colorsliders def color1Change(self,value): """Call the general color changer""" self._colorChange(1,value) def color2Change(self,value): """Call the general color changer""" self._colorChange(2,value) def color3Change(self,value): """Call the general color changer""" self._colorChange(3,value) def _colorChange(self,colorNum,value): """Update seed data and display with the new color""" if colorNum==1: self.colorBar.setColor(col1=value) if colorNum==2: self.colorBar.setColor(col2=value) if colorNum==3: self.colorBar.setColor(col3=value) #self.mainseed.color[colorNum] = value def globalUpdate(self): """Update all display widgets from the current seed settings""" pass #self.colorSlider1.setColor(ltgrey) #update all widgets. 
after loading a new seed #(Override control settings with seed data; automatically triggers) def __init__(self): """Create all GUI components""" #Initialiser: get file/keep data #Add widgets for the Data display ##self.xx=box(pos=dWinPos(dataWinW*0.25,dataWinH*0.3), width=10, height=10, length=10, ##color=color.red) self.colorBar=colorTribar(dataWinW*0.5,dataWinH*0.5,10,100,red,green,blue) #Add widgets for the Controls display text(0,100,'COLORS',white) self.colorSlider1 = colorSlider(-8,100+vspacing*1,self.color1Change) self.colorSlider2 = colorSlider(-8,100+vspacing*2,self.color2Change) self.colorSlider3 = colorSlider(-8,100+vspacing*3,self.color3Change) x=220 text(x,100,'FIRST STEM',blue) self.iniLenBar = dragbar(x,110+vspacing*1,'Length:',0,1,testReturn) self.iniWidthBar = dragbar(x,110+vspacing*2,'Width:',0,1,testReturn) self.iniAngleBar = dragbar(x,110+vspacing*3,'Branch Angle:',0,1,testReturn) self.iniSplitsBar = dragbar(x,110+vspacing*4,'Branches/Split:',0,1,testReturn) x=435 text(x,100,'CHANGE RATES',red) self.changeLenBar = dragbar(x,110+vspacing*1,'Length:',0,1,testReturn) self.changeWidthBar = dragbar(x,110+vspacing*2,'Width:',0,1,testReturn) self.changeAngleBar = dragbar(x,110+vspacing*3,'Branch Angle:',0,1,testReturn) self.changeSplitsBar = dragbar(x,110+vspacing*4,'Branches/Split',0,1,testReturn) x=650 text(x,100,'GROW CYCLE',green) self.cycleTimeBar = dragbar(x,110+vspacing*1,'Time:',0,1,testReturn) self.cycleMutationBar = dragbar(x,110+vspacing*2,'Mutation:',0,1,testReturn) self.cycleIterationsBar = dragbar(x,110+vspacing*3,'Iterations:',0,1,testReturn) #Update state self.globalUpdate() # -------------------------------------------------------------------------- #MAIN APPLICATION class runtime: """The main application """ def __init__(self): """Set up the controls window and process input.""" #Make controls window self.ctrlWin = controls(x=25, y=25, width=ctrlWinW, title='==PyPlant Seed Editor== Controls', height=ctrlWinH, range=ctrlWinH) slider(pos=(-999,-999)) #Make data window self.dataWin = display(height=dataWinH, width=dataWinW, range=dataWinH, title='==PyPlant Seed Editor== Data', x = self.ctrlWin.x, y=self.ctrlWin.y+self.ctrlWin.height) self.g = GUI() #Main loop while True: rate(100) self.ctrlWin.interact() r=runtime() # ----------------------------------------------------------------------------
gpl-2.0
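A quick, illustrative evaluation of the editor's cWinPos() mapping using constants copied from the top of the file above; only the sample point is invented.

# constants copied from the editor above
ctrlWinW = 650
ctrlWinH = 350
ctrlWinAspect = 0.7857

def cWinPos(x, y):
    """Convert regular screen-pixel coordinates to a control window position."""
    return (-ctrlWinW / 2 + 75 + x * ctrlWinAspect, ctrlWinH / 2 - y)

print(cWinPos(0, 100))    # roughly (-250, 75): near the window's left edge, above centre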
jessrosenfield/pants
tests/python/pants_test/tasks/test_reflect.py
15
1459
# coding=utf-8 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) from pants.backend.core.register import build_file_aliases as register_core from pants.backend.core.tasks import reflect from pants.backend.jvm.register import build_file_aliases as register_jvm from pants.backend.python.register import build_file_aliases as register_python from pants_test.base_test import BaseTest class BuildsymsSanityTests(BaseTest): @property def alias_groups(self): return register_core().merge(register_jvm().merge(register_python())) def setUp(self): super(BuildsymsSanityTests, self).setUp() self._syms = reflect.assemble_buildsyms(build_file_parser=self.build_file_parser) def test_exclude_unuseful(self): # These symbols snuck into old dictionaries, make sure they don't again: for unexpected in ['__builtins__', 'Target']: self.assertTrue(unexpected not in self._syms.keys(), 'Found %s' % unexpected) def test_java_library(self): # Good bet that 'java_library' exists and contains these text blobs jl_text = '{0}'.format(self._syms['java_library']['defn']) self.assertIn('java_library', jl_text) self.assertIn('dependencies', jl_text) self.assertIn('sources', jl_text)
apache-2.0
hubert667/AIR
src/python/comparison/AbstractInterleavedComparison.py
2
1088
# This file is part of Lerot. # # Lerot is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Lerot is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Lerot. If not, see <http://www.gnu.org/licenses/>. # KH, 2012/06/19 """ Abstract base class for interleaved comparison methods """ class AbstractInterleavedComparison: def interleave(self, r1, r2, query, length): raise NotImplementedError("The derived class needs to implement " "interleave.") def infer_outcome(self, l, a, c, query): raise NotImplementedError("The derived class needs to implement " "infer_outcome.")
gpl-3.0
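An illustrative, heavily hedged subclass showing the shape of the interface above. It is not one of Lerot's shipped comparison methods, and the return conventions used here (an interleaved list plus a per-position assignment list, then a signed click-credit score) are assumptions made only for this example.

from AbstractInterleavedComparison import AbstractInterleavedComparison  # module shown above

class NaiveInterleave(AbstractInterleavedComparison):
    def interleave(self, r1, r2, query, length):
        # alternate documents from the two rankings (assumes both have >= length/2 items)
        l, a = [], []
        for i in range(length):
            source = i % 2
            l.append((r1 if source == 0 else r2)[i // 2])
            a.append(source)
        return l, a

    def infer_outcome(self, l, a, c, query):
        # credit each clicked position to the ranker that contributed it
        return sum(1 if a[i] == 0 else -1 for i, click in enumerate(c) if click)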
BjoernT/python-openstackclient
openstackclient/volume/client.py
2
2471
# Copyright 2012-2013 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import logging from openstackclient.common import utils LOG = logging.getLogger(__name__) DEFAULT_API_VERSION = '2' API_VERSION_OPTION = 'os_volume_api_version' API_NAME = "volume" API_VERSIONS = { "1": "cinderclient.v1.client.Client", "2": "cinderclient.v2.client.Client" } def make_client(instance): """Returns a volume service client.""" # Defer client imports until we actually need them from cinderclient import extension from cinderclient.v1.contrib import list_extensions from cinderclient.v1 import volume_snapshots from cinderclient.v1 import volumes # Monkey patch for v1 cinderclient volumes.Volume.NAME_ATTR = 'display_name' volume_snapshots.Snapshot.NAME_ATTR = 'display_name' volume_client = utils.get_client_class( API_NAME, instance._api_version[API_NAME], API_VERSIONS ) LOG.debug('Instantiating volume client: %s', volume_client) # Set client http_log_debug to True if verbosity level is high enough http_log_debug = utils.get_effective_log_level() <= logging.DEBUG extensions = [extension.Extension('list_extensions', list_extensions)] # Remember interface only if it is set kwargs = utils.build_kwargs_dict('endpoint_type', instance._interface) client = volume_client( session=instance.session, extensions=extensions, http_log_debug=http_log_debug, region_name=instance._region_name, **kwargs ) return client def build_option_parser(parser): """Hook to add global options""" parser.add_argument( '--os-volume-api-version', metavar='<volume-api-version>', default=utils.env('OS_VOLUME_API_VERSION'), help='Volume API version, default=' + DEFAULT_API_VERSION + ' (Env: OS_VOLUME_API_VERSION)') return parser
apache-2.0
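Illustrative lookup only: how an --os-volume-api-version value maps to a cinderclient class path through the API_VERSIONS table above. The real code resolves this via utils.get_client_class; the fallback to the default version shown here is an assumption for the sketch.

API_VERSIONS = {
    "1": "cinderclient.v1.client.Client",
    "2": "cinderclient.v2.client.Client",
}
DEFAULT_API_VERSION = "2"

requested = "1"   # e.g. from OS_VOLUME_API_VERSION
print(API_VERSIONS.get(requested, API_VERSIONS[DEFAULT_API_VERSION]))
# cinderclient.v1.client.Client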
vighneshbirodkar/scikit-image
skimage/filters/thresholding.py
1
24806
import math import numpy as np from scipy import ndimage as ndi from scipy.ndimage import filters as ndif from collections import OrderedDict from ..exposure import histogram from .._shared.utils import assert_nD, warn __all__ = ['try_all_threshold', 'threshold_adaptive', 'threshold_otsu', 'threshold_yen', 'threshold_isodata', 'threshold_li', 'threshold_minimum', 'threshold_mean', 'threshold_triangle'] def _try_all(image, methods=None, figsize=None, num_cols=2, verbose=True): """Returns a figure comparing the outputs of different methods. Parameters ---------- image : (N, M) ndarray Input image. methods : dict, optional Names and associated functions. Functions must take and return an image. figsize : tuple, optional Figure size (in inches). num_cols : int, optional Number of columns. verbose : bool, optional Print function name for each method. Returns ------- fig, ax : tuple Matplotlib figure and axes. """ from matplotlib import pyplot as plt num_rows = math.ceil((len(methods) + 1.) / num_cols) num_rows = int(num_rows) # Python 2.7 support fig, ax = plt.subplots(num_rows, num_cols, figsize=figsize, sharex=True, sharey=True, subplot_kw={'adjustable': 'box-forced'}) ax = ax.ravel() ax[0].imshow(image, cmap=plt.cm.gray) ax[0].set_title('Original') i = 1 for name, func in methods.items(): ax[i].imshow(func(image), cmap=plt.cm.gray) ax[i].set_title(name) i += 1 if verbose: print(func.__orifunc__) for a in ax: a.axis('off') fig.tight_layout() return fig, ax def try_all_threshold(image, figsize=(8, 5), verbose=True): """Returns a figure comparing the outputs of different thresholding methods. Parameters ---------- image : (N, M) ndarray Input image. figsize : tuple, optional Figure size (in inches). verbose : bool, optional Print function name for each method. Returns ------- fig, ax : tuple Matplotlib figure and axes. Notes ----- The following algorithms are used: * isodata * li * mean * minimum * otsu * triangle * yen Examples -------- >>> from skimage.data import text >>> fig, ax = try_all_threshold(text(), figsize=(10, 6), verbose=False) """ def thresh(func): """ A wrapper function to return a thresholded image. """ def wrapper(im): return im > func(im) try: wrapper.__orifunc__ = func.__orifunc__ except AttributeError: wrapper.__orifunc__ = func.__module__ + '.' + func.__name__ return wrapper # Global algorithms. methods = OrderedDict({'Isodata': thresh(threshold_isodata), 'Li': thresh(threshold_li), 'Mean': thresh(threshold_mean), 'Minimum': thresh(threshold_minimum), 'Otsu': thresh(threshold_otsu), 'Triangle': thresh(threshold_triangle), 'Yen': thresh(threshold_yen)}) return _try_all(image, figsize=figsize, methods=methods, verbose=verbose) def threshold_adaptive(image, block_size, method='gaussian', offset=0, mode='reflect', param=None): """Applies an adaptive threshold to an array. Also known as local or dynamic thresholding where the threshold value is the weighted mean for the local neighborhood of a pixel subtracted by a constant. Alternatively the threshold can be determined dynamically by a a given function using the 'generic' method. Parameters ---------- image : (N, M) ndarray Input image. block_size : int Odd size of pixel neighborhood which is used to calculate the threshold value (e.g. 3, 5, 7, ..., 21, ...). method : {'generic', 'gaussian', 'mean', 'median'}, optional Method used to determine adaptive threshold for local neighbourhood in weighted mean image. 
* 'generic': use custom function (see `param` parameter) * 'gaussian': apply gaussian filter (see `param` parameter for custom\ sigma value) * 'mean': apply arithmetic mean filter * 'median': apply median rank filter By default the 'gaussian' method is used. offset : float, optional Constant subtracted from weighted mean of neighborhood to calculate the local threshold value. Default offset is 0. mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional The mode parameter determines how the array borders are handled, where cval is the value when mode is equal to 'constant'. Default is 'reflect'. param : {int, function}, optional Either specify sigma for 'gaussian' method or function object for 'generic' method. This functions takes the flat array of local neighbourhood as a single argument and returns the calculated threshold for the centre pixel. Returns ------- threshold : (N, M) ndarray Thresholded binary image References ---------- .. [1] http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#adaptivethreshold Examples -------- >>> from skimage.data import camera >>> image = camera()[:50, :50] >>> binary_image1 = threshold_adaptive(image, 15, 'mean') >>> func = lambda arr: arr.mean() >>> binary_image2 = threshold_adaptive(image, 15, 'generic', param=func) """ if block_size % 2 == 0: raise ValueError("The kwarg ``block_size`` must be odd! Given " "``block_size`` {0} is even.".format(block_size)) assert_nD(image, 2) thresh_image = np.zeros(image.shape, 'double') if method == 'generic': ndi.generic_filter(image, param, block_size, output=thresh_image, mode=mode) elif method == 'gaussian': if param is None: # automatically determine sigma which covers > 99% of distribution sigma = (block_size - 1) / 6.0 else: sigma = param ndi.gaussian_filter(image, sigma, output=thresh_image, mode=mode) elif method == 'mean': mask = 1. / block_size * np.ones((block_size,)) # separation of filters to speedup convolution ndi.convolve1d(image, mask, axis=0, output=thresh_image, mode=mode) ndi.convolve1d(thresh_image, mask, axis=1, output=thresh_image, mode=mode) elif method == 'median': ndi.median_filter(image, block_size, output=thresh_image, mode=mode) return image > (thresh_image - offset) def threshold_otsu(image, nbins=256): """Return threshold value based on Otsu's method. Parameters ---------- image : (N, M) ndarray Grayscale input image. nbins : int, optional Number of bins used to calculate histogram. This value is ignored for integer arrays. Returns ------- threshold : float Upper threshold value. All pixels with an intensity higher than this value are assumed to be foreground. Raises ------ ValueError If `image` only contains a single grayscale value. References ---------- .. [1] Wikipedia, http://en.wikipedia.org/wiki/Otsu's_Method Examples -------- >>> from skimage.data import camera >>> image = camera() >>> thresh = threshold_otsu(image) >>> binary = image <= thresh Notes ----- The input image must be grayscale. """ if len(image.shape) > 2 and image.shape[-1] in (3, 4): msg = "threshold_otsu is expected to work correctly only for " \ "grayscale images; image shape {0} looks like an RGB image" warn(msg.format(image.shape)) # Check if the image is multi-colored or not if image.min() == image.max(): raise ValueError("threshold_otsu is expected to work with images " "having more than one color. 
The input image seems " "to have just one color {0}.".format(image.min())) hist, bin_centers = histogram(image.ravel(), nbins) hist = hist.astype(float) # class probabilities for all possible thresholds weight1 = np.cumsum(hist) weight2 = np.cumsum(hist[::-1])[::-1] # class means for all possible thresholds mean1 = np.cumsum(hist * bin_centers) / weight1 mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1] # Clip ends to align class 1 and class 2 variables: # The last value of `weight1`/`mean1` should pair with zero values in # `weight2`/`mean2`, which do not exist. variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2 idx = np.argmax(variance12) threshold = bin_centers[:-1][idx] return threshold def threshold_yen(image, nbins=256): """Return threshold value based on Yen's method. Parameters ---------- image : (N, M) ndarray Input image. nbins : int, optional Number of bins used to calculate histogram. This value is ignored for integer arrays. Returns ------- threshold : float Upper threshold value. All pixels with an intensity higher than this value are assumed to be foreground. References ---------- .. [1] Yen J.C., Chang F.J., and Chang S. (1995) "A New Criterion for Automatic Multilevel Thresholding" IEEE Trans. on Image Processing, 4(3): 370-378. DOI:10.1109/83.366472 .. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding Techniques and Quantitative Performance Evaluation" Journal of Electronic Imaging, 13(1): 146-165, DOI:10.1117/1.1631315 http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf .. [3] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold Examples -------- >>> from skimage.data import camera >>> image = camera() >>> thresh = threshold_yen(image) >>> binary = image <= thresh """ hist, bin_centers = histogram(image.ravel(), nbins) # On blank images (e.g. filled with 0) with int dtype, `histogram()` # returns `bin_centers` containing only one value. Speed up with it. if bin_centers.size == 1: return bin_centers[0] # Calculate probability mass function pmf = hist.astype(np.float32) / hist.sum() P1 = np.cumsum(pmf) # Cumulative normalized histogram P1_sq = np.cumsum(pmf ** 2) # Get cumsum calculated from end of squared array: P2_sq = np.cumsum(pmf[::-1] ** 2)[::-1] # P2_sq indexes is shifted +1. I assume, with P1[:-1] it's help avoid '-inf' # in crit. ImageJ Yen implementation replaces those values by zero. crit = np.log(((P1_sq[:-1] * P2_sq[1:]) ** -1) * (P1[:-1] * (1.0 - P1[:-1])) ** 2) return bin_centers[crit.argmax()] def threshold_isodata(image, nbins=256, return_all=False): """Return threshold value(s) based on ISODATA method. Histogram-based threshold, known as Ridler-Calvard method or inter-means. Threshold values returned satisfy the following equality: `threshold = (image[image <= threshold].mean() +` `image[image > threshold].mean()) / 2.0` That is, returned thresholds are intensities that separate the image into two groups of pixels, where the threshold intensity is midway between the mean intensities of these groups. For integer images, the above equality holds to within one; for floating- point images, the equality holds to within the histogram bin-width. Parameters ---------- image : (N, M) ndarray Input image. nbins : int, optional Number of bins used to calculate histogram. This value is ignored for integer arrays. return_all: bool, optional If False (default), return only the lowest threshold that satisfies the above equality. If True, return all valid thresholds. 
Returns ------- threshold : float or int or array Threshold value(s). References ---------- .. [1] Ridler, TW & Calvard, S (1978), "Picture thresholding using an iterative selection method" IEEE Transactions on Systems, Man and Cybernetics 8: 630-632, DOI:10.1109/TSMC.1978.4310039 .. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding Techniques and Quantitative Performance Evaluation" Journal of Electronic Imaging, 13(1): 146-165, http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf DOI:10.1117/1.1631315 .. [3] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold Examples -------- >>> from skimage.data import coins >>> image = coins() >>> thresh = threshold_isodata(image) >>> binary = image > thresh """ hist, bin_centers = histogram(image.ravel(), nbins) # image only contains one unique value if len(bin_centers) == 1: if return_all: return bin_centers else: return bin_centers[0] hist = hist.astype(np.float32) # csuml and csumh contain the count of pixels in that bin or lower, and # in all bins strictly higher than that bin, respectively csuml = np.cumsum(hist) csumh = np.cumsum(hist[::-1])[::-1] - hist # intensity_sum contains the total pixel intensity from each bin intensity_sum = hist * bin_centers # l and h contain average value of all pixels in that bin or lower, and # in all bins strictly higher than that bin, respectively. # Note that since exp.histogram does not include empty bins at the low or # high end of the range, csuml and csumh are strictly > 0, except in the # last bin of csumh, which is zero by construction. # So no worries about division by zero in the following lines, except # for the last bin, but we can ignore that because no valid threshold # can be in the top bin. So we just patch up csumh[-1] to not cause 0/0 # errors. csumh[-1] = 1 l = np.cumsum(intensity_sum) / csuml h = (np.cumsum(intensity_sum[::-1])[::-1] - intensity_sum) / csumh # isodata finds threshold values that meet the criterion t = (l + m)/2 # where l is the mean of all pixels <= t and h is the mean of all pixels # > t, as calculated above. So we are looking for places where # (l + m) / 2 equals the intensity value for which those l and m figures # were calculated -- which is, of course, the histogram bin centers. # We only require this equality to be within the precision of the bin # width, of course. all_mean = (l + h) / 2.0 bin_width = bin_centers[1] - bin_centers[0] # Look only at thresholds that are below the actual all_mean value, # for consistency with the threshold being included in the lower pixel # group. Otherwise can get thresholds that are not actually fixed-points # of the isodata algorithm. For float images, this matters less, since # there really can't be any guarantees anymore anyway. distances = all_mean - bin_centers thresholds = bin_centers[(distances >= 0) & (distances < bin_width)] if return_all: return thresholds else: return thresholds[0] def threshold_li(image): """Return threshold value based on adaptation of Li's Minimum Cross Entropy method. Parameters ---------- image : (N, M) ndarray Input image. Returns ------- threshold : float Upper threshold value. All pixels with an intensity higher than this value are assumed to be foreground. References ---------- .. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding" Pattern Recognition, 26(4): 617-625 DOI:10.1016/0031-3203(93)90115-D .. [2] Li C.H. and Tam P.K.S. 
(1998) "An Iterative Algorithm for Minimum Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776 DOI:10.1016/S0167-8655(98)00057-9 .. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding Techniques and Quantitative Performance Evaluation" Journal of Electronic Imaging, 13(1): 146-165 DOI:10.1117/1.1631315 .. [4] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold Examples -------- >>> from skimage.data import camera >>> image = camera() >>> thresh = threshold_li(image) >>> binary = image > thresh """ # Make sure image has more than one value if np.all(image == image.flat[0]): raise ValueError("threshold_li is expected to work with images " "having more than one value. The input image seems " "to have just one value {0}.".format(image.flat[0])) # Copy to ensure input image is not modified image = image.copy() # Requires positive image (because of log(mean)) immin = np.min(image) image -= immin imrange = np.max(image) tolerance = 0.5 * imrange / 256 # Calculate the mean gray-level mean = np.mean(image) # Initial estimate new_thresh = mean old_thresh = new_thresh + 2 * tolerance # Stop the iterations when the difference between the # new and old threshold values is less than the tolerance while abs(new_thresh - old_thresh) > tolerance: old_thresh = new_thresh threshold = old_thresh + tolerance # range # Calculate the means of background and object pixels mean_back = image[image <= threshold].mean() mean_obj = image[image > threshold].mean() temp = (mean_back - mean_obj) / (np.log(mean_back) - np.log(mean_obj)) if temp < 0: new_thresh = temp - tolerance else: new_thresh = temp + tolerance return threshold + immin def threshold_minimum(image, nbins=256, bias='min', max_iter=10000): """Return threshold value based on minimum method. The histogram of the input `image` is computed and smoothed until there are only two maxima. Then the minimum in between is the threshold value. Parameters ---------- image : (M, N) ndarray Input image. nbins : int, optional Number of bins used to calculate histogram. This value is ignored for integer arrays. bias : {'min', 'mid', 'max'}, optional 'min', 'mid', 'max' return lowest, middle, or highest pixel value with minimum histogram value. max_iter: int, optional Maximum number of iterations to smooth the histogram. Returns ------- threshold : float Upper threshold value. All pixels with an intensity higher than this value are assumed to be foreground. Raises ------ RuntimeError If unable to find two local maxima in the histogram or if the smoothing takes more than 1e4 iterations. References ---------- .. 
[1] Prewitt, JMS & Mendelsohn, ML (1966), "The analysis of cell images", Annals of the New York Academy of Sciences 128: 1035-1053 DOI:10.1111/j.1749-6632.1965.tb11715.x Examples -------- >>> from skimage.data import camera >>> image = camera() >>> thresh = threshold_minimum(image) >>> binary = image > thresh """ def find_local_maxima(hist): # We can't use scipy.signal.argrelmax # as it fails on plateaus maximums = list() direction = 1 for i in range(hist.shape[0] - 1): if direction > 0: if hist[i + 1] < hist[i]: direction = -1 maximums.append(i) else: if hist[i + 1] > hist[i]: direction = 1 return maximums if bias not in ('min', 'mid', 'max'): raise ValueError("Unknown bias: {0}".format(bias)) hist, bin_centers = histogram(image.ravel(), nbins) smooth_hist = np.copy(hist) for counter in range(max_iter): smooth_hist = ndif.uniform_filter1d(smooth_hist, 3) maximums = find_local_maxima(smooth_hist) if len(maximums) < 3: break if len(maximums) != 2: raise RuntimeError('Unable to find two maxima in histogram') elif counter == max_iter - 1: raise RuntimeError('Maximum iteration reached for histogram' 'smoothing') # Find lowest point between the maxima, biased to the low end (min) minimum = smooth_hist[maximums[0]] threshold = maximums[0] for i in range(maximums[0], maximums[1]+1): if smooth_hist[i] < minimum: minimum = smooth_hist[i] threshold = i if bias == 'min': return bin_centers[threshold] else: upper_bound = threshold while smooth_hist[upper_bound] == smooth_hist[threshold]: upper_bound += 1 upper_bound -= 1 if bias == 'max': return bin_centers[upper_bound] elif bias == 'mid': return bin_centers[(threshold + upper_bound) // 2] def threshold_mean(image): """Return threshold value based on the mean of grayscale values. Parameters ---------- image : (N, M[, ..., P]) ndarray Grayscale input image. Returns ------- threshold : float Upper threshold value. All pixels with an intensity higher than this value are assumed to be foreground. References ---------- .. [1] C. A. Glasbey, "An analysis of histogram-based thresholding algorithms," CVGIP: Graphical Models and Image Processing, vol. 55, pp. 532-537, 1993. DOI:10.1006/cgip.1993.1040 Examples -------- >>> from skimage.data import camera >>> image = camera() >>> thresh = threshold_mean(image) >>> binary = image > thresh """ return np.mean(image) def threshold_triangle(image, nbins=256): """Return threshold value based on the triangle algorithm. Parameters ---------- image : (N, M[, ..., P]) ndarray Grayscale input image. nbins : int, optional Number of bins used to calculate histogram. This value is ignored for integer arrays. Returns ------- threshold : float Upper threshold value. All pixels with an intensity higher than this value are assumed to be foreground. References ---------- .. [1] Zack, G. W., Rogers, W. E. and Latt, S. A., 1977, Automatic Measurement of Sister Chromatid Exchange Frequency, Journal of Histochemistry and Cytochemistry 25 (7), pp. 741-753 DOI:10.1177/25.7.70454 .. [2] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold Examples -------- >>> from skimage.data import camera >>> image = camera() >>> thresh = threshold_triangle(image) >>> binary = image > thresh """ # nbins is ignored for integer arrays # so, we recalculate the effective nbins. hist, bin_centers = histogram(image.ravel(), nbins) nbins = len(hist) # Find peak, lowest and highest gray levels. 
arg_peak_height = np.argmax(hist) peak_height = hist[arg_peak_height] arg_low_level, arg_high_level = np.where(hist>0)[0][[0, -1]] # Flip is True if left tail is shorter. flip = arg_peak_height - arg_low_level < arg_high_level - arg_peak_height if flip: hist = hist[::-1] arg_low_level = nbins - arg_high_level - 1 arg_peak_height = nbins - arg_peak_height - 1 # If flip == True, arg_high_level becomes incorrect # but we don't need it anymore. del(arg_high_level) # Set up the coordinate system. width = arg_peak_height - arg_low_level x1 = np.arange(width) y1 = hist[x1 + arg_low_level] # Normalize. norm = np.sqrt(peak_height**2 + width**2) peak_height /= norm width /= norm # Maximize the length. # The ImageJ implementation includes an additional constant when calculating # the length, but here we omit it as it does not affect the location of the # minimum. length = peak_height * x1 - width * y1 arg_level = np.argmax(length) + arg_low_level if flip: arg_level = nbins - arg_level - 1 return bin_centers[arg_level]
bsd-3-clause
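A worked toy example (invented two-bin histogram) of the between-class variance that threshold_otsu maximizes above; with only two bins there is a single candidate threshold, so the arithmetic is easy to follow by hand.

import numpy as np

hist = np.array([6., 4.])                    # 6 dark pixels, 4 bright pixels
bin_centers = np.array([10., 200.])

weight1 = np.cumsum(hist)                    # pixels at or below each bin: [ 6., 10.]
weight2 = np.cumsum(hist[::-1])[::-1]        # pixels at or above each bin: [10.,  4.]
mean1 = np.cumsum(hist * bin_centers) / weight1
mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
print(bin_centers[:-1][np.argmax(variance12)])   # 10.0, the only candidate threshold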
gsec/eZchat
ez_process/ez_process_base.py
1
5024
#==============================================================================# # ez_process_base.py # #==============================================================================# #============# # Includes # #============# import os import types import Queue # adding the eZchat path to search directory import sys, os sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) import ez_pipe as pipe import ez_process_preferences as epp #==============================================================================# # class p2pCommand # #==============================================================================# class p2pCommand(object): """ A p2pCommand encapsulates ``'commandQueue'`` commands. The p2pCommand instance can be appended to the client's command queue for execution via: cmd = p2pCommand(...) client_class_instance.commandQueue.put(cmd) The funcStr is the name of the function which must match a key in the client handler functions (see ez_process_base_meta). Data must be a dictionary and has to be filled with key,value pairs as required by the handler function. """ def __init__(self, funcStr=None, data=None): """ :param funcStr: Storing the function name which should be called when executed. :type funcStr: String :param data: Keyword arguments (*kwargs*) passed to the called function. :type data: Dict """ self.funcStr = funcStr self.data = data #==============================================================================# # class p2pReply # #==============================================================================# class p2pReply(object): """ Encapsulate received data. A p2pReply instance can be appended to the reply queue. replyType = success: type(data) = str replyType = error: type(data) = str """ error, success = range(2) msg = 2 def __init__(self, replyType=None, data=None): self.replyType = replyType self.data = data #==============================================================================# # class ez_process_base(_meta) # #==============================================================================# class ez_process_base_meta(type): """ The metaclass __init__ function is called after the class is created, but before any class instance initialization. 
Any class with set __metaclass__ attribute to ez_process_base_meta which is equivalent to inheriting the class ez_process_base is extended by: - self.handlers: dictionary storing all user-defined functions - global attributes as class attributes self.handlers is called in the client main loop in p2pclient """ def __init__(cls, name, bases, dct): if not hasattr(cls, 'handlers'): cls.handlers = {} if not hasattr(cls, 'handler_rules'): cls.handler_rules = {} for attr in [x for x in dct if not x.startswith('_')]: # register process functionalities and inherit them to child classes if isinstance(dct[attr], types.FunctionType): assert not attr in cls.handlers cls.handlers[attr] = dct[attr] # register global attributes and inherit them to child classes else: cls.attr = dct[attr] super(ez_process_base_meta, cls).__init__(name, bases, dct) class ez_process_base(object): __metaclass__ = ez_process_base_meta commandQueue = Queue.Queue() # storing active and/or sleeping background process background_processes = {} # storing the cmd with which the background processes were initiated background_process_cmds = {} # storing user-defined functions for calling after a process has been # terminated successfully success_callback = {} def __init__(self, **kwargs): if 'write_to_pipe' in kwargs: class RQueue(Queue.Queue): def __init__(self, write_to_pipe = False, *args, **kwargs): Queue.Queue.__init__(self, *args, **kwargs) self.write_to_pipe = write_to_pipe def put(self, cmd): Queue.Queue.put(self, cmd) if self.write_to_pipe: os.write(pipe.pipe, 'status') self.replyQueue = RQueue(write_to_pipe = kwargs['write_to_pipe']) else: self.replyQueue = Queue.Queue() # user defined parameters are passed to the process preferences potentiall # overwriting default values. epp.init_process_preferences(**kwargs) # client related def success(self, success_msg=None): return p2pReply(p2pReply.success, success_msg) # client related def error(self, error_msg=None): return p2pReply(p2pReply.error, error_msg) # MsgDb update def msg(self, msg=None): return p2pReply(p2pReply.msg, msg)
gpl-2.0
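The eZchat record above describes a command-queue pattern: p2pCommand objects carry a handler name plus keyword arguments, and the client pops them off commandQueue and dispatches to registered handlers. The self-contained sketch below mirrors that pattern without importing eZchat; the Command and MiniProcess classes and the run_once dispatch loop are illustrative stand-ins, not the project's real API.

# Hedged illustration only; not part of the record above.
try:                        # Python 2, as in the snippet above
    import Queue
except ImportError:         # Python 3
    import queue as Queue


class Command(object):
    """Stand-in for p2pCommand: a handler name plus keyword arguments."""
    def __init__(self, funcStr, data=None):
        self.funcStr = funcStr
        self.data = data or {}


class MiniProcess(object):
    """Stand-in for an ez_process_base subclass with registered handlers."""
    def __init__(self):
        self.commandQueue = Queue.Queue()
        self.handlers = {'ping': self.ping}

    def ping(self, host=None, port=None):
        print('ping %s:%s' % (host, port))

    def run_once(self):
        # Assumed dispatch step: look up the handler by name and call it
        # with the command's keyword arguments.
        cmd = self.commandQueue.get()
        self.handlers[cmd.funcStr](**cmd.data)


proc = MiniProcess()
proc.commandQueue.put(Command('ping', {'host': '127.0.0.1', 'port': 1234}))
proc.run_once()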
kapadia/rasterio
rasterio/features.py
2
11130
"""Functions for working with features in a raster dataset.""" from __future__ import absolute_import import logging import warnings import numpy as np import rasterio from rasterio._features import _shapes, _sieve, _rasterize, _bounds from rasterio.env import ensure_env from rasterio.transform import IDENTITY, guard_transform from rasterio.dtypes import validate_dtype, can_cast_dtype, get_minimum_dtype log = logging.getLogger(__name__) @ensure_env def geometry_mask( geometries, out_shape, transform, all_touched=False, invert=False): """Create a mask from shapes. By default, mask is intended for use as a numpy mask, where pixels that overlap shapes are False. Parameters ---------- geometries : iterable over geometries (GeoJSON-like objects) out_shape : tuple or list Shape of output numpy ndarray. transform : Affine transformation object Transformation from pixel coordinates of `image` to the coordinate system of the input `shapes`. See the `transform` property of dataset objects. all_touched : boolean, optional If True, all pixels touched by geometries will be burned in. If false, only pixels whose center is within the polygon or that are selected by Bresenham's line algorithm will be burned in. invert: boolean, optional If True, mask will be True for pixels that overlap shapes. False by default. Returns ------- out : numpy ndarray of type 'bool' Result """ fill, mask_value = (0, 1) if invert else (1, 0) return rasterize( geometries, out_shape=out_shape, transform=transform, all_touched=all_touched, fill=fill, default_value=mask_value).astype('bool') @ensure_env def shapes(image, mask=None, connectivity=4, transform=IDENTITY): """Yield (polygon, value for each set of adjacent pixels of the same value. Parameters ---------- image : numpy ndarray or rasterio Band object (RasterReader, bidx namedtuple). Data type must be one of rasterio.int16, rasterio.int32, rasterio.uint8, rasterio.uint16, or rasterio.float32. mask : numpy ndarray or rasterio Band object, optional Values of False or 0 will be excluded from feature generation Must evaluate to bool (rasterio.bool_ or rasterio.uint8) connectivity : int, optional Use 4 or 8 pixel connectivity for grouping pixels into features transform : Affine transformation, optional If not provided, feature coordinates will be generated based on pixel coordinates Yields ------- tuple A pair of (polygon, value) for each feature found in the image. Polygons are GeoJSON-like dicts and the values are the associated value from the image, in the data type of the image. Note: due to floating point precision issues, values returned from a floating point image may not exactly match the original values. Notes ----- The amount of memory used by this algorithm is proportional to the number and complexity of polygons produced. This algorithm is most appropriate for simple thematic data. Data with high pixel-to-pixel variability, such as imagery, may produce one polygon per pixel and consume large amounts of memory. """ transform = guard_transform(transform) for s, v in _shapes(image, mask, connectivity, transform.to_gdal()): yield s, v @ensure_env def sieve(image, size, out=None, output=None, mask=None, connectivity=4): """Replace small polygons in `image` with value of their largest neighbor. Polygons are found for each set of neighboring pixels of the same value. 
Parameters ---------- image : numpy ndarray or rasterio Band object (RasterReader, bidx namedtuple) Must be of type rasterio.int16, rasterio.int32, rasterio.uint8, rasterio.uint16, or rasterio.float32 size : int minimum polygon size (number of pixels) to retain. out : numpy ndarray, optional Array of same shape and data type as `image` in which to store results. output : older alias for `out`, will be removed before 1.0. output : numpy ndarray, optional mask : numpy ndarray or rasterio Band object, optional Values of False or 0 will be excluded from feature generation Must evaluate to bool (rasterio.bool_ or rasterio.uint8) connectivity : int, optional Use 4 or 8 pixel connectivity for grouping pixels into features Returns ------- out : numpy ndarray Result Notes ----- GDAL only supports values that can be cast to 32-bit integers for this operation. The amount of memory used by this algorithm is proportional to the number and complexity of polygons found in the image. This algorithm is most appropriate for simple thematic data. Data with high pixel-to-pixel variability, such as imagery, may produce one polygon per pixel and consume large amounts of memory. """ # Start moving users over to 'out'. if output is not None: warnings.warn( "The 'output' keyword arg has been superseded by 'out' " "and will be removed before Rasterio 1.0.", FutureWarning, stacklevel=2) # pragma: no cover out = out if out is not None else output if out is None: out = np.zeros(image.shape, image.dtype) _sieve(image, size, out, mask, connectivity) return out @ensure_env def rasterize( shapes, out_shape=None, fill=0, out=None, output=None, transform=IDENTITY, all_touched=False, default_value=1, dtype=None): """Return an image array with input geometries burned in. Parameters ---------- shapes : iterable of (geometry, value) pairs or iterable over geometries. `geometry` can either be an object that implements the geo interface or GeoJSON-like object. out_shape : tuple or list Shape of output numpy ndarray. fill : int or float, optional Used as fill value for all areas not covered by input geometries. out : numpy ndarray, optional Array of same shape and data type as `image` in which to store results. output : older alias for `out`, will be removed before 1.0. transform : Affine transformation object, optional Transformation from pixel coordinates of `image` to the coordinate system of the input `shapes`. See the `transform` property of dataset objects. all_touched : boolean, optional If True, all pixels touched by geometries will be burned in. If false, only pixels whose center is within the polygon or that are selected by Bresenham's line algorithm will be burned in. default_value : int or float, optional Used as value for all geometries, if not provided in `shapes`. dtype : rasterio or numpy data type, optional Used as data type for results, if `out` is not provided. Returns ------- out : numpy ndarray Results Notes ----- Valid data types for `fill`, `default_value`, `out`, `dtype` and shape values are rasterio.int16, rasterio.int32, rasterio.uint8, rasterio.uint16, rasterio.uint32, rasterio.float32, rasterio.float64. 
""" valid_dtypes = ( 'int16', 'int32', 'uint8', 'uint16', 'uint32', 'float32', 'float64' ) def format_invalid_dtype(param): return '{0} dtype must be one of: {1}'.format( param, ', '.join(valid_dtypes) ) def format_cast_error(param, dtype): return '{0} cannot be cast to specified dtype: {1}'.format(param, dtype) if fill != 0: fill_array = np.array([fill]) if not validate_dtype(fill_array, valid_dtypes): raise ValueError(format_invalid_dtype('fill')) if dtype is not None and not can_cast_dtype(fill_array, dtype): raise ValueError(format_cast_error('fill', dtype)) if default_value != 1: default_value_array = np.array([default_value]) if not validate_dtype(default_value_array, valid_dtypes): raise ValueError(format_invalid_dtype('default_value')) if dtype is not None and not can_cast_dtype(default_value_array, dtype): raise ValueError(format_cast_error('default_vaue', dtype)) if dtype is not None and np.dtype(dtype).name not in valid_dtypes: raise ValueError(format_invalid_dtype('dtype')) valid_shapes = [] shape_values = [] for index, item in enumerate(shapes): if isinstance(item, (tuple, list)): geom, value = item else: geom = item value = default_value geom = getattr(geom, '__geo_interface__', None) or geom # not isinstance(geom, dict) or if 'type' in geom or 'coordinates' in geom: valid_shapes.append((geom, value)) shape_values.append(value) else: raise ValueError( 'Invalid geometry object at index {0}'.format(index) ) if not valid_shapes: raise ValueError('No valid geometry objects found for rasterize') shape_values = np.array(shape_values) if not validate_dtype(shape_values, valid_dtypes): raise ValueError(format_invalid_dtype('shape values')) if dtype is None: dtype = get_minimum_dtype(np.append(shape_values, fill)) elif not can_cast_dtype(shape_values, dtype): raise ValueError(format_cast_error('shape values', dtype)) if output is not None: warnings.warn( "The 'output' keyword arg has been superseded by 'out' " "and will be removed before Rasterio 1.0.", FutureWarning, stacklevel=2) # pragma: no cover out = out if out is not None else output if out is not None: if np.dtype(out.dtype).name not in valid_dtypes: raise ValueError(format_invalid_dtype('out')) if not can_cast_dtype(shape_values, out.dtype): raise ValueError(format_cast_error('shape values', out.dtype.name)) elif out_shape is not None: out = np.empty(out_shape, dtype=dtype) out.fill(fill) else: raise ValueError('Either an output shape or image must be provided') transform = guard_transform(transform) _rasterize(valid_shapes, out, transform.to_gdal(), all_touched) return out def bounds(geometry): """Return a (minx, miny, maxx, maxy) bounding box. From Fiona 1.4.8. Modified to return bbox from geometry if available. Parameters ---------- geometry: GeoJSON-like feature, feature collection, or geometry. Returns ------- tuple Bounding box: (minx, miny, maxx, maxy) """ if 'bbox' in geometry: return tuple(geometry['bbox']) geom = geometry.get('geometry') or geometry return _bounds(geom)
bsd-3-clause
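A short usage sketch for rasterize() and geometry_mask() from the rasterio record above. It assumes a rasterio installation; the 10x10 grid, the affine transform and the square polygon are made up purely for illustration.

# Hedged illustration only; not part of the record above.
from rasterio import features
from rasterio.transform import from_origin

transform = from_origin(0, 10, 1, 1)          # 1x1 pixels, origin at (0, 10)
square = {'type': 'Polygon',
          'coordinates': [[(2, 2), (2, 8), (8, 8), (8, 2), (2, 2)]]}

# Burn the polygon into a uint8 array: covered pixels get 255, the rest 0.
burned = features.rasterize([(square, 255)], out_shape=(10, 10),
                            transform=transform, fill=0, dtype='uint8')

# Boolean mask that is True where the polygon overlaps pixels.
mask = features.geometry_mask([square], out_shape=(10, 10),
                              transform=transform, invert=True)

print(burned.sum(), mask.sum())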
Tatsh-ansible/ansible
lib/ansible/plugins/action/ce.py
25
4673
# # (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import (absolute_import, division, print_function) __metaclass__ = type import sys import copy from ansible.plugins.action.normal import ActionModule as _ActionModule from ansible.module_utils.six import iteritems from ansible.module_utils.ce import ce_argument_spec from ansible.module_utils.basic import AnsibleFallbackNotFound try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() class ActionModule(_ActionModule): def run(self, tmp=None, task_vars=None): if self._play_context.connection != 'local': return dict( failed=True, msg='invalid connection specified, expected connection=local, ' 'got %s' % self._play_context.connection ) provider = self.load_provider() transport = provider['transport'] or 'cli' display.vvvv('connection transport is %s' % transport, self._play_context.remote_addr) if transport == 'cli': pc = copy.deepcopy(self._play_context) pc.connection = 'network_cli' pc.network_os = 'ce' pc.remote_addr = provider['host'] or self._play_context.remote_addr pc.port = int(provider['port']) or int(self._play_context.port) or 22 pc.remote_user = provider['username'] or self._play_context.connection_user pc.password = provider['password'] or self._play_context.password pc.timeout = provider['timeout'] or self._play_context.timeout self._task.args['provider'] = provider.update( host=pc.remote_addr, port=pc.port, username=pc.remote_user, password=pc.password, ssh_keyfile=pc.private_key_file ) display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr) connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin) socket_path = connection.run() display.vvvv('socket_path: %s' % socket_path, pc.remote_addr) if not socket_path: return {'failed': True, 'msg': 'unable to open shell. 
Please see: ' + 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'} # make sure we are in the right cli context which should be # enable mode and not config module rc, out, err = connection.exec_command('prompt()') while str(out).strip().endswith(']'): display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr) connection.exec_command('return') rc, out, err = connection.exec_command('prompt()') task_vars['ansible_socket'] = socket_path # make sure a transport value is set in args self._task.args['transport'] = transport result = super(ActionModule, self).run(tmp, task_vars) return result def load_provider(self): provider = self._task.args.get('provider', {}) for key, value in iteritems(ce_argument_spec): if key != 'provider' and key not in provider: if key in self._task.args: provider[key] = self._task.args[key] elif 'fallback' in value: provider[key] = self._fallback(value['fallback']) elif key not in provider: provider[key] = None return provider def _fallback(self, fallback): strategy = fallback[0] args = [] kwargs = {} for item in fallback[1:]: if isinstance(item, dict): kwargs = item else: args = item try: return strategy(*args, **kwargs) except AnsibleFallbackNotFound: pass
gpl-3.0
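The action plugin above fills in missing provider keys from the task arguments or from a fallback strategy (load_provider and _fallback). The sketch below illustrates that merge in plain Python; it does not import Ansible, and the env_fallback stand-in and the argument_spec contents are assumptions for demonstration only.

# Hedged illustration only; not part of the record above.
import os


def env_fallback(*names):
    """Return the first environment variable that is set, else raise."""
    for name in names:
        if name in os.environ:
            return os.environ[name]
    raise LookupError('no fallback value found')


argument_spec = {
    'host':     {},
    'username': {'fallback': (env_fallback, 'ANSIBLE_NET_USERNAME')},
    'password': {'fallback': (env_fallback, 'ANSIBLE_NET_PASSWORD')},
}


def load_provider(task_args):
    provider = dict(task_args.get('provider', {}))
    for key, spec in argument_spec.items():
        if key in provider:
            continue
        if key in task_args:
            provider[key] = task_args[key]          # take the top-level arg
        elif 'fallback' in spec:
            strategy = spec['fallback'][0]
            try:
                provider[key] = strategy(*spec['fallback'][1:])
            except LookupError:
                provider[key] = None
        else:
            provider[key] = None
    return provider


print(load_provider({'host': '10.0.0.1', 'username': 'admin'}))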
petemounce/ansible-modules-extras
notification/osx_say.py
161
2108
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2013, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: osx_say version_added: "1.2" short_description: Makes an OSX computer to speak. description: - makes an OS computer speak! Amuse your friends, annoy your coworkers! notes: - If you like this module, you may also be interested in the osx_say callback in the plugins/ directory of the source checkout. options: msg: description: What to say required: true voice: description: What voice to use required: false requirements: [ say ] author: - "Ansible Core Team" - "Michael DeHaan (@mpdehaan)" ''' EXAMPLES = ''' - local_action: osx_say msg="{{inventory_hostname}} is all done" voice=Zarvox ''' DEFAULT_VOICE='Trinoids' def say(module, msg, voice): module.run_command(["/usr/bin/say", msg, "--voice=%s" % (voice)], check_rc=True) def main(): module = AnsibleModule( argument_spec=dict( msg=dict(required=True), voice=dict(required=False, default=DEFAULT_VOICE), ), supports_check_mode=False ) if not os.path.exists("/usr/bin/say"): module.fail_json(msg="/usr/bin/say is not installed") msg = module.params['msg'] voice = module.params['voice'] say(module, msg, voice) module.exit_json(msg=msg, changed=False) # import module snippets from ansible.module_utils.basic import * main()
gpl-3.0
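For context, the say() helper in the osx_say module above just shells out to /usr/bin/say. The rough local equivalent below uses subprocess instead of AnsibleModule.run_command; it is an illustration only and works solely on a macOS host where /usr/bin/say exists.

# Hedged illustration only; not part of the record above.
import os
import subprocess


def say(msg, voice='Trinoids'):
    if not os.path.exists('/usr/bin/say'):
        raise RuntimeError('/usr/bin/say is not installed')
    subprocess.check_call(['/usr/bin/say', msg, '--voice=%s' % voice])


say('backup finished', voice='Zarvox')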
samedder/azure-cli
src/command_modules/azure-cli-appservice/azure/cli/command_modules/appservice/custom.py
1
65556
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from __future__ import print_function import threading try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse # pylint: disable=import-error import OpenSSL.crypto from msrestazure.azure_exceptions import CloudError from azure.mgmt.storage import StorageManagementClient from azure.mgmt.web.models import (Site, SiteConfig, User, AppServicePlan, SiteConfigResource, SkuDescription, SslState, HostNameBinding, NameValuePair, BackupRequest, DatabaseBackupSetting, BackupSchedule, RestoreRequest, FrequencyUnit, Certificate, HostNameSslState, RampUpRule) from azure.cli.core.commands.client_factory import get_mgmt_service_client from azure.cli.core.commands.arm import is_valid_resource_id, parse_resource_id from azure.cli.core.commands import LongRunningOperation from azure.cli.core.prompting import prompt_pass, NoTTYException import azure.cli.core.azlogging as azlogging from azure.cli.core.util import CLIError from .vsts_cd_provider import VstsContinuousDeliveryProvider from ._params import _generic_site_operation from ._client_factory import web_client_factory, ex_handler_factory logger = azlogging.get_az_logger(__name__) # pylint:disable=no-member,too-many-lines,too-many-locals def create_webapp(resource_group_name, name, plan, runtime=None, startup_file=None, deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master', deployment_local_git=None): if deployment_source_url and deployment_local_git: raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git') client = web_client_factory() if is_valid_resource_id(plan): parse_result = parse_resource_id(plan) plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name']) else: plan_info = client.app_service_plans.get(resource_group_name, plan) is_linux = plan_info.reserved location = plan_info.location site_config = SiteConfig(app_settings=[]) webapp_def = Site(server_farm_id=plan_info.id, location=location, site_config=site_config) if is_linux: if runtime and deployment_container_image_name: raise CLIError('usage error: --runtime | --deployment-container-image-name') if startup_file: site_config.app_command_line = startup_file if runtime: site_config.linux_fx_version = runtime elif deployment_container_image_name: site_config.linux_fx_version = _format_linux_fx_version(deployment_container_image_name) site_config.app_settings.append(NameValuePair("WEBSITES_ENABLE_APP_SERVICE_STORAGE", "false")) else: # must specify runtime raise CLIError('usage error: must specify --runtime | --deployment-container-image-name') # pylint: disable=line-too-long elif runtime: # windows webapp if startup_file or deployment_container_image_name: raise CLIError("usage error: --startup-file or --deployment-container-image-name is " "only appliable on linux webapp") helper = _StackRuntimeHelper(client) match = helper.resolve(runtime) if not match: raise CLIError("Runtime '{}' is not supported. 
Please invoke 'list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long match['setter'](match, site_config) poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def) webapp = LongRunningOperation()(poller) # Ensure SCC operations follow right after the 'create', no precedent appsetting update commands _set_remote_or_local_git(webapp, resource_group_name, name, deployment_source_url, deployment_source_branch, deployment_local_git) _fill_ftp_publishing_url(webapp, resource_group_name, name) return webapp def show_webapp(resource_group_name, name, slot=None, app_instance=None): webapp = app_instance if not app_instance: # when the routine is invoked as a help method, not through commands webapp = _generic_site_operation(resource_group_name, name, 'get', slot) _rename_server_farm_props(webapp) _fill_ftp_publishing_url(webapp, resource_group_name, name, slot) return webapp def list_webapp(resource_group_name=None): return _list_app(['app', 'app,linux'], resource_group_name) def list_function_app(resource_group_name=None): return _list_app(['functionapp'], resource_group_name) def _list_app(app_types, resource_group_name=None): client = web_client_factory() if resource_group_name: result = list(client.web_apps.list_by_resource_group(resource_group_name)) else: result = list(client.web_apps.list()) result = [x for x in result if x.kind in app_types] for webapp in result: _rename_server_farm_props(webapp) return result def list_runtimes(linux=False): client = web_client_factory() if linux: # workaround before API is exposed logger.warning('You are viewing an offline list of runtimes. For up to date list, ' 'check out https://aka.ms/linux-stacks') return ['node|6.4', 'node|4.5', 'node|6.2', 'node|6.6', 'node|6.9', 'node|6.10', 'php|5.6', 'php|7.0', 'dotnetcore|1.0', 'dotnetcore|1.1', 'ruby|2.3'] runtime_helper = _StackRuntimeHelper(client) return [s['displayName'] for s in runtime_helper.stacks] def _rename_server_farm_props(webapp): # Should be renamed in SDK in a future release setattr(webapp, 'app_service_plan_id', webapp.server_farm_id) del webapp.server_farm_id return webapp def delete_function_app(resource_group_name, name, slot=None): return _generic_site_operation(resource_group_name, name, 'delete', slot) def delete_webapp(resource_group_name, name, keep_metrics=None, keep_empty_plan=None, keep_dns_registration=None, slot=None): client = web_client_factory() delete_method = getattr(client.web_apps, 'delete' if slot is None else 'delete_slot') delete_method(resource_group_name, name, delete_metrics=False if keep_metrics else None, delete_empty_server_farm=False if keep_empty_plan else None, skip_dns_registration=False if keep_dns_registration else None) def stop_webapp(resource_group_name, name, slot=None): return _generic_site_operation(resource_group_name, name, 'stop', slot) def start_webapp(resource_group_name, name, slot=None): return _generic_site_operation(resource_group_name, name, 'start', slot) def restart_webapp(resource_group_name, name, slot=None): return _generic_site_operation(resource_group_name, name, 'restart', slot) def get_site_configs(resource_group_name, name, slot=None): return _generic_site_operation(resource_group_name, name, 'get_configuration', slot) def get_app_settings(resource_group_name, name, slot=None): result = _generic_site_operation(resource_group_name, name, 'list_application_settings', slot) client = web_client_factory() slot_app_setting_names = 
client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names return _build_app_settings_output(result.properties, slot_app_setting_names) def get_connection_strings(resource_group_name, name, slot=None): result = _generic_site_operation(resource_group_name, name, 'list_connection_strings', slot) client = web_client_factory() slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \ .connection_string_names or [] result = [{'name': p, 'value': result.properties[p], 'slotSetting': p in slot_constr_names} for p in result.properties] return result def _fill_ftp_publishing_url(webapp, resource_group_name, name, slot=None): profiles = list_publish_profiles(resource_group_name, name, slot) url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP') setattr(webapp, 'ftpPublishingUrl', url) return webapp def _format_linux_fx_version(custom_image_name): fx_version = custom_image_name.strip() fx_version_lower = fx_version.lower() # handles case of only spaces if fx_version: if not fx_version_lower.startswith('docker|'): fx_version = '{}|{}'.format('DOCKER', custom_image_name) else: fx_version = ' ' return fx_version def _add_linux_fx_version(resource_group_name, name, custom_image_name, slot=None): fx_version = _format_linux_fx_version(custom_image_name) return update_site_configs(resource_group_name, name, linux_fx_version=fx_version, slot=slot) def _delete_linux_fx_version(resource_group_name, name, slot=None): fx_version = ' ' return update_site_configs(resource_group_name, name, linux_fx_version=fx_version, slot=slot) def _get_linux_fx_version(resource_group_name, name, slot=None): site_config = get_site_configs(resource_group_name, name, slot) return site_config.linux_fx_version # for any modifications to the non-optional parameters, adjust the reflection logic accordingly # in the method def update_site_configs(resource_group_name, name, slot=None, linux_fx_version=None, php_version=None, python_version=None, # pylint: disable=unused-argument net_framework_version=None, # pylint: disable=unused-argument java_version=None, java_container=None, java_container_version=None, # pylint: disable=unused-argument remote_debugging_enabled=None, web_sockets_enabled=None, # pylint: disable=unused-argument always_on=None, auto_heal_enabled=None, # pylint: disable=unused-argument use32_bit_worker_process=None, # pylint: disable=unused-argument app_command_line=None): # pylint: disable=unused-argument configs = get_site_configs(resource_group_name, name, slot) import inspect frame = inspect.currentframe() bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on', 'auto_heal_enabled', 'use32_bit_worker_process'] # note: getargvalues is used already in azure.cli.core.commands. 
# and no simple functional replacement for this deprecating method for 3.5 args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method for arg in args[3:]: if values.get(arg, None): setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true') return _generic_site_operation(resource_group_name, name, 'update_configuration', slot, configs) def update_app_settings(resource_group_name, name, settings=None, slot=None, slot_settings=None): if not settings and not slot_settings: raise CLIError('Usage Error: --settings |--slot-settings') settings = settings or [] slot_settings = slot_settings or [] app_settings = _generic_site_operation(resource_group_name, name, 'list_application_settings', slot) for name_value in settings + slot_settings: # split at the first '=', appsetting should not have '=' in the name settings_name, value = name_value.split('=', 1) app_settings.properties[settings_name] = value result = _generic_site_operation(resource_group_name, name, 'update_application_settings', slot, app_settings) app_settings_slot_cfg_names = [] if slot_settings: client = web_client_factory() new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings] slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or [] slot_cfg_names.app_setting_names += new_slot_setting_names app_settings_slot_cfg_names = slot_cfg_names.app_setting_names client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names) return _build_app_settings_output(result.properties, app_settings_slot_cfg_names) def delete_app_settings(resource_group_name, name, setting_names, slot=None): app_settings = _generic_site_operation(resource_group_name, name, 'list_application_settings', slot) client = web_client_factory() slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) is_slot_settings = False for setting_name in setting_names: app_settings.properties.pop(setting_name, None) if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names: slot_cfg_names.app_setting_names.remove(setting_name) is_slot_settings = True if is_slot_settings: client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names) result = _generic_site_operation(resource_group_name, name, 'update_application_settings', slot, app_settings) return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names) def _build_app_settings_output(app_settings, slot_cfg_names): slot_cfg_names = slot_cfg_names or [] return [{'name': p, 'value': app_settings[p], 'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)] def update_connection_strings(resource_group_name, name, connection_string_type, settings=None, slot=None, slot_settings=None): from azure.mgmt.web.models import ConnStringValueTypePair if not settings and not slot_settings: raise CLIError('Usage Error: --settings |--slot-settings') settings = settings or [] slot_settings = slot_settings or [] conn_strings = _generic_site_operation(resource_group_name, name, 'list_connection_strings', slot) for name_value in settings + slot_settings: # split at the first '=', connection string should not have '=' in the name conn_string_name, value = name_value.split('=', 1) if value[0] in ["'", '"']: # strip away the quots used as separators value = value[1:-1] conn_strings.properties[conn_string_name] = 
ConnStringValueTypePair(value, connection_string_type) result = _generic_site_operation(resource_group_name, name, 'update_connection_strings', slot, conn_strings) if slot_settings: client = web_client_factory() new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings] slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or [] slot_cfg_names.connection_string_names += new_slot_setting_names client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names) return result.properties def delete_connection_strings(resource_group_name, name, setting_names, slot=None): conn_strings = _generic_site_operation(resource_group_name, name, 'list_connection_strings', slot) client = web_client_factory() slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) is_slot_settings = False for setting_name in setting_names: conn_strings.properties.pop(setting_name, None) if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names: slot_cfg_names.connection_string_names.remove(setting_name) is_slot_settings = True if is_slot_settings: client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names) return _generic_site_operation(resource_group_name, name, 'update_connection_strings', slot, conn_strings) CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME', 'DOCKER_REGISTRY_SERVER_PASSWORD', 'DOCKER_CUSTOM_IMAGE_NAME'] APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD'] def update_container_settings(resource_group_name, name, docker_registry_server_url=None, docker_custom_image_name=None, docker_registry_server_user=None, docker_registry_server_password=None, slot=None): settings = [] if docker_registry_server_url is not None: settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url) if (not docker_registry_server_user and not docker_registry_server_password and docker_registry_server_url and '.azurecr.io' in docker_registry_server_url): logger.warning('No credential was provided to access Azure Container Registry. 
Trying to look up...') parsed = urlparse(docker_registry_server_url) registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0] try: docker_registry_server_user, docker_registry_server_password = _get_acr_cred(registry_name) except Exception as ex: # pylint: disable=broad-except logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed if docker_registry_server_user is not None: settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user) if docker_registry_server_password is not None: settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password) if docker_custom_image_name is not None: _add_linux_fx_version(resource_group_name, name, docker_custom_image_name, slot) if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url: update_app_settings(resource_group_name, name, settings, slot) settings = get_app_settings(resource_group_name, name, slot) return _mask_creds_related_appsettings(_filter_for_container_settings(resource_group_name, name, settings)) def _get_acr_cred(registry_name): from azure.mgmt.containerregistry import ContainerRegistryManagementClient from azure.cli.core.commands.parameters import get_resources_in_subscription client = get_mgmt_service_client(ContainerRegistryManagementClient).registries result = get_resources_in_subscription('Microsoft.ContainerRegistry/registries') result = [item for item in result if item.name.lower() == registry_name] if not result or len(result) > 1: raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name)) resource_group_name = parse_resource_id(result[0].id)['resource_group'] registry = client.get(resource_group_name, registry_name) if registry.admin_user_enabled: # pylint: disable=no-member cred = client.list_credentials(resource_group_name, registry_name) return cred.username, cred.passwords[0].value raise CLIError("Failed to retrieve container registry credentails. 
Please either provide the " "credentail or run 'az acr update -n {} --admin-enabled true' to enable " "admin first.".format(registry_name)) def delete_container_settings(resource_group_name, name, slot=None): _delete_linux_fx_version(resource_group_name, name, slot) delete_app_settings(resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot) def show_container_settings(resource_group_name, name, slot=None): settings = get_app_settings(resource_group_name, name, slot) return _mask_creds_related_appsettings(_filter_for_container_settings(resource_group_name, name, settings, slot)) def _filter_for_container_settings(resource_group_name, name, settings, slot=None): result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES] fx_version = _get_linux_fx_version(resource_group_name, name, slot).strip() if fx_version: added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME', 'value': fx_version} result.append(added_image_name) return result # TODO: remove this when #3660(service tracking issue) is resolved def _mask_creds_related_appsettings(settings): for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]: settings[x] = None return settings def add_hostname(resource_group_name, webapp_name, hostname, slot=None): client = web_client_factory() webapp = client.web_apps.get(resource_group_name, webapp_name) binding = HostNameBinding(webapp.location, host_name_binding_name=hostname, site_name=webapp.name) if slot is None: return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding) return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding, slot) def delete_hostname(resource_group_name, webapp_name, hostname, slot=None): client = web_client_factory() if slot is None: return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname) return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname) def list_hostnames(resource_group_name, webapp_name, slot=None): result = list(_generic_site_operation(resource_group_name, webapp_name, 'list_host_name_bindings', slot)) for r in result: r.name = r.name.split('/')[-1] return result def get_external_ip(resource_group_name, webapp_name): # logics here are ported from portal client = web_client_factory() webapp_name = client.web_apps.get(resource_group_name, webapp_name) if webapp_name.hosting_environment_profile: address = client.app_service_environments.list_vips( resource_group_name, webapp_name.hosting_environment_profile.name) if address.internal_ip_address: ip_address = address.internal_ip_address else: vip = next((s for s in webapp_name.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None) ip_address = vip.virtual_ip if vip else address.service_ip_address else: ip_address = _resolve_hostname_through_dns(webapp_name.default_host_name) return {'ip': ip_address} def _resolve_hostname_through_dns(hostname): import socket return socket.gethostbyname(hostname) def create_webapp_slot(resource_group_name, webapp, slot, configuration_source=None): client = web_client_factory() site = client.web_apps.get(resource_group_name, webapp) location = site.location slot_def = Site(server_farm_id=site.server_farm_id, location=location) clone_from_prod = None slot_def.site_config = SiteConfig() poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot) result = LongRunningOperation()(poller) if configuration_source: clone_from_prod = 
configuration_source.lower() == webapp.lower() site_config = get_site_configs( resource_group_name, webapp, None if clone_from_prod else configuration_source) _generic_site_operation(resource_group_name, webapp, 'update_configuration', slot, site_config) # slot create doesn't clone over the app-settings and connection-strings, so we do it here # also make sure slot settings don't get propagated. if configuration_source: slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp) src_slot = None if clone_from_prod else configuration_source app_settings = _generic_site_operation(resource_group_name, webapp, 'list_application_settings', src_slot) for a in slot_cfg_names.app_setting_names or []: app_settings.properties.pop(a, None) connection_strings = _generic_site_operation(resource_group_name, webapp, 'list_connection_strings', src_slot) for a in slot_cfg_names.connection_string_names or []: connection_strings.properties.pop(a, None) _generic_site_operation(resource_group_name, webapp, 'update_application_settings', slot, app_settings) _generic_site_operation(resource_group_name, webapp, 'update_connection_strings', slot, connection_strings) result.name = result.name.split('/')[-1] return result def config_source_control(resource_group_name, name, repo_url, repository_type=None, branch=None, # pylint: disable=too-many-locals git_token=None, manual_integration=None, slot=None, cd_provider=None, cd_app_type=None, cd_account=None, cd_account_must_exist=None): client = web_client_factory() location = _get_location_from_webapp(client, resource_group_name, name) if cd_provider == 'vsts': create_account = not cd_account_must_exist vsts_provider = VstsContinuousDeliveryProvider() status = vsts_provider.setup_continuous_delivery(resource_group_name, name, repo_url, branch, git_token, slot, cd_app_type, cd_account, create_account, location) logger.warning(status.status_message) return status else: from azure.mgmt.web.models import SiteSourceControl, SourceControl if git_token: sc = SourceControl(location, name='GitHub', token=git_token) client.update_source_control('GitHub', sc) source_control = SiteSourceControl(location, repo_url=repo_url, branch=branch, is_manual_integration=manual_integration, is_mercurial=(repository_type != 'git')) # SCC config can fail if previous commands caused SCMSite shutdown, so retry here. for i in range(5): try: poller = _generic_site_operation(resource_group_name, name, 'create_or_update_source_control', slot, source_control) return LongRunningOperation()(poller) except Exception as ex: # pylint: disable=broad-except import re import time ex = ex_handler_factory(no_throw=True)(ex) # for non server errors(50x), just throw; otherwise retry 4 times if i == 4 or not re.findall(r'\(50\d\)', str(ex)): raise logger.warning('retrying %s/4', i + 1) time.sleep(5) # retry in a moment def update_git_token(git_token=None): ''' Update source control token cached in Azure app service. If no token is provided, the command will clean up existing token. 
''' client = web_client_factory() from azure.mgmt.web.models import SourceControl sc = SourceControl('not-really-needed', name='GitHub', token=git_token or '') return client.update_source_control('GitHub', sc) def show_source_control(resource_group_name, name, slot=None): return _generic_site_operation(resource_group_name, name, 'get_source_control', slot) def delete_source_control(resource_group_name, name, slot=None): return _generic_site_operation(resource_group_name, name, 'delete_source_control', slot) def enable_local_git(resource_group_name, name, slot=None): client = web_client_factory() location = _get_location_from_webapp(client, resource_group_name, name) site_config = SiteConfigResource(location) site_config.scm_type = 'LocalGit' if slot is None: client.web_apps.create_or_update_configuration(resource_group_name, name, site_config) else: client.web_apps.create_or_update_configuration_slot(resource_group_name, name, site_config, slot) return {'url': _get_local_git_url(client, resource_group_name, name, slot)} def sync_site_repo(resource_group_name, name, slot=None): try: return _generic_site_operation(resource_group_name, name, 'sync_repository', slot) except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here if ex.status_code not in [200, 204]: raise ex def list_app_service_plans(resource_group_name=None): client = web_client_factory() if resource_group_name is None: plans = list(client.app_service_plans.list()) else: plans = list(client.app_service_plans.list_by_resource_group(resource_group_name)) for plan in plans: # prune a few useless fields del plan.app_service_plan_name del plan.geo_region del plan.subscription return plans def _linux_sku_check(sku): tier = _get_sku_name(sku) if tier in ['BASIC', 'STANDARD']: return format_string = 'usage error: {0} is not a valid sku for linux plan, please use one of the following: {1}' raise CLIError(format_string.format(sku, 'B1, B2, B3, S1, S2, S3')) def create_app_service_plan(resource_group_name, name, is_linux, sku='B1', number_of_workers=None, location=None): client = web_client_factory() sku = _normalize_sku(sku) if location is None: location = _get_location_from_resource_group(resource_group_name) if is_linux: _linux_sku_check(sku) # the api is odd on parameter naming, have to live with it for now sku_def = SkuDescription(tier=_get_sku_name(sku), name=sku, capacity=number_of_workers) plan_def = AppServicePlan(location, app_service_plan_name=name, sku=sku_def, reserved=(is_linux or None)) return client.app_service_plans.create_or_update(resource_group_name, name, plan_def) def update_app_service_plan(instance, sku=None, number_of_workers=None, admin_site_name=None): sku_def = instance.sku if sku is not None: sku = _normalize_sku(sku) sku_def.tier = _get_sku_name(sku) sku_def.name = sku if number_of_workers is not None: sku_def.capacity = number_of_workers instance.sku = sku_def if admin_site_name is not None: instance.admin_site_name = admin_site_name return instance def show_backup_configuration(resource_group_name, webapp_name, slot=None): try: return _generic_site_operation(resource_group_name, webapp_name, 'get_backup_configuration', slot) except: raise CLIError('Backup configuration not found') def list_backups(resource_group_name, webapp_name, slot=None): return _generic_site_operation(resource_group_name, webapp_name, 'list_backups', slot) def create_backup(resource_group_name, webapp_name, storage_account_url, db_name=None, db_type=None, db_connection_string=None, backup_name=None, slot=None): 
client = web_client_factory() if backup_name and backup_name.lower().endswith('.zip'): backup_name = backup_name[:-4] location = _get_location_from_webapp(client, resource_group_name, webapp_name) db_setting = _create_db_setting(db_name, db_type, db_connection_string) backup_request = BackupRequest(location, backup_request_name=backup_name, storage_account_url=storage_account_url, databases=db_setting) if slot: return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot) return client.web_apps.backup(resource_group_name, webapp_name, backup_request) def update_backup_schedule(resource_group_name, webapp_name, storage_account_url=None, frequency=None, keep_at_least_one_backup=None, retention_period_in_days=None, db_name=None, db_connection_string=None, db_type=None, slot=None): client = web_client_factory() location = _get_location_from_webapp(client, resource_group_name, webapp_name) configuration = None try: configuration = _generic_site_operation(resource_group_name, webapp_name, 'get_backup_configuration', slot) except CloudError: # No configuration set yet if not all([storage_account_url, frequency, retention_period_in_days, keep_at_least_one_backup]): raise CLIError('No backup configuration found. A configuration must be created. ' + 'Usage: --container-url URL --frequency TIME --retention DAYS ' + '--retain-one TRUE/FALSE') # If arguments were not specified, use the values in the current backup schedule if storage_account_url is None: storage_account_url = configuration.storage_account_url if retention_period_in_days is None: retention_period_in_days = configuration.backup_schedule.retention_period_in_days if keep_at_least_one_backup is None: keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup else: keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true' if frequency: # Parse schedule frequency frequency_num, frequency_unit = _parse_frequency(frequency) else: frequency_num = configuration.backup_schedule.frequency_interval frequency_unit = configuration.backup_schedule.frequency_unit if configuration and configuration.databases: db = configuration.databases[0] db_type = db_type or db.database_type db_name = db_name or db.name db_connection_string = db_connection_string or db.connection_string db_setting = _create_db_setting(db_name, db_type, db_connection_string) backup_schedule = BackupSchedule(frequency_num, frequency_unit.name, keep_at_least_one_backup, retention_period_in_days) backup_request = BackupRequest(location, backup_schedule=backup_schedule, enabled=True, storage_account_url=storage_account_url, databases=db_setting) if slot: return client.web_apps.update_backup_configuration_slot(resource_group_name, webapp_name, backup_request, slot) return client.web_apps.update_backup_configuration(resource_group_name, webapp_name, backup_request) def restore_backup(resource_group_name, webapp_name, storage_account_url, backup_name, db_name=None, db_type=None, db_connection_string=None, target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None): client = web_client_factory() storage_blob_name = backup_name if not storage_blob_name.lower().endswith('.zip'): storage_blob_name += '.zip' location = _get_location_from_webapp(client, resource_group_name, webapp_name) db_setting = _create_db_setting(db_name, db_type, db_connection_string) restore_request = RestoreRequest(location, storage_account_url=storage_account_url, blob_name=storage_blob_name, overwrite=overwrite, site_name=target_name, 
databases=db_setting, ignore_conflicting_host_names=ignore_hostname_conflict) if slot: return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request, slot) return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request) def _create_db_setting(db_name, db_type, db_connection_string): if all([db_name, db_type, db_connection_string]): return [DatabaseBackupSetting(db_type, db_name, connection_string=db_connection_string)] elif any([db_name, db_type, db_connection_string]): raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING') def _parse_frequency(frequency): unit_part = frequency.lower()[-1] if unit_part == 'd': frequency_unit = FrequencyUnit.day elif unit_part == 'h': frequency_unit = FrequencyUnit.hour else: raise CLIError('Frequency must end with d or h for "day" or "hour"') try: frequency_num = int(frequency[:-1]) except ValueError: raise CLIError('Frequency must start with a number') if frequency_num < 0: raise CLIError('Frequency must be positive') return frequency_num, frequency_unit def _normalize_sku(sku): sku = sku.upper() if sku == 'FREE': return 'F1' elif sku == 'SHARED': return 'D1' return sku def _get_sku_name(tier): tier = tier.upper() if tier == 'F1': return 'FREE' elif tier == 'D1': return 'SHARED' elif tier in ['B1', 'B2', 'B3']: return 'BASIC' elif tier in ['S1', 'S2', 'S3']: return 'STANDARD' elif tier in ['P1', 'P2', 'P3', 'P1V2', 'P2V2', 'P3V2']: return 'PREMIUM' else: raise CLIError("Invalid sku(pricing tier), please refer to command help for valid values") def _get_location_from_resource_group(resource_group_name): from azure.mgmt.resource import ResourceManagementClient client = get_mgmt_service_client(ResourceManagementClient) group = client.resource_groups.get(resource_group_name) return group.location def _get_location_from_webapp(client, resource_group_name, webapp): webapp = client.web_apps.get(resource_group_name, webapp) return webapp.location def _get_local_git_url(client, resource_group_name, name, slot=None): user = client.get_publishing_user() result = _generic_site_operation(resource_group_name, name, 'get_source_control', slot) parsed = urlparse(result.repo_url) return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name, parsed.netloc, name) def _get_scm_url(resource_group_name, name, slot=None): from azure.mgmt.web.models import HostType webapp = show_webapp(resource_group_name, name, slot=slot) for host in webapp.host_name_ssl_states or []: if host.host_type == HostType.repository: return "https://{}".format(host.name) # this should not happen, but throw anyway raise ValueError('Failed to retrieve Scm Uri') def set_deployment_user(user_name, password=None): ''' Update deployment credentials.(Note, all webapps in your subscription will be impacted) ''' client = web_client_factory() user = User(location='not-really-needed') user.publishing_user_name = user_name if password is None: try: password = prompt_pass(msg='Password: ', confirm=True) except NoTTYException: raise CLIError('Please specify both username and password in non-interactive mode.') user.publishing_password = password result = client.update_publishing_user(user) return result def list_publish_profiles(resource_group_name, name, slot=None): import xmltodict content = _generic_site_operation(resource_group_name, name, 'list_publishing_profile_xml_with_secrets', slot) full_xml = '' for f in content: full_xml += f.decode() profiles = xmltodict.parse(full_xml, 
xml_attribs=True)['publishData']['publishProfile'] converted = [] for profile in profiles: new = {} for key in profile: # strip the leading '@' xmltodict put in for attributes new[key.lstrip('@')] = profile[key] converted.append(new) return converted def enable_cd(resource_group_name, name, enable, slot=None): settings = [] settings.append("DOCKER_ENABLE_CI=" + enable) update_app_settings(resource_group_name, name, settings, slot) return show_container_cd_url(resource_group_name, name, slot) def show_container_cd_url(resource_group_name, name, slot=None): settings = get_app_settings(resource_group_name, name, slot) docker_enabled = False for setting in settings: if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true': docker_enabled = True break cd_settings = {} cd_settings['DOCKER_ENABLE_CI'] = docker_enabled if docker_enabled: profiles = list_publish_profiles(resource_group_name, name, slot) for profile in profiles: if profile['publishMethod'] == 'MSDeploy': scmUrl = profile['publishUrl'].replace(":443", "") cd_url = 'https://' + profile['userName'] + ':' + profile['userPWD'] + '@' + scmUrl + '/docker/hook' cd_settings['CI_CD_URL'] = cd_url break else: cd_settings['CI_CD_URL'] = '' return cd_settings def view_in_browser(resource_group_name, name, slot=None, logs=False): site = _generic_site_operation(resource_group_name, name, 'get', slot) url = site.default_host_name ssl_host = next((h for h in site.host_name_ssl_states if h.ssl_state != SslState.disabled), None) url = ('https' if ssl_host else 'http') + '://' + url _open_page_in_browser(url) if logs: get_streaming_log(resource_group_name, name, provider=None, slot=slot) def _open_page_in_browser(url): import sys if sys.platform.lower() == 'darwin': # handle 2 things: # a. On OSX sierra, 'python -m webbrowser -t <url>' emits out "execution error: <url> doesn't # understand the "open location" message" # b. Python 2.x can't sniff out the default browser import subprocess subprocess.Popen(['open', url]) else: import webbrowser webbrowser.open(url, new=2) # 2 means: open in a new tab, if possible # TODO: expose new blob suport def config_diagnostics(resource_group_name, name, level=None, application_logging=None, web_server_logging=None, detailed_error_messages=None, failed_request_tracing=None, slot=None): from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig, SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig, EnabledConfig) client = web_client_factory() # TODO: ensure we call get_site only once site = client.web_apps.get(resource_group_name, name) location = site.location application_logs = None if application_logging is not None: if not application_logging: level = 'Off' elif level is None: level = 'Error' fs_log = FileSystemApplicationLogsConfig(level) application_logs = ApplicationLogsConfig(fs_log) http_logs = None if web_server_logging is not None: enabled = web_server_logging # 100 mb max log size, retenting last 3 days. 
Yes we hard code it, portal does too fs_server_log = FileSystemHttpLogsConfig(100, 3, enabled) http_logs = HttpLogsConfig(fs_server_log) detailed_error_messages_logs = (None if detailed_error_messages is None else EnabledConfig(detailed_error_messages)) failed_request_tracing_logs = (None if failed_request_tracing is None else EnabledConfig(failed_request_tracing)) site_log_config = SiteLogsConfig(location, application_logs=application_logs, http_logs=http_logs, failed_requests_tracing=failed_request_tracing_logs, detailed_error_messages=detailed_error_messages_logs) return _generic_site_operation(resource_group_name, name, 'update_diagnostic_logs_config', slot, site_log_config) def show_diagnostic_settings(resource_group_name, name, slot=None): return _generic_site_operation(resource_group_name, name, 'get_diagnostic_logs_configuration', slot) def config_slot_auto_swap(resource_group_name, webapp, slot, auto_swap_slot=None, disable=None): client = web_client_factory() site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot) site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production') return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot) def list_slots(resource_group_name, webapp): client = web_client_factory() slots = list(client.web_apps.list_slots(resource_group_name, webapp)) for slot in slots: slot.name = slot.name.split('/')[-1] setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name']) del slot.server_farm_id return slots def swap_slot(resource_group_name, webapp, slot, target_slot=None, action='swap'): client = web_client_factory() if action == 'swap': if target_slot is None: poller = client.web_apps.swap_slot_with_production(resource_group_name, webapp, slot, True) else: poller = client.web_apps.swap_slot_slot(resource_group_name, webapp, slot, target_slot, True) return poller elif action == 'preview': if target_slot is None: result = client.web_apps.apply_slot_config_to_production(resource_group_name, webapp, slot, True) else: result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp, slot, target_slot, True) return result else: # reset # we will reset both source slot and target slot if target_slot is None: client.web_apps.reset_production_slot_config(resource_group_name, webapp) else: client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot) client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, slot) return None def delete_slot(resource_group_name, webapp, slot): client = web_client_factory() # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc... client.web_apps.delete_slot(resource_group_name, webapp, slot) def set_traffic_routing(resource_group_name, name, distribution): client = web_client_factory() site = client.web_apps.get(resource_group_name, name) configs = get_site_configs(resource_group_name, name) host_name_suffix = '.' 
+ site.default_host_name.split('.', 1)[1] configs.experiments.ramp_up_rules = [] for r in distribution: slot, percentage = r.split('=') configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=slot + host_name_suffix, reroute_percentage=float(percentage), name=slot)) _generic_site_operation(resource_group_name, name, 'update_configuration', None, configs) return configs.experiments.ramp_up_rules def show_traffic_routing(resource_group_name, name): configs = get_site_configs(resource_group_name, name) return configs.experiments.ramp_up_rules def clear_traffic_routing(resource_group_name, name): set_traffic_routing(resource_group_name, name, []) def get_streaming_log(resource_group_name, name, provider=None, slot=None): scm_url = _get_scm_url(resource_group_name, name, slot) streaming_url = scm_url + '/logstream' import time if provider: streaming_url += ('/' + provider.lstrip('/')) client = web_client_factory() user, password = _get_site_credential(client, resource_group_name, name) t = threading.Thread(target=_stream_trace, args=(streaming_url, user, password)) t.daemon = True t.start() while True: time.sleep(100) # so that ctrl+c can stop the command def download_historical_logs(resource_group_name, name, log_file=None, slot=None): scm_url = _get_scm_url(resource_group_name, name, slot) url = scm_url.rstrip('/') + '/dump' import requests r = requests.get(url, stream=True) with open(log_file, 'wb') as f: for chunk in r.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks f.write(chunk) logger.warning('Downloaded logs to %s', log_file) def _get_site_credential(client, resource_group_name, name): creds = client.web_apps.list_publishing_credentials(resource_group_name, name) creds = creds.result() return (creds.publishing_user_name, creds.publishing_password) def _stream_trace(streaming_url, user_name, password): import sys import certifi import urllib3 try: import urllib3.contrib.pyopenssl urllib3.contrib.pyopenssl.inject_into_urllib3() except ImportError: pass std_encoding = sys.stdout.encoding http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where()) headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password)) r = http.request( 'GET', streaming_url, headers=headers, preload_content=False ) for chunk in r.stream(): if chunk: # Extra encode() and decode for stdout which does not surpport 'utf-8' print(chunk.decode(encoding='utf-8', errors='replace') .encode(std_encoding, errors='replace') .decode(std_encoding, errors='replace'), end='') # each line of log has CRLF. 
r.release_conn() def upload_ssl_cert(resource_group_name, name, certificate_password, certificate_file): client = web_client_factory() webapp = _generic_site_operation(resource_group_name, name, 'get') cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group'] cert_file = open(certificate_file, 'rb') cert_contents = cert_file.read() hosting_environment_profile_param = webapp.hosting_environment_profile if hosting_environment_profile_param is None: hosting_environment_profile_param = "" thumb_print = _get_cert(certificate_password, certificate_file) cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param, webapp.location, cert_resource_group_name) cert = Certificate(password=certificate_password, pfx_blob=cert_contents, location=webapp.location) return client.certificates.create_or_update(cert_resource_group_name, cert_name, cert) def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name): return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name) def _get_cert(certificate_password, certificate_file): ''' Decrypts the .pfx file ''' p12 = OpenSSL.crypto.load_pkcs12(open(certificate_file, 'rb').read(), certificate_password) cert = p12.get_certificate() digest_algorithm = 'sha1' thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '') return thumbprint def list_ssl_certs(resource_group_name): client = web_client_factory() return client.certificates.list_by_resource_group(resource_group_name) def delete_ssl_cert(resource_group_name, certificate_thumbprint): client = web_client_factory() webapp_certs = client.certificates.list_by_resource_group(resource_group_name) for webapp_cert in webapp_certs: if webapp_cert.thumbprint == certificate_thumbprint: return client.certificates.delete(resource_group_name, webapp_cert.name) raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint)) def _update_host_name_ssl_state(resource_group_name, webapp_name, location, host_name, ssl_state, thumbprint, slot=None): updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name, ssl_state=ssl_state, thumbprint=thumbprint, to_update=True)], location=location) name = '{}({})'.format(webapp_name, slot) if slot else webapp_name return _generic_site_operation(resource_group_name, name, 'create_or_update', slot, updated_webapp) def _update_ssl_binding(resource_group_name, name, certificate_thumbprint, ssl_type, slot=None): client = web_client_factory() webapp = client.web_apps.get(resource_group_name, name) cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group'] webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name) for webapp_cert in webapp_certs: if webapp_cert.thumbprint == certificate_thumbprint: if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'): return _update_host_name_ssl_state(resource_group_name, name, webapp.location, webapp_cert.host_names[0], ssl_type, certificate_thumbprint, slot) query_result = list_hostnames(resource_group_name, name, slot) hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result] to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp) for h in to_update: _update_host_name_ssl_state(resource_group_name, name, webapp.location, h, ssl_type, certificate_thumbprint, slot) return show_webapp(resource_group_name, name, slot) raise CLIError("Certificate for thumbprint '{}' not 
found.".format(certificate_thumbprint)) def bind_ssl_cert(resource_group_name, name, certificate_thumbprint, ssl_type, slot=None): return _update_ssl_binding( resource_group_name, name, certificate_thumbprint, SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot) def unbind_ssl_cert(resource_group_name, name, certificate_thumbprint, slot=None): return _update_ssl_binding(resource_group_name, name, certificate_thumbprint, SslState.disabled, slot) def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp): # the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc matched = set() for hostname in hostnames_from_cert: if hostname.startswith('*'): for h in hostnames_in_webapp: if hostname[hostname.find('.'):] == h[h.find('.'):]: matched.add(h) elif hostname in hostnames_in_webapp: matched.add(hostname) return matched # help class handles runtime stack in format like 'node|6.1', 'php|5.5' class _StackRuntimeHelper(object): def __init__(self, client): self._client = client self._stacks = [] def resolve(self, display_name): self._load_stacks() return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()), None) @property def stacks(self): self._load_stacks() return self._stacks @staticmethod def update_site_config(stack, site_config): for k, v in stack['configs'].items(): setattr(site_config, k, v) return site_config @staticmethod def update_site_appsettings(stack, site_config): site_config.app_settings += [NameValuePair(k, v) for k, v in stack['configs'].items()] return site_config def _load_stacks(self): if self._stacks: return raw_list = self._client.provider.get_available_stacks() stacks = raw_list['value'] config_mappings = { 'node': 'WEBSITE_NODE_DEFAULT_VERSION', 'python': 'python_version', 'php': 'php_version', 'aspnet': 'net_framework_version' } result = [] # get all stack version except 'java' for name, properties in [(s['name'], s['properties']) for s in stacks if s['name'] in config_mappings]: for major in properties['majorVersions']: default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']), None) result.append({ 'displayName': name + '|' + major['displayVersion'], 'configs': { config_mappings[name]: (default_minor['runtimeVersion'] if default_minor else major['runtimeVersion']) } }) # deal with java, which pairs with java container version java_stack = next((s for s in stacks if s['name'] == 'java')) java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers')) for java_version in java_stack['properties']['majorVersions']: for fx in java_container_stack['properties']['frameworks']: for fx_version in fx['majorVersions']: result.append({ 'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'], fx['display'], fx_version['displayVersion']), 'configs': { 'java_version': java_version['runtimeVersion'], 'java_container': fx['name'], 'java_container_version': fx_version['runtimeVersion'] } }) for r in result: r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in r['displayName'] else _StackRuntimeHelper.update_site_config) self._stacks = result def create_function(resource_group_name, name, storage_account, plan=None, consumption_plan_location=None, deployment_source_url=None, deployment_source_branch='master', deployment_local_git=None): if deployment_source_url and deployment_local_git: raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git') if bool(plan) == 
bool(consumption_plan_location): raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION") site_config = SiteConfig(app_settings=[]) functionapp_def = Site(location=None, site_config=site_config) client = web_client_factory() if consumption_plan_location: locations = list_consumption_locations() location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None) if location is None: raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations") functionapp_def.location = consumption_plan_location else: if is_valid_resource_id(plan): plan = parse_resource_id(plan)['name'] plan_info = client.app_service_plans.get(resource_group_name, plan) location = plan_info.location functionapp_def.server_farm_id = plan functionapp_def.location = location con_string = _validate_and_get_connection_string(resource_group_name, storage_account) functionapp_def.kind = 'functionapp' # adding appsetting to site to make it a function site_config.app_settings.append(NameValuePair('AzureWebJobsStorage', con_string)) site_config.app_settings.append(NameValuePair('AzureWebJobsDashboard', con_string)) site_config.app_settings.append(NameValuePair('WEBSITE_NODE_DEFAULT_VERSION', '6.5.0')) site_config.app_settings.append(NameValuePair('FUNCTIONS_EXTENSION_VERSION', '~1')) if consumption_plan_location is None: site_config.always_on = True else: site_config.app_settings.append(NameValuePair('WEBSITE_CONTENTAZUREFILECONNECTIONSTRING', con_string)) site_config.app_settings.append(NameValuePair('WEBSITE_CONTENTSHARE', name.lower())) poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def) functionapp = LongRunningOperation()(poller) _set_remote_or_local_git(functionapp, resource_group_name, name, deployment_source_url, deployment_source_branch, deployment_local_git) return functionapp def _set_remote_or_local_git(webapp, resource_group_name, name, deployment_source_url=None, deployment_source_branch='master', deployment_local_git=None): if deployment_source_url: logger.warning("Linking to git repository '%s'", deployment_source_url) try: config_source_control(resource_group_name, name, deployment_source_url, 'git', deployment_source_branch, manual_integration=True) except Exception as ex: # pylint: disable=broad-except ex = ex_handler_factory(no_throw=True)(ex) logger.warning("Link to git repository failed due to error '%s'", ex) if deployment_local_git: local_git_info = enable_local_git(resource_group_name, name) logger.warning("Local git is configured with url of '%s'", local_git_info['url']) setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url']) def _validate_and_get_connection_string(resource_group_name, storage_account): from azure.cli.core._profile import CLOUD sa_resource_group = resource_group_name if is_valid_resource_id(storage_account): sa_resource_group = parse_resource_id(storage_account)['resource_group'] storage_account = parse_resource_id(storage_account)['name'] storage_client = get_mgmt_service_client(StorageManagementClient) storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group, storage_account) error_message = '' endpoints = storage_properties.primary_endpoints sku = storage_properties.sku.name.value allowed_storage_types = ['Standard_GRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS'] for e in ['blob', 'queue', 'table']: if not getattr(endpoints, e, None): error_message = "Storage account '{}' has no '{}' endpoint. 
It must have table, queue, and blob endpoints all enabled".format(e, storage_account) # pylint: disable=line-too-long if sku not in allowed_storage_types: error_message += 'Storage type {} is not allowed'.format(sku) if error_message: raise CLIError(error_message) obj = storage_client.storage_accounts.list_keys(resource_group_name, storage_account) # pylint: disable=no-member try: keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member except AttributeError: # Older API versions have a slightly different structure keys = [obj.key1, obj.key2] # pylint: disable=no-member endpoint_suffix = CLOUD.suffixes.storage_endpoint connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format( "https", endpoint_suffix, storage_account, keys[0]) # pylint: disable=no-member return connection_string def list_consumption_locations(): client = web_client_factory() regions = client.list_geo_regions(sku='Dynamic') return [{'name': x.name.lower().replace(" ", "")} for x in regions]
mit
pjryan126/solid-start-careers
store/api/glassdoor/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.py
979
4306
from __future__ import absolute_import, division, unicode_literals

from gettext import gettext
_ = gettext

from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)


class LintError(Exception):
    pass


class Filter(_base.Filter):
    def __iter__(self):
        open_elements = []
        contentModelFlag = "PCDATA"
        for token in _base.Filter.__iter__(self):
            type = token["type"]
            if type in ("StartTag", "EmptyTag"):
                name = token["name"]
                if contentModelFlag != "PCDATA":
                    raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name})
                if not isinstance(name, str):
                    raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
                if not name:
                    raise LintError(_("Empty tag name"))
                if type == "StartTag" and name in voidElements:
                    raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name})
                elif type == "EmptyTag" and name not in voidElements:
                    raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]})
                if type == "StartTag":
                    open_elements.append(name)
                for name, value in token["data"]:
                    if not isinstance(name, str):
                        raise LintError(_("Attribute name is not a string: %(name)r") % {"name": name})
                    if not name:
                        raise LintError(_("Empty attribute name"))
                    if not isinstance(value, str):
                        raise LintError(_("Attribute value is not a string: %(value)r") % {"value": value})
                if name in cdataElements:
                    contentModelFlag = "CDATA"
                elif name in rcdataElements:
                    contentModelFlag = "RCDATA"
                elif name == "plaintext":
                    contentModelFlag = "PLAINTEXT"

            elif type == "EndTag":
                name = token["name"]
                if not isinstance(name, str):
                    raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
                if not name:
                    raise LintError(_("Empty tag name"))
                if name in voidElements:
                    raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name})
                start_name = open_elements.pop()
                if start_name != name:
                    raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name})
                contentModelFlag = "PCDATA"

            elif type == "Comment":
                if contentModelFlag != "PCDATA":
                    raise LintError(_("Comment not in PCDATA content model flag"))

            elif type in ("Characters", "SpaceCharacters"):
                data = token["data"]
                if not isinstance(data, str):
                    raise LintError(_("Attribute name is not a string: %(name)r") % {"name": data})
                if not data:
                    raise LintError(_("%(type)s token with empty data") % {"type": type})
                if type == "SpaceCharacters":
                    data = data.strip(spaceCharacters)
                    if data:
                        raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data})

            elif type == "Doctype":
                name = token["name"]
                if contentModelFlag != "PCDATA":
                    raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name})
                if not isinstance(name, str):
                    raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
                # XXX: what to do with token["data"] ?

            elif type in ("ParseError", "SerializeError"):
                pass

            else:
                raise LintError(_("Unknown token type: %(type)s") % {"type": type})

            yield token
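# --- Illustrative usage sketch (editor's addition, not part of the original
# module): the lint Filter wraps any html5lib token stream and re-yields its
# tokens, raising LintError when an invariant is violated. The parse/treewalker
# helpers named below are assumptions about the surrounding html5lib package,
# not something this file imports.
#
#     import html5lib
#     from html5lib import getTreeWalker
#
#     doc = html5lib.parse("<p class='x'>hi</p>")
#     walker = getTreeWalker("etree")
#     for token in Filter(walker(doc)):
#         pass  # consuming the stream triggers the checks above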
gpl-2.0
yoki/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/steps/obsoletepatches.py
125
2318
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import logging

from webkitpy.tool.grammar import pluralize
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options

_log = logging.getLogger(__name__)


class ObsoletePatches(AbstractStep):
    @classmethod
    def options(cls):
        return AbstractStep.options() + [
            Options.obsolete_patches,
        ]

    def run(self, state):
        if not self._options.obsolete_patches:
            return
        bug_id = state["bug_id"]
        patches = self._tool.bugs.fetch_bug(bug_id).patches()
        if not patches:
            return
        _log.info("Obsoleting %s on bug %s" % (pluralize("old patch", len(patches)), bug_id))
        for patch in patches:
            self._tool.bugs.obsolete_attachment(patch.id())
bsd-3-clause
Yawning/obfsproxy-wfpadtools
obfsproxy/transports/scramblesuit/fifobuf.py
17
3237
""" Provides an interface for a fast FIFO buffer. The interface implements only 'read()', 'write()' and 'len()'. The implementation below is a modified version of the code originally written by Ben Timby: http://ben.timby.com/?p=139 """ try: from cStringIO import StringIO except ImportError: from StringIO import StringIO MAX_BUFFER = 1024**2*4 class Buffer( object ): """ Implements a fast FIFO buffer. Internally, the buffer consists of a list of StringIO objects. New StringIO objects are added and delete as data is written to and read from the FIFO buffer. """ def __init__( self, max_size=MAX_BUFFER ): """ Initialise a Buffer object. """ self.buffers = [] self.max_size = max_size self.read_pos = 0 self.write_pos = 0 def write( self, data ): """ Write `data' to the FIFO buffer. If necessary, a new internal buffer is created. """ # Add a StringIO buffer if none exists yet. if not self.buffers: self.buffers.append(StringIO()) self.write_pos = 0 lastBuf = self.buffers[-1] lastBuf.seek(self.write_pos) lastBuf.write(data) # If we are over the limit, a new internal buffer is created. if lastBuf.tell() >= self.max_size: lastBuf = StringIO() self.buffers.append(lastBuf) self.write_pos = lastBuf.tell() def read( self, length=-1 ): """ Read `length' elements of the FIFO buffer. Drained data is automatically deleted. """ read_buf = StringIO() remaining = length while True: if not self.buffers: break firstBuf = self.buffers[0] firstBuf.seek(self.read_pos) read_buf.write(firstBuf.read(remaining)) self.read_pos = firstBuf.tell() if length == -1: # We did not limit the read, we exhausted the buffer, so delete # it. Keep reading from the remaining buffers. del self.buffers[0] self.read_pos = 0 else: # We limited the read so either we exhausted the buffer or not. remaining = length - read_buf.tell() if remaining > 0: # Exhausted, remove buffer, read more. Keep reading from # remaining buffers. del self.buffers[0] self.read_pos = 0 else: # Did not exhaust buffer, but read all that was requested. # Break to stop reading and return data of requested # length. break return read_buf.getvalue() def __len__(self): """ Return the length of the Buffer object. """ length = 0 for buf in self.buffers: # Jump to the end of the internal buffer. buf.seek(0, 2) if buf == self.buffers[0]: length += buf.tell() - self.read_pos else: length += buf.tell() return length
bsd-3-clause
sachintaware/sublime-wakatime
packages/wakatime/packages/pygments_py2/pygments/formatters/svg.py
76
5840
# -*- coding: utf-8 -*-
"""
    pygments.formatters.svg
    ~~~~~~~~~~~~~~~~~~~~~~~

    Formatter for SVG output.

    :copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""

from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt

__all__ = ['SvgFormatter']


def escape_html(text):
    """Escape &, <, > as well as single and double quotes for HTML."""
    return text.replace('&', '&amp;'). \
                replace('<', '&lt;'). \
                replace('>', '&gt;'). \
                replace('"', '&quot;'). \
                replace("'", '&#39;')


class2style = {}

class SvgFormatter(Formatter):
    """
    Format tokens as an SVG graphics file.  This formatter is still
    experimental.  Each line of code is a ``<text>`` element with explicit
    ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the
    individual token styles.

    By default, this formatter outputs a full SVG document including doctype
    declaration and the ``<svg>`` root element.

    .. versionadded:: 0.9

    Additional options accepted:

    `nowrap`
        Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
        don't add a XML declaration and a doctype.  If true, the `fontfamily`
        and `fontsize` options are ignored.  Defaults to ``False``.

    `fontfamily`
        The value to give the wrapping ``<g>`` element's ``font-family``
        attribute, defaults to ``"monospace"``.

    `fontsize`
        The value to give the wrapping ``<g>`` element's ``font-size``
        attribute, defaults to ``"14px"``.

    `xoffset`
        Starting offset in X direction, defaults to ``0``.

    `yoffset`
        Starting offset in Y direction, defaults to the font size if it is
        given in pixels, or ``20`` else.  (This is necessary since text
        coordinates refer to the text baseline, not the top edge.)

    `ystep`
        Offset to add to the Y coordinate for each subsequent line.  This
        should roughly be the text size plus 5.  It defaults to that value if
        the text size is given in pixels, or ``25`` else.

    `spacehack`
        Convert spaces in the source to ``&#160;``, which are non-breaking
        spaces.  SVG provides the ``xml:space`` attribute to control how
        whitespace inside tags is handled, in theory, the ``preserve`` value
        could be used to keep all whitespace as-is.  However, many current SVG
        viewers don't obey that rule, so this option is provided as a
        workaround and defaults to ``True``.
    """
    name = 'SVG'
    aliases = ['svg']
    filenames = ['*.svg']

    def __init__(self, **options):
        Formatter.__init__(self, **options)
        self.nowrap = get_bool_opt(options, 'nowrap', False)
        self.fontfamily = options.get('fontfamily', 'monospace')
        self.fontsize = options.get('fontsize', '14px')
        self.xoffset = get_int_opt(options, 'xoffset', 0)
        fs = self.fontsize.strip()
        if fs.endswith('px'):
            fs = fs[:-2].strip()
        try:
            int_fs = int(fs)
        except:
            int_fs = 20
        self.yoffset = get_int_opt(options, 'yoffset', int_fs)
        self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
        self.spacehack = get_bool_opt(options, 'spacehack', True)
        self._stylecache = {}

    def format_unencoded(self, tokensource, outfile):
        """
        Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
        tuples and write it into ``outfile``.

        For our implementation we put all lines in their own 'line group'.
        """
        x = self.xoffset
        y = self.yoffset
        if not self.nowrap:
            if self.encoding:
                outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
                              self.encoding)
            else:
                outfile.write('<?xml version="1.0"?>\n')
            outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
                          '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
                          'svg10.dtd">\n')
            outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
            outfile.write('<g font-family="%s" font-size="%s">\n' %
                          (self.fontfamily, self.fontsize))
        outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (x, y))
        for ttype, value in tokensource:
            style = self._get_style(ttype)
            tspan = style and '<tspan' + style + '>' or ''
            tspanend = tspan and '</tspan>' or ''
            value = escape_html(value)
            if self.spacehack:
                value = value.expandtabs().replace(' ', '&#160;')
            parts = value.split('\n')
            for part in parts[:-1]:
                outfile.write(tspan + part + tspanend)
                y += self.ystep
                outfile.write('</text>\n<text x="%s" y="%s" '
                              'xml:space="preserve">' % (x, y))
            outfile.write(tspan + parts[-1] + tspanend)
        outfile.write('</text>')

        if not self.nowrap:
            outfile.write('</g></svg>\n')

    def _get_style(self, tokentype):
        if tokentype in self._stylecache:
            return self._stylecache[tokentype]
        otokentype = tokentype
        while not self.style.styles_token(tokentype):
            tokentype = tokentype.parent
        value = self.style.style_for_token(tokentype)
        result = ''
        if value['color']:
            result = ' fill="#' + value['color'] + '"'
        if value['bold']:
            result += ' font-weight="bold"'
        if value['italic']:
            result += ' font-style="italic"'
        self._stylecache[otokentype] = result
        return result
bsd-3-clause
impowski/servo
components/script/dom/bindings/codegen/parser/tests/test_typedef.py
154
1880
def WebIDLTest(parser, harness):
    parser.parse("""
      typedef long mylong;
      typedef long? mynullablelong;
      interface Foo {
        const mylong X = 5;
        const mynullablelong Y = 7;
        const mynullablelong Z = null;
        void foo(mylong arg);
      };
    """)
    results = parser.finish()

    harness.check(results[2].members[1].type.name, "LongOrNull",
                  "Should expand typedefs")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
          typedef long? mynullablelong;
          interface Foo {
            void foo(mynullablelong? Y);
          };
        """)
        results = parser.finish()
    except:
        threw = True
    harness.ok(threw, "Should have thrown on nullable inside nullable arg.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
          typedef long? mynullablelong;
          interface Foo {
            const mynullablelong? X = 5;
          };
        """)
        results = parser.finish()
    except:
        threw = True
    harness.ok(threw, "Should have thrown on nullable inside nullable const.")

    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
          interface Foo {
            const mynullablelong? X = 5;
          };
          typedef long? mynullablelong;
        """)
        results = parser.finish()
    except:
        threw = True
    harness.ok(threw,
               "Should have thrown on nullable inside nullable const typedef "
               "after interface.")

    parser = parser.reset()
    parser.parse("""
      interface Foo {
        const mylong X = 5;
      };
      typedef long mylong;
    """)
    results = parser.finish()

    harness.check(results[0].members[0].type.name, "Long",
                  "Should expand typedefs that come before interface")
mpl-2.0
jroyal/plexpy
plexpy/config.py
1
13229
import plexpy.logger import itertools import os import re from configobj import ConfigObj def bool_int(value): """ Casts a config value into a 0 or 1 """ if isinstance(value, basestring): if value.lower() in ('', '0', 'false', 'f', 'no', 'n', 'off'): value = 0 return int(bool(value)) _CONFIG_DEFINITIONS = { 'DATE_FORMAT': (str, 'General', 'YYYY-MM-DD'), 'GROUPING_GLOBAL_HISTORY': (int, 'PlexWatch', 0), 'GROUPING_USER_HISTORY': (int, 'PlexWatch', 0), 'GROUPING_CHARTS': (int, 'PlexWatch', 0), 'PLEXWATCH_DATABASE': (str, 'PlexWatch', ''), 'PMS_IDENTIFIER': (str, 'PMS', ''), 'PMS_IP': (str, 'PMS', '127.0.0.1'), 'PMS_IS_REMOTE': (int, 'PMS', 0), 'PMS_LOGS_FOLDER': (str, 'PMS', ''), 'PMS_PORT': (int, 'PMS', 32400), 'PMS_TOKEN': (str, 'PMS', ''), 'PMS_SSL': (int, 'General', 0), 'PMS_URL': (str, 'PMS', ''), 'PMS_USE_BIF': (int, 'PMS', 0), 'PMS_UUID': (str, 'PMS', ''), 'TIME_FORMAT': (str, 'General', 'HH:mm'), 'API_ENABLED': (int, 'General', 0), 'API_KEY': (str, 'General', ''), 'BOXCAR_ENABLED': (int, 'Boxcar', 0), 'BOXCAR_TOKEN': (str, 'Boxcar', ''), 'BOXCAR_ON_PLAY': (int, 'Boxcar', 0), 'BOXCAR_ON_STOP': (int, 'Boxcar', 0), 'BOXCAR_ON_PAUSE': (int, 'Boxcar', 0), 'BOXCAR_ON_RESUME': (int, 'Boxcar', 0), 'BOXCAR_ON_BUFFER': (int, 'Boxcar', 0), 'BOXCAR_ON_WATCHED': (int, 'Boxcar', 0), 'BUFFER_THRESHOLD': (int, 'Monitoring', 3), 'BUFFER_WAIT': (int, 'Monitoring', 900), 'CACHE_DIR': (str, 'General', ''), 'CACHE_SIZEMB': (int, 'Advanced', 32), 'CHECK_GITHUB': (int, 'General', 1), 'CHECK_GITHUB_INTERVAL': (int, 'General', 360), 'CHECK_GITHUB_ON_STARTUP': (int, 'General', 1), 'CLEANUP_FILES': (int, 'General', 0), 'CONFIG_VERSION': (str, 'General', '0'), 'DO_NOT_OVERRIDE_GIT_BRANCH': (int, 'General', 0), 'EMAIL_ENABLED': (int, 'Email', 0), 'EMAIL_FROM': (str, 'Email', ''), 'EMAIL_TO': (str, 'Email', ''), 'EMAIL_SMTP_SERVER': (str, 'Email', ''), 'EMAIL_SMTP_USER': (str, 'Email', ''), 'EMAIL_SMTP_PASSWORD': (str, 'Email', ''), 'EMAIL_SMTP_PORT': (int, 'Email', 25), 'EMAIL_TLS': (int, 'Email', 0), 'EMAIL_ON_PLAY': (int, 'Email', 0), 'EMAIL_ON_STOP': (int, 'Email', 0), 'EMAIL_ON_PAUSE': (int, 'Email', 0), 'EMAIL_ON_RESUME': (int, 'Email', 0), 'EMAIL_ON_BUFFER': (int, 'Email', 0), 'EMAIL_ON_WATCHED': (int, 'Email', 0), 'ENABLE_HTTPS': (int, 'General', 0), 'FIRST_RUN_COMPLETE': (int, 'General', 0), 'FREEZE_DB': (int, 'General', 0), 'GIT_BRANCH': (str, 'General', 'master'), 'GIT_PATH': (str, 'General', ''), 'GIT_USER': (str, 'General', 'drzoidberg33'), 'GROWL_ENABLED': (int, 'Growl', 0), 'GROWL_HOST': (str, 'Growl', ''), 'GROWL_PASSWORD': (str, 'Growl', ''), 'GROWL_ON_PLAY': (int, 'Growl', 0), 'GROWL_ON_STOP': (int, 'Growl', 0), 'GROWL_ON_PAUSE': (int, 'Growl', 0), 'GROWL_ON_RESUME': (int, 'Growl', 0), 'GROWL_ON_BUFFER': (int, 'Growl', 0), 'GROWL_ON_WATCHED': (int, 'Growl', 0), 'HOME_STATS_LENGTH': (int, 'General', 30), 'HTTPS_CERT': (str, 'General', ''), 'HTTPS_KEY': (str, 'General', ''), 'HTTP_HOST': (str, 'General', '0.0.0.0'), 'HTTP_PASSWORD': (str, 'General', ''), 'HTTP_PORT': (int, 'General', 8181), 'HTTP_PROXY': (int, 'General', 0), 'HTTP_ROOT': (str, 'General', '/'), 'HTTP_USERNAME': (str, 'General', ''), 'INTERFACE': (str, 'General', 'default'), 'IP_LOGGING_ENABLE': (int, 'General', 0), 'JOURNAL_MODE': (str, 'Advanced', 'wal'), 'LAUNCH_BROWSER': (int, 'General', 1), 'LOG_DIR': (str, 'General', ''), 'LOGGING_IGNORE_INTERVAL': (int, 'Monitoring', 120), 'MOVIE_NOTIFY_ENABLE': (int, 'Monitoring', 0), 'MOVIE_NOTIFY_ON_START': (int, 'Monitoring', 1), 'MOVIE_NOTIFY_ON_STOP': (int, 'Monitoring', 0), 
'MOVIE_NOTIFY_ON_PAUSE': (int, 'Monitoring', 0), 'MUSIC_NOTIFY_ENABLE': (int, 'Monitoring', 0), 'MUSIC_NOTIFY_ON_START': (int, 'Monitoring', 1), 'MUSIC_NOTIFY_ON_STOP': (int, 'Monitoring', 0), 'MUSIC_NOTIFY_ON_PAUSE': (int, 'Monitoring', 0), 'MUSIC_LOGGING_ENABLE': (int, 'Monitoring', 0), 'MONITORING_INTERVAL': (int, 'Monitoring', 60), 'NMA_APIKEY': (str, 'NMA', ''), 'NMA_ENABLED': (int, 'NMA', 0), 'NMA_PRIORITY': (int, 'NMA', 0), 'NMA_ON_PLAY': (int, 'NMA', 0), 'NMA_ON_STOP': (int, 'NMA', 0), 'NMA_ON_PAUSE': (int, 'NMA', 0), 'NMA_ON_RESUME': (int, 'NMA', 0), 'NMA_ON_BUFFER': (int, 'NMA', 0), 'NMA_ON_WATCHED': (int, 'NMA', 0), 'NOTIFY_WATCHED_PERCENT': (int, 'Monitoring', 85), 'NOTIFY_ON_START_SUBJECT_TEXT': (str, 'Monitoring', 'PlexPy ({server_name})'), 'NOTIFY_ON_START_BODY_TEXT': (str, 'Monitoring', '{user} ({player}) started playing {title}.'), 'NOTIFY_ON_STOP_SUBJECT_TEXT': (str, 'Monitoring', 'PlexPy ({server_name})'), 'NOTIFY_ON_STOP_BODY_TEXT': (str, 'Monitoring', '{user} ({player}) has stopped {title}.'), 'NOTIFY_ON_PAUSE_SUBJECT_TEXT': (str, 'Monitoring', 'PlexPy ({server_name})'), 'NOTIFY_ON_PAUSE_BODY_TEXT': (str, 'Monitoring', '{user} ({player}) has paused {title}.'), 'NOTIFY_ON_RESUME_SUBJECT_TEXT': (str, 'Monitoring', 'PlexPy ({server_name})'), 'NOTIFY_ON_RESUME_BODY_TEXT': (str, 'Monitoring', '{user} ({player}) has resumed {title}.'), 'NOTIFY_ON_BUFFER_SUBJECT_TEXT': (str, 'Monitoring', 'PlexPy ({server_name})'), 'NOTIFY_ON_BUFFER_BODY_TEXT': (str, 'Monitoring', '{user} ({player}) is buffering {title}.'), 'NOTIFY_ON_WATCHED_SUBJECT_TEXT': (str, 'Monitoring', 'PlexPy ({server_name})'), 'NOTIFY_ON_WATCHED_BODY_TEXT': (str, 'Monitoring', '{user} ({player}) has watched {title}.'), 'OSX_NOTIFY_APP': (str, 'OSX_Notify', '/Applications/PlexPy'), 'OSX_NOTIFY_ENABLED': (int, 'OSX_Notify', 0), 'OSX_NOTIFY_ON_PLAY': (int, 'OSX_Notify', 0), 'OSX_NOTIFY_ON_STOP': (int, 'OSX_Notify', 0), 'OSX_NOTIFY_ON_PAUSE': (int, 'OSX_Notify', 0), 'OSX_NOTIFY_ON_RESUME': (int, 'OSX_Notify', 0), 'OSX_NOTIFY_ON_BUFFER': (int, 'OSX_Notify', 0), 'OSX_NOTIFY_ON_WATCHED': (int, 'OSX_Notify', 0), 'PLEX_CLIENT_HOST': (str, 'Plex', ''), 'PLEX_ENABLED': (int, 'Plex', 0), 'PLEX_PASSWORD': (str, 'Plex', ''), 'PLEX_USERNAME': (str, 'Plex', ''), 'PLEX_ON_PLAY': (int, 'Plex', 0), 'PLEX_ON_STOP': (int, 'Plex', 0), 'PLEX_ON_PAUSE': (int, 'Plex', 0), 'PLEX_ON_RESUME': (int, 'Plex', 0), 'PLEX_ON_BUFFER': (int, 'Plex', 0), 'PLEX_ON_WATCHED': (int, 'Plex', 0), 'PROWL_ENABLED': (int, 'Prowl', 0), 'PROWL_KEYS': (str, 'Prowl', ''), 'PROWL_PRIORITY': (int, 'Prowl', 0), 'PROWL_ON_PLAY': (int, 'Prowl', 0), 'PROWL_ON_STOP': (int, 'Prowl', 0), 'PROWL_ON_PAUSE': (int, 'Prowl', 0), 'PROWL_ON_RESUME': (int, 'Prowl', 0), 'PROWL_ON_BUFFER': (int, 'Prowl', 0), 'PROWL_ON_WATCHED': (int, 'Prowl', 0), 'PUSHALOT_APIKEY': (str, 'Pushalot', ''), 'PUSHALOT_ENABLED': (int, 'Pushalot', 0), 'PUSHALOT_ON_PLAY': (int, 'Pushalot', 0), 'PUSHALOT_ON_STOP': (int, 'Pushalot', 0), 'PUSHALOT_ON_PAUSE': (int, 'Pushalot', 0), 'PUSHALOT_ON_RESUME': (int, 'Pushalot', 0), 'PUSHALOT_ON_BUFFER': (int, 'Pushalot', 0), 'PUSHALOT_ON_WATCHED': (int, 'Pushalot', 0), 'PUSHBULLET_APIKEY': (str, 'PushBullet', ''), 'PUSHBULLET_DEVICEID': (str, 'PushBullet', ''), 'PUSHBULLET_CHANNEL_TAG': (str, 'PushBullet', ''), 'PUSHBULLET_ENABLED': (int, 'PushBullet', 0), 'PUSHBULLET_ON_PLAY': (int, 'PushBullet', 0), 'PUSHBULLET_ON_STOP': (int, 'PushBullet', 0), 'PUSHBULLET_ON_PAUSE': (int, 'PushBullet', 0), 'PUSHBULLET_ON_RESUME': (int, 'PushBullet', 0), 'PUSHBULLET_ON_BUFFER': (int, 
'PushBullet', 0), 'PUSHBULLET_ON_WATCHED': (int, 'PushBullet', 0), 'PUSHOVER_APITOKEN': (str, 'Pushover', ''), 'PUSHOVER_ENABLED': (int, 'Pushover', 0), 'PUSHOVER_KEYS': (str, 'Pushover', ''), 'PUSHOVER_PRIORITY': (int, 'Pushover', 0), 'PUSHOVER_ON_PLAY': (int, 'Pushover', 0), 'PUSHOVER_ON_STOP': (int, 'Pushover', 0), 'PUSHOVER_ON_PAUSE': (int, 'Pushover', 0), 'PUSHOVER_ON_RESUME': (int, 'Pushover', 0), 'PUSHOVER_ON_BUFFER': (int, 'Pushover', 0), 'PUSHOVER_ON_WATCHED': (int, 'Pushover', 0), 'REFRESH_USERS_INTERVAL': (int, 'Monitoring', 12), 'REFRESH_USERS_ON_STARTUP': (int, 'Monitoring', 1), 'TV_NOTIFY_ENABLE': (int, 'Monitoring', 0), 'TV_NOTIFY_ON_START': (int, 'Monitoring', 1), 'TV_NOTIFY_ON_STOP': (int, 'Monitoring', 0), 'TV_NOTIFY_ON_PAUSE': (int, 'Monitoring', 0), 'TWITTER_ENABLED': (int, 'Twitter', 0), 'TWITTER_PASSWORD': (str, 'Twitter', ''), 'TWITTER_PREFIX': (str, 'Twitter', 'Headphones'), 'TWITTER_USERNAME': (str, 'Twitter', ''), 'UPDATE_DB_INTERVAL': (int, 'General', 24), 'VERIFY_SSL_CERT': (bool_int, 'Advanced', 1), 'VIDEO_LOGGING_ENABLE': (int, 'Monitoring', 1), 'XBMC_ENABLED': (int, 'XBMC', 0), 'XBMC_HOST': (str, 'XBMC', ''), 'XBMC_PASSWORD': (str, 'XBMC', ''), 'XBMC_USERNAME': (str, 'XBMC', ''), 'XBMC_ON_PLAY': (int, 'XBMC', 0), 'XBMC_ON_STOP': (int, 'XBMC', 0), 'XBMC_ON_PAUSE': (int, 'XBMC', 0), 'XBMC_ON_RESUME': (int, 'XBMC', 0), 'XBMC_ON_BUFFER': (int, 'XBMC', 0), 'XBMC_ON_WATCHED': (int, 'XBMC', 0) } # pylint:disable=R0902 # it might be nice to refactor for fewer instance variables class Config(object): """ Wraps access to particular values in a config file """ def __init__(self, config_file): """ Initialize the config with values from a file """ self._config_file = config_file self._config = ConfigObj(self._config_file, encoding='utf-8') for key in _CONFIG_DEFINITIONS.keys(): self.check_setting(key) def _define(self, name): key = name.upper() ini_key = name.lower() definition = _CONFIG_DEFINITIONS[key] if len(definition) == 3: definition_type, section, default = definition else: definition_type, section, _, default = definition return key, definition_type, section, ini_key, default def check_section(self, section): """ Check if INI section exists, if not create it """ if section not in self._config: self._config[section] = {} return True else: return False def check_setting(self, key): """ Cast any value in the config to the right type or use the default """ key, definition_type, section, ini_key, default = self._define(key) self.check_section(section) try: my_val = definition_type(self._config[section][ini_key]) except Exception: my_val = definition_type(default) self._config[section][ini_key] = my_val return my_val def write(self): """ Make a copy of the stored config and write it to the configured file """ new_config = ConfigObj(encoding="UTF-8") new_config.filename = self._config_file # first copy over everything from the old config, even if it is not # correctly defined to keep from losing data for key, subkeys in self._config.items(): if key not in new_config: new_config[key] = {} for subkey, value in subkeys.items(): new_config[key][subkey] = value # next make sure that everything we expect to have defined is so for key in _CONFIG_DEFINITIONS.keys(): key, definition_type, section, ini_key, default = self._define(key) self.check_setting(key) if section not in new_config: new_config[section] = {} new_config[section][ini_key] = self._config[section][ini_key] # Write it to file plexpy.logger.info("Writing configuration to file") try: new_config.write() except IOError 
as e: plexpy.logger.error("Error writing configuration file: %s", e) def __getattr__(self, name): """ Returns something from the ini unless it is a real property of the configuration object or is not all caps. """ if not re.match(r'[A-Z_]+$', name): return super(Config, self).__getattr__(name) else: return self.check_setting(name) def __setattr__(self, name, value): """ Maps all-caps properties to ini values unless they exist on the configuration object. """ if not re.match(r'[A-Z_]+$', name): super(Config, self).__setattr__(name, value) return value else: key, definition_type, section, ini_key, default = self._define(name) self._config[section][ini_key] = definition_type(value) return self._config[section][ini_key] def process_kwargs(self, kwargs): """ Given a big bunch of key value pairs, apply them to the ini. """ for name, value in kwargs.items(): key, definition_type, section, ini_key, default = self._define(name) self._config[section][ini_key] = definition_type(value)
gpl-3.0
whodunnit/AK-OnePlusOne-CM
tools/perf/scripts/python/sctop.py
11180
1924
# system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall.  If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds.  The default interval is
# 3 seconds.

import os, sys, thread, time

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s sctop.py [comm] [interval]\n";

for_comm = None
default_interval = 3
interval = default_interval

if len(sys.argv) > 3:
    sys.exit(usage)

if len(sys.argv) > 2:
    for_comm = sys.argv[1]
    interval = int(sys.argv[2])
elif len(sys.argv) > 1:
    try:
        interval = int(sys.argv[1])
    except ValueError:
        for_comm = sys.argv[1]
        interval = default_interval

syscalls = autodict()

def trace_begin():
    thread.start_new_thread(print_syscall_totals, (interval,))
    pass

def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    if for_comm is not None:
        if common_comm != for_comm:
            return
    try:
        syscalls[id] += 1
    except TypeError:
        syscalls[id] = 1

def print_syscall_totals(interval):
    while 1:
        clear_term()
        if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
        else:
            print "\nsyscall events:\n\n",

        print "%-40s %10s\n" % ("event", "count"),
        print "%-40s %10s\n" % ("----------------------------------------", \
                                "----------"),

        for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
                              reverse = True):
            try:
                print "%-40s %10d\n" % (syscall_name(id), val),
            except TypeError:
                pass
        syscalls.clear()
        time.sleep(interval)
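# --- Illustrative invocations (editor's addition, not part of the original
# script), following the usage string and argument handling above:
#
#     perf script -s sctop.py               # all comms, 3 second refresh
#     perf script -s sctop.py 10            # all comms, 10 second refresh
#     perf script -s sctop.py firefox 5     # only syscalls made by 'firefox'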
gpl-2.0
idjaw/horizon
openstack_dashboard/dashboards/project/networks/urls.py
65
1987
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url

from openstack_dashboard.dashboards.project.networks.ports \
    import urls as port_urls
from openstack_dashboard.dashboards.project.networks.ports \
    import views as port_views
from openstack_dashboard.dashboards.project.networks.subnets \
    import urls as subnet_urls
from openstack_dashboard.dashboards.project.networks.subnets \
    import views as subnet_views
from openstack_dashboard.dashboards.project.networks import views


NETWORKS = r'^(?P<network_id>[^/]+)/%s$'


urlpatterns = patterns(
    '',
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^create$', views.CreateView.as_view(), name='create'),
    url(NETWORKS % 'detail', views.DetailView.as_view(), name='detail'),
    url(NETWORKS % 'update', views.UpdateView.as_view(), name='update'),
    url(NETWORKS % 'subnets/create',
        subnet_views.CreateView.as_view(), name='addsubnet'),
    url(r'^(?P<network_id>[^/]+)/subnets/(?P<subnet_id>[^/]+)/update$',
        subnet_views.UpdateView.as_view(), name='editsubnet'),
    url(r'^(?P<network_id>[^/]+)/ports/(?P<port_id>[^/]+)/update$',
        port_views.UpdateView.as_view(), name='editport'),
    url(r'^subnets/', include(subnet_urls, namespace='subnets')),
    url(r'^ports/', include(port_urls, namespace='ports')))
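# --- Illustrative reverse() lookups (editor's addition, not part of the
# original module).  The 'horizon:project:networks' namespace prefix is an
# assumption about how the enclosing dashboard registers this urlconf; only
# the trailing names ('index', 'detail', 'addsubnet') come from the patterns
# above.
#
#     from django.core.urlresolvers import reverse
#
#     reverse('horizon:project:networks:index')
#     reverse('horizon:project:networks:detail', args=[network_id])
#     reverse('horizon:project:networks:addsubnet', args=[network_id])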
apache-2.0
uber/pyodbc
tests2/freetdstests.py
22
46182
#!/usr/bin/python # -*- coding: latin-1 -*- usage = """\ usage: %prog [options] connection_string Unit tests for FreeTDS / SQL Server. To use, pass a connection string as the parameter. The tests will create and drop tables t1 and t2 as necessary. These run using the version from the 'build' directory, not the version installed into the Python directories. You must run python setup.py build before running the tests. You can also put the connection string into tmp/setup.cfg like so: [freetdstests] connection-string=DSN=xyz;UID=test;PWD=test """ import sys, os, re import unittest from decimal import Decimal from datetime import datetime, date, time from os.path import join, getsize, dirname, abspath from testutils import * _TESTSTR = '0123456789-abcdefghijklmnopqrstuvwxyz-' def _generate_test_string(length): """ Returns a string of `length` characters, constructed by repeating _TESTSTR as necessary. To enhance performance, there are 3 ways data is read, based on the length of the value, so most data types are tested with 3 lengths. This function helps us generate the test data. We use a recognizable data set instead of a single character to make it less likely that "overlap" errors will be hidden and to help us manually identify where a break occurs. """ if length <= len(_TESTSTR): return _TESTSTR[:length] c = (length + len(_TESTSTR)-1) / len(_TESTSTR) v = _TESTSTR * c return v[:length] class FreeTDSTestCase(unittest.TestCase): SMALL_FENCEPOST_SIZES = [ 0, 1, 255, 256, 510, 511, 512, 1023, 1024, 2047, 2048, 4000 ] LARGE_FENCEPOST_SIZES = [ 4095, 4096, 4097, 10 * 1024, 20 * 1024 ] ANSI_FENCEPOSTS = [ _generate_test_string(size) for size in SMALL_FENCEPOST_SIZES ] UNICODE_FENCEPOSTS = [ unicode(s) for s in ANSI_FENCEPOSTS ] IMAGE_FENCEPOSTS = ANSI_FENCEPOSTS + [ _generate_test_string(size) for size in LARGE_FENCEPOST_SIZES ] def __init__(self, method_name, connection_string): unittest.TestCase.__init__(self, method_name) self.connection_string = connection_string def get_sqlserver_version(self): """ Returns the major version: 8-->2000, 9-->2005, 10-->2008 """ self.cursor.execute("exec master..xp_msver 'ProductVersion'") row = self.cursor.fetchone() return int(row.Character_Value.split('.', 1)[0]) def setUp(self): self.cnxn = pyodbc.connect(self.connection_string) self.cursor = self.cnxn.cursor() for i in range(3): try: self.cursor.execute("drop table t%d" % i) self.cnxn.commit() except: pass for i in range(3): try: self.cursor.execute("drop procedure proc%d" % i) self.cnxn.commit() except: pass try: self.cursor.execute('drop function func1') self.cnxn.commit() except: pass self.cnxn.rollback() def tearDown(self): try: self.cursor.close() self.cnxn.close() except: # If we've already closed the cursor or connection, exceptions are thrown. 
pass def test_binary_type(self): if sys.hexversion >= 0x02060000: self.assertIs(pyodbc.BINARY, bytearray) else: self.assertIs(pyodbc.BINARY, buffer) def test_multiple_bindings(self): "More than one bind and select on a cursor" self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t1 values (?)", 2) self.cursor.execute("insert into t1 values (?)", 3) for i in range(3): self.cursor.execute("select n from t1 where n < ?", 10) self.cursor.execute("select n from t1 where n < 3") def test_different_bindings(self): self.cursor.execute("create table t1(n int)") self.cursor.execute("create table t2(d datetime)") self.cursor.execute("insert into t1 values (?)", 1) self.cursor.execute("insert into t2 values (?)", datetime.now()) def test_datasources(self): p = pyodbc.dataSources() self.assert_(isinstance(p, dict)) def test_getinfo_string(self): value = self.cnxn.getinfo(pyodbc.SQL_CATALOG_NAME_SEPARATOR) self.assert_(isinstance(value, str)) def test_getinfo_bool(self): value = self.cnxn.getinfo(pyodbc.SQL_ACCESSIBLE_TABLES) self.assert_(isinstance(value, bool)) def test_getinfo_int(self): value = self.cnxn.getinfo(pyodbc.SQL_DEFAULT_TXN_ISOLATION) self.assert_(isinstance(value, (int, long))) def test_getinfo_smallint(self): value = self.cnxn.getinfo(pyodbc.SQL_CONCAT_NULL_BEHAVIOR) self.assert_(isinstance(value, int)) def test_noscan(self): self.assertEqual(self.cursor.noscan, False) self.cursor.noscan = True self.assertEqual(self.cursor.noscan, True) def test_guid(self): self.cursor.execute("create table t1(g1 uniqueidentifier)") self.cursor.execute("insert into t1 values (newid())") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(len(v), 36) def test_nextset(self): self.cursor.execute("create table t1(i int)") for i in range(4): self.cursor.execute("insert into t1(i) values(?)", i) self.cursor.execute("select i from t1 where i < 2 order by i; select i from t1 where i >= 2 order by i") for i, row in enumerate(self.cursor): self.assertEqual(i, row.i) self.assertEqual(self.cursor.nextset(), True) for i, row in enumerate(self.cursor): self.assertEqual(i + 2, row.i) def test_fixed_unicode(self): value = u"t\xebsting" self.cursor.execute("create table t1(s nchar(7))") self.cursor.execute("insert into t1 values(?)", u"t\xebsting") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), unicode) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL self.assertEqual(v, value) def _test_strtype(self, sqltype, value, resulttype=None, colsize=None): """ The implementation for string, Unicode, and binary tests. """ assert colsize is None or isinstance(colsize, int), colsize assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype if resulttype is None: resulttype = type(value) self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), resulttype) if value is not None: self.assertEqual(len(v), len(value)) # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before # comparing. 
if type(value) is not resulttype: value = resulttype(value) self.assertEqual(v, value) def _test_strliketype(self, sqltype, value, resulttype=None, colsize=None): """ The implementation for text, image, ntext, and binary. These types do not support comparison operators. """ assert colsize is None or isinstance(colsize, int), colsize assert colsize is None or (value is None or colsize >= len(value)) if colsize: sql = "create table t1(s %s(%s))" % (sqltype, colsize) else: sql = "create table t1(s %s)" % sqltype if resulttype is None: resulttype = type(value) self.cursor.execute(sql) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), resulttype) if value is not None: self.assertEqual(len(v), len(value)) # To allow buffer --> db --> bytearray tests, always convert the input to the expected result type before # comparing. if type(value) is not resulttype: value = resulttype(value) self.assertEqual(v, value) # # varchar # def test_varchar_null(self): self._test_strtype('varchar', None, colsize=100) # Generate a test for each fencepost size: test_varchar_0, etc. def _maketest(value): def t(self): self._test_strtype('varchar', value, colsize=len(value)) return t for value in ANSI_FENCEPOSTS: locals()['test_varchar_%s' % len(value)] = _maketest(value) def test_varchar_many(self): self.cursor.execute("create table t1(c1 varchar(300), c2 varchar(300), c3 varchar(300))") v1 = 'ABCDEFGHIJ' * 30 v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("insert into t1(c1, c2, c3) values (?,?,?)", v1, v2, v3); row = self.cursor.execute("select c1, c2, c3, len(c1) as l1, len(c2) as l2, len(c3) as l3 from t1").fetchone() self.assertEqual(v1, row.c1) self.assertEqual(v2, row.c2) self.assertEqual(v3, row.c3) def test_varchar_upperlatin(self): self._test_strtype('varchar', 'á') # # unicode # def test_unicode_null(self): self._test_strtype('nvarchar', None, colsize=100) # Generate a test for each fencepost size: test_unicode_0, etc. def _maketest(value): def t(self): self._test_strtype('nvarchar', value, colsize=len(value)) return t for value in UNICODE_FENCEPOSTS: locals()['test_unicode_%s' % len(value)] = _maketest(value) def test_unicode_upperlatin(self): self._test_strtype('nvarchar', u'á') def test_unicode_longmax(self): # Issue 188: Segfault when fetching NVARCHAR(MAX) data over 511 bytes ver = self.get_sqlserver_version() if ver < 9: # 2005+ return # so pass / ignore self.cursor.execute("select cast(replicate(N'x', 512) as nvarchar(max))") def test_unicode_bind(self): value = u'test' v = self.cursor.execute("select ?", value).fetchone()[0] self.assertEqual(value, v) # # binary # def test_binary_null(self): # FreeTDS does not support SQLDescribeParam, so we must specifically tell it when we are inserting # a NULL into a binary column. 
self.cursor.execute("create table t1(n varbinary(10))") self.cursor.execute("insert into t1 values (?)", pyodbc.BinaryNull); # buffer def _maketest(value): def t(self): self._test_strtype('varbinary', buffer(value), resulttype=pyodbc.BINARY, colsize=len(value)) return t for value in ANSI_FENCEPOSTS: locals()['test_binary_buffer_%s' % len(value)] = _maketest(value) # bytearray if sys.hexversion >= 0x02060000: def _maketest(value): def t(self): self._test_strtype('varbinary', bytearray(value), colsize=len(value)) return t for value in ANSI_FENCEPOSTS: locals()['test_binary_bytearray_%s' % len(value)] = _maketest(value) # # image # def test_image_null(self): self._test_strliketype('image', None, type(None)) # Generate a test for each fencepost size: test_unicode_0, etc. def _maketest(value): def t(self): self._test_strliketype('image', buffer(value), pyodbc.BINARY) return t for value in IMAGE_FENCEPOSTS: locals()['test_image_buffer_%s' % len(value)] = _maketest(value) if sys.hexversion >= 0x02060000: # Python 2.6+ supports bytearray, which pyodbc considers varbinary. # Generate a test for each fencepost size: test_unicode_0, etc. def _maketest(value): def t(self): self._test_strtype('image', bytearray(value)) return t for value in IMAGE_FENCEPOSTS: locals()['test_image_bytearray_%s' % len(value)] = _maketest(value) def test_image_upperlatin(self): self._test_strliketype('image', buffer('á'), pyodbc.BINARY) # # text # # def test_empty_text(self): # self._test_strliketype('text', bytearray('')) def test_null_text(self): self._test_strliketype('text', None, type(None)) # Generate a test for each fencepost size: test_unicode_0, etc. def _maketest(value): def t(self): self._test_strliketype('text', value) return t for value in ANSI_FENCEPOSTS: locals()['test_text_buffer_%s' % len(value)] = _maketest(value) def test_text_upperlatin(self): self._test_strliketype('text', 'á') # # bit # def test_bit(self): value = True self.cursor.execute("create table t1(b bit)") self.cursor.execute("insert into t1 values (?)", value) v = self.cursor.execute("select b from t1").fetchone()[0] self.assertEqual(type(v), bool) self.assertEqual(v, value) # # decimal # def _decimal(self, precision, scale, negative): # From test provided by planders (thanks!) in Issue 91 self.cursor.execute("create table t1(d decimal(%s, %s))" % (precision, scale)) # Construct a decimal that uses the maximum precision and scale. decStr = '9' * (precision - scale) if scale: decStr = decStr + "." 
+ '9' * scale if negative: decStr = "-" + decStr value = Decimal(decStr) self.cursor.execute("insert into t1 values(?)", value) v = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(v, value) def _maketest(p, s, n): def t(self): self._decimal(p, s, n) return t for (p, s, n) in [ (1, 0, False), (1, 0, True), (6, 0, False), (6, 2, False), (6, 4, True), (6, 6, True), (38, 0, False), (38, 10, False), (38, 38, False), (38, 0, True), (38, 10, True), (38, 38, True) ]: locals()['test_decimal_%s_%s_%s' % (p, s, n and 'n' or 'p')] = _maketest(p, s, n) def test_decimal_e(self): """Ensure exponential notation decimals are properly handled""" value = Decimal((0, (1, 2, 3), 5)) # prints as 1.23E+7 self.cursor.execute("create table t1(d decimal(10, 2))") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(result, value) def test_subquery_params(self): """Ensure parameter markers work in a subquery""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') row = self.cursor.execute(""" select x.id from ( select id from t1 where s = ? and id between ? and ? ) x """, 'test', 1, 10).fetchone() self.assertNotEqual(row, None) self.assertEqual(row[0], 1) def _exec(self): self.cursor.execute(self.sql) def test_close_cnxn(self): """Make sure using a Cursor after closing its connection doesn't crash.""" self.cursor.execute("create table t1(id integer, s varchar(20))") self.cursor.execute("insert into t1 values (?,?)", 1, 'test') self.cursor.execute("select * from t1") self.cnxn.close() # Now that the connection is closed, we expect an exception. (If the code attempts to use # the HSTMT, we'll get an access violation instead.) self.sql = "select * from t1" self.assertRaises(pyodbc.ProgrammingError, self._exec) def test_empty_string(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "") def test_fixed_str(self): value = "testing" self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(len(v), len(value)) # If we alloc'd wrong, the test below might work because of an embedded NULL self.assertEqual(v, value) def test_empty_unicode(self): self.cursor.execute("create table t1(s nvarchar(20))") self.cursor.execute("insert into t1 values(?)", u"") def test_unicode_query(self): self.cursor.execute(u"select 1") def test_negative_row_index(self): self.cursor.execute("create table t1(s varchar(20))") self.cursor.execute("insert into t1 values(?)", "1") row = self.cursor.execute("select * from t1").fetchone() self.assertEquals(row[0], "1") self.assertEquals(row[-1], "1") def test_version(self): self.assertEquals(3, len(pyodbc.version.split('.'))) # 1.3.1 etc. # # date, time, datetime # def test_datetime(self): value = datetime(2007, 1, 15, 3, 4, 5) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEquals(type(value), datetime) self.assertEquals(value, result) def test_datetime_fraction(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds, so the most granular datetime # supported is xxx000. 
value = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEquals(type(value), datetime) self.assertEquals(result, value) def test_datetime_fraction_rounded(self): # SQL Server supports milliseconds, but Python's datetime supports nanoseconds. pyodbc rounds down to what the # database supports. full = datetime(2007, 1, 15, 3, 4, 5, 123456) rounded = datetime(2007, 1, 15, 3, 4, 5, 123000) self.cursor.execute("create table t1(dt datetime)") self.cursor.execute("insert into t1 values (?)", full) result = self.cursor.execute("select dt from t1").fetchone()[0] self.assertEquals(type(result), datetime) self.assertEquals(result, rounded) # # ints and floats # def test_int(self): # Issue 226: Failure if there is more than one int? value1 = 1234 value2 = -1234 self.cursor.execute("create table t1(n1 int, n2 int)") self.cursor.execute("insert into t1 values (?, ?)", value1, value2) row = self.cursor.execute("select n1, n2 from t1").fetchone() self.assertEquals(row.n1, value1) self.assertEquals(row.n2, value2) def test_negative_int(self): value = -1 self.cursor.execute("create table t1(n int)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEquals(result, value) def test_bigint(self): input = 3000000000 self.cursor.execute("create table t1(d bigint)") self.cursor.execute("insert into t1 values (?)", input) result = self.cursor.execute("select d from t1").fetchone()[0] self.assertEqual(result, input) def test_float(self): value = 1234.567 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEquals(result, value) def test_negative_float(self): value = -200 self.cursor.execute("create table t1(n float)") self.cursor.execute("insert into t1 values (?)", value) result = self.cursor.execute("select n from t1").fetchone()[0] self.assertEqual(value, result) # # stored procedures # # def test_callproc(self): # "callproc with a simple input-only stored procedure" # pass def test_sp_results(self): self.cursor.execute( """ Create procedure proc1 AS select top 10 name, id, xtype, refdate from sysobjects """) rows = self.cursor.execute("exec proc1").fetchall() self.assertEquals(type(rows), list) self.assertEquals(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEquals(type(rows[0].refdate), datetime) def test_sp_results_from_temp(self): # Note: I've used "set nocount on" so that we don't get the number of rows deleted from #tmptable. # If you don't do this, you'd need to call nextset() once to skip it. 
self.cursor.execute( """ Create procedure proc1 AS set nocount on select top 10 name, id, xtype, refdate into #tmptable from sysobjects select * from #tmptable """) self.cursor.execute("exec proc1") self.assert_(self.cursor.description is not None) self.assert_(len(self.cursor.description) == 4) rows = self.cursor.fetchall() self.assertEquals(type(rows), list) self.assertEquals(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEquals(type(rows[0].refdate), datetime) def test_sp_results_from_vartbl(self): self.cursor.execute( """ Create procedure proc1 AS set nocount on declare @tmptbl table(name varchar(100), id int, xtype varchar(4), refdate datetime) insert into @tmptbl select top 10 name, id, xtype, refdate from sysobjects select * from @tmptbl """) self.cursor.execute("exec proc1") rows = self.cursor.fetchall() self.assertEquals(type(rows), list) self.assertEquals(len(rows), 10) # there has to be at least 10 items in sysobjects self.assertEquals(type(rows[0].refdate), datetime) def test_sp_with_dates(self): # Reported in the forums that passing two datetimes to a stored procedure doesn't work. self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@d1 datetime, @d2 datetime) AS declare @d as int set @d = datediff(year, @d1, @d2) select @d """) self.cursor.execute("exec test_sp ?, ?", datetime.now(), datetime.now()) rows = self.cursor.fetchall() self.assert_(rows is not None) self.assert_(rows[0][0] == 0) # 0 years apart def test_sp_with_none(self): # Reported in the forums that passing None caused an error. self.cursor.execute( """ if exists (select * from dbo.sysobjects where id = object_id(N'[test_sp]') and OBJECTPROPERTY(id, N'IsProcedure') = 1) drop procedure [dbo].[test_sp] """) self.cursor.execute( """ create procedure test_sp(@x varchar(20)) AS declare @y varchar(20) set @y = @x select @y """) self.cursor.execute("exec test_sp ?", None) rows = self.cursor.fetchall() self.assert_(rows is not None) self.assert_(rows[0][0] == None) # 0 years apart # # rowcount # def test_rowcount_delete(self): self.assertEquals(self.cursor.rowcount, -1) self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("delete from t1") self.assertEquals(self.cursor.rowcount, count) def test_rowcount_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. On the other hand, we could hardcode a zero return value. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. self.cursor.execute("delete from t1") self.assertEquals(self.cursor.rowcount, 0) def test_rowcount_select(self): """ Ensure Cursor.rowcount is set properly after a select statement. pyodbc calls SQLRowCount after each execute and sets Cursor.rowcount, but SQL Server 2005 returns -1 after a select statement, so we'll test for that behavior. This is valid behavior according to the DB API specification, but people don't seem to like it. 
""" self.cursor.execute("create table t1(i int)") count = 4 for i in range(count): self.cursor.execute("insert into t1 values (?)", i) self.cursor.execute("select * from t1") self.assertEquals(self.cursor.rowcount, -1) rows = self.cursor.fetchall() self.assertEquals(len(rows), count) self.assertEquals(self.cursor.rowcount, -1) # # always return Cursor # # In the 2.0.x branch, Cursor.execute sometimes returned the cursor and sometimes the rowcount. This proved very # confusing when things went wrong and added very little value even when things went right since users could always # use: cursor.execute("...").rowcount def test_retcursor_delete(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("delete from t1") self.assertEquals(v, self.cursor) def test_retcursor_nodata(self): """ This represents a different code path than a delete that deleted something. The return value is SQL_NO_DATA and code after it was causing an error. We could use SQL_NO_DATA to step over the code that errors out and drop down to the same SQLRowCount code. """ self.cursor.execute("create table t1(i int)") # This is a different code path internally. v = self.cursor.execute("delete from t1") self.assertEquals(v, self.cursor) def test_retcursor_select(self): self.cursor.execute("create table t1(i int)") self.cursor.execute("insert into t1 values (1)") v = self.cursor.execute("select * from t1") self.assertEquals(v, self.cursor) # # misc # def test_lower_case(self): "Ensure pyodbc.lowercase forces returned column names to lowercase." # Has to be set before creating the cursor, so we must recreate self.cursor. pyodbc.lowercase = True self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(Abc int, dEf int)") self.cursor.execute("select * from t1") names = [ t[0] for t in self.cursor.description ] names.sort() self.assertEquals(names, [ "abc", "def" ]) # Put it back so other tests don't fail. pyodbc.lowercase = False def test_row_description(self): """ Ensure Cursor.description is accessible as Row.cursor_description. """ self.cursor = self.cnxn.cursor() self.cursor.execute("create table t1(a int, b char(3))") self.cnxn.commit() self.cursor.execute("insert into t1 values(1, 'abc')") row = self.cursor.execute("select * from t1").fetchone() self.assertEquals(self.cursor.description, row.cursor_description) def test_temp_select(self): # A project was failing to create temporary tables via select into. 
self.cursor.execute("create table t1(s char(7))") self.cursor.execute("insert into t1 values(?)", "testing") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(v, "testing") self.cursor.execute("select s into t2 from t1") v = self.cursor.execute("select * from t1").fetchone()[0] self.assertEqual(type(v), str) self.assertEqual(v, "testing") def test_money(self): d = Decimal('123456.78') self.cursor.execute("create table t1(i int identity(1,1), m money)") self.cursor.execute("insert into t1(m) values (?)", d) v = self.cursor.execute("select m from t1").fetchone()[0] self.assertEqual(v, d) def test_executemany(self): self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (i, str(i)) for i in range(1, 6) ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_one(self): "Pass executemany a single sequence" self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, "test") ] self.cursor.executemany("insert into t1(a, b) values (?,?)", params) count = self.cursor.execute("select count(*) from t1").fetchone()[0] self.assertEqual(count, len(params)) self.cursor.execute("select a, b from t1 order by a") rows = self.cursor.fetchall() self.assertEqual(count, len(rows)) for param, row in zip(params, rows): self.assertEqual(param[0], row[0]) self.assertEqual(param[1], row[1]) def test_executemany_failure(self): """ Ensure that an exception is raised if one query in an executemany fails. """ self.cursor.execute("create table t1(a int, b varchar(10))") params = [ (1, 'good'), ('error', 'not an int'), (3, 'good') ] self.failUnlessRaises(pyodbc.Error, self.cursor.executemany, "insert into t1(a, b) value (?, ?)", params) def test_row_slicing(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = row[:] self.failUnless(result is row) result = row[:-1] self.assertEqual(result, (1,2,3)) result = row[0:4] self.failUnless(result is row) def test_row_repr(self): self.cursor.execute("create table t1(a int, b int, c int, d int)"); self.cursor.execute("insert into t1 values(1,2,3,4)") row = self.cursor.execute("select * from t1").fetchone() result = str(row) self.assertEqual(result, "(1, 2, 3, 4)") result = str(row[:-1]) self.assertEqual(result, "(1, 2, 3)") result = str(row[:1]) self.assertEqual(result, "(1,)") def test_concatenation(self): v2 = '0123456789' * 30 v3 = '9876543210' * 30 self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(300), c3 varchar(300))") self.cursor.execute("insert into t1(c2, c3) values (?,?)", v2, v3) row = self.cursor.execute("select c2, c3, c2 + c3 as both from t1").fetchone() self.assertEqual(row.both, v2 + v3) def test_view_select(self): # Reported in forum: Can't select from a view? I think I do this a lot, but another test never hurts. # Create a table (t1) with 3 rows and a view (t2) into it. 
self.cursor.execute("create table t1(c1 int identity(1, 1), c2 varchar(50))") for i in range(3): self.cursor.execute("insert into t1(c2) values (?)", "string%s" % i) self.cursor.execute("create view t2 as select * from t1") # Select from the view self.cursor.execute("select * from t2") rows = self.cursor.fetchall() self.assert_(rows is not None) self.assert_(len(rows) == 3) def test_autocommit(self): self.assertEqual(self.cnxn.autocommit, False) othercnxn = pyodbc.connect(self.connection_string, autocommit=True) self.assertEqual(othercnxn.autocommit, True) othercnxn.autocommit = False self.assertEqual(othercnxn.autocommit, False) def test_unicode_results(self): "Ensure unicode_results forces Unicode" othercnxn = pyodbc.connect(self.connection_string, unicode_results=True) othercursor = othercnxn.cursor() # ANSI data in an ANSI column ... othercursor.execute("create table t1(s varchar(20))") othercursor.execute("insert into t1 values(?)", 'test') # ... should be returned as Unicode value = othercursor.execute("select s from t1").fetchone()[0] self.assertEqual(value, u'test') def test_sqlserver_callproc(self): try: self.cursor.execute("drop procedure pyodbctest") self.cnxn.commit() except: pass self.cursor.execute("create table t1(s varchar(10))") self.cursor.execute("insert into t1 values(?)", "testing") self.cursor.execute(""" create procedure pyodbctest @var1 varchar(32) as begin select s from t1 return end """) self.cnxn.commit() # for row in self.cursor.procedureColumns('pyodbctest'): # print row.procedure_name, row.column_name, row.column_type, row.type_name self.cursor.execute("exec pyodbctest 'hi'") # print self.cursor.description # for row in self.cursor: # print row.s def test_skip(self): # Insert 1, 2, and 3. Fetch 1, skip 2, fetch 3. self.cursor.execute("create table t1(id int)"); for i in range(1, 5): self.cursor.execute("insert into t1 values(?)", i) self.cursor.execute("select id from t1 order by id") self.assertEqual(self.cursor.fetchone()[0], 1) self.cursor.skip(2) self.assertEqual(self.cursor.fetchone()[0], 4) def test_timeout(self): self.assertEqual(self.cnxn.timeout, 0) # defaults to zero (off) self.cnxn.timeout = 30 self.assertEqual(self.cnxn.timeout, 30) self.cnxn.timeout = 0 self.assertEqual(self.cnxn.timeout, 0) def test_sets_execute(self): # Only lists and tuples are allowed. def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.execute("insert into t1 (word) VALUES (?)", [words]) self.assertRaises(pyodbc.ProgrammingError, f) def test_sets_executemany(self): # Only lists and tuples are allowed. 
def f(): self.cursor.execute("create table t1 (word varchar (100))") words = set (['a']) self.cursor.executemany("insert into t1 (word) values (?)", [words]) self.assertRaises(TypeError, f) def test_row_execute(self): "Ensure we can use a Row object as a parameter to execute" self.cursor.execute("create table t1(n int, s varchar(10))") self.cursor.execute("insert into t1 values (1, 'a')") row = self.cursor.execute("select n, s from t1").fetchone() self.assertNotEqual(row, None) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.execute("insert into t2 values (?, ?)", row) def test_row_executemany(self): "Ensure we can use a Row object as a parameter to executemany" self.cursor.execute("create table t1(n int, s varchar(10))") for i in range(3): self.cursor.execute("insert into t1 values (?, ?)", i, chr(ord('a')+i)) rows = self.cursor.execute("select n, s from t1").fetchall() self.assertNotEqual(len(rows), 0) self.cursor.execute("create table t2(n int, s varchar(10))") self.cursor.executemany("insert into t2 values (?, ?)", rows) def test_description(self): "Ensure cursor.description is correct" self.cursor.execute("create table t1(n int, s varchar(8), d decimal(5,2))") self.cursor.execute("insert into t1 values (1, 'abc', '1.23')") self.cursor.execute("select * from t1") # (I'm not sure the precision of an int is constant across different versions, bits, so I'm hand checking the # items I do know. # int t = self.cursor.description[0] self.assertEqual(t[0], 'n') self.assertEqual(t[1], int) self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # varchar(8) t = self.cursor.description[1] self.assertEqual(t[0], 's') self.assertEqual(t[1], str) self.assertEqual(t[4], 8) # precision self.assertEqual(t[5], 0) # scale self.assertEqual(t[6], True) # nullable # decimal(5, 2) t = self.cursor.description[2] self.assertEqual(t[0], 'd') self.assertEqual(t[1], Decimal) self.assertEqual(t[4], 5) # precision self.assertEqual(t[5], 2) # scale self.assertEqual(t[6], True) # nullable def test_none_param(self): "Ensure None can be used for params other than the first" # Some driver/db versions would fail if NULL was not the first parameter because SQLDescribeParam (only used # with NULL) could not be used after the first call to SQLBindParameter. This means None always worked for the # first column, but did not work for later columns. # # If SQLDescribeParam doesn't work, pyodbc would use VARCHAR which almost always worked. However, # binary/varbinary won't allow an implicit conversion. self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'xyzzy')") row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row.n, 1) self.assertEqual(type(row.s), str) self.cursor.execute("update t1 set n=?, s=?", 2, None) row = self.cursor.execute("select * from t1").fetchone() self.assertEqual(row.n, 2) self.assertEqual(row.s, None) def test_output_conversion(self): def convert(value): # `value` will be a string. We'll simply add an X at the beginning at the end. return 'X' + value + 'X' self.cnxn.add_output_converter(pyodbc.SQL_VARCHAR, convert) self.cursor.execute("create table t1(n int, v varchar(10))") self.cursor.execute("insert into t1 values (1, '123.45')") value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, 'X123.45X') # Now clear the conversions and try again. There should be no Xs this time. 
self.cnxn.clear_output_converters() value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, '123.45') def test_too_large(self): """Ensure error raised if insert fails due to truncation""" value = 'x' * 1000 self.cursor.execute("create table t1(s varchar(800))") def test(): self.cursor.execute("insert into t1 values (?)", value) self.assertRaises(pyodbc.DataError, test) def test_geometry_null_insert(self): def convert(value): return value self.cnxn.add_output_converter(-151, convert) # -151 is SQL Server's geometry self.cursor.execute("create table t1(n int, v geometry)") self.cursor.execute("insert into t1 values (?, ?)", 1, None) value = self.cursor.execute("select v from t1").fetchone()[0] self.assertEqual(value, None) self.cnxn.clear_output_converters() def test_login_timeout(self): # This can only test setting since there isn't a way to cause it to block on the server side. cnxns = pyodbc.connect(self.connection_string, timeout=2) def test_row_equal(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test')") row1 = self.cursor.execute("select n, s from t1").fetchone() row2 = self.cursor.execute("select n, s from t1").fetchone() b = (row1 == row2) self.assertEqual(b, True) def test_row_gtlt(self): self.cursor.execute("create table t1(n int, s varchar(20))") self.cursor.execute("insert into t1 values (1, 'test1')") self.cursor.execute("insert into t1 values (1, 'test2')") rows = self.cursor.execute("select n, s from t1 order by s").fetchall() self.assert_(rows[0] < rows[1]) self.assert_(rows[0] <= rows[1]) self.assert_(rows[1] > rows[0]) self.assert_(rows[1] >= rows[0]) self.assert_(rows[0] != rows[1]) rows = list(rows) rows.sort() # uses < def test_context_manager_success(self): self.cursor.execute("create table t1(n int)") self.cnxn.commit() try: with pyodbc.connect(self.connection_string) as cnxn: cursor = cnxn.cursor() cursor.execute("insert into t1 values (1)") except Exception: pass cnxn = None cursor = None rows = self.cursor.execute("select n from t1").fetchall() self.assertEquals(len(rows), 1) self.assertEquals(rows[0][0], 1) def test_untyped_none(self): # From issue 129 value = self.cursor.execute("select ?", None).fetchone()[0] self.assertEqual(value, None) def test_large_update_nodata(self): self.cursor.execute('create table t1(a varbinary(max))') hundredkb = bytearray('x'*100*1024) self.cursor.execute('update t1 set a=? where 1=0', (hundredkb,)) def test_func_param(self): self.cursor.execute(''' create function func1 (@testparam varchar(4)) returns @rettest table (param varchar(4)) as begin insert @rettest select @testparam return end ''') self.cnxn.commit() value = self.cursor.execute("select * from func1(?)", 'test').fetchone()[0] self.assertEquals(value, 'test') def test_no_fetch(self): # Issue 89 with FreeTDS: Multiple selects (or catalog functions that issue selects) without fetches seem to # confuse the driver. self.cursor.execute('select 1') self.cursor.execute('select 1') self.cursor.execute('select 1') def main(): from optparse import OptionParser parser = OptionParser(usage=usage) parser.add_option("-v", "--verbose", action="count", help="Increment test verbosity (can be used multiple times)") parser.add_option("-d", "--debug", action="store_true", default=False, help="Print debugging items") parser.add_option("-t", "--test", help="Run only the named test") (options, args) = parser.parse_args() if len(args) > 1: parser.error('Only one argument is allowed. 
Do you need quotes around the connection string?') if not args: connection_string = load_setup_connection_string('freetdstests') if not connection_string: parser.print_help() raise SystemExit() else: connection_string = args[0] cnxn = pyodbc.connect(connection_string) print_library_info(cnxn) cnxn.close() suite = load_tests(FreeTDSTestCase, options.test, connection_string) testRunner = unittest.TextTestRunner(verbosity=options.verbose) result = testRunner.run(suite) if __name__ == '__main__': # Add the build directory to the path so we're testing the latest build, not the installed version. add_to_path() import pyodbc main()
mit
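The record above is pyodbc's FreeTDS/SQL Server test suite. As a reading aid, here is a minimal sketch of the parameterized insert-and-select round trip those tests exercise; the DSN, user and password in the connection string are placeholder assumptions, not values taken from the record.

import pyodbc

# Placeholder connection string -- substitute a real FreeTDS/SQL Server DSN,
# user and password; nothing here is taken from the record above.
cnxn = pyodbc.connect('DSN=mydsn;UID=myuser;PWD=mypassword')
cursor = cnxn.cursor()

cursor.execute("create table t1(n int, s varchar(20))")
# pyodbc accepts parameters directly after the SQL string, as the tests do.
cursor.execute("insert into t1 values (?, ?)", 1, 'test')
row = cursor.execute("select n, s from t1").fetchone()
print row.n, row.s          # Row columns are also reachable by name

cnxn.commit()
cnxn.close()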
electric-cloud/EC-WebSphere
src/main/resources/project/wsadmin_scripts/check_app.py
1
1733
# # Copyright 2016 Electric Cloud, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # $[/myProject/wsadmin_scripts/preamble.py] appName = r''' $[appname] '''.strip() # EXISTS NOT_EXISTS READY NOT_READY RUNNING NOT_RUNNING appStateChecked = r''' $[appStateChecked] '''.strip() waitTime = r''' $[waitTimeForState] '''.strip() if not waitTime: waitTime = '0' print "WaitTime: %s" % (waitTime); # TODO: review this line: appName.replace(' ', '_') isOk = 0 startTime = int(time.time()) waitTime = int(waitTime) endTime = startTime + waitTime stateMatrix = { "EXISTS": "is installed", "NOT_EXISTS": "is not installed", "READY": "is ready", "NOT_READY": "is not ready", "RUNNING": "is running", "NOT_RUNNING": "is not running" } while 1 : sleepTime = 5 currentTime = int(time.time()); if endTime < currentTime : print "Timed out." break if isAppInDesiredState(appName, appStateChecked): isOk = 1 break else: print "Application %s %s, waiting" % (appName, stateMatrix[appStateChecked]) time.sleep(sleepTime); print "Application %s %s" % (appName, stateMatrix[appStateChecked]) if isOk: os._exit(0) else: os._exit(1)
apache-2.0
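The wsadmin script above repeatedly checks an application's state until a timeout expires. Factored out of that script, the poll-with-timeout shape looks roughly like the sketch below; wait_for_state and the check callable are illustrative names only (the script itself relies on an isAppInDesiredState helper supplied by the preamble it includes, which is not shown in this record).

import time

def wait_for_state(check, wait_time, interval=5):
    """Call check() until it returns true or wait_time seconds elapse.

    A wait_time of 0 still performs exactly one check, mirroring the
    behaviour of the script above.
    """
    end_time = time.time() + wait_time
    while True:
        if check():
            return True
        if time.time() >= end_time:
            return False
        time.sleep(interval)

# Hypothetical usage, mirroring the script:
# ok = wait_for_state(lambda: isAppInDesiredState(appName, appStateChecked), waitTime)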
YangChihWei/w16b_test
static/Brython3.1.1-20150328-091302/Lib/external_import.py
742
2985
import os from browser import doc import urllib.request ## this module is able to download modules that are external to ## localhost/src ## so we could download from any URL class ModuleFinder: def __init__(self, path_entry): print("external_import here..") #print(path_entry) self._module=None if path_entry.startswith('http://'): self.path_entry=path_entry else: raise ImportError() def __str__(self): return '<%s for "%s">' % (self.__class__.__name__, self.path_entry) def find_module(self, fullname, path=None): path = path or self.path_entry #print('looking for "%s" in %s ...' % (fullname, path)) for _ext in ['js', 'pyj', 'py']: _fp,_url,_headers=urllib.request.urlopen(path + '/' + '%s.%s' % (fullname, _ext)) self._module=_fp.read() _fp.close() if self._module is not None: print("module found at %s:%s" % (path, fullname)) return ModuleLoader(path, fullname, self._module) print('module %s not found' % fullname) raise ImportError() return None class ModuleLoader: """Load source for modules""" def __init__(self, filepath, name, module_source): self._filepath=filepath self._name=name self._module_source=module_source def get_source(self): return self._module_source def is_package(self): return '.' in self._name def load_module(self): if self._name in sys.modules: #print('reusing existing module from previous import of "%s"' % fullname) mod = sys.modules[self._name] return mod _src=self.get_source() if self._filepath.endswith('.js'): mod=JSObject(import_js_module(_src, self._filepath, self._name)) elif self._filepath.endswith('.py'): mod=JSObject(import_py_module(_src, self._filepath, self._name)) elif self._filepath.endswith('.pyj'): mod=JSObject(import_pyj_module(_src, self._filepath, self._name)) else: raise ImportError('Invalid Module: %s' % self._filepath) # Set a few properties required by PEP 302 mod.__file__ = self._filepath mod.__name__ = self._name mod.__path__ = os.path.abspath(self._filepath) mod.__loader__ = self mod.__package__ = '.'.join(self._name.split('.')[:-1]) if self.is_package(): print('adding path for package') # Set __path__ for packages # so we can find the sub-modules. mod.__path__ = [ self._filepath ] else: print('imported as regular module') print('creating a new module object for "%s"' % self._name) sys.modules.setdefault(self._name, mod) JSObject(__BRYTHON__.imported)[self._name]=mod return mod
agpl-3.0
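The module above implements a PEP 302 finder/loader pair that fetches module source over HTTP. The record does not show how it is registered, but a finder of this shape is normally wired into the import machinery via sys.path_hooks, roughly as sketched below; the URL is a placeholder and the registration itself is an assumption, not part of the record.

import sys
import external_import   # the module shown in the record

# ModuleFinder raises ImportError for path entries it cannot handle, so the
# class itself can be appended as a path hook.
sys.path_hooks.append(external_import.ModuleFinder)
sys.path.append('http://example.com/modules')   # placeholder URL

# From this point on, "import foo" will also try
# http://example.com/modules/foo.py (or .pyj / .js) through ModuleFinder.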
olivierdalang/stdm
third_party/reportlab/graphics/charts/lineplots.py
1
47988
#Copyright ReportLab Europe Ltd. 2000-2012 #see license.txt for license details #history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/lineplots.py __version__=''' $Id$ ''' __doc__="""This module defines a very preliminary Line Plot example.""" import string, time from reportlab.lib import colors from reportlab.lib.validators import * from reportlab.lib.attrmap import * from reportlab.graphics.shapes import Drawing, Group, Rect, Line, PolyLine, Polygon, _SetKeyWordArgs from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder from reportlab.graphics.charts.textlabels import Label from reportlab.graphics.charts.axes import XValueAxis, YValueAxis, AdjYValueAxis, NormalDateXValueAxis from reportlab.graphics.charts.utils import * from reportlab.graphics.widgets.markers import uSymbol2Symbol, isSymbol, makeMarker from reportlab.graphics.widgets.grids import Grid, DoubleGrid, ShadedRect, ShadedPolygon from reportlab.pdfbase.pdfmetrics import stringWidth, getFont from reportlab.graphics.charts.areas import PlotArea # This might be moved again from here... class LinePlotProperties(PropHolder): _attrMap = AttrMap( strokeWidth = AttrMapValue(isNumber, desc='Width of a line.'), strokeColor = AttrMapValue(isColorOrNone, desc='Color of a line.'), strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array of a line.'), symbol = AttrMapValue(None, desc='Widget placed at data points.',advancedUsage=1), shader = AttrMapValue(None, desc='Shader Class.',advancedUsage=1), filler = AttrMapValue(None, desc='Filler Class.',advancedUsage=1), name = AttrMapValue(isStringOrNone, desc='Name of the line.'), inFill = AttrMapValue(isBoolean, desc='If true flood fill to x axis',advancedUsage=1), ) class Shader(_SetKeyWordArgs): _attrMap = AttrMap(BASE=PlotArea, vertical = AttrMapValue(isBoolean, desc='If true shade to x axis'), colors = AttrMapValue(SequenceOf(isColorOrNone,lo=2,hi=2), desc='(AxisColor, LineColor)'), ) def shade(self, lp, g, rowNo, rowColor, row): c = [None,None] c = getattr(self,'colors',c) or c if not c[0]: c[0] = getattr(lp,'fillColor',colors.white) if not c[1]: c[1] = rowColor class NoFiller: def fill(self, lp, g, rowNo, rowColor, points): pass class Filler: '''mixin providing simple polygon fill''' _attrMap = AttrMap( fillColor = AttrMapValue(isColorOrNone, desc='filler interior color'), strokeColor = AttrMapValue(isColorOrNone, desc='filler edge color'), strokeWidth = AttrMapValue(isNumberOrNone, desc='filler edge width'), ) def __init__(self,**kw): self.__dict__ = kw def fill(self, lp, g, rowNo, rowColor, points): g.add(Polygon(points, fillColor=getattr(self,'fillColor',rowColor), strokeColor=getattr(self,'strokeColor',rowColor), strokeWidth=getattr(self,'strokeWidth',0.1))) class ShadedPolyFiller(Filler,ShadedPolygon): pass class PolyFiller(Filler,Polygon): pass from linecharts import AbstractLineChart class LinePlot(AbstractLineChart): """Line plot with multiple lines. Both x- and y-axis are value axis (so there are no seperate X and Y versions of this class). 
""" _attrMap = AttrMap(BASE=PlotArea, reversePlotOrder = AttrMapValue(isBoolean, desc='If true reverse plot order.',advancedUsage=1), lineLabelNudge = AttrMapValue(isNumber, desc='Distance between a data point and its label.',advancedUsage=1), lineLabels = AttrMapValue(None, desc='Handle to the list of data point labels.'), lineLabelFormat = AttrMapValue(None, desc='Formatting string or function used for data point labels.'), lineLabelArray = AttrMapValue(None, desc='explicit array of line label values, must match size of data if present.'), joinedLines = AttrMapValue(isNumber, desc='Display data points joined with lines if true.'), strokeColor = AttrMapValue(isColorOrNone, desc='Color used for background border of plot area.'), fillColor = AttrMapValue(isColorOrNone, desc='Color used for background interior of plot area.'), lines = AttrMapValue(None, desc='Handle of the lines.'), xValueAxis = AttrMapValue(None, desc='Handle of the x axis.'), yValueAxis = AttrMapValue(None, desc='Handle of the y axis.'), data = AttrMapValue(None, desc='Data to be plotted, list of (lists of) x/y tuples.'), annotations = AttrMapValue(None, desc='list of callables, will be called with self, xscale, yscale.',advancedUsage=1), behindAxes = AttrMapValue(isBoolean, desc='If true use separate line group.',advancedUsage=1), gridFirst = AttrMapValue(isBoolean, desc='If true use draw grids before axes.',advancedUsage=1), ) def __init__(self): PlotArea.__init__(self) self.reversePlotOrder = 0 self.xValueAxis = XValueAxis() self.yValueAxis = YValueAxis() # this defines two series of 3 points. Just an example. self.data = [ ((1,1), (2,2), (2.5,1), (3,3), (4,5)), ((1,2), (2,3), (2.5,2), (3,4), (4,6)) ] self.lines = TypedPropertyCollection(LinePlotProperties) self.lines.strokeWidth = 1 self.lines[0].strokeColor = colors.red self.lines[1].strokeColor = colors.blue self.lineLabels = TypedPropertyCollection(Label) self.lineLabelFormat = None self.lineLabelArray = None # this says whether the origin is inside or outside # the bar - +10 means put the origin ten points # above the tip of the bar if value > 0, or ten # points inside if bar value < 0. This is different # to label dx/dy which are not dependent on the # sign of the data. self.lineLabelNudge = 10 # if you have multiple series, by default they butt # together. # New line chart attributes. self.joinedLines = 1 # Connect items with straight lines. #private attributes self._inFill = None self.annotations = [] self.behindAxes = 0 self.gridFirst = 0 def demo(self): """Shows basic use of a line chart.""" drawing = Drawing(400, 200) data = [ ((1,1), (2,2), (2.5,1), (3,3), (4,5)), ((1,2), (2,3), (2.5,2), (3.5,5), (4,6)) ] lp = LinePlot() lp.x = 50 lp.y = 50 lp.height = 125 lp.width = 300 lp.data = data lp.joinedLines = 1 lp.lineLabelFormat = '%2.0f' lp.strokeColor = colors.black lp.lines[0].strokeColor = colors.red lp.lines[0].symbol = makeMarker('FilledCircle') lp.lines[1].strokeColor = colors.blue lp.lines[1].symbol = makeMarker('FilledDiamond') lp.xValueAxis.valueMin = 0 lp.xValueAxis.valueMax = 5 lp.xValueAxis.valueStep = 1 lp.yValueAxis.valueMin = 0 lp.yValueAxis.valueMax = 7 lp.yValueAxis.valueStep = 1 drawing.add(lp) return drawing def calcPositions(self): """Works out where they go. Sets an attribute _positions which is a list of lists of (x, y) matching the data. 
""" self._seriesCount = len(self.data) self._rowLength = max(map(len,self.data)) self._positions = [] for rowNo in range(len(self.data)): line = [] for colNo in range(len(self.data[rowNo])): datum = self.data[rowNo][colNo] # x,y value if type(datum[0]) == type(''): x = self.xValueAxis.scale(mktime(mkTimeTuple(datum[0]))) else: x = self.xValueAxis.scale(datum[0]) y = self.yValueAxis.scale(datum[1]) line.append((x, y)) self._positions.append(line) def _innerDrawLabel(self, rowNo, colNo, x, y): "Draw a label for a given item in the list." labelFmt = self.lineLabelFormat labelValue = self.data[rowNo][colNo][1] ### if labelFmt is None: labelText = None elif type(labelFmt) is StringType: if labelFmt == 'values': labelText = self.lineLabelArray[rowNo][colNo] else: labelText = labelFmt % labelValue elif hasattr(labelFmt,'__call__'): if not hasattr(labelFmt,'__labelFmtEX__'): labelText = labelFmt(labelValue) else: labelText = labelFmt(self,rowNo,colNo,x,y) else: raise ValueError("Unknown formatter type %s, expected string or function"%labelFmt) if labelText: label = self.lineLabels[(rowNo, colNo)] if not label.visible: return #hack to make sure labels are outside the bar if y > 0: label.setOrigin(x, y + self.lineLabelNudge) else: label.setOrigin(x, y - self.lineLabelNudge) label.setText(labelText) else: label = None return label def drawLabel(self, G, rowNo, colNo, x, y): '''Draw a label for a given item in the list. G must have an add method''' G.add(self._innerDrawLabel(rowNo,colNo,x,y)) def makeLines(self): g = Group() bubblePlot = getattr(self,'_bubblePlot',None) if bubblePlot: yA = self.yValueAxis xA = self.xValueAxis bubbleR = min(yA._bubbleRadius,xA._bubbleRadius) bubbleMax = xA._bubbleMax labelFmt = self.lineLabelFormat P = range(len(self._positions)) if self.reversePlotOrder: P.reverse() inFill = getattr(self,'_inFill',None) styleCount = len(self.lines) if inFill or [rowNo for rowNo in P if getattr(self.lines[rowNo%styleCount],'inFill',False)]: inFillY = self.xValueAxis._y inFillX0 = self.yValueAxis._x inFillX1 = inFillX0 + self.xValueAxis._length inFillG = getattr(self,'_inFillG',g) lG = getattr(self,'_lineG',g) # Iterate over data rows. for rowNo in P: row = self._positions[rowNo] rowStyle = self.lines[rowNo % styleCount] rowColor = getattr(rowStyle,'strokeColor',None) dash = getattr(rowStyle, 'strokeDashArray', None) if hasattr(rowStyle, 'strokeWidth'): width = rowStyle.strokeWidth elif hasattr(self.lines, 'strokeWidth'): width = self.lines.strokeWidth else: width = None # Iterate over data columns. 
if self.joinedLines: points = [] for xy in row: points += [xy[0], xy[1]] if inFill or getattr(rowStyle,'inFill',False): fpoints = [inFillX0,inFillY] + points + [inFillX1,inFillY] filler = getattr(rowStyle, 'filler', None) if filler: filler.fill(self,inFillG,rowNo,rowColor,fpoints) else: inFillG.add(Polygon(fpoints,fillColor=rowColor,strokeColor=rowColor,strokeWidth=width or 0.1)) if inFill in (None,0,2): line = PolyLine(points,strokeColor=rowColor,strokeLineCap=0,strokeLineJoin=1) if width: line.strokeWidth = width if dash: line.strokeDashArray = dash lG.add(line) if hasattr(rowStyle, 'symbol'): uSymbol = rowStyle.symbol elif hasattr(self.lines, 'symbol'): uSymbol = self.lines.symbol else: uSymbol = None if uSymbol: if bubblePlot: drow = self.data[rowNo] for j,xy in enumerate(row): symbol = uSymbol2Symbol(uSymbol,xy[0],xy[1],rowColor) if symbol: if bubblePlot: symbol.size = bubbleR*(drow[j][2]/bubbleMax)**0.5 g.add(symbol) else: if bubblePlot: drow = self.data[rowNo] for j,xy in enumerate(row): usymbol = getattr(self.lines[rowNo,j],'symbol',None) if not usymbol: continue symbol = uSymbol2Symbol(uSymbol,xy[0],xy[1],rowColor) if symbol: if bubblePlot: symbol.size = bubbleR*(drow[j][2]/bubbleMax)**0.5 g.add(symbol) # Draw data labels. for colNo in range(len(row)): x1, y1 = row[colNo] self.drawLabel(g, rowNo, colNo, x1, y1) shader = getattr(rowStyle, 'shader', None) if shader: shader.shade(self,g,rowNo,rowColor,row) return g def draw(self): yA = self.yValueAxis xA = self.xValueAxis if getattr(self,'_bubblePlot',None): yA._bubblePlot = xA._bubblePlot = 1 yA.setPosition(self.x, self.y, self.height) if yA: yA.joinAxis = xA if xA: xA.joinAxis = yA yA.configure(self.data) # if zero is in chart, put x axis there, otherwise use bottom. xAxisCrossesAt = yA.scale(0) if ((xAxisCrossesAt > self.y + self.height) or (xAxisCrossesAt < self.y)): y = self.y else: y = xAxisCrossesAt xA.setPosition(self.x, y, self.width) xA.configure(self.data) self.calcPositions() g = Group() g.add(self.makeBackground()) if self._inFill or self.behindAxes: xA._joinToAxis() if self._inFill: self._inFillG = Group() g.add(self._inFillG) if self.behindAxes: self._lineG = Group() g.add(self._lineG) xA._joinToAxis() yA._joinToAxis() xAex = xA.visibleAxis and [xA._y] or [] yAex = yA.visibleAxis and [yA._x] or [] skipGrid = getattr(xA,'skipGrid','none') if skipGrid!=None: if skipGrid in ('both','top'): yAex.append(xA._x+xA._length) if skipGrid in ('both','bottom'): yAex.append(xA._x) skipGrid = getattr(yA,'skipGrid','none') if skipGrid!=None: if skipGrid in ('both','top'): xAex.append(yA._y+yA._length) if skipGrid in ('both','bottom'): xAex.append(yA._y) if self.gridFirst: xA.makeGrid(g,parent=self,dim=yA.getGridDims,exclude=yAex) yA.makeGrid(g,parent=self,dim=xA.getGridDims,exclude=xAex) g.add(xA.draw()) g.add(yA.draw()) if not self.gridFirst: xAdgl = getattr(xA,'drawGridLast',False) yAdgl = getattr(yA,'drawGridLast',False) if not xAdgl: xA.makeGrid(g,parent=self,dim=yA.getGridDims,exclude=yAex) if not yAdgl: yA.makeGrid(g,parent=self,dim=xA.getGridDims,exclude=xAex) annotations = getattr(self,'annotations',[]) for a in annotations: if getattr(a,'beforeLines',None): g.add(a(self,xA.scale,yA.scale)) g.add(self.makeLines()) if not self.gridFirst: if xAdgl: xA.makeGrid(g,parent=self,dim=yA.getGridDims,exclude=yAex) if yAdgl: yA.makeGrid(g,parent=self,dim=xA.getGridDims,exclude=xAex) for a in annotations: if not getattr(a,'beforeLines',None): g.add(a(self,xA.scale,yA.scale)) return g def 
addCrossHair(self,name,xv,yv,strokeColor=colors.black,strokeWidth=1,beforeLines=True): from reportlab.graphics.shapes import Group, Line annotations = [a for a in getattr(self,'annotations',[]) if getattr(a,'name',None)!=name] def annotation(self,xScale,yScale): x = xScale(xv) y = yScale(yv) g = Group() xA = xScale.im_self #the x axis g.add(Line(xA._x,y,xA._x+xA._length,y,strokeColor=strokeColor,strokeWidth=strokeWidth)) yA = yScale.im_self #the y axis g.add(Line(x,yA._y,x,yA._y+yA._length,strokeColor=strokeColor,strokeWidth=strokeWidth)) return g annotation.beforeLines = beforeLines annotations.append(annotation) self.annotations = annotations class LinePlot3D(LinePlot): _attrMap = AttrMap(BASE=LinePlot, theta_x = AttrMapValue(isNumber, desc='dx/dz'), theta_y = AttrMapValue(isNumber, desc='dy/dz'), zDepth = AttrMapValue(isNumber, desc='depth of an individual series'), zSpace = AttrMapValue(isNumber, desc='z gap around series'), ) theta_x = .5 theta_y = .5 zDepth = 10 zSpace = 3 def calcPositions(self): LinePlot.calcPositions(self) nSeries = self._seriesCount zSpace = self.zSpace zDepth = self.zDepth if self.xValueAxis.style=='parallel_3d': _3d_depth = nSeries*zDepth+(nSeries+1)*zSpace else: _3d_depth = zDepth + 2*zSpace self._3d_dx = self.theta_x*_3d_depth self._3d_dy = self.theta_y*_3d_depth def _calc_z0(self,rowNo): zSpace = self.zSpace if self.xValueAxis.style=='parallel_3d': z0 = rowNo*(self.zDepth+zSpace)+zSpace else: z0 = zSpace return z0 def _zadjust(self,x,y,z): return x+z*self.theta_x, y+z*self.theta_y def makeLines(self): bubblePlot = getattr(self,'_bubblePlot',None) assert not bubblePlot, "_bubblePlot not supported for 3d yet" #if bubblePlot: # yA = self.yValueAxis # xA = self.xValueAxis # bubbleR = min(yA._bubbleRadius,xA._bubbleRadius) # bubbleMax = xA._bubbleMax labelFmt = self.lineLabelFormat positions = self._positions P = range(len(positions)) if self.reversePlotOrder: P.reverse() inFill = getattr(self,'_inFill',None) assert not inFill, "inFill not supported for 3d yet" #if inFill: # inFillY = self.xValueAxis._y # inFillX0 = self.yValueAxis._x # inFillX1 = inFillX0 + self.xValueAxis._length # inFillG = getattr(self,'_inFillG',g) zDepth = self.zDepth _zadjust = self._zadjust theta_x = self.theta_x theta_y = self.theta_y from linecharts import _FakeGroup F = _FakeGroup() from utils3d import _make_3d_line_info, find_intersections if self.xValueAxis.style!='parallel_3d': tileWidth = getattr(self,'_3d_tilewidth',1) if getattr(self,'_find_intersections',None): from copy import copy fpositions = map(copy,positions) I = find_intersections(fpositions,small=tileWidth) ic = None for i,j,x,y in I: if ic!=i: ic = i jc = 0 else: jc+=1 fpositions[i].insert(j+jc,(x,y)) tileWidth = None else: fpositions = positions else: tileWidth = None fpositions = positions # Iterate over data rows. styleCount = len(self.lines) for rowNo in P: row = positions[rowNo] n = len(row) rowStyle = self.lines[rowNo % styleCount] rowColor = rowStyle.strokeColor dash = getattr(rowStyle, 'strokeDashArray', None) z0 = self._calc_z0(rowNo) z1 = z0 + zDepth if hasattr(rowStyle, 'strokeWidth'): width = rowStyle.strokeWidth elif hasattr(self.lines, 'strokeWidth'): width = self.lines.strokeWidth else: width = None # Iterate over data columns. 
if self.joinedLines: if n: frow = fpositions[rowNo] x0, y0 = frow[0] for colNo in xrange(1,len(frow)): x1, y1 = frow[colNo] _make_3d_line_info( F, x0, x1, y0, y1, z0, z1, theta_x, theta_y, rowColor, fillColorShaded=None, tileWidth=tileWidth, strokeColor=None, strokeWidth=None, strokeDashArray=None, shading=0.1) x0, y0 = x1, y1 if hasattr(rowStyle, 'symbol'): uSymbol = rowStyle.symbol elif hasattr(self.lines, 'symbol'): uSymbol = self.lines.symbol else: uSymbol = None if uSymbol: for xy in row: x1, y1 = row[colNo] x1, y1 = _zadjust(x1,y1,z0) symbol = uSymbol2Symbol(uSymbol,xy[0],xy[1],rowColor) if symbol: F.add((1,z0,z0,x1,y1,symbol)) # Draw data labels. for colNo in xrange(n): x1, y1 = row[colNo] x1, y1 = _zadjust(x1,y1,z0) L = self._innerDrawLabel(rowNo, colNo, x1, y1) if L: F.add((2,z0,z0,x1,y1,L)) F.sort() g = Group() for v in F.value(): g.add(v[-1]) return g _monthlyIndexData = [[(19971202, 100.0), (19971231, 100.1704367), (19980131, 101.5639577), (19980228, 102.1879927), (19980331, 101.6337257), (19980430, 102.7640446), (19980531, 102.9198038), (19980630, 103.25938789999999), (19980731, 103.2516421), (19980831, 105.4744329), (19980930, 109.3242705), (19981031, 111.9859291), (19981130, 110.9184642), (19981231, 110.9184642), (19990131, 111.9882532), (19990228, 109.7912614), (19990331, 110.24189629999999), (19990430, 110.4279321), (19990531, 109.33955469999999), (19990630, 108.2341748), (19990731, 110.21294469999999), (19990831, 110.9683062), (19990930, 112.4425371), (19991031, 112.7314032), (19991130, 112.3509645), (19991231, 112.3660659), (20000131, 110.9255248), (20000229, 110.5266306), (20000331, 113.3116101), (20000430, 111.0449133), (20000531, 111.702717), (20000630, 113.5832178)], [(19971202, 100.0), (19971231, 100.0), (19980131, 100.8), (19980228, 102.0), (19980331, 101.9), (19980430, 103.0), (19980531, 103.0), (19980630, 103.1), (19980731, 103.1), (19980831, 102.8), (19980930, 105.6), (19981031, 108.3), (19981130, 108.1), (19981231, 111.9), (19990131, 113.1), (19990228, 110.2), (19990331, 111.8), (19990430, 112.3), (19990531, 110.1), (19990630, 109.3), (19990731, 111.2), (19990831, 111.7), (19990930, 112.6), (19991031, 113.2), (19991130, 113.9), (19991231, 115.4), (20000131, 112.7), (20000229, 113.9), (20000331, 115.8), (20000430, 112.2), (20000531, 112.6), (20000630, 114.6)]] class SimpleTimeSeriesPlot(LinePlot): """A customized version of LinePlot. It uses NormalDateXValueAxis() and AdjYValueAxis() for the X and Y axes. """ def __init__(self): LinePlot.__init__(self) self.xValueAxis = NormalDateXValueAxis() self.yValueAxis = YValueAxis() self.data = _monthlyIndexData class GridLinePlot(SimpleTimeSeriesPlot): """A customized version of SimpleTimeSeriesSPlot. It uses NormalDateXValueAxis() and AdjYValueAxis() for the X and Y axes. The chart has a default grid background with thin horizontal lines aligned with the tickmarks (and labels). You can change the back- ground to be any Grid or ShadedRect, or scale the whole chart. If you do provide a background, you can specify the colours of the stripes with 'background.stripeColors'. 
""" _attrMap = AttrMap(BASE=LinePlot, background = AttrMapValue(None, desc='Background for chart area (now Grid or ShadedRect).'), scaleFactor = AttrMapValue(isNumberOrNone, desc='Scalefactor to apply to whole drawing.'), ) def __init__(self): from reportlab.lib import colors SimpleTimeSeriesPlot.__init__(self) self.scaleFactor = None self.background = Grid() self.background.orientation = 'horizontal' self.background.useRects = 0 self.background.useLines = 1 self.background.strokeWidth = 0.5 self.background.strokeColor = colors.black def demo(self,drawing=None): from reportlab.lib import colors if not drawing: drawing = Drawing(400, 200) lp = GridLinePlot() lp.x = 50 lp.y = 50 lp.height = 125 lp.width = 300 lp.data = _monthlyIndexData lp.joinedLines = 1 lp.strokeColor = colors.black c0 = colors.PCMYKColor(100,65,0,30, spotName='PANTONE 288 CV', density=100) lp.lines[0].strokeColor = c0 lp.lines[0].strokeWidth = 2 lp.lines[0].strokeDashArray = None c1 = colors.PCMYKColor(0,79,91,0, spotName='PANTONE Wm Red CV', density=100) lp.lines[1].strokeColor = c1 lp.lines[1].strokeWidth = 1 lp.lines[1].strokeDashArray = [3,1] lp.xValueAxis.labels.fontSize = 10 lp.xValueAxis.labels.textAnchor = 'start' lp.xValueAxis.labels.boxAnchor = 'w' lp.xValueAxis.labels.angle = -45 lp.xValueAxis.labels.dx = 0 lp.xValueAxis.labels.dy = -8 lp.xValueAxis.xLabelFormat = '{mm}/{yy}' lp.yValueAxis.labelTextFormat = '%5d%% ' lp.yValueAxis.tickLeft = 5 lp.yValueAxis.labels.fontSize = 10 lp.background = Grid() lp.background.stripeColors = [colors.pink, colors.lightblue] lp.background.orientation = 'vertical' drawing.add(lp,'plot') return drawing def draw(self): xva, yva = self.xValueAxis, self.yValueAxis if xva: xva.joinAxis = yva if yva: yva.joinAxis = xva yva.setPosition(self.x, self.y, self.height) yva.configure(self.data) # if zero is in chart, put x axis there, otherwise # use bottom. xAxisCrossesAt = yva.scale(0) if ((xAxisCrossesAt > self.y + self.height) or (xAxisCrossesAt < self.y)): y = self.y else: y = xAxisCrossesAt xva.setPosition(self.x, y, self.width) xva.configure(self.data) back = self.background if isinstance(back, Grid): if back.orientation == 'vertical' and xva._tickValues: xpos = map(xva.scale, [xva._valueMin] + xva._tickValues) steps = [] for i in range(len(xpos)-1): steps.append(xpos[i+1] - xpos[i]) back.deltaSteps = steps elif back.orientation == 'horizontal' and yva._tickValues: ypos = map(yva.scale, [yva._valueMin] + yva._tickValues) steps = [] for i in range(len(ypos)-1): steps.append(ypos[i+1] - ypos[i]) back.deltaSteps = steps elif isinstance(back, DoubleGrid): # Ideally, these lines would not be needed... back.grid0.x = self.x back.grid0.y = self.y back.grid0.width = self.width back.grid0.height = self.height back.grid1.x = self.x back.grid1.y = self.y back.grid1.width = self.width back.grid1.height = self.height # some room left for optimization... 
if back.grid0.orientation == 'vertical' and xva._tickValues: xpos = map(xva.scale, [xva._valueMin] + xva._tickValues) steps = [] for i in range(len(xpos)-1): steps.append(xpos[i+1] - xpos[i]) back.grid0.deltaSteps = steps elif back.grid0.orientation == 'horizontal' and yva._tickValues: ypos = map(yva.scale, [yva._valueMin] + yva._tickValues) steps = [] for i in range(len(ypos)-1): steps.append(ypos[i+1] - ypos[i]) back.grid0.deltaSteps = steps if back.grid1.orientation == 'vertical' and xva._tickValues: xpos = map(xva.scale, [xva._valueMin] + xva._tickValues) steps = [] for i in range(len(xpos)-1): steps.append(xpos[i+1] - xpos[i]) back.grid1.deltaSteps = steps elif back.grid1.orientation == 'horizontal' and yva._tickValues: ypos = map(yva.scale, [yva._valueMin] + yva._tickValues) steps = [] for i in range(len(ypos)-1): steps.append(ypos[i+1] - ypos[i]) back.grid1.deltaSteps = steps self.calcPositions() width, height, scaleFactor = self.width, self.height, self.scaleFactor if scaleFactor and scaleFactor!=1: #g = Drawing(scaleFactor*width, scaleFactor*height) g.transform = (scaleFactor, 0, 0, scaleFactor,0,0) else: g = Group() g.add(self.makeBackground()) g.add(self.xValueAxis) g.add(self.yValueAxis) g.add(self.makeLines()) return g class AreaLinePlot(LinePlot): '''we're given data in the form [(X1,Y11,..Y1M)....(Xn,Yn1,...YnM)]'''#' def __init__(self): LinePlot.__init__(self) self._inFill = 1 self.reversePlotOrder = 1 self.data = [(1,20,100,30),(2,11,50,15),(3,15,70,40)] def draw(self): try: odata = self.data n = len(odata) m = len(odata[0]) S = n*[0] self.data = [] for i in xrange(1,m): D = [] for j in xrange(n): S[j] = S[j] + odata[j][i] D.append((odata[j][0],S[j])) self.data.append(D) return LinePlot.draw(self) finally: self.data = odata class SplitLinePlot(AreaLinePlot): def __init__(self): AreaLinePlot.__init__(self) self.xValueAxis = NormalDateXValueAxis() self.yValueAxis = AdjYValueAxis() 
self.data=[(20030601,0.95,0.05,0.0),(20030701,0.95,0.05,0.0),(20030801,0.95,0.05,0.0),(20030901,0.95,0.05,0.0),(20031001,0.95,0.05,0.0),(20031101,0.95,0.05,0.0),(20031201,0.95,0.05,0.0),(20040101,0.95,0.05,0.0),(20040201,0.95,0.05,0.0),(20040301,0.95,0.05,0.0),(20040401,0.95,0.05,0.0),(20040501,0.95,0.05,0.0),(20040601,0.95,0.05,0.0),(20040701,0.95,0.05,0.0),(20040801,0.95,0.05,0.0),(20040901,0.95,0.05,0.0),(20041001,0.95,0.05,0.0),(20041101,0.95,0.05,0.0),(20041201,0.95,0.05,0.0),(20050101,0.95,0.05,0.0),(20050201,0.95,0.05,0.0),(20050301,0.95,0.05,0.0),(20050401,0.95,0.05,0.0),(20050501,0.95,0.05,0.0),(20050601,0.95,0.05,0.0),(20050701,0.95,0.05,0.0),(20050801,0.95,0.05,0.0),(20050901,0.95,0.05,0.0),(20051001,0.95,0.05,0.0),(20051101,0.95,0.05,0.0),(20051201,0.95,0.05,0.0),(20060101,0.95,0.05,0.0),(20060201,0.95,0.05,0.0),(20060301,0.95,0.05,0.0),(20060401,0.95,0.05,0.0),(20060501,0.95,0.05,0.0),(20060601,0.95,0.05,0.0),(20060701,0.95,0.05,0.0),(20060801,0.95,0.05,0.0),(20060901,0.95,0.05,0.0),(20061001,0.95,0.05,0.0),(20061101,0.95,0.05,0.0),(20061201,0.95,0.05,0.0),(20070101,0.95,0.05,0.0),(20070201,0.95,0.05,0.0),(20070301,0.95,0.05,0.0),(20070401,0.95,0.05,0.0),(20070501,0.95,0.05,0.0),(20070601,0.95,0.05,0.0),(20070701,0.95,0.05,0.0),(20070801,0.95,0.05,0.0),(20070901,0.95,0.05,0.0),(20071001,0.95,0.05,0.0),(20071101,0.95,0.05,0.0),(20071201,0.95,0.05,0.0),(20080101,0.95,0.05,0.0),(20080201,0.95,0.05,0.0),(20080301,0.95,0.05,0.0),(20080401,0.95,0.05,0.0),(20080501,0.95,0.05,0.0),(20080601,0.95,0.05,0.0),(20080701,0.95,0.05,0.0),(20080801,0.95,0.05,0.0),(20080901,0.95,0.05,0.0),(20081001,0.95,0.05,0.0),(20081101,0.95,0.05,0.0),(20081201,0.95,0.05,0.0),(20090101,0.95,0.05,0.0),(20090201,0.91,0.09,0.0),(20090301,0.91,0.09,0.0),(20090401,0.91,0.09,0.0),(20090501,0.91,0.09,0.0),(20090601,0.91,0.09,0.0),(20090701,0.91,0.09,0.0),(20090801,0.91,0.09,0.0),(20090901,0.91,0.09,0.0),(20091001,0.91,0.09,0.0),(20091101,0.91,0.09,0.0),(20091201,0.91,0.09,0.0),(20100101,0.91,0.09,0.0),(20100201,0.81,0.19,0.0),(20100301,0.81,0.19,0.0),(20100401,0.81,0.19,0.0),(20100501,0.81,0.19,0.0),(20100601,0.81,0.19,0.0),(20100701,0.81,0.19,0.0),(20100801,0.81,0.19,0.0),(20100901,0.81,0.19,0.0),(20101001,0.81,0.19,0.0),(20101101,0.81,0.19,0.0),(20101201,0.81,0.19,0.0),(20110101,0.81,0.19,0.0),(20110201,0.72,0.28,0.0),(20110301,0.72,0.28,0.0),(20110401,0.72,0.28,0.0),(20110501,0.72,0.28,0.0),(20110601,0.72,0.28,0.0),(20110701,0.72,0.28,0.0),(20110801,0.72,0.28,0.0),(20110901,0.72,0.28,0.0),(20111001,0.72,0.28,0.0),(20111101,0.72,0.28,0.0),(20111201,0.72,0.28,0.0),(20120101,0.72,0.28,0.0),(20120201,0.53,0.47,0.0),(20120301,0.53,0.47,0.0),(20120401,0.53,0.47,0.0),(20120501,0.53,0.47,0.0),(20120601,0.53,0.47,0.0),(20120701,0.53,0.47,0.0),(20120801,0.53,0.47,0.0),(20120901,0.53,0.47,0.0),(20121001,0.53,0.47,0.0),(20121101,0.53,0.47,0.0),(20121201,0.53,0.47,0.0),(20130101,0.53,0.47,0.0),(20130201,0.44,0.56,0.0),(20130301,0.44,0.56,0.0),(20130401,0.44,0.56,0.0),(20130501,0.44,0.56,0.0),(20130601,0.44,0.56,0.0),(20130701,0.44,0.56,0.0),(20130801,0.44,0.56,0.0),(20130901,0.44,0.56,0.0),(20131001,0.44,0.56,0.0),(20131101,0.44,0.56,0.0),(20131201,0.44,0.56,0.0),(20140101,0.44,0.56,0.0),(20140201,0.36,0.5,0.14),(20140301,0.36,0.5,0.14),(20140401,0.36,0.5,0.14),(20140501,0.36,0.5,0.14),(20140601,0.36,0.5,0.14),(20140701,0.36,0.5,0.14),(20140801,0.36,0.5,0.14),(20140901,0.36,0.5,0.14),(20141001,0.36,0.5,0.14),(20141101,0.36,0.5,0.14),(20141201,0.36,0.5,0.14),(20150101,0.36,0.5,0.14),(20150201,0.3,0.41,0.29),(20150301,0.3,0.41,
0.29),(20150401,0.3,0.41,0.29),(20150501,0.3,0.41,0.29),(20150601,0.3,0.41,0.29),(20150701,0.3,0.41,0.29),(20150801,0.3,0.41,0.29),(20150901,0.3,0.41,0.29),(20151001,0.3,0.41,0.29),(20151101,0.3,0.41,0.29),(20151201,0.3,0.41,0.29),(20160101,0.3,0.41,0.29),(20160201,0.26,0.36,0.38),(20160301,0.26,0.36,0.38),(20160401,0.26,0.36,0.38),(20160501,0.26,0.36,0.38),(20160601,0.26,0.36,0.38),(20160701,0.26,0.36,0.38),(20160801,0.26,0.36,0.38),(20160901,0.26,0.36,0.38),(20161001,0.26,0.36,0.38),(20161101,0.26,0.36,0.38),(20161201,0.26,0.36,0.38),(20170101,0.26,0.36,0.38),(20170201,0.2,0.3,0.5),(20170301,0.2,0.3,0.5),(20170401,0.2,0.3,0.5),(20170501,0.2,0.3,0.5),(20170601,0.2,0.3,0.5),(20170701,0.2,0.3,0.5),(20170801,0.2,0.3,0.5),(20170901,0.2,0.3,0.5),(20171001,0.2,0.3,0.5),(20171101,0.2,0.3,0.5),(20171201,0.2,0.3,0.5),(20180101,0.2,0.3,0.5),(20180201,0.13,0.37,0.5),(20180301,0.13,0.37,0.5),(20180401,0.13,0.37,0.5),(20180501,0.13,0.37,0.5),(20180601,0.13,0.37,0.5),(20180701,0.13,0.37,0.5),(20180801,0.13,0.37,0.5),(20180901,0.13,0.37,0.5),(20181001,0.13,0.37,0.5),(20181101,0.13,0.37,0.5),(20181201,0.13,0.37,0.5),(20190101,0.13,0.37,0.5),(20190201,0.1,0.4,0.5),(20190301,0.1,0.4,0.5),(20190401,0.1,0.4,0.5),(20190501,0.1,0.4,0.5),(20190601,0.1,0.4,0.5),(20190701,0.1,0.4,0.5),(20190801,0.1,0.4,0.5),(20190901,0.1,0.4,0.5),(20191001,0.1,0.4,0.5),(20191101,0.1,0.4,0.5),(20191201,0.1,0.4,0.5),(20200101,0.1,0.4,0.5)] self.yValueAxis.requiredRange = None self.yValueAxis.leftAxisPercent = 0 self.yValueAxis.leftAxisOrigShiftMin = 0 self.yValueAxis.leftAxisOrigShiftIPC = 0 self.lines[0].strokeColor = colors.toColor(0x0033cc) self.lines[1].strokeColor = colors.toColor(0x99c3ff) self.lines[2].strokeColor = colors.toColor(0xCC0033) def _maxWidth(T, fontName, fontSize): '''return max stringWidth for the list of strings T''' if type(T) not in (type(()),type([])): T = (T,) T = filter(None,T) return T and max(map(lambda t,sW=stringWidth,fN=fontName, fS=fontSize: sW(t,fN,fS),T)) or 0 class ScatterPlot(LinePlot): """A scatter plot widget""" _attrMap = AttrMap(BASE=LinePlot, width = AttrMapValue(isNumber, desc="Width of the area inside the axes"), height = AttrMapValue(isNumber, desc="Height of the area inside the axes"), outerBorderOn = AttrMapValue(isBoolean, desc="Is there an outer border (continuation of axes)"), outerBorderColor = AttrMapValue(isColorOrNone, desc="Color of outer border (if any)"), labelOffset = AttrMapValue(isNumber, desc="Space between label and Axis (or other labels)",advancedUsage=1), axisTickLengths = AttrMapValue(isNumber, desc="Lenth of the ticks on both axes"), axisStrokeWidth = AttrMapValue(isNumber, desc="Stroke width for both axes"), xLabel = AttrMapValue(isString, desc="Label for the whole X-Axis"), yLabel = AttrMapValue(isString, desc="Label for the whole Y-Axis"), data = AttrMapValue(isAnything, desc='Data points - a list of x/y tuples.'), strokeColor = AttrMapValue(isColorOrNone, desc='Color used for border of plot area.'), fillColor = AttrMapValue(isColorOrNone, desc='Color used for background interior of plot area.'), leftPadding = AttrMapValue(isNumber, desc='Padding on left of drawing'), rightPadding = AttrMapValue(isNumber, desc='Padding on right of drawing'), topPadding = AttrMapValue(isNumber, desc='Padding at top of drawing'), bottomPadding = AttrMapValue(isNumber, desc='Padding at bottom of drawing'), ) def __init__(self): LinePlot.__init__(self) self.width = 142 self.height = 77 self.outerBorderOn = 1 self.outerBorderColor = colors.black self.background = None _labelOffset = 3 
_axisTickLengths = 2 _axisStrokeWidth = 0.5 self.yValueAxis.valueMin = None self.yValueAxis.valueMax = None self.yValueAxis.valueStep = None self.yValueAxis.labelTextFormat = '%s' self.xLabel="X Lable" self.xValueAxis.labels.fontSize = 6 self.yLabel="Y Lable" self.yValueAxis.labels.fontSize = 6 self.data =[((0.030, 62.73), (0.074, 54.363), (1.216, 17.964)), ((1.360, 11.621), (1.387, 50.011), (1.428, 68.953)), ((1.444, 86.888), (1.754, 35.58), (1.766, 36.05))] #values for lineplot self.joinedLines = 0 self.leftPadding=5 self.rightPadding=10 self.topPadding=5 self.bottomPadding=5 self.x = self.leftPadding+_axisTickLengths+(_labelOffset*2) self.x=self.x+_maxWidth(str(self.yValueAxis.valueMax), self.yValueAxis.labels.fontName, self.yValueAxis.labels.fontSize) self.y = self.bottomPadding+_axisTickLengths+_labelOffset+self.xValueAxis.labels.fontSize self.xValueAxis.labels.dy = -_labelOffset self.xValueAxis.tickDown = _axisTickLengths self.xValueAxis.strokeWidth = _axisStrokeWidth self.xValueAxis.rangeRound='both' self.yValueAxis.labels.dx = -_labelOffset self.yValueAxis.tickLeft = _axisTickLengths self.yValueAxis.strokeWidth = _axisStrokeWidth self.yValueAxis.rangeRound='both' self.lineLabelFormat="%.2f" self.lineLabels.fontSize = 5 self.lineLabels.boxAnchor = 'e' self.lineLabels.dx = -2 self.lineLabelNudge = 0 self.lines.symbol=makeMarker('FilledCircle',size=3) self.lines[1].symbol=makeMarker('FilledDiamond',size=3) self.lines[2].symbol=makeMarker('FilledSquare',size=3) self.lines[2].strokeColor = colors.green def _getDrawingDimensions(self): tx = self.leftPadding+self.yValueAxis.tickLeft+(self.yValueAxis.labels.dx*2)+self.xValueAxis.labels.fontSize tx=tx+(5*_maxWidth(str(self.yValueAxis.valueMax), self.yValueAxis.labels.fontName, self.yValueAxis.labels.fontSize)) tx=tx+self.width+self.rightPadding t=('%.2f%%'%self.xValueAxis.valueMax) tx=tx+(_maxWidth(t, self.yValueAxis.labels.fontName, self.yValueAxis.labels.fontSize)) ty = self.bottomPadding+self.xValueAxis.tickDown+(self.xValueAxis.labels.dy*2)+(self.xValueAxis.labels.fontSize*2) ty=ty+self.yValueAxis.labels.fontSize+self.height+self.topPadding #print (tx, ty) return (tx,ty) def demo(self,drawing=None): if not drawing: tx,ty=self._getDrawingDimensions() drawing = Drawing(tx,ty) drawing.add(self.draw()) return drawing def draw(self): ascent=getFont(self.xValueAxis.labels.fontName).face.ascent if ascent==0: ascent=0.718 # default (from helvetica) ascent=ascent*self.xValueAxis.labels.fontSize # normalize #basic LinePlot - does the Axes, Ticks etc lp = LinePlot.draw(self) xLabel = self.xLabel if xLabel: #Overall label for the X-axis xl=Label() xl.x = (self.x+self.width)/2.0 xl.y = 0 xl.fontName = self.xValueAxis.labels.fontName xl.fontSize = self.xValueAxis.labels.fontSize xl.setText(xLabel) lp.add(xl) yLabel = self.yLabel if yLabel: #Overall label for the Y-axis yl=Label() yl.angle = 90 yl.x = 0 yl.y = (self.y+self.height/2.0) yl.fontName = self.yValueAxis.labels.fontName yl.fontSize = self.yValueAxis.labels.fontSize yl.setText(yLabel) lp.add(yl) # do a bounding box - in the same style as the axes if self.outerBorderOn: lp.add(Rect(self.x, self.y, self.width, self.height, strokeColor = self.outerBorderColor, strokeWidth = self.yValueAxis.strokeWidth, fillColor = None)) lp.shift(self.leftPadding, self.bottomPadding) return lp def sample1a(): "A line plot with non-equidistant points in x-axis." 
drawing = Drawing(400, 200) data = [ ((1,1), (2,2), (2.5,1), (3,3), (4,5)), ((1,2), (2,3), (2.5,2), (3.5,5), (4,6)) ] lp = LinePlot() lp.x = 50 lp.y = 50 lp.height = 125 lp.width = 300 lp.data = data lp.joinedLines = 1 lp.strokeColor = colors.black lp.lines.symbol = makeMarker('UK_Flag') lp.lines[0].strokeWidth = 2 lp.lines[1].strokeWidth = 4 lp.xValueAxis.valueMin = 0 lp.xValueAxis.valueMax = 5 lp.xValueAxis.valueStep = 1 lp.yValueAxis.valueMin = 0 lp.yValueAxis.valueMax = 7 lp.yValueAxis.valueStep = 1 drawing.add(lp) return drawing def sample1b(): "A line plot with non-equidistant points in x-axis." drawing = Drawing(400, 200) data = [ ((1,1), (2,2), (2.5,1), (3,3), (4,5)), ((1,2), (2,3), (2.5,2), (3.5,5), (4,6)) ] lp = LinePlot() lp.x = 50 lp.y = 50 lp.height = 125 lp.width = 300 lp.data = data lp.joinedLines = 1 lp.lines.symbol = makeMarker('Circle') lp.lineLabelFormat = '%2.0f' lp.strokeColor = colors.black lp.xValueAxis.valueMin = 0 lp.xValueAxis.valueMax = 5 lp.xValueAxis.valueSteps = [1, 2, 2.5, 3, 4, 5] lp.xValueAxis.labelTextFormat = '%2.1f' lp.yValueAxis.valueMin = 0 lp.yValueAxis.valueMax = 7 lp.yValueAxis.valueStep = 1 drawing.add(lp) return drawing def sample1c(): "A line plot with non-equidistant points in x-axis." drawing = Drawing(400, 200) data = [ ((1,1), (2,2), (2.5,1), (3,3), (4,5)), ((1,2), (2,3), (2.5,2), (3.5,5), (4,6)) ] lp = LinePlot() lp.x = 50 lp.y = 50 lp.height = 125 lp.width = 300 lp.data = data lp.joinedLines = 1 lp.lines[0].symbol = makeMarker('FilledCircle') lp.lines[1].symbol = makeMarker('Circle') lp.lineLabelFormat = '%2.0f' lp.strokeColor = colors.black lp.xValueAxis.valueMin = 0 lp.xValueAxis.valueMax = 5 lp.xValueAxis.valueSteps = [1, 2, 2.5, 3, 4, 5] lp.xValueAxis.labelTextFormat = '%2.1f' lp.yValueAxis.valueMin = 0 lp.yValueAxis.valueMax = 7 lp.yValueAxis.valueSteps = [1, 2, 3, 5, 6] drawing.add(lp) return drawing def preprocessData(series): "Convert date strings into seconds and multiply values by 100." return map(lambda x: (str2seconds(x[0]), x[1]*100), series) def sample2(): "A line plot with non-equidistant points in x-axis." drawing = Drawing(400, 200) data = [ (('25/11/1991',1), ('30/11/1991',1.000933333), ('31/12/1991',1.0062), ('31/01/1992',1.0112), ('29/02/1992',1.0158), ('31/03/1992',1.020733333), ('30/04/1992',1.026133333), ('31/05/1992',1.030266667), ('30/06/1992',1.034466667), ('31/07/1992',1.038733333), ('31/08/1992',1.0422), ('30/09/1992',1.045533333), ('31/10/1992',1.049866667), ('30/11/1992',1.054733333), ('31/12/1992',1.061), ), ] data[0] = preprocessData(data[0]) lp = LinePlot() lp.x = 50 lp.y = 50 lp.height = 125 lp.width = 300 lp.data = data lp.joinedLines = 1 lp.lines.symbol = makeMarker('FilledDiamond') lp.strokeColor = colors.black start = mktime(mkTimeTuple('25/11/1991')) t0 = mktime(mkTimeTuple('30/11/1991')) t1 = mktime(mkTimeTuple('31/12/1991')) t2 = mktime(mkTimeTuple('31/03/1992')) t3 = mktime(mkTimeTuple('30/06/1992')) t4 = mktime(mkTimeTuple('30/09/1992')) end = mktime(mkTimeTuple('31/12/1992')) lp.xValueAxis.valueMin = start lp.xValueAxis.valueMax = end lp.xValueAxis.valueSteps = [start, t0, t1, t2, t3, t4, end] lp.xValueAxis.labelTextFormat = seconds2str lp.xValueAxis.labels[1].dy = -20 lp.xValueAxis.labels[2].dy = -35 lp.yValueAxis.labelTextFormat = '%4.2f' lp.yValueAxis.valueMin = 100 lp.yValueAxis.valueMax = 110 lp.yValueAxis.valueStep = 2 drawing.add(lp) return drawing
gpl-2.0
zploskey/servo
tests/wpt/update/tree.py
167
6560
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import re import tempfile from wptrunner import update as wptupdate from wptrunner.update.tree import Commit, CommitMessage, get_unique_name class HgTree(wptupdate.tree.HgTree): def __init__(self, *args, **kwargs): self.commit_cls = kwargs.pop("commit_cls", Commit) wptupdate.tree.HgTree.__init__(self, *args, **kwargs) # TODO: The extra methods for upstreaming patches from a # hg checkout class GitTree(wptupdate.tree.GitTree): def __init__(self, *args, **kwargs): """Extension of the basic GitTree with extra methods for transfering patches""" commit_cls = kwargs.pop("commit_cls", Commit) wptupdate.tree.GitTree.__init__(self, *args, **kwargs) self.commit_cls = commit_cls def create_branch(self, name, ref=None): """Create a named branch, :param name: String representing the branch name. :param ref: None to use current HEAD or rev that the branch should point to""" args = [] if ref is not None: if hasattr(ref, "sha1"): ref = ref.sha1 args.append(ref) self.git("branch", name, *args) def commits_by_message(self, message, path=None): """List of commits with messages containing a given string. :param message: The string that must be contained in the message. :param path: Path to a file or directory the commit touches """ args = ["--pretty=format:%H", "--reverse", "-z", "--grep=%s" % message] if path is not None: args.append("--") args.append(path) data = self.git("log", *args) return [self.commit_cls(self, sha1) for sha1 in data.split("\0")] def log(self, base_commit=None, path=None): """List commits touching a certian path from a given base commit. :base_param commit: Commit object for the base commit from which to log :param path: Path that the commits must touch """ args = ["--pretty=format:%H", "--reverse", "-z"] if base_commit is not None: args.append("%s.." % base_commit.sha1) if path is not None: args.append("--") args.append(path) data = self.git("log", *args) return [self.commit_cls(self, sha1) for sha1 in data.split("\0") if sha1] def import_patch(self, patch): """Import a patch file into the tree and commit it :param patch: a Patch object containing the patch to import """ with tempfile.NamedTemporaryFile() as f: f.write(patch.diff) f.flush() f.seek(0) self.git("apply", "--index", f.name) self.git("commit", "-m", patch.message.text, "--author=%s" % patch.full_author) def rebase(self, ref, continue_rebase=False): """Rebase the current branch onto another commit. :param ref: A Commit object for the commit to rebase onto :param continue_rebase: Continue an in-progress rebase""" if continue_rebase: args = ["--continue"] else: if hasattr(ref, "sha1"): ref = ref.sha1 args = [ref] self.git("rebase", *args) def push(self, remote, local_ref, remote_ref, force=False): """Push local changes to a remote. 
:param remote: URL of the remote to push to :param local_ref: Local branch to push :param remote_ref: Name of the remote branch to push to :param force: Do a force push """ args = [] if force: args.append("-f") args.extend([remote, "%s:%s" % (local_ref, remote_ref)]) self.git("push", *args) def unique_branch_name(self, prefix): """Get an unused branch name in the local tree :param prefix: Prefix to use at the start of the branch name""" branches = [ref[len("refs/heads/"):] for sha1, ref in self.list_refs() if ref.startswith("refs/heads/")] return get_unique_name(branches, prefix) class Patch(object): def __init__(self, author, email, message, diff): self.author = author self.email = email if isinstance(message, CommitMessage): self.message = message else: self.message = GeckoCommitMessage(message) self.diff = diff def __repr__(self): return "<Patch (%s)>" % self.message.full_summary @property def full_author(self): return "%s <%s>" % (self.author, self.email) @property def empty(self): return bool(self.diff.strip()) class GeckoCommitMessage(CommitMessage): """Commit message following the Gecko conventions for identifying bug number and reviewer""" # c.f. http://hg.mozilla.org/hgcustom/version-control-tools/file/tip/hghooks/mozhghooks/commit-message.py # which has the regexps that are actually enforced by the VCS hooks. These are # slightly different because we need to parse out specific parts of the message rather # than just enforce a general pattern. _bug_re = re.compile("^Bug (\d+)[^\w]*(?:Part \d+[^\w]*)?(.*?)\s*(?:r=(\w*))?$", re.IGNORECASE) _backout_re = re.compile("^(?:Back(?:ing|ed)\s+out)|Backout|(?:Revert|(?:ed|ing))", re.IGNORECASE) _backout_sha1_re = re.compile("(?:\s|\:)(0-9a-f){12}") def _parse_message(self): CommitMessage._parse_message(self) if self._backout_re.match(self.full_summary): self.backouts = self._backout_re.findall(self.full_summary) else: self.backouts = [] m = self._bug_re.match(self.full_summary) if m is not None: self.bug, self.summary, self.reviewer = m.groups() else: self.bug, self.summary, self.reviewer = None, self.full_summary, None class GeckoCommit(Commit): msg_cls = GeckoCommitMessage def export_patch(self, path=None): """Convert a commit in the tree to a Patch with the bug number and reviewer stripped from the message""" args = ["--binary", self.sha1] if path is not None: args.append("--") args.append(path) diff = self.git("show", *args) return Patch(self.author, self.email, self.message, diff)
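# Illustrative sketch (added for clarity, not part of the upstream file): what the
# _bug_re pattern defined above extracts from a typical Gecko commit summary. The
# sample message and helper name are assumptions chosen only for demonstration.
def _demo_parse_gecko_summary():
    import re
    bug_re = re.compile(r"^Bug (\d+)[^\w]*(?:Part \d+[^\w]*)?(.*?)\s*(?:r=(\w*))?$",
                        re.IGNORECASE)
    m = bug_re.match("Bug 1234567 - Part 1 - Implement the widget r=jgraham")
    # m.groups() -> ('1234567', 'Implement the widget', 'jgraham')
    return m.groups() if m is not None else (None, None, None)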
mpl-2.0
opencontrail-ci-admin/git-repo
subcmds/info.py
46
6021
# # Copyright (C) 2012 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from command import PagedCommand from color import Coloring from error import NoSuchProjectError from git_refs import R_M class _Coloring(Coloring): def __init__(self, config): Coloring.__init__(self, config, "status") class Info(PagedCommand): common = True helpSummary = "Get info on the manifest branch, current branch or unmerged branches" helpUsage = "%prog [-dl] [-o [-b]] [<project>...]" def _Options(self, p): p.add_option('-d', '--diff', dest='all', action='store_true', help="show full info and commit diff including remote branches") p.add_option('-o', '--overview', dest='overview', action='store_true', help='show overview of all local commits') p.add_option('-b', '--current-branch', dest="current_branch", action="store_true", help="consider only checked out branches") p.add_option('-l', '--local-only', dest="local", action="store_true", help="Disable all remote operations") def Execute(self, opt, args): self.out = _Coloring(self.manifest.globalConfig) self.heading = self.out.printer('heading', attr = 'bold') self.headtext = self.out.printer('headtext', fg = 'yellow') self.redtext = self.out.printer('redtext', fg = 'red') self.sha = self.out.printer("sha", fg = 'yellow') self.text = self.out.nofmt_printer('text') self.dimtext = self.out.printer('dimtext', attr = 'dim') self.opt = opt manifestConfig = self.manifest.manifestProject.config mergeBranch = manifestConfig.GetBranch("default").merge manifestGroups = (manifestConfig.GetString('manifest.groups') or 'all,-notdefault') self.heading("Manifest branch: ") self.headtext(self.manifest.default.revisionExpr) self.out.nl() self.heading("Manifest merge branch: ") self.headtext(mergeBranch) self.out.nl() self.heading("Manifest groups: ") self.headtext(manifestGroups) self.out.nl() self.printSeparator() if not opt.overview: self.printDiffInfo(args) else: self.printCommitOverview(args) def printSeparator(self): self.text("----------------------------") self.out.nl() def printDiffInfo(self, args): try: projs = self.GetProjects(args) except NoSuchProjectError: return for p in projs: self.heading("Project: ") self.headtext(p.name) self.out.nl() self.heading("Mount path: ") self.headtext(p.worktree) self.out.nl() self.heading("Current revision: ") self.headtext(p.revisionExpr) self.out.nl() localBranches = p.GetBranches().keys() self.heading("Local Branches: ") self.redtext(str(len(localBranches))) if len(localBranches) > 0: self.text(" [") self.text(", ".join(localBranches)) self.text("]") self.out.nl() if self.opt.all: self.findRemoteLocalDiff(p) self.printSeparator() def findRemoteLocalDiff(self, project): #Fetch all the latest commits if not self.opt.local: project.Sync_NetworkHalf(quiet=True, current_branch_only=True) logTarget = R_M + self.manifest.manifestProject.config.GetBranch("default").merge bareTmp = project.bare_git._bare project.bare_git._bare = False localCommits = project.bare_git.rev_list( '--abbrev=8', '--abbrev-commit', '--pretty=oneline', 
logTarget + "..", '--') originCommits = project.bare_git.rev_list( '--abbrev=8', '--abbrev-commit', '--pretty=oneline', ".." + logTarget, '--') project.bare_git._bare = bareTmp self.heading("Local Commits: ") self.redtext(str(len(localCommits))) self.dimtext(" (on current branch)") self.out.nl() for c in localCommits: split = c.split() self.sha(split[0] + " ") self.text(" ".join(split[1:])) self.out.nl() self.printSeparator() self.heading("Remote Commits: ") self.redtext(str(len(originCommits))) self.out.nl() for c in originCommits: split = c.split() self.sha(split[0] + " ") self.text(" ".join(split[1:])) self.out.nl() def printCommitOverview(self, args): all_branches = [] for project in self.GetProjects(args): br = [project.GetUploadableBranch(x) for x in project.GetBranches()] br = [x for x in br if x] if self.opt.current_branch: br = [x for x in br if x.name == project.CurrentBranch] all_branches.extend(br) if not all_branches: return self.out.nl() self.heading('Projects Overview') project = None for branch in all_branches: if project != branch.project: project = branch.project self.out.nl() self.headtext(project.relpath) self.out.nl() commits = branch.commits date = branch.date self.text('%s %-33s (%2d commit%s, %s)' % ( branch.name == project.CurrentBranch and '*' or ' ', branch.name, len(commits), len(commits) != 1 and 's' or '', date)) self.out.nl() for commit in commits: split = commit.split() self.text('{0:38}{1} '.format('','-')) self.sha(split[0] + " ") self.text(" ".join(split[1:])) self.out.nl()
apache-2.0
benvermaercke/pyqtgraph
pyqtgraph/flowchart/library/Filters.py
24
13203
# -*- coding: utf-8 -*- import numpy as np from ...Qt import QtCore, QtGui from ..Node import Node from . import functions from ... import functions as pgfn from .common import * from ...python2_3 import xrange from ... import PolyLineROI from ... import Point from ... import metaarray as metaarray class Downsample(CtrlNode): """Downsample by averaging samples together.""" nodeName = 'Downsample' uiTemplate = [ ('n', 'intSpin', {'min': 1, 'max': 1000000}) ] def processData(self, data): return functions.downsample(data, self.ctrls['n'].value(), axis=0) class Subsample(CtrlNode): """Downsample by selecting every Nth sample.""" nodeName = 'Subsample' uiTemplate = [ ('n', 'intSpin', {'min': 1, 'max': 1000000}) ] def processData(self, data): return data[::self.ctrls['n'].value()] class Bessel(CtrlNode): """Bessel filter. Input data must have time values.""" nodeName = 'BesselFilter' uiTemplate = [ ('band', 'combo', {'values': ['lowpass', 'highpass'], 'index': 0}), ('cutoff', 'spin', {'value': 1000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}), ('order', 'intSpin', {'value': 4, 'min': 1, 'max': 16}), ('bidir', 'check', {'checked': True}) ] def processData(self, data): s = self.stateGroup.state() if s['band'] == 'lowpass': mode = 'low' else: mode = 'high' return functions.besselFilter(data, bidir=s['bidir'], btype=mode, cutoff=s['cutoff'], order=s['order']) class Butterworth(CtrlNode): """Butterworth filter""" nodeName = 'ButterworthFilter' uiTemplate = [ ('band', 'combo', {'values': ['lowpass', 'highpass'], 'index': 0}), ('wPass', 'spin', {'value': 1000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}), ('wStop', 'spin', {'value': 2000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}), ('gPass', 'spin', {'value': 2.0, 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'dB', 'siPrefix': True}), ('gStop', 'spin', {'value': 20.0, 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'dB', 'siPrefix': True}), ('bidir', 'check', {'checked': True}) ] def processData(self, data): s = self.stateGroup.state() if s['band'] == 'lowpass': mode = 'low' else: mode = 'high' ret = functions.butterworthFilter(data, bidir=s['bidir'], btype=mode, wPass=s['wPass'], wStop=s['wStop'], gPass=s['gPass'], gStop=s['gStop']) return ret class ButterworthNotch(CtrlNode): """Butterworth notch filter""" nodeName = 'ButterworthNotchFilter' uiTemplate = [ ('low_wPass', 'spin', {'value': 1000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}), ('low_wStop', 'spin', {'value': 2000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}), ('low_gPass', 'spin', {'value': 2.0, 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'dB', 'siPrefix': True}), ('low_gStop', 'spin', {'value': 20.0, 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'dB', 'siPrefix': True}), ('high_wPass', 'spin', {'value': 3000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}), ('high_wStop', 'spin', {'value': 4000., 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'Hz', 'siPrefix': True}), ('high_gPass', 'spin', {'value': 2.0, 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'dB', 'siPrefix': True}), ('high_gStop', 'spin', {'value': 20.0, 'step': 1, 'dec': True, 'range': [0.0, None], 'suffix': 'dB', 'siPrefix': True}), ('bidir', 'check', {'checked': True}) ] def processData(self, data): s = self.stateGroup.state() low = 
functions.butterworthFilter(data, bidir=s['bidir'], btype='low', wPass=s['low_wPass'], wStop=s['low_wStop'], gPass=s['low_gPass'], gStop=s['low_gStop']) high = functions.butterworthFilter(data, bidir=s['bidir'], btype='high', wPass=s['high_wPass'], wStop=s['high_wStop'], gPass=s['high_gPass'], gStop=s['high_gStop']) return low + high class Mean(CtrlNode): """Filters data by taking the mean of a sliding window""" nodeName = 'MeanFilter' uiTemplate = [ ('n', 'intSpin', {'min': 1, 'max': 1000000}) ] @metaArrayWrapper def processData(self, data): n = self.ctrls['n'].value() return functions.rollingSum(data, n) / n class Median(CtrlNode): """Filters data by taking the median of a sliding window""" nodeName = 'MedianFilter' uiTemplate = [ ('n', 'intSpin', {'min': 1, 'max': 1000000}) ] @metaArrayWrapper def processData(self, data): try: import scipy.ndimage except ImportError: raise Exception("MedianFilter node requires the package scipy.ndimage.") return scipy.ndimage.median_filter(data, self.ctrls['n'].value()) class Mode(CtrlNode): """Filters data by taking the mode (histogram-based) of a sliding window""" nodeName = 'ModeFilter' uiTemplate = [ ('window', 'intSpin', {'value': 500, 'min': 1, 'max': 1000000}), ] @metaArrayWrapper def processData(self, data): return functions.modeFilter(data, self.ctrls['window'].value()) class Denoise(CtrlNode): """Removes anomalous spikes from data, replacing with nearby values""" nodeName = 'DenoiseFilter' uiTemplate = [ ('radius', 'intSpin', {'value': 2, 'min': 0, 'max': 1000000}), ('threshold', 'doubleSpin', {'value': 4.0, 'min': 0, 'max': 1000}) ] def processData(self, data): #print "DENOISE" s = self.stateGroup.state() return functions.denoise(data, **s) class Gaussian(CtrlNode): """Gaussian smoothing filter.""" nodeName = 'GaussianFilter' uiTemplate = [ ('sigma', 'doubleSpin', {'min': 0, 'max': 1000000}) ] @metaArrayWrapper def processData(self, data): try: import scipy.ndimage except ImportError: raise Exception("GaussianFilter node requires the package scipy.ndimage.") return pgfn.gaussianFilter(data, self.ctrls['sigma'].value()) class Derivative(CtrlNode): """Returns the pointwise derivative of the input""" nodeName = 'DerivativeFilter' def processData(self, data): if hasattr(data, 'implements') and data.implements('MetaArray'): info = data.infoCopy() if 'values' in info[0]: info[0]['values'] = info[0]['values'][:-1] return metaarray.MetaArray(data[1:] - data[:-1], info=info) else: return data[1:] - data[:-1] class Integral(CtrlNode): """Returns the pointwise integral of the input""" nodeName = 'IntegralFilter' @metaArrayWrapper def processData(self, data): data[1:] += data[:-1] return data class Detrend(CtrlNode): """Removes linear trend from the data""" nodeName = 'DetrendFilter' @metaArrayWrapper def processData(self, data): try: from scipy.signal import detrend except ImportError: raise Exception("DetrendFilter node requires the package scipy.signal.") return detrend(data) class RemoveBaseline(PlottingCtrlNode): """Remove an arbitrary, graphically defined baseline from the data.""" nodeName = 'RemoveBaseline' def __init__(self, name): ## define inputs and outputs (one output needs to be a plot) PlottingCtrlNode.__init__(self, name) self.line = PolyLineROI([[0,0],[1,0]]) self.line.sigRegionChanged.connect(self.changed) ## create a PolyLineROI, add it to a plot -- actually, I think we want to do this after the node is connected to a plot (look at EventDetection.ThresholdEvents node for ideas), and possible after there is data. 
We will need to update the end positions of the line each time the input data changes #self.line = None ## will become a PolyLineROI def connectToPlot(self, node): """Define what happens when the node is connected to a plot""" if node.plot is None: return node.getPlot().addItem(self.line) def disconnectFromPlot(self, plot): """Define what happens when the node is disconnected from a plot""" plot.removeItem(self.line) def processData(self, data): ## get array of baseline (from PolyLineROI) h0 = self.line.getHandles()[0] h1 = self.line.getHandles()[-1] timeVals = data.xvals(0) h0.setPos(timeVals[0], h0.pos()[1]) h1.setPos(timeVals[-1], h1.pos()[1]) pts = self.line.listPoints() ## lists line handles in same coordinates as data pts, indices = self.adjustXPositions(pts, timeVals) ## maxe sure x positions match x positions of data points ## construct an array that represents the baseline arr = np.zeros(len(data), dtype=float) n = 1 arr[0] = pts[0].y() for i in range(len(pts)-1): x1 = pts[i].x() x2 = pts[i+1].x() y1 = pts[i].y() y2 = pts[i+1].y() m = (y2-y1)/(x2-x1) b = y1 times = timeVals[(timeVals > x1)*(timeVals <= x2)] arr[n:n+len(times)] = (m*(times-times[0]))+b n += len(times) return data - arr ## subract baseline from data def adjustXPositions(self, pts, data): """Return a list of Point() where the x position is set to the nearest x value in *data* for each point in *pts*.""" points = [] timeIndices = [] for p in pts: x = np.argwhere(abs(data - p.x()) == abs(data - p.x()).min()) points.append(Point(data[x], p.y())) timeIndices.append(x) return points, timeIndices class AdaptiveDetrend(CtrlNode): """Removes baseline from data, ignoring anomalous events""" nodeName = 'AdaptiveDetrend' uiTemplate = [ ('threshold', 'doubleSpin', {'value': 3.0, 'min': 0, 'max': 1000000}) ] def processData(self, data): return functions.adaptiveDetrend(data, threshold=self.ctrls['threshold'].value()) class HistogramDetrend(CtrlNode): """Removes baseline from data by computing mode (from histogram) of beginning and end of data.""" nodeName = 'HistogramDetrend' uiTemplate = [ ('windowSize', 'intSpin', {'value': 500, 'min': 10, 'max': 1000000, 'suffix': 'pts'}), ('numBins', 'intSpin', {'value': 50, 'min': 3, 'max': 1000000}), ('offsetOnly', 'check', {'checked': False}), ] def processData(self, data): s = self.stateGroup.state() #ws = self.ctrls['windowSize'].value() #bn = self.ctrls['numBins'].value() #offset = self.ctrls['offsetOnly'].checked() return functions.histogramDetrend(data, window=s['windowSize'], bins=s['numBins'], offsetOnly=s['offsetOnly']) class RemovePeriodic(CtrlNode): nodeName = 'RemovePeriodic' uiTemplate = [ #('windowSize', 'intSpin', {'value': 500, 'min': 10, 'max': 1000000, 'suffix': 'pts'}), #('numBins', 'intSpin', {'value': 50, 'min': 3, 'max': 1000000}) ('f0', 'spin', {'value': 60, 'suffix': 'Hz', 'siPrefix': True, 'min': 0, 'max': None}), ('harmonics', 'intSpin', {'value': 30, 'min': 0}), ('samples', 'intSpin', {'value': 1, 'min': 1}), ] def processData(self, data): times = data.xvals('Time') dt = times[1]-times[0] data1 = data.asarray() ft = np.fft.fft(data1) ## determine frequencies in fft data df = 1.0 / (len(data1) * dt) freqs = np.linspace(0.0, (len(ft)-1) * df, len(ft)) ## flatten spikes at f0 and harmonics f0 = self.ctrls['f0'].value() for i in xrange(1, self.ctrls['harmonics'].value()+2): f = f0 * i # target frequency ## determine index range to check for this frequency ind1 = int(np.floor(f / df)) ind2 = int(np.ceil(f / df)) + (self.ctrls['samples'].value()-1) if ind1 > len(ft)/2.: 
break mag = (abs(ft[ind1-1]) + abs(ft[ind2+1])) * 0.5 for j in range(ind1, ind2+1): phase = np.angle(ft[j]) ## Must preserve the phase of each point, otherwise any transients in the trace might lead to large artifacts. re = mag * np.cos(phase) im = mag * np.sin(phase) ft[j] = re + im*1j ft[len(ft)-j] = re - im*1j data2 = np.fft.ifft(ft).real ma = metaarray.MetaArray(data2, info=data.infoCopy()) return ma
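# Illustrative sketch (added for clarity, not part of the upstream module): the core
# idea behind RemovePeriodic above -- clamp the FFT magnitude at one positive-frequency
# bin to the level of its neighbours while keeping its phase, then mirror the change so
# the spectrum stays conjugate-symmetric and the inverse FFT stays real. The single-bin
# simplification and argument names are assumptions for demonstration only; 'np' is the
# numpy import already present at the top of this module.
def _demo_flatten_bin(data, ind):
    # 'ind' is assumed to be an interior positive-frequency bin (1 <= ind < len(data)//2)
    ft = np.fft.fft(data)
    mag = (abs(ft[ind - 1]) + abs(ft[ind + 1])) * 0.5   # neighbour magnitude
    phase = np.angle(ft[ind])                           # preserve the original phase
    ft[ind] = mag * np.cos(phase) + 1j * mag * np.sin(phase)
    ft[len(ft) - ind] = np.conj(ft[ind])                # mirror bin keeps ifft real-valued
    return np.fft.ifft(ft).real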
mit
realzzt/BitCoin2013
qa/rpc-tests/nulldummy.py
1
6747
#!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. from test_framework.test_framework import ComparisonTestFramework from test_framework.util import * from test_framework.mininode import CTransaction, NetworkThread from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment from test_framework.comptool import TestManager from test_framework.script import CScript from io import BytesIO import time NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)" def trueDummy(tx): scriptSig = CScript(tx.vin[0].scriptSig) newscript = [] for i in scriptSig: if (len(newscript) == 0): assert(len(i) == 0) newscript.append(b'\x51') else: newscript.append(i) tx.vin[0].scriptSig = CScript(newscript) tx.rehash() ''' This test is meant to exercise NULLDUMMY softfork. Connect to a single node. Generate 2 blocks (save the coinbases for later). Generate 427 more blocks. [Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block. [Policy] Check that non-NULLDUMMY transactions are rejected before activation. [Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block. [Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block. ''' class NULLDUMMYTest(ComparisonTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 def setup_network(self): # Must set the blockversion for this test self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args=[['-debug', '-whitelist=127.0.0.1', '-walletprematurewitness']]) def run_test(self): self.address = self.nodes[0].getnewaddress() self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address]) self.wit_address = self.nodes[0].addwitnessaddress(self.address) self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address) test = TestManager(self, self.options.tmpdir) test.add_all_connections(self.nodes) NetworkThread().start() # Start up network handling in another thread self.coinbase_blocks = self.nodes[0].generate(2) # Block 2 coinbase_txid = [] for i in self.coinbase_blocks: coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0]) self.nodes[0].generate(427) # Block 429 self.lastblockhash = self.nodes[0].getbestblockhash() self.tip = int("0x" + self.lastblockhash, 0) self.lastblockheight = 429 self.lastblocktime = int(time.time()) + 429 print ("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]") test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)] txid1 = self.tx_submit(self.nodes[0], test1txs[0]) test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48)) txid2 = self.tx_submit(self.nodes[0], test1txs[1]) test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49)) txid3 = self.tx_submit(self.nodes[0], test1txs[2]) self.block_submit(self.nodes[0], test1txs, False, True) print ("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation") test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 48) trueDummy(test2tx) txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR) print ("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]") 
self.block_submit(self.nodes[0], [test2tx], False, True) print ("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation") test4tx = self.create_transaction(self.nodes[0], txid4, self.address, 47) test6txs=[CTransaction(test4tx)] trueDummy(test4tx) self.tx_submit(self.nodes[0], test4tx, NULLDUMMY_ERROR) self.block_submit(self.nodes[0], [test4tx]) print ("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation") test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48) test6txs.append(CTransaction(test5tx)) test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01' self.tx_submit(self.nodes[0], test5tx, NULLDUMMY_ERROR) self.block_submit(self.nodes[0], [test5tx], True) print ("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]") for i in test6txs: self.tx_submit(self.nodes[0], i) self.block_submit(self.nodes[0], test6txs, True, True) def create_transaction(self, node, txid, to_address, amount): inputs = [{ "txid" : txid, "vout" : 0}] outputs = { to_address : amount } rawtx = node.createrawtransaction(inputs, outputs) signresult = node.signrawtransaction(rawtx) tx = CTransaction() f = BytesIO(hex_str_to_bytes(signresult['hex'])) tx.deserialize(f) return tx def tx_submit(self, node, tx, msg = ""): tx.rehash() try: node.sendrawtransaction(bytes_to_hex_str(tx.serialize_with_witness()), True) except JSONRPCException as exp: assert_equal(exp.error["message"], msg) return tx.hash def block_submit(self, node, txs, witness = False, accept = False): block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1) block.nVersion = 4 for tx in txs: tx.rehash() block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() witness and add_witness_commitment(block) block.rehash() block.solve() node.submitblock(bytes_to_hex_str(block.serialize(True))) if (accept): assert_equal(node.getbestblockhash(), block.hash) self.tip = block.sha256 self.lastblockhash = block.hash self.lastblocktime += 1 self.lastblockheight += 1 else: assert_equal(node.getbestblockhash(), self.lastblockhash) if __name__ == '__main__': NULLDUMMYTest().main()
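# Illustrative sketch (added for clarity, not part of the upstream test): what
# trueDummy() above does at the byte level. A NULLDUMMY-compliant CHECKMULTISIG
# scriptSig starts with an empty push (the dummy stack element); swapping it for
# OP_1 (0x51) still evaluates before activation but violates the NULLDUMMY rule.
# The element list below is an assumption for demonstration only.
def _demo_swap_dummy(script_elements):
    # e.g. script_elements = [b'', b'<signature>'] for a 1-of-1 multisig spend
    assert len(script_elements[0]) == 0
    return [b'\x51'] + list(script_elements[1:])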
mit
asm-products/unsquat-it
lib/postsdb.py
1
16012
import pymongo from bson.objectid import ObjectId import re import settings import json from lib import sanitize from datetime import datetime from datetime import date from datetime import timedelta from mongoengine import * from lib.userdb import User from lib.custom_fields import ImprovedStringField, ImprovedURLField from mongo import db from slugify import slugify from lib.commentsdb import Comment # # Post Object # class Post(Document): def __init__(self, *args, **kwargs): super(Post, self).__init__(*args, **kwargs) db = self._get_db() meta = { 'indexes': ['slug', 'date_created', 'votes', 'disqus_comment_count'] } date_created = DateTimeField(required=True, default=datetime.now()) title = StringField(required=True) slugs = ListField(StringField()) slug = StringField() user = EmbeddedDocumentField(User, required=True) tags = ListField(ImprovedStringField()) topic_slug = StringField() url = ImprovedURLField(max_length=65000, required=False) # link to external content (if any) normalized_url = ImprovedStringField(max_length=65000, required=False) hackpad_url = ImprovedURLField(max_length=65000) has_hackpad = BooleanField(default=False) body_raw = ImprovedStringField(required=False) body_html = ImprovedStringField(required=False) body_truncated = ImprovedStringField(required=False) body_text = ImprovedStringField(required=False) status = StringField(default="new") featured = BooleanField(default=False) date_featured = DateTimeField() voted_users = ListField(EmbeddedDocumentField(User)) votes = IntField() deleted = IntField() date_deleted = DateTimeField() sort_score = IntField() daily_sort_score = FloatField() downvotes = IntField() super_upvotes = IntField() super_downvotes = IntField() subscribed = ListField(EmbeddedDocumentField(User)) domain = StringField() disqus_thread_id_str = StringField() disqus_comment_count = IntField() def __str__(self): try: return 'Post: "%s" by %s' % (self.title, self.user.screen_name) except: return 'Empty Post' def add_slug(self, title): slug = slugify(title) counter_coll = self._get_collection_name() + 'Slug' counter = self._get_db()[counter_coll].find_and_modify(query={'_id': slug}, update={'$inc': {'value': 1}}, upsert=True, new=True) if counter['value'] != 1: slug = '%s-%i' % (counter['_id'], counter['value']) self._data['slugs'] = self._data.get('slugs', []) + [slug] self._data['slug'] = slug return slug def save(self, *args, **kwargs): self.validate() title_changed = hasattr(self, '_changed_fields') and 'title' in self._changed_fields if (title_changed or not self._data.get('slug')) and len(self._data.get('slugs', [])) < 6: try: self.add_slug(unicode(self._data['title'])) except: self.add_slug(unicode(self._data['title'].decode('utf-8'))) if hasattr(self, '_changed_fields'): self._changed_fields += ['slug', 'slugs'] super(Post, self).save(*args, **kwargs) def set_fields(self, **kwargs): for fname in self._fields.keys(): if kwargs.has_key(fname): setattr(self, fname, kwargs[fname]) def permalink(self): return "/posts/%s" % self._data['slug'] def editlink(self): return "%s/edit" % self.permalink() def invitelink(self): return "%s/invite" % self.permalink() def comment_count(self): return len(Comment.objects(post=self)) #return "5" def dblink(self): return "%s/post/%s" % (settings.get('db_edit_baseurl'), self._data['id']) def add_comment_link(self): return self.permalink() + "/add_comment" def add_slug(self, title): slug = slugify(title) counter_coll = self._get_collection_name() + 'Slug' counter = 
self._get_db()[counter_coll].find_and_modify(query={'_id': slug}, update={'$inc': {'value': 1}}, upsert=True, new=True) if counter['value'] != 1: slug = '%s-%i' % (counter['_id'], counter['value']) self._data['slugs'] = self._data.get('slugs', []) + [slug] self._data['slug'] = slug return slug ########################### ### GET ALL POSTS ########################### def get_all(): return db.post.find() ########################### ### GET A SPECIFIC POST ########################### def get_post_by_slug(slug): return Post.objects(slug=slug).first() def get_post_by_id(post_id): return Post.objects(id=post_id).first() def get_all(): return Post.objects() ########################### ### GET PAGED LISTING OF POSTS ########################### def get_posts_by_bumps(screen_name, per_page, page): return Post.objects(voted_users__screen_name=screen_name, user__screen_name__ne=screen_name).order_by('-date_created').skip((page-1)*per_page).limit(per_page) def get_posts_by_query(query, per_page=10, page=1): query_regex = re.compile('%s[\s$]' % query, re.I) return Post.objects(__raw__={'$or':[{'title':query_regex}, {'body_raw':query_regex}]}).order_by('-date_created').skip((page-1)*per_page).limit(per_page) def get_posts_by_tag(tag): return Post.objects(deleted__ne=True, tags__in=[tag]).order_by('-date_created') def get_posts_by_topic(topic_slug): return Post.objects(deleted__ne=True, topic_slug=topic_slug).order_by('-date_created') def get_essentials_by_topic(topic_slug): return Post.objects(deleted__ne=True, topic_slug=topic_slug).order_by('-votes') def get_essentials_by_tag(tag): return Post.objects(deleted__ne=True, tags__in=[tag]).order_by('-votes') def get_posts_by_screen_name(screen_name, per_page=10, page=1): return Post.objects(deleted__ne=True, user__screen_name=screen_name).order_by('-date_created').skip((page-1)*per_page).limit(per_page) def get_posts_by_screen_name_and_tag(screen_name, tag, per_page=10, page=1): return Post.objects(deleted__ne=True, user__screen_name=screen_name, tags__in=[tag]).order_by('-date_created').skip((page-1)*per_page).limit(per_page) def get_featured_posts(per_page=10, page=1): today = datetime.today() week_ago = today - timedelta(days=3) return Post.objects(deleted__ne=True, featured=True).order_by('-date_created').skip((page-1)*per_page).limit(per_page) def get_new_posts(per_page=50, page=1): return Post.objects(deleted__ne=True).order_by('-date_created').limit(per_page).skip((page-1)*per_page) def get_hot_posts_by_day(day=date.today(), hide_featured=False): day = datetime.combine(day, datetime.min.time()) day_plus_one = day + timedelta(days=1) if hide_featured: posts = Post.objects(deleted__ne=True, featured__ne=True, date_created__gte=day, date_created__lte=day_plus_one).order_by('-daily_sort_score') else: posts = Post.objects(deleted__ne=True, date_created__gte=day, date_created__lte=day_plus_one).order_by('-daily_sort_score') return posts def get_daily_posts_by_sort_score(min_score=8): day=date.today() day = datetime.combine(day, datetime.min.time()) day_plus_one = day + timedelta(days=1) return list(db.post.find({'daily_sort_score': {"$gte" : min_score }, "deleted": { "$ne": True }, 'date_created': {'$gte': day, '$lte': day_plus_one}}, sort=[('daily_sort_score', pymongo.DESCENDING)])) def get_hot_posts_24hr(start=datetime.now()): end = start - timedelta(hours=24) return Post.objects(deleted__ne=True, date_created__gte=end, date_created__lte=start).order_by('-daily_sort_score') def get_sad_posts(per_page=50, page=1): return 
Post.objects(date_created__gt=datetime.strptime("10/12/13", "%m/%d/%y"), votes=1, comment_count=0, deleted__ne=True, featured__ne=True).order_by('-date_created').skip((page-1)*per_page).limit(per_page) def get_deleted_posts(per_page=50, page=1): return Post.objects(deleted=True).order_by('-date_created').skip((page-1)*per_page).limit(per_page) ########################### ### AGGREGATE QUERIES ########################### def get_unique_posters(start_date, end_date): """ Original query: return db.post.group(["user.screen_name"], {'date_created':{'$gte': start_date, '$lte': end_date}}, {"count":0}, "function(o, p){p.count++}" ) Using MongoEngine necessitating breaking out the logic """ # Get all posts created in a given time period posts = Post.objects(date_created__gte=start_date, date_created__lte=end_date) # Determine unique authors of this subset of posts unique_posters = [] for p in posts: if not any(p.user.screen_name == entry['user.screen_name'] for entry in unique_posters): unique_posters.append( {'count':len(Post.objects(user__screen_name=p.user.screen_name, date_created__gte=start_date, date_created__lte=end_date)), 'user.screen_name':p.user.screen_name}) return unique_posters ########################### ### GET POST COUNTS ########################### def get_featured_posts_count(): """ Returns number of posts that have been featured #return len(list(db.post.find({'featured':True}))) """ return len(Post.objects(featured=True)) def get_post_count_by_query(query): """ Returns numbers of posts matching an arbitrary query Executes using a raw mongodb query through MongoEngine #return len(list(db.post.find({'$or':[{'title':query_regex}, {'body_raw':query_regex}]}))) """ query_regex = re.compile('%s[\s$]' % query, re.I) return len(Post.objects(__raw__={'$or':[{'title':query_regex}, {'body_raw':query_regex}]})) def get_post_count(): """ Get total number of posts. 
Arbitrarily set beginning of time to 10/12/13 to ignore posts that came before this #return len(list(db.post.find({'date_created':{'$gt': datetime.datetime.strptime("10/12/13", "%m/%d/%y")}}))) """ return len(Post.objects(date_created__gte=datetime.datetime.strptime("10/12/13", "%m/%d/%y"))) def get_post_count_for_range(start_date, end_date): """ Return number of posts created within a date get_post_count_for_range #return len(list(db.post.find({'date_created':{'$gte': start_date, '$lte': end_date}}))) """ return len(Post.objects(date_created__gte=state_date, date_created__lte=end_date)) def get_delete_posts_count(): """" Returns number of posts that have been deleted #return len(list(db.post.find({'deleted':True}))) """ return len(Post.objects(deleted=True)) def get_post_count_by_tag(tag): """ Returns number of posts with a given tag #return len(list(db.post.find({'tags':tag}))) """ return len(Post.objects(tags__in=tag)) ########################### ### GET LIST OF POSTS BY CRITERIA ########################### def get_latest_staff_posts_by_tag(tag, limit=10): staff = settings.get('staff') return Post.objects(deleted__ne=True, user__username__in=staff, tags__in=[tag]).order_by('-date_featured').limit(limit) def get_posts_by_normalized_url(normalized_url, limit): return Post.objects(normalized_url=normalized_url, deleted__ne=True).order_by('-date_created').limit(limit) def get_posts_with_min_votes(min_votes): return Post.objects(deleted__ne=True, votes__gte=min_votes).order_by('-date_created') def get_hot_posts_past_week(): yesterday = datetime.today() - timedelta(days=1) week_ago = datetime.today() - timedelta(days=5) return Post.objects(deleted__ne=True, date_created__lte=yesterday, date_created__gte=week_ago).order_by('-daily_sort_score')[:5] def get_related_posts_by_tag(tag): return Post.objects(deleted__ne=True, tags__in=tag).order_by('-daily_sort_score')[:2] ########################### ### UPDATE POST DETAIL ########################### def delete_all_posts_by_user(screen_name): posts = get_posts_by_screen_name(screen_name, 1000, 1) posts.update(set__deleted=True, set__date_deleted=datetime.datetime.utcnow()) ########################### ### ADD A NEW POST ########################### def insert_post(post_dict): slug = slugify(post_dict['title']) slug_count = len(list(db.post.find({'slug':slug}))) if slug_count > 0: slug = '%s-%i' % (slug, slug_count) post_dict['slug'] = slug post_dict['slugs'] = [slug] if 'subscribed' not in post_dict.keys(): post_dict['subscribed'] = [] post = Post(**post_dict) post.save() return post def save_post(post): post.save() return ########################### ### SORT ALL POSTS ### RUN BY HEROKU SCHEDULER EVERY 5 MIN ### VIA SCRIPTS/SORT_POSTS.PY ########################### def sort_posts(day="all"): # set our config values up staff_bonus = -3 comments_multiplier = 3.0 votes_multiplier = 1.0 super_upvotes_multiplier = 3.0 super_downvotes_multiplier = 3.0 if day == "all": posts = get_all() else: posts = get_hot_posts_by_day(day) for post in posts: # determine if we should assign a staff bonus or not if post['user']['username'] in settings.get('staff'): staff_bonus = staff_bonus else: staff_bonus = 0 # determine how to weight votes votes_base_score = 0 if post['votes'] == 1 and post['comment_count'] > 2: votes_base_score = -2 if post['votes'] > 8 and post['comment_count'] == 0: votes_base_score = -2 if 'super_upvotes' in post.keys(): super_upvotes = post['super_upvotes'] else: super_upvotes = 0 #super_upvotes = post.get('super_upvotes', 0) super_downvotes = 
post.get('super_downvotes', 0) # calculate the sub-scores scores = {} scores['votes'] = (votes_base_score + post['votes'] * votes_multiplier) scores['super_upvotes'] = (super_upvotes * super_upvotes_multiplier) scores['super_downvotes'] = (super_downvotes * super_downvotes_multiplier * -1) scores['comments'] = (post['comment_count'] * comments_multiplier) # add up the scores total_score = 0 total_score += staff_bonus for score in scores: total_score += scores[score] # and save the new score post['scores'] = scores update_post_score(post['slug'], total_score, scores) print post['slug'] print "-- %s" % total_score print "---- %s" % json.dumps(scores, indent=4) print "All posts sorted!" ########################### ### UPDATES USER DATA FOR ALL POSTS ### RUN VIA SCRIPTS/UPDATE_POSTS_USER_DATA.PY ########################### def update_posts_user_data(): print "Updating user data for all posts..." for post in get_all(): # user try: user = post['user'] if user: new_user = userdb.get_user_by_id_str(user['id_str']) post['user'] = new_user['user'] except: print "Failed to update user for post of slug %s" % post['slug'] # voted_users try: voted_users = post['voted_users'] new_voted_users = [] for voted_user in voted_users: new_voted_user = userdb.get_user_by_id_str(voted_user['id_str']) if new_voted_user: new_voted_users.append(new_voted_user['user']) post['voted_users'] = new_voted_users except: print "Failed to update voted_users for post of slug %s" % post['slug'] # Save post save_post(post) print "Finished updating user data for all posts"
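# Illustrative sketch (added for clarity, not part of the upstream file): the same
# weighting sort_posts() applies above, collapsed into one function and run on a
# hypothetical post so the sub-scores are easy to follow. All input numbers are
# assumptions for demonstration only.
def _demo_sort_score(votes=10, comment_count=4, super_upvotes=1, super_downvotes=0,
                     is_staff=False):
    staff_bonus = -3 if is_staff else 0
    votes_base_score = 0
    if votes == 1 and comment_count > 2:
        votes_base_score = -2
    if votes > 8 and comment_count == 0:
        votes_base_score = -2
    scores = {
        'votes': votes_base_score + votes * 1.0,        # votes_multiplier = 1.0
        'super_upvotes': super_upvotes * 3.0,           # super_upvotes_multiplier = 3.0
        'super_downvotes': super_downvotes * 3.0 * -1,  # super_downvotes_multiplier = 3.0
        'comments': comment_count * 3.0,                # comments_multiplier = 3.0
    }
    # with the defaults: 10 + 3 + 0 + 12 + staff_bonus(0) = 25.0
    return staff_bonus + sum(scores.values()), scores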
gpl-3.0
openstack/taskflow
taskflow/tests/test_examples.py
3
4946
# -*- coding: utf-8 -*- # Copyright (C) 2012 Yahoo! Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Run examples as unit tests. This module executes examples as unit tests, thus ensuring they at least can be executed with current taskflow. For examples with deterministic output, the output can be put to file with same name and '.out.txt' extension; then it will be checked that output did not change. When this module is used as main module, output for all examples are generated. Please note that this will break tests as output for most examples is indeterministic (due to hash randomization for example). """ import keyword import os import re import subprocess import sys import six from taskflow import test ROOT_DIR = os.path.abspath( os.path.dirname( os.path.dirname( os.path.dirname(__file__)))) # This is used so that any uuid like data being output is removed (since it # will change per test run and will invalidate the deterministic output that # we expect to be able to check). UUID_RE = re.compile('XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX' .replace('X', '[0-9a-f]')) def safe_filename(filename): # Translates a filename into a method name, returns falsey if not # possible to perform this translation... name = re.sub("[^a-zA-Z0-9_]+", "_", filename) if not name or re.match(r"^[_]+$", name) or keyword.iskeyword(name): return False return name def root_path(*args): return os.path.join(ROOT_DIR, *args) def run_example(name): path = root_path('taskflow', 'examples', '%s.py' % name) obj = subprocess.Popen([sys.executable, path], stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = obj.communicate() stdout = output[0].decode() stderr = output[1].decode() rc = obj.wait() if rc != 0: raise RuntimeError('Example %s failed, return code=%s\n' '<<<Begin captured STDOUT>>>\n%s' '<<<End captured STDOUT>>>\n' '<<<Begin captured STDERR>>>\n%s' '<<<End captured STDERR>>>' % (name, rc, stdout, stderr)) return stdout def expected_output_path(name): return root_path('taskflow', 'examples', '%s.out.txt' % name) def iter_examples(): examples_dir = root_path('taskflow', 'examples') for filename in os.listdir(examples_dir): path = os.path.join(examples_dir, filename) if not os.path.isfile(path): continue name, ext = os.path.splitext(filename) if ext != ".py": continue if not name.endswith('utils'): safe_name = safe_filename(name) if safe_name: yield name, safe_name class ExampleAdderMeta(type): """Translates examples into test cases/methods.""" def __new__(cls, name, parents, dct): def generate_test(example_name): def test_example(self): self._check_example(example_name) return test_example for example_name, safe_name in iter_examples(): test_name = 'test_%s' % safe_name dct[test_name] = generate_test(example_name) return type.__new__(cls, name, parents, dct) @six.add_metaclass(ExampleAdderMeta) class ExamplesTestCase(test.TestCase): """Runs the examples, and checks the outputs against expected outputs.""" def _check_example(self, name): output = run_example(name) eop = expected_output_path(name) if 
os.path.isfile(eop): with open(eop) as f: expected_output = f.read() # NOTE(imelnikov): on each run new uuid is generated, so we just # replace them with some constant string output = UUID_RE.sub('<SOME UUID>', output) expected_output = UUID_RE.sub('<SOME UUID>', expected_output) self.assertEqual(expected_output, output) def make_output_files(): """Generate output files for all examples.""" for example_name, _safe_name in iter_examples(): print("Running %s" % example_name) print("Please wait...") output = run_example(example_name) with open(expected_output_path(example_name), 'w') as f: f.write(output) if __name__ == '__main__': make_output_files()
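# Illustrative sketch (added for clarity, not part of the upstream module): the
# pattern ExampleAdderMeta uses above -- a metaclass that stamps one test_* method
# onto the class for every discovered example. The fixed item list and names here
# are assumptions for demonstration only.
class _DemoAdderMeta(type):
    def __new__(cls, name, parents, dct):
        def generate_test(item):
            def test_item(self):
                # a real test would run the example and compare its output here
                return item
            return test_item
        for item in ("alpha", "beta"):
            dct["test_%s" % item] = generate_test(item)
        return type.__new__(cls, name, parents, dct)

# With Python 3 syntax (the upstream code uses six.add_metaclass for 2/3 support):
#   class DemoTests(test.TestCase, metaclass=_DemoAdderMeta): pass
# DemoTests then exposes test_alpha and test_beta as ordinary test methods.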
apache-2.0
atuljain/odoo
addons/l10n_be_coda/wizard/__init__.py
439
1098
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#
#    Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import account_coda_import

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
msracver/Deformable-ConvNets
faster_rcnn/function/train_rpn.py
2
5938
# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Yuwen Xiong
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licence under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------

import argparse
import logging
import pprint
import mxnet as mx

from symbols import *
from core import callback, metric
from core.loader import AnchorLoader
from core.module import MutableModule
from utils.load_data import load_gt_roidb, merge_roidb, filter_roidb
from utils.load_model import load_param
from utils.PrefetchingIter import PrefetchingIter
from utils.lr_scheduler import WarmupMultiFactorScheduler


def train_rpn(cfg, dataset, image_set, root_path, dataset_path,
              frequent, kvstore, flip, shuffle, resume,
              ctx, pretrained, epoch, prefix, begin_epoch, end_epoch,
              train_shared, lr, lr_step, logger=None, output_path=None):
    # set up logger
    if not logger:
        logging.basicConfig()
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)

    # set up config
    cfg.TRAIN.BATCH_IMAGES = cfg.TRAIN.ALTERNATE.RPN_BATCH_IMAGES

    # load symbol
    sym_instance = eval(cfg.symbol + '.' + cfg.symbol)()
    sym = sym_instance.get_symbol_rpn(cfg, is_train=True)
    feat_sym = sym.get_internals()['rpn_cls_score_output']

    # setup multi-gpu
    batch_size = len(ctx)
    input_batch_size = cfg.TRAIN.BATCH_IMAGES * batch_size

    # print cfg
    pprint.pprint(cfg)
    logger.info('training rpn cfg:{}\n'.format(pprint.pformat(cfg)))

    # load dataset and prepare imdb for training
    image_sets = [iset for iset in image_set.split('+')]
    roidbs = [load_gt_roidb(dataset, image_set, root_path, dataset_path, result_path=output_path, flip=flip)
              for image_set in image_sets]
    roidb = merge_roidb(roidbs)
    roidb = filter_roidb(roidb, cfg)

    # load training data
    train_data = AnchorLoader(feat_sym, roidb, cfg, batch_size=input_batch_size, shuffle=shuffle,
                              ctx=ctx, feat_stride=cfg.network.RPN_FEAT_STRIDE,
                              anchor_scales=cfg.network.ANCHOR_SCALES,
                              anchor_ratios=cfg.network.ANCHOR_RATIOS,
                              aspect_grouping=cfg.TRAIN.ASPECT_GROUPING)

    # infer max shape
    max_data_shape = [('data', (cfg.TRAIN.BATCH_IMAGES, 3,
                                max([v[0] for v in cfg.SCALES]), max([v[1] for v in cfg.SCALES])))]
    max_data_shape, max_label_shape = train_data.infer_shape(max_data_shape)
    print('providing maximum shape', max_data_shape, max_label_shape)

    # infer shape
    data_shape_dict = dict(train_data.provide_data_single + train_data.provide_label_single)
    sym_instance.infer_shape(data_shape_dict)

    # load and initialize params
    if resume:
        print('continue training from ', begin_epoch)
        arg_params, aux_params = load_param(prefix, begin_epoch, convert=True)
    else:
        arg_params, aux_params = load_param(pretrained, epoch, convert=True)
        sym_instance.init_weight_rpn(cfg, arg_params, aux_params)

    # check parameter shapes
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict)

    # create solver
    data_names = [k[0] for k in train_data.provide_data_single]
    label_names = [k[0] for k in train_data.provide_label_single]
    if train_shared:
        fixed_param_prefix = cfg.network.FIXED_PARAMS_SHARED
    else:
        fixed_param_prefix = cfg.network.FIXED_PARAMS
    mod = MutableModule(sym, data_names=data_names, label_names=label_names, logger=logger,
                        context=ctx, max_data_shapes=[max_data_shape for _ in xrange(batch_size)],
                        max_label_shapes=[max_label_shape for _ in xrange(batch_size)],
                        fixed_param_prefix=fixed_param_prefix)

    # decide training params
    # metric
    eval_metric = metric.RPNAccMetric()
    cls_metric = metric.RPNLogLossMetric()
    bbox_metric = metric.RPNL1LossMetric()
    eval_metrics = mx.metric.CompositeEvalMetric()
    for child_metric in [eval_metric, cls_metric, bbox_metric]:
        eval_metrics.add(child_metric)

    # callback
    batch_end_callback = callback.Speedometer(train_data.batch_size, frequent=frequent)
    # epoch_end_callback = mx.callback.do_checkpoint(prefix)
    epoch_end_callback = mx.callback.module_checkpoint(mod, prefix, period=1, save_optimizer_states=True)

    # decide learning rate
    base_lr = lr
    lr_factor = cfg.TRAIN.lr_factor
    lr_epoch = [int(epoch) for epoch in lr_step.split(',')]
    lr_epoch_diff = [epoch - begin_epoch for epoch in lr_epoch if epoch > begin_epoch]
    lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))
    lr_iters = [int(epoch * len(roidb) / batch_size) for epoch in lr_epoch_diff]
    print('lr', lr, 'lr_epoch_diff', lr_epoch_diff, 'lr_iters', lr_iters)
    lr_scheduler = WarmupMultiFactorScheduler(lr_iters, lr_factor, cfg.TRAIN.warmup, cfg.TRAIN.warmup_lr,
                                              cfg.TRAIN.warmup_step)

    # optimizer
    optimizer_params = {'momentum': cfg.TRAIN.momentum,
                        'wd': cfg.TRAIN.wd,
                        'learning_rate': lr,
                        'lr_scheduler': lr_scheduler,
                        'rescale_grad': 1.0,
                        'clip_gradient': None}

    if not isinstance(train_data, PrefetchingIter):
        train_data = PrefetchingIter(train_data)

    # train
    mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback,
            batch_end_callback=batch_end_callback, kvstore=kvstore,
            optimizer='sgd', optimizer_params=optimizer_params,
            arg_params=arg_params, aux_params=aux_params, begin_epoch=begin_epoch, num_epoch=end_epoch)
mit
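The learning-rate block in train_rpn above maps the epoch boundaries in lr_step onto iteration counts and pre-applies any decay steps that have already passed when resuming. A minimal sketch of that arithmetic with hypothetical numbers (the roidb size, GPU count, base lr and lr_step below are illustrative, not taken from any config):

roidb_size, batch_size = 8000, 2            # hypothetical: 8000 training images, 2 GPUs
base_lr, lr_factor, begin_epoch = 0.01, 0.1, 4
lr_epoch = [int(e) for e in "3,5".split(',')]                            # [3, 5]
lr_epoch_diff = [e - begin_epoch for e in lr_epoch if e > begin_epoch]   # [1] -> only the epoch-5 step remains
lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))       # 0.001: the epoch-3 step already happened
lr_iters = [int(e * roidb_size / batch_size) for e in lr_epoch_diff]     # [4000] iterations until the next decay
print(lr, lr_iters)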
yfu/tools
select-longest-isoforms.py
1
1051
#!/usr/bin/env python
# Select the longest isoform for each gene
# Accepts a bed file as input
# Example: gtfToGenePred ~/data/piPipes/common/dm3/dm3.genes.gtf stdout -genePredExt | awk '{ $1=$12; print }' | genePredToBed stdin stdout | python run3-select-longest-isoforms.py | sort -k1,1 -k2,2n > longest_isoform_per_gene.bed
import fileinput

# Per gene name, track the chromosome, strand, smallest start and largest end seen
strand_info = {}
start_coor = {}
end_coor = {}
chrom_info = {}

for line in fileinput.input():
    line = line.strip()
    col = line.split()
    chrom, start, end, name, signal, strand = col[0:6]
    start = int(start)
    end = int(end)
    chrom_info[name] = chrom
    strand_info[name] = strand
    if name not in start_coor:
        start_coor[name] = start
    elif start < start_coor[name]:
        start_coor[name] = start
    if name not in end_coor:
        end_coor[name] = end
    elif end > end_coor[name]:
        end_coor[name] = end

# Emit one BED line per gene spanning the extreme coordinates of its isoforms
for i in chrom_info.keys():
    print chrom_info[i] + "\t" + str(start_coor[i]) + "\t" + str(end_coor[i]) + "\t" + i + "\t" + "0" + "\t" + strand_info[i]
gpl-3.0
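For illustration, hypothetical tab-separated input and output for the script above (transcript names already replaced by their gene id, as the awk step in the example pipeline does); the script keeps the smallest start and largest end seen per name:

chr2L   100   500   gene1   0   +
chr2L   150   900   gene1   0   +
chr3R   200   400   gene2   0   -

becomes, after the script and the final sort:

chr2L   100   900   gene1   0   +
chr3R   200   400   gene2   0   -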
codervince/flashingredlight
env/lib/python2.7/site-packages/bson/__init__.py
12
21524
# Copyright 2009-2012 10gen, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BSON (Binary JSON) encoding and decoding. """ import calendar import datetime import re import struct import sys from bson.binary import (Binary, OLD_UUID_SUBTYPE, JAVA_LEGACY, CSHARP_LEGACY) from bson.code import Code from bson.dbref import DBRef from bson.errors import (InvalidBSON, InvalidDocument, InvalidStringData) from bson.max_key import MaxKey from bson.min_key import MinKey from bson.objectid import ObjectId from bson.py3compat import b, binary_type from bson.son import SON, RE_TYPE from bson.timestamp import Timestamp from bson.tz_util import utc try: from bson import _cbson _use_c = True except ImportError: _use_c = False try: import uuid _use_uuid = True except ImportError: _use_uuid = False PY3 = sys.version_info[0] == 3 MAX_INT32 = 2147483647 MIN_INT32 = -2147483648 MAX_INT64 = 9223372036854775807 MIN_INT64 = -9223372036854775808 EPOCH_AWARE = datetime.datetime.fromtimestamp(0, utc) EPOCH_NAIVE = datetime.datetime.utcfromtimestamp(0) # Create constants compatible with all versions of # python from 2.4 forward. In 2.x b("foo") is just # "foo". In 3.x it becomes b"foo". EMPTY = b("") ZERO = b("\x00") ONE = b("\x01") BSONNUM = b("\x01") # Floating point BSONSTR = b("\x02") # UTF-8 string BSONOBJ = b("\x03") # Embedded document BSONARR = b("\x04") # Array BSONBIN = b("\x05") # Binary BSONUND = b("\x06") # Undefined BSONOID = b("\x07") # ObjectId BSONBOO = b("\x08") # Boolean BSONDAT = b("\x09") # UTC Datetime BSONNUL = b("\x0A") # Null BSONRGX = b("\x0B") # Regex BSONREF = b("\x0C") # DBRef BSONCOD = b("\x0D") # Javascript code BSONSYM = b("\x0E") # Symbol BSONCWS = b("\x0F") # Javascript code with scope BSONINT = b("\x10") # 32bit int BSONTIM = b("\x11") # Timestamp BSONLON = b("\x12") # 64bit int BSONMIN = b("\xFF") # Min key BSONMAX = b("\x7F") # Max key def _get_int(data, position, as_class=None, tz_aware=False, uuid_subtype=OLD_UUID_SUBTYPE, unsigned=False): format = unsigned and "I" or "i" try: value = struct.unpack("<%s" % format, data[position:position + 4])[0] except struct.error: raise InvalidBSON() position += 4 return value, position def _get_c_string(data, position, length=None): if length is None: try: end = data.index(ZERO, position) except ValueError: raise InvalidBSON() else: end = position + length value = data[position:end].decode("utf-8") position = end + 1 return value, position def _make_c_string(string, check_null=False): if isinstance(string, unicode): if check_null and "\x00" in string: raise InvalidDocument("BSON keys / regex patterns must not " "contain a NULL character") return string.encode("utf-8") + ZERO else: if check_null and ZERO in string: raise InvalidDocument("BSON keys / regex patterns must not " "contain a NULL character") try: string.decode("utf-8") return string + ZERO except UnicodeError: raise InvalidStringData("strings in documents must be valid " "UTF-8: %r" % string) def _get_number(data, position, as_class, tz_aware, uuid_subtype): num = struct.unpack("<d", 
data[position:position + 8])[0] position += 8 return num, position def _get_string(data, position, as_class, tz_aware, uuid_subtype): length = struct.unpack("<i", data[position:position + 4])[0] if (len(data) - position - 4) < length: raise InvalidBSON("invalid string length") position += 4 if data[position + length - 1:position + length] != ZERO: raise InvalidBSON("invalid end of string") return _get_c_string(data, position, length - 1) def _get_object(data, position, as_class, tz_aware, uuid_subtype): obj_size = struct.unpack("<i", data[position:position + 4])[0] if data[position + obj_size - 1:position + obj_size] != ZERO: raise InvalidBSON("bad eoo") encoded = data[position + 4:position + obj_size - 1] object = _elements_to_dict(encoded, as_class, tz_aware, uuid_subtype) position += obj_size if "$ref" in object: return (DBRef(object.pop("$ref"), object.pop("$id", None), object.pop("$db", None), object), position) return object, position def _get_array(data, position, as_class, tz_aware, uuid_subtype): obj, position = _get_object(data, position, as_class, tz_aware, uuid_subtype) result = [] i = 0 while True: try: result.append(obj[str(i)]) i += 1 except KeyError: break return result, position def _get_binary(data, position, as_class, tz_aware, uuid_subtype): length, position = _get_int(data, position) subtype = ord(data[position:position + 1]) position += 1 if subtype == 2: length2, position = _get_int(data, position) if length2 != length - 4: raise InvalidBSON("invalid binary (st 2) - lengths don't match!") length = length2 if subtype in (3, 4) and _use_uuid: # Java Legacy if uuid_subtype == JAVA_LEGACY: java = data[position:position + length] value = uuid.UUID(bytes=java[0:8][::-1] + java[8:16][::-1]) # C# legacy elif uuid_subtype == CSHARP_LEGACY: value = uuid.UUID(bytes_le=data[position:position + length]) # Python else: value = uuid.UUID(bytes=data[position:position + length]) position += length return (value, position) # Python3 special case. Decode subtype 0 to 'bytes'. 
if PY3 and subtype == 0: value = data[position:position + length] else: value = Binary(data[position:position + length], subtype) position += length return value, position def _get_oid(data, position, as_class=None, tz_aware=False, uuid_subtype=OLD_UUID_SUBTYPE): value = ObjectId(data[position:position + 12]) position += 12 return value, position def _get_boolean(data, position, as_class, tz_aware, uuid_subtype): value = data[position:position + 1] == ONE position += 1 return value, position def _get_date(data, position, as_class, tz_aware, uuid_subtype): millis = struct.unpack("<q", data[position:position + 8])[0] diff = millis % 1000 seconds = (millis - diff) / 1000 position += 8 if tz_aware: dt = EPOCH_AWARE + datetime.timedelta(seconds=seconds) else: dt = EPOCH_NAIVE + datetime.timedelta(seconds=seconds) return dt.replace(microsecond=diff * 1000), position def _get_code(data, position, as_class, tz_aware, uuid_subtype): code, position = _get_string(data, position, as_class, tz_aware, uuid_subtype) return Code(code), position def _get_code_w_scope(data, position, as_class, tz_aware, uuid_subtype): _, position = _get_int(data, position) code, position = _get_string(data, position, as_class, tz_aware, uuid_subtype) scope, position = _get_object(data, position, as_class, tz_aware, uuid_subtype) return Code(code, scope), position def _get_null(data, position, as_class, tz_aware, uuid_subtype): return None, position def _get_regex(data, position, as_class, tz_aware, uuid_subtype): pattern, position = _get_c_string(data, position) bson_flags, position = _get_c_string(data, position) flags = 0 if "i" in bson_flags: flags |= re.IGNORECASE if "l" in bson_flags: flags |= re.LOCALE if "m" in bson_flags: flags |= re.MULTILINE if "s" in bson_flags: flags |= re.DOTALL if "u" in bson_flags: flags |= re.UNICODE if "x" in bson_flags: flags |= re.VERBOSE return re.compile(pattern, flags), position def _get_ref(data, position, as_class, tz_aware, uuid_subtype): position += 4 collection, position = _get_c_string(data, position) oid, position = _get_oid(data, position) return DBRef(collection, oid), position def _get_timestamp(data, position, as_class, tz_aware, uuid_subtype): inc, position = _get_int(data, position, unsigned=True) timestamp, position = _get_int(data, position, unsigned=True) return Timestamp(timestamp, inc), position def _get_long(data, position, as_class, tz_aware, uuid_subtype): # Have to cast to long; on 32-bit unpack may return an int. # 2to3 will change long to int. That's fine since long doesn't # exist in python3. value = long(struct.unpack("<q", data[position:position + 8])[0]) position += 8 return value, position _element_getter = { BSONNUM: _get_number, BSONSTR: _get_string, BSONOBJ: _get_object, BSONARR: _get_array, BSONBIN: _get_binary, BSONUND: _get_null, # undefined BSONOID: _get_oid, BSONBOO: _get_boolean, BSONDAT: _get_date, BSONNUL: _get_null, BSONRGX: _get_regex, BSONREF: _get_ref, BSONCOD: _get_code, # code BSONSYM: _get_string, # symbol BSONCWS: _get_code_w_scope, BSONINT: _get_int, # number_int BSONTIM: _get_timestamp, BSONLON: _get_long, # Same as _get_int after 2to3 runs. 
BSONMIN: lambda v, w, x, y, z: (MinKey(), w), BSONMAX: lambda v, w, x, y, z: (MaxKey(), w)} def _element_to_dict(data, position, as_class, tz_aware, uuid_subtype): element_type = data[position:position + 1] position += 1 element_name, position = _get_c_string(data, position) value, position = _element_getter[element_type](data, position, as_class, tz_aware, uuid_subtype) return element_name, value, position def _elements_to_dict(data, as_class, tz_aware, uuid_subtype): result = as_class() position = 0 end = len(data) - 1 while position < end: (key, value, position) = _element_to_dict(data, position, as_class, tz_aware, uuid_subtype) result[key] = value return result def _bson_to_dict(data, as_class, tz_aware, uuid_subtype): obj_size = struct.unpack("<i", data[:4])[0] length = len(data) if length < obj_size: raise InvalidBSON("objsize too large") if obj_size != length or data[obj_size - 1:obj_size] != ZERO: raise InvalidBSON("bad eoo") elements = data[4:obj_size - 1] return (_elements_to_dict(elements, as_class, tz_aware, uuid_subtype), data[obj_size:]) if _use_c: _bson_to_dict = _cbson._bson_to_dict def _element_to_bson(key, value, check_keys, uuid_subtype): if not isinstance(key, basestring): raise InvalidDocument("documents must have only string keys, " "key was %r" % key) if check_keys: if key.startswith("$"): raise InvalidDocument("key %r must not start with '$'" % key) if "." in key: raise InvalidDocument("key %r must not contain '.'" % key) name = _make_c_string(key, True) if isinstance(value, float): return BSONNUM + name + struct.pack("<d", value) if _use_uuid: if isinstance(value, uuid.UUID): # Java Legacy if uuid_subtype == JAVA_LEGACY: # Python 3.0(.1) returns a bytearray instance for bytes (3.1 # and newer just return a bytes instance). Convert that to # binary_type (here and below) for compatibility. from_uuid = binary_type(value.bytes) as_legacy_java = from_uuid[0:8][::-1] + from_uuid[8:16][::-1] value = Binary(as_legacy_java, subtype=OLD_UUID_SUBTYPE) # C# legacy elif uuid_subtype == CSHARP_LEGACY: # Microsoft GUID representation. value = Binary(binary_type(value.bytes_le), subtype=OLD_UUID_SUBTYPE) # Python else: value = Binary(binary_type(value.bytes), subtype=uuid_subtype) if isinstance(value, Binary): subtype = value.subtype if subtype == 2: value = struct.pack("<i", len(value)) + value return (BSONBIN + name + struct.pack("<i", len(value)) + b(chr(subtype)) + value) if isinstance(value, Code): cstring = _make_c_string(value) if not value.scope: length = struct.pack("<i", len(cstring)) return BSONCOD + name + length + cstring scope = _dict_to_bson(value.scope, False, uuid_subtype, False) full_length = struct.pack("<i", 8 + len(cstring) + len(scope)) length = struct.pack("<i", len(cstring)) return BSONCWS + name + full_length + length + cstring + scope if isinstance(value, binary_type): if PY3: # Python3 special case. Store 'bytes' as BSON binary subtype 0. 
return (BSONBIN + name + struct.pack("<i", len(value)) + ZERO + value) cstring = _make_c_string(value) length = struct.pack("<i", len(cstring)) return BSONSTR + name + length + cstring if isinstance(value, unicode): cstring = _make_c_string(value) length = struct.pack("<i", len(cstring)) return BSONSTR + name + length + cstring if isinstance(value, dict): return BSONOBJ + name + _dict_to_bson(value, check_keys, uuid_subtype, False) if isinstance(value, (list, tuple)): as_dict = SON(zip([str(i) for i in range(len(value))], value)) return BSONARR + name + _dict_to_bson(as_dict, check_keys, uuid_subtype, False) if isinstance(value, ObjectId): return BSONOID + name + value.binary if value is True: return BSONBOO + name + ONE if value is False: return BSONBOO + name + ZERO if isinstance(value, int): # TODO this is an ugly way to check for this... if value > MAX_INT64 or value < MIN_INT64: raise OverflowError("BSON can only handle up to 8-byte ints") if value > MAX_INT32 or value < MIN_INT32: return BSONLON + name + struct.pack("<q", value) return BSONINT + name + struct.pack("<i", value) # 2to3 will convert long to int here since there is no long in python3. # That's OK. The previous if block will match instead. if isinstance(value, long): if value > MAX_INT64 or value < MIN_INT64: raise OverflowError("BSON can only handle up to 8-byte ints") return BSONLON + name + struct.pack("<q", value) if isinstance(value, datetime.datetime): if value.utcoffset() is not None: value = value - value.utcoffset() millis = int(calendar.timegm(value.timetuple()) * 1000 + value.microsecond / 1000) return BSONDAT + name + struct.pack("<q", millis) if isinstance(value, Timestamp): time = struct.pack("<I", value.time) inc = struct.pack("<I", value.inc) return BSONTIM + name + inc + time if value is None: return BSONNUL + name if isinstance(value, RE_TYPE): pattern = value.pattern flags = "" if value.flags & re.IGNORECASE: flags += "i" if value.flags & re.LOCALE: flags += "l" if value.flags & re.MULTILINE: flags += "m" if value.flags & re.DOTALL: flags += "s" if value.flags & re.UNICODE: flags += "u" if value.flags & re.VERBOSE: flags += "x" return BSONRGX + name + _make_c_string(pattern, True) + \ _make_c_string(flags) if isinstance(value, DBRef): return _element_to_bson(key, value.as_doc(), False, uuid_subtype) if isinstance(value, MinKey): return BSONMIN + name if isinstance(value, MaxKey): return BSONMAX + name raise InvalidDocument("cannot convert value of type %s to bson" % type(value)) def _dict_to_bson(dict, check_keys, uuid_subtype, top_level=True): try: elements = [] if top_level and "_id" in dict: elements.append(_element_to_bson("_id", dict["_id"], False, uuid_subtype)) for (key, value) in dict.iteritems(): if not top_level or key != "_id": elements.append(_element_to_bson(key, value, check_keys, uuid_subtype)) except AttributeError: raise TypeError("encoder expected a mapping type but got: %r" % dict) encoded = EMPTY.join(elements) length = len(encoded) + 5 return struct.pack("<i", length) + encoded + ZERO if _use_c: _dict_to_bson = _cbson._dict_to_bson def decode_all(data, as_class=dict, tz_aware=True, uuid_subtype=OLD_UUID_SUBTYPE): """Decode BSON data to multiple documents. `data` must be a string of concatenated, valid, BSON-encoded documents. :Parameters: - `data`: BSON data - `as_class` (optional): the class to use for the resulting documents - `tz_aware` (optional): if ``True``, return timezone-aware :class:`~datetime.datetime` instances .. 
versionadded:: 1.9 """ docs = [] position = 0 end = len(data) - 1 while position < end: obj_size = struct.unpack("<i", data[position:position + 4])[0] if len(data) - position < obj_size: raise InvalidBSON("objsize too large") if data[position + obj_size - 1:position + obj_size] != ZERO: raise InvalidBSON("bad eoo") elements = data[position + 4:position + obj_size - 1] position += obj_size docs.append(_elements_to_dict(elements, as_class, tz_aware, uuid_subtype)) return docs if _use_c: decode_all = _cbson.decode_all def is_valid(bson): """Check that the given string represents valid :class:`BSON` data. Raises :class:`TypeError` if `bson` is not an instance of :class:`str` (:class:`bytes` in python 3). Returns ``True`` if `bson` is valid :class:`BSON`, ``False`` otherwise. :Parameters: - `bson`: the data to be validated """ if not isinstance(bson, binary_type): raise TypeError("BSON data must be an instance " "of a subclass of %s" % (binary_type.__name__,)) try: (_, remainder) = _bson_to_dict(bson, dict, True, OLD_UUID_SUBTYPE) return remainder == EMPTY except: return False class BSON(binary_type): """BSON (Binary JSON) data. """ @classmethod def encode(cls, document, check_keys=False, uuid_subtype=OLD_UUID_SUBTYPE): """Encode a document to a new :class:`BSON` instance. A document can be any mapping type (like :class:`dict`). Raises :class:`TypeError` if `document` is not a mapping type, or contains keys that are not instances of :class:`basestring` (:class:`str` in python 3). Raises :class:`~bson.errors.InvalidDocument` if `document` cannot be converted to :class:`BSON`. :Parameters: - `document`: mapping type representing a document - `check_keys` (optional): check if keys start with '$' or contain '.', raising :class:`~bson.errors.InvalidDocument` in either case .. versionadded:: 1.9 """ return cls(_dict_to_bson(document, check_keys, uuid_subtype)) def decode(self, as_class=dict, tz_aware=False, uuid_subtype=OLD_UUID_SUBTYPE): """Decode this BSON data. The default type to use for the resultant document is :class:`dict`. Any other class that supports :meth:`__setitem__` can be used instead by passing it as the `as_class` parameter. If `tz_aware` is ``True`` (recommended), any :class:`~datetime.datetime` instances returned will be timezone-aware, with their timezone set to :attr:`bson.tz_util.utc`. Otherwise (default), all :class:`~datetime.datetime` instances will be naive (but contain UTC). :Parameters: - `as_class` (optional): the class to use for the resulting document - `tz_aware` (optional): if ``True``, return timezone-aware :class:`~datetime.datetime` instances .. versionadded:: 1.9 """ (document, _) = _bson_to_dict(self, as_class, tz_aware, uuid_subtype) return document def has_c(): """Is the C extension installed? .. versionadded:: 1.9 """ return _use_c def has_uuid(): """Is the uuid module available? .. versionadded:: 2.3 """ return _use_uuid
mit
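A minimal round-trip sketch against the public API defined above (BSON.encode, BSON.decode, decode_all, is_valid); it assumes the package is importable as bson with its submodules and targets the Python 2-era API shown here:

from bson import BSON, decode_all, is_valid
from bson.objectid import ObjectId

doc = {"_id": ObjectId(), "name": u"Ada", "score": 3.5}
raw = BSON.encode(doc)                 # BSON is a bytes/str subclass holding one document
assert is_valid(raw)
print(raw.decode())                    # back to a dict; naive datetimes by default
print(decode_all(raw + raw))           # concatenated documents -> list of dicts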
Vagab0nd/SiCKRAGE
lib3/twilio/rest/sync/v1/service/sync_map/sync_map_item.py
1
20699
# coding=utf-8 r""" This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / """ from twilio.base import deserialize from twilio.base import serialize from twilio.base import values from twilio.base.instance_context import InstanceContext from twilio.base.instance_resource import InstanceResource from twilio.base.list_resource import ListResource from twilio.base.page import Page class SyncMapItemList(ListResource): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. """ def __init__(self, version, service_sid, map_sid): """ Initialize the SyncMapItemList :param Version version: Version that contains the resource :param service_sid: The SID of the Sync Service that the resource is associated with :param map_sid: The SID of the Sync Map that contains the Map Item :returns: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemList :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemList """ super(SyncMapItemList, self).__init__(version) # Path Solution self._solution = {'service_sid': service_sid, 'map_sid': map_sid, } self._uri = '/Services/{service_sid}/Maps/{map_sid}/Items'.format(**self._solution) def create(self, key, data, ttl=values.unset, item_ttl=values.unset, collection_ttl=values.unset): """ Create the SyncMapItemInstance :param unicode key: The unique, user-defined key for the Map Item :param dict data: A JSON string that represents an arbitrary, schema-less object that the Map Item stores :param unicode ttl: An alias for item_ttl :param unicode item_ttl: How long, in seconds, before the Map Item expires :param unicode collection_ttl: How long, in seconds, before the Map Item's parent Sync Map expires and is deleted :returns: The created SyncMapItemInstance :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemInstance """ data = values.of({ 'Key': key, 'Data': serialize.object(data), 'Ttl': ttl, 'ItemTtl': item_ttl, 'CollectionTtl': collection_ttl, }) payload = self._version.create(method='POST', uri=self._uri, data=data, ) return SyncMapItemInstance( self._version, payload, service_sid=self._solution['service_sid'], map_sid=self._solution['map_sid'], ) def stream(self, order=values.unset, from_=values.unset, bounds=values.unset, limit=None, page_size=None): """ Streams SyncMapItemInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param SyncMapItemInstance.QueryResultOrder order: How to order the Map Items returned by their key value :param unicode from_: The index of the first Sync Map Item resource to read :param SyncMapItemInstance.QueryFromBoundType bounds: Whether to include the Map Item referenced by the from parameter :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. 
min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemInstance] """ limits = self._version.read_limits(limit, page_size) page = self.page(order=order, from_=from_, bounds=bounds, page_size=limits['page_size'], ) return self._version.stream(page, limits['limit']) def list(self, order=values.unset, from_=values.unset, bounds=values.unset, limit=None, page_size=None): """ Lists SyncMapItemInstance records from the API as a list. Unlike stream(), this operation is eager and will load `limit` records into memory before returning. :param SyncMapItemInstance.QueryResultOrder order: How to order the Map Items returned by their key value :param unicode from_: The index of the first Sync Map Item resource to read :param SyncMapItemInstance.QueryFromBoundType bounds: Whether to include the Map Item referenced by the from parameter :param int limit: Upper limit for the number of records to return. list() guarantees never to return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, list() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemInstance] """ return list(self.stream(order=order, from_=from_, bounds=bounds, limit=limit, page_size=page_size, )) def page(self, order=values.unset, from_=values.unset, bounds=values.unset, page_token=values.unset, page_number=values.unset, page_size=values.unset): """ Retrieve a single page of SyncMapItemInstance records from the API. Request is executed immediately :param SyncMapItemInstance.QueryResultOrder order: How to order the Map Items returned by their key value :param unicode from_: The index of the first Sync Map Item resource to read :param SyncMapItemInstance.QueryFromBoundType bounds: Whether to include the Map Item referenced by the from parameter :param str page_token: PageToken provided by the API :param int page_number: Page Number, this value is simply for client state :param int page_size: Number of records to return, defaults to 50 :returns: Page of SyncMapItemInstance :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemPage """ data = values.of({ 'Order': order, 'From': from_, 'Bounds': bounds, 'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, }) response = self._version.page(method='GET', uri=self._uri, params=data, ) return SyncMapItemPage(self._version, response, self._solution) def get_page(self, target_url): """ Retrieve a specific page of SyncMapItemInstance records from the API. 
Request is executed immediately :param str target_url: API-generated URL for the requested results page :returns: Page of SyncMapItemInstance :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemPage """ response = self._version.domain.twilio.request( 'GET', target_url, ) return SyncMapItemPage(self._version, response, self._solution) def get(self, key): """ Constructs a SyncMapItemContext :param key: The key value of the Sync Map Item resource to fetch :returns: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemContext :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemContext """ return SyncMapItemContext( self._version, service_sid=self._solution['service_sid'], map_sid=self._solution['map_sid'], key=key, ) def __call__(self, key): """ Constructs a SyncMapItemContext :param key: The key value of the Sync Map Item resource to fetch :returns: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemContext :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemContext """ return SyncMapItemContext( self._version, service_sid=self._solution['service_sid'], map_sid=self._solution['map_sid'], key=key, ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Sync.V1.SyncMapItemList>' class SyncMapItemPage(Page): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. """ def __init__(self, version, response, solution): """ Initialize the SyncMapItemPage :param Version version: Version that contains the resource :param Response response: Response from the API :param service_sid: The SID of the Sync Service that the resource is associated with :param map_sid: The SID of the Sync Map that contains the Map Item :returns: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemPage :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemPage """ super(SyncMapItemPage, self).__init__(version, response) # Path Solution self._solution = solution def get_instance(self, payload): """ Build an instance of SyncMapItemInstance :param dict payload: Payload response from the API :returns: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemInstance :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemInstance """ return SyncMapItemInstance( self._version, payload, service_sid=self._solution['service_sid'], map_sid=self._solution['map_sid'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ return '<Twilio.Sync.V1.SyncMapItemPage>' class SyncMapItemContext(InstanceContext): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. 
""" def __init__(self, version, service_sid, map_sid, key): """ Initialize the SyncMapItemContext :param Version version: Version that contains the resource :param service_sid: The SID of the Sync Service with the Sync Map Item resource to fetch :param map_sid: The SID of the Sync Map with the Sync Map Item resource to fetch :param key: The key value of the Sync Map Item resource to fetch :returns: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemContext :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemContext """ super(SyncMapItemContext, self).__init__(version) # Path Solution self._solution = {'service_sid': service_sid, 'map_sid': map_sid, 'key': key, } self._uri = '/Services/{service_sid}/Maps/{map_sid}/Items/{key}'.format(**self._solution) def fetch(self): """ Fetch the SyncMapItemInstance :returns: The fetched SyncMapItemInstance :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemInstance """ payload = self._version.fetch(method='GET', uri=self._uri, ) return SyncMapItemInstance( self._version, payload, service_sid=self._solution['service_sid'], map_sid=self._solution['map_sid'], key=self._solution['key'], ) def delete(self, if_match=values.unset): """ Deletes the SyncMapItemInstance :param unicode if_match: The If-Match HTTP request header :returns: True if delete succeeds, False otherwise :rtype: bool """ headers = values.of({'If-Match': if_match, }) return self._version.delete(method='DELETE', uri=self._uri, headers=headers, ) def update(self, data=values.unset, ttl=values.unset, item_ttl=values.unset, collection_ttl=values.unset, if_match=values.unset): """ Update the SyncMapItemInstance :param dict data: A JSON string that represents an arbitrary, schema-less object that the Map Item stores :param unicode ttl: An alias for item_ttl :param unicode item_ttl: How long, in seconds, before the Map Item expires :param unicode collection_ttl: How long, in seconds, before the Map Item's parent Sync Map expires and is deleted :param unicode if_match: The If-Match HTTP request header :returns: The updated SyncMapItemInstance :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemInstance """ data = values.of({ 'Data': serialize.object(data), 'Ttl': ttl, 'ItemTtl': item_ttl, 'CollectionTtl': collection_ttl, }) headers = values.of({'If-Match': if_match, }) payload = self._version.update(method='POST', uri=self._uri, data=data, headers=headers, ) return SyncMapItemInstance( self._version, payload, service_sid=self._solution['service_sid'], map_sid=self._solution['map_sid'], key=self._solution['key'], ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Sync.V1.SyncMapItemContext {}>'.format(context) class SyncMapItemInstance(InstanceResource): """ PLEASE NOTE that this class contains beta products that are subject to change. Use them with caution. 
""" class QueryResultOrder(object): ASC = "asc" DESC = "desc" class QueryFromBoundType(object): INCLUSIVE = "inclusive" EXCLUSIVE = "exclusive" def __init__(self, version, payload, service_sid, map_sid, key=None): """ Initialize the SyncMapItemInstance :returns: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemInstance :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemInstance """ super(SyncMapItemInstance, self).__init__(version) # Marshaled Properties self._properties = { 'key': payload.get('key'), 'account_sid': payload.get('account_sid'), 'service_sid': payload.get('service_sid'), 'map_sid': payload.get('map_sid'), 'url': payload.get('url'), 'revision': payload.get('revision'), 'data': payload.get('data'), 'date_expires': deserialize.iso8601_datetime(payload.get('date_expires')), 'date_created': deserialize.iso8601_datetime(payload.get('date_created')), 'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')), 'created_by': payload.get('created_by'), } # Context self._context = None self._solution = { 'service_sid': service_sid, 'map_sid': map_sid, 'key': key or self._properties['key'], } @property def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: SyncMapItemContext for this SyncMapItemInstance :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemContext """ if self._context is None: self._context = SyncMapItemContext( self._version, service_sid=self._solution['service_sid'], map_sid=self._solution['map_sid'], key=self._solution['key'], ) return self._context @property def key(self): """ :returns: The unique, user-defined key for the Map Item :rtype: unicode """ return self._properties['key'] @property def account_sid(self): """ :returns: The SID of the Account that created the resource :rtype: unicode """ return self._properties['account_sid'] @property def service_sid(self): """ :returns: The SID of the Sync Service that the resource is associated with :rtype: unicode """ return self._properties['service_sid'] @property def map_sid(self): """ :returns: The SID of the Sync Map that contains the Map Item :rtype: unicode """ return self._properties['map_sid'] @property def url(self): """ :returns: The absolute URL of the Map Item resource :rtype: unicode """ return self._properties['url'] @property def revision(self): """ :returns: The current revision of the Map Item, represented as a string :rtype: unicode """ return self._properties['revision'] @property def data(self): """ :returns: An arbitrary, schema-less object that the Map Item stores :rtype: dict """ return self._properties['data'] @property def date_expires(self): """ :returns: The ISO 8601 date and time in GMT when the Map Item expires :rtype: datetime """ return self._properties['date_expires'] @property def date_created(self): """ :returns: The ISO 8601 date and time in GMT when the resource was created :rtype: datetime """ return self._properties['date_created'] @property def date_updated(self): """ :returns: The ISO 8601 date and time in GMT when the resource was last updated :rtype: datetime """ return self._properties['date_updated'] @property def created_by(self): """ :returns: The identity of the Map Item's creator :rtype: unicode """ return self._properties['created_by'] def fetch(self): """ Fetch the SyncMapItemInstance :returns: The fetched SyncMapItemInstance :rtype: 
twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemInstance """ return self._proxy.fetch() def delete(self, if_match=values.unset): """ Deletes the SyncMapItemInstance :param unicode if_match: The If-Match HTTP request header :returns: True if delete succeeds, False otherwise :rtype: bool """ return self._proxy.delete(if_match=if_match, ) def update(self, data=values.unset, ttl=values.unset, item_ttl=values.unset, collection_ttl=values.unset, if_match=values.unset): """ Update the SyncMapItemInstance :param dict data: A JSON string that represents an arbitrary, schema-less object that the Map Item stores :param unicode ttl: An alias for item_ttl :param unicode item_ttl: How long, in seconds, before the Map Item expires :param unicode collection_ttl: How long, in seconds, before the Map Item's parent Sync Map expires and is deleted :param unicode if_match: The If-Match HTTP request header :returns: The updated SyncMapItemInstance :rtype: twilio.rest.sync.v1.service.sync_map.sync_map_item.SyncMapItemInstance """ return self._proxy.update( data=data, ttl=ttl, item_ttl=item_ttl, collection_ttl=collection_ttl, if_match=if_match, ) def __repr__(self): """ Provide a friendly representation :returns: Machine friendly representation :rtype: str """ context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items()) return '<Twilio.Sync.V1.SyncMapItemInstance {}>'.format(context)
gpl-3.0
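The list, context and instance classes above are normally reached through the top-level Twilio client rather than constructed directly. A hedged sketch of that usage (the SIDs and token are placeholders, and the access path is the usual twilio-python pattern, which may differ slightly between library versions):

from twilio.rest import Client

client = Client("ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "your_auth_token")
items = client.sync.services("ISxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") \
              .sync_maps("MPxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") \
              .sync_map_items

item = items.create(key="player:42", data={"score": 10}, item_ttl=3600)
print(item.revision, item.date_expires)

for it in items.stream(order="asc", limit=20):   # QueryResultOrder.ASC
    print(it.key, it.data)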
sserrot/champion_relationships
venv/Lib/site-packages/win32com/demos/excelRTDServer.py
6
15526
"""Excel IRTDServer implementation. This module is a functional example of how to implement the IRTDServer interface in python, using the pywin32 extensions. Further details, about this interface and it can be found at: http://msdn.microsoft.com/library/default.asp?url=/library/en-us/dnexcl2k2/html/odc_xlrtdfaq.asp """ # Copyright (c) 2003-2004 by Chris Nilsson <[email protected]> # # By obtaining, using, and/or copying this software and/or its # associated documentation, you agree that you have read, understood, # and will comply with the following terms and conditions: # # Permission to use, copy, modify, and distribute this software and # its associated documentation for any purpose and without fee is # hereby granted, provided that the above copyright notice appears in # all copies, and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Christopher Nilsson (the author) not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. # # THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- # ABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. import pythoncom import win32com.client from win32com import universal from win32com.client import gencache from win32com.server.exception import COMException import threading import datetime # For the example classes... # Typelib info for version 10 - aka Excel XP. # This is the minimum version of excel that we can work with as this is when # Microsoft introduced these interfaces. EXCEL_TLB_GUID = '{00020813-0000-0000-C000-000000000046}' EXCEL_TLB_LCID = 0 EXCEL_TLB_MAJOR = 1 EXCEL_TLB_MINOR = 4 # Import the excel typelib to make sure we've got early-binding going on. # The "ByRef" parameters we use later won't work without this. gencache.EnsureModule(EXCEL_TLB_GUID, EXCEL_TLB_LCID, \ EXCEL_TLB_MAJOR, EXCEL_TLB_MINOR) # Tell pywin to import these extra interfaces. # -- # QUESTION: Why? The interfaces seem to descend from IDispatch, so # I'd have thought, for example, calling callback.UpdateNotify() (on the # IRTDUpdateEvent callback excel gives us) would work without molestation. # But the callback needs to be cast to a "real" IRTDUpdateEvent type. Hmm... # This is where my small knowledge of the pywin framework / COM gets hazy. # -- # Again, we feed in the Excel typelib as the source of these interfaces. universal.RegisterInterfaces(EXCEL_TLB_GUID, EXCEL_TLB_LCID, EXCEL_TLB_MAJOR, EXCEL_TLB_MINOR, ['IRtdServer','IRTDUpdateEvent']) class ExcelRTDServer(object): """Base RTDServer class. Provides most of the features needed to implement the IRtdServer interface. Manages topic adding, removal, and packing up the values for excel. Shouldn't be instanciated directly. Instead, descendant classes should override the CreateTopic() method. Topic objects only need to provide a GetValue() function to play nice here. The values given need to be atomic (eg. string, int, float... etc). Also note: nothing has been done within this class to ensure that we get time to check our topics for updates. 
I've left that up to the subclass since the ways, and needs, of refreshing your topics will vary greatly. For example, the sample implementation uses a timer thread to wake itself up. Whichever way you choose to do it, your class needs to be able to wake up occaisionally, since excel will never call your class without being asked to first. Excel will communicate with our object in this order: 1. Excel instanciates our object and calls ServerStart, providing us with an IRTDUpdateEvent callback object. 2. Excel calls ConnectData when it wants to subscribe to a new "topic". 3. When we have new data to provide, we call the UpdateNotify method of the callback object we were given. 4. Excel calls our RefreshData method, and receives a 2d SafeArray (row-major) containing the Topic ids in the 1st dim, and the topic values in the 2nd dim. 5. When not needed anymore, Excel will call our DisconnectData to unsubscribe from a topic. 6. When there are no more topics left, Excel will call our ServerTerminate method to kill us. Throughout, at undetermined periods, Excel will call our Heartbeat method to see if we're still alive. It must return a non-zero value, or we'll be killed. NOTE: By default, excel will at most call RefreshData once every 2 seconds. This is a setting that needs to be changed excel-side. To change this, you can set the throttle interval like this in the excel VBA object model: Application.RTD.ThrottleInterval = 1000 ' milliseconds """ _com_interfaces_ = ['IRtdServer'] _public_methods_ = ['ConnectData','DisconnectData','Heartbeat', 'RefreshData','ServerStart','ServerTerminate'] _reg_clsctx_ = pythoncom.CLSCTX_INPROC_SERVER #_reg_clsid_ = "# subclass must provide this class attribute" #_reg_desc_ = "# subclass should provide this description" #_reg_progid_ = "# subclass must provide this class attribute" ALIVE = 1 NOT_ALIVE = 0 def __init__(self): """Constructor""" super(ExcelRTDServer, self).__init__() self.IsAlive = self.ALIVE self.__callback = None self.topics = {} def SignalExcel(self): """Use the callback we were given to tell excel new data is available.""" if self.__callback is None: raise COMException(desc="Callback excel provided is Null") self.__callback.UpdateNotify() def ConnectData(self, TopicID, Strings, GetNewValues): """Creates a new topic out of the Strings excel gives us.""" try: self.topics[TopicID] = self.CreateTopic(Strings) except Exception as why: raise COMException(desc=str(why)) GetNewValues = True result = self.topics[TopicID] if result is None: result = "# %s: Waiting for update" % self.__class__.__name__ else: result = result.GetValue() # fire out internal event... self.OnConnectData(TopicID) # GetNewValues as per interface is ByRef, so we need to pass it back too. return result, GetNewValues def DisconnectData(self, TopicID): """Deletes the given topic.""" self.OnDisconnectData(TopicID) if TopicID in self.topics: self.topics[TopicID] = None del self.topics[TopicID] def Heartbeat(self): """Called by excel to see if we're still here.""" return self.IsAlive def RefreshData(self, TopicCount): """Packs up the topic values. Called by excel when it's ready for an update. Needs to: * Return the current number of topics, via the "ByRef" TopicCount * Return a 2d SafeArray of the topic data. - 1st dim: topic numbers - 2nd dim: topic values We could do some caching, instead of repacking everytime... 
But this works for demonstration purposes.""" TopicCount = len(self.topics) self.OnRefreshData() # Grow the lists, so we don't need a heap of calls to append() results = [[None] * TopicCount, [None] * TopicCount] # Excel expects a 2-dimensional array. The first dim contains the # topic numbers, and the second contains the values for the topics. # In true VBA style (yuck), we need to pack the array in row-major format, # which looks like: # ( (topic_num1, topic_num2, ..., topic_numN), \ # (topic_val1, topic_val2, ..., topic_valN) ) for idx, topicdata in enumerate(self.topics.items()): topicNum, topic = topicdata results[0][idx] = topicNum results[1][idx] = topic.GetValue() # TopicCount is meant to be passed to us ByRef, so return it as well, as per # the way pywin32 handles ByRef arguments. return tuple(results), TopicCount def ServerStart(self, CallbackObject): """Excel has just created us... We take its callback for later, and set up shop.""" self.IsAlive = self.ALIVE if CallbackObject is None: raise COMException(desc='Excel did not provide a callback') # Need to "cast" the raw PyIDispatch object to the IRTDUpdateEvent interface IRTDUpdateEventKlass = win32com.client.CLSIDToClass.GetClass('{A43788C1-D91B-11D3-8F39-00C04F3651B8}') self.__callback = IRTDUpdateEventKlass(CallbackObject) self.OnServerStart() return self.IsAlive def ServerTerminate(self): """Called when excel no longer wants us.""" self.IsAlive = self.NOT_ALIVE # On next heartbeat, excel will free us self.OnServerTerminate() def CreateTopic(self, TopicStrings=None): """Topic factory method. Subclass must override. Topic objects need to provide: * GetValue() method which returns an atomic value. Will raise NotImplemented if not overridden. """ raise NotImplemented('Subclass must implement') # Overridable class events... def OnConnectData(self, TopicID): """Called when a new topic has been created, at excel's request.""" pass def OnDisconnectData(self, TopicID): """Called when a topic is about to be deleted, at excel's request.""" pass def OnRefreshData(self): """Called when excel has requested all current topic data.""" pass def OnServerStart(self): """Called when excel has instanciated us.""" pass def OnServerTerminate(self): """Called when excel is about to destroy us.""" pass class RTDTopic(object): """Base RTD Topic. Only method required by our RTDServer implementation is GetValue(). The others are more for convenience.""" def __init__(self, TopicStrings): super(RTDTopic, self).__init__() self.TopicStrings = TopicStrings self.__currentValue = None self.__dirty = False def Update(self, sender): """Called by the RTD Server. Gives us a chance to check if our topic data needs to be changed (eg. check a file, quiz a database, etc).""" raise NotImplemented('subclass must implement') def Reset(self): """Call when this topic isn't considered "dirty" anymore.""" self.__dirty = False def GetValue(self): return self.__currentValue def SetValue(self, value): self.__dirty = True self.__currentValue = value def HasChanged(self): return self.__dirty # -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= ###################################### # Example classes ###################################### class TimeServer(ExcelRTDServer): """Example Time RTD server. Sends time updates back to excel. 
example of use, in an excel sheet: =RTD("Python.RTD.TimeServer","","seconds","5") This will cause a timestamp string to fill the cell, and update its value every 5 seconds (or as close as possible depending on how busy excel is). The empty string parameter denotes the com server is running on the local machine. Otherwise, put in the hostname to look on. For more info on this, lookup the Excel help for its "RTD" worksheet function. Obviously, you'd want to wrap this kind of thing in a friendlier VBA function. Also, remember that the RTD function accepts a maximum of 28 arguments! If you want to pass more, you may need to concatenate arguments into one string, and have your topic parse them appropriately. """ # win32com.server setup attributes... # Never copy the _reg_clsid_ value in your own classes! _reg_clsid_ = '{EA7F2CF1-11A2-45E4-B2D5-68E240DB8CB1}' _reg_progid_ = 'Python.RTD.TimeServer' _reg_desc_ = "Python class implementing Excel IRTDServer -- feeds time" # other class attributes... INTERVAL = 0.5 # secs. Threaded timer will wake us up at this interval. def __init__(self): super(TimeServer, self).__init__() # Simply timer thread to ensure we get to update our topics, and # tell excel about any changes. This is a pretty basic and dirty way to # do this. Ideally, there should be some sort of waitable (eg. either win32 # event, socket data event...) and be kicked off by that event triggering. # As soon as we set up shop here, we _must_ return control back to excel. # (ie. we can't block and do our own thing...) self.ticker = threading.Timer(self.INTERVAL, self.Update) def OnServerStart(self): self.ticker.start() def OnServerTerminate(self): if not self.ticker.finished.isSet(): self.ticker.cancel() # Cancel our wake-up thread. Excel has killed us. def Update(self): # Get our wake-up thread ready... self.ticker = threading.Timer(self.INTERVAL, self.Update) try: # Check if any of our topics have new info to pass on if len(self.topics): refresh = False for topic in self.topics.values(): topic.Update(self) if topic.HasChanged(): refresh = True topic.Reset() if refresh: self.SignalExcel() finally: self.ticker.start() # Make sure we get to run again def CreateTopic(self, TopicStrings=None): """Topic factory. Builds a TimeTopic object out of the given TopicStrings.""" return TimeTopic(TopicStrings) class TimeTopic(RTDTopic): """Example topic for example RTD server. Will accept some simple commands to alter how long to delay value updates. Commands: * seconds, delay_in_seconds * minutes, delay_in_minutes * hours, delay_in_hours """ def __init__(self, TopicStrings): super(TimeTopic, self).__init__(TopicStrings) try: self.cmd, self.delay = self.TopicStrings except Exception as E: # We could simply return a "# ERROR" type string as the # topic value, but explosions like this should be able to get handled by # the VBA-side "On Error" stuff. 
raise ValueError("Invalid topic strings: %s" % str(TopicStrings)) #self.cmd = str(self.cmd) self.delay = float(self.delay) # setup our initial value self.checkpoint = self.timestamp() self.SetValue(str(self.checkpoint)) def timestamp(self): return datetime.datetime.now() def Update(self, sender): now = self.timestamp() delta = now - self.checkpoint refresh = False if self.cmd == "seconds": if delta.seconds >= self.delay: refresh = True elif self.cmd == "minutes": if delta.minutes >= self.delay: refresh = True elif self.cmd == "hours": if delta.hours >= self.delay: refresh = True else: self.SetValue("#Unknown command: " + self.cmd) if refresh: self.SetValue(str(now)) self.checkpoint = now if __name__ == "__main__": import win32com.server.register # Register/Unregister TimeServer example # eg. at the command line: excelrtd.py --register # Then type in an excel cell something like: # =RTD("Python.RTD.TimeServer","","seconds","5") win32com.server.register.UseCommandLine(TimeServer)
mit
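The base-class docstring above spells out the subclass contract: override CreateTopic() and hand back topic objects that expose GetValue(). A minimal sketch under those assumptions (the class names and CLSID below are placeholders, not part of the original demo):

class EchoTopic(RTDTopic):
    """Topic whose value is fixed when it is created."""
    def Update(self, sender):
        pass  # nothing to refresh

class EchoServer(ExcelRTDServer):
    _reg_clsid_ = '{YOUR-GUID-HERE}'          # generate a fresh GUID before registering
    _reg_progid_ = 'Python.RTD.EchoServer'
    _reg_desc_ = 'Echoes its topic strings back to Excel'

    def CreateTopic(self, TopicStrings=None):
        topic = EchoTopic(TopicStrings)
        topic.SetValue(" ".join(TopicStrings))  # atomic value handed back to Excel
        return topic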
lele1122/pelican-plugins
html_entity/html_entity.py
76
1310
""" HTML Entities for reStructured Text =================================== Allows user to use HTML entities (&copy;, &#149;, etc.) in RST documents. Usage: :html_entity:`copy` :html_entity:`149` :html_entity:`#149` """ from __future__ import unicode_literals from docutils import nodes, utils from docutils.parsers.rst import roles from pelican.readers import PelicanHTMLTranslator import six class html_entity(nodes.Inline, nodes.Node): # Subclassing Node directly since TextElement automatically appends the escaped element def __init__(self, rawsource, text): self.rawsource = rawsource self.text = text self.children = [] self.attributes = {} def astext(self): return self.text def entity_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): text = utils.unescape(text) entity_code = text try: entity_code = "#{}".format(six.u(int(entity_code))) except ValueError: pass entity_code = "&{};".format(entity_code) return [html_entity(text, entity_code)], [] def register(): roles.register_local_role('html_entity', entity_role) PelicanHTMLTranslator.visit_html_entity = lambda self, node: self.body.append(node.astext()) PelicanHTMLTranslator.depart_html_entity = lambda self, node: None
agpl-3.0
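A quick way to exercise entity_role from the plugin above outside of Pelican (a sketch assuming Python 3, where six.u is a no-op; the inliner argument is unused by the role, so None is passed):

result, messages = entity_role('html_entity', ':html_entity:`copy`', 'copy', 1, None)
print(result[0].astext())   # &copy;
result, messages = entity_role('html_entity', ':html_entity:`149`', '149', 1, None)
print(result[0].astext())   # &#149;  (bare numbers get the leading '#')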
leereilly/django-1
tests/regressiontests/model_forms_regress/tests.py
6
20105
from __future__ import absolute_import from datetime import date from django import forms from django.core.exceptions import FieldError, ValidationError from django.core.files.uploadedfile import SimpleUploadedFile from django.forms.models import (modelform_factory, ModelChoiceField, fields_for_model, construct_instance, ModelFormMetaclass) from django.utils import unittest from django.test import TestCase from .models import (Person, RealPerson, Triple, FilePathModel, Article, Publication, CustomFF, Author, Author1, Homepage, Document, Edition) class ModelMultipleChoiceFieldTests(TestCase): def test_model_multiple_choice_number_of_queries(self): """ Test that ModelMultipleChoiceField does O(1) queries instead of O(n) (#10156). """ persons = [Person.objects.create(name="Person %s" % i) for i in range(30)] f = forms.ModelMultipleChoiceField(queryset=Person.objects.all()) self.assertNumQueries(1, f.clean, [p.pk for p in persons[1:11:2]]) def test_model_multiple_choice_run_validators(self): """ Test that ModelMultipleChoiceField run given validators (#14144). """ for i in range(30): Person.objects.create(name="Person %s" % i) self._validator_run = False def my_validator(value): self._validator_run = True f = forms.ModelMultipleChoiceField(queryset=Person.objects.all(), validators=[my_validator]) f.clean([p.pk for p in Person.objects.all()[8:9]]) self.assertTrue(self._validator_run) class TripleForm(forms.ModelForm): class Meta: model = Triple class UniqueTogetherTests(TestCase): def test_multiple_field_unique_together(self): """ When the same field is involved in multiple unique_together constraints, we need to make sure we don't remove the data for it before doing all the validation checking (not just failing after the first one). """ Triple.objects.create(left=1, middle=2, right=3) form = TripleForm({'left': '1', 'middle': '2', 'right': '3'}) self.assertFalse(form.is_valid()) form = TripleForm({'left': '1', 'middle': '3', 'right': '1'}) self.assertTrue(form.is_valid()) class TripleFormWithCleanOverride(forms.ModelForm): class Meta: model = Triple def clean(self): if not self.cleaned_data['left'] == self.cleaned_data['right']: raise forms.ValidationError('Left and right should be equal') return self.cleaned_data class OverrideCleanTests(TestCase): def test_override_clean(self): """ Regression for #12596: Calling super from ModelForm.clean() should be optional. """ form = TripleFormWithCleanOverride({'left': 1, 'middle': 2, 'right': 1}) self.assertTrue(form.is_valid()) # form.instance.left will be None if the instance was not constructed # by form.full_clean(). self.assertEqual(form.instance.left, 1) # Regression test for #12960. # Make sure the cleaned_data returned from ModelForm.clean() is applied to the # model instance. 
class PublicationForm(forms.ModelForm): def clean(self): self.cleaned_data['title'] = self.cleaned_data['title'].upper() return self.cleaned_data class Meta: model = Publication class ModelFormCleanTest(TestCase): def test_model_form_clean_applies_to_model(self): data = {'title': 'test', 'date_published': '2010-2-25'} form = PublicationForm(data) publication = form.save() self.assertEqual(publication.title, 'TEST') class FPForm(forms.ModelForm): class Meta: model = FilePathModel class FilePathFieldTests(TestCase): def test_file_path_field_blank(self): """ Regression test for #8842: FilePathField(blank=True) """ form = FPForm() names = [p[1] for p in form['path'].field.choices] names.sort() self.assertEqual(names, ['---------', '__init__.py', 'models.py', 'tests.py']) class ManyToManyCallableInitialTests(TestCase): def test_callable(self): "Regression for #10349: A callable can be provided as the initial value for an m2m field" # Set up a callable initial value def formfield_for_dbfield(db_field, **kwargs): if db_field.name == 'publications': kwargs['initial'] = lambda: Publication.objects.all().order_by('date_published')[:2] return db_field.formfield(**kwargs) # Set up some Publications to use as data book1 = Publication.objects.create(title="First Book", date_published=date(2007,1,1)) book2 = Publication.objects.create(title="Second Book", date_published=date(2008,1,1)) book3 = Publication.objects.create(title="Third Book", date_published=date(2009,1,1)) # Create a ModelForm, instantiate it, and check that the output is as expected ModelForm = modelform_factory(Article, formfield_callback=formfield_for_dbfield) form = ModelForm() self.assertHTMLEqual(form.as_ul(), u"""<li><label for="id_headline">Headline:</label> <input id="id_headline" type="text" name="headline" maxlength="100" /></li> <li><label for="id_publications">Publications:</label> <select multiple="multiple" name="publications" id="id_publications"> <option value="%d" selected="selected">First Book</option> <option value="%d" selected="selected">Second Book</option> <option value="%d">Third Book</option> </select> <span class="helptext"> Hold down "Control", or "Command" on a Mac, to select more than one.</span></li>""" % (book1.pk, book2.pk, book3.pk)) class CFFForm(forms.ModelForm): class Meta: model = CustomFF class CustomFieldSaveTests(TestCase): def test_save(self): "Regression for #11149: save_form_data should be called only once" # It's enough that the form saves without error -- the custom save routine will # generate an AssertionError if it is called more than once during save. 
form = CFFForm(data = {'f': None}) form.save() class ModelChoiceIteratorTests(TestCase): def test_len(self): class Form(forms.ModelForm): class Meta: model = Article fields = ["publications"] Publication.objects.create(title="Pravda", date_published=date(1991, 8, 22)) f = Form() self.assertEqual(len(f.fields["publications"].choices), 1) class RealPersonForm(forms.ModelForm): class Meta: model = RealPerson class CustomModelFormSaveMethod(TestCase): def test_string_message(self): data = {'name': 'anonymous'} form = RealPersonForm(data) self.assertEqual(form.is_valid(), False) self.assertEqual(form.errors['__all__'], ['Please specify a real name.']) class ModelClassTests(TestCase): def test_no_model_class(self): class NoModelModelForm(forms.ModelForm): pass self.assertRaises(ValueError, NoModelModelForm) class OneToOneFieldTests(TestCase): def test_assignment_of_none(self): class AuthorForm(forms.ModelForm): class Meta: model = Author fields = ['publication', 'full_name'] publication = Publication.objects.create(title="Pravda", date_published=date(1991, 8, 22)) author = Author.objects.create(publication=publication, full_name='John Doe') form = AuthorForm({'publication':u'', 'full_name':'John Doe'}, instance=author) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data['publication'], None) author = form.save() # author object returned from form still retains original publication object # that's why we need to retreive it from database again new_author = Author.objects.get(pk=author.pk) self.assertEqual(new_author.publication, None) def test_assignment_of_none_null_false(self): class AuthorForm(forms.ModelForm): class Meta: model = Author1 fields = ['publication', 'full_name'] publication = Publication.objects.create(title="Pravda", date_published=date(1991, 8, 22)) author = Author1.objects.create(publication=publication, full_name='John Doe') form = AuthorForm({'publication':u'', 'full_name':'John Doe'}, instance=author) self.assertTrue(not form.is_valid()) class ModelChoiceForm(forms.Form): person = ModelChoiceField(Person.objects.all()) class TestTicket11183(TestCase): def test_11183(self): form1 = ModelChoiceForm() field1 = form1.fields['person'] # To allow the widget to change the queryset of field1.widget.choices correctly, # without affecting other forms, the following must hold: self.assertTrue(field1 is not ModelChoiceForm.base_fields['person']) self.assertTrue(field1.widget.choices.field is field1) class HomepageForm(forms.ModelForm): class Meta: model = Homepage class URLFieldTests(TestCase): def test_url_on_modelform(self): "Check basic URL field validation on model forms" self.assertFalse(HomepageForm({'url': 'foo'}).is_valid()) self.assertFalse(HomepageForm({'url': 'http://'}).is_valid()) self.assertFalse(HomepageForm({'url': 'http://example'}).is_valid()) self.assertFalse(HomepageForm({'url': 'http://example.'}).is_valid()) self.assertFalse(HomepageForm({'url': 'http://com.'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://localhost'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://example.com'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://www.example.com'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://www.example.com/test'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://www.example.com:8000/test'}).is_valid()) self.assertTrue(HomepageForm({'url': 'http://example.com/foo/bar'}).is_valid()) def test_http_prefixing(self): "If the 
http:// prefix is omitted on form input, the field adds it again. (Refs #13613)" form = HomepageForm({'url': 'example.com'}) form.is_valid() # self.assertTrue(form.is_valid()) # self.assertEqual(form.cleaned_data['url'], 'http://example.com/') form = HomepageForm({'url': 'example.com/test'}) form.is_valid() # self.assertTrue(form.is_valid()) # self.assertEqual(form.cleaned_data['url'], 'http://example.com/test') class FormFieldCallbackTests(TestCase): def test_baseform_with_widgets_in_meta(self): """Regression for #13095: Using base forms with widgets defined in Meta should not raise errors.""" widget = forms.Textarea() class BaseForm(forms.ModelForm): class Meta: model = Person widgets = {'name': widget} Form = modelform_factory(Person, form=BaseForm) self.assertTrue(Form.base_fields['name'].widget is widget) def test_factory_with_widget_argument(self): """ Regression for #15315: modelform_factory should accept widgets argument """ widget = forms.Textarea() # Without a widget should not set the widget to textarea Form = modelform_factory(Person) self.assertNotEqual(Form.base_fields['name'].widget.__class__, forms.Textarea) # With a widget should not set the widget to textarea Form = modelform_factory(Person, widgets={'name':widget}) self.assertEqual(Form.base_fields['name'].widget.__class__, forms.Textarea) def test_custom_callback(self): """Test that a custom formfield_callback is used if provided""" callback_args = [] def callback(db_field, **kwargs): callback_args.append((db_field, kwargs)) return db_field.formfield(**kwargs) widget = forms.Textarea() class BaseForm(forms.ModelForm): class Meta: model = Person widgets = {'name': widget} _ = modelform_factory(Person, form=BaseForm, formfield_callback=callback) id_field, name_field = Person._meta.fields self.assertEqual(callback_args, [(id_field, {}), (name_field, {'widget': widget})]) def test_bad_callback(self): # A bad callback provided by user still gives an error self.assertRaises(TypeError, modelform_factory, Person, formfield_callback='not a function or callable') class InvalidFieldAndFactory(TestCase): """ Tests for #11905 """ def test_extra_field_model_form(self): try: class ExtraPersonForm(forms.ModelForm): """ ModelForm with an extra field """ age = forms.IntegerField() class Meta: model = Person fields = ('name', 'no-field') except FieldError as e: # Make sure the exception contains some reference to the # field responsible for the problem. self.assertTrue('no-field' in e.args[0]) else: self.fail('Invalid "no-field" field not caught') def test_extra_declared_field_model_form(self): try: class ExtraPersonForm(forms.ModelForm): """ ModelForm with an extra field """ age = forms.IntegerField() class Meta: model = Person fields = ('name', 'age') except FieldError: self.fail('Declarative field raised FieldError incorrectly') def test_extra_field_modelform_factory(self): self.assertRaises(FieldError, modelform_factory, Person, fields=['no-field', 'name']) class DocumentForm(forms.ModelForm): class Meta: model = Document class FileFieldTests(unittest.TestCase): def test_clean_false(self): """ If the ``clean`` method on a non-required FileField receives False as the data (meaning clear the field value), it returns False, regardless of the value of ``initial``. 
""" f = forms.FileField(required=False) self.assertEqual(f.clean(False), False) self.assertEqual(f.clean(False, 'initial'), False) def test_clean_false_required(self): """ If the ``clean`` method on a required FileField receives False as the data, it has the same effect as None: initial is returned if non-empty, otherwise the validation catches the lack of a required value. """ f = forms.FileField(required=True) self.assertEqual(f.clean(False, 'initial'), 'initial') self.assertRaises(ValidationError, f.clean, False) def test_full_clear(self): """ Integration happy-path test that a model FileField can actually be set and cleared via a ModelForm. """ form = DocumentForm() self.assertTrue('name="myfile"' in unicode(form)) self.assertTrue('myfile-clear' not in unicode(form)) form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', 'content')}) self.assertTrue(form.is_valid()) doc = form.save(commit=False) self.assertEqual(doc.myfile.name, 'something.txt') form = DocumentForm(instance=doc) self.assertTrue('myfile-clear' in unicode(form)) form = DocumentForm(instance=doc, data={'myfile-clear': 'true'}) doc = form.save(commit=False) self.assertEqual(bool(doc.myfile), False) def test_clear_and_file_contradiction(self): """ If the user submits a new file upload AND checks the clear checkbox, they get a validation error, and the bound redisplay of the form still includes the current file and the clear checkbox. """ form = DocumentForm(files={'myfile': SimpleUploadedFile('something.txt', 'content')}) self.assertTrue(form.is_valid()) doc = form.save(commit=False) form = DocumentForm(instance=doc, files={'myfile': SimpleUploadedFile('something.txt', 'content')}, data={'myfile-clear': 'true'}) self.assertTrue(not form.is_valid()) self.assertEqual(form.errors['myfile'], [u'Please either submit a file or check the clear checkbox, not both.']) rendered = unicode(form) self.assertTrue('something.txt' in rendered) self.assertTrue('myfile-clear' in rendered) class EditionForm(forms.ModelForm): author = forms.ModelChoiceField(queryset=Person.objects.all()) publication = forms.ModelChoiceField(queryset=Publication.objects.all()) edition = forms.IntegerField() isbn = forms.CharField(max_length=13) class Meta: model = Edition class UniqueErrorsTests(TestCase): def setUp(self): self.author1 = Person.objects.create(name=u'Author #1') self.author2 = Person.objects.create(name=u'Author #2') self.pub1 = Publication.objects.create(title='Pub #1', date_published=date(2000, 10, 31)) self.pub2 = Publication.objects.create(title='Pub #2', date_published=date(2004, 1, 5)) form = EditionForm(data={'author': self.author1.pk, 'publication': self.pub1.pk, 'edition': 1, 'isbn': '9783161484100'}) form.save() def test_unique_error_message(self): form = EditionForm(data={'author': self.author1.pk, 'publication': self.pub2.pk, 'edition': 1, 'isbn': '9783161484100'}) self.assertEqual(form.errors, {'isbn': [u'Edition with this Isbn already exists.']}) def test_unique_together_error_message(self): form = EditionForm(data={'author': self.author1.pk, 'publication': self.pub1.pk, 'edition': 2, 'isbn': '9783161489999'}) self.assertEqual(form.errors, {'__all__': [u'Edition with this Author and Publication already exists.']}) form = EditionForm(data={'author': self.author2.pk, 'publication': self.pub1.pk, 'edition': 1, 'isbn': '9783161487777'}) self.assertEqual(form.errors, {'__all__': [u'Edition with this Publication and Edition already exists.']}) class EmptyFieldsTestCase(TestCase): "Tests for fields=() cases as reported 
in #14119" class EmptyPersonForm(forms.ModelForm): class Meta: model = Person fields = () def test_empty_fields_to_fields_for_model(self): "An argument of fields=() to fields_for_model should return an empty dictionary" field_dict = fields_for_model(Person, fields=()) self.assertEqual(len(field_dict), 0) def test_empty_fields_on_modelform(self): "No fields on a ModelForm should actually result in no fields" form = self.EmptyPersonForm() self.assertEqual(len(form.fields), 0) def test_empty_fields_to_construct_instance(self): "No fields should be set on a model instance if construct_instance receives fields=()" form = modelform_factory(Person)({'name': 'John Doe'}) self.assertTrue(form.is_valid()) instance = construct_instance(form, Person(), fields=()) self.assertEqual(instance.name, '') class CustomMetaclass(ModelFormMetaclass): def __new__(cls, name, bases, attrs): new = super(CustomMetaclass, cls).__new__(cls, name, bases, attrs) new.base_fields = {} return new class CustomMetaclassForm(forms.ModelForm): __metaclass__ = CustomMetaclass class CustomMetaclassTestCase(TestCase): def test_modelform_factory_metaclass(self): new_cls = modelform_factory(Person, form=CustomMetaclassForm) self.assertEqual(new_cls.base_fields, {})
bsd-3-clause
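For orientation, a minimal sketch of the modelform_factory pattern these tests exercise, using the same Person model from the test app; the submitted form data and the surrounding setup are illustrative, not part of the test suite:

from django import forms
from django.forms.models import modelform_factory

from .models import Person  # the test app's model used throughout the tests above

# Mirrors test_factory_with_widget_argument: build a ModelForm class for Person
# and override the widget used for its 'name' field.
PersonForm = modelform_factory(Person, widgets={'name': forms.Textarea()})

form = PersonForm({'name': 'John Doe'})     # hypothetical submitted data
if form.is_valid():                         # runs field and model validation
    person = form.save()                    # persists the instance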
kfox1111/horizon
horizon/templatetags/branding.py
88
2028
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Template tags for customizing Horizon.
"""

from django.conf import settings
from django.core.urlresolvers import reverse
from django import template
from django.utils.translation import ugettext_lazy as _


register = template.Library()


class SiteBrandingNode(template.Node):
    def render(self, context):
        return getattr(settings, "SITE_BRANDING", _("Horizon"))


@register.tag
def site_branding(parser, token):
    return SiteBrandingNode()


@register.tag
def site_title(parser, token):
    return settings.SITE_BRANDING


@register.simple_tag
def site_branding_link():
    return getattr(settings, "SITE_BRANDING_LINK",
                   reverse("horizon:user_home"))


# TODO(jeffjapan): This is just an assignment tag version of the above, replace
#                  when the dashboard is upgraded to a django version that
#                  supports the @assignment_tag decorator syntax instead.
class SaveBrandingNode(template.Node):
    def __init__(self, var_name):
        self.var_name = var_name

    def render(self, context):
        context[self.var_name] = settings.SITE_BRANDING
        return ""


@register.tag
def save_site_branding(parser, token):
    tagname = token.contents.split()
    return SaveBrandingNode(tagname[-1])
apache-2.0
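A small usage sketch for the tags registered above, rendered from Python rather than from a template file; it assumes configured Django settings and that the library is loadable as "branding" (the module's file name):

from django import template

# Illustrative only: render a template string that uses the custom tags.
t = template.Template("{% load branding %}{% site_branding %}")
print(t.render(template.Context({})))   # SITE_BRANDING if set, otherwise "Horizon"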
CLOUGH/info3180-lab5
main/lib/flask/helpers.py
776
33793
# -*- coding: utf-8 -*- """ flask.helpers ~~~~~~~~~~~~~ Implements various helpers. :copyright: (c) 2011 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import os import sys import pkgutil import posixpath import mimetypes from time import time from zlib import adler32 from threading import RLock from werkzeug.routing import BuildError from functools import update_wrapper try: from werkzeug.urls import url_quote except ImportError: from urlparse import quote as url_quote from werkzeug.datastructures import Headers from werkzeug.exceptions import NotFound # this was moved in 0.7 try: from werkzeug.wsgi import wrap_file except ImportError: from werkzeug.utils import wrap_file from jinja2 import FileSystemLoader from .signals import message_flashed from .globals import session, _request_ctx_stack, _app_ctx_stack, \ current_app, request from ._compat import string_types, text_type # sentinel _missing = object() # what separators does this operating system provide that are not a slash? # this is used by the send_from_directory function to ensure that nobody is # able to access files from outside the filesystem. _os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep] if sep not in (None, '/')) def _endpoint_from_view_func(view_func): """Internal helper that returns the default endpoint for a given function. This always is the function name. """ assert view_func is not None, 'expected view func if endpoint ' \ 'is not provided.' return view_func.__name__ def stream_with_context(generator_or_function): """Request contexts disappear when the response is started on the server. This is done for efficiency reasons and to make it less likely to encounter memory leaks with badly written WSGI middlewares. The downside is that if you are using streamed responses, the generator cannot access request bound information any more. This function however can help you keep the context around for longer:: from flask import stream_with_context, request, Response @app.route('/stream') def streamed_response(): @stream_with_context def generate(): yield 'Hello ' yield request.args['name'] yield '!' return Response(generate()) Alternatively it can also be used around a specific generator:: from flask import stream_with_context, request, Response @app.route('/stream') def streamed_response(): def generate(): yield 'Hello ' yield request.args['name'] yield '!' return Response(stream_with_context(generate())) .. versionadded:: 0.9 """ try: gen = iter(generator_or_function) except TypeError: def decorator(*args, **kwargs): gen = generator_or_function() return stream_with_context(gen) return update_wrapper(decorator, generator_or_function) def generator(): ctx = _request_ctx_stack.top if ctx is None: raise RuntimeError('Attempted to stream with context but ' 'there was no context in the first place to keep around.') with ctx: # Dummy sentinel. Has to be inside the context block or we're # not actually keeping the context around. yield None # The try/finally is here so that if someone passes a WSGI level # iterator in we're still running the cleanup logic. Generators # don't need that because they are closed on their destruction # automatically. try: for item in gen: yield item finally: if hasattr(gen, 'close'): gen.close() # The trick is to start the generator. Then the code execution runs until # the first dummy None is yielded at which point the context was already # pushed. This item is discarded. Then when the iteration continues the # real generator is executed. 
wrapped_g = generator() next(wrapped_g) return wrapped_g def make_response(*args): """Sometimes it is necessary to set additional headers in a view. Because views do not have to return response objects but can return a value that is converted into a response object by Flask itself, it becomes tricky to add headers to it. This function can be called instead of using a return and you will get a response object which you can use to attach headers. If view looked like this and you want to add a new header:: def index(): return render_template('index.html', foo=42) You can now do something like this:: def index(): response = make_response(render_template('index.html', foo=42)) response.headers['X-Parachutes'] = 'parachutes are cool' return response This function accepts the very same arguments you can return from a view function. This for example creates a response with a 404 error code:: response = make_response(render_template('not_found.html'), 404) The other use case of this function is to force the return value of a view function into a response which is helpful with view decorators:: response = make_response(view_function()) response.headers['X-Parachutes'] = 'parachutes are cool' Internally this function does the following things: - if no arguments are passed, it creates a new response argument - if one argument is passed, :meth:`flask.Flask.make_response` is invoked with it. - if more than one argument is passed, the arguments are passed to the :meth:`flask.Flask.make_response` function as tuple. .. versionadded:: 0.6 """ if not args: return current_app.response_class() if len(args) == 1: args = args[0] return current_app.make_response(args) def url_for(endpoint, **values): """Generates a URL to the given endpoint with the method provided. Variable arguments that are unknown to the target endpoint are appended to the generated URL as query arguments. If the value of a query argument is `None`, the whole pair is skipped. In case blueprints are active you can shortcut references to the same blueprint by prefixing the local endpoint with a dot (``.``). This will reference the index function local to the current blueprint:: url_for('.index') For more information, head over to the :ref:`Quickstart <url-building>`. To integrate applications, :class:`Flask` has a hook to intercept URL build errors through :attr:`Flask.build_error_handler`. The `url_for` function results in a :exc:`~werkzeug.routing.BuildError` when the current app does not have a URL for the given endpoint and values. When it does, the :data:`~flask.current_app` calls its :attr:`~Flask.build_error_handler` if it is not `None`, which can return a string to use as the result of `url_for` (instead of `url_for`'s default to raise the :exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception. An example:: def external_url_handler(error, endpoint, **values): "Looks up an external URL when `url_for` cannot build a URL." # This is an example of hooking the build_error_handler. # Here, lookup_url is some utility function you've built # which looks up the endpoint in some external URL registry. url = lookup_url(endpoint, **values) if url is None: # External lookup did not have a URL. # Re-raise the BuildError, in context of original traceback. exc_type, exc_value, tb = sys.exc_info() if exc_value is error: raise exc_type, exc_value, tb else: raise error # url_for will use this result, instead of raising BuildError. 
return url app.build_error_handler = external_url_handler Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and `endpoint` and `**values` are the arguments passed into `url_for`. Note that this is for building URLs outside the current application, and not for handling 404 NotFound errors. .. versionadded:: 0.10 The `_scheme` parameter was added. .. versionadded:: 0.9 The `_anchor` and `_method` parameters were added. .. versionadded:: 0.9 Calls :meth:`Flask.handle_build_error` on :exc:`~werkzeug.routing.BuildError`. :param endpoint: the endpoint of the URL (name of the function) :param values: the variable arguments of the URL rule :param _external: if set to `True`, an absolute URL is generated. Server address can be changed via `SERVER_NAME` configuration variable which defaults to `localhost`. :param _scheme: a string specifying the desired URL scheme. The `_external` parameter must be set to `True` or a `ValueError` is raised. :param _anchor: if provided this is added as anchor to the URL. :param _method: if provided this explicitly specifies an HTTP method. """ appctx = _app_ctx_stack.top reqctx = _request_ctx_stack.top if appctx is None: raise RuntimeError('Attempted to generate a URL without the ' 'application context being pushed. This has to be ' 'executed when application context is available.') # If request specific information is available we have some extra # features that support "relative" urls. if reqctx is not None: url_adapter = reqctx.url_adapter blueprint_name = request.blueprint if not reqctx.request._is_old_module: if endpoint[:1] == '.': if blueprint_name is not None: endpoint = blueprint_name + endpoint else: endpoint = endpoint[1:] else: # TODO: get rid of this deprecated functionality in 1.0 if '.' not in endpoint: if blueprint_name is not None: endpoint = blueprint_name + '.' + endpoint elif endpoint.startswith('.'): endpoint = endpoint[1:] external = values.pop('_external', False) # Otherwise go with the url adapter from the appctx and make # the urls external by default. else: url_adapter = appctx.url_adapter if url_adapter is None: raise RuntimeError('Application was not able to create a URL ' 'adapter for request independent URL generation. ' 'You might be able to fix this by setting ' 'the SERVER_NAME config variable.') external = values.pop('_external', True) anchor = values.pop('_anchor', None) method = values.pop('_method', None) scheme = values.pop('_scheme', None) appctx.app.inject_url_defaults(endpoint, values) if scheme is not None: if not external: raise ValueError('When specifying _scheme, _external must be True') url_adapter.url_scheme = scheme try: rv = url_adapter.build(endpoint, values, method=method, force_external=external) except BuildError as error: # We need to inject the values again so that the app callback can # deal with that sort of stuff. values['_external'] = external values['_anchor'] = anchor values['_method'] = method return appctx.app.handle_url_build_error(error, endpoint, values) if anchor is not None: rv += '#' + url_quote(anchor) return rv def get_template_attribute(template_name, attribute): """Loads a macro (or variable) a template exports. This can be used to invoke a macro from within Python code. If you for example have a template named `_cider.html` with the following contents: .. sourcecode:: html+jinja {% macro hello(name) %}Hello {{ name }}!{% endmacro %} You can access this from Python code like this:: hello = get_template_attribute('_cider.html', 'hello') return hello('World') .. 
versionadded:: 0.2 :param template_name: the name of the template :param attribute: the name of the variable of macro to access """ return getattr(current_app.jinja_env.get_template(template_name).module, attribute) def flash(message, category='message'): """Flashes a message to the next request. In order to remove the flashed message from the session and to display it to the user, the template has to call :func:`get_flashed_messages`. .. versionchanged:: 0.3 `category` parameter added. :param message: the message to be flashed. :param category: the category for the message. The following values are recommended: ``'message'`` for any kind of message, ``'error'`` for errors, ``'info'`` for information messages and ``'warning'`` for warnings. However any kind of string can be used as category. """ # Original implementation: # # session.setdefault('_flashes', []).append((category, message)) # # This assumed that changes made to mutable structures in the session are # are always in sync with the sess on object, which is not true for session # implementations that use external storage for keeping their keys/values. flashes = session.get('_flashes', []) flashes.append((category, message)) session['_flashes'] = flashes message_flashed.send(current_app._get_current_object(), message=message, category=category) def get_flashed_messages(with_categories=False, category_filter=[]): """Pulls all flashed messages from the session and returns them. Further calls in the same request to the function will return the same messages. By default just the messages are returned, but when `with_categories` is set to `True`, the return value will be a list of tuples in the form ``(category, message)`` instead. Filter the flashed messages to one or more categories by providing those categories in `category_filter`. This allows rendering categories in separate html blocks. The `with_categories` and `category_filter` arguments are distinct: * `with_categories` controls whether categories are returned with message text (`True` gives a tuple, where `False` gives just the message text). * `category_filter` filters the messages down to only those matching the provided categories. See :ref:`message-flashing-pattern` for examples. .. versionchanged:: 0.3 `with_categories` parameter added. .. versionchanged:: 0.9 `category_filter` parameter added. :param with_categories: set to `True` to also receive categories. :param category_filter: whitelist of categories to limit return values """ flashes = _request_ctx_stack.top.flashes if flashes is None: _request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \ if '_flashes' in session else [] if category_filter: flashes = list(filter(lambda f: f[0] in category_filter, flashes)) if not with_categories: return [x[1] for x in flashes] return flashes def send_file(filename_or_fp, mimetype=None, as_attachment=False, attachment_filename=None, add_etags=True, cache_timeout=None, conditional=False): """Sends the contents of a file to the client. This will use the most efficient method available and configured. By default it will try to use the WSGI server's file_wrapper support. Alternatively you can set the application's :attr:`~Flask.use_x_sendfile` attribute to ``True`` to directly emit an `X-Sendfile` header. This however requires support of the underlying webserver for `X-Sendfile`. By default it will try to guess the mimetype for you, but you can also explicitly provide one. For extra security you probably want to send certain files as attachment (HTML for instance). 
The mimetype guessing requires a `filename` or an `attachment_filename` to be provided. Please never pass filenames to this function from user sources without checking them first. Something like this is usually sufficient to avoid security problems:: if '..' in filename or filename.startswith('/'): abort(404) .. versionadded:: 0.2 .. versionadded:: 0.5 The `add_etags`, `cache_timeout` and `conditional` parameters were added. The default behavior is now to attach etags. .. versionchanged:: 0.7 mimetype guessing and etag support for file objects was deprecated because it was unreliable. Pass a filename if you are able to, otherwise attach an etag yourself. This functionality will be removed in Flask 1.0 .. versionchanged:: 0.9 cache_timeout pulls its default from application config, when None. :param filename_or_fp: the filename of the file to send. This is relative to the :attr:`~Flask.root_path` if a relative path is specified. Alternatively a file object might be provided in which case `X-Sendfile` might not work and fall back to the traditional method. Make sure that the file pointer is positioned at the start of data to send before calling :func:`send_file`. :param mimetype: the mimetype of the file if provided, otherwise auto detection happens. :param as_attachment: set to `True` if you want to send this file with a ``Content-Disposition: attachment`` header. :param attachment_filename: the filename for the attachment if it differs from the file's filename. :param add_etags: set to `False` to disable attaching of etags. :param conditional: set to `True` to enable conditional responses. :param cache_timeout: the timeout in seconds for the headers. When `None` (default), this value is set by :meth:`~Flask.get_send_file_max_age` of :data:`~flask.current_app`. """ mtime = None if isinstance(filename_or_fp, string_types): filename = filename_or_fp file = None else: from warnings import warn file = filename_or_fp filename = getattr(file, 'name', None) # XXX: this behavior is now deprecated because it was unreliable. # removed in Flask 1.0 if not attachment_filename and not mimetype \ and isinstance(filename, string_types): warn(DeprecationWarning('The filename support for file objects ' 'passed to send_file is now deprecated. Pass an ' 'attach_filename if you want mimetypes to be guessed.'), stacklevel=2) if add_etags: warn(DeprecationWarning('In future flask releases etags will no ' 'longer be generated for file objects passed to the send_file ' 'function because this behavior was unreliable. 
Pass ' 'filenames instead if possible, otherwise attach an etag ' 'yourself based on another value'), stacklevel=2) if filename is not None: if not os.path.isabs(filename): filename = os.path.join(current_app.root_path, filename) if mimetype is None and (filename or attachment_filename): mimetype = mimetypes.guess_type(filename or attachment_filename)[0] if mimetype is None: mimetype = 'application/octet-stream' headers = Headers() if as_attachment: if attachment_filename is None: if filename is None: raise TypeError('filename unavailable, required for ' 'sending as attachment') attachment_filename = os.path.basename(filename) headers.add('Content-Disposition', 'attachment', filename=attachment_filename) if current_app.use_x_sendfile and filename: if file is not None: file.close() headers['X-Sendfile'] = filename headers['Content-Length'] = os.path.getsize(filename) data = None else: if file is None: file = open(filename, 'rb') mtime = os.path.getmtime(filename) headers['Content-Length'] = os.path.getsize(filename) data = wrap_file(request.environ, file) rv = current_app.response_class(data, mimetype=mimetype, headers=headers, direct_passthrough=True) # if we know the file modification date, we can store it as the # the time of the last modification. if mtime is not None: rv.last_modified = int(mtime) rv.cache_control.public = True if cache_timeout is None: cache_timeout = current_app.get_send_file_max_age(filename) if cache_timeout is not None: rv.cache_control.max_age = cache_timeout rv.expires = int(time() + cache_timeout) if add_etags and filename is not None: rv.set_etag('flask-%s-%s-%s' % ( os.path.getmtime(filename), os.path.getsize(filename), adler32( filename.encode('utf-8') if isinstance(filename, text_type) else filename ) & 0xffffffff )) if conditional: rv = rv.make_conditional(request) # make sure we don't send x-sendfile for servers that # ignore the 304 status code for x-sendfile. if rv.status_code == 304: rv.headers.pop('x-sendfile', None) return rv def safe_join(directory, filename): """Safely join `directory` and `filename`. Example usage:: @app.route('/wiki/<path:filename>') def wiki_page(filename): filename = safe_join(app.config['WIKI_FOLDER'], filename) with open(filename, 'rb') as fd: content = fd.read() # Read and process the file content... :param directory: the base directory. :param filename: the untrusted filename relative to that directory. :raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path would fall out of `directory`. """ filename = posixpath.normpath(filename) for sep in _os_alt_seps: if sep in filename: raise NotFound() if os.path.isabs(filename) or \ filename == '..' or \ filename.startswith('../'): raise NotFound() return os.path.join(directory, filename) def send_from_directory(directory, filename, **options): """Send a file from a given directory with :func:`send_file`. This is a secure way to quickly expose static files from an upload folder or something similar. Example usage:: @app.route('/uploads/<path:filename>') def download_file(filename): return send_from_directory(app.config['UPLOAD_FOLDER'], filename, as_attachment=True) .. admonition:: Sending files and Performance It is strongly recommended to activate either `X-Sendfile` support in your webserver or (if no authentication happens) to tell the webserver to serve files for the given path on its own without calling into the web application for improved performance. .. versionadded:: 0.5 :param directory: the directory where all the files are stored. 
:param filename: the filename relative to that directory to download. :param options: optional keyword arguments that are directly forwarded to :func:`send_file`. """ filename = safe_join(directory, filename) if not os.path.isfile(filename): raise NotFound() options.setdefault('conditional', True) return send_file(filename, **options) def get_root_path(import_name): """Returns the path to a package or cwd if that cannot be found. This returns the path of a package or the folder that contains a module. Not to be confused with the package path returned by :func:`find_package`. """ # Module already imported and has a file attribute. Use that first. mod = sys.modules.get(import_name) if mod is not None and hasattr(mod, '__file__'): return os.path.dirname(os.path.abspath(mod.__file__)) # Next attempt: check the loader. loader = pkgutil.get_loader(import_name) # Loader does not exist or we're referring to an unloaded main module # or a main module without path (interactive sessions), go with the # current working directory. if loader is None or import_name == '__main__': return os.getcwd() # For .egg, zipimporter does not have get_filename until Python 2.7. # Some other loaders might exhibit the same behavior. if hasattr(loader, 'get_filename'): filepath = loader.get_filename(import_name) else: # Fall back to imports. __import__(import_name) filepath = sys.modules[import_name].__file__ # filepath is import_name.py for a module, or __init__.py for a package. return os.path.dirname(os.path.abspath(filepath)) def find_package(import_name): """Finds a package and returns the prefix (or None if the package is not installed) as well as the folder that contains the package or module as a tuple. The package path returned is the module that would have to be added to the pythonpath in order to make it possible to import the module. The prefix is the path below which a UNIX like folder structure exists (lib, share etc.). """ root_mod_name = import_name.split('.')[0] loader = pkgutil.get_loader(root_mod_name) if loader is None or import_name == '__main__': # import name is not found, or interactive/main module package_path = os.getcwd() else: # For .egg, zipimporter does not have get_filename until Python 2.7. if hasattr(loader, 'get_filename'): filename = loader.get_filename(root_mod_name) elif hasattr(loader, 'archive'): # zipimporter's loader.archive points to the .egg or .zip # archive filename is dropped in call to dirname below. filename = loader.archive else: # At least one loader is missing both get_filename and archive: # Google App Engine's HardenedModulesHook # # Fall back to imports. __import__(import_name) filename = sys.modules[import_name].__file__ package_path = os.path.abspath(os.path.dirname(filename)) # package_path ends with __init__.py for a package if loader.is_package(root_mod_name): package_path = os.path.dirname(package_path) site_parent, site_folder = os.path.split(package_path) py_prefix = os.path.abspath(sys.prefix) if package_path.startswith(py_prefix): return py_prefix, package_path elif site_folder.lower() == 'site-packages': parent, folder = os.path.split(site_parent) # Windows like installations if folder.lower() == 'lib': base_dir = parent # UNIX like installations elif os.path.basename(parent).lower() == 'lib': base_dir = os.path.dirname(parent) else: base_dir = site_parent return base_dir, package_path return None, package_path class locked_cached_property(object): """A decorator that converts a function into a lazy property. 
The function wrapped is called the first time to retrieve the result and then that calculated result is used the next time you access the value. Works like the one in Werkzeug but has a lock for thread safety. """ def __init__(self, func, name=None, doc=None): self.__name__ = name or func.__name__ self.__module__ = func.__module__ self.__doc__ = doc or func.__doc__ self.func = func self.lock = RLock() def __get__(self, obj, type=None): if obj is None: return self with self.lock: value = obj.__dict__.get(self.__name__, _missing) if value is _missing: value = self.func(obj) obj.__dict__[self.__name__] = value return value class _PackageBoundObject(object): def __init__(self, import_name, template_folder=None): #: The name of the package or module. Do not change this once #: it was set by the constructor. self.import_name = import_name #: location of the templates. `None` if templates should not be #: exposed. self.template_folder = template_folder #: Where is the app root located? self.root_path = get_root_path(self.import_name) self._static_folder = None self._static_url_path = None def _get_static_folder(self): if self._static_folder is not None: return os.path.join(self.root_path, self._static_folder) def _set_static_folder(self, value): self._static_folder = value static_folder = property(_get_static_folder, _set_static_folder) del _get_static_folder, _set_static_folder def _get_static_url_path(self): if self._static_url_path is None: if self.static_folder is None: return None return '/' + os.path.basename(self.static_folder) return self._static_url_path def _set_static_url_path(self, value): self._static_url_path = value static_url_path = property(_get_static_url_path, _set_static_url_path) del _get_static_url_path, _set_static_url_path @property def has_static_folder(self): """This is `True` if the package bound object's container has a folder named ``'static'``. .. versionadded:: 0.5 """ return self.static_folder is not None @locked_cached_property def jinja_loader(self): """The Jinja loader for this package bound object. .. versionadded:: 0.5 """ if self.template_folder is not None: return FileSystemLoader(os.path.join(self.root_path, self.template_folder)) def get_send_file_max_age(self, filename): """Provides default cache_timeout for the :func:`send_file` functions. By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from the configuration of :data:`~flask.current_app`. Static file functions such as :func:`send_from_directory` use this function, and :func:`send_file` calls this function on :data:`~flask.current_app` when the given cache_timeout is `None`. If a cache_timeout is given in :func:`send_file`, that timeout is used; otherwise, this method is called. This allows subclasses to change the behavior when sending files based on the filename. For example, to set the cache timeout for .js files to 60 seconds:: class MyFlask(flask.Flask): def get_send_file_max_age(self, name): if name.lower().endswith('.js'): return 60 return flask.Flask.get_send_file_max_age(self, name) .. versionadded:: 0.9 """ return current_app.config['SEND_FILE_MAX_AGE_DEFAULT'] def send_static_file(self, filename): """Function used internally to send static files from the static folder to the browser. .. versionadded:: 0.5 """ if not self.has_static_folder: raise RuntimeError('No static folder for this object') # Ensure get_send_file_max_age is called in all cases. # Here, we ensure get_send_file_max_age is called for Blueprints. 
cache_timeout = self.get_send_file_max_age(filename) return send_from_directory(self.static_folder, filename, cache_timeout=cache_timeout) def open_resource(self, resource, mode='rb'): """Opens a resource from the application's resource folder. To see how this works, consider the following folder structure:: /myapplication.py /schema.sql /static /style.css /templates /layout.html /index.html If you want to open the `schema.sql` file you would do the following:: with app.open_resource('schema.sql') as f: contents = f.read() do_something_with(contents) :param resource: the name of the resource. To access resources within subfolders use forward slashes as separator. :param mode: resource file opening mode, default is 'rb'. """ if mode not in ('r', 'rb'): raise ValueError('Resources can only be opened for reading') return open(os.path.join(self.root_path, resource), mode)
mit
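Most of these helpers are meant to be called from view code; a minimal sketch of an app exercising url_for, flash and send_from_directory (the route names and the uploads directory are invented for illustration):

from flask import Flask, flash, redirect, url_for, send_from_directory

app = Flask(__name__)
app.secret_key = 'dev'                      # flashing stores messages in the session

@app.route('/')
def index():
    flash('Welcome back', 'info')           # picked up later by get_flashed_messages()
    return redirect(url_for('download_file', filename='report.pdf'))

@app.route('/uploads/<path:filename>')
def download_file(filename):
    # send_from_directory() safe-joins the path and raises NotFound on traversal
    return send_from_directory('/srv/uploads', filename, as_attachment=True)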
OptiPop/external_chromium_org
tools/perf/page_sets/intl_ko_th_vi.py
34
1829
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module


class IntlKoThViPage(page_module.Page):

  def __init__(self, url, page_set):
    super(IntlKoThViPage, self).__init__(url=url, page_set=page_set)
    self.user_agent_type = 'desktop'
    self.archive_data_file = 'data/intl_ko_th_vi.json'

  def RunSmoothness(self, action_runner):
    interaction = action_runner.BeginGestureInteraction(
        'ScrollAction', is_smooth=True)
    action_runner.ScrollPage()
    interaction.End()


class IntlKoThViPageSet(page_set_module.PageSet):

  """ Popular pages in Korean, Thai and Vietnamese. """

  def __init__(self):
    super(IntlKoThViPageSet, self).__init__(
      user_agent_type='desktop',
      archive_data_file='data/intl_ko_th_vi.json',
      bucket=page_set_module.PARTNER_BUCKET)

    urls_list = [
      # Why: #7 site in Vietnam
      'http://us.24h.com.vn/',
      # Why: #6 site in Vietnam
      'http://vnexpress.net/',
      # Why: #18 site in Vietnam
      'http://vietnamnet.vn/',
      # Why: #5 site in Vietnam
      # pylint: disable=C0301
      'http://news.zing.vn/the-gioi/ba-dam-thep-margaret-thatcher-qua-doi/a312895.html#home_noibat1',
      'http://kenh14.vn/home.chn',
      # Why: #5 site in Korea
      'http://www.naver.com/',
      # Why: #9 site in Korea
      'http://www.daum.net/',
      # Why: #25 site in Korea
      'http://www.donga.com/',
      'http://www.chosun.com/',
      'http://www.danawa.com/',
      # Why: #10 site in Thailand
      'http://pantip.com/',
      'http://thaimisc.com/'
    ]

    for url in urls_list:
      self.AddPage(IntlKoThViPage(url, self))
bsd-3-clause
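A tiny, hypothetical driver for the page set above; it assumes the telemetry tree is importable and only lists the URLs the set would exercise:

from page_sets.intl_ko_th_vi import IntlKoThViPageSet

page_set = IntlKoThViPageSet()
for page in page_set.pages:      # pages added via AddPage() in __init__
    print(page.url)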
stephane-martin/salt-debian-packaging
salt-2016.3.3/tests/integration/states/rabbitmq_vhost.py
2
1363
# -*- coding: utf-8 -*-
'''
Tests for the rabbitmq state
'''

# Import python libs
from __future__ import absolute_import
import os

# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')

# Import salt libs
import integration


class RabbitVHostTestCase(integration.ModuleCase,
                          integration.SaltReturnAssertsMixIn):
    '''
    Validate the rabbitmq virtual host states.
    '''
    def setUp(self):
        super(RabbitVHostTestCase, self).setUp()
        rabbit_installed = self.run_function('cmd.has_exec', ['rabbitmqctl'])

        if not rabbit_installed:
            self.skipTest('rabbitmq-server not installed')

        if os.geteuid() != 0:
            self.skipTest('You must be root to run this test')

    def test_present(self):
        '''
        rabbitmq_vhost.present null_host
        '''
        ret = self.run_state(
            'rabbitmq_vhost.present', name='null_host', test=True
        )
        self.assertSaltFalseReturn(ret)

    def absent(self):
        '''
        rabbitmq_vhost.absent null_host
        '''
        ret = self.run_state(
            'rabbitmq_vhost.absent', name='null_host', test=True
        )
        self.assertSaltFalseReturn(ret)


if __name__ == '__main__':
    from integration import run_tests
    run_tests(RabbitVHostTestCase)
apache-2.0
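Outside the test harness, the same state can be driven from Python with salt's caller API; a rough sketch that mirrors the arguments used by the tests above and assumes a local minion configuration:

import salt.client

caller = salt.client.Caller()
# Roughly equivalent to: salt-call state.single rabbitmq_vhost.present null_host test=True
ret = caller.cmd('state.single', 'rabbitmq_vhost.present', 'null_host', test=True)
print(ret)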
indico/indico
indico/cli/watchman.py
3
5829
# This file is part of Indico. # Copyright (C) 2002 - 2021 CERN # # Indico is free software; you can redistribute it and/or # modify it under the terms of the MIT License; see the # LICENSE file for more details. import atexit import os import subprocess import sys import time import pywatchman from flask.helpers import get_root_path from werkzeug._reloader import _find_watchdog_paths from indico.util.console import cformat def _patterns_to_terms(patterns): return ['anyof'] + [['match', p, 'wholename', {'includedotfiles': True}] for p in patterns] def _disable_reloader(argv): argv = list(argv) # we usually pass sys.argv, so let's not modify that for i, arg in enumerate(argv): if arg == '--reloader' and argv[i + 1] == 'watchman': argv[i + 1] = 'none' elif arg.startswith('--reloader') and '=' in arg: argv[i] = '--reloader=none' return argv class Watcher: def __init__(self, path, patterns): self.path = path self.name = path.replace('/', '-').strip('-') self.patterns = patterns self.root_dir = None self.triggered = False def start(self, client): query = { 'expression': _patterns_to_terms(self.patterns), 'fields': ['name'] } watch = client.query('watch-project', self.path) if 'warning' in watch: # no idea when this happens, but the example scripts have it... print('watchman warning:', watch['warning']) self.root_dir = watch['watch'] if 'relative_path' in watch: query['relative_root'] = watch['relative_path'] # get the initial clock value so that we only get updates query['since'] = client.query('clock', self.root_dir)['clock'] client.query('subscribe', self.root_dir, self.name, query) def consume(self, client): data = client.getSubscription(self.name) if data: files = sorted(f for record in data for f in record.get('files', [])) relpath = os.path.relpath(self.path) if relpath == '.': print(cformat('%{cyan}Changes found:').format(relpath)) else: print(cformat('%{cyan}Changes found in %{cyan!}{}%{reset}%{cyan}:').format(relpath)) for f in files: print(cformat(' * %{blue!}{}').format(f)) self.triggered = True def check(self): triggered = self.triggered self.triggered = False return triggered def __repr__(self): return f'<Watcher({self.path!r}, {self.patterns!r})>' class Watchman: def __init__(self): self._proc = None self._watchers = set() self._client = None atexit.register(self._terminate) def run(self): self._client = pywatchman.client(timeout=300) self._client.capabilityCheck(required=['wildmatch', 'cmd-watch-project']) indico_project_root = os.path.realpath(os.path.join(get_root_path('indico'), '..')) paths = sorted({os.path.realpath(p) for p in _find_watchdog_paths(set(), set()) if os.path.exists(p)}) for path in paths: patterns = ['**/*.py', '**/entry_points.txt'] if path == indico_project_root: patterns += ['indico/indico.conf', 'indico/logging.yaml'] watcher = Watcher(path, patterns) watcher.start(self._client) self._watchers.add(watcher) self._launch() self._monitor() def _monitor(self): while True: self._client.setTimeout(300) try: self._client.receive() for w in self._watchers: w.consume(self._client) self._client.setTimeout(0.1) settled = False while not settled: try: self._client.receive() for w in self._watchers: w.consume(self._client) except pywatchman.SocketTimeout: settled = True break triggered = False for w in self._watchers: # this cannot be done with any() since all triggered watchers # need to be reset during the check if w.check(): triggered = True if triggered: self._restart() except pywatchman.SocketTimeout: # are we still connected? 
try: self._client.query('version') except Exception as exc: print('watchman error:', exc) return except KeyboardInterrupt: print() return def _launch(self, quiet=False, retry=0): assert not self._proc if not quiet and not retry: print(cformat('%{green!}Launching Indico')) try: argv = _disable_reloader(sys.argv) self._proc = subprocess.Popen(argv) except OSError as exc: delay = (retry + 1) * 0.5 print(cformat('%{red!}Could not launch Indico: {}').format(exc)) print(cformat('%{yellow}Retrying in {}s').format(delay)) time.sleep(delay) self._launch(quiet=quiet, retry=(retry + 1)) def _terminate(self, quiet=False): if not self._proc: return if not quiet: print(cformat('%{red!}Terminating Indico')) self._proc.terminate() self._proc = None def _restart(self): print(cformat('%{yellow!}Restarting Indico')) self._terminate(quiet=True) self._launch(quiet=True)
mit
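The watcher above boils down to a standard pywatchman subscribe/receive loop; a stripped-down sketch of that pattern, watching one directory for Python file changes (the project path and subscription name are placeholders):

import pywatchman

client = pywatchman.client(timeout=300)
watch = client.query('watch-project', '/path/to/project')
root = watch['watch']

query = {
    'expression': ['anyof',
                   ['match', '**/*.py', 'wholename', {'includedotfiles': True}]],
    'fields': ['name'],
    # only report changes newer than "now"
    'since': client.query('clock', root)['clock'],
}
client.query('subscribe', root, 'py-changes', query)

while True:
    try:
        client.receive()                           # blocks until watchman reports changes
        data = client.getSubscription('py-changes') or []
        for record in data:
            print('changed:', record.get('files', []))
    except pywatchman.SocketTimeout:
        pass                                       # idle timeout; keep waiting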
yb-kim/gemV
tests/configs/realview-o3-dual.py
10
2340
# Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg

from m5.objects import *
from arm_generic import *

root = LinuxArmFSSystem(mem_mode='timing',
                        mem_class=DDR3_1600_x64,
                        cpu_class=DerivO3CPU,
                        num_cpus=2).create_root()
bsd-3-clause
Shaswat27/sympy
sympy/core/trace.py
82
6108
from __future__ import print_function, division from sympy import Expr, Add, Mul, Pow, sympify, Matrix, Tuple from sympy.core.compatibility import range from sympy.utilities import default_sort_key def _is_scalar(e): """ Helper method used in Tr""" # sympify to set proper attributes e = sympify(e) if isinstance(e, Expr): if (e.is_Integer or e.is_Float or e.is_Rational or e.is_Number or (e.is_Symbol and e.is_commutative) ): return True return False def _cycle_permute(l): """ Cyclic permutations based on canonical ordering This method does the sort based ascii values while a better approach would be to used lexicographic sort. TODO: Handle condition such as symbols have subscripts/superscripts in case of lexicographic sort """ if len(l) == 1: return l min_item = min(l, key=default_sort_key) indices = [i for i, x in enumerate(l) if x == min_item] le = list(l) le.extend(l) # duplicate and extend string for easy processing # adding the first min_item index back for easier looping indices.append(len(l) + indices[0]) # create sublist of items with first item as min_item and last_item # in each of the sublist is item just before the next occurence of # minitem in the cycle formed. sublist = [[le[indices[i]:indices[i + 1]]] for i in range(len(indices) - 1)] # we do comparison of strings by comparing elements # in each sublist idx = sublist.index(min(sublist)) ordered_l = le[indices[idx]:indices[idx] + len(l)] return ordered_l def _rearrange_args(l): """ this just moves the last arg to first position to enable expansion of args A,B,A ==> A**2,B """ if len(l) == 1: return l x = list(l[-1:]) x.extend(l[0:-1]) return Mul(*x).args class Tr(Expr): """ Generic Trace operation than can trace over: a) sympy matrix b) operators c) outer products Parameters ========== o : operator, matrix, expr i : tuple/list indices (optional) Examples ======== # TODO: Need to handle printing a) Trace(A+B) = Tr(A) + Tr(B) b) Trace(scalar*Operator) = scalar*Trace(Operator) >>> from sympy.core.trace import Tr >>> from sympy import symbols, Matrix >>> a, b = symbols('a b', commutative=True) >>> A, B = symbols('A B', commutative=False) >>> Tr(a*A,[2]) a*Tr(A) >>> m = Matrix([[1,2],[1,1]]) >>> Tr(m) 2 """ def __new__(cls, *args): """ Construct a Trace object. Parameters ========== args = sympy expression indices = tuple/list if indices, optional """ # expect no indices,int or a tuple/list/Tuple if (len(args) == 2): if not isinstance(args[1], (list, Tuple, tuple)): indices = Tuple(args[1]) else: indices = Tuple(*args[1]) expr = args[0] elif (len(args) == 1): indices = Tuple() expr = args[0] else: raise ValueError("Arguments to Tr should be of form " "(expr[, [indices]])") if isinstance(expr, Matrix): return expr.trace() elif hasattr(expr, 'trace') and callable(expr.trace): #for any objects that have trace() defined e.g numpy return expr.trace() elif isinstance(expr, Add): return Add(*[Tr(arg, indices) for arg in expr.args]) elif isinstance(expr, Mul): c_part, nc_part = expr.args_cnc() if len(nc_part) == 0: return Mul(*c_part) else: obj = Expr.__new__(cls, Mul(*nc_part), indices ) #this check is needed to prevent cached instances #being returned even if len(c_part)==0 return Mul(*c_part)*obj if len(c_part) > 0 else obj elif isinstance(expr, Pow): if (_is_scalar(expr.args[0]) and _is_scalar(expr.args[1])): return expr else: return Expr.__new__(cls, expr, indices) else: if (_is_scalar(expr)): return expr return Expr.__new__(cls, expr, indices) def doit(self, **kwargs): """ Perform the trace operation. 
#TODO: Current version ignores the indices set for partial trace. >>> from sympy.core.trace import Tr >>> from sympy.physics.quantum.operator import OuterProduct >>> from sympy.physics.quantum.spin import JzKet, JzBra >>> t = Tr(OuterProduct(JzKet(1,1), JzBra(1,1))) >>> t.doit() 1 """ if hasattr(self.args[0], '_eval_trace'): return self.args[0]._eval_trace(indices=self.args[1]) return self @property def is_number(self): # TODO : improve this implementation return True #TODO: Review if the permute method is needed # and if it needs to return a new instance def permute(self, pos): """ Permute the arguments cyclically. Parameters ========== pos : integer, if positive, shift-right, else shift-left Examples ======== >>> from sympy.core.trace import Tr >>> from sympy import symbols >>> A, B, C, D = symbols('A B C D', commutative=False) >>> t = Tr(A*B*C*D) >>> t.permute(2) Tr(C*D*A*B) >>> t.permute(-2) Tr(C*D*A*B) """ if pos > 0: pos = pos % len(self.args[0].args) else: pos = -(abs(pos) % len(self.args[0].args)) args = list(self.args[0].args[-pos:] + self.args[0].args[0:-pos]) return Tr(Mul(*(args))) def _hashable_content(self): if isinstance(self.args[0], Mul): args = _cycle_permute(_rearrange_args(self.args[0].args)) else: args = [self.args[0]] return tuple(args) + (self.args[1], )
bsd-3-clause
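Pulling the doctests above together, a compact illustration of how Tr behaves for scalars, sums, matrices and cyclic permutation:

from sympy import symbols, Matrix
from sympy.core.trace import Tr

a = symbols('a', commutative=True)
A, B, C, D = symbols('A B C D', commutative=False)

print(Tr(a*A))                        # scalars factor out:      a*Tr(A)
print(Tr(A + B))                      # trace is linear:         Tr(A) + Tr(B)
print(Tr(Matrix([[1, 2], [1, 1]])))   # matrices traced eagerly: 2
print(Tr(A*B*C*D).permute(2))         # cyclic shift:            Tr(C*D*A*B)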
JamesMura/elections
apollo/submissions/__init__.py
1
6906
from ..core import Service from .models import Submission, SubmissionComment, SubmissionVersion from ..locations.models import LocationType from datetime import datetime from flask import g from unidecode import unidecode import csv try: from cStringIO import StringIO except: from StringIO import StringIO class SubmissionsService(Service): __model__ = Submission def _set_default_filter_parameters(self, kwargs): """Updates the kwargs by setting the default filter parameters if available. :param kwargs: a dictionary of parameters """ try: deployment = kwargs.get('deployment', g.get('deployment')) event = kwargs.get('event', g.get('event')) if deployment: kwargs.update({'deployment': deployment}) if event: kwargs.update({'event': event}) except RuntimeError: pass return kwargs def export_list(self, queryset, deployment): if queryset.count() < 1: yield else: submission = queryset.first() form = submission.form fields = [ field.name for group in form.groups for field in group.fields] location_types = LocationType.objects( is_political=True, deployment=deployment) if submission.submission_type == 'O': ds_headers = [ 'Participant ID', 'Name', 'DB Phone', 'Recent Phone'] + \ map(lambda location_type: location_type.name, location_types) if form.form_type == 'INCIDENT': ds_headers += ['Location', 'PS Code', 'RV'] + fields \ + ['Timestamp', 'Witness', 'Status', 'Description'] else: ds_headers += ['Location', 'PS Code', 'RV'] + fields \ + ['Timestamp', 'Comment'] else: ds_headers = [ 'Participant ID', 'Name', 'DB Phone', 'Recent Phone'] + \ map(lambda location_type: location_type.name, location_types) ds_headers += ['Location', 'PS Code', 'RV'] + fields \ + ['Timestamp'] + map(lambda f: '%s-C' % f, fields) output = StringIO() writer = csv.writer(output) writer.writerow([unidecode(unicode(i)) for i in ds_headers]) yield output.getvalue() output.close() for submission in queryset: if submission.submission_type == 'O': record = [ getattr(submission.contributor, 'participant_id', '') if getattr( submission.contributor, 'participant_id', '') else '', getattr(submission.contributor, 'name', '') if getattr(submission.contributor, 'name', '') else '', getattr(submission.contributor, 'phone', '') if getattr( submission.contributor, 'phone', '') else '', submission.contributor.phones[-1].number if getattr(submission.contributor, 'phones', None) else ''] + \ [submission.location_name_path.get( location_type.name, '') for location_type in location_types] + \ [getattr(submission.location, 'code', '') if submission.location else '', getattr(submission.location, 'political_code', '') if submission.location else '', getattr(submission.location, 'registered_voters', '') if submission.location else ''] + \ [getattr(submission, field, '') for field in fields] record += \ [submission.updated.strftime('%Y-%m-%d %H:%M:%S'), getattr(submission, 'witness', '') if getattr(submission, 'witness', '') else '', getattr(submission, 'status', '') if getattr(submission, 'status', '') else '', getattr(submission, 'description', '') if getattr(submission, 'description', '') else ''] \ if form.form_type == 'INCIDENT' else \ [submission.updated.strftime('%Y-%m-%d %H:%M:%S'), submission.comments.first().comment if submission.comments.first() else ''] else: sib = submission.siblings.first() record = [ getattr(sib.contributor, 'participant_id', '') if sib else '', getattr(sib.contributor, 'name', '') if sib else '', getattr(sib.contributor, 'phone', '') if sib else '', sib.contributor.phones[-1].number if getattr(sib.contributor, 'phones', 
None) else '' if sib else ''] + \ [submission.location_name_path.get( location_type.name, '') for location_type in location_types] + \ [getattr(submission.location, 'code', '') if submission.location else '', getattr(submission.location, 'political_code', '') if submission.location else '', getattr(submission.location, 'registered_voters', '') if submission.location else ''] + \ [getattr(submission, field, '') for field in fields] + \ [submission.updated.strftime('%Y-%m-%d %H:%M:%S')] + \ [submission.confidence.get(field, '') or '' for field in fields] output = StringIO() writer = csv.writer(output) writer.writerow([unidecode(unicode(i)) for i in record]) yield output.getvalue() output.close() class SubmissionCommentsService(Service): __model__ = SubmissionComment def create_comment(self, submission, comment, user=None): return self.create( submission=submission, user=user, comment=comment, submit_date=datetime.utcnow()) class SubmissionVersionsService(Service): __model__ = SubmissionVersion
gpl-3.0
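The export_list() generator in the submissions service above streams a CSV export by writing one row at a time into an in-memory buffer and yielding the buffer's contents. Below is a minimal, self-contained sketch of that pattern; the headers and rows are made-up sample data, not the Submission model, and it uses Python 3's io.StringIO where the service uses cStringIO under Python 2.

import csv
import io

def stream_csv(headers, rows):
    # Write the header row into its own buffer and yield it immediately,
    # then do the same for every data row, so an HTTP response can stream.
    buf = io.StringIO()
    csv.writer(buf).writerow(headers)
    yield buf.getvalue()
    buf.close()
    for row in rows:
        buf = io.StringIO()
        csv.writer(buf).writerow(row)
        yield buf.getvalue()
        buf.close()

if __name__ == "__main__":
    sample = [("P001", "Observer One", "0800000001"),
              ("P002", "Observer Two", "0800000002")]
    for chunk in stream_csv(["Participant ID", "Name", "DB Phone"], sample):
        print(chunk, end="")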
WafaaT/spark-tk
python/sparktk/frame/constructors/import_csv_raw.py
12
4415
# vim: set encoding=utf-8 # Copyright (c) 2016 Intel Corporation  # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # #       http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from sparktk.tkcontext import TkContext from pyspark.rdd import RDD import sparktk.dtypes as dtypes from sparktk.arguments import require_type def import_csv_raw(path, delimiter=",", header=False, tc=TkContext.implicit): """ Creates a frame by importing the data as strings from the specified csv file. If the csv file has a header row, those values will be used as column names. Otherwise, columns will be named generically, like 'C0', 'C1', 'C2', etc. Parameters ---------- :param path: (str) Full path to the csv file :param delimiter: (str) A string which indicates the separation of data fields. This is usually a single character and could be a non-visible character, such as a tab. The default delimiter is a comma (,). :param header: (bool) Boolean value indicating if the first line of the file will be used to name columns, and not be included in the data. The default value is false. :return: (Frame) Frame that contains the data from the csv file Examples -------- Import raw data from a csv file by specifying the path to the file, delimiter, and header option. All data will be brought in the frame as strings, and columns will be named according to the header row, if there was one. 
>>> file_path = "../datasets/cities.csv" >>> frame = tc.frame.import_csv_raw(file_path, delimiter="|", header=True) -etc- >>> frame.inspect() [#] rank city population_2013 population_2010 change county ============================================================================ [0] 1 Portland 609456 583776 4.40% Multnomah [1] 2 Salem 160614 154637 3.87% Marion [2] 3 Eugene 159190 156185 1.92% Lane [3] 4 Gresham 109397 105594 3.60% Multnomah [4] 5 Hillsboro 97368 91611 6.28% Washington [5] 6 Beaverton 93542 89803 4.16% Washington [6] 15 Grants Pass 35076 34533 1.57% Josephine [7] 16 Oregon City 34622 31859 8.67% Clackamas [8] 17 McMinnville 33131 32187 2.93% Yamhill [9] 18 Redmond 27427 26215 4.62% Deschutes >>> frame.schema [('rank', <type 'str'>), ('city', <type 'str'>), ('population_2013', <type 'str'>), ('population_2010', <type 'str'>), ('change', <type 'str'>), ('county', <type 'str'>)] """ TkContext.validate(tc) require_type.non_empty_str(path, "path") require_type.non_empty_str(delimiter, "delimiter") require_type(bool, header, "header") df = tc.sql_context.read.format( "com.databricks.spark.csv.org.trustedanalytics.sparktk").options( delimiter=delimiter, header=str(header).lower(), inferschema="false").load(path, schema=None) df_schema = [] for column in df.schema.fields: try: datatype = dtypes.dtypes.get_primitive_type_from_pyspark_type(type(column.dataType)) except ValueError: raise TypeError("Unsupported data type ({0}) for column {1}.".format(str(column.dataType), column.name)) df_schema.append((column.name, datatype)) jrdd = tc.sc._jvm.org.trustedanalytics.sparktk.frame.internal.rdd.PythonJavaRdd.scalaToPython(df._jdf.rdd()) rdd = RDD(jrdd, tc.sc) from sparktk.frame.frame import Frame # circular dependency, so import late return Frame(tc, rdd, df_schema)
apache-2.0
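import_csv_raw() above deliberately defers all typing: every column comes back as str, and columns are named C0, C1, ... when header=False. The snippet below is a hypothetical, Spark-free illustration of that contract using only the standard library; read_csv_raw and the inline sample data are assumptions for the example, not part of sparktk.

import csv
import io

def read_csv_raw(text, delimiter=",", header=False):
    # Parse without any type inference: every field stays a string.
    rows = list(csv.reader(io.StringIO(text), delimiter=delimiter))
    if header:
        names, data = rows[0], rows[1:]
    else:
        width = len(rows[0]) if rows else 0
        names, data = ["C%d" % i for i in range(width)], rows
    schema = [(name, str) for name in names]
    return schema, data

schema, data = read_csv_raw("1|Portland|609456\n2|Salem|160614", delimiter="|")
print(schema)  # [('C0', <class 'str'>), ('C1', <class 'str'>), ('C2', <class 'str'>)]
print(data)    # [['1', 'Portland', '609456'], ['2', 'Salem', '160614']]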
macchina-io/macchina.io
platform/JS/V8/v8/tools/turbolizer-perf.py
12
1590
# Copyright 2016 the V8 project authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import sys import json import re import argparse sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * def trace_begin(): json_obj['eventCounts'] = {} prog = re.compile(r'0x[0-9a-fA-F]+') for phase in reversed(json_obj['phases']): if phase['name'] == "disassembly": for line in phase['data'].splitlines(): result = re.match(prog, line) if result: known_addrs.add(result.group(0)) def trace_end(): print json.dumps(json_obj) def process_event(param_dict): addr = "0x%x" % int(param_dict['sample']['ip']) # Only count samples that belong to the function if addr not in known_addrs: return ev_name = param_dict['ev_name'] if ev_name not in json_obj['eventCounts']: json_obj['eventCounts'][ev_name] = {} if addr not in json_obj['eventCounts'][ev_name]: json_obj['eventCounts'][ev_name][addr] = 0 json_obj['eventCounts'][ev_name][addr] += 1 if __name__ == "__main__": parser = argparse.ArgumentParser( description="Perf script to merge profiling data with turbofan compiler " "traces.") parser.add_argument("file_name", metavar="JSON File", help="turbo trace json file.") args = parser.parse_args() with open(args.file_name, 'r') as json_file: json_obj = json.load(json_file) known_addrs = set()
apache-2.0
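The perf script above only attributes samples to addresses that appear in the turbofan disassembly phase; trace_begin() harvests those addresses with a 0x... regex. Here is a standalone sketch of that harvesting step, run against made-up disassembly text rather than a real turbo trace JSON file.

import re

DISASSEMBLY = """\
0x2a710a20    55          push rbp
0x2a710a21    4889e5      mov rbp,rsp
0x2a710a24    c3          ret
"""

prog = re.compile(r'0x[0-9a-fA-F]+')
known_addrs = set()
for line in DISASSEMBLY.splitlines():
    result = prog.match(line)          # same anchored match as trace_begin()
    if result:
        known_addrs.add(result.group(0))

print(sorted(known_addrs))
# ['0x2a710a20', '0x2a710a21', '0x2a710a24']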
gezb/osmc
package/mediacenter-addon-osmc/src/script.module.elementtree/lib/selftest.py
22
28466
# $Id: selftest.py 3224 2007-08-27 21:23:39Z fredrik $ # -*- coding: iso-8859-1 -*- # elementtree selftest program # this test script uses Python's "doctest" module to check that the # *test script* works as expected. # TODO: add more elementtree method tests # TODO: add xml/html parsing tests # TODO: etc import sys, cStringIO from elementtree import ElementTree from elementtree import ElementPath from elementtree import ElementInclude from elementtree import HTMLTreeBuilder from elementtree import SimpleXMLWriter def serialize(elem, encoding=None): import cStringIO file = cStringIO.StringIO() tree = ElementTree.ElementTree(elem) if encoding: tree.write(file, encoding) else: tree.write(file) return file.getvalue() def summarize(elem): return elem.tag def summarize_list(seq): return map(summarize, seq) def normalize_crlf(tree): for elem in tree.getiterator(): if elem.text: elem.text = elem.text.replace("\r\n", "\n") if elem.tail: elem.tail = elem.tail.replace("\r\n", "\n") SAMPLE_XML = ElementTree.XML(""" <body> <tag>text</tag> <tag /> <section> <tag>subtext</tag> </section> </body> """) # # interface tests def check_string(string): len(string) for char in string: if len(char) != 1: print "expected one-character string, got %r" % char new_string = string + "" new_string = string + " " string[:0] def check_string_or_none(value): if value is None: return return check_string(value) def check_mapping(mapping): len(mapping) keys = mapping.keys() items = mapping.items() for key in keys: item = mapping[key] mapping["key"] = "value" if mapping["key"] != "value": print "expected value string, got %r" % mapping["key"] def check_element(element): if not hasattr(element, "tag"): print "no tag member" if not hasattr(element, "attrib"): print "no attrib member" if not hasattr(element, "text"): print "no text member" if not hasattr(element, "tail"): print "no tail member" check_string(element.tag) check_mapping(element.attrib) check_string_or_none(element.text) check_string_or_none(element.tail) for elem in element: check_element(elem) def check_element_tree(tree): check_element(tree.getroot()) # -------------------------------------------------------------------- # element tree tests def sanity(): """ >>> from elementtree.ElementTree import * >>> from elementtree.ElementInclude import * >>> from elementtree.ElementPath import * >>> from elementtree.HTMLTreeBuilder import * >>> from elementtree.SimpleXMLTreeBuilder import * >>> from elementtree.SimpleXMLWriter import * >>> from elementtree.TidyTools import * >>> from elementtree.XMLTreeBuilder import * """ def version(): """ >>> ElementTree.VERSION '1.2.7' """ def interface(): """ Test element tree interface. >>> element = ElementTree.Element("tag") >>> check_element(element) >>> tree = ElementTree.ElementTree(element) >>> check_element_tree(tree) """ def simplefind(): """ Test find methods using the elementpath fallback. >>> CurrentElementPath = ElementTree.ElementPath >>> ElementTree.ElementPath = ElementTree._SimpleElementPath() >>> elem = SAMPLE_XML >>> elem.find("tag").tag 'tag' >>> ElementTree.ElementTree(elem).find("tag").tag 'tag' >>> elem.findtext("tag") 'text' >>> elem.findtext("tog") >>> elem.findtext("tog", "default") 'default' >>> ElementTree.ElementTree(elem).findtext("tag") 'text' >>> summarize_list(elem.findall("tag")) ['tag', 'tag'] >>> summarize_list(elem.findall(".//tag")) ['tag', 'tag', 'tag'] Path syntax doesn't work in this case. 
>>> elem.find("section/tag") >>> elem.findtext("section/tag") >>> elem.findall("section/tag") [] >>> ElementTree.ElementPath = CurrentElementPath """ def find(): """ Test find methods (including xpath syntax). >>> elem = SAMPLE_XML >>> elem.find("tag").tag 'tag' >>> ElementTree.ElementTree(elem).find("tag").tag 'tag' >>> elem.find("section/tag").tag 'tag' >>> ElementTree.ElementTree(elem).find("section/tag").tag 'tag' >>> elem.findtext("tag") 'text' >>> elem.findtext("tog") >>> elem.findtext("tog", "default") 'default' >>> ElementTree.ElementTree(elem).findtext("tag") 'text' >>> elem.findtext("section/tag") 'subtext' >>> ElementTree.ElementTree(elem).findtext("section/tag") 'subtext' >>> summarize_list(elem.findall("tag")) ['tag', 'tag'] >>> summarize_list(elem.findall("*")) ['tag', 'tag', 'section'] >>> summarize_list(elem.findall(".//tag")) ['tag', 'tag', 'tag'] >>> summarize_list(elem.findall("section/tag")) ['tag'] >>> summarize_list(elem.findall("section//tag")) ['tag'] >>> summarize_list(elem.findall("section/*")) ['tag'] >>> summarize_list(elem.findall("section//*")) ['tag'] >>> summarize_list(elem.findall("section/.//*")) ['tag'] >>> summarize_list(elem.findall("*/*")) ['tag'] >>> summarize_list(elem.findall("*//*")) ['tag'] >>> summarize_list(elem.findall("*/tag")) ['tag'] >>> summarize_list(elem.findall("*/./tag")) ['tag'] >>> summarize_list(elem.findall("./tag")) ['tag', 'tag'] >>> summarize_list(elem.findall(".//tag")) ['tag', 'tag', 'tag'] >>> summarize_list(elem.findall("././tag")) ['tag', 'tag'] >>> summarize_list(ElementTree.ElementTree(elem).findall("/tag")) ['tag', 'tag'] >>> summarize_list(ElementTree.ElementTree(elem).findall("./tag")) ['tag', 'tag'] """ def bad_find(): """ Check bad or unsupported path expressions. >>> elem = SAMPLE_XML >>> elem.findall("/tag") Traceback (most recent call last): SyntaxError: cannot use absolute path on element >>> elem.findall("../tag") Traceback (most recent call last): SyntaxError: unsupported path syntax (..) >>> elem.findall("section//") Traceback (most recent call last): SyntaxError: path cannot end with // >>> elem.findall("tag[tag]") Traceback (most recent call last): SyntaxError: expected path separator ([) """ def parsefile(): """ Test parsing from file. >>> tree = ElementTree.parse("samples/simple.xml") >>> normalize_crlf(tree) >>> tree.write(sys.stdout) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> >>> tree = ElementTree.parse("samples/simple-ns.xml") >>> normalize_crlf(tree) >>> tree.write(sys.stdout) <ns0:root xmlns:ns0="namespace"> <ns0:element key="value">text</ns0:element> <ns0:element>text</ns0:element>tail <ns0:empty-element /> </ns0:root> """ def parsehtml(): """ Test HTML parsing. 
>>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p><p>spam<b>egg</b></p>") >>> serialize(p.close()) '<p>spam<b>egg</b></p>' """ def parseliteral(): r""" >>> element = ElementTree.XML("<html><body>text</body></html>") >>> ElementTree.ElementTree(element).write(sys.stdout) <html><body>text</body></html> >>> element = ElementTree.fromstring("<html><body>text</body></html>") >>> ElementTree.ElementTree(element).write(sys.stdout) <html><body>text</body></html> >>> print ElementTree.tostring(element) <html><body>text</body></html> >>> print ElementTree.tostring(element, "ascii") <?xml version='1.0' encoding='ascii'?> <html><body>text</body></html> >>> _, ids = ElementTree.XMLID("<html><body>text</body></html>") >>> len(ids) 0 >>> _, ids = ElementTree.XMLID("<html><body id='body'>text</body></html>") >>> len(ids) 1 >>> ids["body"].tag 'body' """ def simpleparsefile(): """ Test the xmllib-based parser. >>> from elementtree import SimpleXMLTreeBuilder >>> parser = SimpleXMLTreeBuilder.TreeBuilder() >>> tree = ElementTree.parse("samples/simple.xml", parser) >>> normalize_crlf(tree) >>> tree.write(sys.stdout) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> """ def iterparse(): """ Test iterparse interface. >>> iterparse = ElementTree.iterparse >>> context = iterparse("samples/simple.xml") >>> for action, elem in context: ... print action, elem.tag end element end element end empty-element end root >>> context.root.tag 'root' >>> context = iterparse("samples/simple-ns.xml") >>> for action, elem in context: ... print action, elem.tag end {namespace}element end {namespace}element end {namespace}empty-element end {namespace}root >>> events = () >>> context = iterparse("samples/simple.xml", events) >>> for action, elem in context: ... print action, elem.tag >>> events = () >>> context = iterparse("samples/simple.xml", events=events) >>> for action, elem in context: ... print action, elem.tag >>> events = ("start", "end") >>> context = iterparse("samples/simple.xml", events) >>> for action, elem in context: ... print action, elem.tag start root start element end element start element end element start empty-element end empty-element end root >>> events = ("start", "end", "start-ns", "end-ns") >>> context = iterparse("samples/simple-ns.xml", events) >>> for action, elem in context: ... if action in ("start", "end"): ... print action, elem.tag ... else: ... print action, elem start-ns ('', 'namespace') start {namespace}root start {namespace}element end {namespace}element start {namespace}element end {namespace}element start {namespace}empty-element end {namespace}empty-element end {namespace}root end-ns None """ def fancyparsefile(): """ Test the "fancy" parser. Sanity check. >>> from elementtree import XMLTreeBuilder >>> parser = XMLTreeBuilder.FancyTreeBuilder() >>> tree = ElementTree.parse("samples/simple.xml", parser) >>> normalize_crlf(tree) >>> tree.write(sys.stdout) <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> Callback check. >>> class MyFancyParser(XMLTreeBuilder.FancyTreeBuilder): ... def start(self, elem): ... print "START", elem.tag ... def end(self, elem): ... 
print "END", elem.tag >>> parser = MyFancyParser() >>> tree = ElementTree.parse("samples/simple.xml", parser) START root START element END element START element END element START empty-element END empty-element END root """ def writefile(): """ >>> elem = ElementTree.Element("tag") >>> elem.text = "text" >>> serialize(elem) '<tag>text</tag>' >>> ElementTree.SubElement(elem, "subtag").text = "subtext" >>> serialize(elem) '<tag>text<subtag>subtext</subtag></tag>' """ def writestring(): """ >>> elem = ElementTree.XML("<html><body>text</body></html>") >>> ElementTree.tostring(elem) '<html><body>text</body></html>' >>> elem = ElementTree.fromstring("<html><body>text</body></html>") >>> ElementTree.tostring(elem) '<html><body>text</body></html>' """ def encoding(): r""" Test encoding issues. >>> elem = ElementTree.Element("tag") >>> elem.text = u"abc" >>> serialize(elem) '<tag>abc</tag>' >>> serialize(elem, "utf-8") '<tag>abc</tag>' >>> serialize(elem, "us-ascii") '<tag>abc</tag>' >>> serialize(elem, "iso-8859-1") "<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>abc</tag>" >>> elem.text = "<&\"\'>" >>> serialize(elem) '<tag>&lt;&amp;"\'&gt;</tag>' >>> serialize(elem, "utf-8") '<tag>&lt;&amp;"\'&gt;</tag>' >>> serialize(elem, "us-ascii") # cdata characters '<tag>&lt;&amp;"\'&gt;</tag>' >>> serialize(elem, "iso-8859-1") '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag>&lt;&amp;"\'&gt;</tag>' >>> elem.attrib["key"] = "<&\"\'>" >>> elem.text = None >>> serialize(elem) '<tag key="&lt;&amp;&quot;\'&gt;" />' >>> serialize(elem, "utf-8") '<tag key="&lt;&amp;&quot;\'&gt;" />' >>> serialize(elem, "us-ascii") '<tag key="&lt;&amp;&quot;\'&gt;" />' >>> serialize(elem, "iso-8859-1") '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="&lt;&amp;&quot;\'&gt;" />' >>> elem.text = u'\xe5\xf6\xf6<>' >>> elem.attrib.clear() >>> serialize(elem) '<tag>&#229;&#246;&#246;&lt;&gt;</tag>' >>> serialize(elem, "utf-8") '<tag>\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;</tag>' >>> serialize(elem, "us-ascii") '<tag>&#229;&#246;&#246;&lt;&gt;</tag>' >>> serialize(elem, "iso-8859-1") "<?xml version='1.0' encoding='iso-8859-1'?>\n<tag>\xe5\xf6\xf6&lt;&gt;</tag>" >>> elem.attrib["key"] = u'\xe5\xf6\xf6<>' >>> elem.text = None >>> serialize(elem) '<tag key="&#229;&#246;&#246;&lt;&gt;" />' >>> serialize(elem, "utf-8") '<tag key="\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;" />' >>> serialize(elem, "us-ascii") '<tag key="&#229;&#246;&#246;&lt;&gt;" />' >>> serialize(elem, "iso-8859-1") '<?xml version=\'1.0\' encoding=\'iso-8859-1\'?>\n<tag key="\xe5\xf6\xf6&lt;&gt;" />' >>> elem.attrib["key"] = u'foo\nbar' >>> serialize(elem) '<tag key="foo&#10;bar" />' """ ENTITY_XML = """\ <!DOCTYPE points [ <!ENTITY % user-entities SYSTEM 'user-entities.xml'> %user-entities; ]> <document>&entity;</document> """ def entity(): """ Test entity handling. 1) bad entities >>> ElementTree.XML("<document>&entity;</document>") Traceback (most recent call last): ExpatError: undefined entity: line 1, column 10 >>> ElementTree.XML(ENTITY_XML) Traceback (most recent call last): ExpatError: undefined entity &entity;: line 5, column 10 (add more tests here) """ def namespace(): """ Test namespace issues. 
1) xml namespace >>> elem = ElementTree.XML("<tag xml:lang='en' />") >>> serialize(elem) # 1.1 '<tag xml:lang="en" />' 2) other "well-known" namespaces >>> elem = ElementTree.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />") >>> serialize(elem) # 2.1 '<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />' >>> elem = ElementTree.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />") >>> serialize(elem) # 2.2 '<html:html xmlns:html="http://www.w3.org/1999/xhtml" />' >>> elem = ElementTree.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />") >>> serialize(elem) # 2.3 '<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />' 3) unknown namespaces """ def qname(): """ Test QName handling. 1) decorated tags >>> elem = ElementTree.Element("{uri}tag") >>> serialize(elem) # 1.1 '<ns0:tag xmlns:ns0="uri" />' >>> elem = ElementTree.Element(ElementTree.QName("{uri}tag")) >>> serialize(elem) # 1.2 '<ns0:tag xmlns:ns0="uri" />' >>> elem = ElementTree.Element(ElementTree.QName("uri", "tag")) >>> serialize(elem) # 1.3 '<ns0:tag xmlns:ns0="uri" />' 2) decorated attributes >>> elem.clear() >>> elem.attrib["{uri}key"] = "value" >>> serialize(elem) # 2.1 '<ns0:tag ns0:key="value" xmlns:ns0="uri" />' >>> elem.clear() >>> elem.attrib[ElementTree.QName("{uri}key")] = "value" >>> serialize(elem) # 2.2 '<ns0:tag ns0:key="value" xmlns:ns0="uri" />' 3) decorated values are not converted by default, but the QName wrapper can be used for values >>> elem.clear() >>> elem.attrib["{uri}key"] = "{uri}value" >>> serialize(elem) # 3.1 '<ns0:tag ns0:key="{uri}value" xmlns:ns0="uri" />' >>> elem.clear() >>> elem.attrib["{uri}key"] = ElementTree.QName("{uri}value") >>> serialize(elem) # 3.2 '<ns0:tag ns0:key="ns0:value" xmlns:ns0="uri" />' >>> elem.clear() >>> subelem = ElementTree.Element("tag") >>> subelem.attrib["{uri1}key"] = ElementTree.QName("{uri2}value") >>> elem.append(subelem) >>> elem.append(subelem) >>> serialize(elem) # 3.3 '<ns0:tag xmlns:ns0="uri"><tag ns1:key="ns2:value" xmlns:ns1="uri1" xmlns:ns2="uri2" /><tag ns1:key="ns2:value" xmlns:ns1="uri1" xmlns:ns2="uri2" /></ns0:tag>' """ def xpath_tokenizer(p): """ Test the XPath tokenizer. 
>>> # tests from the xml specification >>> xpath_tokenizer("*") ['*'] >>> xpath_tokenizer("text()") ['text', '()'] >>> xpath_tokenizer("@name") ['@', 'name'] >>> xpath_tokenizer("@*") ['@', '*'] >>> xpath_tokenizer("para[1]") ['para', '[', '1', ']'] >>> xpath_tokenizer("para[last()]") ['para', '[', 'last', '()', ']'] >>> xpath_tokenizer("*/para") ['*', '/', 'para'] >>> xpath_tokenizer("/doc/chapter[5]/section[2]") ['/', 'doc', '/', 'chapter', '[', '5', ']', '/', 'section', '[', '2', ']'] >>> xpath_tokenizer("chapter//para") ['chapter', '/', '/', 'para'] >>> xpath_tokenizer("//para") ['/', '/', 'para'] >>> xpath_tokenizer("//olist/item") ['/', '/', 'olist', '/', 'item'] >>> xpath_tokenizer(".") ['.'] >>> xpath_tokenizer(".//para") ['.', '/', '/', 'para'] >>> xpath_tokenizer("..") ['..'] >>> xpath_tokenizer("../@lang") ['..', '/', '@', 'lang'] >>> xpath_tokenizer("chapter[title]") ['chapter', '[', 'title', ']'] >>> xpath_tokenizer("employee[@secretary and @assistant]") ['employee', '[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']'] >>> # additional tests >>> xpath_tokenizer("{http://spam}egg") ['{http://spam}egg'] >>> xpath_tokenizer("./spam.egg") ['.', '/', 'spam.egg'] >>> xpath_tokenizer(".//{http://spam}egg") ['.', '/', '/', '{http://spam}egg'] """ out = [] for op, tag in ElementPath.xpath_tokenizer(p): out.append(op or tag) return out # # xinclude tests (samples from appendix C of the xinclude specification) XINCLUDE = {} XINCLUDE["C1.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>120 Mz is adequate for an average home user.</p> <xi:include href="disclaimer.xml"/> </document> """ XINCLUDE["disclaimer.xml"] = """\ <?xml version='1.0'?> <disclaimer> <p>The opinions represented herein represent those of the individual and should not be interpreted as official policy endorsed by this organization.</p> </disclaimer> """ XINCLUDE["C2.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>This document has been accessed <xi:include href="count.txt" parse="text"/> times.</p> </document> """ XINCLUDE["count.txt"] = "324387" XINCLUDE["C3.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>The following is the source of the "data.xml" resource:</p> <example><xi:include href="data.xml" parse="text"/></example> </document> """ XINCLUDE["data.xml"] = """\ <?xml version='1.0'?> <data> <item><![CDATA[Brooks & Shields]]></item> </data> """ XINCLUDE["C5.xml"] = """\ <?xml version='1.0'?> <div xmlns:xi="http://www.w3.org/2001/XInclude"> <xi:include href="example.txt" parse="text"> <xi:fallback> <xi:include href="fallback-example.txt" parse="text"> <xi:fallback><a href="mailto:[email protected]">Report error</a></xi:fallback> </xi:include> </xi:fallback> </xi:include> </div> """ XINCLUDE["default.xml"] = """\ <?xml version='1.0'?> <document xmlns:xi="http://www.w3.org/2001/XInclude"> <p>Example.</p> <xi:include href="samples/simple.xml"/> </document> """ def xinclude_loader(href, parse="xml", encoding=None): try: data = XINCLUDE[href] except KeyError: raise IOError("resource not found") if parse == "xml": return ElementTree.XML(data) return data def xinclude(): r""" Basic inclusion example (XInclude C.1) >>> document = xinclude_loader("C1.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print serialize(document) # C1 <document> <p>120 Mz is adequate for an average home user.</p> <disclaimer> <p>The opinions represented herein represent those of the individual and 
should not be interpreted as official policy endorsed by this organization.</p> </disclaimer> </document> Textual inclusion example (XInclude C.2) >>> document = xinclude_loader("C2.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print serialize(document) # C2 <document> <p>This document has been accessed 324387 times.</p> </document> Textual inclusion of XML example (XInclude C.3) >>> document = xinclude_loader("C3.xml") >>> ElementInclude.include(document, xinclude_loader) >>> print serialize(document) # C3 <document> <p>The following is the source of the "data.xml" resource:</p> <example>&lt;?xml version='1.0'?&gt; &lt;data&gt; &lt;item&gt;&lt;![CDATA[Brooks &amp; Shields]]&gt;&lt;/item&gt; &lt;/data&gt; </example> </document> Fallback example (XInclude C.5) Note! Fallback support is not yet implemented >>> document = xinclude_loader("C5.xml") >>> ElementInclude.include(document, xinclude_loader) Traceback (most recent call last): IOError: resource not found >>> # print serialize(document) # C5 """ def xinclude_default(): """ >>> document = xinclude_loader("default.xml") >>> ElementInclude.include(document) >>> print serialize(document) # default <document> <p>Example.</p> <root> <element key="value">text</element> <element>text</element>tail <empty-element /> </root> </document> """ # # xmlwriter def xmlwriter(): r""" >>> file = cStringIO.StringIO() >>> w = SimpleXMLWriter.XMLWriter(file) >>> html = w.start("html") >>> x = w.start("head") >>> w.element("title", "my document") >>> w.data("\n") >>> w.element("meta", name="hello", value="goodbye") >>> w.data("\n") >>> w.end() >>> x = w.start("body") >>> w.element("h1", "this is a heading") >>> w.data("\n") >>> w.element("p", u"this is a paragraph") >>> w.data("\n") >>> w.element("p", u"reserved characters: <&>") >>> w.data("\n") >>> w.element("p", u"detta är också ett stycke") >>> w.data("\n") >>> w.close(html) >>> print file.getvalue() <html><head><title>my document</title> <meta name="hello" value="goodbye" /> </head><body><h1>this is a heading</h1> <p>this is a paragraph</p> <p>reserved characters: &lt;&amp;&gt;</p> <p>detta &#228;r ocks&#229; ett stycke</p> </body></html> """ # -------------------------------------------------------------------- # reported bugs def bug_xmltoolkit21(): """ marshaller gives obscure errors for non-string values >>> elem = ElementTree.Element(123) >>> serialize(elem) # tag Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ElementTree.Element("elem") >>> elem.text = 123 >>> serialize(elem) # text Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ElementTree.Element("elem") >>> elem.tail = 123 >>> serialize(elem) # tail Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ElementTree.Element("elem") >>> elem.set(123, "123") >>> serialize(elem) # attribute key Traceback (most recent call last): TypeError: cannot serialize 123 (type int) >>> elem = ElementTree.Element("elem") >>> elem.set("123", 123) >>> serialize(elem) # attribute value Traceback (most recent call last): TypeError: cannot serialize 123 (type int) """ def bug_xmltoolkit25(): """ typo in ElementTree.findtext >>> tree = ElementTree.ElementTree(SAMPLE_XML) >>> tree.findtext("tag") 'text' >>> tree.findtext("section/tag") 'subtext' """ def bug_xmltoolkit28(): """ .//tag causes exceptions >>> tree = ElementTree.XML("<doc><table><tbody/></table></doc>") >>> summarize_list(tree.findall(".//thead")) [] >>> 
summarize_list(tree.findall(".//tbody")) ['tbody'] """ def bug_xmltoolkitX1(): """ dump() doesn't flush the output buffer >>> tree = ElementTree.XML("<doc><table><tbody/></table></doc>") >>> ElementTree.dump(tree); sys.stdout.write("tail") <doc><table><tbody /></table></doc> tail """ def bug_xmltoolkit39(): """ non-ascii element and attribute names doesn't work >>> tree = ElementTree.XML("<?xml version='1.0' encoding='iso-8859-1'?><täg />") >>> ElementTree.tostring(tree, "utf-8") '<t\\xc3\\xa4g />' >>> tree = ElementTree.XML("<?xml version='1.0' encoding='iso-8859-1'?><tag ättr='v&#228;lue' />") >>> tree.attrib {u'\\xe4ttr': u'v\\xe4lue'} >>> ElementTree.tostring(tree, "utf-8") '<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />' >>> tree = ElementTree.XML("<?xml version='1.0' encoding='iso-8859-1'?><täg>text</täg>") >>> ElementTree.tostring(tree, "utf-8") '<t\\xc3\\xa4g>text</t\\xc3\\xa4g>' >>> tree = ElementTree.Element(u"täg") >>> ElementTree.tostring(tree, "utf-8") '<t\\xc3\\xa4g />' >>> tree = ElementTree.Element("tag") >>> tree.set(u"ättr", u"välue") >>> ElementTree.tostring(tree, "utf-8") '<tag \\xc3\\xa4ttr="v\\xc3\\xa4lue" />' """ def bug_xmltoolkit45(): """ problems parsing mixed unicode/non-ascii html documents latin-1 text >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p>välue</p>") >>> serialize(p.close()) '<p>v&#228;lue</p>' utf-8 text >>> p = HTMLTreeBuilder.TreeBuilder(encoding="utf-8") >>> p.feed("<p>v\xc3\xa4lue</p>") >>> serialize(p.close()) '<p>v&#228;lue</p>' utf-8 text using meta tag >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<html><meta http-equiv='Content-Type' content='text/html; charset=utf-8'><p>v\xc3\xa4lue</p></html>") >>> serialize(p.close().find("p")) '<p>v&#228;lue</p>' latin-1 character references >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p>v&#228;lue</p>") >>> serialize(p.close()) '<p>v&#228;lue</p>' latin-1 character entities >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p>v&auml;lue</p>") >>> serialize(p.close()) '<p>v&#228;lue</p>' mixed latin-1 text and unicode entities >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p>&#8221;välue&#8221;</p>") >>> serialize(p.close()) '<p>&#8221;v&#228;lue&#8221;</p>' mixed unicode and latin-1 entities >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p>&#8221;v&auml;lue&#8221;</p>") >>> serialize(p.close()) '<p>&#8221;v&#228;lue&#8221;</p>' """ def bug_xmltoolkit46(): """ problems parsing open BR tags >>> p = HTMLTreeBuilder.TreeBuilder() >>> p.feed("<p>key<br>value</p>") >>> serialize(p.close()) '<p>key<br />value</p>' """ def bug_xmltoolkit54(): """ problems handling internally defined entities >>> e = ElementTree.XML("<!DOCTYPE doc [<!ENTITY ldots '&#x8230;'>]><doc>&ldots;</doc>") >>> serialize(e) '<doc>&#33328;</doc>' """ def bug_xmltoolkit55(): """ make sure we're reporting the first error, not the last >>> e = ElementTree.XML("<!DOCTYPE doc SYSTEM 'doc.dtd'><doc>&ldots;&ndots;&rdots;</doc>") Traceback (most recent call last): ExpatError: undefined entity &ldots;: line 1, column 36 """ # -------------------------------------------------------------------- if __name__ == "__main__": import doctest, selftest failed, tested = doctest.testmod(selftest) print tested - failed, "tests ok."
gpl-2.0
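The selftest module above drives everything through doctest.testmod(), with each test function's docstring holding the expected interpreter session. A scaled-down version of the same idea, written against the standard library's xml.etree (not the bundled elementtree package) and with Python 3 print syntax so it can run today:

def find_demo():
    """
    >>> from xml.etree import ElementTree
    >>> elem = ElementTree.XML("<body><tag>text</tag><section><tag>subtext</tag></section></body>")
    >>> elem.find("tag").tag
    'tag'
    >>> elem.findtext("section/tag")
    'subtext'
    >>> [e.tag for e in elem.findall(".//tag")]
    ['tag', 'tag']
    """

if __name__ == "__main__":
    import doctest
    failed, tested = doctest.testmod()
    print(tested - failed, "tests ok.")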
nemesiscodex/JukyOS-sugar
extensions/cpsection/datetime/model.py
1
2814
# Copyright (C) 2007, 2008 One Laptop Per Child # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # # # The timezone config is based on the system-config-date # (http://fedoraproject.org/wiki/SystemConfig/date) tool. # Parts of the code were reused. # import os from gettext import gettext as _ import gconf _zone_tab = '/usr/share/zoneinfo/zone.tab' def _initialize(): """Initialize the docstring of the set function""" if set_timezone.__doc__ is None: # when running under 'python -OO', all __doc__ fields are None, # so += would fail -- and this function would be unnecessary anyway. return timezones = read_all_timezones() for timezone in timezones: set_timezone.__doc__ += timezone + '\n' def read_all_timezones(fn=_zone_tab): fd = open(fn, 'r') lines = fd.readlines() fd.close() timezones = [] for line in lines: if line.startswith('#'): continue line = line.split() if len(line) > 1: timezones.append(line[2]) timezones.sort() for offset in xrange(-12, 13): if offset < 0: tz = 'GMT%d' % offset elif offset > 0: tz = 'GMT+%d' % offset else: tz = 'GMT' timezones.append(tz) for offset in xrange(-12, 13): if offset < 0: tz = 'UTC%d' % offset elif offset > 0: tz = 'UTC+%d' % offset else: tz = 'UTC' timezones.append(tz) return timezones def get_timezone(): client = gconf.client_get_default() return client.get_string('/desktop/sugar/date/timezone') def print_timezone(): print get_timezone() def set_timezone(timezone): """Set the system timezone timezone : e.g. 'America/Los_Angeles' """ timezones = read_all_timezones() if timezone in timezones: os.environ['TZ'] = timezone client = gconf.client_get_default() client.set_string('/desktop/sugar/date/timezone', timezone) else: raise ValueError(_('Error timezone does not exist.')) return 1 # inilialize the docstrings for the timezone _initialize()
gpl-2.0
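read_all_timezones() above builds its list by scanning /usr/share/zoneinfo/zone.tab: comment lines are skipped and the third whitespace-separated field is the timezone name. A dependency-free sketch of that parsing, fed with a two-line inline sample instead of the real zone.tab (the sample rows are illustrative only):

SAMPLE_ZONE_TAB = """\
# country-code  coordinates  TZ  comments
US\t+340308-1181434\tAmerica/Los_Angeles\tPacific
DE\t+5230+01322\tEurope/Berlin
"""

def read_timezones(text):
    zones = []
    for line in text.splitlines():
        if line.startswith('#'):
            continue
        fields = line.split()
        if len(fields) > 1:
            zones.append(fields[2])   # third column is the zone name
    zones.sort()
    return zones

print(read_timezones(SAMPLE_ZONE_TAB))
# ['America/Los_Angeles', 'Europe/Berlin']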
ykaneko/quantum
quantum/plugins/linuxbridge/db/l2network_db_v2.py
3
10016
# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sqlalchemy.orm import exc from quantum.common import exceptions as q_exc import quantum.db.api as db from quantum.db import models_v2 from quantum.db import securitygroups_db as sg_db from quantum import manager from quantum.openstack.common import log as logging from quantum.plugins.linuxbridge.common import config # noqa from quantum.plugins.linuxbridge.common import constants from quantum.plugins.linuxbridge.db import l2network_models_v2 LOG = logging.getLogger(__name__) def initialize(): db.configure_db() def sync_network_states(network_vlan_ranges): """Synchronize network_states table with current configured VLAN ranges.""" session = db.get_session() with session.begin(): # get existing allocations for all physical networks allocations = dict() states = (session.query(l2network_models_v2.NetworkState). all()) for state in states: if state.physical_network not in allocations: allocations[state.physical_network] = set() allocations[state.physical_network].add(state) # process vlan ranges for each configured physical network for physical_network, vlan_ranges in network_vlan_ranges.iteritems(): # determine current configured allocatable vlans for this # physical network vlan_ids = set() for vlan_range in vlan_ranges: vlan_ids |= set(xrange(vlan_range[0], vlan_range[1] + 1)) # remove from table unallocated vlans not currently allocatable if physical_network in allocations: for state in allocations[physical_network]: try: # see if vlan is allocatable vlan_ids.remove(state.vlan_id) except KeyError: # it's not allocatable, so check if its allocated if not state.allocated: # it's not, so remove it from table LOG.debug(_("Removing vlan %(vlan_id)s on " "physical network %(physical_network)s" " from pool"), {'vlan_id': state.vlan_id, 'physical_network': physical_network}) session.delete(state) del allocations[physical_network] # add missing allocatable vlans to table for vlan_id in sorted(vlan_ids): state = l2network_models_v2.NetworkState(physical_network, vlan_id) session.add(state) # remove from table unallocated vlans for any unconfigured physical # networks for states in allocations.itervalues(): for state in states: if not state.allocated: LOG.debug(_("Removing vlan %(vlan_id)s on physical " "network %(physical_network)s" " from pool"), {'vlan_id': state.vlan_id, 'physical_network': physical_network}) session.delete(state) def get_network_state(physical_network, vlan_id): """Get state of specified network.""" session = db.get_session() try: state = (session.query(l2network_models_v2.NetworkState). filter_by(physical_network=physical_network, vlan_id=vlan_id). one()) return state except exc.NoResultFound: return None def reserve_network(session): with session.begin(subtransactions=True): state = (session.query(l2network_models_v2.NetworkState). filter_by(allocated=False). with_lockmode('update'). 
first()) if not state: raise q_exc.NoNetworkAvailable() LOG.debug(_("Reserving vlan %(vlan_id)s on physical network " "%(physical_network)s from pool"), {'vlan_id': state.vlan_id, 'physical_network': state.physical_network}) state.allocated = True return (state.physical_network, state.vlan_id) def reserve_specific_network(session, physical_network, vlan_id): with session.begin(subtransactions=True): try: state = (session.query(l2network_models_v2.NetworkState). filter_by(physical_network=physical_network, vlan_id=vlan_id). with_lockmode('update'). one()) if state.allocated: if vlan_id == constants.FLAT_VLAN_ID: raise q_exc.FlatNetworkInUse( physical_network=physical_network) else: raise q_exc.VlanIdInUse(vlan_id=vlan_id, physical_network=physical_network) LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " "network %(physical_network)s from pool"), {'vlan_id': vlan_id, 'physical_network': physical_network}) state.allocated = True except exc.NoResultFound: LOG.debug(_("Reserving specific vlan %(vlan_id)s on physical " "network %(physical_network)s outside pool"), {'vlan_id': vlan_id, 'physical_network': physical_network}) state = l2network_models_v2.NetworkState(physical_network, vlan_id) state.allocated = True session.add(state) def release_network(session, physical_network, vlan_id, network_vlan_ranges): with session.begin(subtransactions=True): try: state = (session.query(l2network_models_v2.NetworkState). filter_by(physical_network=physical_network, vlan_id=vlan_id). with_lockmode('update'). one()) state.allocated = False inside = False for vlan_range in network_vlan_ranges.get(physical_network, []): if vlan_id >= vlan_range[0] and vlan_id <= vlan_range[1]: inside = True break if inside: LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " "%(physical_network)s to pool"), {'vlan_id': vlan_id, 'physical_network': physical_network}) else: LOG.debug(_("Releasing vlan %(vlan_id)s on physical network " "%(physical_network)s outside pool"), {'vlan_id': vlan_id, 'physical_network': physical_network}) session.delete(state) except exc.NoResultFound: LOG.warning(_("vlan_id %(vlan_id)s on physical network " "%(physical_network)s not found"), {'vlan_id': vlan_id, 'physical_network': physical_network}) def add_network_binding(session, network_id, physical_network, vlan_id): with session.begin(subtransactions=True): binding = l2network_models_v2.NetworkBinding(network_id, physical_network, vlan_id) session.add(binding) def get_network_binding(session, network_id): try: binding = (session.query(l2network_models_v2.NetworkBinding). filter_by(network_id=network_id). 
one()) return binding except exc.NoResultFound: return def get_port_from_device(device): """Get port from database.""" LOG.debug(_("get_port_from_device() called")) session = db.get_session() sg_binding_port = sg_db.SecurityGroupPortBinding.port_id query = session.query(models_v2.Port, sg_db.SecurityGroupPortBinding.security_group_id) query = query.outerjoin(sg_db.SecurityGroupPortBinding, models_v2.Port.id == sg_binding_port) query = query.filter(models_v2.Port.id.startswith(device)) port_and_sgs = query.all() if not port_and_sgs: return port = port_and_sgs[0][0] plugin = manager.QuantumManager.get_plugin() port_dict = plugin._make_port_dict(port) port_dict['security_groups'] = [] for port_in_db, sg_id in port_and_sgs: if sg_id: port_dict['security_groups'].append(sg_id) port_dict['security_group_rules'] = [] port_dict['security_group_source_groups'] = [] port_dict['fixed_ips'] = [ip['ip_address'] for ip in port['fixed_ips']] return port_dict def set_port_status(port_id, status): """Set the port status.""" LOG.debug(_("set_port_status as %s called"), status) session = db.get_session() try: port = session.query(models_v2.Port).filter_by(id=port_id).one() port['status'] = status session.merge(port) session.flush() except exc.NoResultFound: raise q_exc.PortNotFound(port_id=port_id)
apache-2.0
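sync_network_states() above reconciles the database against the configured VLAN ranges; its first step expands each (start, end) range into the full set of allocatable VLAN ids per physical network. A standalone sketch of just that expansion (the function name and sample ranges are mine, not the plugin's API):

def allocatable_vlans(vlan_ranges):
    # Union of all inclusive [start, end] ranges configured for one
    # physical network, mirroring the set-building loop in
    # sync_network_states().
    vlan_ids = set()
    for start, end in vlan_ranges:
        vlan_ids |= set(range(start, end + 1))
    return vlan_ids

print(sorted(allocatable_vlans([(100, 103), (200, 201)])))
# [100, 101, 102, 103, 200, 201]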
malikcjm/qtcreator
tests/system/shared/qtcreator.py
3
11814
############################################################################# ## ## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies). ## Contact: http://www.qt-project.org/legal ## ## This file is part of Qt Creator. ## ## Commercial License Usage ## Licensees holding valid commercial Qt licenses may use this file in ## accordance with the commercial license agreement provided with the ## Software or, alternatively, in accordance with the terms contained in ## a written agreement between you and Digia. For licensing terms and ## conditions see http://qt.digia.com/licensing. For further information ## use the contact form at http://qt.digia.com/contact-us. ## ## GNU Lesser General Public License Usage ## Alternatively, this file may be used under the terms of the GNU Lesser ## General Public License version 2.1 as published by the Free Software ## Foundation and appearing in the file LICENSE.LGPL included in the ## packaging of this file. Please review the following information to ## ensure the GNU Lesser General Public License version 2.1 requirements ## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. ## ## In addition, as a special exception, Digia gives you certain additional ## rights. These rights are described in the Digia Qt LGPL Exception ## version 1.1, included in the file LGPL_EXCEPTION.txt in this package. ## ############################################################################# import platform; import shutil; import os; import glob; import atexit; import codecs; import subprocess; import sys import errno; from datetime import datetime,timedelta; srcPath = '' SettingsPath = '' tmpSettingsDir = '' testSettings.logScreenshotOnFail = True testSettings.logScreenshotOnError = True __origStartApplication__ = None source("../../shared/classes.py") source("../../shared/utils.py") source("../../shared/fs_utils.py") source("../../shared/build_utils.py") source("../../shared/project.py") source("../../shared/editor_utils.py") source("../../shared/project_explorer.py") source("../../shared/hook_utils.py") source("../../shared/debugger.py") source("../../shared/workarounds.py") # ATTENTION: if a test case calls startApplication("qtcreator...") for several times this # function must be called BEFORE any call except the first (which is done always automatically) def overrideStartApplication(): global startApplication, __origStartApplication__ if (platform.system() != "Darwin"): return if (__origStartApplication__ == None): __origStartApplication__ = startApplication def startApplication(*args): args = list(args) if str(args[0]).startswith('qtcreator'): args[0] = args[0].replace('qtcreator', '"Qt Creator"', 1) test.log("Using workaround for MacOS (different AUT name)") return __origStartApplication__(*args) def startedWithoutPluginError(): try: loaderErrorWidgetName = ("{name='ExtensionSystem__Internal__PluginErrorOverview' " "type='ExtensionSystem::PluginErrorOverview' visible='1' " "windowTitle='Qt Creator - Plugin loader messages'}") loaderError = waitForObject(loaderErrorWidgetName, 1000) test.fatal("Could not perform clean start of Qt Creator - Plugin error occurred.", waitForObject("{name='pluginError' type='QTextEdit' visible='1' window=%s}" % loaderErrorWidgetName, 1000).plainText) clickButton("{text~='(Next.*|Continue)' type='QPushButton' visible='1'}") invokeMenuItem("File", "Exit") return False except: return True def waitForCleanShutdown(timeOut=10): appCtxt = currentApplicationContext() shutdownDone = (str(appCtxt)=="") if platform.system() in 
('Windows','Microsoft'): # cleaning helper for running on the build machines __checkForQmlViewer__() endtime = datetime.utcnow() + timedelta(seconds=timeOut) while not shutdownDone: # following work-around because os.kill() works for win not until python 2.7 if appCtxt.pid==-1: break tasks = subprocess.Popen("tasklist /FI \"PID eq %d\"" % appCtxt.pid, shell=True,stdout=subprocess.PIPE) output = tasks.communicate()[0] tasks.stdout.close() if (output=="INFO: No tasks are running which match the specified criteria." or output=="" or output.find("ERROR")==0): shutdownDone=True if not shutdownDone and datetime.utcnow() > endtime: break else: endtime = datetime.utcnow() + timedelta(seconds=timeOut) while not shutdownDone: try: os.kill(appCtxt.pid,0) except OSError, err: if err.errno == errno.EPERM or err.errno == errno.ESRCH: shutdownDone=True if not shutdownDone and datetime.utcnow() > endtime: break def __checkForQmlViewer__(): tasks = subprocess.Popen("tasklist /FI \"IMAGENAME eq qmlviewer.exe\"", shell=True, stdout=subprocess.PIPE) output = tasks.communicate()[0] tasks.stdout.close() if "INFO: No tasks are running which match the specified criteria." in output: return else: if subprocess.call("taskkill /F /FI \"IMAGENAME eq qmlviewer.exe\"", shell=True) == 0: print "Killed still running qmlviewer" else: print "qmlviewer is still running - failed to kill it" def __removeTestingDir__(): def __removeIt__(directory): deleteDirIfExists(directory) return not os.path.exists(directory) devicesXML = os.path.join(tmpSettingsDir, "QtProject", "qtcreator", "devices.xml") lastMTime = os.path.getmtime(devicesXML) testingDir = os.path.dirname(tmpSettingsDir) waitForCleanShutdown() waitFor('os.path.getmtime(devicesXML) > lastMTime', 5000) waitFor('__removeIt__(testingDir)', 2000) def __substitute__(fileName, search, replace): origFileName = fileName + "_orig" os.rename(fileName, origFileName) origFile = open(origFileName, "r") modifiedFile = open(fileName, "w") for line in origFile: modifiedFile.write(line.replace(search, replace)) origFile.close() modifiedFile.close() os.remove(origFileName) def substituteTildeWithinToolchains(settingsDir): toolchains = os.path.join(settingsDir, "QtProject", 'qtcreator', 'toolchains.xml') home = os.path.expanduser("~") __substitute__(toolchains, "~", home) test.log("Substituted all tildes with '%s' inside toolchains.xml..." % home) def substituteDefaultCompiler(settingsDir): compiler = None if platform.system() == 'Darwin': compiler = "clang_64" elif platform.system() == 'Linux': if __is64BitOS__(): compiler = "gcc_64" else: compiler = "gcc" else: test.warning("Called substituteDefaultCompiler() on wrong platform.", "This is a script error.") if compiler: qtversion = os.path.join(settingsDir, "QtProject", 'qtcreator', 'qtversion.xml') __substitute__(qtversion, "SQUISH_DEFAULT_COMPILER", compiler) test.log("Injected default compiler '%s' to qtversion.xml..." 
% compiler) def __guessABI__(supportedABIs, use64Bit): if platform.system() == 'Linux': supportedABIs = filter(lambda x: 'linux' in x, supportedABIs) elif platform.system() == 'Darwin': supportedABIs = filter(lambda x: 'macos' in x, supportedABIs) if use64Bit: searchFor = "64bit" else: searchFor = "32bit" for abi in supportedABIs: if searchFor in abi: return abi if use64Bit: test.log("Supported ABIs do not include an ABI supporting 64bit - trying 32bit now") return __guessABI__(supportedABIs, False) test.fatal('Could not guess ABI!', 'Given ABIs: %s' % str(supportedABIs)) return '' def __is64BitOS__(): if platform.system() in ('Microsoft', 'Windows'): machine = os.getenv("PROCESSOR_ARCHITEW6432", os.getenv("PROCESSOR_ARCHITECTURE")) else: machine = platform.machine() if machine: return '64' in machine else: return False def substituteUnchosenTargetABIs(settingsDir): class ReadState: NONE = 0 READING = 1 CLOSED = 2 on64Bit = __is64BitOS__() toolchains = os.path.join(settingsDir, "QtProject", 'qtcreator', 'toolchains.xml') origToolchains = toolchains + "_orig" os.rename(toolchains, origToolchains) origFile = open(origToolchains, "r") modifiedFile = open(toolchains, "w") supported = [] readState = ReadState.NONE for line in origFile: if readState == ReadState.NONE: if "SupportedAbis" in line: supported = [] readState = ReadState.READING elif readState == ReadState.READING: if "</valuelist>" in line: readState = ReadState.CLOSED else: supported.append(line.split(">", 1)[1].rsplit("<", 1)[0]) elif readState == ReadState.CLOSED: if "SupportedAbis" in line: supported = [] readState = ReadState.READING elif "SET_BY_SQUISH" in line: line = line.replace("SET_BY_SQUISH", __guessABI__(supported, on64Bit)) modifiedFile.write(line) origFile.close() modifiedFile.close() os.remove(origToolchains) test.log("Substituted unchosen ABIs inside toolchains.xml...") def copySettingsToTmpDir(destination=None, omitFiles=[]): global tmpSettingsDir, SettingsPath, origSettingsDir if destination: destination = os.path.abspath(destination) if not os.path.exists(destination): os.makedirs(destination) elif os.path.isfile(destination): test.warning("Provided destination for settings exists as file.", "Creating another folder for being able to execute tests.") destination = tempDir() else: destination = tempDir() tmpSettingsDir = destination pathLen = len(origSettingsDir) + 1 for r,d,f in os.walk(origSettingsDir): currentPath = os.path.join(tmpSettingsDir, r[pathLen:]) for dd in d: folder = os.path.join(currentPath, dd) if not os.path.exists(folder): os.makedirs(folder) for ff in f: if not ff in omitFiles: shutil.copy(os.path.join(r, ff), currentPath) if platform.system() in ('Linux', 'Darwin'): substituteTildeWithinToolchains(tmpSettingsDir) substituteDefaultCompiler(tmpSettingsDir) substituteUnchosenTargetABIs(tmpSettingsDir) SettingsPath = ' -settingspath "%s"' % tmpSettingsDir # current dir is directory holding qtcreator.py origSettingsDir = os.path.abspath(os.path.join(os.getcwd(), "..", "..", "settings")) if platform.system() in ('Windows', 'Microsoft'): sdkPath = "C:\\QtSDK" origSettingsDir = os.path.join(origSettingsDir, "windows") else: sdkPath = os.path.expanduser("~/QtSDK") origSettingsDir = os.path.join(origSettingsDir, "unix") srcPath = os.getenv("SYSTEST_SRCPATH", sdkPath + "/src") overrideStartApplication() # the following only doesn't work if the test ends in an exception if os.getenv("SYSTEST_NOSETTINGSPATH") != "1": copySettingsToTmpDir() atexit.register(__removeTestingDir__) if 
os.getenv("SYSTEST_WRITE_RESULTS") == "1" and os.getenv("SYSTEST_RESULTS_FOLDER") != None: atexit.register(writeTestResults, os.getenv("SYSTEST_RESULTS_FOLDER"))
lgpl-2.1
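Several of the helpers above (substituteTildeWithinToolchains, substituteDefaultCompiler) funnel through __substitute__(), which rewrites a settings file in place by detouring through a *_orig copy. A self-contained sketch of that helper, exercised on a throwaway temporary file rather than a Qt Creator toolchains.xml:

import os
import tempfile

def substitute(file_name, search, replace):
    # Rename the original aside, stream it back line by line with the
    # replacement applied, then drop the *_orig copy.
    orig = file_name + "_orig"
    os.rename(file_name, orig)
    with open(orig) as src, open(file_name, "w") as dst:
        for line in src:
            dst.write(line.replace(search, replace))
    os.remove(orig)

path = os.path.join(tempfile.mkdtemp(), "toolchains.xml")
with open(path, "w") as f:
    f.write("<value>~/bin/gcc</value>\n")
substitute(path, "~", "/home/tester")
print(open(path).read())   # <value>/home/tester/bin/gcc</value>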
gavinshaw/py_learning
scrapy-online/spiders/deep_spider.py
1
9155
# -*- coding: utf-8 -*- import scrapy from scrapy.spiders import CrawlSpider, Rule from scrapy.linkextractors import LinkExtractor from scrapy.selector import Selector from scrapy import settings from scrapy.exceptions import CloseSpider import os import re import chardet import requests import logging import datetime import urllib import sys sys.path.append('../') import settings reload(sys) from lxml import etree sys.setdefaultencoding('utf8') date=datetime.datetime.now() YEAR=datetime.datetime.now().year MONTH=datetime.datetime.now().month DAY=datetime.datetime.now().day date_str=date.strftime("%Y-%m-%d_%H") # 设置感兴趣的app名称 I_want_apps = set(['Crawler', 'u.memory']) LOG_FILENAME = "/logs/log_%s.txt"%date_str logging.basicConfig(filename=LOG_FILENAME, level=logging.NOTSET) logging.debug("This message should go to the log file") class Article(scrapy.Item): title = scrapy.Field() catalog_id = scrapy.Field() url = scrapy.Field() description = scrapy.Field() body = scrapy.Field() publish_time = scrapy.Field() source_site = scrapy.Field() scrapy_id = scrapy.Field() struct=scrapy.Field() class DeepSpider(CrawlSpider): name = "Deep" def __init__(self, rule): self.rule = rule self.name = rule.name self.catalog_id = rule.catalog_id self.allow_url = rule.allow_url self.allowed_domains = rule.allow_domains.split(",") self.start_urls = rule.start_urls.split(",") rule_list = [] self.body_startpage=rule.body_startpage # 添加`下一页`的规则 if rule.next_page: rule_list.append( Rule(LinkExtractor(restrict_xpaths=rule.next_page,process_value=lambda x: x.replace('\r\n','')) ,process_links="link_filtering")) # 添加抽取文章链接的规则 rule_list.append(Rule(LinkExtractor( allow=[rule.allow_url], restrict_xpaths=[rule.extract_from],process_value=lambda x: x.replace('\r\n','') ), callback='parse_item')) self.rules = tuple(rule_list) super(DeepSpider, self).__init__() def link_filtering(self, links): ret = [] for link in links: url = link.url print url return links def parse_item(self, response): self.log('Hi, this is an article page! 
%s' % response.url) article = Article() publish_time = response.xpath(self.rule.publish_time_xpath).extract() article["publish_time"] = publish_time[0] if publish_time else "" descript = response.xpath(self.rule.desc_xpath).extract() article["description"] = descript[0] if descript else "" if self.rule.frequency and article["publish_time"]: publish_time=re.search(r'\d{4}-\d{2}-\d{2}', article["publish_time"]).group(0) print publish_time if publish_time: publish_date = publish_time.split("-") t_year=publish_date[0] t_month=publish_date[1] t_date=publish_date[2] print t_year,YEAR,self.rule.frequency if self.rule.frequency=='y' and str(t_year)!=str(YEAR) : print "NOT" return [] raise CloseSpider('t_year!=YEAR') if self.rule.frequency=='m' and str(t_month)!=str(MONTH): return [] raise CloseSpider('t_month!=MONTH') article["url"] = response.url.replace("&#10;","") article['catalog_id'] = self.catalog_id article['scrapy_id'] = self.rule.id article['struct'] = self.rule.struct title = response.xpath(self.rule.title_xpath).extract() article["title"] = title[0] if title else "" body = response.xpath(self.rule.body_xpath).extract() # article["body"] = '\n'.join(body) if body else "" article["body"] = body[0] if body else "" #print response if self.body_startpage: startpage=self.body_startpage else: startpage=2 images_urls = [] for row in range(startpage, 15): next_page_url = article["url"] url_ext=os.path.splitext(next_page_url)[1] next_page_url_t="" if url_ext=='.html': next_page_url_t = "%s_%s%s" % (next_page_url[:-5], row, next_page_url[-5:]) elif url_ext=='.shtml': next_page_url_t = "%s_%s%s" % (next_page_url[:-6], row, next_page_url[-6:]) #status = urllib.urlopen(next_page_url_t).code if next_page_url_t=="": break #r = requests.get(next_page_url_t,proxies=settings.PROXIES, allow_redirects=False) r = requests.get(next_page_url_t, allow_redirects=False) status=r.status_code if (status != 200): break print next_page_url_t content = r.content #detection the charset char_detection= chardet.detect(content) char_encode=char_detection['encoding'] if char_encode.strip()=='' or char_encode.lower()=='utf-8': char_encode="utf-8" content = content.decode('utf-8') elif char_encode.lower()=='gbk' or char_encode.lower()=='gb2312': content = content.decode("gbk").encode('utf-8') else: content = content.decode('gbk', 'ignore').encode('utf-8') content = content.decode('utf-8') print char_encode #content = content.encode(char_encode) temp_body = Selector(text=content).xpath(self.rule.body_xpath).extract() images = Selector(text=article["body"]).xpath(self.rule.image_xpath).extract() images_urls = images_urls + images if len(temp_body)!=1: continue #有的网站内容同样返回200 情况处理 if article["body"]==temp_body[0]: break article["body"] = article["body"] + temp_body[0] # 下载图片到本地 if self.rule.image_xpath: images_2 = Selector(text=article["body"]).xpath(self.rule.image_xpath).extract() images_urls = images_urls + images_2 article["body"] = self.process_images(images_urls, article['body']) article["body"] = self.strip_a(article['body']) # strip a link in the content article["body"] = self.strip_js(article['body']) # strip JavaScript in the content #article["body"] = self.strip_iframe(article['body']) # strip iframe in the content source_site = response.xpath(self.rule.source_site_xpath).extract() article["source_site"] = source_site[0] if source_site else "" return article def re_images(self, html): import re html = html.encode("utf-8") pat = '<img src="(.*?)"' reslist = re.findall(pat, html, re.M) return reslist # for x in 
reslist: # print x # 去掉a标签 def strip_a(self, html): dr = re.compile(r'<a[^>]*>', re.S) dd = dr.sub('', html) drr = re.compile(r'<\/a>', re.S) d = drr.sub('', dd) return d # 去掉javascript代码 def strip_js(self, html): dr = re.compile('<\s*script[^>]*>[^<]*<\s*/\s*script\s*>', re.I) # Script dd = dr.sub('', html) return dd def strip_iframe(self,html): dr = re.compile('<\s*iframe[^>]*>[^<]*<\s*/\s*iframe\s*>', re.I) # Script dd = dr.sub('', html) return dd def process_images(self, image_urls, body): images = [] # a.replace('word', 'python') dir_path = '%s/%s' % ("/sdb/images_data/images", datetime.date.today()) dir_path_string = '%s/%s' % ("/images", datetime.date.today()) body = body.encode("utf-8") # dir_path="images" if not os.path.exists(dir_path): os.makedirs(dir_path,0777) for image_url in image_urls: us = image_url.split('/')[3:] image_file_name = '_'.join(us) file_path = '%s/%s' % (dir_path, image_file_name) file_path_string = '%s/%s' % (dir_path_string, image_file_name) image_temp = image_url ## or (image_url.find(self.allow_url) == -1) if (image_url.find("http") == -1): image_url = self.allow_url + image_url #print image_url images.append(file_path) body = body.replace(image_temp, file_path_string) # 图片文件替换 if os.path.exists(file_path): continue with open(file_path, 'wb') as handle: # image_url=image_url.strip("/") #response = requests.get(image_url, proxies=settings.PROXIES,stream=True) response = requests.get(image_url, stream=True) for block in response.iter_content(1024): if not block: break handle.write(block) return body
mit
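The spider in the record above pages through article URLs and streams each image to disk with requests before rewriting the image paths in the article body. A minimal, self-contained sketch of that streaming-download idiom follows; the URL, destination path, and function name are illustrative, not taken from the spider's configuration:

```python
import os
import requests

def download_image(image_url, dest_path, chunk_size=1024):
    """Stream an image to disk in fixed-size chunks, skipping files that already exist."""
    if os.path.exists(dest_path):
        return dest_path  # the spider above likewise leaves previously downloaded files alone
    response = requests.get(image_url, stream=True)
    response.raise_for_status()
    with open(dest_path, "wb") as handle:
        for block in response.iter_content(chunk_size):
            if not block:
                break
            handle.write(block)
    return dest_path

# Hypothetical usage:
# download_image("http://example.com/pic.jpg", "/tmp/pic.jpg")
```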
ido-ran/ran-smart-frame2
web/server/lib/pyasn1_modules/rfc3281.py
3
9866
# coding: utf-8 # # This file is part of pyasn1-modules software. # # Created by Stanisław Pitucha with asn1ate tool. # Copyright (c) 2005-2018, Ilya Etingof <[email protected]> # License: http://snmplabs.com/pyasn1/license.html # # An Internet Attribute Certificate Profile for Authorization # # ASN.1 source from: # http://www.ietf.org/rfc/rfc3281.txt # from pyasn1.type import char from pyasn1.type import constraint from pyasn1.type import namedtype from pyasn1.type import namedval from pyasn1.type import tag from pyasn1.type import univ from pyasn1.type import useful from pyasn1_modules import rfc3280 MAX = float('inf') def _buildOid(*components): output = [] for x in tuple(components): if isinstance(x, univ.ObjectIdentifier): output.extend(list(x)) else: output.append(int(x)) return univ.ObjectIdentifier(output) class ObjectDigestInfo(univ.Sequence): pass ObjectDigestInfo.componentType = namedtype.NamedTypes( namedtype.NamedType('digestedObjectType', univ.Enumerated( namedValues=namedval.NamedValues(('publicKey', 0), ('publicKeyCert', 1), ('otherObjectTypes', 2)))), namedtype.OptionalNamedType('otherObjectTypeID', univ.ObjectIdentifier()), namedtype.NamedType('digestAlgorithm', rfc3280.AlgorithmIdentifier()), namedtype.NamedType('objectDigest', univ.BitString()) ) class IssuerSerial(univ.Sequence): pass IssuerSerial.componentType = namedtype.NamedTypes( namedtype.NamedType('issuer', rfc3280.GeneralNames()), namedtype.NamedType('serial', rfc3280.CertificateSerialNumber()), namedtype.OptionalNamedType('issuerUID', rfc3280.UniqueIdentifier()) ) class TargetCert(univ.Sequence): pass TargetCert.componentType = namedtype.NamedTypes( namedtype.NamedType('targetCertificate', IssuerSerial()), namedtype.OptionalNamedType('targetName', rfc3280.GeneralName()), namedtype.OptionalNamedType('certDigestInfo', ObjectDigestInfo()) ) class Target(univ.Choice): pass Target.componentType = namedtype.NamedTypes( namedtype.NamedType('targetName', rfc3280.GeneralName().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), namedtype.NamedType('targetGroup', rfc3280.GeneralName().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), namedtype.NamedType('targetCert', TargetCert().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))) ) class Targets(univ.SequenceOf): pass Targets.componentType = Target() class ProxyInfo(univ.SequenceOf): pass ProxyInfo.componentType = Targets() id_at_role = _buildOid(rfc3280.id_at, 72) id_pe_aaControls = _buildOid(rfc3280.id_pe, 6) id_ce_targetInformation = _buildOid(rfc3280.id_ce, 55) id_pe_ac_auditIdentity = _buildOid(rfc3280.id_pe, 4) class ClassList(univ.BitString): pass ClassList.namedValues = namedval.NamedValues( ('unmarked', 0), ('unclassified', 1), ('restricted', 2), ('confidential', 3), ('secret', 4), ('topSecret', 5) ) class SecurityCategory(univ.Sequence): pass SecurityCategory.componentType = namedtype.NamedTypes( namedtype.NamedType('type', univ.ObjectIdentifier().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) ) class Clearance(univ.Sequence): pass Clearance.componentType = namedtype.NamedTypes( namedtype.NamedType('policyId', univ.ObjectIdentifier().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), namedtype.DefaultedNamedType('classList', ClassList().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 
1)).subtype( value="unclassified")), namedtype.OptionalNamedType('securityCategories', univ.SetOf(componentType=SecurityCategory()).subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))) ) class AttCertVersion(univ.Integer): pass AttCertVersion.namedValues = namedval.NamedValues( ('v2', 1) ) id_aca = _buildOid(rfc3280.id_pkix, 10) id_at_clearance = _buildOid(2, 5, 1, 5, 55) class AttrSpec(univ.SequenceOf): pass AttrSpec.componentType = univ.ObjectIdentifier() class AAControls(univ.Sequence): pass AAControls.componentType = namedtype.NamedTypes( namedtype.OptionalNamedType('pathLenConstraint', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))), namedtype.OptionalNamedType('permittedAttrs', AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), namedtype.OptionalNamedType('excludedAttrs', AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), namedtype.DefaultedNamedType('permitUnSpecified', univ.Boolean().subtype(value=1)) ) class AttCertValidityPeriod(univ.Sequence): pass AttCertValidityPeriod.componentType = namedtype.NamedTypes( namedtype.NamedType('notBeforeTime', useful.GeneralizedTime()), namedtype.NamedType('notAfterTime', useful.GeneralizedTime()) ) id_aca_authenticationInfo = _buildOid(id_aca, 1) class V2Form(univ.Sequence): pass V2Form.componentType = namedtype.NamedTypes( namedtype.OptionalNamedType('issuerName', rfc3280.GeneralNames()), namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))) ) class AttCertIssuer(univ.Choice): pass AttCertIssuer.componentType = namedtype.NamedTypes( namedtype.NamedType('v1Form', rfc3280.GeneralNames()), namedtype.NamedType('v2Form', V2Form().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))) ) class Holder(univ.Sequence): pass Holder.componentType = namedtype.NamedTypes( namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))), namedtype.OptionalNamedType('entityName', rfc3280.GeneralNames().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))), namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))) ) class AttributeCertificateInfo(univ.Sequence): pass AttributeCertificateInfo.componentType = namedtype.NamedTypes( namedtype.NamedType('version', AttCertVersion()), namedtype.NamedType('holder', Holder()), namedtype.NamedType('issuer', AttCertIssuer()), namedtype.NamedType('signature', rfc3280.AlgorithmIdentifier()), namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()), namedtype.NamedType('attrCertValidityPeriod', AttCertValidityPeriod()), namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc3280.Attribute())), namedtype.OptionalNamedType('issuerUniqueID', rfc3280.UniqueIdentifier()), namedtype.OptionalNamedType('extensions', rfc3280.Extensions()) ) class AttributeCertificate(univ.Sequence): pass AttributeCertificate.componentType = namedtype.NamedTypes( namedtype.NamedType('acinfo', AttributeCertificateInfo()), namedtype.NamedType('signatureAlgorithm', rfc3280.AlgorithmIdentifier()), namedtype.NamedType('signatureValue', univ.BitString()) ) 
id_mod = _buildOid(rfc3280.id_pkix, 0) id_mod_attribute_cert = _buildOid(id_mod, 12) id_aca_accessIdentity = _buildOid(id_aca, 2) class RoleSyntax(univ.Sequence): pass RoleSyntax.componentType = namedtype.NamedTypes( namedtype.OptionalNamedType('roleAuthority', rfc3280.GeneralNames().subtype( implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))), namedtype.NamedType('roleName', rfc3280.GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))) ) id_aca_chargingIdentity = _buildOid(id_aca, 3) class ACClearAttrs(univ.Sequence): pass ACClearAttrs.componentType = namedtype.NamedTypes( namedtype.NamedType('acIssuer', rfc3280.GeneralName()), namedtype.NamedType('acSerial', univ.Integer()), namedtype.NamedType('attrs', univ.SequenceOf(componentType=rfc3280.Attribute())) ) id_aca_group = _buildOid(id_aca, 4) id_pe_ac_proxying = _buildOid(rfc3280.id_pe, 10) class SvceAuthInfo(univ.Sequence): pass SvceAuthInfo.componentType = namedtype.NamedTypes( namedtype.NamedType('service', rfc3280.GeneralName()), namedtype.NamedType('ident', rfc3280.GeneralName()), namedtype.OptionalNamedType('authInfo', univ.OctetString()) ) class IetfAttrSyntax(univ.Sequence): pass IetfAttrSyntax.componentType = namedtype.NamedTypes( namedtype.OptionalNamedType( 'policyAuthority', rfc3280.GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)) ), namedtype.NamedType( 'values', univ.SequenceOf( componentType=univ.Choice( componentType=namedtype.NamedTypes( namedtype.NamedType('octets', univ.OctetString()), namedtype.NamedType('oid', univ.ObjectIdentifier()), namedtype.NamedType('string', char.UTF8String()) ) ) ) ) ) id_aca_encAttrs = _buildOid(id_aca, 6)
mit
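The rfc3281 module above only declares the ASN.1 schema; actually reading a certificate goes through pyasn1's codec layer. A short sketch of that decoding step, under the assumption that `der_bytes` already holds a DER-encoded attribute certificate (hypothetical input, not part of the module):

```python
from pyasn1.codec.der import decoder
from pyasn1_modules import rfc3281

def parse_attribute_certificate(der_bytes):
    """Decode DER bytes into an rfc3281.AttributeCertificate and return its serial number."""
    cert, rest = decoder.decode(der_bytes, asn1Spec=rfc3281.AttributeCertificate())
    assert not rest  # the input should contain exactly one certificate and nothing else
    acinfo = cert['acinfo']          # components are addressed by their NamedTypes names
    return int(acinfo['serialNumber'])
```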
ccalleu/halo-halo
CSipSimple/jni/pjsip/sources/pjsip-apps/src/python/samples/registration.py
60
2047
# $Id: registration.py 2171 2008-07-24 09:01:33Z bennylp $ # # SIP account and registration sample. In this sample, the program # will block to wait until registration is complete # # Copyright (C) 2003-2008 Benny Prijono <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # import sys import pjsua as pj import threading def log_cb(level, str, len): print str, class MyAccountCallback(pj.AccountCallback): sem = None def __init__(self, account): pj.AccountCallback.__init__(self, account) def wait(self): self.sem = threading.Semaphore(0) self.sem.acquire() def on_reg_state(self): if self.sem: if self.account.info().reg_status >= 200: self.sem.release() lib = pj.Lib() try: lib.init(log_cfg = pj.LogConfig(level=4, callback=log_cb)) lib.create_transport(pj.TransportType.UDP, pj.TransportConfig(5080)) lib.start() acc = lib.create_account(pj.AccountConfig("pjsip.org", "bennylp", "***")) acc_cb = MyAccountCallback(acc) acc.set_callback(acc_cb) acc_cb.wait() print "\n" print "Registration complete, status=", acc.info().reg_status, \ "(" + acc.info().reg_reason + ")" print "\nPress ENTER to quit" sys.stdin.readline() lib.destroy() lib = None except pj.Error, e: print "Exception: " + str(e) lib.destroy()
gpl-3.0
chuan9/chromium-crosswalk
native_client_sdk/src/build_tools/tests/build_artifacts_test.py
12
11804
#!/usr/bin/env python # Copyright (c) 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import ntpath import posixpath import sys import collections import unittest SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR) CHROME_SRC = os.path.dirname(os.path.dirname(os.path.dirname(BUILD_TOOLS_DIR))) MOCK_DIR = os.path.join(CHROME_SRC, 'third_party', 'pymock') # For the mock library sys.path.append(MOCK_DIR) from mock import call, patch, Mock sys.path.append(BUILD_TOOLS_DIR) import build_artifacts class BasePosixTestCase(unittest.TestCase): def setUp(self): self.addCleanup(patch.stopall) patch('build_artifacts.PLATFORM', 'posix').start() patch('build_artifacts.BUILD_ARCHIVE_DIR', '/archive_dir/').start() patch('os.path.join', posixpath.join).start() class PosixTestCase(BasePosixTestCase): def setUp(self): BasePosixTestCase.setUp(self) def testGetToolchainNaClLib(self): tests = [ (('newlib', 'x86_32'), 'foo/x86_64-nacl/lib32'), (('newlib', 'x86_64'), 'foo/x86_64-nacl/lib'), (('newlib', 'arm'), 'foo/arm-nacl/lib'), (('glibc', 'x86_32'), 'foo/x86_64-nacl/lib32'), (('glibc', 'x86_64'), 'foo/x86_64-nacl/lib'), (('bionic', 'arm'), 'foo/arm-nacl/lib'), (('pnacl', None), 'foo/le32-nacl/lib'), ] for test in tests: self.assertEqual( build_artifacts.GetToolchainNaClLib(test[0][0], 'foo', test[0][1]), test[1]) def testGetGypBuiltLib(self): tests = [ (('newlib', 'x86_32'), 'foo/Release/gen/tc_newlib/lib32'), (('newlib', 'x86_64'), 'foo/Release/gen/tc_newlib/lib64'), (('newlib', 'arm'), 'foo/Release/gen/tc_newlib/libarm'), (('glibc', 'x86_32'), 'foo/Release/gen/tc_glibc/lib32'), (('glibc', 'x86_64'), 'foo/Release/gen/tc_glibc/lib64'), (('pnacl', None), 'foo/Release/gen/tc_pnacl_newlib/lib') ] for test in tests: self.assertEqual( build_artifacts.GetGypBuiltLib('foo', test[0][0], test[0][1]), test[1]) def testGetGypToolchainLib(self): tests = [ (('newlib', 'x86_32'), 'foo/Release/gen/sdk/posix_x86/nacl_x86_newlib/x86_64-nacl/lib32'), (('newlib', 'x86_64'), 'foo/Release/gen/sdk/posix_x86/nacl_x86_newlib/x86_64-nacl/lib'), (('newlib', 'arm'), 'foo/Release/gen/sdk/posix_x86/nacl_arm_newlib/arm-nacl/lib'), (('glibc', 'x86_32'), 'foo/Release/gen/sdk/posix_x86/nacl_x86_glibc/x86_64-nacl/lib32'), (('glibc', 'x86_64'), 'foo/Release/gen/sdk/posix_x86/nacl_x86_glibc/x86_64-nacl/lib'), # Bionic uses the newlib toolchain lib directory (('bionic', 'arm'), 'foo/Release/gen/sdk/posix_x86/nacl_arm_newlib/arm-nacl/lib'), (('pnacl', None), 'foo/Release/gen/sdk/posix_x86/pnacl_newlib/le32-nacl/lib'), ] for test in tests: self.assertEqual( build_artifacts.GetGypToolchainLib('foo', test[0][0], test[0][1]), test[1]) @patch('build_artifacts.all_archives', ['foo.tar.bz2', 'bar.tar.bz2']) @patch('build_version.ChromeMajorVersion', Mock(return_value='40')) @patch('build_version.ChromeRevision', Mock(return_value='302630')) @patch('build_version.ChromeCommitPosition', Mock(return_value= '1492c3d296476fe12cafecabba6ebabe-refs/heads/master@{#302630}')) @patch('buildbot_common.Archive') def testUploadArchives(self, archive_mock): build_artifacts.UploadArchives() cwd = '/archive_dir/' bucket_path = 'native-client-sdk/archives/40-302630-1492c3d29' archive_mock.assert_has_calls([ call('foo.tar.bz2', bucket_path, cwd=cwd, step_link=False), call('foo.tar.bz2.sha1', bucket_path, cwd=cwd, step_link=False), call('bar.tar.bz2', bucket_path, cwd=cwd, step_link=False), call('bar.tar.bz2.sha1', 
bucket_path, cwd=cwd, step_link=False) ]) class GypNinjaPosixTestCase(BasePosixTestCase): def setUp(self): BasePosixTestCase.setUp(self) patch('sys.executable', 'python').start() patch('build_artifacts.SRC_DIR', 'src_dir').start() patch('os.environ', {}).start() self.run_mock = patch('buildbot_common.Run').start() self.options_mock = patch('build_artifacts.options').start() self.options_mock.mac_sdk = False self.options_mock.no_arm_trusted = False self.gyp_defines_base = ['nacl_allow_thin_archives=0'] def testSimple(self): build_artifacts.GypNinjaBuild( None, 'gyp.py', 'foo.gyp', 'target', 'out_dir') self.run_mock.assert_has_calls([ call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G', 'output_dir=out_dir'], cwd='src_dir', env={'GYP_GENERATORS': 'ninja', 'GYP_DEFINES': ' '.join(self.gyp_defines_base)}), call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir') ]) def testTargetArch(self): build_artifacts.GypNinjaBuild( 'x64', 'gyp.py', 'foo.gyp', 'target', 'out_dir') self.run_mock.assert_has_calls([ call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G', 'output_dir=out_dir'], cwd='src_dir', env={ 'GYP_GENERATORS': 'ninja', 'GYP_DEFINES': ' '.join(self.gyp_defines_base + ['target_arch=x64']), }), call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir') ]) def testMultipleTargets(self): build_artifacts.GypNinjaBuild( None, 'gyp.py', 'foo.gyp', ['target1', 'target2'], 'out_dir') self.run_mock.assert_has_calls([ call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G', 'output_dir=out_dir'], cwd='src_dir', env={'GYP_GENERATORS': 'ninja', 'GYP_DEFINES': ' '.join(self.gyp_defines_base)}), call(['ninja', '-C', 'out_dir/Release', 'target1', 'target2'], cwd='src_dir') ]) def testMacSdk(self): build_artifacts.PLATFORM = 'mac' self.options_mock.mac_sdk = '10.6' build_artifacts.GypNinjaBuild( None, 'gyp.py', 'foo.gyp', 'target', 'out_dir') self.run_mock.assert_has_calls([ call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G', 'output_dir=out_dir'], cwd='src_dir', env={ 'GYP_GENERATORS': 'ninja', 'GYP_DEFINES': ' '.join(self.gyp_defines_base + ['mac_sdk=10.6', 'clang=1']), }), call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir') ]) def testArmLinux(self): build_artifacts.PLATFORM = 'linux' build_artifacts.GypNinjaBuild( 'arm', 'gyp.py', 'foo.gyp', 'target', 'out_dir') self.run_mock.assert_has_calls([ call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G', 'output_dir=out_dir'], cwd='src_dir', env={ 'GYP_CROSSCOMPILE': '1', 'GYP_GENERATORS': 'ninja', 'GYP_DEFINES': ' '.join(self.gyp_defines_base + ['target_arch=arm']), }), call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir') ]) def testNoArmTrusted(self): build_artifacts.PLATFORM = 'linux' self.options_mock.no_arm_trusted = True build_artifacts.GypNinjaBuild( 'arm', 'gyp.py', 'foo.gyp', 'target', 'out_dir') self.run_mock.assert_has_calls([ call(['python', 'gyp.py', 'foo.gyp', '--depth=.', '-G', 'output_dir=out_dir'], cwd='src_dir', env={ 'GYP_CROSSCOMPILE': '1', 'GYP_GENERATORS': 'ninja', 'GYP_DEFINES': ' '.join(self.gyp_defines_base + ['target_arch=arm', 'disable_cross_trusted=1']), }), call(['ninja', '-C', 'out_dir/Release', 'target'], cwd='src_dir') ]) class ArchivePosixTestCase(BasePosixTestCase): def setUp(self): BasePosixTestCase.setUp(self) self.makedir_mock = patch('buildbot_common.MakeDir').start() self.copyfile_mock = patch('buildbot_common.CopyFile').start() self.copydir_mock = patch('buildbot_common.CopyDir').start() self.isdir_mock = patch('os.path.isdir').start() patch('os.path.exists', 
Mock(return_value=False)).start() def dummy_isdir(path): if path == '/archive_dir/posix_foo': return True return False self.isdir_mock.side_effect = dummy_isdir self.archive = build_artifacts.Archive('foo') def testInit(self): self.assertEqual(self.archive.name, 'posix_foo') self.assertEqual(self.archive.archive_name, 'posix_foo.tar.bz2') self.assertEqual(self.archive.archive_path, '/archive_dir/posix_foo.tar.bz2') self.assertEqual(self.archive.dirname, '/archive_dir/posix_foo') self.makedir_mock.assert_called_once_with('/archive_dir/posix_foo') @patch('glob.glob', Mock(side_effect=lambda x: [x])) def testCopySimple(self): self.archive.Copy('/copy_from', ['file1', 'file2']) self.assertEqual(self.copydir_mock.call_count, 0) self.copyfile_mock.assert_has_calls([ call('/copy_from/file1', '/archive_dir/posix_foo/file1'), call('/copy_from/file2', '/archive_dir/posix_foo/file2')]) @patch('glob.glob') def testCopyGlob(self, glob_mock): glob_mock.return_value = ['/copy_from/foo', '/copy_from/bar'] self.archive.Copy('/copy_from', [('*', '')]) glob_mock.assert_called_once_with('/copy_from/*') self.assertEqual(self.copydir_mock.call_count, 0) self.copyfile_mock.assert_has_calls([ call('/copy_from/foo', '/archive_dir/posix_foo/'), call('/copy_from/bar', '/archive_dir/posix_foo/')]) @patch('glob.glob', Mock(side_effect=lambda x: [x])) def testCopyRename(self): self.archive.Copy('/copy_from', [('file1', 'file1_renamed')]) self.assertEqual(self.copydir_mock.call_count, 0) self.copyfile_mock.assert_called_once_with( '/copy_from/file1', '/archive_dir/posix_foo/file1_renamed') @patch('glob.glob', Mock(side_effect=lambda x: [x])) def testCopyNewDir(self): self.archive.Copy('/copy_from', [('file1', 'todir/')]) self.assertEqual(self.copydir_mock.call_count, 0) self.copyfile_mock.assert_called_once_with( '/copy_from/file1', '/archive_dir/posix_foo/todir/file1') @patch('glob.glob', Mock(side_effect=lambda x: [x])) def testCopyDir(self): self.isdir_mock.side_effect = lambda _: True self.archive.Copy('/copy_from', ['dirname']) self.assertEqual(self.copyfile_mock.call_count, 0) self.copydir_mock.assert_called_once_with( '/copy_from/dirname', '/archive_dir/posix_foo/dirname') class WinTestCase(unittest.TestCase): def setUp(self): patch('build_artifacts.PLATFORM', 'win').start() patch('build_artifacts.BUILD_ARCHIVE_DIR', 'c:\\archive_dir\\').start() patch('os.path.join', ntpath.join).start() def tearDown(self): patch.stopall() @patch('os.path.exists', Mock(return_value=False)) @patch('buildbot_common.MakeDir') def testArchiveInit(self, makedir_mock): archive = build_artifacts.Archive('foo') self.assertEqual(archive.name, 'win_foo') self.assertEqual(archive.archive_name, 'win_foo.tar.bz2') self.assertEqual(archive.archive_path, r'c:\archive_dir\win_foo.tar.bz2') self.assertEqual(archive.dirname, r'c:\archive_dir\win_foo') makedir_mock.assert_called_once_with(r'c:\archive_dir\win_foo') if __name__ == '__main__': unittest.main()
bsd-3-clause
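The test module above leans on patched constants and `assert_has_calls` so that platform-specific build and upload steps can be verified without running any external command. A condensed, self-contained sketch of that call-recording pattern is below; it uses the stdlib `unittest.mock` rather than the vendored pymock the repository imports for Python 2, and the archive names and bucket path are illustrative:

```python
import unittest
from unittest import mock


class ArchiveUploadTest(unittest.TestCase):
    """Patch the collaborator, exercise the code path, then assert the exact calls made."""

    def test_upload_calls(self):
        archive = mock.Mock()
        # Stand-in for the code under test: upload each archive and its .sha1 companion.
        for name in ["foo.tar.bz2", "bar.tar.bz2"]:
            archive(name, "bucket/path", cwd="/archive_dir/", step_link=False)
            archive(name + ".sha1", "bucket/path", cwd="/archive_dir/", step_link=False)
        archive.assert_has_calls([
            mock.call("foo.tar.bz2", "bucket/path", cwd="/archive_dir/", step_link=False),
            mock.call("foo.tar.bz2.sha1", "bucket/path", cwd="/archive_dir/", step_link=False),
            mock.call("bar.tar.bz2", "bucket/path", cwd="/archive_dir/", step_link=False),
            mock.call("bar.tar.bz2.sha1", "bucket/path", cwd="/archive_dir/", step_link=False),
        ])


if __name__ == "__main__":
    unittest.main()
```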
ianberinger/mailinabox
management/web_update.py
7
9141
# Creates an nginx configuration file so we serve HTTP/HTTPS on all # domains for which a mail account has been set up. ######################################################################## import os.path, re, rtyaml from mailconfig import get_mail_domains from dns_update import get_custom_dns_config, get_dns_zones from ssl_certificates import get_ssl_certificates, get_domain_ssl_files, check_certificate from utils import shell, safe_domain_name, sort_domains def get_web_domains(env, include_www_redirects=True, exclude_dns_elsewhere=True): # What domains should we serve HTTP(S) for? domains = set() # Serve web for all mail domains so that we might at least # provide auto-discover of email settings, and also a static website # if the user wants to make one. domains |= get_mail_domains(env) if include_www_redirects: # Add 'www.' subdomains that we want to provide default redirects # to the main domain for. We'll add 'www.' to any DNS zones, i.e. # the topmost of each domain we serve. domains |= set('www.' + zone for zone, zonefile in get_dns_zones(env)) if exclude_dns_elsewhere: # ...Unless the domain has an A/AAAA record that maps it to a different # IP address than this box. Remove those domains from our list. domains -= get_domains_with_a_records(env) # Ensure the PRIMARY_HOSTNAME is in the list so we can serve webmail # as well as Z-Push for Exchange ActiveSync. This can't be removed # by a custom A/AAAA record and is never a 'www.' redirect. domains.add(env['PRIMARY_HOSTNAME']) # Sort the list so the nginx conf gets written in a stable order. domains = sort_domains(domains, env) return domains def get_domains_with_a_records(env): domains = set() dns = get_custom_dns_config(env) for domain, rtype, value in dns: if rtype == "CNAME" or (rtype in ("A", "AAAA") and value not in ("local", env['PUBLIC_IP'])): domains.add(domain) return domains def get_web_domains_with_root_overrides(env): # Load custom settings so we can tell what domains have a redirect or proxy set up on '/', # which means static hosting is not happening. root_overrides = { } nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml") if os.path.exists(nginx_conf_custom_fn): custom_settings = rtyaml.load(open(nginx_conf_custom_fn)) for domain, settings in custom_settings.items(): for type, value in [('redirect', settings.get('redirects', {}).get('/')), ('proxy', settings.get('proxies', {}).get('/'))]: if value: root_overrides[domain] = (type, value) return root_overrides def do_web_update(env): # Pre-load what SSL certificates we will use for each domain. ssl_certificates = get_ssl_certificates(env) # Build an nginx configuration file. nginx_conf = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-top.conf")).read() # Load the templates. template0 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx.conf")).read() template1 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-alldomains.conf")).read() template2 = open(os.path.join(os.path.dirname(__file__), "../conf/nginx-primaryonly.conf")).read() template3 = "\trewrite ^(.*) https://$REDIRECT_DOMAIN$1 permanent;\n" # Add the PRIMARY_HOST configuration first so it becomes nginx's default server. nginx_conf += make_domain_config(env['PRIMARY_HOSTNAME'], [template0, template1, template2], ssl_certificates, env) # Add configuration all other web domains. 
has_root_proxy_or_redirect = get_web_domains_with_root_overrides(env) web_domains_not_redirect = get_web_domains(env, include_www_redirects=False) for domain in get_web_domains(env): if domain == env['PRIMARY_HOSTNAME']: # PRIMARY_HOSTNAME is handled above. continue if domain in web_domains_not_redirect: # This is a regular domain. if domain not in has_root_proxy_or_redirect: nginx_conf += make_domain_config(domain, [template0, template1], ssl_certificates, env) else: nginx_conf += make_domain_config(domain, [template0], ssl_certificates, env) else: # Add default 'www.' redirect. nginx_conf += make_domain_config(domain, [template0, template3], ssl_certificates, env) # Did the file change? If not, don't bother writing & restarting nginx. nginx_conf_fn = "/etc/nginx/conf.d/local.conf" if os.path.exists(nginx_conf_fn): with open(nginx_conf_fn) as f: if f.read() == nginx_conf: return "" # Save the file. with open(nginx_conf_fn, "w") as f: f.write(nginx_conf) # Kick nginx. Since this might be called from the web admin # don't do a 'restart'. That would kill the connection before # the API returns its response. A 'reload' should be good # enough and doesn't break any open connections. shell('check_call', ["/usr/sbin/service", "nginx", "reload"]) return "web updated\n" def make_domain_config(domain, templates, ssl_certificates, env): # GET SOME VARIABLES # Where will its root directory be for static files? root = get_web_root(domain, env) # What private key and SSL certificate will we use for this domain? tls_cert = get_domain_ssl_files(domain, ssl_certificates, env) # ADDITIONAL DIRECTIVES. nginx_conf_extra = "" # Because the certificate may change, we should recognize this so we # can trigger an nginx update. def hashfile(filepath): import hashlib sha1 = hashlib.sha1() f = open(filepath, 'rb') try: sha1.update(f.read()) finally: f.close() return sha1.hexdigest() nginx_conf_extra += "# ssl files sha1: %s / %s\n" % (hashfile(tls_cert["private-key"]), hashfile(tls_cert["certificate"])) # Add in any user customizations in YAML format. hsts = "yes" nginx_conf_custom_fn = os.path.join(env["STORAGE_ROOT"], "www/custom.yaml") if os.path.exists(nginx_conf_custom_fn): yaml = rtyaml.load(open(nginx_conf_custom_fn)) if domain in yaml: yaml = yaml[domain] # any proxy or redirect here? for path, url in yaml.get("proxies", {}).items(): nginx_conf_extra += "\tlocation %s {\n\t\tproxy_pass %s;\n\t}\n" % (path, url) for path, url in yaml.get("redirects", {}).items(): nginx_conf_extra += "\trewrite %s %s permanent;\n" % (path, url) # override the HSTS directive type hsts = yaml.get("hsts", hsts) # Add the HSTS header. if hsts == "yes": nginx_conf_extra += "add_header Strict-Transport-Security max-age=31536000;\n" elif hsts == "preload": nginx_conf_extra += "add_header Strict-Transport-Security \"max-age=10886400; includeSubDomains; preload\";\n" # Add in any user customizations in the includes/ folder. nginx_conf_custom_include = os.path.join(env["STORAGE_ROOT"], "www", safe_domain_name(domain) + ".conf") if os.path.exists(nginx_conf_custom_include): nginx_conf_extra += "\tinclude %s;\n" % (nginx_conf_custom_include) # PUT IT ALL TOGETHER # Combine the pieces. Iteratively place each template into the "# ADDITIONAL DIRECTIVES HERE" placeholder # of the previous template. nginx_conf = "# ADDITIONAL DIRECTIVES HERE\n" for t in templates + [nginx_conf_extra]: nginx_conf = re.sub("[ \t]*# ADDITIONAL DIRECTIVES HERE *\n", t, nginx_conf) # Replace substitution strings in the template & return. 
nginx_conf = nginx_conf.replace("$STORAGE_ROOT", env['STORAGE_ROOT']) nginx_conf = nginx_conf.replace("$HOSTNAME", domain) nginx_conf = nginx_conf.replace("$ROOT", root) nginx_conf = nginx_conf.replace("$SSL_KEY", tls_cert["private-key"]) nginx_conf = nginx_conf.replace("$SSL_CERTIFICATE", tls_cert["certificate"]) nginx_conf = nginx_conf.replace("$REDIRECT_DOMAIN", re.sub(r"^www\.", "", domain)) # for default www redirects to parent domain return nginx_conf def get_web_root(domain, env, test_exists=True): # Try STORAGE_ROOT/web/domain_name if it exists, but fall back to STORAGE_ROOT/web/default. for test_domain in (domain, 'default'): root = os.path.join(env["STORAGE_ROOT"], "www", safe_domain_name(test_domain)) if os.path.exists(root) or not test_exists: break return root def get_web_domains_info(env): www_redirects = set(get_web_domains(env)) - set(get_web_domains(env, include_www_redirects=False)) has_root_proxy_or_redirect = set(get_web_domains_with_root_overrides(env)) ssl_certificates = get_ssl_certificates(env) # for the SSL config panel, get cert status def check_cert(domain): tls_cert = get_domain_ssl_files(domain, ssl_certificates, env, allow_missing_cert=True) if tls_cert is None: return ("danger", "No Certificate Installed") cert_status, cert_status_details = check_certificate(domain, tls_cert["certificate"], tls_cert["private-key"]) if cert_status == "OK": return ("success", "Signed & valid. " + cert_status_details) elif cert_status == "SELF-SIGNED": return ("warning", "Self-signed. Get a signed certificate to stop warnings.") else: return ("danger", "Certificate has a problem: " + cert_status) return [ { "domain": domain, "root": get_web_root(domain, env), "custom_root": get_web_root(domain, env, test_exists=False), "ssl_certificate": check_cert(domain), "static_enabled": domain not in (www_redirects | has_root_proxy_or_redirect), } for domain in get_web_domains(env) ]
cc0-1.0
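One idiom worth calling out in do_web_update above: the generated nginx configuration is compared against the file already on disk, and nginx is only reloaded (not restarted) when the text actually changed, so open connections and in-flight admin requests are not dropped. A small sketch of that write-if-changed idiom, with a hypothetical path and a pluggable reload hook standing in for the service call:

```python
import os

def write_if_changed(path, new_text, on_change=lambda: None):
    """Write new_text to path only when the content differs; run on_change after a real write."""
    if os.path.exists(path):
        with open(path) as f:
            if f.read() == new_text:
                return False  # nothing changed: leave the file and the service alone
    with open(path, "w") as f:
        f.write(new_text)
    # In the module above this is where nginx gets a 'reload', never a 'restart'.
    on_change()
    return True
```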
fooying/Mobile-Security-Framework-MobSF
DynamicAnalyzer/tools/pyWebProxy/gen_cert.py
37
4662
#!/usr/bin/env python ''' owtf is an OWASP+PTES-focused try to unite great tools & facilitate pentesting Copyright (c) 2013, Abraham Aranguren <[email protected]> http://7-a.org All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright owner nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Inbound Proxy Module developed by Bharadwaj Machiraju (blog.tunnelshade.in) # as a part of Google Summer of Code 2013 ''' from OpenSSL import crypto import os import hashlib import re def gen_signed_cert(domain, ca_crt, ca_key, ca_pass, certs_folder): """ This function takes a domain name as a parameter and then creates a certificate and key with the domain name(replacing dots by underscores), finally signing the certificate using specified CA and returns the path of key and cert files. 
If you are yet to generate a CA then check the top comments """ key_path = os.path.join(certs_folder, re.sub('[^-0-9a-zA-Z_]', '_', domain) + ".key") cert_path = os.path.join(certs_folder, re.sub('[^-0-9a-zA-Z_]', '_', domain) + ".crt") # The first conditions checks if file exists, and does nothing if true # If file doenst exist lock is obtained for writing (Other processes in race must wait) # After obtaining lock another check to handle race conditions gracefully if os.path.exists(key_path) and os.path.exists(cert_path): pass else: # Check happens if the certificate and key pair already exists for a domain if os.path.exists(key_path) and os.path.exists(cert_path): pass else: # Serial Generation - Serial number must be unique for each certificate, # so serial is generated based on domain name md5_hash = hashlib.md5() md5_hash.update(domain) serial = int(md5_hash.hexdigest(), 36) # The CA stuff is loaded from the same folder as this script ca_cert = crypto.load_certificate(crypto.FILETYPE_PEM, open(ca_crt).read()) # The last parameter is the password for your CA key file ca_key = crypto.load_privatekey(crypto.FILETYPE_PEM, open(ca_key).read(), ca_pass) key = crypto.PKey() key.generate_key(crypto.TYPE_RSA, 2048) cert = crypto.X509() cert.get_subject().C = "IN" cert.get_subject().ST = "BL" cert.get_subject().L = "127.0.0.1" cert.get_subject().O = "MobSec" cert.get_subject().OU = "MobSec-Proxy" cert.get_subject().CN = domain cert.gmtime_adj_notBefore(0) cert.gmtime_adj_notAfter(365 * 24 * 60 * 60) cert.set_serial_number(serial) cert.set_issuer(ca_cert.get_subject()) cert.set_pubkey(key) cert.sign(ca_key, "sha1") # The key and cert files are dumped and their paths are returned domain_key = open(key_path, "w") domain_key.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key)) domain_cert = open(cert_path, "w") domain_cert.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert)) # print(("[*] Generated signed certificate for %s" % (domain))) return key_path, cert_path
gpl-3.0
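gen_cert.py above derives each leaf certificate's serial number deterministically from the domain, via `int(md5_hash.hexdigest(), 36)`; reading a hex digest as base 36 still yields a stable integer, though base 16 is the conventional interpretation of a hex string. A hedged sketch of that derivation in isolation — the base-16 reading and the 64-bit bound are choices made here for illustration, not taken from the file:

```python
import hashlib

def serial_from_domain(domain, bits=64):
    """Derive a deterministic, bounded certificate serial number from a domain name."""
    digest = hashlib.md5(domain.encode("utf-8")).hexdigest()
    # Interpret the digest as hexadecimal and truncate it to keep the serial small.
    return int(digest, 16) % (2 ** bits)

# Hypothetical usage:
# serial_from_domain("example.org")
```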
freeitaly/Trading-System
vn.demo/ctpdemo/mdtest.py
96
5287
# encoding: UTF-8 import sys from time import sleep from PyQt4 import QtGui from vnctpmd import * #---------------------------------------------------------------------- def print_dict(d): """按照键值打印一个字典""" for key,value in d.items(): print key + ':' + str(value) #---------------------------------------------------------------------- def simple_log(func): """简单装饰器用于输出函数名""" def wrapper(*args, **kw): print "" print str(func.__name__) return func(*args, **kw) return wrapper ######################################################################## class TestMdApi(MdApi): """测试用实例""" #---------------------------------------------------------------------- def __init__(self): """Constructor""" super(TestMdApi, self).__init__() #---------------------------------------------------------------------- @simple_log def onFrontConnected(self): """服务器连接""" pass #---------------------------------------------------------------------- @simple_log def onFrontDisconnected(self, n): """服务器断开""" print n #---------------------------------------------------------------------- @simple_log def onHeartBeatWarning(self, n): """心跳报警""" print n #---------------------------------------------------------------------- @simple_log def onRspError(self, error, n, last): """错误""" print_dict(error) @simple_log #---------------------------------------------------------------------- def onRspUserLogin(self, data, error, n, last): """登陆回报""" print_dict(data) print_dict(error) #---------------------------------------------------------------------- @simple_log def onRspUserLogout(self, data, error, n, last): """登出回报""" print_dict(data) print_dict(error) #---------------------------------------------------------------------- @simple_log def onRspSubMarketData(self, data, error, n, last): """订阅合约回报""" print_dict(data) print_dict(error) #---------------------------------------------------------------------- @simple_log def onRspUnSubMarketData(self, data, error, n, last): """退订合约回报""" print_dict(data) print_dict(error) #---------------------------------------------------------------------- @simple_log def onRtnDepthMarketData(self, data): """行情推送""" print_dict(data) #---------------------------------------------------------------------- @simple_log def onRspSubForQuoteRsp(self, data, error, n, last): """订阅合约回报""" print_dict(data) print_dict(error) #---------------------------------------------------------------------- @simple_log def onRspUnSubForQuoteRsp(self, data, error, n, last): """退订合约回报""" print_dict(data) print_dict(error) #---------------------------------------------------------------------- @simple_log def onRtnForQuoteRsp(self, data): """行情推送""" print_dict(data) #---------------------------------------------------------------------- def main(): """主测试函数,出现堵塞时可以考虑使用sleep""" reqid = 0 # 创建Qt应用对象,用于事件循环 app = QtGui.QApplication(sys.argv) # 创建API对象 api = TestMdApi() # 在C++环境中创建MdApi对象,传入参数是希望用来保存.con文件的地址 api.createFtdcMdApi('') # 注册前置机地址 api.registerFront("tcp://qqfz-md1.ctp.shcifco.com:32313") # 初始化api,连接前置机 api.init() sleep(0.5) # 登陆 loginReq = {} # 创建一个空字典 loginReq['UserID'] = '' # 参数作为字典键值的方式传入 loginReq['Password'] = '' # 键名和C++中的结构体成员名对应 loginReq['BrokerID'] = '' reqid = reqid + 1 # 请求数必须保持唯一性 i = api.reqUserLogin(loginReq, 1) sleep(0.5) ## 登出,测试出错(无此功能) #reqid = reqid + 1 #i = api.reqUserLogout({}, 1) #sleep(0.5) ## 安全退出,测试通过 #i = api.exit() ## 获取交易日,目前输出为空 #day = api.getTradingDay() #print 'Trading Day is:' + str(day) #sleep(0.5) ## 订阅合约,测试通过 #i = api.subscribeMarketData('IF1505') ## 退订合约,测试通过 #i = 
api.unSubscribeMarketData('IF1505') # 订阅询价,测试通过 i = api.subscribeForQuoteRsp('IO1504-C-3900') # 退订询价,测试通过 i = api.unSubscribeForQuoteRsp('IO1504-C-3900') # 连续运行,用于输出行情 app.exec_() if __name__ == '__main__': main()
mit
aps-sids/zulip
api/integrations/codebase/zulip_codebase_config.py
124
2537
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright © 2014 Zulip, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # Change these values to configure authentication for your codebase account # Note that this is the Codebase API Username, found in the Settings page # for your account CODEBASE_API_USERNAME = "[email protected]" CODEBASE_API_KEY = "1234561234567abcdef" # The URL of your codebase setup CODEBASE_ROOT_URL = "https://YOUR_COMPANY.codebasehq.com" # When initially started, how many hours of messages to include. # Note that the Codebase API only returns the 20 latest events, # if you have more than 20 events that fit within this window, # earlier ones may be lost CODEBASE_INITIAL_HISTORY_HOURS = 12 # Change these values to configure Zulip authentication for the plugin ZULIP_USER = "[email protected]" ZULIP_API_KEY = "0123456789abcdef0123456789abcdef" # The streams to send commit information and ticket information to ZULIP_COMMITS_STREAM_NAME = "codebase" ZULIP_TICKETS_STREAM_NAME = "tickets" # If properly installed, the Zulip API should be in your import # path, but if not, set a custom path below ZULIP_API_PATH = None # Set this to your Zulip API server URI ZULIP_SITE = "https://api.zulip.com" # If you wish to log to a file rather than stdout/stderr, # please fill this out your desired path LOG_FILE = None # This file is used to resume this mirror in case the script shuts down. # It is required and needs to be writeable. RESUME_FILE = "/var/tmp/zulip_codebase.state"
apache-2.0
TridevGuha/django
tests/urlpatterns_reverse/views.py
218
1538
from functools import partial, update_wrapper from django.contrib.auth.decorators import user_passes_test from django.core.urlresolvers import reverse_lazy from django.http import HttpResponse from django.views.generic import RedirectView def empty_view(request, *args, **kwargs): return HttpResponse('') def kwargs_view(request, arg1=1, arg2=2): return HttpResponse('') def absolute_kwargs_view(request, arg1=1, arg2=2): return HttpResponse('') def defaults_view(request, arg1, arg2): pass def nested_view(request): pass def erroneous_view(request): import non_existent # NOQA def pass_resolver_match_view(request, *args, **kwargs): response = HttpResponse('') response.resolver_match = request.resolver_match return response uncallable = None # neither a callable nor a string class ViewClass(object): def __call__(self, request, *args, **kwargs): return HttpResponse('') view_class_instance = ViewClass() class LazyRedirectView(RedirectView): url = reverse_lazy('named-lazy-url-redirected-to') @user_passes_test(lambda u: u.is_authenticated(), login_url=reverse_lazy('some-login-page')) def login_required_view(request): return HttpResponse('Hello you') def bad_view(request, *args, **kwargs): raise ValueError("I don't think I'm getting good value for this view") empty_view_partial = partial(empty_view, template_name="template.html") empty_view_wrapped = update_wrapper( partial(empty_view, template_name="template.html"), empty_view, )
bsd-3-clause
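The views module above ends by wrapping a view with `functools.partial` and copying the original's metadata back onto the wrapper with `update_wrapper`, so URL resolution and introspection still see the underlying function. A self-contained illustration of that functools pattern, using a stand-in function instead of a Django view:

```python
from functools import partial, update_wrapper

def greet(request, template_name="default.html"):
    # Stand-in for a Django view; returns a string rather than an HttpResponse.
    return "rendered %s" % template_name

# Bind a keyword argument up front, as the test module does for empty_view.
greet_partial = partial(greet, template_name="template.html")

# Copy the wrapped function's metadata onto the partial so __name__ etc. survive.
greet_wrapped = update_wrapper(partial(greet, template_name="template.html"), greet)

print(greet_partial(None))      # rendered template.html
print(greet_wrapped.__name__)   # greet
```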
Jeff-Tian/mybnb
Python27/Lib/lib2to3/tests/pytree_idempotency.py
14
2477
#!/usr/bin/env python # Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Main program for testing the infrastructure.""" __author__ = "Guido van Rossum <[email protected]>" # Support imports (need to be imported first) from . import support # Python imports import os import sys import logging # Local imports from .. import pytree import pgen2 from pgen2 import driver logging.basicConfig() def main(): gr = driver.load_grammar("Grammar.txt") dr = driver.Driver(gr, convert=pytree.convert) fn = "example.py" tree = dr.parse_file(fn, debug=True) if not diff(fn, tree): print "No diffs." if not sys.argv[1:]: return # Pass a dummy argument to run the complete test suite below problems = [] # Process every imported module for name in sys.modules: mod = sys.modules[name] if mod is None or not hasattr(mod, "__file__"): continue fn = mod.__file__ if fn.endswith(".pyc"): fn = fn[:-1] if not fn.endswith(".py"): continue print >>sys.stderr, "Parsing", fn tree = dr.parse_file(fn, debug=True) if diff(fn, tree): problems.append(fn) # Process every single module on sys.path (but not in packages) for dir in sys.path: try: names = os.listdir(dir) except os.error: continue print >>sys.stderr, "Scanning", dir, "..." for name in names: if not name.endswith(".py"): continue print >>sys.stderr, "Parsing", name fn = os.path.join(dir, name) try: tree = dr.parse_file(fn, debug=True) except pgen2.parse.ParseError, err: print "ParseError:", err else: if diff(fn, tree): problems.append(fn) # Show summary of problem files if not problems: print "No problems. Congratulations!" else: print "Problems in following files:" for fn in problems: print "***", fn def diff(fn, tree): f = open("@", "w") try: f.write(str(tree)) finally: f.close() try: return os.system("diff -u %s @" % fn) finally: os.remove("@") if __name__ == "__main__": main()
apache-2.0
OpenNetworkingFoundation/ONFOpenTransport
RI/flask_server/tapi_server/models/tapi_connectivity_protection_role.py
4
1213
# coding: utf-8 from __future__ import absolute_import from datetime import date, datetime # noqa: F401 from typing import List, Dict # noqa: F401 from tapi_server.models.base_model_ import Model from tapi_server import util class TapiConnectivityProtectionRole(Model): """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). Do not edit the class manually. """ """ allowed enum values """ WORK = "WORK" PROTECT = "PROTECT" PROTECTED = "PROTECTED" NA = "NA" WORK_RESTORE = "WORK_RESTORE" PROTECT_RESTORE = "PROTECT_RESTORE" def __init__(self): # noqa: E501 """TapiConnectivityProtectionRole - a model defined in OpenAPI """ self.openapi_types = { } self.attribute_map = { } @classmethod def from_dict(cls, dikt) -> 'TapiConnectivityProtectionRole': """Returns the dict as a model :param dikt: A dict. :type: dict :return: The tapi.connectivity.ProtectionRole of this TapiConnectivityProtectionRole. # noqa: E501 :rtype: TapiConnectivityProtectionRole """ return util.deserialize_model(dikt, cls)
apache-2.0
skirsdeda/djangocms-blog
djangocms_blog/models.py
1
23353
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals import hashlib from aldryn_apphooks_config.fields import AppHookConfigField from aldryn_apphooks_config.managers.parler import AppHookConfigTranslatableManager from cms.models import CMSPlugin, PlaceholderField from django.conf import settings as dj_settings from django.contrib.auth import get_user_model from django.contrib.sites.shortcuts import get_current_site from django.core.cache import cache from django.core.urlresolvers import reverse from django.db import models from django.db.models.signals import post_save, pre_delete from django.dispatch import receiver from django.utils import timezone from django.utils.encoding import force_bytes, force_text, python_2_unicode_compatible from django.utils.functional import cached_property from django.utils.html import escape, strip_tags from django.utils.translation import get_language, ugettext, ugettext_lazy as _ from djangocms_text_ckeditor.fields import HTMLField from filer.fields.image import FilerImageField from filer.models import ThumbnailOption from meta.models import ModelMeta from parler.models import TranslatableModel, TranslatedFields from parler.utils.context import switch_language from sortedm2m.fields import SortedManyToManyField from taggit_autosuggest.managers import TaggableManager from .cms_appconfig import BlogConfig from .fields import AutoSlugField, slugify from .managers import GenericDateTaggedManager from .settings import get_setting BLOG_CURRENT_POST_IDENTIFIER = get_setting('CURRENT_POST_IDENTIFIER') BLOG_CURRENT_NAMESPACE = get_setting('CURRENT_NAMESPACE') BLOG_PLUGIN_TEMPLATE_FOLDERS = get_setting('PLUGIN_TEMPLATE_FOLDERS') thumbnail_model = '%s.%s' % ( ThumbnailOption._meta.app_label, ThumbnailOption.__name__ ) try: from knocker.mixins import KnockerModel except ImportError: class KnockerModel(object): """ Stub class if django-knocker is not installed """ pass class BlogMetaMixin(ModelMeta): def get_meta_attribute(self, param): """ Retrieves django-meta attributes from apphook config instance :param param: django-meta attribute passed as key """ return self._get_meta_value(param, getattr(self.app_config, param)) or '' def get_locale(self): return self.get_current_language() def get_full_url(self): """ Return the url with protocol and domain url """ return self.build_absolute_uri(self.get_absolute_url()) @python_2_unicode_compatible class BlogCategory(BlogMetaMixin, TranslatableModel): """ Blog category """ parent = models.ForeignKey( 'self', verbose_name=_('parent'), null=True, blank=True, related_name='children' ) date_created = models.DateTimeField(_('created at'), auto_now_add=True) date_modified = models.DateTimeField(_('modified at'), auto_now=True) app_config = AppHookConfigField( BlogConfig, null=True, verbose_name=_('app. 
config') ) translations = TranslatedFields( name=models.CharField(_('name'), max_length=767), slug=models.SlugField(_('slug'), max_length=767, blank=True, db_index=True), meta_description=models.TextField( verbose_name=_('category meta description'), blank=True, default='' ), meta={'unique_together': (('language_code', 'slug'),)} ) objects = AppHookConfigTranslatableManager() _metadata = { 'title': 'get_title', 'description': 'get_description', 'og_description': 'get_description', 'twitter_description': 'get_description', 'gplus_description': 'get_description', 'locale': 'get_locale', 'object_type': 'get_meta_attribute', 'og_type': 'get_meta_attribute', 'og_app_id': 'get_meta_attribute', 'og_profile_id': 'get_meta_attribute', 'og_publisher': 'get_meta_attribute', 'og_author_url': 'get_meta_attribute', 'og_author': 'get_meta_attribute', 'twitter_type': 'get_meta_attribute', 'twitter_site': 'get_meta_attribute', 'twitter_author': 'get_meta_attribute', 'gplus_type': 'get_meta_attribute', 'gplus_author': 'get_meta_attribute', 'url': 'get_absolute_url', } class Meta: verbose_name = _('blog category') verbose_name_plural = _('blog categories') def descendants(self): children = [] if self.children.exists(): children.extend(self.children.all()) for child in self.children.all(): children.extend(child.descendants()) return children @cached_property def linked_posts(self): return self.blog_posts.namespace(self.app_config.namespace) @cached_property def count(self): return self.linked_posts.published().count() @cached_property def count_all_sites(self): return self.linked_posts.published(current_site=False).count() def get_absolute_url(self, lang=None): if not lang or lang not in self.get_available_languages(): lang = get_language() if not lang or lang not in self.get_available_languages(): lang = self.get_current_language() if self.has_translation(lang): slug = self.safe_translation_getter('slug', language_code=lang) return reverse( '%s:posts-category' % self.app_config.namespace, kwargs={'category': slug}, current_app=self.app_config.namespace ) # in case category doesn't exist in this language, gracefully fallback # to posts-latest return reverse( '%s:posts-latest' % self.app_config.namespace, current_app=self.app_config.namespace ) def __str__(self): default = ugettext('BlogCategory (no translation)') return self.safe_translation_getter('name', any_language=True, default=default) def save(self, *args, **kwargs): super(BlogCategory, self).save(*args, **kwargs) for lang in self.get_available_languages(): self.set_current_language(lang) if not self.slug and self.name: self.slug = slugify(force_text(self.name)) self.save_translations() def get_title(self): title = self.safe_translation_getter('name', any_language=True) return title.strip() def get_description(self): description = self.safe_translation_getter('meta_description', any_language=True) return escape(strip_tags(description)).strip() @python_2_unicode_compatible class Post(KnockerModel, BlogMetaMixin, TranslatableModel): """ Blog post """ author = models.ForeignKey(dj_settings.AUTH_USER_MODEL, verbose_name=_('author'), null=True, blank=True, related_name='djangocms_blog_post_author') date_created = models.DateTimeField(_('created'), auto_now_add=True) date_modified = models.DateTimeField(_('last modified'), auto_now=True) date_published = models.DateTimeField(_('published since'), null=True, blank=True) date_published_end = models.DateTimeField(_('published until'), null=True, blank=True) date_featured = models.DateTimeField(_('featured 
date'), null=True, blank=True) publish = models.BooleanField(_('publish'), default=False) categories = models.ManyToManyField('djangocms_blog.BlogCategory', verbose_name=_('category'), related_name='blog_posts', blank=True) main_image = FilerImageField(verbose_name=_('main image'), blank=True, null=True, on_delete=models.SET_NULL, related_name='djangocms_blog_post_image') main_image_thumbnail = models.ForeignKey(thumbnail_model, verbose_name=_('main image thumbnail'), related_name='djangocms_blog_post_thumbnail', on_delete=models.SET_NULL, blank=True, null=True) main_image_full = models.ForeignKey(thumbnail_model, verbose_name=_('main image full'), related_name='djangocms_blog_post_full', on_delete=models.SET_NULL, blank=True, null=True) enable_comments = models.BooleanField(verbose_name=_('enable comments on post'), default=get_setting('ENABLE_COMMENTS')) sites = models.ManyToManyField('sites.Site', verbose_name=_('Site(s)'), blank=True, help_text=_('Select sites in which to show the post. ' 'If none is set it will be ' 'visible in all the configured sites.')) app_config = AppHookConfigField( BlogConfig, null=True, verbose_name=_('app. config') ) translations = TranslatedFields( title=models.CharField(_('title'), max_length=767), slug=AutoSlugField(_('slug'), max_length=767, blank=True, db_index=True, allow_unicode=True), subtitle=models.CharField(verbose_name=_('subtitle'), max_length=767, blank=True, default=''), abstract=HTMLField(_('abstract'), blank=True, default='', configuration='BLOG_ABSTRACT_CKEDITOR'), meta_description=models.TextField(verbose_name=_('post meta description'), blank=True, default=''), meta_keywords=models.TextField(verbose_name=_('post meta keywords'), blank=True, default=''), meta_title=models.CharField(verbose_name=_('post meta title'), help_text=_('used in title tag and social sharing'), max_length=2000, blank=True, default=''), post_text=HTMLField(_('text'), default='', blank=True, configuration='BLOG_POST_TEXT_CKEDITOR'), meta={'unique_together': (('language_code', 'slug'),)} ) content = PlaceholderField('post_content', related_name='post_content') liveblog = PlaceholderField('live_blog', related_name='live_blog') enable_liveblog = models.BooleanField(verbose_name=_('enable liveblog on post'), default=False) objects = GenericDateTaggedManager() tags = TaggableManager(blank=True, related_name='djangocms_blog_tags') related = SortedManyToManyField('self', verbose_name=_('Related Posts'), blank=True, symmetrical=False) _metadata = { 'title': 'get_title', 'description': 'get_description', 'keywords': 'get_keywords', 'og_description': 'get_description', 'twitter_description': 'get_description', 'gplus_description': 'get_description', 'locale': 'get_locale', 'image': 'get_image_full_url', 'object_type': 'get_meta_attribute', 'og_type': 'get_meta_attribute', 'og_app_id': 'get_meta_attribute', 'og_profile_id': 'get_meta_attribute', 'og_publisher': 'get_meta_attribute', 'og_author_url': 'get_meta_attribute', 'og_author': 'get_meta_attribute', 'twitter_type': 'get_meta_attribute', 'twitter_site': 'get_meta_attribute', 'twitter_author': 'get_meta_attribute', 'gplus_type': 'get_meta_attribute', 'gplus_author': 'get_meta_attribute', 'published_time': 'date_published', 'modified_time': 'date_modified', 'expiration_time': 'date_published_end', 'tag': 'get_tags', 'url': 'get_absolute_url', } class Meta: verbose_name = _('blog article') verbose_name_plural = _('blog articles') ordering = ('-date_published', '-date_created') get_latest_by = 'date_published' def __str__(self): 
default = ugettext('Post (no translation)') return self.safe_translation_getter('title', any_language=True, default=default) @property def guid(self, language=None): if not language: language = self.get_current_language() base_string = '-{0}-{2}-{1}-'.format( language, self.app_config.namespace, self.safe_translation_getter('slug', language_code=language, any_language=True) ) return hashlib.sha256(force_bytes(base_string)).hexdigest() @property def date(self): if self.date_featured: return self.date_featured return self.date_published def save(self, *args, **kwargs): """ Handle some auto configuration during save """ if self.publish and self.date_published is None: self.date_published = timezone.now() if not self.slug and self.title: self.slug = slugify(self.title) super(Post, self).save(*args, **kwargs) def save_translation(self, translation, *args, **kwargs): """ Handle some auto configuration during save """ if not translation.slug and translation.title: translation.slug = slugify(translation.title) super(Post, self).save_translation(translation, *args, **kwargs) def get_absolute_url(self, lang=None): if not lang or lang not in self.get_available_languages(): lang = get_language() if not lang or lang not in self.get_available_languages(): lang = self.get_current_language() with switch_language(self, lang): category = self.categories.first() kwargs = {} if self.date_published: current_date = self.date_published else: current_date = self.date_created urlconf = get_setting('PERMALINK_URLS')[self.app_config.url_patterns] if '<year>' in urlconf: kwargs['year'] = current_date.year if '<month>' in urlconf: kwargs['month'] = '%02d' % current_date.month if '<day>' in urlconf: kwargs['day'] = '%02d' % current_date.day if '<slug>' in urlconf: kwargs['slug'] = self.safe_translation_getter( 'slug', language_code=lang, any_language=True ) # NOQA if '<category>' in urlconf: kwargs['category'] = category.safe_translation_getter( 'slug', language_code=lang, any_language=True) # NOQA return reverse('%s:post-detail' % self.app_config.namespace, kwargs=kwargs) def get_title(self): title = self.safe_translation_getter('meta_title', any_language=True) if not title: title = self.safe_translation_getter('title', any_language=True) return title.strip() def get_keywords(self): """ Returns the list of keywords (as python list) :return: list """ return self.safe_translation_getter('meta_keywords', default='').strip().split(',') def get_description(self): description = self.safe_translation_getter('meta_description', any_language=True) if not description: description = self.safe_translation_getter('abstract', any_language=True) return escape(strip_tags(description)).strip() def get_image_full_url(self): if self.main_image: return self.build_absolute_uri(self.main_image.url) return '' def get_tags(self): """ Returns the list of object tags as comma separated list """ taglist = [tag.name for tag in self.tags.all()] return ','.join(taglist) def get_author(self): """ Return the author (user) objects """ return self.author def _set_default_author(self, current_user): if not self.author_id and self.app_config.set_author: if get_setting('AUTHOR_DEFAULT') is True: user = current_user else: user = get_user_model().objects.get(username=get_setting('AUTHOR_DEFAULT')) self.author = user def thumbnail_options(self): if self.main_image_thumbnail_id: return self.main_image_thumbnail.as_dict else: return get_setting('IMAGE_THUMBNAIL_SIZE') def full_image_options(self): if self.main_image_full_id: return 
self.main_image_full.as_dict else: return get_setting('IMAGE_FULL_SIZE') @property def is_published(self): """ Checks wether the blog post is *really* published by checking publishing dates too """ return (self.publish and (self.date_published and self.date_published <= timezone.now()) and (self.date_published_end is None or self.date_published_end > timezone.now()) ) def should_knock(self, created=False): """ Returns whether to emit knocks according to the post state """ new = (self.app_config.send_knock_create and self.is_published and self.date_published == self.date_modified) updated = self.app_config.send_knock_update and self.is_published return new or updated def get_cache_key(self, language, prefix): return 'djangocms-blog:{2}:{0}:{1}'.format(language, self.guid, prefix) @property def liveblog_group(self): return 'liveblog-{apphook}-{lang}-{post}'.format( lang=self.get_current_language(), apphook=self.app_config.namespace, post=self.safe_translation_getter('slug', any_language=True) ) class BasePostPlugin(CMSPlugin): app_config = AppHookConfigField( BlogConfig, null=True, verbose_name=_('app. config'), blank=True ) current_site = models.BooleanField( _('current site'), default=True, help_text=_('Select items from the current site only') ) template_folder = models.CharField( max_length=200, verbose_name=_('Plugin template'), help_text=_('Select plugin template to load for this instance'), default=BLOG_PLUGIN_TEMPLATE_FOLDERS[0][0], choices=BLOG_PLUGIN_TEMPLATE_FOLDERS ) class Meta: abstract = True def optimize(self, qs): """ Apply select_related / prefetch_related to optimize the view queries :param qs: queryset to optimize :return: optimized queryset """ return qs.select_related('app_config').prefetch_related( 'translations', 'categories', 'categories__translations', 'categories__app_config' ) def post_queryset(self, request=None, published_only=True): language = get_language() posts = Post.objects if self.app_config: posts = posts.namespace(self.app_config.namespace) if self.current_site: posts = posts.on_site(get_current_site(request)) posts = posts.active_translations(language_code=language) if (published_only or not request or not getattr(request, 'toolbar', False) or not request.toolbar.edit_mode): posts = posts.published(current_site=self.current_site) return self.optimize(posts.all()) @python_2_unicode_compatible class LatestPostsPlugin(BasePostPlugin): latest_posts = models.IntegerField(_('articles'), default=get_setting('LATEST_POSTS'), help_text=_('The number of latests ' 'articles to be displayed.')) tags = TaggableManager(_('filter by tag'), blank=True, help_text=_('Show only the blog articles tagged with chosen tags.'), related_name='djangocms_blog_latest_post') categories = models.ManyToManyField('djangocms_blog.BlogCategory', blank=True, verbose_name=_('filter by category'), help_text=_('Show only the blog articles tagged ' 'with chosen categories.')) def __str__(self): return force_text(_('%s latest articles by tag') % self.latest_posts) def copy_relations(self, oldinstance): for tag in oldinstance.tags.all(): self.tags.add(tag) for category in oldinstance.categories.all(): self.categories.add(category) def get_posts(self, request, published_only=True): posts = self.post_queryset(request, published_only) if self.tags.exists(): posts = posts.filter(tags__in=list(self.tags.all())) if self.categories.exists(): posts = posts.filter(categories__in=list(self.categories.all())) return self.optimize(posts.distinct())[:self.latest_posts] @python_2_unicode_compatible class 
AuthorEntriesPlugin(BasePostPlugin): authors = models.ManyToManyField( dj_settings.AUTH_USER_MODEL, verbose_name=_('authors'), limit_choices_to={'djangocms_blog_post_author__publish': True} ) latest_posts = models.IntegerField( _('articles'), default=get_setting('LATEST_POSTS'), help_text=_('The number of author articles to be displayed.') ) def __str__(self): return force_text(_('%s latest articles by author') % self.latest_posts) def copy_relations(self, oldinstance): self.authors = oldinstance.authors.all() def get_posts(self, request, published_only=True): posts = self.post_queryset(request, published_only) return posts[:self.latest_posts] def get_authors(self): authors = self.authors.all() for author in authors: author.count = 0 qs = author.djangocms_blog_post_author if self.app_config: qs = qs.namespace(self.app_config.namespace) if self.current_site: qs = qs.published() else: qs = qs.published(current_site=False) count = qs.count() if count: author.count = count return authors @python_2_unicode_compatible class GenericBlogPlugin(BasePostPlugin): class Meta: abstract = False def __str__(self): return force_text(_('generic blog plugin')) @receiver(pre_delete, sender=Post) def pre_delete_post(sender, instance, **kwargs): for language in instance.get_available_languages(): key = instance.get_cache_key(language, 'feed') cache.delete(key) @receiver(post_save, sender=Post) def post_save_post(sender, instance, **kwargs): for language in instance.get_available_languages(): key = instance.get_cache_key(language, 'feed') cache.delete(key)
bsd-3-clause
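The `Post.is_published` property in the djangocms_blog record above combines the `publish` flag with a publishing window. A minimal standalone sketch of that date-window check follows; the function name and the naive-datetime handling are illustrative stand-ins for the model fields and `timezone.now()`, not part of the original file.

from datetime import datetime, timedelta

def is_published(publish, date_published, date_published_end, now=None):
    # Mirrors the window check: published date reached, end date (if any) not passed.
    now = now or datetime.utcnow()
    return bool(
        publish
        and date_published is not None and date_published <= now
        and (date_published_end is None or date_published_end > now)
    )

if __name__ == '__main__':
    now = datetime.utcnow()
    print(is_published(True, now - timedelta(days=1), None))                     # True: already published
    print(is_published(True, now + timedelta(days=1), None))                     # False: scheduled for later
    print(is_published(True, now - timedelta(days=2), now - timedelta(days=1)))  # False: publishing window closed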
houzhenggang/hiwifi-openwrt-HC5661-HC5761
staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/idlelib/ToolTip.py
149
2736
# general purpose 'tooltip' routines - currently unused in idlefork
# (although the 'calltips' extension is partly based on this code)
# may be useful for some purposes in (or almost in ;) the current project scope
# Ideas gleaned from PySol

from Tkinter import *

class ToolTipBase:

    def __init__(self, button):
        self.button = button
        self.tipwindow = None
        self.id = None
        self.x = self.y = 0
        self._id1 = self.button.bind("<Enter>", self.enter)
        self._id2 = self.button.bind("<Leave>", self.leave)
        self._id3 = self.button.bind("<ButtonPress>", self.leave)

    def enter(self, event=None):
        self.schedule()

    def leave(self, event=None):
        self.unschedule()
        self.hidetip()

    def schedule(self):
        self.unschedule()
        self.id = self.button.after(1500, self.showtip)

    def unschedule(self):
        id = self.id
        self.id = None
        if id:
            self.button.after_cancel(id)

    def showtip(self):
        if self.tipwindow:
            return
        # The tip window must be completely outside the button;
        # otherwise when the mouse enters the tip window we get
        # a leave event and it disappears, and then we get an enter
        # event and it reappears, and so on forever :-(
        x = self.button.winfo_rootx() + 20
        y = self.button.winfo_rooty() + self.button.winfo_height() + 1
        self.tipwindow = tw = Toplevel(self.button)
        tw.wm_overrideredirect(1)
        tw.wm_geometry("+%d+%d" % (x, y))
        self.showcontents()

    def showcontents(self, text="Your text here"):
        # Override this in derived class
        label = Label(self.tipwindow, text=text, justify=LEFT,
                      background="#ffffe0", relief=SOLID, borderwidth=1)
        label.pack()

    def hidetip(self):
        tw = self.tipwindow
        self.tipwindow = None
        if tw:
            tw.destroy()

class ToolTip(ToolTipBase):

    def __init__(self, button, text):
        ToolTipBase.__init__(self, button)
        self.text = text

    def showcontents(self):
        ToolTipBase.showcontents(self, self.text)

class ListboxToolTip(ToolTipBase):

    def __init__(self, button, items):
        ToolTipBase.__init__(self, button)
        self.items = items

    def showcontents(self):
        listbox = Listbox(self.tipwindow, background="#ffffe0")
        listbox.pack()
        for item in self.items:
            listbox.insert(END, item)

def main():
    # Test code
    root = Tk()
    b = Button(root, text="Hello", command=root.destroy)
    b.pack()
    root.update()
    tip = ListboxToolTip(b, ["Hello", "world"])
    root.mainloop()

if __name__ == '__main__':
    main()
gpl-2.0
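The file's own `main()` only exercises `ListboxToolTip`; a minimal usage sketch of the plain-text `ToolTip` class is below. It assumes the Python 2 environment the record comes from (`Tkinter`, `idlelib.ToolTip`) and a running display; the button label and tip text are made up.

from Tkinter import Tk, Button
from idlelib.ToolTip import ToolTip

def demo():
    root = Tk()
    button = Button(root, text="Save", command=root.destroy)
    button.pack()
    # Tip appears after the 1500 ms hover delay hard-coded in ToolTipBase.schedule().
    ToolTip(button, "Write the current buffer to disk")
    root.mainloop()

if __name__ == '__main__':
    demo()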
Epirex/android_external_chromium_org
build/android/pylib/instrumentation/test_jar.py
27
8479
# Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Helper class for instrumenation test jar.""" import collections import logging import os import pickle import re from pylib import cmd_helper from pylib import constants # If you change the cached output of proguard, increment this number PICKLE_FORMAT_VERSION = 1 class TestJar(object): _ANNOTATIONS = frozenset( ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', 'EnormousTest', 'FlakyTest', 'DisabledTest', 'Manual', 'PerfTest', 'HostDrivenTest']) _DEFAULT_ANNOTATION = 'SmallTest' _PROGUARD_CLASS_RE = re.compile(r'\s*?- Program class:\s*([\S]+)$') _PROGUARD_METHOD_RE = re.compile(r'\s*?- Method:\s*(\S*)[(].*$') _PROGUARD_ANNOTATION_RE = re.compile(r'\s*?- Annotation \[L(\S*);\]:$') _PROGUARD_ANNOTATION_CONST_RE = ( re.compile(r'\s*?- Constant element value.*$')) _PROGUARD_ANNOTATION_VALUE_RE = re.compile(r'\s*?- \S+? \[(.*)\]$') def __init__(self, jar_path): if not os.path.exists(jar_path): raise Exception('%s not found, please build it' % jar_path) sdk_root = os.getenv('ANDROID_SDK_ROOT', constants.ANDROID_SDK_ROOT) self._PROGUARD_PATH = os.path.join(sdk_root, 'tools/proguard/bin/proguard.sh') if not os.path.exists(self._PROGUARD_PATH): self._PROGUARD_PATH = os.path.join(os.environ['ANDROID_BUILD_TOP'], 'external/proguard/bin/proguard.sh') self._jar_path = jar_path self._annotation_map = collections.defaultdict(list) self._pickled_proguard_name = self._jar_path + '-proguard.pickle' self._test_methods = [] if not self._GetCachedProguardData(): self._GetProguardData() def _GetCachedProguardData(self): if (os.path.exists(self._pickled_proguard_name) and (os.path.getmtime(self._pickled_proguard_name) > os.path.getmtime(self._jar_path))): logging.info('Loading cached proguard output from %s', self._pickled_proguard_name) try: with open(self._pickled_proguard_name, 'r') as r: d = pickle.loads(r.read()) if d['VERSION'] == PICKLE_FORMAT_VERSION: self._annotation_map = d['ANNOTATION_MAP'] self._test_methods = d['TEST_METHODS'] return True except: logging.warning('PICKLE_FORMAT_VERSION has changed, ignoring cache') return False def _GetProguardData(self): proguard_output = cmd_helper.GetCmdOutput([self._PROGUARD_PATH, '-injars', self._jar_path, '-dontshrink', '-dontoptimize', '-dontobfuscate', '-dontpreverify', '-dump', ]).split('\n') clazz = None method = None annotation = None has_value = False qualified_method = None for line in proguard_output: m = self._PROGUARD_CLASS_RE.match(line) if m: clazz = m.group(1).replace('/', '.') # Change package delim. annotation = None continue m = self._PROGUARD_METHOD_RE.match(line) if m: method = m.group(1) annotation = None qualified_method = clazz + '#' + method if method.startswith('test') and clazz.endswith('Test'): self._test_methods += [qualified_method] continue if not qualified_method: # Ignore non-method annotations. continue m = self._PROGUARD_ANNOTATION_RE.match(line) if m: annotation = m.group(1).split('/')[-1] # Ignore the annotation package. 
self._annotation_map[qualified_method].append(annotation) has_value = False continue if annotation: if not has_value: m = self._PROGUARD_ANNOTATION_CONST_RE.match(line) if m: has_value = True else: m = self._PROGUARD_ANNOTATION_VALUE_RE.match(line) if m: value = m.group(1) self._annotation_map[qualified_method].append( annotation + ':' + value) has_value = False logging.info('Storing proguard output to %s', self._pickled_proguard_name) d = {'VERSION': PICKLE_FORMAT_VERSION, 'ANNOTATION_MAP': self._annotation_map, 'TEST_METHODS': self._test_methods} with open(self._pickled_proguard_name, 'w') as f: f.write(pickle.dumps(d)) def _GetAnnotationMap(self): return self._annotation_map def _IsTestMethod(self, test): class_name, method = test.split('#') return class_name.endswith('Test') and method.startswith('test') def GetTestAnnotations(self, test): """Returns a list of all annotations for the given |test|. May be empty.""" if not self._IsTestMethod(test): return [] return self._GetAnnotationMap()[test] def _AnnotationsMatchFilters(self, annotation_filter_list, annotations): """Checks if annotations match any of the filters.""" if not annotation_filter_list: return True for annotation_filter in annotation_filter_list: filters = annotation_filter.split('=') if len(filters) == 2: key = filters[0] value_list = filters[1].split(',') for value in value_list: if key + ':' + value in annotations: return True elif annotation_filter in annotations: return True return False def GetAnnotatedTests(self, annotation_filter_list): """Returns a list of all tests that match the given annotation filters.""" return [test for test, annotations in self._GetAnnotationMap().iteritems() if self._IsTestMethod(test) and self._AnnotationsMatchFilters( annotation_filter_list, annotations)] def GetTestMethods(self): """Returns a list of all test methods in this apk as Class#testMethod.""" return self._test_methods def _GetTestsMissingAnnotation(self): """Get a list of test methods with no known annotations.""" tests_missing_annotations = [] for test_method in self.GetTestMethods(): annotations_ = frozenset(self.GetTestAnnotations(test_method)) if (annotations_.isdisjoint(self._ANNOTATIONS) and not self.IsHostDrivenTest(test_method)): tests_missing_annotations.append(test_method) return sorted(tests_missing_annotations) def _GetAllMatchingTests(self, annotation_filter_list, exclude_annotation_list, test_filter): """Get a list of tests matching any of the annotations and the filter. Args: annotation_filter_list: List of test annotations. A test must have at least one of these annotations. A test without any annotations is considered to be SmallTest. exclude_annotation_list: List of test annotations. A test must not have any of these annotations. test_filter: Filter used for partial matching on the test method names. Returns: List of all matching tests. """ if annotation_filter_list: available_tests = self.GetAnnotatedTests(annotation_filter_list) # Include un-annotated tests in SmallTest. if annotation_filter_list.count(self._DEFAULT_ANNOTATION) > 0: for test in self._GetTestsMissingAnnotation(): logging.warning( '%s has no annotations. 
Assuming "%s".', test, self._DEFAULT_ANNOTATION) available_tests.append(test) if exclude_annotation_list: excluded_tests = self.GetAnnotatedTests(exclude_annotation_list) available_tests = list(set(available_tests) - set(excluded_tests)) else: available_tests = [m for m in self.GetTestMethods() if not self.IsHostDrivenTest(m)] tests = [] if test_filter: # |available_tests| are in adb instrument format: package.path.class#test. filter_without_hash = test_filter.replace('#', '.') tests = [t for t in available_tests if filter_without_hash in t.replace('#', '.')] else: tests = available_tests return tests @staticmethod def IsHostDrivenTest(test): return 'pythonDrivenTests' in test
bsd-3-clause
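The `TestJar._AnnotationsMatchFilters` method above accepts filters either as bare annotation names ('SmallTest') or as key=value lists ('Feature=Sync,Foo'). A minimal, self-contained re-implementation of that matching rule is sketched below for illustration; the function name and sample data are not part of the pylib source.

def annotations_match_filters(annotation_filter_list, annotations):
    """Return True if any filter matches the test's annotations."""
    if not annotation_filter_list:
        return True
    for annotation_filter in annotation_filter_list:
        parts = annotation_filter.split('=')
        if len(parts) == 2:
            # 'Feature=Sync,Foo' matches 'Feature:Sync' or 'Feature:Foo'.
            key, values = parts
            if any(key + ':' + value in annotations for value in values.split(',')):
                return True
        elif annotation_filter in annotations:
            return True
    return False

if __name__ == '__main__':
    annotations = ['SmallTest', 'Feature:Sync']
    print(annotations_match_filters(['SmallTest'], annotations))         # True: bare annotation name
    print(annotations_match_filters(['Feature=Sync,Foo'], annotations))  # True: key=value form
    print(annotations_match_filters(['LargeTest'], annotations))         # False: no filter matches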
ahmed-mahran/hue
desktop/core/ext-py/Django-1.6.10/django/contrib/admin/validation.py
108
23236
from django.core.exceptions import ImproperlyConfigured from django.db import models from django.db.models.fields import FieldDoesNotExist from django.forms.models import BaseModelForm, BaseModelFormSet, _get_foreign_key from django.contrib.admin.util import get_fields_from_path, NotRelationField """ Does basic ModelAdmin option validation. Calls custom validation classmethod in the end if it is provided in cls. The signature of the custom validation classmethod should be: def validate(cls, model). """ __all__ = ['BaseValidator', 'InlineValidator'] class BaseValidator(object): def __init__(self): # Before we can introspect models, they need to be fully loaded so that # inter-relations are set up correctly. We force that here. models.get_apps() def validate(self, cls, model): for m in dir(self): if m.startswith('validate_'): getattr(self, m)(cls, model) def check_field_spec(self, cls, model, flds, label): """ Validate the fields specification in `flds` from a ModelAdmin subclass `cls` for the `model` model. Use `label` for reporting problems to the user. The fields specification can be a ``fields`` option or a ``fields`` sub-option from a ``fieldsets`` option component. """ for fields in flds: # The entry in fields might be a tuple. If it is a standalone # field, make it into a tuple to make processing easier. if type(fields) != tuple: fields = (fields,) for field in fields: if field in cls.readonly_fields: # Stuff can be put in fields that isn't actually a # model field if it's in readonly_fields, # readonly_fields will handle the validation of such # things. continue try: f = model._meta.get_field(field) except models.FieldDoesNotExist: # If we can't find a field on the model that matches, it could be an # extra field on the form; nothing to check so move on to the next field. continue if isinstance(f, models.ManyToManyField) and not f.rel.through._meta.auto_created: raise ImproperlyConfigured("'%s.%s' " "can't include the ManyToManyField field '%s' because " "'%s' manually specifies a 'through' model." % ( cls.__name__, label, field, field)) def validate_raw_id_fields(self, cls, model): " Validate that raw_id_fields only contains field names that are listed on the model. " if hasattr(cls, 'raw_id_fields'): check_isseq(cls, 'raw_id_fields', cls.raw_id_fields) for idx, field in enumerate(cls.raw_id_fields): f = get_field(cls, model, 'raw_id_fields', field) if not isinstance(f, (models.ForeignKey, models.ManyToManyField)): raise ImproperlyConfigured("'%s.raw_id_fields[%d]', '%s' must " "be either a ForeignKey or ManyToManyField." % (cls.__name__, idx, field)) def validate_fields(self, cls, model): " Validate that fields only refer to existing fields, doesn't contain duplicates. " # fields if cls.fields: # default value is None check_isseq(cls, 'fields', cls.fields) self.check_field_spec(cls, model, cls.fields, 'fields') if cls.fieldsets: raise ImproperlyConfigured('Both fieldsets and fields are specified in %s.' % cls.__name__) if len(cls.fields) > len(set(cls.fields)): raise ImproperlyConfigured('There are duplicate field(s) in %s.fields' % cls.__name__) def validate_fieldsets(self, cls, model): " Validate that fieldsets is properly formatted and doesn't contain duplicates. 
" from django.contrib.admin.options import flatten_fieldsets if cls.fieldsets: # default value is None check_isseq(cls, 'fieldsets', cls.fieldsets) for idx, fieldset in enumerate(cls.fieldsets): check_isseq(cls, 'fieldsets[%d]' % idx, fieldset) if len(fieldset) != 2: raise ImproperlyConfigured("'%s.fieldsets[%d]' does not " "have exactly two elements." % (cls.__name__, idx)) check_isdict(cls, 'fieldsets[%d][1]' % idx, fieldset[1]) if 'fields' not in fieldset[1]: raise ImproperlyConfigured("'fields' key is required in " "%s.fieldsets[%d][1] field options dict." % (cls.__name__, idx)) self.check_field_spec(cls, model, fieldset[1]['fields'], "fieldsets[%d][1]['fields']" % idx) flattened_fieldsets = flatten_fieldsets(cls.fieldsets) if len(flattened_fieldsets) > len(set(flattened_fieldsets)): raise ImproperlyConfigured('There are duplicate field(s) in %s.fieldsets' % cls.__name__) def validate_exclude(self, cls, model): " Validate that exclude is a sequence without duplicates. " if cls.exclude: # default value is None check_isseq(cls, 'exclude', cls.exclude) if len(cls.exclude) > len(set(cls.exclude)): raise ImproperlyConfigured('There are duplicate field(s) in %s.exclude' % cls.__name__) def validate_form(self, cls, model): " Validate that form subclasses BaseModelForm. " if hasattr(cls, 'form') and not issubclass(cls.form, BaseModelForm): raise ImproperlyConfigured("%s.form does not inherit from " "BaseModelForm." % cls.__name__) def validate_filter_vertical(self, cls, model): " Validate that filter_vertical is a sequence of field names. " if hasattr(cls, 'filter_vertical'): check_isseq(cls, 'filter_vertical', cls.filter_vertical) for idx, field in enumerate(cls.filter_vertical): f = get_field(cls, model, 'filter_vertical', field) if not isinstance(f, models.ManyToManyField): raise ImproperlyConfigured("'%s.filter_vertical[%d]' must be " "a ManyToManyField." % (cls.__name__, idx)) def validate_filter_horizontal(self, cls, model): " Validate that filter_horizontal is a sequence of field names. " if hasattr(cls, 'filter_horizontal'): check_isseq(cls, 'filter_horizontal', cls.filter_horizontal) for idx, field in enumerate(cls.filter_horizontal): f = get_field(cls, model, 'filter_horizontal', field) if not isinstance(f, models.ManyToManyField): raise ImproperlyConfigured("'%s.filter_horizontal[%d]' must be " "a ManyToManyField." % (cls.__name__, idx)) def validate_radio_fields(self, cls, model): " Validate that radio_fields is a dictionary of choice or foreign key fields. " from django.contrib.admin.options import HORIZONTAL, VERTICAL if hasattr(cls, 'radio_fields'): check_isdict(cls, 'radio_fields', cls.radio_fields) for field, val in cls.radio_fields.items(): f = get_field(cls, model, 'radio_fields', field) if not (isinstance(f, models.ForeignKey) or f.choices): raise ImproperlyConfigured("'%s.radio_fields['%s']' " "is neither an instance of ForeignKey nor does " "have choices set." % (cls.__name__, field)) if not val in (HORIZONTAL, VERTICAL): raise ImproperlyConfigured("'%s.radio_fields['%s']' " "is neither admin.HORIZONTAL nor admin.VERTICAL." % (cls.__name__, field)) def validate_prepopulated_fields(self, cls, model): " Validate that prepopulated_fields if a dictionary containing allowed field types. 
" # prepopulated_fields if hasattr(cls, 'prepopulated_fields'): check_isdict(cls, 'prepopulated_fields', cls.prepopulated_fields) for field, val in cls.prepopulated_fields.items(): f = get_field(cls, model, 'prepopulated_fields', field) if isinstance(f, (models.DateTimeField, models.ForeignKey, models.ManyToManyField)): raise ImproperlyConfigured("'%s.prepopulated_fields['%s']' " "is either a DateTimeField, ForeignKey or " "ManyToManyField. This isn't allowed." % (cls.__name__, field)) check_isseq(cls, "prepopulated_fields['%s']" % field, val) for idx, f in enumerate(val): get_field(cls, model, "prepopulated_fields['%s'][%d]" % (field, idx), f) def validate_ordering(self, cls, model): " Validate that ordering refers to existing fields or is random. " # ordering = None if cls.ordering: check_isseq(cls, 'ordering', cls.ordering) for idx, field in enumerate(cls.ordering): if field == '?' and len(cls.ordering) != 1: raise ImproperlyConfigured("'%s.ordering' has the random " "ordering marker '?', but contains other fields as " "well. Please either remove '?' or the other fields." % cls.__name__) if field == '?': continue if field.startswith('-'): field = field[1:] # Skip ordering in the format field1__field2 (FIXME: checking # this format would be nice, but it's a little fiddly). if '__' in field: continue get_field(cls, model, 'ordering[%d]' % idx, field) def validate_readonly_fields(self, cls, model): " Validate that readonly_fields refers to proper attribute or field. " if hasattr(cls, "readonly_fields"): check_isseq(cls, "readonly_fields", cls.readonly_fields) for idx, field in enumerate(cls.readonly_fields): if not callable(field): if not hasattr(cls, field): if not hasattr(model, field): try: model._meta.get_field(field) except models.FieldDoesNotExist: raise ImproperlyConfigured("%s.readonly_fields[%d], %r is not a callable or an attribute of %r or found in the model %r." % (cls.__name__, idx, field, cls.__name__, model._meta.object_name)) class ModelAdminValidator(BaseValidator): def validate_save_as(self, cls, model): " Validate save_as is a boolean. " check_type(cls, 'save_as', bool) def validate_save_on_top(self, cls, model): " Validate save_on_top is a boolean. " check_type(cls, 'save_on_top', bool) def validate_inlines(self, cls, model): " Validate inline model admin classes. " from django.contrib.admin.options import BaseModelAdmin if hasattr(cls, 'inlines'): check_isseq(cls, 'inlines', cls.inlines) for idx, inline in enumerate(cls.inlines): if not issubclass(inline, BaseModelAdmin): raise ImproperlyConfigured("'%s.inlines[%d]' does not inherit " "from BaseModelAdmin." % (cls.__name__, idx)) if not inline.model: raise ImproperlyConfigured("'model' is a required attribute " "of '%s.inlines[%d]'." % (cls.__name__, idx)) if not issubclass(inline.model, models.Model): raise ImproperlyConfigured("'%s.inlines[%d].model' does not " "inherit from models.Model." % (cls.__name__, idx)) inline.validate(inline.model) self.check_inline(inline, model) def check_inline(self, cls, parent_model): " Validate inline class's fk field is not excluded. " fk = _get_foreign_key(parent_model, cls.model, fk_name=cls.fk_name, can_fail=True) if hasattr(cls, 'exclude') and cls.exclude: if fk and fk.name in cls.exclude: raise ImproperlyConfigured("%s cannot exclude the field " "'%s' - this is the foreign key to the parent model " "%s.%s." 
% (cls.__name__, fk.name, parent_model._meta.app_label, parent_model.__name__)) def validate_list_display(self, cls, model): " Validate that list_display only contains fields or usable attributes. " if hasattr(cls, 'list_display'): check_isseq(cls, 'list_display', cls.list_display) for idx, field in enumerate(cls.list_display): if not callable(field): if not hasattr(cls, field): if not hasattr(model, field): try: model._meta.get_field(field) except models.FieldDoesNotExist: raise ImproperlyConfigured("%s.list_display[%d], %r is not a callable or an attribute of %r or found in the model %r." % (cls.__name__, idx, field, cls.__name__, model._meta.object_name)) else: # getattr(model, field) could be an X_RelatedObjectsDescriptor f = fetch_attr(cls, model, "list_display[%d]" % idx, field) if isinstance(f, models.ManyToManyField): raise ImproperlyConfigured("'%s.list_display[%d]', '%s' is a ManyToManyField which is not supported." % (cls.__name__, idx, field)) def validate_list_display_links(self, cls, model): " Validate that list_display_links is a unique subset of list_display. " if hasattr(cls, 'list_display_links'): check_isseq(cls, 'list_display_links', cls.list_display_links) for idx, field in enumerate(cls.list_display_links): if field not in cls.list_display: raise ImproperlyConfigured("'%s.list_display_links[%d]' " "refers to '%s' which is not defined in 'list_display'." % (cls.__name__, idx, field)) def validate_list_filter(self, cls, model): """ Validate that list_filter is a sequence of one of three options: 1: 'field' - a basic field filter, possibly w/ relationships (eg, 'field__rel') 2: ('field', SomeFieldListFilter) - a field-based list filter class 3: SomeListFilter - a non-field list filter class """ from django.contrib.admin import ListFilter, FieldListFilter if hasattr(cls, 'list_filter'): check_isseq(cls, 'list_filter', cls.list_filter) for idx, item in enumerate(cls.list_filter): if callable(item) and not isinstance(item, models.Field): # If item is option 3, it should be a ListFilter... if not issubclass(item, ListFilter): raise ImproperlyConfigured("'%s.list_filter[%d]' is '%s'" " which is not a descendant of ListFilter." % (cls.__name__, idx, item.__name__)) # ... but not a FieldListFilter. if issubclass(item, FieldListFilter): raise ImproperlyConfigured("'%s.list_filter[%d]' is '%s'" " which is of type FieldListFilter but is not" " associated with a field name." % (cls.__name__, idx, item.__name__)) else: if isinstance(item, (tuple, list)): # item is option #2 field, list_filter_class = item if not issubclass(list_filter_class, FieldListFilter): raise ImproperlyConfigured("'%s.list_filter[%d][1]'" " is '%s' which is not of type FieldListFilter." % (cls.__name__, idx, list_filter_class.__name__)) else: # item is option #1 field = item # Validate the field string try: get_fields_from_path(model, field) except (NotRelationField, FieldDoesNotExist): raise ImproperlyConfigured("'%s.list_filter[%d]' refers to '%s'" " which does not refer to a Field." % (cls.__name__, idx, field)) def validate_list_select_related(self, cls, model): " Validate that list_select_related is a boolean, a list or a tuple. " list_select_related = getattr(cls, 'list_select_related', None) if list_select_related: types = (bool, tuple, list) if not isinstance(list_select_related, types): raise ImproperlyConfigured("'%s.list_select_related' should be " "either a bool, a tuple or a list" % cls.__name__) def validate_list_per_page(self, cls, model): " Validate that list_per_page is an integer. 
" check_type(cls, 'list_per_page', int) def validate_list_max_show_all(self, cls, model): " Validate that list_max_show_all is an integer. " check_type(cls, 'list_max_show_all', int) def validate_list_editable(self, cls, model): """ Validate that list_editable is a sequence of editable fields from list_display without first element. """ if hasattr(cls, 'list_editable') and cls.list_editable: check_isseq(cls, 'list_editable', cls.list_editable) for idx, field_name in enumerate(cls.list_editable): try: field = model._meta.get_field_by_name(field_name)[0] except models.FieldDoesNotExist: raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a " "field, '%s', not defined on %s.%s." % (cls.__name__, idx, field_name, model._meta.app_label, model.__name__)) if field_name not in cls.list_display: raise ImproperlyConfigured("'%s.list_editable[%d]' refers to " "'%s' which is not defined in 'list_display'." % (cls.__name__, idx, field_name)) if field_name in cls.list_display_links: raise ImproperlyConfigured("'%s' cannot be in both '%s.list_editable'" " and '%s.list_display_links'" % (field_name, cls.__name__, cls.__name__)) if not cls.list_display_links and cls.list_display[0] in cls.list_editable: raise ImproperlyConfigured("'%s.list_editable[%d]' refers to" " the first field in list_display, '%s', which can't be" " used unless list_display_links is set." % (cls.__name__, idx, cls.list_display[0])) if not field.editable: raise ImproperlyConfigured("'%s.list_editable[%d]' refers to a " "field, '%s', which isn't editable through the admin." % (cls.__name__, idx, field_name)) def validate_search_fields(self, cls, model): " Validate search_fields is a sequence. " if hasattr(cls, 'search_fields'): check_isseq(cls, 'search_fields', cls.search_fields) def validate_date_hierarchy(self, cls, model): " Validate that date_hierarchy refers to DateField or DateTimeField. " if cls.date_hierarchy: f = get_field(cls, model, 'date_hierarchy', cls.date_hierarchy) if not isinstance(f, (models.DateField, models.DateTimeField)): raise ImproperlyConfigured("'%s.date_hierarchy is " "neither an instance of DateField nor DateTimeField." % cls.__name__) class InlineValidator(BaseValidator): def validate_fk_name(self, cls, model): " Validate that fk_name refers to a ForeignKey. " if cls.fk_name: # default value is None f = get_field(cls, model, 'fk_name', cls.fk_name) if not isinstance(f, models.ForeignKey): raise ImproperlyConfigured("'%s.fk_name is not an instance of " "models.ForeignKey." % cls.__name__) def validate_extra(self, cls, model): " Validate that extra is an integer. " check_type(cls, 'extra', int) def validate_max_num(self, cls, model): " Validate that max_num is an integer. " check_type(cls, 'max_num', int) def validate_formset(self, cls, model): " Validate formset is a subclass of BaseModelFormSet. " if hasattr(cls, 'formset') and not issubclass(cls.formset, BaseModelFormSet): raise ImproperlyConfigured("'%s.formset' does not inherit from " "BaseModelFormSet." % cls.__name__) def check_type(cls, attr, type_): if getattr(cls, attr, None) is not None and not isinstance(getattr(cls, attr), type_): raise ImproperlyConfigured("'%s.%s' should be a %s." % (cls.__name__, attr, type_.__name__ )) def check_isseq(cls, label, obj): if not isinstance(obj, (list, tuple)): raise ImproperlyConfigured("'%s.%s' must be a list or tuple." % (cls.__name__, label)) def check_isdict(cls, label, obj): if not isinstance(obj, dict): raise ImproperlyConfigured("'%s.%s' must be a dictionary." 
% (cls.__name__, label)) def get_field(cls, model, label, field): try: return model._meta.get_field(field) except models.FieldDoesNotExist: raise ImproperlyConfigured("'%s.%s' refers to field '%s' that is missing from model '%s.%s'." % (cls.__name__, label, field, model._meta.app_label, model.__name__)) def fetch_attr(cls, model, label, field): try: return model._meta.get_field(field) except models.FieldDoesNotExist: pass try: return getattr(model, field) except AttributeError: raise ImproperlyConfigured("'%s.%s' refers to '%s' that is neither a field, method or property of model '%s.%s'." % (cls.__name__, label, field, model._meta.app_label, model.__name__))
apache-2.0
mja054/swift_plugin
test/unit/obj/test_auditor.py
5
14845
# Copyright (c) 2010-2011 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from test import unit import unittest import tempfile import os import time from shutil import rmtree from hashlib import md5 from tempfile import mkdtemp from test.unit import FakeLogger from swift.obj import auditor from swift.obj import server as object_server from swift.obj.server import DiskFile, write_metadata, DATADIR from swift.common.utils import hash_path, mkdirs, normalize_timestamp, \ renamer, storage_directory from swift.obj.replicator import invalidate_hash from swift.common.exceptions import AuditException class TestAuditor(unittest.TestCase): def setUp(self): self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor') self.devices = os.path.join(self.testdir, 'node') self.logger = FakeLogger() rmtree(self.testdir, ignore_errors=1) mkdirs(os.path.join(self.devices, 'sda')) self.objects = os.path.join(self.devices, 'sda', 'objects') os.mkdir(os.path.join(self.devices, 'sdb')) self.objects_2 = os.path.join(self.devices, 'sdb', 'objects') os.mkdir(self.objects) self.parts = {} for part in ['0', '1', '2', '3']: self.parts[part] = os.path.join(self.objects, part) os.mkdir(os.path.join(self.objects, part)) self.conf = dict( devices=self.devices, mount_check='false') self.disk_file = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', self.logger) def tearDown(self): rmtree(os.path.dirname(self.testdir), ignore_errors=1) unit.xattr_data = {} def test_object_audit_extra_data(self): self.auditor = auditor.AuditorWorker(self.conf) data = '0' * 1024 etag = md5() with self.disk_file.mkstemp() as (fd, tmppath): os.write(fd, data) etag.update(data) etag = etag.hexdigest() timestamp = str(normalize_timestamp(time.time())) metadata = { 'ETag': etag, 'X-Timestamp': timestamp, 'Content-Length': str(os.fstat(fd).st_size), } self.disk_file.put(fd, tmppath, metadata) pre_quarantines = self.auditor.quarantines self.auditor.object_audit( os.path.join(self.disk_file.datadir, timestamp + '.data'), 'sda', '0') self.assertEquals(self.auditor.quarantines, pre_quarantines) os.write(fd, 'extra_data') self.auditor.object_audit( os.path.join(self.disk_file.datadir, timestamp + '.data'), 'sda', '0') self.assertEquals(self.auditor.quarantines, pre_quarantines + 1) def test_object_audit_diff_data(self): self.auditor = auditor.AuditorWorker(self.conf) data = '0' * 1024 etag = md5() timestamp = str(normalize_timestamp(time.time())) with self.disk_file.mkstemp() as (fd, tmppath): os.write(fd, data) etag.update(data) etag = etag.hexdigest() metadata = { 'ETag': etag, 'X-Timestamp': timestamp, 'Content-Length': str(os.fstat(fd).st_size), } self.disk_file.put(fd, tmppath, metadata) pre_quarantines = self.auditor.quarantines # remake so it will have metadata self.disk_file = DiskFile(self.devices, 'sda', '0', 'a', 'c', 'o', self.logger) self.auditor.object_audit( os.path.join(self.disk_file.datadir, timestamp + '.data'), 'sda', '0') self.assertEquals(self.auditor.quarantines, pre_quarantines) etag = md5() etag.update('1' + '0' * 1023) 
etag = etag.hexdigest() metadata['ETag'] = etag write_metadata(fd, metadata) self.auditor.object_audit( os.path.join(self.disk_file.datadir, timestamp + '.data'), 'sda', '0') self.assertEquals(self.auditor.quarantines, pre_quarantines + 1) def test_object_audit_no_meta(self): timestamp = str(normalize_timestamp(time.time())) path = os.path.join(self.disk_file.datadir, timestamp + '.data') mkdirs(self.disk_file.datadir) fp = open(path, 'w') fp.write('0' * 1024) fp.close() invalidate_hash(os.path.dirname(self.disk_file.datadir)) self.auditor = auditor.AuditorWorker(self.conf) pre_quarantines = self.auditor.quarantines self.auditor.object_audit( os.path.join(self.disk_file.datadir, timestamp + '.data'), 'sda', '0') self.assertEquals(self.auditor.quarantines, pre_quarantines + 1) def test_object_audit_bad_args(self): self.auditor = auditor.AuditorWorker(self.conf) pre_errors = self.auditor.errors self.auditor.object_audit(5, 'sda', '0') self.assertEquals(self.auditor.errors, pre_errors + 1) pre_errors = self.auditor.errors self.auditor.object_audit('badpath', 'sda', '0') self.assertEquals(self.auditor.errors, pre_errors) # just returns def test_object_run_once_pass(self): self.auditor = auditor.AuditorWorker(self.conf) self.auditor.log_time = 0 timestamp = str(normalize_timestamp(time.time())) pre_quarantines = self.auditor.quarantines data = '0' * 1024 etag = md5() with self.disk_file.mkstemp() as (fd, tmppath): os.write(fd, data) etag.update(data) etag = etag.hexdigest() metadata = { 'ETag': etag, 'X-Timestamp': timestamp, 'Content-Length': str(os.fstat(fd).st_size), } self.disk_file.put(fd, tmppath, metadata) self.disk_file.close() self.auditor.audit_all_objects() self.assertEquals(self.auditor.quarantines, pre_quarantines) def test_object_run_once_no_sda(self): self.auditor = auditor.AuditorWorker(self.conf) timestamp = str(normalize_timestamp(time.time())) pre_quarantines = self.auditor.quarantines data = '0' * 1024 etag = md5() with self.disk_file.mkstemp() as (fd, tmppath): os.write(fd, data) etag.update(data) etag = etag.hexdigest() metadata = { 'ETag': etag, 'X-Timestamp': timestamp, 'Content-Length': str(os.fstat(fd).st_size), } self.disk_file.put(fd, tmppath, metadata) self.disk_file.close() os.write(fd, 'extra_data') self.auditor.audit_all_objects() self.assertEquals(self.auditor.quarantines, pre_quarantines + 1) def test_object_run_once_multi_devices(self): self.auditor = auditor.AuditorWorker(self.conf) timestamp = str(normalize_timestamp(time.time())) pre_quarantines = self.auditor.quarantines data = '0' * 10 etag = md5() with self.disk_file.mkstemp() as (fd, tmppath): os.write(fd, data) etag.update(data) etag = etag.hexdigest() metadata = { 'ETag': etag, 'X-Timestamp': timestamp, 'Content-Length': str(os.fstat(fd).st_size), } self.disk_file.put(fd, tmppath, metadata) self.disk_file.close() self.auditor.audit_all_objects() self.disk_file = DiskFile(self.devices, 'sdb', '0', 'a', 'c', 'ob', self.logger) data = '1' * 10 etag = md5() with self.disk_file.mkstemp() as (fd, tmppath): os.write(fd, data) etag.update(data) etag = etag.hexdigest() metadata = { 'ETag': etag, 'X-Timestamp': timestamp, 'Content-Length': str(os.fstat(fd).st_size), } self.disk_file.put(fd, tmppath, metadata) self.disk_file.close() os.write(fd, 'extra_data') self.auditor.audit_all_objects() self.assertEquals(self.auditor.quarantines, pre_quarantines + 1) def test_object_run_fast_track_non_zero(self): self.auditor = auditor.ObjectAuditor(self.conf) self.auditor.log_time = 0 data = '0' * 1024 etag = md5() with 
self.disk_file.mkstemp() as (fd, tmppath): os.write(fd, data) etag.update(data) etag = etag.hexdigest() metadata = { 'ETag': etag, 'X-Timestamp': str(normalize_timestamp(time.time())), 'Content-Length': str(os.fstat(fd).st_size), } self.disk_file.put(fd, tmppath, metadata) etag = md5() etag.update('1' + '0' * 1023) etag = etag.hexdigest() metadata['ETag'] = etag write_metadata(fd, metadata) quarantine_path = os.path.join(self.devices, 'sda', 'quarantined', 'objects') self.auditor.run_once(zero_byte_fps=50) self.assertFalse(os.path.isdir(quarantine_path)) self.auditor.run_once() self.assertTrue(os.path.isdir(quarantine_path)) def setup_bad_zero_byte(self, with_ts=False): self.auditor = auditor.ObjectAuditor(self.conf) self.auditor.log_time = 0 ts_file_path = '' if with_ts: name_hash = hash_path('a', 'c', 'o') dir_path = os.path.join(self.devices, 'sda', storage_directory(DATADIR, '0', name_hash)) ts_file_path = os.path.join(dir_path, '99999.ts') if not os.path.exists(dir_path): mkdirs(dir_path) fp = open(ts_file_path, 'w') fp.close() etag = md5() with self.disk_file.mkstemp() as (fd, tmppath): etag = etag.hexdigest() metadata = { 'ETag': etag, 'X-Timestamp': str(normalize_timestamp(time.time())), 'Content-Length': 10, } self.disk_file.put(fd, tmppath, metadata) etag = md5() etag = etag.hexdigest() metadata['ETag'] = etag write_metadata(fd, metadata) if self.disk_file.data_file: return self.disk_file.data_file return ts_file_path def test_object_run_fast_track_all(self): self.setup_bad_zero_byte() self.auditor.run_once() quarantine_path = os.path.join(self.devices, 'sda', 'quarantined', 'objects') self.assertTrue(os.path.isdir(quarantine_path)) def test_object_run_fast_track_zero(self): self.setup_bad_zero_byte() self.auditor.run_once(zero_byte_fps=50) quarantine_path = os.path.join(self.devices, 'sda', 'quarantined', 'objects') self.assertTrue(os.path.isdir(quarantine_path)) def test_with_tombstone(self): ts_file_path = self.setup_bad_zero_byte(with_ts=True) self.auditor.run_once() quarantine_path = os.path.join(self.devices, 'sda', 'quarantined', 'objects') self.assertTrue(ts_file_path.endswith('ts')) self.assertTrue(os.path.exists(ts_file_path)) def test_sleeper(self): auditor.SLEEP_BETWEEN_AUDITS = 0.01 my_auditor = auditor.ObjectAuditor(self.conf) start = time.time() my_auditor._sleep() self.assertEquals(round(time.time() - start, 2), 0.01) def test_object_run_fast_track_zero_check_closed(self): rat = [False] class FakeFile(DiskFile): def close(self, verify_file=True): rat[0] = True DiskFile.close(self, verify_file=verify_file) self.setup_bad_zero_byte() was_df = object_server.DiskFile try: object_server.DiskFile = FakeFile self.auditor.run_once(zero_byte_fps=50) quarantine_path = os.path.join(self.devices, 'sda', 'quarantined', 'objects') self.assertTrue(os.path.isdir(quarantine_path)) self.assertTrue(rat[0]) finally: object_server.DiskFile = was_df def test_run_forever(self): class StopForever(Exception): pass class ObjectAuditorMock(object): check_args = () check_kwargs = {} fork_called = 0 fork_res = 0 def mock_run(self, *args, **kwargs): self.check_args = args self.check_kwargs = kwargs def mock_sleep(self): raise StopForever('stop') def mock_fork(self): self.fork_called += 1 return self.fork_res my_auditor = auditor.ObjectAuditor(dict(devices=self.devices, mount_check='false', zero_byte_files_per_second=89)) mocker = ObjectAuditorMock() my_auditor.run_once = mocker.mock_run my_auditor._sleep = mocker.mock_sleep was_fork = os.fork try: os.fork = mocker.mock_fork 
self.assertRaises(StopForever, my_auditor.run_forever, zero_byte_fps=50) self.assertEquals(mocker.check_kwargs['zero_byte_fps'], 50) self.assertEquals(mocker.fork_called, 0) self.assertRaises(StopForever, my_auditor.run_forever) self.assertEquals(mocker.fork_called, 1) self.assertEquals(mocker.check_args, ()) mocker.fork_res = 1 self.assertRaises(StopForever, my_auditor.run_forever) self.assertEquals(mocker.fork_called, 2) self.assertEquals(mocker.check_kwargs['zero_byte_fps'], 89) finally: os.fork = was_fork if __name__ == '__main__': unittest.main()
apache-2.0
isovic/samscripts
src/vcffilter.py
1
8768
#! /usr/bin/env python import re; import os; import sys; def extract_region(vcf_file, chrname, start_pos, end_pos): fp_in = None; try: fp_in = open(vcf_file, 'r'); except IOError: sys.stderr.write('[%s] ERROR: Could not open file "%s" for reading!' % (__name__, vcf_file)); exit(1); num_accepted = 0; num_rejected = 0; fp_out = sys.stdout; pos_position = -1; i = 0; for line in fp_in: i += 1; if ((i % 1000) == 0): sys.stderr.write('\rLine %d, num_accepted: %d, num_rejected: %d' % (i, num_accepted, num_rejected)); if (len(line.strip()) == 0 or line[0] == '#'): fp_out.write(line); if (len(line) > 1 and line[1] != '#'): params = line[1:].split('\t'); current_param = 0; for param in params: if (param.lower() == 'pos'): pos_position = current_param; break; current_param += 1; continue; split_line = line.strip().split('\t'); if ((i % 1000) == 0): sys.stderr.write(', chr: "%s"' % (split_line[0])); if (split_line[0] == chrname): if (pos_position < 0): sys.stderr.write('ERROR: Could not find parameter description line, needed to determine the POS parameter!\n'); exit(1); pos = int(split_line[pos_position]); if (pos >= start_pos and pos <= end_pos): fp_out.write(line); num_accepted += 1; else: num_rejected += 1; else: num_rejected += 1; fp_in.close(); sys.stderr.write('\n'); sys.stderr.write('num_accepted = %d (%.2f%%)\n' % (num_accepted, (float(num_accepted) / float(num_accepted + num_rejected)) * 100.0)); sys.stderr.write('num_rejected = %d (%.2f%%)\n' % (num_rejected, (float(num_rejected) / float(num_accepted + num_rejected)) * 100.0)); def split_multiallelic_snps(vcf_file): fp_in = None; try: fp_in = open(vcf_file, 'r'); except IOError: sys.stderr.write('[%s] ERROR: Could not open file "%s" for reading!' % (__name__, vcf_file)); exit(1); fp_out = sys.stdout; alt_position = -1; i = 0; for line in fp_in: i += 1; if ((i % 1000) == 0): sys.stderr.write('\rLine %d, num_accepted: %d, num_rejected: %d' % (i, num_accepted, num_rejected)); if (len(line.strip()) == 0 or line[0] == '#'): fp_out.write(line); if (len(line) > 1 and line[1] != '#'): params = line[1:].split('\t'); current_param = 0; for param in params: if (param.lower() == 'alt'): alt_position = current_param; break; current_param += 1; continue; split_line = line.strip().split('\t'); if ((i % 1000) == 0): sys.stderr.write(', chr: "%s"' % (split_line[0])); if (alt_position < 0): sys.stderr.write('ERROR: Could not find parameter description line, needed to determine the ALT parameter!\n'); exit(1); if ((',' in split_line[alt_position]) == False): fp_out.write(line); else: split_alt = split_line[alt_position].split(','); for alt in split_alt: split_line[alt_position] = alt; fp_out.write('\t'.join(split_line) + '\n'); fp_in.close(); sys.stderr.write('\n'); def vcf_sort(vcf_file): fp_in = None; try: fp_in = open(vcf_file, 'r'); except IOError: sys.stderr.write('[%s] ERROR: Could not open file "%s" for reading!' 
% (__name__, vcf_file)); exit(1); fp_out = sys.stdout; pos_position = -1; vcf_header = []; vcf_lines = []; i = 0; for line in fp_in: i += 1; if ((i % 1000) == 0): sys.stderr.write('\rLine %d, num_accepted: %d, num_rejected: %d' % (i, num_accepted, num_rejected)); if (len(line.strip()) == 0 or line[0] == '#'): # fp_out.write(line); vcf_header.append(line.strip()); if (len(line) > 1 and line[1] != '#'): params = line[1:].split('\t'); current_param = 0; for param in params: if (param.lower() == 'pos'): pos_position = current_param; break; current_param += 1; continue; split_line = line.strip().split('\t'); if ((i % 1000) == 0): sys.stderr.write(', chr: "%s"' % (split_line[0])); if (pos_position < 0): sys.stderr.write('ERROR: Could not find parameter description line, needed to determine the POS parameter!\n'); exit(1); vcf_lines.append([int(split_line[pos_position]), line.strip()]); fp_in.close(); sys.stderr.write('\n'); sorted_vcf_lines = sorted(vcf_lines, key=lambda x: x[0]); fp_out.write('\n'.join(vcf_header) + '\n'); fp_out.write('\n'.join([line[1] for line in sorted_vcf_lines]) + '\n'); def count_nonpass_variants(vcf_file, verbose=True): fp_in = None; try: fp_in = open(vcf_file, 'r'); except IOError: sys.stderr.write('[%s] ERROR: Could not open file "%s" for reading!' % (__name__, vcf_file)); exit(1); fp_out = sys.stdout; filter_position = -1; info_position = -1; num_pass_snps = 0; num_nonpass_snps = 0; i = 0; for line in fp_in: i += 1; if ((i % 1000) == 0): sys.stderr.write('\rLine %d, num_accepted: %d, num_rejected: %d' % (i, num_accepted, num_rejected)); line = line.strip(); if (len(line) == 0 or line[0] == '#'): if (len(line) > 1 and line[1] != '#'): params = line[1:].split('\t'); current_param = 0; for param in params: if (param.lower() == 'filter'): filter_position = current_param; if (param.lower() == 'info'): info_position = current_param; current_param += 1; continue; split_line = line.strip().split('\t'); if ((i % 1000) == 0): sys.stderr.write(', chr: "%s"' % (split_line[0])); if (filter_position < 0 or info_position < 0): sys.stderr.write('ERROR: Could not find parameter description line, needed to determine the POS parameter!\n'); exit(1); # vcf_lines.append([int(split_line[pos_position]), line.strip()]); split_info = split_line[info_position].split(';'); info_vals = {}; for value in split_info: split_value = value.split('='); if (len(split_value) != 2): continue; try: info_vals[split_value[0].lower()] = split_value[1].lower(); except Exception, e: sys.stderr.write('value = "%s"\n' % value); sys.stderr.write(str(e)); exit(1); if ('vartype' in info_vals.keys()): vartype = info_vals['vartype']; elif ('type' in info_vals.keys()): vartype = info_vals['type']; else: # sys.stderr.write('ERROR: VCF line does not contain varType info! 
Counting it as a SNP.\n'); # sys.stderr.write(line); # sys.stderr.write(str(info_vals.keys()) + '\n'); vartype = 'snp'; # continue; if (vartype.lower().split(',')[0] != 'snp'): continue; if (split_line[filter_position].lower() == 'pass'): # print '[%d, %d] PASS: %s' % (num_pass_snps, num_nonpass_snps, line.strip()); num_pass_snps += 1; else: # print '[%d, %d] Non-PASS: %s' % (num_pass_snps, num_nonpass_snps, line.strip()); num_nonpass_snps += 1; fp_in.close(); if (verbose == True): sys.stdout.write('%d\t%d\n' % (num_pass_snps, num_nonpass_snps)); return [num_pass_snps, num_nonpass_snps]; if __name__ == "__main__": if (len(sys.argv) < 2): sys.stderr.write('Various filtering methods for filtering VCF files.\n'); sys.stderr.write('Usage:\n'); sys.stderr.write('\tregion\n'); sys.stderr.write('\tsplitsnps\n'); sys.stderr.write('\tsort\n'); sys.stderr.write('\tcountuncertsnps\n'); exit(0); if (sys.argv[1] == 'region'): if (len(sys.argv) != 6): sys.stderr.write('Extracts only variant lines which fall within a defined region. Output is on stdout. Comment lines are output as well.\n'); sys.stderr.write('Usage:\n'); sys.stderr.write('\t%s %s <input_vcf_file> chrname startpos endpos\n' % (sys.argv[0], sys.argv[1])); exit(0); vcf_file = sys.argv[2]; chrname = sys.argv[3]; start_pos = int(sys.argv[4]); end_pos = int(sys.argv[5]); extract_region(vcf_file, chrname, start_pos, end_pos); exit(0); elif (sys.argv[1] == 'splitsnps'): if (len(sys.argv) != 3): sys.stderr.write('Splits multiallelic SNP variants into separate lines.\n'); sys.stderr.write('Usage:\n'); sys.stderr.write('\t%s %s <input_vcf_file>\n' % (sys.argv[0], sys.argv[1])); exit(0); vcf_file = sys.argv[2]; split_multiallelic_snps(vcf_file); exit(0); elif (sys.argv[1] == 'sort'): if (len(sys.argv) != 3): sys.stderr.write('Sorts a VCF file by position.\n'); sys.stderr.write('Usage:\n'); sys.stderr.write('\t%s %s <input_vcf_file>\n' % (sys.argv[0], sys.argv[1])); exit(0); vcf_file = sys.argv[2]; vcf_sort(vcf_file); exit(0); elif (sys.argv[1] == 'countuncertsnps'): if (len(sys.argv) != 3): sys.stderr.write('Count SNP variants with non PASS filter value.\n'); sys.stderr.write('Usage:\n'); sys.stderr.write('\t%s %s <input_vcf_file>\n' % (sys.argv[0], sys.argv[1])); exit(0); vcf_file = sys.argv[2]; count_nonpass_variants(vcf_file); exit(0); else: sys.stderr.write('ERROR: Unknown subcommand!\n'); exit(0);
mit
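`count_nonpass_variants` in the vcffilter.py record above splits the VCF INFO column on ';' and keeps only 'key=value' entries, lower-cased, to find the variant type. A minimal sketch of that parsing step is below; the helper name and the sample INFO string are invented for illustration and the sketch uses print() so it runs on either Python 2 or 3.

def parse_info(info_field):
    # Keep only entries of the form key=value; flags without '=' are ignored,
    # matching the behaviour of the loop in count_nonpass_variants.
    info_vals = {}
    for entry in info_field.split(';'):
        parts = entry.split('=')
        if len(parts) == 2:
            info_vals[parts[0].lower()] = parts[1].lower()
    return info_vals

if __name__ == '__main__':
    sample = 'DP=14;VarType=SNP;AF=0.5'
    print(parse_info(sample))  # {'dp': '14', 'vartype': 'snp', 'af': '0.5'}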
google/openhtf
openhtf/util/units.py
1
253414
# coding: utf-8 # THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. # Copyright 2016 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Units of measure for OpenHTF. THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. Used to retrieve UNECE unit codes by object, name, or suffix: from openhtf.util import units # The following three expressions are equivalent: units.METRE_PER_SECOND units.Unit('m/s') units.Unit('metre per second') OpenHTF uses UNECE unit codes internally because they are relatively complete and modern, and because they are recognized internationally. For full details regarding where we get the codes from and which units are available, see the docstring at the top of openhtf/util/units/bin/units_from_xls.py. THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. """ # pylint: disable=too-many-lines import collections class UnitDescriptor( collections.namedtuple('UnitDescriptor', [ 'name', 'code', 'suffix', ])): pass ALL_UNITS = [] # pylint: disable=line-too-long # NO_DIMENSION means that there are units set, but they cannot be expressed # by a known dimension (such as a ratio) NO_DIMENSION = UnitDescriptor('No dimension', 'NDL', None) ALL_UNITS.append(NO_DIMENSION) NONE = UnitDescriptor('None', None, None) LIFT = UnitDescriptor('lift', '05', '''''') ALL_UNITS.append(LIFT) SMALL_SPRAY = UnitDescriptor('small spray', '06', '''''') ALL_UNITS.append(SMALL_SPRAY) HEAT_LOT = UnitDescriptor('heat lot', '08', '''''') ALL_UNITS.append(HEAT_LOT) GROUP = UnitDescriptor('group', '10', '''''') ALL_UNITS.append(GROUP) OUTFIT = UnitDescriptor('outfit', '11', '''''') ALL_UNITS.append(OUTFIT) RATION = UnitDescriptor('ration', '13', '''''') ALL_UNITS.append(RATION) SHOT = UnitDescriptor('shot', '14', '''''') ALL_UNITS.append(SHOT) STICK_MILITARY = UnitDescriptor('stick, military', '15', '''''') ALL_UNITS.append(STICK_MILITARY) HUNDRED_FIFTEEN_KG_DRUM = UnitDescriptor('hundred fifteen kg drum', '16', '''''') ALL_UNITS.append(HUNDRED_FIFTEEN_KG_DRUM) HUNDRED_LB_DRUM = UnitDescriptor('hundred lb drum', '17', '''''') ALL_UNITS.append(HUNDRED_LB_DRUM) FIFTYFIVE_GALLON_US_DRUM = UnitDescriptor('fiftyfive gallon (US) drum', '18', '''''') ALL_UNITS.append(FIFTYFIVE_GALLON_US_DRUM) TANK_TRUCK = UnitDescriptor('tank truck', '19', '''''') ALL_UNITS.append(TANK_TRUCK) TWENTY_FOOT_CONTAINER = UnitDescriptor('twenty foot container', '20', '''''') ALL_UNITS.append(TWENTY_FOOT_CONTAINER) FORTY_FOOT_CONTAINER = UnitDescriptor('forty foot container', '21', '''''') ALL_UNITS.append(FORTY_FOOT_CONTAINER) DECILITRE_PER_GRAM = UnitDescriptor('decilitre per gram', '22', '''dl/g''') ALL_UNITS.append(DECILITRE_PER_GRAM) GRAM_PER_CUBIC_CENTIMETRE = UnitDescriptor('gram per cubic centimetre', '23', '''g/cm³''') ALL_UNITS.append(GRAM_PER_CUBIC_CENTIMETRE) THEORETICAL_POUND = UnitDescriptor('theoretical pound', '24', '''''') ALL_UNITS.append(THEORETICAL_POUND) GRAM_PER_SQUARE_CENTIMETRE = UnitDescriptor('gram per square centimetre', '25', '''g/cm²''') ALL_UNITS.append(GRAM_PER_SQUARE_CENTIMETRE) ACTUAL_TON = 
UnitDescriptor('actual ton', '26', '''''') ALL_UNITS.append(ACTUAL_TON) THEORETICAL_TON = UnitDescriptor('theoretical ton', '27', '''''') ALL_UNITS.append(THEORETICAL_TON) KILOGRAM_PER_SQUARE_METRE = UnitDescriptor('kilogram per square metre', '28', '''kg/m²''') ALL_UNITS.append(KILOGRAM_PER_SQUARE_METRE) POUND_PER_THOUSAND_SQUARE_FOOT = UnitDescriptor('pound per thousand square foot', '29', '''lb/kft²''') ALL_UNITS.append(POUND_PER_THOUSAND_SQUARE_FOOT) HORSE_POWER_DAY_PER_AIR_DRY_METRIC_TON = UnitDescriptor('horse power day per air dry metric ton', '30', '''''') ALL_UNITS.append(HORSE_POWER_DAY_PER_AIR_DRY_METRIC_TON) CATCH_WEIGHT = UnitDescriptor('catch weight', '31', '''''') ALL_UNITS.append(CATCH_WEIGHT) KILOGRAM_PER_AIR_DRY_METRIC_TON = UnitDescriptor('kilogram per air dry metric ton', '32', '''''') ALL_UNITS.append(KILOGRAM_PER_AIR_DRY_METRIC_TON) KILOPASCAL_SQUARE_METRE_PER_GRAM = UnitDescriptor('kilopascal square metre per gram', '33', '''kPa·m²/g''') ALL_UNITS.append(KILOPASCAL_SQUARE_METRE_PER_GRAM) KILOPASCAL_PER_MILLIMETRE = UnitDescriptor('kilopascal per millimetre', '34', '''kPa/mm''') ALL_UNITS.append(KILOPASCAL_PER_MILLIMETRE) MILLILITRE_PER_SQUARE_CENTIMETRE_SECOND = UnitDescriptor('millilitre per square centimetre second', '35', '''ml/(cm²·s)''') ALL_UNITS.append(MILLILITRE_PER_SQUARE_CENTIMETRE_SECOND) CUBIC_FOOT_PER_MINUTE_PER_SQUARE_FOOT = UnitDescriptor('cubic foot per minute per square foot', '36', '''ft³/(min/ft²)''') ALL_UNITS.append(CUBIC_FOOT_PER_MINUTE_PER_SQUARE_FOOT) OUNCE_PER_SQUARE_FOOT = UnitDescriptor('ounce per square foot', '37', '''oz/ft²''') ALL_UNITS.append(OUNCE_PER_SQUARE_FOOT) OUNCE_PER_SQUARE_FOOT_PER_0_01INCH = UnitDescriptor('ounce per square foot per 0,01inch', '38', '''oz/(ft²/cin)''') ALL_UNITS.append(OUNCE_PER_SQUARE_FOOT_PER_0_01INCH) MILLILITRE_PER_SECOND = UnitDescriptor('millilitre per second', '40', '''ml/s''') ALL_UNITS.append(MILLILITRE_PER_SECOND) MILLILITRE_PER_MINUTE = UnitDescriptor('millilitre per minute', '41', '''ml/min''') ALL_UNITS.append(MILLILITRE_PER_MINUTE) SUPER_BULK_BAG = UnitDescriptor('super bulk bag', '43', '''''') ALL_UNITS.append(SUPER_BULK_BAG) FIVEHUNDRED_KG_BULK_BAG = UnitDescriptor('fivehundred kg bulk bag', '44', '''''') ALL_UNITS.append(FIVEHUNDRED_KG_BULK_BAG) THREEHUNDRED_KG_BULK_BAG = UnitDescriptor('threehundred kg bulk bag', '45', '''''') ALL_UNITS.append(THREEHUNDRED_KG_BULK_BAG) FIFTY_LB_BULK_BAG = UnitDescriptor('fifty lb bulk bag', '46', '''''') ALL_UNITS.append(FIFTY_LB_BULK_BAG) FIFTY_LB_BAG = UnitDescriptor('fifty lb bag', '47', '''''') ALL_UNITS.append(FIFTY_LB_BAG) BULK_CAR_LOAD = UnitDescriptor('bulk car load', '48', '''''') ALL_UNITS.append(BULK_CAR_LOAD) THEORETICAL_KILOGRAM = UnitDescriptor('theoretical kilogram', '53', '''''') ALL_UNITS.append(THEORETICAL_KILOGRAM) THEORETICAL_TONNE = UnitDescriptor('theoretical tonne', '54', '''''') ALL_UNITS.append(THEORETICAL_TONNE) SITAS = UnitDescriptor('sitas', '56', '''''') ALL_UNITS.append(SITAS) MESH = UnitDescriptor('mesh', '57', '''''') ALL_UNITS.append(MESH) NET_KILOGRAM = UnitDescriptor('net kilogram', '58', '''''') ALL_UNITS.append(NET_KILOGRAM) PART_PER_MILLION = UnitDescriptor('part per million', '59', '''ppm''') ALL_UNITS.append(PART_PER_MILLION) PERCENT_WEIGHT = UnitDescriptor('percent weight', '60', '''''') ALL_UNITS.append(PERCENT_WEIGHT) PART_PER_BILLION_US = UnitDescriptor('part per billion (US)', '61', '''ppb''') ALL_UNITS.append(PART_PER_BILLION_US) PERCENT_PER_1000_HOUR = UnitDescriptor('percent per 1000 hour', '62', '''''') 
ALL_UNITS.append(PERCENT_PER_1000_HOUR) FAILURE_RATE_IN_TIME = UnitDescriptor('failure rate in time', '63', '''''') ALL_UNITS.append(FAILURE_RATE_IN_TIME) POUND_PER_SQUARE_INCH_GAUGE = UnitDescriptor('pound per square inch, gauge', '64', '''''') ALL_UNITS.append(POUND_PER_SQUARE_INCH_GAUGE) OERSTED = UnitDescriptor('oersted', '66', '''Oe''') ALL_UNITS.append(OERSTED) TEST_SPECIFIC_SCALE = UnitDescriptor('test specific scale', '69', '''''') ALL_UNITS.append(TEST_SPECIFIC_SCALE) VOLT_AMPERE_PER_POUND = UnitDescriptor('volt ampere per pound', '71', '''''') ALL_UNITS.append(VOLT_AMPERE_PER_POUND) WATT_PER_POUND = UnitDescriptor('watt per pound', '72', '''''') ALL_UNITS.append(WATT_PER_POUND) AMPERE_TUM_PER_CENTIMETRE = UnitDescriptor('ampere tum per centimetre', '73', '''''') ALL_UNITS.append(AMPERE_TUM_PER_CENTIMETRE) MILLIPASCAL = UnitDescriptor('millipascal', '74', '''mPa''') ALL_UNITS.append(MILLIPASCAL) GAUSS = UnitDescriptor('gauss', '76', '''Gs''') ALL_UNITS.append(GAUSS) MILLI_INCH = UnitDescriptor('milli-inch', '77', '''mil''') ALL_UNITS.append(MILLI_INCH) KILOGAUSS = UnitDescriptor('kilogauss', '78', '''kGs''') ALL_UNITS.append(KILOGAUSS) POUND_PER_SQUARE_INCH_ABSOLUTE = UnitDescriptor('pound per square inch absolute', '80', '''lb/in²''') ALL_UNITS.append(POUND_PER_SQUARE_INCH_ABSOLUTE) HENRY = UnitDescriptor('henry', '81', '''H''') ALL_UNITS.append(HENRY) KILOPOUND_FORCE_PER_SQUARE_INCH = UnitDescriptor('kilopound-force per square inch', '84', '''klbf/in²''') ALL_UNITS.append(KILOPOUND_FORCE_PER_SQUARE_INCH) FOOT_POUND_FORCE = UnitDescriptor('foot pound-force', '85', '''ft·lbf''') ALL_UNITS.append(FOOT_POUND_FORCE) POUND_PER_CUBIC_FOOT = UnitDescriptor('pound per cubic foot', '87', '''lb/ft³''') ALL_UNITS.append(POUND_PER_CUBIC_FOOT) POISE = UnitDescriptor('poise', '89', '''P''') ALL_UNITS.append(POISE) SAYBOLD_UNIVERSAL_SECOND = UnitDescriptor('Saybold universal second', '90', '''''') ALL_UNITS.append(SAYBOLD_UNIVERSAL_SECOND) STOKES = UnitDescriptor('stokes', '91', '''St''') ALL_UNITS.append(STOKES) CALORIE_PER_CUBIC_CENTIMETRE = UnitDescriptor('calorie per cubic centimetre', '92', '''''') ALL_UNITS.append(CALORIE_PER_CUBIC_CENTIMETRE) CALORIE_PER_GRAM = UnitDescriptor('calorie per gram', '93', '''cal/g''') ALL_UNITS.append(CALORIE_PER_GRAM) CURL_UNIT = UnitDescriptor('curl unit', '94', '''''') ALL_UNITS.append(CURL_UNIT) TWENTY_THOUSAND_GALLON_US_TANKCAR = UnitDescriptor('twenty thousand gallon (US) tankcar', '95', '''''') ALL_UNITS.append(TWENTY_THOUSAND_GALLON_US_TANKCAR) TEN_THOUSAND_GALLON_US_TANKCAR = UnitDescriptor('ten thousand gallon (US) tankcar', '96', '''''') ALL_UNITS.append(TEN_THOUSAND_GALLON_US_TANKCAR) TEN_KG_DRUM = UnitDescriptor('ten kg drum', '97', '''''') ALL_UNITS.append(TEN_KG_DRUM) FIFTEEN_KG_DRUM = UnitDescriptor('fifteen kg drum', '98', '''''') ALL_UNITS.append(FIFTEEN_KG_DRUM) CAR_MILE = UnitDescriptor('car mile', '1A', '''''') ALL_UNITS.append(CAR_MILE) CAR_COUNT = UnitDescriptor('car count', '1B', '''''') ALL_UNITS.append(CAR_COUNT) LOCOMOTIVE_COUNT = UnitDescriptor('locomotive count', '1C', '''''') ALL_UNITS.append(LOCOMOTIVE_COUNT) CABOOSE_COUNT = UnitDescriptor('caboose count', '1D', '''''') ALL_UNITS.append(CABOOSE_COUNT) EMPTY_CAR = UnitDescriptor('empty car', '1E', '''''') ALL_UNITS.append(EMPTY_CAR) TRAIN_MILE = UnitDescriptor('train mile', '1F', '''''') ALL_UNITS.append(TRAIN_MILE) FUEL_USAGE_GALLON_US = UnitDescriptor('fuel usage gallon (US)', '1G', '''''') ALL_UNITS.append(FUEL_USAGE_GALLON_US) CABOOSE_MILE = UnitDescriptor('caboose mile', 
'1H', '''''') ALL_UNITS.append(CABOOSE_MILE) FIXED_RATE = UnitDescriptor('fixed rate', '1I', '''''') ALL_UNITS.append(FIXED_RATE) TON_MILE = UnitDescriptor('ton mile', '1J', '''''') ALL_UNITS.append(TON_MILE) LOCOMOTIVE_MILE = UnitDescriptor('locomotive mile', '1K', '''''') ALL_UNITS.append(LOCOMOTIVE_MILE) TOTAL_CAR_COUNT = UnitDescriptor('total car count', '1L', '''''') ALL_UNITS.append(TOTAL_CAR_COUNT) TOTAL_CAR_MILE = UnitDescriptor('total car mile', '1M', '''''') ALL_UNITS.append(TOTAL_CAR_MILE) QUARTER_MILE = UnitDescriptor('quarter mile', '1X', '''''') ALL_UNITS.append(QUARTER_MILE) RADIAN_PER_SECOND = UnitDescriptor('radian per second', '2A', '''rad/s''') ALL_UNITS.append(RADIAN_PER_SECOND) RADIAN_PER_SECOND_SQUARED = UnitDescriptor('radian per second squared', '2B', '''rad/s²''') ALL_UNITS.append(RADIAN_PER_SECOND_SQUARED) ROENTGEN = UnitDescriptor('roentgen', '2C', '''R''') ALL_UNITS.append(ROENTGEN) VOLT_AC = UnitDescriptor('volt AC', '2G', '''V''') ALL_UNITS.append(VOLT_AC) VOLT_DC = UnitDescriptor('volt DC', '2H', '''V''') ALL_UNITS.append(VOLT_DC) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_HOUR = UnitDescriptor('British thermal unit (international table) per hour', '2I', '''BtuIT/h''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_HOUR) CUBIC_CENTIMETRE_PER_SECOND = UnitDescriptor('cubic centimetre per second', '2J', '''cm³/s''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_SECOND) CUBIC_FOOT_PER_HOUR = UnitDescriptor('cubic foot per hour', '2K', '''ft³/h''') ALL_UNITS.append(CUBIC_FOOT_PER_HOUR) CUBIC_FOOT_PER_MINUTE = UnitDescriptor('cubic foot per minute', '2L', '''ft³/min''') ALL_UNITS.append(CUBIC_FOOT_PER_MINUTE) CENTIMETRE_PER_SECOND = UnitDescriptor('centimetre per second', '2M', '''cm/s''') ALL_UNITS.append(CENTIMETRE_PER_SECOND) DECIBEL = UnitDescriptor('decibel', '2N', '''dB''') ALL_UNITS.append(DECIBEL) KILOBYTE = UnitDescriptor('kilobyte', '2P', '''kbyte''') ALL_UNITS.append(KILOBYTE) KILOBECQUEREL = UnitDescriptor('kilobecquerel', '2Q', '''kBq''') ALL_UNITS.append(KILOBECQUEREL) KILOCURIE = UnitDescriptor('kilocurie', '2R', '''kCi''') ALL_UNITS.append(KILOCURIE) MEGAGRAM = UnitDescriptor('megagram', '2U', '''Mg''') ALL_UNITS.append(MEGAGRAM) MEGAGRAM_PER_HOUR = UnitDescriptor('megagram per hour', '2V', '''Mg/h''') ALL_UNITS.append(MEGAGRAM_PER_HOUR) BIN = UnitDescriptor('bin', '2W', '''''') ALL_UNITS.append(BIN) METRE_PER_MINUTE = UnitDescriptor('metre per minute', '2X', '''m/min''') ALL_UNITS.append(METRE_PER_MINUTE) MILLIROENTGEN = UnitDescriptor('milliroentgen', '2Y', '''mR''') ALL_UNITS.append(MILLIROENTGEN) MILLIVOLT = UnitDescriptor('millivolt', '2Z', '''mV''') ALL_UNITS.append(MILLIVOLT) MEGAJOULE = UnitDescriptor('megajoule', '3B', '''MJ''') ALL_UNITS.append(MEGAJOULE) MANMONTH = UnitDescriptor('manmonth', '3C', '''''') ALL_UNITS.append(MANMONTH) POUND_PER_POUND_OF_PRODUCT = UnitDescriptor('pound per pound of product', '3E', '''''') ALL_UNITS.append(POUND_PER_POUND_OF_PRODUCT) POUND_PER_PIECE_OF_PRODUCT = UnitDescriptor('pound per piece of product', '3G', '''''') ALL_UNITS.append(POUND_PER_PIECE_OF_PRODUCT) KILOGRAM_PER_KILOGRAM_OF_PRODUCT = UnitDescriptor('kilogram per kilogram of product', '3H', '''''') ALL_UNITS.append(KILOGRAM_PER_KILOGRAM_OF_PRODUCT) KILOGRAM_PER_PIECE_OF_PRODUCT = UnitDescriptor('kilogram per piece of product', '3I', '''''') ALL_UNITS.append(KILOGRAM_PER_PIECE_OF_PRODUCT) BOBBIN = UnitDescriptor('bobbin', '4A', '''''') ALL_UNITS.append(BOBBIN) CAP = UnitDescriptor('cap', '4B', '''''') ALL_UNITS.append(CAP) CENTISTOKES = 
UnitDescriptor('centistokes', '4C', '''cSt''') ALL_UNITS.append(CENTISTOKES) TWENTY_PACK = UnitDescriptor('twenty pack', '4E', '''''') ALL_UNITS.append(TWENTY_PACK) MICROLITRE = UnitDescriptor('microlitre', '4G', '''µl''') ALL_UNITS.append(MICROLITRE) MICROMETRE_MICRON = UnitDescriptor('micrometre (micron)', '4H', '''µm''') ALL_UNITS.append(MICROMETRE_MICRON) MILLIAMPERE = UnitDescriptor('milliampere', '4K', '''mA''') ALL_UNITS.append(MILLIAMPERE) MEGABYTE = UnitDescriptor('megabyte', '4L', '''Mbyte''') ALL_UNITS.append(MEGABYTE) MILLIGRAM_PER_HOUR = UnitDescriptor('milligram per hour', '4M', '''mg/h''') ALL_UNITS.append(MILLIGRAM_PER_HOUR) MEGABECQUEREL = UnitDescriptor('megabecquerel', '4N', '''MBq''') ALL_UNITS.append(MEGABECQUEREL) MICROFARAD = UnitDescriptor('microfarad', '4O', '''µF''') ALL_UNITS.append(MICROFARAD) NEWTON_PER_METRE = UnitDescriptor('newton per metre', '4P', '''N/m''') ALL_UNITS.append(NEWTON_PER_METRE) OUNCE_INCH = UnitDescriptor('ounce inch', '4Q', '''oz·in''') ALL_UNITS.append(OUNCE_INCH) OUNCE_FOOT = UnitDescriptor('ounce foot', '4R', '''oz·ft''') ALL_UNITS.append(OUNCE_FOOT) PICOFARAD = UnitDescriptor('picofarad', '4T', '''pF''') ALL_UNITS.append(PICOFARAD) POUND_PER_HOUR = UnitDescriptor('pound per hour', '4U', '''lb/h''') ALL_UNITS.append(POUND_PER_HOUR) TON_US_PER_HOUR = UnitDescriptor('ton (US) per hour', '4W', '''ton (US) /h''') ALL_UNITS.append(TON_US_PER_HOUR) KILOLITRE_PER_HOUR = UnitDescriptor('kilolitre per hour', '4X', '''kl/h''') ALL_UNITS.append(KILOLITRE_PER_HOUR) BARREL_US_PER_MINUTE = UnitDescriptor('barrel (US) per minute', '5A', '''barrel (US)/min''') ALL_UNITS.append(BARREL_US_PER_MINUTE) BATCH = UnitDescriptor('batch', '5B', '''''') ALL_UNITS.append(BATCH) GALLONUS_PER_THOUSAND = UnitDescriptor('gallon(US) per thousand', '5C', '''''') ALL_UNITS.append(GALLONUS_PER_THOUSAND) MMSCF_PER_DAY = UnitDescriptor('MMSCF/day', '5E', '''''') ALL_UNITS.append(MMSCF_PER_DAY) POUND_PER_THOUSAND = UnitDescriptor('pound per thousand', '5F', '''''') ALL_UNITS.append(POUND_PER_THOUSAND) PUMP = UnitDescriptor('pump', '5G', '''''') ALL_UNITS.append(PUMP) STAGE = UnitDescriptor('stage', '5H', '''''') ALL_UNITS.append(STAGE) STANDARD_CUBIC_FOOT = UnitDescriptor('standard cubic foot', '5I', '''std''') ALL_UNITS.append(STANDARD_CUBIC_FOOT) HYDRAULIC_HORSE_POWER = UnitDescriptor('hydraulic horse power', '5J', '''''') ALL_UNITS.append(HYDRAULIC_HORSE_POWER) COUNT_PER_MINUTE = UnitDescriptor('count per minute', '5K', '''''') ALL_UNITS.append(COUNT_PER_MINUTE) SEISMIC_LEVEL = UnitDescriptor('seismic level', '5P', '''''') ALL_UNITS.append(SEISMIC_LEVEL) SEISMIC_LINE = UnitDescriptor('seismic line', '5Q', '''''') ALL_UNITS.append(SEISMIC_LINE) FIFTEEN_DEG_C_CALORIE = UnitDescriptor('15 °C calorie', 'A1', '''cal₁₅''') ALL_UNITS.append(FIFTEEN_DEG_C_CALORIE) AMPERE_SQUARE_METRE_PER_JOULE_SECOND = UnitDescriptor('ampere square metre per joule second', 'A10', '''A·m²/(J·s)''') ALL_UNITS.append(AMPERE_SQUARE_METRE_PER_JOULE_SECOND) ANGSTROM = UnitDescriptor('angstrom', 'A11', '''Å''') ALL_UNITS.append(ANGSTROM) ASTRONOMICAL_UNIT = UnitDescriptor('astronomical unit', 'A12', '''ua''') ALL_UNITS.append(ASTRONOMICAL_UNIT) ATTOJOULE = UnitDescriptor('attojoule', 'A13', '''aJ''') ALL_UNITS.append(ATTOJOULE) BARN = UnitDescriptor('barn', 'A14', '''b''') ALL_UNITS.append(BARN) BARN_PER_ELECTRONVOLT = UnitDescriptor('barn per electronvolt', 'A15', '''b/eV''') ALL_UNITS.append(BARN_PER_ELECTRONVOLT) BARN_PER_STERADIAN_ELECTRONVOLT = UnitDescriptor('barn per steradian electronvolt', 
'A16', '''b/(sr·eV)''') ALL_UNITS.append(BARN_PER_STERADIAN_ELECTRONVOLT) BARN_PER_STERADIAN = UnitDescriptor('barn per steradian', 'A17', '''b/sr''') ALL_UNITS.append(BARN_PER_STERADIAN) BECQUEREL_PER_KILOGRAM = UnitDescriptor('becquerel per kilogram', 'A18', '''Bq/kg''') ALL_UNITS.append(BECQUEREL_PER_KILOGRAM) BECQUEREL_PER_CUBIC_METRE = UnitDescriptor('becquerel per cubic metre', 'A19', '''Bq/m³''') ALL_UNITS.append(BECQUEREL_PER_CUBIC_METRE) AMPERE_PER_CENTIMETRE = UnitDescriptor('ampere per centimetre', 'A2', '''A/cm''') ALL_UNITS.append(AMPERE_PER_CENTIMETRE) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SECOND_SQUARE_FOOT_DEGREE_RANKINE = UnitDescriptor('British thermal unit (international table) per second square foot degree Rankine', 'A20', '''BtuIT/(s·ft²·°R)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SECOND_SQUARE_FOOT_DEGREE_RANKINE) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_POUND_DEGREE_RANKINE = UnitDescriptor('British thermal unit (international table) per pound degree Rankine', 'A21', '''BtuIT/(lb·°R)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_POUND_DEGREE_RANKINE) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SECOND_FOOT_DEGREE_RANKINE = UnitDescriptor('British thermal unit (international table) per second foot degree Rankine', 'A22', '''BtuIT/(s·ft·°R)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SECOND_FOOT_DEGREE_RANKINE) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_HOUR_SQUARE_FOOT_DEGREE_RANKINE = UnitDescriptor('British thermal unit (international table) per hour square foot degree Rankine', 'A23', '''BtuIT/(h·ft²·°R)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_HOUR_SQUARE_FOOT_DEGREE_RANKINE) CANDELA_PER_SQUARE_METRE = UnitDescriptor('candela per square metre', 'A24', '''cd/m²''') ALL_UNITS.append(CANDELA_PER_SQUARE_METRE) CHEVAL_VAPEUR = UnitDescriptor('cheval vapeur', 'A25', '''CV''') ALL_UNITS.append(CHEVAL_VAPEUR) COULOMB_METRE = UnitDescriptor('coulomb metre', 'A26', '''C·m''') ALL_UNITS.append(COULOMB_METRE) COULOMB_METRE_SQUARED_PER_VOLT = UnitDescriptor('coulomb metre squared per volt', 'A27', '''C·m²/V''') ALL_UNITS.append(COULOMB_METRE_SQUARED_PER_VOLT) COULOMB_PER_CUBIC_CENTIMETRE = UnitDescriptor('coulomb per cubic centimetre', 'A28', '''C/cm³''') ALL_UNITS.append(COULOMB_PER_CUBIC_CENTIMETRE) COULOMB_PER_CUBIC_METRE = UnitDescriptor('coulomb per cubic metre', 'A29', '''C/m³''') ALL_UNITS.append(COULOMB_PER_CUBIC_METRE) AMPERE_PER_MILLIMETRE = UnitDescriptor('ampere per millimetre', 'A3', '''A/mm''') ALL_UNITS.append(AMPERE_PER_MILLIMETRE) COULOMB_PER_CUBIC_MILLIMETRE = UnitDescriptor('coulomb per cubic millimetre', 'A30', '''C/mm³''') ALL_UNITS.append(COULOMB_PER_CUBIC_MILLIMETRE) COULOMB_PER_KILOGRAM_SECOND = UnitDescriptor('coulomb per kilogram second', 'A31', '''C/(kg·s)''') ALL_UNITS.append(COULOMB_PER_KILOGRAM_SECOND) COULOMB_PER_MOLE = UnitDescriptor('coulomb per mole', 'A32', '''C/mol''') ALL_UNITS.append(COULOMB_PER_MOLE) COULOMB_PER_SQUARE_CENTIMETRE = UnitDescriptor('coulomb per square centimetre', 'A33', '''C/cm²''') ALL_UNITS.append(COULOMB_PER_SQUARE_CENTIMETRE) COULOMB_PER_SQUARE_METRE = UnitDescriptor('coulomb per square metre', 'A34', '''C/m²''') ALL_UNITS.append(COULOMB_PER_SQUARE_METRE) COULOMB_PER_SQUARE_MILLIMETRE = UnitDescriptor('coulomb per square millimetre', 'A35', '''C/mm²''') ALL_UNITS.append(COULOMB_PER_SQUARE_MILLIMETRE) CUBIC_CENTIMETRE_PER_MOLE = UnitDescriptor('cubic centimetre per mole', 'A36', '''cm³/mol''') 
ALL_UNITS.append(CUBIC_CENTIMETRE_PER_MOLE) CUBIC_DECIMETRE_PER_MOLE = UnitDescriptor('cubic decimetre per mole', 'A37', '''dm³/mol''') ALL_UNITS.append(CUBIC_DECIMETRE_PER_MOLE) CUBIC_METRE_PER_COULOMB = UnitDescriptor('cubic metre per coulomb', 'A38', '''m³/C''') ALL_UNITS.append(CUBIC_METRE_PER_COULOMB) CUBIC_METRE_PER_KILOGRAM = UnitDescriptor('cubic metre per kilogram', 'A39', '''m³/kg''') ALL_UNITS.append(CUBIC_METRE_PER_KILOGRAM) AMPERE_PER_SQUARE_CENTIMETRE = UnitDescriptor('ampere per square centimetre', 'A4', '''A/cm²''') ALL_UNITS.append(AMPERE_PER_SQUARE_CENTIMETRE) CUBIC_METRE_PER_MOLE = UnitDescriptor('cubic metre per mole', 'A40', '''m³/mol''') ALL_UNITS.append(CUBIC_METRE_PER_MOLE) AMPERE_PER_SQUARE_METRE = UnitDescriptor('ampere per square metre', 'A41', '''A/m²''') ALL_UNITS.append(AMPERE_PER_SQUARE_METRE) CURIE_PER_KILOGRAM = UnitDescriptor('curie per kilogram', 'A42', '''Ci/kg''') ALL_UNITS.append(CURIE_PER_KILOGRAM) DEADWEIGHT_TONNAGE = UnitDescriptor('deadweight tonnage', 'A43', '''dwt''') ALL_UNITS.append(DEADWEIGHT_TONNAGE) DECALITRE = UnitDescriptor('decalitre', 'A44', '''dal''') ALL_UNITS.append(DECALITRE) DECAMETRE = UnitDescriptor('decametre', 'A45', '''dam''') ALL_UNITS.append(DECAMETRE) DECITEX = UnitDescriptor('decitex', 'A47', '''dtex (g/10km)''') ALL_UNITS.append(DECITEX) DEGREE_RANKINE = UnitDescriptor('degree Rankine', 'A48', '''°R''') ALL_UNITS.append(DEGREE_RANKINE) DENIER = UnitDescriptor('denier', 'A49', '''den (g/9 km)''') ALL_UNITS.append(DENIER) AMPERE_SQUARE_METRE = UnitDescriptor('ampere square metre', 'A5', '''A·m²''') ALL_UNITS.append(AMPERE_SQUARE_METRE) DYNE_SECOND_PER_CUBIC_CENTIMETRE = UnitDescriptor('dyne second per cubic centimetre', 'A50', '''dyn·s/cm³''') ALL_UNITS.append(DYNE_SECOND_PER_CUBIC_CENTIMETRE) DYNE_SECOND_PER_CENTIMETRE = UnitDescriptor('dyne second per centimetre', 'A51', '''dyn·s/cm''') ALL_UNITS.append(DYNE_SECOND_PER_CENTIMETRE) DYNE_SECOND_PER_CENTIMETRE_TO_THE_FIFTH_POWER = UnitDescriptor('dyne second per centimetre to the fifth power', 'A52', '''dyn·s/cm⁵''') ALL_UNITS.append(DYNE_SECOND_PER_CENTIMETRE_TO_THE_FIFTH_POWER) ELECTRONVOLT = UnitDescriptor('electronvolt', 'A53', '''eV''') ALL_UNITS.append(ELECTRONVOLT) ELECTRONVOLT_PER_METRE = UnitDescriptor('electronvolt per metre', 'A54', '''eV/m''') ALL_UNITS.append(ELECTRONVOLT_PER_METRE) ELECTRONVOLT_SQUARE_METRE = UnitDescriptor('electronvolt square metre', 'A55', '''eV·m²''') ALL_UNITS.append(ELECTRONVOLT_SQUARE_METRE) ELECTRONVOLT_SQUARE_METRE_PER_KILOGRAM = UnitDescriptor('electronvolt square metre per kilogram', 'A56', '''eV·m²/kg''') ALL_UNITS.append(ELECTRONVOLT_SQUARE_METRE_PER_KILOGRAM) ERG = UnitDescriptor('erg', 'A57', '''erg''') ALL_UNITS.append(ERG) ERG_PER_CENTIMETRE = UnitDescriptor('erg per centimetre', 'A58', '''erg/cm''') ALL_UNITS.append(ERG_PER_CENTIMETRE) EIGHT_PART_CLOUD_COVER = UnitDescriptor('8-part cloud cover', 'A59', '''''') ALL_UNITS.append(EIGHT_PART_CLOUD_COVER) AMPERE_PER_SQUARE_METRE_KELVIN_SQUARED = UnitDescriptor('ampere per square metre kelvin squared', 'A6', '''A/(m²·K²)''') ALL_UNITS.append(AMPERE_PER_SQUARE_METRE_KELVIN_SQUARED) ERG_PER_CUBIC_CENTIMETRE = UnitDescriptor('erg per cubic centimetre', 'A60', '''erg/cm³''') ALL_UNITS.append(ERG_PER_CUBIC_CENTIMETRE) ERG_PER_GRAM = UnitDescriptor('erg per gram', 'A61', '''erg/g''') ALL_UNITS.append(ERG_PER_GRAM) ERG_PER_GRAM_SECOND = UnitDescriptor('erg per gram second', 'A62', '''erg/g·s''') ALL_UNITS.append(ERG_PER_GRAM_SECOND) ERG_PER_SECOND = UnitDescriptor('erg per second', 'A63', 
'''erg/s''') ALL_UNITS.append(ERG_PER_SECOND) ERG_PER_SECOND_SQUARE_CENTIMETRE = UnitDescriptor('erg per second square centimetre', 'A64', '''erg/(s·cm²)''') ALL_UNITS.append(ERG_PER_SECOND_SQUARE_CENTIMETRE) ERG_PER_SQUARE_CENTIMETRE_SECOND = UnitDescriptor('erg per square centimetre second', 'A65', '''erg/(cm²·s)''') ALL_UNITS.append(ERG_PER_SQUARE_CENTIMETRE_SECOND) ERG_SQUARE_CENTIMETRE = UnitDescriptor('erg square centimetre', 'A66', '''erg·cm²''') ALL_UNITS.append(ERG_SQUARE_CENTIMETRE) ERG_SQUARE_CENTIMETRE_PER_GRAM = UnitDescriptor('erg square centimetre per gram', 'A67', '''erg·cm²/g''') ALL_UNITS.append(ERG_SQUARE_CENTIMETRE_PER_GRAM) EXAJOULE = UnitDescriptor('exajoule', 'A68', '''EJ''') ALL_UNITS.append(EXAJOULE) FARAD_PER_METRE = UnitDescriptor('farad per metre', 'A69', '''F/m''') ALL_UNITS.append(FARAD_PER_METRE) AMPERE_PER_SQUARE_MILLIMETRE = UnitDescriptor('ampere per square millimetre', 'A7', '''A/mm²''') ALL_UNITS.append(AMPERE_PER_SQUARE_MILLIMETRE) FEMTOJOULE = UnitDescriptor('femtojoule', 'A70', '''fJ''') ALL_UNITS.append(FEMTOJOULE) FEMTOMETRE = UnitDescriptor('femtometre', 'A71', '''fm''') ALL_UNITS.append(FEMTOMETRE) FOOT_PER_SECOND_SQUARED = UnitDescriptor('foot per second squared', 'A73', '''ft/s²''') ALL_UNITS.append(FOOT_PER_SECOND_SQUARED) FOOT_POUND_FORCE_PER_SECOND = UnitDescriptor('foot pound-force per second', 'A74', '''ft·lbf/s''') ALL_UNITS.append(FOOT_POUND_FORCE_PER_SECOND) FREIGHT_TON = UnitDescriptor('freight ton', 'A75', '''''') ALL_UNITS.append(FREIGHT_TON) GAL = UnitDescriptor('gal', 'A76', '''Gal''') ALL_UNITS.append(GAL) GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_DISPLACEMENT = UnitDescriptor('Gaussian CGS (Centimetre-Gram-Second system) unit of displacement', 'A77', '''''') ALL_UNITS.append(GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_DISPLACEMENT) GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_ELECTRIC_CURRENT = UnitDescriptor('Gaussian CGS (Centimetre-Gram-Second system) unit of electric current', 'A78', '''''') ALL_UNITS.append(GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_ELECTRIC_CURRENT) GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_ELECTRIC_CHARGE = UnitDescriptor('Gaussian CGS (Centimetre-Gram-Second system) unit of electric charge', 'A79', '''''') ALL_UNITS.append(GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_ELECTRIC_CHARGE) AMPERE_SECOND = UnitDescriptor('ampere second', 'A8', '''A·s''') ALL_UNITS.append(AMPERE_SECOND) GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_ELECTRIC_FIELD_STRENGTH = UnitDescriptor('Gaussian CGS (Centimetre-Gram-Second system) unit of electric field strength', 'A80', '''''') ALL_UNITS.append(GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_ELECTRIC_FIELD_STRENGTH) GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_ELECTRIC_POLARIZATION = UnitDescriptor('Gaussian CGS (Centimetre-Gram-Second system) unit of electric polarization', 'A81', '''''') ALL_UNITS.append(GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_ELECTRIC_POLARIZATION) GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_ELECTRIC_POTENTIAL = UnitDescriptor('Gaussian CGS (Centimetre-Gram-Second system) unit of electric potential', 'A82', '''''') ALL_UNITS.append(GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_ELECTRIC_POTENTIAL) GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_MAGNETIZATION = UnitDescriptor('Gaussian CGS (Centimetre-Gram-Second system) unit of magnetization', 'A83', '''''') ALL_UNITS.append(GAUSSIAN_CGS_CENTIMETRE_GRAM_SECOND_SYSTEM_UNIT_OF_MAGNETIZATION) GIGACOULOMB_PER_CUBIC_METRE = 
UnitDescriptor('gigacoulomb per cubic metre', 'A84', '''GC/m³''') ALL_UNITS.append(GIGACOULOMB_PER_CUBIC_METRE) GIGAELECTRONVOLT = UnitDescriptor('gigaelectronvolt', 'A85', '''GeV''') ALL_UNITS.append(GIGAELECTRONVOLT) GIGAHERTZ = UnitDescriptor('gigahertz', 'A86', '''GHz''') ALL_UNITS.append(GIGAHERTZ) GIGAOHM = UnitDescriptor('gigaohm', 'A87', '''GΩ''') ALL_UNITS.append(GIGAOHM) GIGAOHM_METRE = UnitDescriptor('gigaohm metre', 'A88', '''GΩ·m''') ALL_UNITS.append(GIGAOHM_METRE) GIGAPASCAL = UnitDescriptor('gigapascal', 'A89', '''GPa''') ALL_UNITS.append(GIGAPASCAL) RATE = UnitDescriptor('rate', 'A9', '''''') ALL_UNITS.append(RATE) GIGAWATT = UnitDescriptor('gigawatt', 'A90', '''GW''') ALL_UNITS.append(GIGAWATT) GON = UnitDescriptor('gon', 'A91', '''gon''') ALL_UNITS.append(GON) GRAM_PER_CUBIC_METRE = UnitDescriptor('gram per cubic metre', 'A93', '''g/m³''') ALL_UNITS.append(GRAM_PER_CUBIC_METRE) GRAM_PER_MOLE = UnitDescriptor('gram per mole', 'A94', '''g/mol''') ALL_UNITS.append(GRAM_PER_MOLE) GRAY = UnitDescriptor('gray', 'A95', '''Gy''') ALL_UNITS.append(GRAY) GRAY_PER_SECOND = UnitDescriptor('gray per second', 'A96', '''Gy/s''') ALL_UNITS.append(GRAY_PER_SECOND) HECTOPASCAL = UnitDescriptor('hectopascal', 'A97', '''hPa''') ALL_UNITS.append(HECTOPASCAL) HENRY_PER_METRE = UnitDescriptor('henry per metre', 'A98', '''H/m''') ALL_UNITS.append(HENRY_PER_METRE) BIT = UnitDescriptor('bit', 'A99', '''bit''') ALL_UNITS.append(BIT) BALL = UnitDescriptor('ball', 'AA', '''''') ALL_UNITS.append(BALL) BULK_PACK = UnitDescriptor('bulk pack', 'AB', '''pk''') ALL_UNITS.append(BULK_PACK) ACRE = UnitDescriptor('acre', 'ACR', '''acre''') ALL_UNITS.append(ACRE) ACTIVITY = UnitDescriptor('activity', 'ACT', '''''') ALL_UNITS.append(ACTIVITY) BYTE = UnitDescriptor('byte', 'AD', '''byte''') ALL_UNITS.append(BYTE) AMPERE_PER_METRE = UnitDescriptor('ampere per metre', 'AE', '''A/m''') ALL_UNITS.append(AMPERE_PER_METRE) ADDITIONAL_MINUTE = UnitDescriptor('additional minute', 'AH', '''''') ALL_UNITS.append(ADDITIONAL_MINUTE) AVERAGE_MINUTE_PER_CALL = UnitDescriptor('average minute per call', 'AI', '''''') ALL_UNITS.append(AVERAGE_MINUTE_PER_CALL) COP = UnitDescriptor('cop', 'AJ', '''''') ALL_UNITS.append(COP) FATHOM = UnitDescriptor('fathom', 'AK', '''fth''') ALL_UNITS.append(FATHOM) ACCESS_LINE = UnitDescriptor('access line', 'AL', '''''') ALL_UNITS.append(ACCESS_LINE) AMPOULE = UnitDescriptor('ampoule', 'AM', '''''') ALL_UNITS.append(AMPOULE) AMPERE_HOUR = UnitDescriptor('ampere hour', 'AMH', '''A·h''') ALL_UNITS.append(AMPERE_HOUR) AMPERE = UnitDescriptor('ampere', 'AMP', '''A''') ALL_UNITS.append(AMPERE) YEAR = UnitDescriptor('year', 'ANN', '''y''') ALL_UNITS.append(YEAR) ALUMINIUM_POUND_ONLY = UnitDescriptor('aluminium pound only', 'AP', '''''') ALL_UNITS.append(ALUMINIUM_POUND_ONLY) TROY_OUNCE_OR_APOTHECARY_OUNCE = UnitDescriptor('troy ounce or apothecary ounce', 'APZ', '''tr oz''') ALL_UNITS.append(TROY_OUNCE_OR_APOTHECARY_OUNCE) ANTI_HEMOPHILIC_FACTOR_AHF_UNIT = UnitDescriptor('anti-hemophilic factor (AHF) unit', 'AQ', '''''') ALL_UNITS.append(ANTI_HEMOPHILIC_FACTOR_AHF_UNIT) SUPPOSITORY = UnitDescriptor('suppository', 'AR', '''''') ALL_UNITS.append(SUPPOSITORY) ARE = UnitDescriptor('are', 'ARE', '''a''') ALL_UNITS.append(ARE) ASSORTMENT = UnitDescriptor('assortment', 'AS', '''''') ALL_UNITS.append(ASSORTMENT) ALCOHOLIC_STRENGTH_BY_MASS = UnitDescriptor('alcoholic strength by mass', 'ASM', '''''') ALL_UNITS.append(ALCOHOLIC_STRENGTH_BY_MASS) ALCOHOLIC_STRENGTH_BY_VOLUME = UnitDescriptor('alcoholic strength 
by volume', 'ASU', '''''') ALL_UNITS.append(ALCOHOLIC_STRENGTH_BY_VOLUME) STANDARD_ATMOSPHERE = UnitDescriptor('standard atmosphere', 'ATM', '''atm''') ALL_UNITS.append(STANDARD_ATMOSPHERE) TECHNICAL_ATMOSPHERE = UnitDescriptor('technical atmosphere', 'ATT', '''at''') ALL_UNITS.append(TECHNICAL_ATMOSPHERE) CAPSULE = UnitDescriptor('capsule', 'AV', '''''') ALL_UNITS.append(CAPSULE) POWDER_FILLED_VIAL = UnitDescriptor('powder filled vial', 'AW', '''''') ALL_UNITS.append(POWDER_FILLED_VIAL) AMERICAN_WIRE_GAUGE = UnitDescriptor('american wire gauge', 'AWG', '''AWG''') ALL_UNITS.append(AMERICAN_WIRE_GAUGE) ASSEMBLY = UnitDescriptor('assembly', 'AY', '''''') ALL_UNITS.append(ASSEMBLY) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_POUND = UnitDescriptor('British thermal unit (international table) per pound', 'AZ', '''BtuIT/lb''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_POUND) BTU_PER_CUBIC_FOOT = UnitDescriptor('Btu per cubic foot', 'B0', '''BTU/ft³''') ALL_UNITS.append(BTU_PER_CUBIC_FOOT) BARREL_US_PER_DAY = UnitDescriptor('barrel (US) per day', 'B1', '''barrel (US)/d''') ALL_UNITS.append(BARREL_US_PER_DAY) BIT_PER_SECOND = UnitDescriptor('bit per second', 'B10', '''bit/s''') ALL_UNITS.append(BIT_PER_SECOND) JOULE_PER_KILOGRAM_KELVIN = UnitDescriptor('joule per kilogram kelvin', 'B11', '''J/(kg·K)''') ALL_UNITS.append(JOULE_PER_KILOGRAM_KELVIN) JOULE_PER_METRE = UnitDescriptor('joule per metre', 'B12', '''J/m''') ALL_UNITS.append(JOULE_PER_METRE) JOULE_PER_SQUARE_METRE = UnitDescriptor('joule per square metre', 'B13', '''J/m²''') ALL_UNITS.append(JOULE_PER_SQUARE_METRE) JOULE_PER_METRE_TO_THE_FOURTH_POWER = UnitDescriptor('joule per metre to the fourth power', 'B14', '''J/m⁴''') ALL_UNITS.append(JOULE_PER_METRE_TO_THE_FOURTH_POWER) JOULE_PER_MOLE = UnitDescriptor('joule per mole', 'B15', '''J/mol''') ALL_UNITS.append(JOULE_PER_MOLE) JOULE_PER_MOLE_KELVIN = UnitDescriptor('joule per mole kelvin', 'B16', '''J/(mol·K)''') ALL_UNITS.append(JOULE_PER_MOLE_KELVIN) CREDIT = UnitDescriptor('credit', 'B17', '''''') ALL_UNITS.append(CREDIT) JOULE_SECOND = UnitDescriptor('joule second', 'B18', '''J·s''') ALL_UNITS.append(JOULE_SECOND) DIGIT = UnitDescriptor('digit', 'B19', '''''') ALL_UNITS.append(DIGIT) BUNK = UnitDescriptor('bunk', 'B2', '''''') ALL_UNITS.append(BUNK) JOULE_SQUARE_METRE_PER_KILOGRAM = UnitDescriptor('joule square metre per kilogram', 'B20', '''J·m²/kg''') ALL_UNITS.append(JOULE_SQUARE_METRE_PER_KILOGRAM) KELVIN_PER_WATT = UnitDescriptor('kelvin per watt', 'B21', '''K/W''') ALL_UNITS.append(KELVIN_PER_WATT) KILOAMPERE = UnitDescriptor('kiloampere', 'B22', '''kA''') ALL_UNITS.append(KILOAMPERE) KILOAMPERE_PER_SQUARE_METRE = UnitDescriptor('kiloampere per square metre', 'B23', '''kA/m²''') ALL_UNITS.append(KILOAMPERE_PER_SQUARE_METRE) KILOAMPERE_PER_METRE = UnitDescriptor('kiloampere per metre', 'B24', '''kA/m''') ALL_UNITS.append(KILOAMPERE_PER_METRE) KILOBECQUEREL_PER_KILOGRAM = UnitDescriptor('kilobecquerel per kilogram', 'B25', '''kBq/kg''') ALL_UNITS.append(KILOBECQUEREL_PER_KILOGRAM) KILOCOULOMB = UnitDescriptor('kilocoulomb', 'B26', '''kC''') ALL_UNITS.append(KILOCOULOMB) KILOCOULOMB_PER_CUBIC_METRE = UnitDescriptor('kilocoulomb per cubic metre', 'B27', '''kC/m³''') ALL_UNITS.append(KILOCOULOMB_PER_CUBIC_METRE) KILOCOULOMB_PER_SQUARE_METRE = UnitDescriptor('kilocoulomb per square metre', 'B28', '''kC/m²''') ALL_UNITS.append(KILOCOULOMB_PER_SQUARE_METRE) KILOELECTRONVOLT = UnitDescriptor('kiloelectronvolt', 'B29', '''keV''') ALL_UNITS.append(KILOELECTRONVOLT) 
BATTING_POUND = UnitDescriptor('batting pound', 'B3', '''''') ALL_UNITS.append(BATTING_POUND) GIBIBIT = UnitDescriptor('gibibit', 'B30', '''Gibit''') ALL_UNITS.append(GIBIBIT) KILOGRAM_METRE_PER_SECOND = UnitDescriptor('kilogram metre per second', 'B31', '''kg·m/s''') ALL_UNITS.append(KILOGRAM_METRE_PER_SECOND) KILOGRAM_METRE_SQUARED = UnitDescriptor('kilogram metre squared', 'B32', '''kg·m²''') ALL_UNITS.append(KILOGRAM_METRE_SQUARED) KILOGRAM_METRE_SQUARED_PER_SECOND = UnitDescriptor('kilogram metre squared per second', 'B33', '''kg·m²/s''') ALL_UNITS.append(KILOGRAM_METRE_SQUARED_PER_SECOND) KILOGRAM_PER_CUBIC_DECIMETRE = UnitDescriptor('kilogram per cubic decimetre', 'B34', '''kg/dm³''') ALL_UNITS.append(KILOGRAM_PER_CUBIC_DECIMETRE) KILOGRAM_PER_LITRE = UnitDescriptor('kilogram per litre', 'B35', '''kg/l''') ALL_UNITS.append(KILOGRAM_PER_LITRE) KILOGRAM_PER_LITRE = UnitDescriptor('kilogram per litre', 'B35', '''kg/L''') ALL_UNITS.append(KILOGRAM_PER_LITRE) CALORIE_THERMOCHEMICAL_PER_GRAM = UnitDescriptor('calorie (thermochemical) per gram', 'B36', '''calth/g''') ALL_UNITS.append(CALORIE_THERMOCHEMICAL_PER_GRAM) KILOGRAM_FORCE = UnitDescriptor('kilogram-force', 'B37', '''kgf''') ALL_UNITS.append(KILOGRAM_FORCE) KILOGRAM_FORCE_METRE = UnitDescriptor('kilogram-force metre', 'B38', '''kgf·m''') ALL_UNITS.append(KILOGRAM_FORCE_METRE) KILOGRAM_FORCE_METRE_PER_SECOND = UnitDescriptor('kilogram-force metre per second', 'B39', '''kgf·m/s''') ALL_UNITS.append(KILOGRAM_FORCE_METRE_PER_SECOND) BARREL_IMPERIAL = UnitDescriptor('barrel, imperial', 'B4', '''''') ALL_UNITS.append(BARREL_IMPERIAL) KILOGRAM_FORCE_PER_SQUARE_METRE = UnitDescriptor('kilogram-force per square metre', 'B40', '''kgf/m²''') ALL_UNITS.append(KILOGRAM_FORCE_PER_SQUARE_METRE) KILOJOULE_PER_KELVIN = UnitDescriptor('kilojoule per kelvin', 'B41', '''kJ/K''') ALL_UNITS.append(KILOJOULE_PER_KELVIN) KILOJOULE_PER_KILOGRAM = UnitDescriptor('kilojoule per kilogram', 'B42', '''kJ/kg''') ALL_UNITS.append(KILOJOULE_PER_KILOGRAM) KILOJOULE_PER_KILOGRAM_KELVIN = UnitDescriptor('kilojoule per kilogram kelvin', 'B43', '''kJ/(kg·K)''') ALL_UNITS.append(KILOJOULE_PER_KILOGRAM_KELVIN) KILOJOULE_PER_MOLE = UnitDescriptor('kilojoule per mole', 'B44', '''kJ/mol''') ALL_UNITS.append(KILOJOULE_PER_MOLE) KILOMOLE = UnitDescriptor('kilomole', 'B45', '''kmol''') ALL_UNITS.append(KILOMOLE) KILOMOLE_PER_CUBIC_METRE = UnitDescriptor('kilomole per cubic metre', 'B46', '''kmol/m³''') ALL_UNITS.append(KILOMOLE_PER_CUBIC_METRE) KILONEWTON = UnitDescriptor('kilonewton', 'B47', '''kN''') ALL_UNITS.append(KILONEWTON) KILONEWTON_METRE = UnitDescriptor('kilonewton metre', 'B48', '''kN·m''') ALL_UNITS.append(KILONEWTON_METRE) KILOOHM = UnitDescriptor('kiloohm', 'B49', '''kΩ''') ALL_UNITS.append(KILOOHM) BILLET = UnitDescriptor('billet', 'B5', '''''') ALL_UNITS.append(BILLET) KILOOHM_METRE = UnitDescriptor('kiloohm metre', 'B50', '''kΩ·m''') ALL_UNITS.append(KILOOHM_METRE) KILOPOND = UnitDescriptor('kilopond', 'B51', '''kp''') ALL_UNITS.append(KILOPOND) KILOSECOND = UnitDescriptor('kilosecond', 'B52', '''ks''') ALL_UNITS.append(KILOSECOND) KILOSIEMENS = UnitDescriptor('kilosiemens', 'B53', '''kS''') ALL_UNITS.append(KILOSIEMENS) KILOSIEMENS_PER_METRE = UnitDescriptor('kilosiemens per metre', 'B54', '''kS/m''') ALL_UNITS.append(KILOSIEMENS_PER_METRE) KILOVOLT_PER_METRE = UnitDescriptor('kilovolt per metre', 'B55', '''kV/m''') ALL_UNITS.append(KILOVOLT_PER_METRE) KILOWEBER_PER_METRE = UnitDescriptor('kiloweber per metre', 'B56', '''kWb/m''') 
ALL_UNITS.append(KILOWEBER_PER_METRE) LIGHT_YEAR = UnitDescriptor('light year', 'B57', '''ly''') ALL_UNITS.append(LIGHT_YEAR) LITRE_PER_MOLE = UnitDescriptor('litre per mole', 'B58', '''l/mol''') ALL_UNITS.append(LITRE_PER_MOLE) LUMEN_HOUR = UnitDescriptor('lumen hour', 'B59', '''lm·h''') ALL_UNITS.append(LUMEN_HOUR) BUN = UnitDescriptor('bun', 'B6', '''''') ALL_UNITS.append(BUN) LUMEN_PER_SQUARE_METRE = UnitDescriptor('lumen per square metre', 'B60', '''lm/m²''') ALL_UNITS.append(LUMEN_PER_SQUARE_METRE) LUMEN_PER_WATT = UnitDescriptor('lumen per watt', 'B61', '''lm/W''') ALL_UNITS.append(LUMEN_PER_WATT) LUMEN_SECOND = UnitDescriptor('lumen second', 'B62', '''lm·s''') ALL_UNITS.append(LUMEN_SECOND) LUX_HOUR = UnitDescriptor('lux hour', 'B63', '''lx·h''') ALL_UNITS.append(LUX_HOUR) LUX_SECOND = UnitDescriptor('lux second', 'B64', '''lx·s''') ALL_UNITS.append(LUX_SECOND) MAXWELL = UnitDescriptor('maxwell', 'B65', '''Mx''') ALL_UNITS.append(MAXWELL) MEGAAMPERE_PER_SQUARE_METRE = UnitDescriptor('megaampere per square metre', 'B66', '''MA/m²''') ALL_UNITS.append(MEGAAMPERE_PER_SQUARE_METRE) MEGABECQUEREL_PER_KILOGRAM = UnitDescriptor('megabecquerel per kilogram', 'B67', '''MBq/kg''') ALL_UNITS.append(MEGABECQUEREL_PER_KILOGRAM) GIGABIT = UnitDescriptor('gigabit', 'B68', '''Gbit''') ALL_UNITS.append(GIGABIT) MEGACOULOMB_PER_CUBIC_METRE = UnitDescriptor('megacoulomb per cubic metre', 'B69', '''MC/m³''') ALL_UNITS.append(MEGACOULOMB_PER_CUBIC_METRE) CYCLE = UnitDescriptor('cycle', 'B7', '''''') ALL_UNITS.append(CYCLE) MEGACOULOMB_PER_SQUARE_METRE = UnitDescriptor('megacoulomb per square metre', 'B70', '''MC/m²''') ALL_UNITS.append(MEGACOULOMB_PER_SQUARE_METRE) MEGAELECTRONVOLT = UnitDescriptor('megaelectronvolt', 'B71', '''MeV''') ALL_UNITS.append(MEGAELECTRONVOLT) MEGAGRAM_PER_CUBIC_METRE = UnitDescriptor('megagram per cubic metre', 'B72', '''Mg/m³''') ALL_UNITS.append(MEGAGRAM_PER_CUBIC_METRE) MEGANEWTON = UnitDescriptor('meganewton', 'B73', '''MN''') ALL_UNITS.append(MEGANEWTON) MEGANEWTON_METRE = UnitDescriptor('meganewton metre', 'B74', '''MN·m''') ALL_UNITS.append(MEGANEWTON_METRE) MEGAOHM = UnitDescriptor('megaohm', 'B75', '''MΩ''') ALL_UNITS.append(MEGAOHM) MEGAOHM_METRE = UnitDescriptor('megaohm metre', 'B76', '''MΩ·m''') ALL_UNITS.append(MEGAOHM_METRE) MEGASIEMENS_PER_METRE = UnitDescriptor('megasiemens per metre', 'B77', '''MS/m''') ALL_UNITS.append(MEGASIEMENS_PER_METRE) MEGAVOLT = UnitDescriptor('megavolt', 'B78', '''MV''') ALL_UNITS.append(MEGAVOLT) MEGAVOLT_PER_METRE = UnitDescriptor('megavolt per metre', 'B79', '''MV/m''') ALL_UNITS.append(MEGAVOLT_PER_METRE) JOULE_PER_CUBIC_METRE = UnitDescriptor('joule per cubic metre', 'B8', '''J/m³''') ALL_UNITS.append(JOULE_PER_CUBIC_METRE) GIGABIT_PER_SECOND = UnitDescriptor('gigabit per second', 'B80', '''Gbit/s''') ALL_UNITS.append(GIGABIT_PER_SECOND) RECIPROCAL_METRE_SQUARED_RECIPROCAL_SECOND = UnitDescriptor('reciprocal metre squared reciprocal second', 'B81', '''m⁻²/s''') ALL_UNITS.append(RECIPROCAL_METRE_SQUARED_RECIPROCAL_SECOND) INCH_PER_LINEAR_FOOT = UnitDescriptor('inch per linear foot', 'B82', '''''') ALL_UNITS.append(INCH_PER_LINEAR_FOOT) METRE_TO_THE_FOURTH_POWER = UnitDescriptor('metre to the fourth power', 'B83', '''m⁴''') ALL_UNITS.append(METRE_TO_THE_FOURTH_POWER) MICROAMPERE = UnitDescriptor('microampere', 'B84', '''µA''') ALL_UNITS.append(MICROAMPERE) MICROBAR = UnitDescriptor('microbar', 'B85', '''µbar''') ALL_UNITS.append(MICROBAR) MICROCOULOMB = UnitDescriptor('microcoulomb', 'B86', '''µC''') 
ALL_UNITS.append(MICROCOULOMB) MICROCOULOMB_PER_CUBIC_METRE = UnitDescriptor('microcoulomb per cubic metre', 'B87', '''µC/m³''') ALL_UNITS.append(MICROCOULOMB_PER_CUBIC_METRE) MICROCOULOMB_PER_SQUARE_METRE = UnitDescriptor('microcoulomb per square metre', 'B88', '''µC/m²''') ALL_UNITS.append(MICROCOULOMB_PER_SQUARE_METRE) MICROFARAD_PER_METRE = UnitDescriptor('microfarad per metre', 'B89', '''µF/m''') ALL_UNITS.append(MICROFARAD_PER_METRE) BATT = UnitDescriptor('batt', 'B9', '''''') ALL_UNITS.append(BATT) MICROHENRY = UnitDescriptor('microhenry', 'B90', '''µH''') ALL_UNITS.append(MICROHENRY) MICROHENRY_PER_METRE = UnitDescriptor('microhenry per metre', 'B91', '''µH/m''') ALL_UNITS.append(MICROHENRY_PER_METRE) MICRONEWTON = UnitDescriptor('micronewton', 'B92', '''µN''') ALL_UNITS.append(MICRONEWTON) MICRONEWTON_METRE = UnitDescriptor('micronewton metre', 'B93', '''µN·m''') ALL_UNITS.append(MICRONEWTON_METRE) MICROOHM = UnitDescriptor('microohm', 'B94', '''µΩ''') ALL_UNITS.append(MICROOHM) MICROOHM_METRE = UnitDescriptor('microohm metre', 'B95', '''µΩ·m''') ALL_UNITS.append(MICROOHM_METRE) MICROPASCAL = UnitDescriptor('micropascal', 'B96', '''µPa''') ALL_UNITS.append(MICROPASCAL) MICRORADIAN = UnitDescriptor('microradian', 'B97', '''µrad''') ALL_UNITS.append(MICRORADIAN) MICROSECOND = UnitDescriptor('microsecond', 'B98', '''µs''') ALL_UNITS.append(MICROSECOND) MICROSIEMENS = UnitDescriptor('microsiemens', 'B99', '''µS''') ALL_UNITS.append(MICROSIEMENS) BAR_UNIT_OF_PRESSURE = UnitDescriptor('bar [unit of pressure]', 'BAR', '''bar''') ALL_UNITS.append(BAR_UNIT_OF_PRESSURE) BASE_BOX = UnitDescriptor('base box', 'BB', '''''') ALL_UNITS.append(BASE_BOX) BOARD = UnitDescriptor('board', 'BD', '''''') ALL_UNITS.append(BOARD) BUNDLE = UnitDescriptor('bundle', 'BE', '''''') ALL_UNITS.append(BUNDLE) BOARD_FOOT = UnitDescriptor('board foot', 'BFT', '''fbm''') ALL_UNITS.append(BOARD_FOOT) BAG = UnitDescriptor('bag', 'BG', '''''') ALL_UNITS.append(BAG) BRUSH = UnitDescriptor('brush', 'BH', '''''') ALL_UNITS.append(BRUSH) BRAKE_HORSE_POWER = UnitDescriptor('brake horse power', 'BHP', '''BHP''') ALL_UNITS.append(BRAKE_HORSE_POWER) BILLION_EUR = UnitDescriptor('billion (EUR)', 'BIL', '''''') ALL_UNITS.append(BILLION_EUR) BUCKET = UnitDescriptor('bucket', 'BJ', '''''') ALL_UNITS.append(BUCKET) BASKET = UnitDescriptor('basket', 'BK', '''''') ALL_UNITS.append(BASKET) BALE = UnitDescriptor('bale', 'BL', '''''') ALL_UNITS.append(BALE) DRY_BARREL_US = UnitDescriptor('dry barrel (US)', 'BLD', '''bbl (US)''') ALL_UNITS.append(DRY_BARREL_US) BARREL_US = UnitDescriptor('barrel (US)', 'BLL', '''barrel (US)''') ALL_UNITS.append(BARREL_US) BOTTLE = UnitDescriptor('bottle', 'BO', '''''') ALL_UNITS.append(BOTTLE) HUNDRED_BOARD_FOOT = UnitDescriptor('hundred board foot', 'BP', '''''') ALL_UNITS.append(HUNDRED_BOARD_FOOT) BEATS_PER_MINUTE = UnitDescriptor('beats per minute', 'BPM', '''BPM''') ALL_UNITS.append(BEATS_PER_MINUTE) BECQUEREL = UnitDescriptor('becquerel', 'BQL', '''Bq''') ALL_UNITS.append(BECQUEREL) BAR_UNIT_OF_PACKAGING = UnitDescriptor('bar [unit of packaging]', 'BR', '''''') ALL_UNITS.append(BAR_UNIT_OF_PACKAGING) BOLT = UnitDescriptor('bolt', 'BT', '''''') ALL_UNITS.append(BOLT) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE = UnitDescriptor('British thermal unit (international table)', 'BTU', '''BtuIT''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE) BUSHEL_US = UnitDescriptor('bushel (US)', 'BUA', '''bu (US)''') ALL_UNITS.append(BUSHEL_US) BUSHEL_UK = UnitDescriptor('bushel (UK)', 'BUI', '''bushel 
(UK)''') ALL_UNITS.append(BUSHEL_UK) BASE_WEIGHT = UnitDescriptor('base weight', 'BW', '''''') ALL_UNITS.append(BASE_WEIGHT) BOX = UnitDescriptor('box', 'BX', '''''') ALL_UNITS.append(BOX) MILLION_BTUS = UnitDescriptor('million BTUs', 'BZ', '''''') ALL_UNITS.append(MILLION_BTUS) CALL = UnitDescriptor('call', 'C0', '''''') ALL_UNITS.append(CALL) COMPOSITE_PRODUCT_POUND_TOTAL_WEIGHT = UnitDescriptor('composite product pound (total weight)', 'C1', '''''') ALL_UNITS.append(COMPOSITE_PRODUCT_POUND_TOTAL_WEIGHT) MILLIFARAD = UnitDescriptor('millifarad', 'C10', '''mF''') ALL_UNITS.append(MILLIFARAD) MILLIGAL = UnitDescriptor('milligal', 'C11', '''mGal''') ALL_UNITS.append(MILLIGAL) MILLIGRAM_PER_METRE = UnitDescriptor('milligram per metre', 'C12', '''mg/m''') ALL_UNITS.append(MILLIGRAM_PER_METRE) MILLIGRAY = UnitDescriptor('milligray', 'C13', '''mGy''') ALL_UNITS.append(MILLIGRAY) MILLIHENRY = UnitDescriptor('millihenry', 'C14', '''mH''') ALL_UNITS.append(MILLIHENRY) MILLIJOULE = UnitDescriptor('millijoule', 'C15', '''mJ''') ALL_UNITS.append(MILLIJOULE) MILLIMETRE_PER_SECOND = UnitDescriptor('millimetre per second', 'C16', '''mm/s''') ALL_UNITS.append(MILLIMETRE_PER_SECOND) MILLIMETRE_SQUARED_PER_SECOND = UnitDescriptor('millimetre squared per second', 'C17', '''mm²/s''') ALL_UNITS.append(MILLIMETRE_SQUARED_PER_SECOND) MILLIMOLE = UnitDescriptor('millimole', 'C18', '''mmol''') ALL_UNITS.append(MILLIMOLE) MOLE_PER_KILOGRAM = UnitDescriptor('mole per kilogram', 'C19', '''mol/kg''') ALL_UNITS.append(MOLE_PER_KILOGRAM) CARSET = UnitDescriptor('carset', 'C2', '''''') ALL_UNITS.append(CARSET) MILLINEWTON = UnitDescriptor('millinewton', 'C20', '''mN''') ALL_UNITS.append(MILLINEWTON) KIBIBIT = UnitDescriptor('kibibit', 'C21', '''Kibit''') ALL_UNITS.append(KIBIBIT) MILLINEWTON_PER_METRE = UnitDescriptor('millinewton per metre', 'C22', '''mN/m''') ALL_UNITS.append(MILLINEWTON_PER_METRE) MILLIOHM_METRE = UnitDescriptor('milliohm metre', 'C23', '''mΩ·m''') ALL_UNITS.append(MILLIOHM_METRE) MILLIPASCAL_SECOND = UnitDescriptor('millipascal second', 'C24', '''mPa·s''') ALL_UNITS.append(MILLIPASCAL_SECOND) MILLIRADIAN = UnitDescriptor('milliradian', 'C25', '''mrad''') ALL_UNITS.append(MILLIRADIAN) MILLISECOND = UnitDescriptor('millisecond', 'C26', '''ms''') ALL_UNITS.append(MILLISECOND) MILLISIEMENS = UnitDescriptor('millisiemens', 'C27', '''mS''') ALL_UNITS.append(MILLISIEMENS) MILLISIEVERT = UnitDescriptor('millisievert', 'C28', '''mSv''') ALL_UNITS.append(MILLISIEVERT) MILLITESLA = UnitDescriptor('millitesla', 'C29', '''mT''') ALL_UNITS.append(MILLITESLA) MICROVOLT_PER_METRE = UnitDescriptor('microvolt per metre', 'C3', '''µV/m''') ALL_UNITS.append(MICROVOLT_PER_METRE) MILLIVOLT_PER_METRE = UnitDescriptor('millivolt per metre', 'C30', '''mV/m''') ALL_UNITS.append(MILLIVOLT_PER_METRE) MILLIWATT = UnitDescriptor('milliwatt', 'C31', '''mW''') ALL_UNITS.append(MILLIWATT) MILLIWATT_PER_SQUARE_METRE = UnitDescriptor('milliwatt per square metre', 'C32', '''mW/m²''') ALL_UNITS.append(MILLIWATT_PER_SQUARE_METRE) MILLIWEBER = UnitDescriptor('milliweber', 'C33', '''mWb''') ALL_UNITS.append(MILLIWEBER) MOLE = UnitDescriptor('mole', 'C34', '''mol''') ALL_UNITS.append(MOLE) MOLE_PER_CUBIC_DECIMETRE = UnitDescriptor('mole per cubic decimetre', 'C35', '''mol/dm³''') ALL_UNITS.append(MOLE_PER_CUBIC_DECIMETRE) MOLE_PER_CUBIC_METRE = UnitDescriptor('mole per cubic metre', 'C36', '''mol/m³''') ALL_UNITS.append(MOLE_PER_CUBIC_METRE) KILOBIT = UnitDescriptor('kilobit', 'C37', '''kbit''') ALL_UNITS.append(KILOBIT) MOLE_PER_LITRE = 
UnitDescriptor('mole per litre', 'C38', '''mol/l''') ALL_UNITS.append(MOLE_PER_LITRE) NANOAMPERE = UnitDescriptor('nanoampere', 'C39', '''nA''') ALL_UNITS.append(NANOAMPERE) CARLOAD = UnitDescriptor('carload', 'C4', '''''') ALL_UNITS.append(CARLOAD) NANOCOULOMB = UnitDescriptor('nanocoulomb', 'C40', '''nC''') ALL_UNITS.append(NANOCOULOMB) NANOFARAD = UnitDescriptor('nanofarad', 'C41', '''nF''') ALL_UNITS.append(NANOFARAD) NANOFARAD_PER_METRE = UnitDescriptor('nanofarad per metre', 'C42', '''nF/m''') ALL_UNITS.append(NANOFARAD_PER_METRE) NANOHENRY = UnitDescriptor('nanohenry', 'C43', '''nH''') ALL_UNITS.append(NANOHENRY) NANOHENRY_PER_METRE = UnitDescriptor('nanohenry per metre', 'C44', '''nH/m''') ALL_UNITS.append(NANOHENRY_PER_METRE) NANOMETRE = UnitDescriptor('nanometre', 'C45', '''nm''') ALL_UNITS.append(NANOMETRE) NANOOHM_METRE = UnitDescriptor('nanoohm metre', 'C46', '''nΩ·m''') ALL_UNITS.append(NANOOHM_METRE) NANOSECOND = UnitDescriptor('nanosecond', 'C47', '''ns''') ALL_UNITS.append(NANOSECOND) NANOTESLA = UnitDescriptor('nanotesla', 'C48', '''nT''') ALL_UNITS.append(NANOTESLA) NANOWATT = UnitDescriptor('nanowatt', 'C49', '''nW''') ALL_UNITS.append(NANOWATT) COST = UnitDescriptor('cost', 'C5', '''''') ALL_UNITS.append(COST) NEPER = UnitDescriptor('neper', 'C50', '''Np''') ALL_UNITS.append(NEPER) NEPER_PER_SECOND = UnitDescriptor('neper per second', 'C51', '''Np/s''') ALL_UNITS.append(NEPER_PER_SECOND) PICOMETRE = UnitDescriptor('picometre', 'C52', '''pm''') ALL_UNITS.append(PICOMETRE) NEWTON_METRE_SECOND = UnitDescriptor('newton metre second', 'C53', '''N·m·s''') ALL_UNITS.append(NEWTON_METRE_SECOND) NEWTON_METRE_SQUARED_PER_KILOGRAM_SQUARED = UnitDescriptor('newton metre squared per kilogram squared', 'C54', '''N·m²/kg²''') ALL_UNITS.append(NEWTON_METRE_SQUARED_PER_KILOGRAM_SQUARED) NEWTON_PER_SQUARE_METRE = UnitDescriptor('newton per square metre', 'C55', '''N/m²''') ALL_UNITS.append(NEWTON_PER_SQUARE_METRE) NEWTON_PER_SQUARE_MILLIMETRE = UnitDescriptor('newton per square millimetre', 'C56', '''N/mm²''') ALL_UNITS.append(NEWTON_PER_SQUARE_MILLIMETRE) NEWTON_SECOND = UnitDescriptor('newton second', 'C57', '''N·s''') ALL_UNITS.append(NEWTON_SECOND) NEWTON_SECOND_PER_METRE = UnitDescriptor('newton second per metre', 'C58', '''N·s/m''') ALL_UNITS.append(NEWTON_SECOND_PER_METRE) OCTAVE = UnitDescriptor('octave', 'C59', '''''') ALL_UNITS.append(OCTAVE) CELL = UnitDescriptor('cell', 'C6', '''''') ALL_UNITS.append(CELL) OHM_CENTIMETRE = UnitDescriptor('ohm centimetre', 'C60', '''Ω·cm''') ALL_UNITS.append(OHM_CENTIMETRE) OHM_METRE = UnitDescriptor('ohm metre', 'C61', '''Ω·m''') ALL_UNITS.append(OHM_METRE) ONE = UnitDescriptor('one', 'C62', '''1''') ALL_UNITS.append(ONE) PARSEC = UnitDescriptor('parsec', 'C63', '''pc''') ALL_UNITS.append(PARSEC) PASCAL_PER_KELVIN = UnitDescriptor('pascal per kelvin', 'C64', '''Pa/K''') ALL_UNITS.append(PASCAL_PER_KELVIN) PASCAL_SECOND = UnitDescriptor('pascal second', 'C65', '''Pa·s''') ALL_UNITS.append(PASCAL_SECOND) PASCAL_SECOND_PER_CUBIC_METRE = UnitDescriptor('pascal second per cubic metre', 'C66', '''Pa·s/m³''') ALL_UNITS.append(PASCAL_SECOND_PER_CUBIC_METRE) PASCAL_SECOND_PER_METRE = UnitDescriptor('pascal second per metre', 'C67', '''Pa· s/m''') ALL_UNITS.append(PASCAL_SECOND_PER_METRE) PETAJOULE = UnitDescriptor('petajoule', 'C68', '''PJ''') ALL_UNITS.append(PETAJOULE) PHON = UnitDescriptor('phon', 'C69', '''''') ALL_UNITS.append(PHON) CENTIPOISE = UnitDescriptor('centipoise', 'C7', '''cP''') ALL_UNITS.append(CENTIPOISE) PICOAMPERE = 
UnitDescriptor('picoampere', 'C70', '''pA''') ALL_UNITS.append(PICOAMPERE) PICOCOULOMB = UnitDescriptor('picocoulomb', 'C71', '''pC''') ALL_UNITS.append(PICOCOULOMB) PICOFARAD_PER_METRE = UnitDescriptor('picofarad per metre', 'C72', '''pF/m''') ALL_UNITS.append(PICOFARAD_PER_METRE) PICOHENRY = UnitDescriptor('picohenry', 'C73', '''pH''') ALL_UNITS.append(PICOHENRY) KILOBIT_PER_SECOND = UnitDescriptor('kilobit per second', 'C74', '''kbit/s''') ALL_UNITS.append(KILOBIT_PER_SECOND) PICOWATT = UnitDescriptor('picowatt', 'C75', '''pW''') ALL_UNITS.append(PICOWATT) PICOWATT_PER_SQUARE_METRE = UnitDescriptor('picowatt per square metre', 'C76', '''pW/m²''') ALL_UNITS.append(PICOWATT_PER_SQUARE_METRE) POUND_GAGE = UnitDescriptor('pound gage', 'C77', '''''') ALL_UNITS.append(POUND_GAGE) POUND_FORCE = UnitDescriptor('pound-force', 'C78', '''lbf''') ALL_UNITS.append(POUND_FORCE) KILOVOLT_AMPERE_HOUR = UnitDescriptor('kilovolt ampere hour', 'C79', '''kVAh''') ALL_UNITS.append(KILOVOLT_AMPERE_HOUR) MILLICOULOMB_PER_KILOGRAM = UnitDescriptor('millicoulomb per kilogram', 'C8', '''mC/kg''') ALL_UNITS.append(MILLICOULOMB_PER_KILOGRAM) RAD = UnitDescriptor('rad', 'C80', '''rad''') ALL_UNITS.append(RAD) RADIAN = UnitDescriptor('radian', 'C81', '''rad''') ALL_UNITS.append(RADIAN) RADIAN_SQUARE_METRE_PER_MOLE = UnitDescriptor('radian square metre per mole', 'C82', '''rad·m²/mol''') ALL_UNITS.append(RADIAN_SQUARE_METRE_PER_MOLE) RADIAN_SQUARE_METRE_PER_KILOGRAM = UnitDescriptor('radian square metre per kilogram', 'C83', '''rad·m²/kg''') ALL_UNITS.append(RADIAN_SQUARE_METRE_PER_KILOGRAM) RADIAN_PER_METRE = UnitDescriptor('radian per metre', 'C84', '''rad/m''') ALL_UNITS.append(RADIAN_PER_METRE) RECIPROCAL_ANGSTROM = UnitDescriptor('reciprocal angstrom', 'C85', '''Å⁻¹''') ALL_UNITS.append(RECIPROCAL_ANGSTROM) RECIPROCAL_CUBIC_METRE = UnitDescriptor('reciprocal cubic metre', 'C86', '''m⁻³''') ALL_UNITS.append(RECIPROCAL_CUBIC_METRE) RECIPROCAL_CUBIC_METRE_PER_SECOND = UnitDescriptor('reciprocal cubic metre per second', 'C87', '''m⁻³/s''') ALL_UNITS.append(RECIPROCAL_CUBIC_METRE_PER_SECOND) RECIPROCAL_ELECTRON_VOLT_PER_CUBIC_METRE = UnitDescriptor('reciprocal electron volt per cubic metre', 'C88', '''eV⁻¹/m³''') ALL_UNITS.append(RECIPROCAL_ELECTRON_VOLT_PER_CUBIC_METRE) RECIPROCAL_HENRY = UnitDescriptor('reciprocal henry', 'C89', '''H⁻¹''') ALL_UNITS.append(RECIPROCAL_HENRY) COIL_GROUP = UnitDescriptor('coil group', 'C9', '''''') ALL_UNITS.append(COIL_GROUP) RECIPROCAL_JOULE_PER_CUBIC_METRE = UnitDescriptor('reciprocal joule per cubic metre', 'C90', '''J⁻¹/m³''') ALL_UNITS.append(RECIPROCAL_JOULE_PER_CUBIC_METRE) RECIPROCAL_KELVIN_OR_KELVIN_TO_THE_POWER_MINUS_ONE = UnitDescriptor('reciprocal kelvin or kelvin to the power minus one', 'C91', '''K⁻¹''') ALL_UNITS.append(RECIPROCAL_KELVIN_OR_KELVIN_TO_THE_POWER_MINUS_ONE) RECIPROCAL_METRE = UnitDescriptor('reciprocal metre', 'C92', '''m⁻¹''') ALL_UNITS.append(RECIPROCAL_METRE) RECIPROCAL_SQUARE_METRE = UnitDescriptor('reciprocal square metre', 'C93', '''m⁻²''') ALL_UNITS.append(RECIPROCAL_SQUARE_METRE) RECIPROCAL_MINUTE = UnitDescriptor('reciprocal minute', 'C94', '''min⁻¹''') ALL_UNITS.append(RECIPROCAL_MINUTE) RECIPROCAL_MOLE = UnitDescriptor('reciprocal mole', 'C95', '''mol⁻¹''') ALL_UNITS.append(RECIPROCAL_MOLE) RECIPROCAL_PASCAL_OR_PASCAL_TO_THE_POWER_MINUS_ONE = UnitDescriptor('reciprocal pascal or pascal to the power minus one', 'C96', '''Pa⁻¹''') ALL_UNITS.append(RECIPROCAL_PASCAL_OR_PASCAL_TO_THE_POWER_MINUS_ONE) RECIPROCAL_SECOND = UnitDescriptor('reciprocal 
second', 'C97', '''s⁻¹''') ALL_UNITS.append(RECIPROCAL_SECOND) RECIPROCAL_SECOND_PER_CUBIC_METRE = UnitDescriptor('reciprocal second per cubic metre', 'C98', '''s⁻¹/m³''') ALL_UNITS.append(RECIPROCAL_SECOND_PER_CUBIC_METRE) RECIPROCAL_SECOND_PER_METRE_SQUARED = UnitDescriptor('reciprocal second per metre squared', 'C99', '''s⁻¹/m²''') ALL_UNITS.append(RECIPROCAL_SECOND_PER_METRE_SQUARED) CAN = UnitDescriptor('can', 'CA', '''''') ALL_UNITS.append(CAN) CARRYING_CAPACITY_IN_METRIC_TON = UnitDescriptor('carrying capacity in metric ton', 'CCT', '''''') ALL_UNITS.append(CARRYING_CAPACITY_IN_METRIC_TON) CANDELA = UnitDescriptor('candela', 'CDL', '''cd''') ALL_UNITS.append(CANDELA) DEGREE_CELSIUS = UnitDescriptor('degree Celsius', 'CEL', '''°C''') ALL_UNITS.append(DEGREE_CELSIUS) HUNDRED = UnitDescriptor('hundred', 'CEN', '''''') ALL_UNITS.append(HUNDRED) CARD = UnitDescriptor('card', 'CG', '''''') ALL_UNITS.append(CARD) CENTIGRAM = UnitDescriptor('centigram', 'CGM', '''cg''') ALL_UNITS.append(CENTIGRAM) CONTAINER = UnitDescriptor('container', 'CH', '''''') ALL_UNITS.append(CONTAINER) CONE = UnitDescriptor('cone', 'CJ', '''''') ALL_UNITS.append(CONE) CONNECTOR = UnitDescriptor('connector', 'CK', '''''') ALL_UNITS.append(CONNECTOR) COULOMB_PER_KILOGRAM = UnitDescriptor('coulomb per kilogram', 'CKG', '''C/kg''') ALL_UNITS.append(COULOMB_PER_KILOGRAM) COIL = UnitDescriptor('coil', 'CL', '''''') ALL_UNITS.append(COIL) HUNDRED_LEAVE = UnitDescriptor('hundred leave', 'CLF', '''''') ALL_UNITS.append(HUNDRED_LEAVE) CENTILITRE = UnitDescriptor('centilitre', 'CLT', '''cl''') ALL_UNITS.append(CENTILITRE) SQUARE_CENTIMETRE = UnitDescriptor('square centimetre', 'CMK', '''cm²''') ALL_UNITS.append(SQUARE_CENTIMETRE) CUBIC_CENTIMETRE = UnitDescriptor('cubic centimetre', 'CMQ', '''cm³''') ALL_UNITS.append(CUBIC_CENTIMETRE) CENTIMETRE = UnitDescriptor('centimetre', 'CMT', '''cm''') ALL_UNITS.append(CENTIMETRE) HUNDRED_PACK = UnitDescriptor('hundred pack', 'CNP', '''''') ALL_UNITS.append(HUNDRED_PACK) CENTAL_UK = UnitDescriptor('cental (UK)', 'CNT', '''''') ALL_UNITS.append(CENTAL_UK) CARBOY = UnitDescriptor('carboy', 'CO', '''''') ALL_UNITS.append(CARBOY) COULOMB = UnitDescriptor('coulomb', 'COU', '''C''') ALL_UNITS.append(COULOMB) CARTRIDGE = UnitDescriptor('cartridge', 'CQ', '''''') ALL_UNITS.append(CARTRIDGE) CRATE = UnitDescriptor('crate', 'CR', '''''') ALL_UNITS.append(CRATE) CASE = UnitDescriptor('case', 'CS', '''''') ALL_UNITS.append(CASE) CARTON = UnitDescriptor('carton', 'CT', '''''') ALL_UNITS.append(CARTON) CONTENT_GRAM = UnitDescriptor('content gram', 'CTG', '''''') ALL_UNITS.append(CONTENT_GRAM) METRIC_CARAT = UnitDescriptor('metric carat', 'CTM', '''''') ALL_UNITS.append(METRIC_CARAT) CONTENT_TON_METRIC = UnitDescriptor('content ton (metric)', 'CTN', '''''') ALL_UNITS.append(CONTENT_TON_METRIC) CUP = UnitDescriptor('cup', 'CU', '''''') ALL_UNITS.append(CUP) CURIE = UnitDescriptor('curie', 'CUR', '''Ci''') ALL_UNITS.append(CURIE) COVER = UnitDescriptor('cover', 'CV', '''''') ALL_UNITS.append(COVER) HUNDRED_POUND_CWT_PER_HUNDRED_WEIGHT_US = UnitDescriptor('hundred pound (cwt) / hundred weight (US)', 'CWA', '''cwt (US)''') ALL_UNITS.append(HUNDRED_POUND_CWT_PER_HUNDRED_WEIGHT_US) HUNDRED_WEIGHT_UK = UnitDescriptor('hundred weight (UK)', 'CWI', '''cwt (UK)''') ALL_UNITS.append(HUNDRED_WEIGHT_UK) CYLINDER = UnitDescriptor('cylinder', 'CY', '''''') ALL_UNITS.append(CYLINDER) COMBO = UnitDescriptor('combo', 'CZ', '''''') ALL_UNITS.append(COMBO) KILOWATT_HOUR_PER_HOUR = UnitDescriptor('kilowatt hour per 
hour', 'D03', '''kW·h/h''') ALL_UNITS.append(KILOWATT_HOUR_PER_HOUR) LOT_UNIT_OF_WEIGHT = UnitDescriptor('lot [unit of weight]', 'D04', '''''') ALL_UNITS.append(LOT_UNIT_OF_WEIGHT) RECIPROCAL_SECOND_PER_STERADIAN = UnitDescriptor('reciprocal second per steradian', 'D1', '''s⁻¹/sr''') ALL_UNITS.append(RECIPROCAL_SECOND_PER_STERADIAN) SIEMENS_PER_METRE = UnitDescriptor('siemens per metre', 'D10', '''S/m''') ALL_UNITS.append(SIEMENS_PER_METRE) MEBIBIT = UnitDescriptor('mebibit', 'D11', '''Mibit''') ALL_UNITS.append(MEBIBIT) SIEMENS_SQUARE_METRE_PER_MOLE = UnitDescriptor('siemens square metre per mole', 'D12', '''S·m²/mol''') ALL_UNITS.append(SIEMENS_SQUARE_METRE_PER_MOLE) SIEVERT = UnitDescriptor('sievert', 'D13', '''Sv''') ALL_UNITS.append(SIEVERT) THOUSAND_LINEAR_YARD = UnitDescriptor('thousand linear yard', 'D14', '''''') ALL_UNITS.append(THOUSAND_LINEAR_YARD) SONE = UnitDescriptor('sone', 'D15', '''''') ALL_UNITS.append(SONE) SQUARE_CENTIMETRE_PER_ERG = UnitDescriptor('square centimetre per erg', 'D16', '''cm²/erg''') ALL_UNITS.append(SQUARE_CENTIMETRE_PER_ERG) SQUARE_CENTIMETRE_PER_STERADIAN_ERG = UnitDescriptor('square centimetre per steradian erg', 'D17', '''cm²/(sr·erg)''') ALL_UNITS.append(SQUARE_CENTIMETRE_PER_STERADIAN_ERG) METRE_KELVIN = UnitDescriptor('metre kelvin', 'D18', '''m·K''') ALL_UNITS.append(METRE_KELVIN) SQUARE_METRE_KELVIN_PER_WATT = UnitDescriptor('square metre kelvin per watt', 'D19', '''m²·K/W''') ALL_UNITS.append(SQUARE_METRE_KELVIN_PER_WATT) RECIPROCAL_SECOND_PER_STERADIAN_METRE_SQUARED = UnitDescriptor('reciprocal second per steradian metre squared', 'D2', '''s⁻¹/(sr·m²)''') ALL_UNITS.append(RECIPROCAL_SECOND_PER_STERADIAN_METRE_SQUARED) SQUARE_METRE_PER_JOULE = UnitDescriptor('square metre per joule', 'D20', '''m²/J''') ALL_UNITS.append(SQUARE_METRE_PER_JOULE) SQUARE_METRE_PER_KILOGRAM = UnitDescriptor('square metre per kilogram', 'D21', '''m²/kg''') ALL_UNITS.append(SQUARE_METRE_PER_KILOGRAM) SQUARE_METRE_PER_MOLE = UnitDescriptor('square metre per mole', 'D22', '''m²/mol''') ALL_UNITS.append(SQUARE_METRE_PER_MOLE) PEN_GRAM_PROTEIN = UnitDescriptor('pen gram (protein)', 'D23', '''''') ALL_UNITS.append(PEN_GRAM_PROTEIN) SQUARE_METRE_PER_STERADIAN = UnitDescriptor('square metre per steradian', 'D24', '''m²/sr''') ALL_UNITS.append(SQUARE_METRE_PER_STERADIAN) SQUARE_METRE_PER_STERADIAN_JOULE = UnitDescriptor('square metre per steradian joule', 'D25', '''m²/(sr·J)''') ALL_UNITS.append(SQUARE_METRE_PER_STERADIAN_JOULE) SQUARE_METRE_PER_VOLT_SECOND = UnitDescriptor('square metre per volt second', 'D26', '''m²/(V·s)''') ALL_UNITS.append(SQUARE_METRE_PER_VOLT_SECOND) STERADIAN = UnitDescriptor('steradian', 'D27', '''sr''') ALL_UNITS.append(STERADIAN) SYPHON = UnitDescriptor('syphon', 'D28', '''''') ALL_UNITS.append(SYPHON) TERAHERTZ = UnitDescriptor('terahertz', 'D29', '''THz''') ALL_UNITS.append(TERAHERTZ) TERAJOULE = UnitDescriptor('terajoule', 'D30', '''TJ''') ALL_UNITS.append(TERAJOULE) TERAWATT = UnitDescriptor('terawatt', 'D31', '''TW''') ALL_UNITS.append(TERAWATT) TERAWATT_HOUR = UnitDescriptor('terawatt hour', 'D32', '''TW·h''') ALL_UNITS.append(TERAWATT_HOUR) TESLA = UnitDescriptor('tesla', 'D33', '''T''') ALL_UNITS.append(TESLA) TEX = UnitDescriptor('tex', 'D34', '''tex (g/km)''') ALL_UNITS.append(TEX) CALORIE_THERMOCHEMICAL = UnitDescriptor('calorie (thermochemical)', 'D35', '''calth''') ALL_UNITS.append(CALORIE_THERMOCHEMICAL) MEGABIT = UnitDescriptor('megabit', 'D36', '''Mbit''') ALL_UNITS.append(MEGABIT) CALORIE_THERMOCHEMICAL_PER_GRAM_KELVIN = 
UnitDescriptor('calorie (thermochemical) per gram kelvin', 'D37', '''calth/(g·K)''') ALL_UNITS.append(CALORIE_THERMOCHEMICAL_PER_GRAM_KELVIN) CALORIE_THERMOCHEMICAL_PER_SECOND_CENTIMETRE_KELVIN = UnitDescriptor('calorie (thermochemical) per second centimetre kelvin', 'D38', '''calth/(s·cm·K)''') ALL_UNITS.append(CALORIE_THERMOCHEMICAL_PER_SECOND_CENTIMETRE_KELVIN) CALORIE_THERMOCHEMICAL_PER_SECOND_SQUARE_CENTIMETRE_KELVIN = UnitDescriptor('calorie (thermochemical) per second square centimetre kelvin', 'D39', '''calth/(s·cm²·K)''') ALL_UNITS.append(CALORIE_THERMOCHEMICAL_PER_SECOND_SQUARE_CENTIMETRE_KELVIN) THOUSAND_LITRE = UnitDescriptor('thousand litre', 'D40', '''''') ALL_UNITS.append(THOUSAND_LITRE) TONNE_PER_CUBIC_METRE = UnitDescriptor('tonne per cubic metre', 'D41', '''t/m³''') ALL_UNITS.append(TONNE_PER_CUBIC_METRE) TROPICAL_YEAR = UnitDescriptor('tropical year', 'D42', '''y (tropical)''') ALL_UNITS.append(TROPICAL_YEAR) UNIFIED_ATOMIC_MASS_UNIT = UnitDescriptor('unified atomic mass unit', 'D43', '''u''') ALL_UNITS.append(UNIFIED_ATOMIC_MASS_UNIT) VAR = UnitDescriptor('var', 'D44', '''var''') ALL_UNITS.append(VAR) VOLT_SQUARED_PER_KELVIN_SQUARED = UnitDescriptor('volt squared per kelvin squared', 'D45', '''V²/K²''') ALL_UNITS.append(VOLT_SQUARED_PER_KELVIN_SQUARED) VOLT_AMPERE = UnitDescriptor('volt - ampere', 'D46', '''V·A''') ALL_UNITS.append(VOLT_AMPERE) VOLT_PER_CENTIMETRE = UnitDescriptor('volt per centimetre', 'D47', '''V/cm''') ALL_UNITS.append(VOLT_PER_CENTIMETRE) VOLT_PER_KELVIN = UnitDescriptor('volt per kelvin', 'D48', '''V/K''') ALL_UNITS.append(VOLT_PER_KELVIN) MILLIVOLT_PER_KELVIN = UnitDescriptor('millivolt per kelvin', 'D49', '''mV/K''') ALL_UNITS.append(MILLIVOLT_PER_KELVIN) KILOGRAM_PER_SQUARE_CENTIMETRE = UnitDescriptor('kilogram per square centimetre', 'D5', '''kg/cm²''') ALL_UNITS.append(KILOGRAM_PER_SQUARE_CENTIMETRE) VOLT_PER_METRE = UnitDescriptor('volt per metre', 'D50', '''V/m''') ALL_UNITS.append(VOLT_PER_METRE) VOLT_PER_MILLIMETRE = UnitDescriptor('volt per millimetre', 'D51', '''V/mm''') ALL_UNITS.append(VOLT_PER_MILLIMETRE) WATT_PER_KELVIN = UnitDescriptor('watt per kelvin', 'D52', '''W/K''') ALL_UNITS.append(WATT_PER_KELVIN) WATT_PER_METRE_KELVIN = UnitDescriptor('watt per metre kelvin', 'D53', '''W/(m·K)''') ALL_UNITS.append(WATT_PER_METRE_KELVIN) WATT_PER_SQUARE_METRE = UnitDescriptor('watt per square metre', 'D54', '''W/m²''') ALL_UNITS.append(WATT_PER_SQUARE_METRE) WATT_PER_SQUARE_METRE_KELVIN = UnitDescriptor('watt per square metre kelvin', 'D55', '''W/(m²·K)''') ALL_UNITS.append(WATT_PER_SQUARE_METRE_KELVIN) WATT_PER_SQUARE_METRE_KELVIN_TO_THE_FOURTH_POWER = UnitDescriptor('watt per square metre kelvin to the fourth power', 'D56', '''W/(m²·K⁴)''') ALL_UNITS.append(WATT_PER_SQUARE_METRE_KELVIN_TO_THE_FOURTH_POWER) WATT_PER_STERADIAN = UnitDescriptor('watt per steradian', 'D57', '''W/sr''') ALL_UNITS.append(WATT_PER_STERADIAN) WATT_PER_STERADIAN_SQUARE_METRE = UnitDescriptor('watt per steradian square metre', 'D58', '''W/(sr·m²)''') ALL_UNITS.append(WATT_PER_STERADIAN_SQUARE_METRE) WEBER_PER_METRE = UnitDescriptor('weber per metre', 'D59', '''Wb/m''') ALL_UNITS.append(WEBER_PER_METRE) ROENTGEN_PER_SECOND = UnitDescriptor('roentgen per second', 'D6', '''R/s''') ALL_UNITS.append(ROENTGEN_PER_SECOND) WEBER_PER_MILLIMETRE = UnitDescriptor('weber per millimetre', 'D60', '''Wb/mm''') ALL_UNITS.append(WEBER_PER_MILLIMETRE) MINUTE_UNIT_OF_ANGLE = UnitDescriptor('minute [unit of angle]', 'D61', '''\'''') ALL_UNITS.append(MINUTE_UNIT_OF_ANGLE) 
SECOND_UNIT_OF_ANGLE = UnitDescriptor('second [unit of angle]', 'D62', '''"''') ALL_UNITS.append(SECOND_UNIT_OF_ANGLE) BOOK = UnitDescriptor('book', 'D63', '''''') ALL_UNITS.append(BOOK) BLOCK = UnitDescriptor('block', 'D64', '''''') ALL_UNITS.append(BLOCK) ROUND = UnitDescriptor('round', 'D65', '''''') ALL_UNITS.append(ROUND) CASSETTE = UnitDescriptor('cassette', 'D66', '''''') ALL_UNITS.append(CASSETTE) DOLLAR_PER_HOUR = UnitDescriptor('dollar per hour', 'D67', '''''') ALL_UNITS.append(DOLLAR_PER_HOUR) NUMBER_OF_WORDS = UnitDescriptor('number of words', 'D68', '''''') ALL_UNITS.append(NUMBER_OF_WORDS) INCH_TO_THE_FOURTH_POWER = UnitDescriptor('inch to the fourth power', 'D69', '''in⁴''') ALL_UNITS.append(INCH_TO_THE_FOURTH_POWER) SANDWICH = UnitDescriptor('sandwich', 'D7', '''''') ALL_UNITS.append(SANDWICH) CALORIE_INTERNATIONAL_TABLE = UnitDescriptor('calorie (international table)', 'D70', '''calIT''') ALL_UNITS.append(CALORIE_INTERNATIONAL_TABLE) CALORIE_INTERNATIONAL_TABLE_PER_SECOND_CENTIMETRE_KELVIN = UnitDescriptor('calorie (international table) per second centimetre kelvin', 'D71', '''calIT/(s·cm·K)''') ALL_UNITS.append(CALORIE_INTERNATIONAL_TABLE_PER_SECOND_CENTIMETRE_KELVIN) CALORIE_INTERNATIONAL_TABLE_PER_SECOND_SQUARE_CENTIMETRE_KELVIN = UnitDescriptor('calorie (international table) per second square centimetre kelvin', 'D72', '''calIT/(s·cm²·K)''') ALL_UNITS.append(CALORIE_INTERNATIONAL_TABLE_PER_SECOND_SQUARE_CENTIMETRE_KELVIN) JOULE_SQUARE_METRE = UnitDescriptor('joule square metre', 'D73', '''J·m²''') ALL_UNITS.append(JOULE_SQUARE_METRE) KILOGRAM_PER_MOLE = UnitDescriptor('kilogram per mole', 'D74', '''kg/mol''') ALL_UNITS.append(KILOGRAM_PER_MOLE) CALORIE_INTERNATIONAL_TABLE_PER_GRAM = UnitDescriptor('calorie (international table) per gram', 'D75', '''calIT/g''') ALL_UNITS.append(CALORIE_INTERNATIONAL_TABLE_PER_GRAM) CALORIE_INTERNATIONAL_TABLE_PER_GRAM_KELVIN = UnitDescriptor('calorie (international table) per gram kelvin', 'D76', '''calIT/(g·K)''') ALL_UNITS.append(CALORIE_INTERNATIONAL_TABLE_PER_GRAM_KELVIN) MEGACOULOMB = UnitDescriptor('megacoulomb', 'D77', '''MC''') ALL_UNITS.append(MEGACOULOMB) MEGAJOULE_PER_SECOND = UnitDescriptor('megajoule per second', 'D78', '''MJ/s''') ALL_UNITS.append(MEGAJOULE_PER_SECOND) BEAM = UnitDescriptor('beam', 'D79', '''''') ALL_UNITS.append(BEAM) DRAIZE_SCORE = UnitDescriptor('draize score', 'D8', '''''') ALL_UNITS.append(DRAIZE_SCORE) MICROWATT = UnitDescriptor('microwatt', 'D80', '''µW''') ALL_UNITS.append(MICROWATT) MICROTESLA = UnitDescriptor('microtesla', 'D81', '''µT''') ALL_UNITS.append(MICROTESLA) MICROVOLT = UnitDescriptor('microvolt', 'D82', '''µV''') ALL_UNITS.append(MICROVOLT) MILLINEWTON_METRE = UnitDescriptor('millinewton metre', 'D83', '''mN·m''') ALL_UNITS.append(MILLINEWTON_METRE) MICROWATT_PER_SQUARE_METRE = UnitDescriptor('microwatt per square metre', 'D85', '''µW/m²''') ALL_UNITS.append(MICROWATT_PER_SQUARE_METRE) MILLICOULOMB = UnitDescriptor('millicoulomb', 'D86', '''mC''') ALL_UNITS.append(MILLICOULOMB) MILLIMOLE_PER_KILOGRAM = UnitDescriptor('millimole per kilogram', 'D87', '''mmol/kg''') ALL_UNITS.append(MILLIMOLE_PER_KILOGRAM) MILLICOULOMB_PER_CUBIC_METRE = UnitDescriptor('millicoulomb per cubic metre', 'D88', '''mC/m³''') ALL_UNITS.append(MILLICOULOMB_PER_CUBIC_METRE) MILLICOULOMB_PER_SQUARE_METRE = UnitDescriptor('millicoulomb per square metre', 'D89', '''mC/m²''') ALL_UNITS.append(MILLICOULOMB_PER_SQUARE_METRE) DYNE_PER_SQUARE_CENTIMETRE = UnitDescriptor('dyne per square centimetre', 'D9', '''dyn/cm²''') 
ALL_UNITS.append(DYNE_PER_SQUARE_CENTIMETRE) CUBIC_METRE_NET = UnitDescriptor('cubic metre (net)', 'D90', '''''') ALL_UNITS.append(CUBIC_METRE_NET) REM = UnitDescriptor('rem', 'D91', '''rem''') ALL_UNITS.append(REM) BAND = UnitDescriptor('band', 'D92', '''''') ALL_UNITS.append(BAND) SECOND_PER_CUBIC_METRE = UnitDescriptor('second per cubic metre', 'D93', '''s/m³''') ALL_UNITS.append(SECOND_PER_CUBIC_METRE) SECOND_PER_CUBIC_METRE_RADIAN = UnitDescriptor('second per cubic metre radian', 'D94', '''s/(rad·m³)''') ALL_UNITS.append(SECOND_PER_CUBIC_METRE_RADIAN) JOULE_PER_GRAM = UnitDescriptor('joule per gram', 'D95', '''J/g''') ALL_UNITS.append(JOULE_PER_GRAM) POUND_GROSS = UnitDescriptor('pound gross', 'D96', '''''') ALL_UNITS.append(POUND_GROSS) PALLET_PER_UNIT_LOAD = UnitDescriptor('pallet/unit load', 'D97', '''''') ALL_UNITS.append(PALLET_PER_UNIT_LOAD) MASS_POUND = UnitDescriptor('mass pound', 'D98', '''''') ALL_UNITS.append(MASS_POUND) SLEEVE = UnitDescriptor('sleeve', 'D99', '''''') ALL_UNITS.append(SLEEVE) DECARE = UnitDescriptor('decare', 'DAA', '''daa''') ALL_UNITS.append(DECARE) TEN_DAY = UnitDescriptor('ten day', 'DAD', '''''') ALL_UNITS.append(TEN_DAY) DAY = UnitDescriptor('day', 'DAY', '''d''') ALL_UNITS.append(DAY) DRY_POUND = UnitDescriptor('dry pound', 'DB', '''''') ALL_UNITS.append(DRY_POUND) DISK_DISC = UnitDescriptor('disk (disc)', 'DC', '''''') ALL_UNITS.append(DISK_DISC) DEGREE_UNIT_OF_ANGLE = UnitDescriptor('degree [unit of angle]', 'DD', '''°''') ALL_UNITS.append(DEGREE_UNIT_OF_ANGLE) DEAL = UnitDescriptor('deal', 'DE', '''''') ALL_UNITS.append(DEAL) DECADE = UnitDescriptor('decade', 'DEC', '''''') ALL_UNITS.append(DECADE) DECIGRAM = UnitDescriptor('decigram', 'DG', '''dg''') ALL_UNITS.append(DECIGRAM) DISPENSER = UnitDescriptor('dispenser', 'DI', '''''') ALL_UNITS.append(DISPENSER) DECAGRAM = UnitDescriptor('decagram', 'DJ', '''dag''') ALL_UNITS.append(DECAGRAM) DECILITRE = UnitDescriptor('decilitre', 'DLT', '''dl''') ALL_UNITS.append(DECILITRE) CUBIC_DECAMETRE = UnitDescriptor('cubic decametre', 'DMA', '''dam³''') ALL_UNITS.append(CUBIC_DECAMETRE) SQUARE_DECIMETRE = UnitDescriptor('square decimetre', 'DMK', '''dm²''') ALL_UNITS.append(SQUARE_DECIMETRE) STANDARD_KILOLITRE = UnitDescriptor('standard kilolitre', 'DMO', '''''') ALL_UNITS.append(STANDARD_KILOLITRE) CUBIC_DECIMETRE = UnitDescriptor('cubic decimetre', 'DMQ', '''dm³''') ALL_UNITS.append(CUBIC_DECIMETRE) DECIMETRE = UnitDescriptor('decimetre', 'DMT', '''dm''') ALL_UNITS.append(DECIMETRE) DECINEWTON_METRE = UnitDescriptor('decinewton metre', 'DN', '''dN·m''') ALL_UNITS.append(DECINEWTON_METRE) DOZEN_PIECE = UnitDescriptor('dozen piece', 'DPC', '''''') ALL_UNITS.append(DOZEN_PIECE) DOZEN_PAIR = UnitDescriptor('dozen pair', 'DPR', '''''') ALL_UNITS.append(DOZEN_PAIR) DISPLACEMENT_TONNAGE = UnitDescriptor('displacement tonnage', 'DPT', '''''') ALL_UNITS.append(DISPLACEMENT_TONNAGE) DATA_RECORD = UnitDescriptor('data record', 'DQ', '''''') ALL_UNITS.append(DATA_RECORD) DRUM = UnitDescriptor('drum', 'DR', '''''') ALL_UNITS.append(DRUM) DRAM_US = UnitDescriptor('dram (US)', 'DRA', '''''') ALL_UNITS.append(DRAM_US) DRAM_UK = UnitDescriptor('dram (UK)', 'DRI', '''''') ALL_UNITS.append(DRAM_UK) DOZEN_ROLL = UnitDescriptor('dozen roll', 'DRL', '''''') ALL_UNITS.append(DOZEN_ROLL) DRACHM_UK = UnitDescriptor('drachm (UK)', 'DRM', '''''') ALL_UNITS.append(DRACHM_UK) DISPLAY = UnitDescriptor('display', 'DS', '''''') ALL_UNITS.append(DISPLAY) DRY_TON = UnitDescriptor('dry ton', 'DT', '''''') ALL_UNITS.append(DRY_TON) DECITONNE 
= UnitDescriptor('decitonne', 'DTN', '''dt''') ALL_UNITS.append(DECITONNE) DECITONNE = UnitDescriptor('decitonne', 'DTN', '''dtn''') ALL_UNITS.append(DECITONNE) DYNE = UnitDescriptor('dyne', 'DU', '''dyn''') ALL_UNITS.append(DYNE) PENNYWEIGHT = UnitDescriptor('pennyweight', 'DWT', '''''') ALL_UNITS.append(PENNYWEIGHT) DYNE_PER_CENTIMETRE = UnitDescriptor('dyne per centimetre', 'DX', '''dyn/cm''') ALL_UNITS.append(DYNE_PER_CENTIMETRE) DIRECTORY_BOOK = UnitDescriptor('directory book', 'DY', '''''') ALL_UNITS.append(DIRECTORY_BOOK) DOZEN = UnitDescriptor('dozen', 'DZN', '''DOZ''') ALL_UNITS.append(DOZEN) DOZEN_PACK = UnitDescriptor('dozen pack', 'DZP', '''''') ALL_UNITS.append(DOZEN_PACK) NEWTON_PER_SQUARE_CENTIMETRE = UnitDescriptor('newton per square centimetre', 'E01', '''N/cm²''') ALL_UNITS.append(NEWTON_PER_SQUARE_CENTIMETRE) MEGAWATT_HOUR_PER_HOUR = UnitDescriptor('megawatt hour per hour', 'E07', '''MW·h/h''') ALL_UNITS.append(MEGAWATT_HOUR_PER_HOUR) MEGAWATT_PER_HERTZ = UnitDescriptor('megawatt per hertz', 'E08', '''MW/Hz''') ALL_UNITS.append(MEGAWATT_PER_HERTZ) MILLIAMPERE_HOUR = UnitDescriptor('milliampere hour', 'E09', '''mA·h''') ALL_UNITS.append(MILLIAMPERE_HOUR) DEGREE_DAY = UnitDescriptor('degree day', 'E10', '''deg da''') ALL_UNITS.append(DEGREE_DAY) GIGACALORIE = UnitDescriptor('gigacalorie', 'E11', '''''') ALL_UNITS.append(GIGACALORIE) MILLE = UnitDescriptor('mille', 'E12', '''''') ALL_UNITS.append(MILLE) KILOCALORIE_INTERNATIONAL_TABLE = UnitDescriptor('kilocalorie (international table)', 'E14', '''kcalIT''') ALL_UNITS.append(KILOCALORIE_INTERNATIONAL_TABLE) KILOCALORIE_THERMOCHEMICAL_PER_HOUR = UnitDescriptor('kilocalorie (thermochemical) per hour', 'E15', '''kcalth/h''') ALL_UNITS.append(KILOCALORIE_THERMOCHEMICAL_PER_HOUR) MILLION_BTUIT_PER_HOUR = UnitDescriptor('million Btu(IT) per hour', 'E16', '''BtuIT/h''') ALL_UNITS.append(MILLION_BTUIT_PER_HOUR) CUBIC_FOOT_PER_SECOND = UnitDescriptor('cubic foot per second', 'E17', '''ft³/s''') ALL_UNITS.append(CUBIC_FOOT_PER_SECOND) TONNE_PER_HOUR = UnitDescriptor('tonne per hour', 'E18', '''t/h''') ALL_UNITS.append(TONNE_PER_HOUR) PING = UnitDescriptor('ping', 'E19', '''''') ALL_UNITS.append(PING) BELT = UnitDescriptor('belt', 'E2', '''''') ALL_UNITS.append(BELT) MEGABIT_PER_SECOND = UnitDescriptor('megabit per second', 'E20', '''Mbit/s''') ALL_UNITS.append(MEGABIT_PER_SECOND) SHARES = UnitDescriptor('shares', 'E21', '''''') ALL_UNITS.append(SHARES) TEU = UnitDescriptor('TEU', 'E22', '''''') ALL_UNITS.append(TEU) TYRE = UnitDescriptor('tyre', 'E23', '''''') ALL_UNITS.append(TYRE) ACTIVE_UNIT = UnitDescriptor('active unit', 'E25', '''''') ALL_UNITS.append(ACTIVE_UNIT) DOSE = UnitDescriptor('dose', 'E27', '''''') ALL_UNITS.append(DOSE) AIR_DRY_TON = UnitDescriptor('air dry ton', 'E28', '''''') ALL_UNITS.append(AIR_DRY_TON) TRAILER = UnitDescriptor('trailer', 'E3', '''''') ALL_UNITS.append(TRAILER) STRAND = UnitDescriptor('strand', 'E30', '''''') ALL_UNITS.append(STRAND) SQUARE_METRE_PER_LITRE = UnitDescriptor('square metre per litre', 'E31', '''m²/l''') ALL_UNITS.append(SQUARE_METRE_PER_LITRE) LITRE_PER_HOUR = UnitDescriptor('litre per hour', 'E32', '''l/h''') ALL_UNITS.append(LITRE_PER_HOUR) FOOT_PER_THOUSAND = UnitDescriptor('foot per thousand', 'E33', '''''') ALL_UNITS.append(FOOT_PER_THOUSAND) GIGABYTE = UnitDescriptor('gigabyte', 'E34', '''Gbyte''') ALL_UNITS.append(GIGABYTE) TERABYTE = UnitDescriptor('terabyte', 'E35', '''Tbyte''') ALL_UNITS.append(TERABYTE) PETABYTE = UnitDescriptor('petabyte', 'E36', '''Pbyte''') 
ALL_UNITS.append(PETABYTE) PIXEL = UnitDescriptor('pixel', 'E37', '''''') ALL_UNITS.append(PIXEL) MEGAPIXEL = UnitDescriptor('megapixel', 'E38', '''''') ALL_UNITS.append(MEGAPIXEL) DOTS_PER_INCH = UnitDescriptor('dots per inch', 'E39', '''dpi''') ALL_UNITS.append(DOTS_PER_INCH) GROSS_KILOGRAM = UnitDescriptor('gross kilogram', 'E4', '''''') ALL_UNITS.append(GROSS_KILOGRAM) PART_PER_HUNDRED_THOUSAND = UnitDescriptor('part per hundred thousand', 'E40', '''ppht''') ALL_UNITS.append(PART_PER_HUNDRED_THOUSAND) KILOGRAM_FORCE_PER_SQUARE_MILLIMETRE = UnitDescriptor('kilogram-force per square millimetre', 'E41', '''kgf/mm²''') ALL_UNITS.append(KILOGRAM_FORCE_PER_SQUARE_MILLIMETRE) KILOGRAM_FORCE_PER_SQUARE_CENTIMETRE = UnitDescriptor('kilogram-force per square centimetre', 'E42', '''kgf/cm²''') ALL_UNITS.append(KILOGRAM_FORCE_PER_SQUARE_CENTIMETRE) JOULE_PER_SQUARE_CENTIMETRE = UnitDescriptor('joule per square centimetre', 'E43', '''J/cm²''') ALL_UNITS.append(JOULE_PER_SQUARE_CENTIMETRE) KILOGRAM_FORCE_METRE_PER_SQUARE_CENTIMETRE = UnitDescriptor('kilogram-force metre per square centimetre', 'E44', '''kgf·m/cm²''') ALL_UNITS.append(KILOGRAM_FORCE_METRE_PER_SQUARE_CENTIMETRE) MILLIOHM = UnitDescriptor('milliohm', 'E45', '''mΩ''') ALL_UNITS.append(MILLIOHM) KILOWATT_HOUR_PER_CUBIC_METRE = UnitDescriptor('kilowatt hour per cubic metre', 'E46', '''kW·h/m³''') ALL_UNITS.append(KILOWATT_HOUR_PER_CUBIC_METRE) KILOWATT_HOUR_PER_KELVIN = UnitDescriptor('kilowatt hour per kelvin', 'E47', '''kW·h/K''') ALL_UNITS.append(KILOWATT_HOUR_PER_KELVIN) SERVICE_UNIT = UnitDescriptor('service unit', 'E48', '''''') ALL_UNITS.append(SERVICE_UNIT) WORKING_DAY = UnitDescriptor('working day', 'E49', '''''') ALL_UNITS.append(WORKING_DAY) METRIC_LONG_TON = UnitDescriptor('metric long ton', 'E5', '''''') ALL_UNITS.append(METRIC_LONG_TON) ACCOUNTING_UNIT = UnitDescriptor('accounting unit', 'E50', '''''') ALL_UNITS.append(ACCOUNTING_UNIT) JOB = UnitDescriptor('job', 'E51', '''''') ALL_UNITS.append(JOB) RUN_FOOT = UnitDescriptor('run foot', 'E52', '''''') ALL_UNITS.append(RUN_FOOT) TEST = UnitDescriptor('test', 'E53', '''''') ALL_UNITS.append(TEST) TRIP = UnitDescriptor('trip', 'E54', '''''') ALL_UNITS.append(TRIP) USE = UnitDescriptor('use', 'E55', '''''') ALL_UNITS.append(USE) WELL = UnitDescriptor('well', 'E56', '''''') ALL_UNITS.append(WELL) ZONE = UnitDescriptor('zone', 'E57', '''''') ALL_UNITS.append(ZONE) EXABIT_PER_SECOND = UnitDescriptor('exabit per second', 'E58', '''Ebit/s''') ALL_UNITS.append(EXABIT_PER_SECOND) EXBIBYTE = UnitDescriptor('exbibyte', 'E59', '''Eibyte''') ALL_UNITS.append(EXBIBYTE) PEBIBYTE = UnitDescriptor('pebibyte', 'E60', '''Pibyte''') ALL_UNITS.append(PEBIBYTE) TEBIBYTE = UnitDescriptor('tebibyte', 'E61', '''Tibyte''') ALL_UNITS.append(TEBIBYTE) GIBIBYTE = UnitDescriptor('gibibyte', 'E62', '''Gibyte''') ALL_UNITS.append(GIBIBYTE) MEBIBYTE = UnitDescriptor('mebibyte', 'E63', '''Mibyte''') ALL_UNITS.append(MEBIBYTE) KIBIBYTE = UnitDescriptor('kibibyte', 'E64', '''Kibyte''') ALL_UNITS.append(KIBIBYTE) EXBIBIT_PER_METRE = UnitDescriptor('exbibit per metre', 'E65', '''Eibit/m''') ALL_UNITS.append(EXBIBIT_PER_METRE) EXBIBIT_PER_SQUARE_METRE = UnitDescriptor('exbibit per square metre', 'E66', '''Eibit/m²''') ALL_UNITS.append(EXBIBIT_PER_SQUARE_METRE) EXBIBIT_PER_CUBIC_METRE = UnitDescriptor('exbibit per cubic metre', 'E67', '''Eibit/m³''') ALL_UNITS.append(EXBIBIT_PER_CUBIC_METRE) GIGABYTE_PER_SECOND = UnitDescriptor('gigabyte per second', 'E68', '''Gbyte/s''') ALL_UNITS.append(GIGABYTE_PER_SECOND) 
GIBIBIT_PER_METRE = UnitDescriptor('gibibit per metre', 'E69', '''Gibit/m''') ALL_UNITS.append(GIBIBIT_PER_METRE) GIBIBIT_PER_SQUARE_METRE = UnitDescriptor('gibibit per square metre', 'E70', '''Gibit/m²''') ALL_UNITS.append(GIBIBIT_PER_SQUARE_METRE) GIBIBIT_PER_CUBIC_METRE = UnitDescriptor('gibibit per cubic metre', 'E71', '''Gibit/m³''') ALL_UNITS.append(GIBIBIT_PER_CUBIC_METRE) KIBIBIT_PER_METRE = UnitDescriptor('kibibit per metre', 'E72', '''Kibit/m''') ALL_UNITS.append(KIBIBIT_PER_METRE) KIBIBIT_PER_SQUARE_METRE = UnitDescriptor('kibibit per square metre', 'E73', '''Kibit/m²''') ALL_UNITS.append(KIBIBIT_PER_SQUARE_METRE) KIBIBIT_PER_CUBIC_METRE = UnitDescriptor('kibibit per cubic metre', 'E74', '''Kibit/m³''') ALL_UNITS.append(KIBIBIT_PER_CUBIC_METRE) MEBIBIT_PER_METRE = UnitDescriptor('mebibit per metre', 'E75', '''Mibit/m''') ALL_UNITS.append(MEBIBIT_PER_METRE) MEBIBIT_PER_SQUARE_METRE = UnitDescriptor('mebibit per square metre', 'E76', '''Mibit/m²''') ALL_UNITS.append(MEBIBIT_PER_SQUARE_METRE) MEBIBIT_PER_CUBIC_METRE = UnitDescriptor('mebibit per cubic metre', 'E77', '''Mibit/m³''') ALL_UNITS.append(MEBIBIT_PER_CUBIC_METRE) PETABIT = UnitDescriptor('petabit', 'E78', '''Pbit''') ALL_UNITS.append(PETABIT) PETABIT_PER_SECOND = UnitDescriptor('petabit per second', 'E79', '''Pbit/s''') ALL_UNITS.append(PETABIT_PER_SECOND) PEBIBIT_PER_METRE = UnitDescriptor('pebibit per metre', 'E80', '''Pibit/m''') ALL_UNITS.append(PEBIBIT_PER_METRE) PEBIBIT_PER_SQUARE_METRE = UnitDescriptor('pebibit per square metre', 'E81', '''Pibit/m²''') ALL_UNITS.append(PEBIBIT_PER_SQUARE_METRE) PEBIBIT_PER_CUBIC_METRE = UnitDescriptor('pebibit per cubic metre', 'E82', '''Pibit/m³''') ALL_UNITS.append(PEBIBIT_PER_CUBIC_METRE) TERABIT = UnitDescriptor('terabit', 'E83', '''Tbit''') ALL_UNITS.append(TERABIT) TERABIT_PER_SECOND = UnitDescriptor('terabit per second', 'E84', '''Tbit/s''') ALL_UNITS.append(TERABIT_PER_SECOND) TEBIBIT_PER_METRE = UnitDescriptor('tebibit per metre', 'E85', '''Tibit/m''') ALL_UNITS.append(TEBIBIT_PER_METRE) TEBIBIT_PER_CUBIC_METRE = UnitDescriptor('tebibit per cubic metre', 'E86', '''Tibit/m³''') ALL_UNITS.append(TEBIBIT_PER_CUBIC_METRE) TEBIBIT_PER_SQUARE_METRE = UnitDescriptor('tebibit per square metre', 'E87', '''Tibit/m²''') ALL_UNITS.append(TEBIBIT_PER_SQUARE_METRE) BIT_PER_METRE = UnitDescriptor('bit per metre', 'E88', '''bit/m''') ALL_UNITS.append(BIT_PER_METRE) BIT_PER_SQUARE_METRE = UnitDescriptor('bit per square metre', 'E89', '''bit/m²''') ALL_UNITS.append(BIT_PER_SQUARE_METRE) RECIPROCAL_CENTIMETRE = UnitDescriptor('reciprocal centimetre', 'E90', '''cm⁻¹''') ALL_UNITS.append(RECIPROCAL_CENTIMETRE) RECIPROCAL_DAY = UnitDescriptor('reciprocal day', 'E91', '''d⁻¹''') ALL_UNITS.append(RECIPROCAL_DAY) CUBIC_DECIMETRE_PER_HOUR = UnitDescriptor('cubic decimetre per hour', 'E92', '''dm³/h''') ALL_UNITS.append(CUBIC_DECIMETRE_PER_HOUR) KILOGRAM_PER_HOUR = UnitDescriptor('kilogram per hour', 'E93', '''kg/h''') ALL_UNITS.append(KILOGRAM_PER_HOUR) KILOMOLE_PER_SECOND = UnitDescriptor('kilomole per second', 'E94', '''kmol/s''') ALL_UNITS.append(KILOMOLE_PER_SECOND) MOLE_PER_SECOND = UnitDescriptor('mole per second', 'E95', '''mol/s''') ALL_UNITS.append(MOLE_PER_SECOND) DEGREE_PER_SECOND = UnitDescriptor('degree per second', 'E96', '''°/s''') ALL_UNITS.append(DEGREE_PER_SECOND) MILLIMETRE_PER_DEGREE_CELCIUS_METRE = UnitDescriptor('millimetre per degree Celcius metre', 'E97', '''mm/(°C·m)''') ALL_UNITS.append(MILLIMETRE_PER_DEGREE_CELCIUS_METRE) DEGREE_CELSIUS_PER_KELVIN = UnitDescriptor('degree 
Celsius per kelvin', 'E98', '''°C/K''') ALL_UNITS.append(DEGREE_CELSIUS_PER_KELVIN) HECTOPASCAL_PER_BAR = UnitDescriptor('hectopascal per bar', 'E99', '''hPa/bar''') ALL_UNITS.append(HECTOPASCAL_PER_BAR) EACH = UnitDescriptor('each', 'EA', '''''') ALL_UNITS.append(EACH) ELECTRONIC_MAIL_BOX = UnitDescriptor('electronic mail box', 'EB', '''''') ALL_UNITS.append(ELECTRONIC_MAIL_BOX) EACH_PER_MONTH = UnitDescriptor('each per month', 'EC', '''''') ALL_UNITS.append(EACH_PER_MONTH) ELEVEN_PACK = UnitDescriptor('eleven pack', 'EP', '''''') ALL_UNITS.append(ELEVEN_PACK) EQUIVALENT_GALLON = UnitDescriptor('equivalent gallon', 'EQ', '''''') ALL_UNITS.append(EQUIVALENT_GALLON) ENVELOPE = UnitDescriptor('envelope', 'EV', '''''') ALL_UNITS.append(ENVELOPE) BIT_PER_CUBIC_METRE = UnitDescriptor('bit per cubic metre', 'F01', '''bit/m³''') ALL_UNITS.append(BIT_PER_CUBIC_METRE) KELVIN_PER_KELVIN = UnitDescriptor('kelvin per kelvin', 'F02', '''K/K''') ALL_UNITS.append(KELVIN_PER_KELVIN) KILOPASCAL_PER_BAR = UnitDescriptor('kilopascal per bar', 'F03', '''kPa/bar''') ALL_UNITS.append(KILOPASCAL_PER_BAR) MILLIBAR_PER_BAR = UnitDescriptor('millibar per bar', 'F04', '''mbar/bar''') ALL_UNITS.append(MILLIBAR_PER_BAR) MEGAPASCAL_PER_BAR = UnitDescriptor('megapascal per bar', 'F05', '''MPa/bar''') ALL_UNITS.append(MEGAPASCAL_PER_BAR) POISE_PER_BAR = UnitDescriptor('poise per bar', 'F06', '''P/bar''') ALL_UNITS.append(POISE_PER_BAR) PASCAL_PER_BAR = UnitDescriptor('pascal per bar', 'F07', '''Pa/bar''') ALL_UNITS.append(PASCAL_PER_BAR) MILLIAMPERE_PER_INCH = UnitDescriptor('milliampere per inch', 'F08', '''mA/in''') ALL_UNITS.append(MILLIAMPERE_PER_INCH) THOUSAND_CUBIC_FOOT_PER_DAY = UnitDescriptor('thousand cubic foot per day', 'F1', '''''') ALL_UNITS.append(THOUSAND_CUBIC_FOOT_PER_DAY) KELVIN_PER_HOUR = UnitDescriptor('kelvin per hour', 'F10', '''K/h''') ALL_UNITS.append(KELVIN_PER_HOUR) KELVIN_PER_MINUTE = UnitDescriptor('kelvin per minute', 'F11', '''K/min''') ALL_UNITS.append(KELVIN_PER_MINUTE) KELVIN_PER_SECOND = UnitDescriptor('kelvin per second', 'F12', '''K/s''') ALL_UNITS.append(KELVIN_PER_SECOND) SLUG = UnitDescriptor('slug', 'F13', '''slug''') ALL_UNITS.append(SLUG) GRAM_PER_KELVIN = UnitDescriptor('gram per kelvin', 'F14', '''g/K''') ALL_UNITS.append(GRAM_PER_KELVIN) KILOGRAM_PER_KELVIN = UnitDescriptor('kilogram per kelvin', 'F15', '''kg/K''') ALL_UNITS.append(KILOGRAM_PER_KELVIN) MILLIGRAM_PER_KELVIN = UnitDescriptor('milligram per kelvin', 'F16', '''mg/K''') ALL_UNITS.append(MILLIGRAM_PER_KELVIN) POUND_FORCE_PER_FOOT = UnitDescriptor('pound-force per foot', 'F17', '''lbf/ft''') ALL_UNITS.append(POUND_FORCE_PER_FOOT) KILOGRAM_SQUARE_CENTIMETRE = UnitDescriptor('kilogram square centimetre', 'F18', '''kg·cm²''') ALL_UNITS.append(KILOGRAM_SQUARE_CENTIMETRE) KILOGRAM_SQUARE_MILLIMETRE = UnitDescriptor('kilogram square millimetre', 'F19', '''kg·mm²''') ALL_UNITS.append(KILOGRAM_SQUARE_MILLIMETRE) POUND_INCH_SQUARED = UnitDescriptor('pound inch squared', 'F20', '''lb·in²''') ALL_UNITS.append(POUND_INCH_SQUARED) POUND_FORCE_INCH = UnitDescriptor('pound-force inch', 'F21', '''lbf·in''') ALL_UNITS.append(POUND_FORCE_INCH) POUND_FORCE_FOOT_PER_AMPERE = UnitDescriptor('pound-force foot per ampere', 'F22', '''lbf·ft/A''') ALL_UNITS.append(POUND_FORCE_FOOT_PER_AMPERE) GRAM_PER_CUBIC_DECIMETRE = UnitDescriptor('gram per cubic decimetre', 'F23', '''g/dm³''') ALL_UNITS.append(GRAM_PER_CUBIC_DECIMETRE) KILOGRAM_PER_KILOMOL = UnitDescriptor('kilogram per kilomol', 'F24', '''kg/kmol''') 
ALL_UNITS.append(KILOGRAM_PER_KILOMOL) GRAM_PER_HERTZ = UnitDescriptor('gram per hertz', 'F25', '''g/Hz''') ALL_UNITS.append(GRAM_PER_HERTZ) GRAM_PER_DAY = UnitDescriptor('gram per day', 'F26', '''g/d''') ALL_UNITS.append(GRAM_PER_DAY) GRAM_PER_HOUR = UnitDescriptor('gram per hour', 'F27', '''g/h''') ALL_UNITS.append(GRAM_PER_HOUR) GRAM_PER_MINUTE = UnitDescriptor('gram per minute', 'F28', '''g/min''') ALL_UNITS.append(GRAM_PER_MINUTE) GRAM_PER_SECOND = UnitDescriptor('gram per second', 'F29', '''g/s''') ALL_UNITS.append(GRAM_PER_SECOND) KILOGRAM_PER_DAY = UnitDescriptor('kilogram per day', 'F30', '''kg/d''') ALL_UNITS.append(KILOGRAM_PER_DAY) KILOGRAM_PER_MINUTE = UnitDescriptor('kilogram per minute', 'F31', '''kg/min''') ALL_UNITS.append(KILOGRAM_PER_MINUTE) MILLIGRAM_PER_DAY = UnitDescriptor('milligram per day', 'F32', '''mg/d''') ALL_UNITS.append(MILLIGRAM_PER_DAY) MILLIGRAM_PER_MINUTE = UnitDescriptor('milligram per minute', 'F33', '''mg/min''') ALL_UNITS.append(MILLIGRAM_PER_MINUTE) MILLIGRAM_PER_SECOND = UnitDescriptor('milligram per second', 'F34', '''mg/s''') ALL_UNITS.append(MILLIGRAM_PER_SECOND) GRAM_PER_DAY_KELVIN = UnitDescriptor('gram per day kelvin', 'F35', '''g/(d·K)''') ALL_UNITS.append(GRAM_PER_DAY_KELVIN) GRAM_PER_HOUR_KELVIN = UnitDescriptor('gram per hour kelvin', 'F36', '''g/(h·K)''') ALL_UNITS.append(GRAM_PER_HOUR_KELVIN) GRAM_PER_MINUTE_KELVIN = UnitDescriptor('gram per minute kelvin', 'F37', '''g/(min·K)''') ALL_UNITS.append(GRAM_PER_MINUTE_KELVIN) GRAM_PER_SECOND_KELVIN = UnitDescriptor('gram per second kelvin', 'F38', '''g/(s·K)''') ALL_UNITS.append(GRAM_PER_SECOND_KELVIN) KILOGRAM_PER_DAY_KELVIN = UnitDescriptor('kilogram per day kelvin', 'F39', '''kg/(d·K)''') ALL_UNITS.append(KILOGRAM_PER_DAY_KELVIN) KILOGRAM_PER_HOUR_KELVIN = UnitDescriptor('kilogram per hour kelvin', 'F40', '''kg/(h·K)''') ALL_UNITS.append(KILOGRAM_PER_HOUR_KELVIN) KILOGRAM_PER_MINUTE_KELVIN = UnitDescriptor('kilogram per minute kelvin', 'F41', '''kg/(min·K)''') ALL_UNITS.append(KILOGRAM_PER_MINUTE_KELVIN) KILOGRAM_PER_SECOND_KELVIN = UnitDescriptor('kilogram per second kelvin', 'F42', '''kg/(s·K)''') ALL_UNITS.append(KILOGRAM_PER_SECOND_KELVIN) MILLIGRAM_PER_DAY_KELVIN = UnitDescriptor('milligram per day kelvin', 'F43', '''mg/(d·K)''') ALL_UNITS.append(MILLIGRAM_PER_DAY_KELVIN) MILLIGRAM_PER_HOUR_KELVIN = UnitDescriptor('milligram per hour kelvin', 'F44', '''mg/(h·K)''') ALL_UNITS.append(MILLIGRAM_PER_HOUR_KELVIN) MILLIGRAM_PER_MINUTE_KELVIN = UnitDescriptor('milligram per minute kelvin', 'F45', '''mg/(min·K)''') ALL_UNITS.append(MILLIGRAM_PER_MINUTE_KELVIN) MILLIGRAM_PER_SECOND_KELVIN = UnitDescriptor('milligram per second kelvin', 'F46', '''mg/(s·K)''') ALL_UNITS.append(MILLIGRAM_PER_SECOND_KELVIN) NEWTON_PER_MILLIMETRE = UnitDescriptor('newton per millimetre', 'F47', '''N/mm''') ALL_UNITS.append(NEWTON_PER_MILLIMETRE) POUND_FORCE_PER_INCH = UnitDescriptor('pound-force per inch', 'F48', '''lbf/in''') ALL_UNITS.append(POUND_FORCE_PER_INCH) ROD_UNIT_OF_DISTANCE = UnitDescriptor('rod [unit of distance]', 'F49', '''rd (US)''') ALL_UNITS.append(ROD_UNIT_OF_DISTANCE) MICROMETRE_PER_KELVIN = UnitDescriptor('micrometre per kelvin', 'F50', '''µm/K''') ALL_UNITS.append(MICROMETRE_PER_KELVIN) CENTIMETRE_PER_KELVIN = UnitDescriptor('centimetre per kelvin', 'F51', '''cm/K''') ALL_UNITS.append(CENTIMETRE_PER_KELVIN) METRE_PER_KELVIN = UnitDescriptor('metre per kelvin', 'F52', '''m/K''') ALL_UNITS.append(METRE_PER_KELVIN) MILLIMETRE_PER_KELVIN = UnitDescriptor('millimetre per kelvin', 'F53', '''mm/K''') 
ALL_UNITS.append(MILLIMETRE_PER_KELVIN) MILLIOHM_PER_METRE = UnitDescriptor('milliohm per metre', 'F54', '''mΩ/m''') ALL_UNITS.append(MILLIOHM_PER_METRE) OHM_PER_MILE_STATUTE_MILE = UnitDescriptor('ohm per mile (statute mile)', 'F55', '''Ω/mi''') ALL_UNITS.append(OHM_PER_MILE_STATUTE_MILE) OHM_PER_KILOMETRE = UnitDescriptor('ohm per kilometre', 'F56', '''Ω/km''') ALL_UNITS.append(OHM_PER_KILOMETRE) MILLIAMPERE_PER_POUND_FORCE_PER_SQUARE_INCH = UnitDescriptor('milliampere per pound-force per square inch', 'F57', '''mA/(lbf/in²)''') ALL_UNITS.append(MILLIAMPERE_PER_POUND_FORCE_PER_SQUARE_INCH) RECIPROCAL_BAR = UnitDescriptor('reciprocal bar', 'F58', '''1/bar''') ALL_UNITS.append(RECIPROCAL_BAR) MILLIAMPERE_PER_BAR = UnitDescriptor('milliampere per bar', 'F59', '''mA/bar''') ALL_UNITS.append(MILLIAMPERE_PER_BAR) DEGREE_CELSIUS_PER_BAR = UnitDescriptor('degree Celsius per bar', 'F60', '''°C/bar''') ALL_UNITS.append(DEGREE_CELSIUS_PER_BAR) KELVIN_PER_BAR = UnitDescriptor('kelvin per bar', 'F61', '''K/bar''') ALL_UNITS.append(KELVIN_PER_BAR) GRAM_PER_DAY_BAR = UnitDescriptor('gram per day bar', 'F62', '''g/(d·bar)''') ALL_UNITS.append(GRAM_PER_DAY_BAR) GRAM_PER_HOUR_BAR = UnitDescriptor('gram per hour bar', 'F63', '''g/(h·bar)''') ALL_UNITS.append(GRAM_PER_HOUR_BAR) GRAM_PER_MINUTE_BAR = UnitDescriptor('gram per minute bar', 'F64', '''g/(min·bar)''') ALL_UNITS.append(GRAM_PER_MINUTE_BAR) GRAM_PER_SECOND_BAR = UnitDescriptor('gram per second bar', 'F65', '''g/(s·bar)''') ALL_UNITS.append(GRAM_PER_SECOND_BAR) KILOGRAM_PER_DAY_BAR = UnitDescriptor('kilogram per day bar', 'F66', '''kg/(d·bar)''') ALL_UNITS.append(KILOGRAM_PER_DAY_BAR) KILOGRAM_PER_HOUR_BAR = UnitDescriptor('kilogram per hour bar', 'F67', '''kg/(h·bar)''') ALL_UNITS.append(KILOGRAM_PER_HOUR_BAR) KILOGRAM_PER_MINUTE_BAR = UnitDescriptor('kilogram per minute bar', 'F68', '''kg/(min·bar)''') ALL_UNITS.append(KILOGRAM_PER_MINUTE_BAR) KILOGRAM_PER_SECOND_BAR = UnitDescriptor('kilogram per second bar', 'F69', '''kg/(s·bar)''') ALL_UNITS.append(KILOGRAM_PER_SECOND_BAR) MILLIGRAM_PER_DAY_BAR = UnitDescriptor('milligram per day bar', 'F70', '''mg/(d·bar)''') ALL_UNITS.append(MILLIGRAM_PER_DAY_BAR) MILLIGRAM_PER_HOUR_BAR = UnitDescriptor('milligram per hour bar', 'F71', '''mg/(h·bar)''') ALL_UNITS.append(MILLIGRAM_PER_HOUR_BAR) MILLIGRAM_PER_MINUTE_BAR = UnitDescriptor('milligram per minute bar', 'F72', '''mg/(min·bar)''') ALL_UNITS.append(MILLIGRAM_PER_MINUTE_BAR) MILLIGRAM_PER_SECOND_BAR = UnitDescriptor('milligram per second bar', 'F73', '''mg/(s·bar)''') ALL_UNITS.append(MILLIGRAM_PER_SECOND_BAR) GRAM_PER_BAR = UnitDescriptor('gram per bar', 'F74', '''g/bar''') ALL_UNITS.append(GRAM_PER_BAR) MILLIGRAM_PER_BAR = UnitDescriptor('milligram per bar', 'F75', '''mg/bar''') ALL_UNITS.append(MILLIGRAM_PER_BAR) MILLIAMPERE_PER_MILLIMETRE = UnitDescriptor('milliampere per millimetre', 'F76', '''mA/mm''') ALL_UNITS.append(MILLIAMPERE_PER_MILLIMETRE) PASCAL_SECOND_PER_KELVIN = UnitDescriptor('pascal second per kelvin', 'F77', '''Pa.s/K''') ALL_UNITS.append(PASCAL_SECOND_PER_KELVIN) INCH_OF_WATER = UnitDescriptor('inch of water', 'F78', '''inH₂O''') ALL_UNITS.append(INCH_OF_WATER) INCH_OF_MERCURY = UnitDescriptor('inch of mercury', 'F79', '''inHg''') ALL_UNITS.append(INCH_OF_MERCURY) WATER_HORSE_POWER = UnitDescriptor('water horse power', 'F80', '''''') ALL_UNITS.append(WATER_HORSE_POWER) BAR_PER_KELVIN = UnitDescriptor('bar per kelvin', 'F81', '''bar/K''') ALL_UNITS.append(BAR_PER_KELVIN) HECTOPASCAL_PER_KELVIN = UnitDescriptor('hectopascal per 
kelvin', 'F82', '''hPa/K''') ALL_UNITS.append(HECTOPASCAL_PER_KELVIN) KILOPASCAL_PER_KELVIN = UnitDescriptor('kilopascal per kelvin', 'F83', '''kPa/K''') ALL_UNITS.append(KILOPASCAL_PER_KELVIN) MILLIBAR_PER_KELVIN = UnitDescriptor('millibar per kelvin', 'F84', '''mbar/K''') ALL_UNITS.append(MILLIBAR_PER_KELVIN) MEGAPASCAL_PER_KELVIN = UnitDescriptor('megapascal per kelvin', 'F85', '''MPa/K''') ALL_UNITS.append(MEGAPASCAL_PER_KELVIN) POISE_PER_KELVIN = UnitDescriptor('poise per kelvin', 'F86', '''P/K''') ALL_UNITS.append(POISE_PER_KELVIN) VOLT_PER_LITRE_MINUTE = UnitDescriptor('volt per litre minute', 'F87', '''V/(l·min)''') ALL_UNITS.append(VOLT_PER_LITRE_MINUTE) NEWTON_CENTIMETRE = UnitDescriptor('newton centimetre', 'F88', '''N·cm''') ALL_UNITS.append(NEWTON_CENTIMETRE) NEWTON_METRE_PER_DEGREE = UnitDescriptor('newton metre per degree', 'F89', '''Nm/°''') ALL_UNITS.append(NEWTON_METRE_PER_DEGREE) FIBRE_PER_CUBIC_CENTIMETRE_OF_AIR = UnitDescriptor('fibre per cubic centimetre of air', 'F9', '''''') ALL_UNITS.append(FIBRE_PER_CUBIC_CENTIMETRE_OF_AIR) NEWTON_METRE_PER_AMPERE = UnitDescriptor('newton metre per ampere', 'F90', '''N·m/A''') ALL_UNITS.append(NEWTON_METRE_PER_AMPERE) BAR_LITRE_PER_SECOND = UnitDescriptor('bar litre per second', 'F91', '''bar·l/s''') ALL_UNITS.append(BAR_LITRE_PER_SECOND) BAR_CUBIC_METRE_PER_SECOND = UnitDescriptor('bar cubic metre per second', 'F92', '''bar·m³/s''') ALL_UNITS.append(BAR_CUBIC_METRE_PER_SECOND) HECTOPASCAL_LITRE_PER_SECOND = UnitDescriptor('hectopascal litre per second', 'F93', '''hPa·l/s''') ALL_UNITS.append(HECTOPASCAL_LITRE_PER_SECOND) HECTOPASCAL_CUBIC_METRE_PER_SECOND = UnitDescriptor('hectopascal cubic metre per second', 'F94', '''hPa·m³/s''') ALL_UNITS.append(HECTOPASCAL_CUBIC_METRE_PER_SECOND) MILLIBAR_LITRE_PER_SECOND = UnitDescriptor('millibar litre per second', 'F95', '''mbar·l/s''') ALL_UNITS.append(MILLIBAR_LITRE_PER_SECOND) MILLIBAR_CUBIC_METRE_PER_SECOND = UnitDescriptor('millibar cubic metre per second', 'F96', '''mbar·m³/s''') ALL_UNITS.append(MILLIBAR_CUBIC_METRE_PER_SECOND) MEGAPASCAL_LITRE_PER_SECOND = UnitDescriptor('megapascal litre per second', 'F97', '''MPa·l/s''') ALL_UNITS.append(MEGAPASCAL_LITRE_PER_SECOND) MEGAPASCAL_CUBIC_METRE_PER_SECOND = UnitDescriptor('megapascal cubic metre per second', 'F98', '''MPa·m³/s''') ALL_UNITS.append(MEGAPASCAL_CUBIC_METRE_PER_SECOND) PASCAL_LITRE_PER_SECOND = UnitDescriptor('pascal litre per second', 'F99', '''Pa·l/s''') ALL_UNITS.append(PASCAL_LITRE_PER_SECOND) DEGREE_FAHRENHEIT = UnitDescriptor('degree Fahrenheit', 'FAH', '''°F''') ALL_UNITS.append(DEGREE_FAHRENHEIT) FARAD = UnitDescriptor('farad', 'FAR', '''F''') ALL_UNITS.append(FARAD) FIELD = UnitDescriptor('field', 'FB', '''''') ALL_UNITS.append(FIELD) FIBRE_METRE = UnitDescriptor('fibre metre', 'FBM', '''''') ALL_UNITS.append(FIBRE_METRE) THOUSAND_CUBIC_FOOT = UnitDescriptor('thousand cubic foot', 'FC', '''kft³''') ALL_UNITS.append(THOUSAND_CUBIC_FOOT) MILLION_PARTICLE_PER_CUBIC_FOOT = UnitDescriptor('million particle per cubic foot', 'FD', '''''') ALL_UNITS.append(MILLION_PARTICLE_PER_CUBIC_FOOT) TRACK_FOOT = UnitDescriptor('track foot', 'FE', '''''') ALL_UNITS.append(TRACK_FOOT) HUNDRED_CUBIC_METRE = UnitDescriptor('hundred cubic metre', 'FF', '''''') ALL_UNITS.append(HUNDRED_CUBIC_METRE) TRANSDERMAL_PATCH = UnitDescriptor('transdermal patch', 'FG', '''''') ALL_UNITS.append(TRANSDERMAL_PATCH) MICROMOLE = UnitDescriptor('micromole', 'FH', '''µmol''') ALL_UNITS.append(MICROMOLE) FAILURES_IN_TIME = UnitDescriptor('failures in 
time', 'FIT', '''FIT''') ALL_UNITS.append(FAILURES_IN_TIME) FLAKE_TON = UnitDescriptor('flake ton', 'FL', '''''') ALL_UNITS.append(FLAKE_TON) MILLION_CUBIC_FOOT = UnitDescriptor('million cubic foot', 'FM', '''Mft³''') ALL_UNITS.append(MILLION_CUBIC_FOOT) FOOT = UnitDescriptor('foot', 'FOT', '''ft''') ALL_UNITS.append(FOOT) POUND_PER_SQUARE_FOOT = UnitDescriptor('pound per square foot', 'FP', '''lb/ft²''') ALL_UNITS.append(POUND_PER_SQUARE_FOOT) FOOT_PER_MINUTE = UnitDescriptor('foot per minute', 'FR', '''ft/min''') ALL_UNITS.append(FOOT_PER_MINUTE) FOOT_PER_SECOND = UnitDescriptor('foot per second', 'FS', '''ft/s''') ALL_UNITS.append(FOOT_PER_SECOND) SQUARE_FOOT = UnitDescriptor('square foot', 'FTK', '''ft²''') ALL_UNITS.append(SQUARE_FOOT) CUBIC_FOOT = UnitDescriptor('cubic foot', 'FTQ', '''ft³''') ALL_UNITS.append(CUBIC_FOOT) PASCAL_CUBIC_METRE_PER_SECOND = UnitDescriptor('pascal cubic metre per second', 'G01', '''Pa·m³/s''') ALL_UNITS.append(PASCAL_CUBIC_METRE_PER_SECOND) CENTIMETRE_PER_BAR = UnitDescriptor('centimetre per bar', 'G04', '''cm/bar''') ALL_UNITS.append(CENTIMETRE_PER_BAR) METRE_PER_BAR = UnitDescriptor('metre per bar', 'G05', '''m/bar''') ALL_UNITS.append(METRE_PER_BAR) MILLIMETRE_PER_BAR = UnitDescriptor('millimetre per bar', 'G06', '''mm/bar''') ALL_UNITS.append(MILLIMETRE_PER_BAR) SQUARE_INCH_PER_SECOND = UnitDescriptor('square inch per second', 'G08', '''in²/s''') ALL_UNITS.append(SQUARE_INCH_PER_SECOND) SQUARE_METRE_PER_SECOND_KELVIN = UnitDescriptor('square metre per second kelvin', 'G09', '''m²/(s·K)''') ALL_UNITS.append(SQUARE_METRE_PER_SECOND_KELVIN) STOKES_PER_KELVIN = UnitDescriptor('stokes per kelvin', 'G10', '''St/K''') ALL_UNITS.append(STOKES_PER_KELVIN) GRAM_PER_CUBIC_CENTIMETRE_BAR = UnitDescriptor('gram per cubic centimetre bar', 'G11', '''g/(cm³·bar)''') ALL_UNITS.append(GRAM_PER_CUBIC_CENTIMETRE_BAR) GRAM_PER_CUBIC_DECIMETRE_BAR = UnitDescriptor('gram per cubic decimetre bar', 'G12', '''g/(dm³·bar)''') ALL_UNITS.append(GRAM_PER_CUBIC_DECIMETRE_BAR) GRAM_PER_LITRE_BAR = UnitDescriptor('gram per litre bar', 'G13', '''g/(l·bar)''') ALL_UNITS.append(GRAM_PER_LITRE_BAR) GRAM_PER_CUBIC_METRE_BAR = UnitDescriptor('gram per cubic metre bar', 'G14', '''g/(m³·bar)''') ALL_UNITS.append(GRAM_PER_CUBIC_METRE_BAR) GRAM_PER_MILLILITRE_BAR = UnitDescriptor('gram per millilitre bar', 'G15', '''g/(ml·bar)''') ALL_UNITS.append(GRAM_PER_MILLILITRE_BAR) KILOGRAM_PER_CUBIC_CENTIMETRE_BAR = UnitDescriptor('kilogram per cubic centimetre bar', 'G16', '''kg/(cm³·bar)''') ALL_UNITS.append(KILOGRAM_PER_CUBIC_CENTIMETRE_BAR) KILOGRAM_PER_LITRE_BAR = UnitDescriptor('kilogram per litre bar', 'G17', '''kg/(l·bar)''') ALL_UNITS.append(KILOGRAM_PER_LITRE_BAR) KILOGRAM_PER_CUBIC_METRE_BAR = UnitDescriptor('kilogram per cubic metre bar', 'G18', '''kg/(m³·bar)''') ALL_UNITS.append(KILOGRAM_PER_CUBIC_METRE_BAR) NEWTON_METRE_PER_KILOGRAM = UnitDescriptor('newton metre per kilogram', 'G19', '''N·m/kg''') ALL_UNITS.append(NEWTON_METRE_PER_KILOGRAM) US_GALLON_PER_MINUTE = UnitDescriptor('US gallon per minute', 'G2', '''gal (US) /min''') ALL_UNITS.append(US_GALLON_PER_MINUTE) POUND_FORCE_FOOT_PER_POUND = UnitDescriptor('pound-force foot per pound', 'G20', '''lbf·ft/lb''') ALL_UNITS.append(POUND_FORCE_FOOT_PER_POUND) CUP_UNIT_OF_VOLUME = UnitDescriptor('cup [unit of volume]', 'G21', '''cup (US)''') ALL_UNITS.append(CUP_UNIT_OF_VOLUME) PECK = UnitDescriptor('peck', 'G23', '''pk (US)''') ALL_UNITS.append(PECK) TABLESPOON_US = UnitDescriptor('tablespoon (US)', 'G24', '''tablespoon (US)''') 
ALL_UNITS.append(TABLESPOON_US) TEASPOON_US = UnitDescriptor('teaspoon (US)', 'G25', '''teaspoon (US)''') ALL_UNITS.append(TEASPOON_US) STERE = UnitDescriptor('stere', 'G26', '''st''') ALL_UNITS.append(STERE) CUBIC_CENTIMETRE_PER_KELVIN = UnitDescriptor('cubic centimetre per kelvin', 'G27', '''cm³/K''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_KELVIN) LITRE_PER_KELVIN = UnitDescriptor('litre per kelvin', 'G28', '''l/K''') ALL_UNITS.append(LITRE_PER_KELVIN) CUBIC_METRE_PER_KELVIN = UnitDescriptor('cubic metre per kelvin', 'G29', '''m³/K''') ALL_UNITS.append(CUBIC_METRE_PER_KELVIN) IMPERIAL_GALLON_PER_MINUTE = UnitDescriptor('Imperial gallon per minute', 'G3', '''gal (UK) /min''') ALL_UNITS.append(IMPERIAL_GALLON_PER_MINUTE) MILLILITRE_PER_KELVIN = UnitDescriptor('millilitre per kelvin', 'G30', '''ml/K''') ALL_UNITS.append(MILLILITRE_PER_KELVIN) KILOGRAM_PER_CUBIC_CENTIMETRE = UnitDescriptor('kilogram per cubic centimetre', 'G31', '''kg/cm³''') ALL_UNITS.append(KILOGRAM_PER_CUBIC_CENTIMETRE) OUNCE_AVOIRDUPOIS_PER_CUBIC_YARD = UnitDescriptor('ounce (avoirdupois) per cubic yard', 'G32', '''oz/yd³''') ALL_UNITS.append(OUNCE_AVOIRDUPOIS_PER_CUBIC_YARD) GRAM_PER_CUBIC_CENTIMETRE_KELVIN = UnitDescriptor('gram per cubic centimetre kelvin', 'G33', '''g/(cm³·K)''') ALL_UNITS.append(GRAM_PER_CUBIC_CENTIMETRE_KELVIN) GRAM_PER_CUBIC_DECIMETRE_KELVIN = UnitDescriptor('gram per cubic decimetre kelvin', 'G34', '''g/(dm³·K)''') ALL_UNITS.append(GRAM_PER_CUBIC_DECIMETRE_KELVIN) GRAM_PER_LITRE_KELVIN = UnitDescriptor('gram per litre kelvin', 'G35', '''g/(l·K)''') ALL_UNITS.append(GRAM_PER_LITRE_KELVIN) GRAM_PER_CUBIC_METRE_KELVIN = UnitDescriptor('gram per cubic metre kelvin', 'G36', '''g/(m³·K)''') ALL_UNITS.append(GRAM_PER_CUBIC_METRE_KELVIN) GRAM_PER_MILLILITRE_KELVIN = UnitDescriptor('gram per millilitre kelvin', 'G37', '''g/(ml·K)''') ALL_UNITS.append(GRAM_PER_MILLILITRE_KELVIN) KILOGRAM_PER_CUBIC_CENTIMETRE_KELVIN = UnitDescriptor('kilogram per cubic centimetre kelvin', 'G38', '''kg/(cm³·K)''') ALL_UNITS.append(KILOGRAM_PER_CUBIC_CENTIMETRE_KELVIN) KILOGRAM_PER_LITRE_KELVIN = UnitDescriptor('kilogram per litre kelvin', 'G39', '''kg/(l·K)''') ALL_UNITS.append(KILOGRAM_PER_LITRE_KELVIN) KILOGRAM_PER_CUBIC_METRE_KELVIN = UnitDescriptor('kilogram per cubic metre kelvin', 'G40', '''kg/(m³·K)''') ALL_UNITS.append(KILOGRAM_PER_CUBIC_METRE_KELVIN) SQUARE_METRE_PER_SECOND_BAR = UnitDescriptor('square metre per second bar', 'G41', '''m²/(s·bar)''') ALL_UNITS.append(SQUARE_METRE_PER_SECOND_BAR) MICROSIEMENS_PER_CENTIMETRE = UnitDescriptor('microsiemens per centimetre', 'G42', '''µS/cm''') ALL_UNITS.append(MICROSIEMENS_PER_CENTIMETRE) MICROSIEMENS_PER_METRE = UnitDescriptor('microsiemens per metre', 'G43', '''µS/m''') ALL_UNITS.append(MICROSIEMENS_PER_METRE) NANOSIEMENS_PER_CENTIMETRE = UnitDescriptor('nanosiemens per centimetre', 'G44', '''nS/cm''') ALL_UNITS.append(NANOSIEMENS_PER_CENTIMETRE) NANOSIEMENS_PER_METRE = UnitDescriptor('nanosiemens per metre', 'G45', '''nS/m''') ALL_UNITS.append(NANOSIEMENS_PER_METRE) STOKES_PER_BAR = UnitDescriptor('stokes per bar', 'G46', '''St/bar''') ALL_UNITS.append(STOKES_PER_BAR) CUBIC_CENTIMETRE_PER_DAY = UnitDescriptor('cubic centimetre per day', 'G47', '''cm³/d''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_DAY) CUBIC_CENTIMETRE_PER_HOUR = UnitDescriptor('cubic centimetre per hour', 'G48', '''cm³/h''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_HOUR) CUBIC_CENTIMETRE_PER_MINUTE = UnitDescriptor('cubic centimetre per minute', 'G49', '''cm³/min''') 
ALL_UNITS.append(CUBIC_CENTIMETRE_PER_MINUTE) GALLON_US_PER_HOUR = UnitDescriptor('gallon (US) per hour', 'G50', '''gal/h''') ALL_UNITS.append(GALLON_US_PER_HOUR) LITRE_PER_SECOND = UnitDescriptor('litre per second', 'G51', '''l/s''') ALL_UNITS.append(LITRE_PER_SECOND) CUBIC_METRE_PER_DAY = UnitDescriptor('cubic metre per day', 'G52', '''m³/d''') ALL_UNITS.append(CUBIC_METRE_PER_DAY) CUBIC_METRE_PER_MINUTE = UnitDescriptor('cubic metre per minute', 'G53', '''m³/min''') ALL_UNITS.append(CUBIC_METRE_PER_MINUTE) MILLILITRE_PER_DAY = UnitDescriptor('millilitre per day', 'G54', '''ml/d''') ALL_UNITS.append(MILLILITRE_PER_DAY) MILLILITRE_PER_HOUR = UnitDescriptor('millilitre per hour', 'G55', '''ml/h''') ALL_UNITS.append(MILLILITRE_PER_HOUR) CUBIC_INCH_PER_HOUR = UnitDescriptor('cubic inch per hour', 'G56', '''in³/h''') ALL_UNITS.append(CUBIC_INCH_PER_HOUR) CUBIC_INCH_PER_MINUTE = UnitDescriptor('cubic inch per minute', 'G57', '''in³/min''') ALL_UNITS.append(CUBIC_INCH_PER_MINUTE) CUBIC_INCH_PER_SECOND = UnitDescriptor('cubic inch per second', 'G58', '''in³/s''') ALL_UNITS.append(CUBIC_INCH_PER_SECOND) MILLIAMPERE_PER_LITRE_MINUTE = UnitDescriptor('milliampere per litre minute', 'G59', '''mA/(l·min)''') ALL_UNITS.append(MILLIAMPERE_PER_LITRE_MINUTE) VOLT_PER_BAR = UnitDescriptor('volt per bar', 'G60', '''V/bar''') ALL_UNITS.append(VOLT_PER_BAR) CUBIC_CENTIMETRE_PER_DAY_KELVIN = UnitDescriptor('cubic centimetre per day kelvin', 'G61', '''cm³/(d·K)''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_DAY_KELVIN) CUBIC_CENTIMETRE_PER_HOUR_KELVIN = UnitDescriptor('cubic centimetre per hour kelvin', 'G62', '''cm³/(h·K)''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_HOUR_KELVIN) CUBIC_CENTIMETRE_PER_MINUTE_KELVIN = UnitDescriptor('cubic centimetre per minute kelvin', 'G63', '''cm³/(min·K)''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_MINUTE_KELVIN) CUBIC_CENTIMETRE_PER_SECOND_KELVIN = UnitDescriptor('cubic centimetre per second kelvin', 'G64', '''cm³/(s·K)''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_SECOND_KELVIN) LITRE_PER_DAY_KELVIN = UnitDescriptor('litre per day kelvin', 'G65', '''l/(d·K)''') ALL_UNITS.append(LITRE_PER_DAY_KELVIN) LITRE_PER_HOUR_KELVIN = UnitDescriptor('litre per hour kelvin', 'G66', '''l/(h·K)''') ALL_UNITS.append(LITRE_PER_HOUR_KELVIN) LITRE_PER_MINUTE_KELVIN = UnitDescriptor('litre per minute kelvin', 'G67', '''l/(min·K)''') ALL_UNITS.append(LITRE_PER_MINUTE_KELVIN) LITRE_PER_SECOND_KELVIN = UnitDescriptor('litre per second kelvin', 'G68', '''l/(s·K)''') ALL_UNITS.append(LITRE_PER_SECOND_KELVIN) CUBIC_METRE_PER_DAY_KELVIN = UnitDescriptor('cubic metre per day kelvin', 'G69', '''m³/(d·K)''') ALL_UNITS.append(CUBIC_METRE_PER_DAY_KELVIN) MICROFICHE_SHEET = UnitDescriptor('microfiche sheet', 'G7', '''''') ALL_UNITS.append(MICROFICHE_SHEET) CUBIC_METRE_PER_HOUR_KELVIN = UnitDescriptor('cubic metre per hour kelvin', 'G70', '''m³/(h·K)''') ALL_UNITS.append(CUBIC_METRE_PER_HOUR_KELVIN) CUBIC_METRE_PER_MINUTE_KELVIN = UnitDescriptor('cubic metre per minute kelvin', 'G71', '''m³/(min·K)''') ALL_UNITS.append(CUBIC_METRE_PER_MINUTE_KELVIN) CUBIC_METRE_PER_SECOND_KELVIN = UnitDescriptor('cubic metre per second kelvin', 'G72', '''m³/(s·K)''') ALL_UNITS.append(CUBIC_METRE_PER_SECOND_KELVIN) MILLILITRE_PER_DAY_KELVIN = UnitDescriptor('millilitre per day kelvin', 'G73', '''ml/(d·K)''') ALL_UNITS.append(MILLILITRE_PER_DAY_KELVIN) MILLILITRE_PER_HOUR_KELVIN = UnitDescriptor('millilitre per hour kelvin', 'G74', '''ml/(h·K)''') ALL_UNITS.append(MILLILITRE_PER_HOUR_KELVIN) MILLILITRE_PER_MINUTE_KELVIN = 
UnitDescriptor('millilitre per minute kelvin', 'G75', '''ml/(min·K)''') ALL_UNITS.append(MILLILITRE_PER_MINUTE_KELVIN) MILLILITRE_PER_SECOND_KELVIN = UnitDescriptor('millilitre per second kelvin', 'G76', '''ml/(s·K)''') ALL_UNITS.append(MILLILITRE_PER_SECOND_KELVIN) MILLIMETRE_TO_THE_FOURTH_POWER = UnitDescriptor('millimetre to the fourth power', 'G77', '''mm⁴''') ALL_UNITS.append(MILLIMETRE_TO_THE_FOURTH_POWER) CUBIC_CENTIMETRE_PER_DAY_BAR = UnitDescriptor('cubic centimetre per day bar', 'G78', '''cm³/(d·bar)''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_DAY_BAR) CUBIC_CENTIMETRE_PER_HOUR_BAR = UnitDescriptor('cubic centimetre per hour bar', 'G79', '''cm³/(h·bar)''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_HOUR_BAR) CUBIC_CENTIMETRE_PER_MINUTE_BAR = UnitDescriptor('cubic centimetre per minute bar', 'G80', '''cm³/(min·bar)''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_MINUTE_BAR) CUBIC_CENTIMETRE_PER_SECOND_BAR = UnitDescriptor('cubic centimetre per second bar', 'G81', '''cm³/(s·bar)''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_SECOND_BAR) LITRE_PER_DAY_BAR = UnitDescriptor('litre per day bar', 'G82', '''l/(d·bar)''') ALL_UNITS.append(LITRE_PER_DAY_BAR) LITRE_PER_HOUR_BAR = UnitDescriptor('litre per hour bar', 'G83', '''l/(h·bar)''') ALL_UNITS.append(LITRE_PER_HOUR_BAR) LITRE_PER_MINUTE_BAR = UnitDescriptor('litre per minute bar', 'G84', '''l/(min·bar)''') ALL_UNITS.append(LITRE_PER_MINUTE_BAR) LITRE_PER_SECOND_BAR = UnitDescriptor('litre per second bar', 'G85', '''l/(s·bar)''') ALL_UNITS.append(LITRE_PER_SECOND_BAR) CUBIC_METRE_PER_DAY_BAR = UnitDescriptor('cubic metre per day bar', 'G86', '''m³/(d·bar)''') ALL_UNITS.append(CUBIC_METRE_PER_DAY_BAR) CUBIC_METRE_PER_HOUR_BAR = UnitDescriptor('cubic metre per hour bar', 'G87', '''m³/(h·bar)''') ALL_UNITS.append(CUBIC_METRE_PER_HOUR_BAR) CUBIC_METRE_PER_MINUTE_BAR = UnitDescriptor('cubic metre per minute bar', 'G88', '''m³/(min·bar)''') ALL_UNITS.append(CUBIC_METRE_PER_MINUTE_BAR) CUBIC_METRE_PER_SECOND_BAR = UnitDescriptor('cubic metre per second bar', 'G89', '''m³/(s·bar)''') ALL_UNITS.append(CUBIC_METRE_PER_SECOND_BAR) MILLILITRE_PER_DAY_BAR = UnitDescriptor('millilitre per day bar', 'G90', '''ml/(d·bar)''') ALL_UNITS.append(MILLILITRE_PER_DAY_BAR) MILLILITRE_PER_HOUR_BAR = UnitDescriptor('millilitre per hour bar', 'G91', '''ml/(h·bar)''') ALL_UNITS.append(MILLILITRE_PER_HOUR_BAR) MILLILITRE_PER_MINUTE_BAR = UnitDescriptor('millilitre per minute bar', 'G92', '''ml/(min·bar)''') ALL_UNITS.append(MILLILITRE_PER_MINUTE_BAR) MILLILITRE_PER_SECOND_BAR = UnitDescriptor('millilitre per second bar', 'G93', '''ml/(s·bar)''') ALL_UNITS.append(MILLILITRE_PER_SECOND_BAR) CUBIC_CENTIMETRE_PER_BAR = UnitDescriptor('cubic centimetre per bar', 'G94', '''cm³/bar''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_BAR) LITRE_PER_BAR = UnitDescriptor('litre per bar', 'G95', '''l/bar''') ALL_UNITS.append(LITRE_PER_BAR) CUBIC_METRE_PER_BAR = UnitDescriptor('cubic metre per bar', 'G96', '''m³/bar''') ALL_UNITS.append(CUBIC_METRE_PER_BAR) MILLILITRE_PER_BAR = UnitDescriptor('millilitre per bar', 'G97', '''ml/bar''') ALL_UNITS.append(MILLILITRE_PER_BAR) MICROHENRY_PER_KILOOHM = UnitDescriptor('microhenry per kiloohm', 'G98', '''µH/kΩ''') ALL_UNITS.append(MICROHENRY_PER_KILOOHM) MICROHENRY_PER_OHM = UnitDescriptor('microhenry per ohm', 'G99', '''µH/Ω''') ALL_UNITS.append(MICROHENRY_PER_OHM) GALLON_US_PER_DAY = UnitDescriptor('gallon (US) per day', 'GB', '''gal (US)/d''') ALL_UNITS.append(GALLON_US_PER_DAY) GIGABECQUEREL = UnitDescriptor('gigabecquerel', 'GBQ', '''GBq''') 
ALL_UNITS.append(GIGABECQUEREL) GRAM_PER_100_GRAM = UnitDescriptor('gram per 100 gram', 'GC', '''''') ALL_UNITS.append(GRAM_PER_100_GRAM) GROSS_BARREL = UnitDescriptor('gross barrel', 'GD', '''''') ALL_UNITS.append(GROSS_BARREL) GRAM_DRY_WEIGHT = UnitDescriptor('gram, dry weight', 'GDW', '''''') ALL_UNITS.append(GRAM_DRY_WEIGHT) POUND_PER_GALLON_US = UnitDescriptor('pound per gallon (US)', 'GE', '''lb/gal (US)''') ALL_UNITS.append(POUND_PER_GALLON_US) GRAM_PER_METRE_GRAM_PER_100_CENTIMETRES = UnitDescriptor('gram per metre (gram per 100 centimetres)', 'GF', '''g/m''') ALL_UNITS.append(GRAM_PER_METRE_GRAM_PER_100_CENTIMETRES) GRAM_OF_FISSILE_ISOTOPE = UnitDescriptor('gram of fissile isotope', 'GFI', '''gi F/S''') ALL_UNITS.append(GRAM_OF_FISSILE_ISOTOPE) GREAT_GROSS = UnitDescriptor('great gross', 'GGR', '''''') ALL_UNITS.append(GREAT_GROSS) HALF_GALLON_US = UnitDescriptor('half gallon (US)', 'GH', '''''') ALL_UNITS.append(HALF_GALLON_US) GILL_US = UnitDescriptor('gill (US)', 'GIA', '''gi (US)''') ALL_UNITS.append(GILL_US) GRAM_INCLUDING_CONTAINER = UnitDescriptor('gram, including container', 'GIC', '''''') ALL_UNITS.append(GRAM_INCLUDING_CONTAINER) GILL_UK = UnitDescriptor('gill (UK)', 'GII', '''gi (UK)''') ALL_UNITS.append(GILL_UK) GRAM_INCLUDING_INNER_PACKAGING = UnitDescriptor('gram, including inner packaging', 'GIP', '''''') ALL_UNITS.append(GRAM_INCLUDING_INNER_PACKAGING) GRAM_PER_MILLILITRE = UnitDescriptor('gram per millilitre', 'GJ', '''g/ml''') ALL_UNITS.append(GRAM_PER_MILLILITRE) GRAM_PER_KILOGRAM = UnitDescriptor('gram per kilogram', 'GK', '''''') ALL_UNITS.append(GRAM_PER_KILOGRAM) GRAM_PER_LITRE = UnitDescriptor('gram per litre', 'GL', '''g/l''') ALL_UNITS.append(GRAM_PER_LITRE) DRY_GALLON_US = UnitDescriptor('dry gallon (US)', 'GLD', '''dry gal (US)''') ALL_UNITS.append(DRY_GALLON_US) GALLON_UK = UnitDescriptor('gallon (UK)', 'GLI', '''gal (UK)''') ALL_UNITS.append(GALLON_UK) GALLON_US = UnitDescriptor('gallon (US)', 'GLL', '''gal (US)''') ALL_UNITS.append(GALLON_US) GRAM_PER_SQUARE_METRE = UnitDescriptor('gram per square metre', 'GM', '''g/m²''') ALL_UNITS.append(GRAM_PER_SQUARE_METRE) GROSS_GALLON = UnitDescriptor('gross gallon', 'GN', '''''') ALL_UNITS.append(GROSS_GALLON) MILLIGRAM_PER_SQUARE_METRE = UnitDescriptor('milligram per square metre', 'GO', '''mg/m²''') ALL_UNITS.append(MILLIGRAM_PER_SQUARE_METRE) MILLIGRAM_PER_CUBIC_METRE = UnitDescriptor('milligram per cubic metre', 'GP', '''mg/m³''') ALL_UNITS.append(MILLIGRAM_PER_CUBIC_METRE) MICROGRAM_PER_CUBIC_METRE = UnitDescriptor('microgram per cubic metre', 'GQ', '''µg/m³''') ALL_UNITS.append(MICROGRAM_PER_CUBIC_METRE) GRAM = UnitDescriptor('gram', 'GRM', '''g''') ALL_UNITS.append(GRAM) GRAIN = UnitDescriptor('grain', 'GRN', '''gr''') ALL_UNITS.append(GRAIN) GROSS = UnitDescriptor('gross', 'GRO', '''gr''') ALL_UNITS.append(GROSS) GROSS_REGISTER_TON = UnitDescriptor('gross register ton', 'GRT', '''''') ALL_UNITS.append(GROSS_REGISTER_TON) GROSS_TON = UnitDescriptor('gross ton', 'GT', '''''') ALL_UNITS.append(GROSS_TON) GIGAJOULE = UnitDescriptor('gigajoule', 'GV', '''GJ''') ALL_UNITS.append(GIGAJOULE) GALLON_PER_THOUSAND_CUBIC_FOOT = UnitDescriptor('gallon per thousand cubic foot', 'GW', '''''') ALL_UNITS.append(GALLON_PER_THOUSAND_CUBIC_FOOT) GIGAWATT_HOUR = UnitDescriptor('gigawatt hour', 'GWH', '''GW·h''') ALL_UNITS.append(GIGAWATT_HOUR) GROSS_YARD = UnitDescriptor('gross yard', 'GY', '''''') ALL_UNITS.append(GROSS_YARD) GAGE_SYSTEM = UnitDescriptor('gage system', 'GZ', '''''') ALL_UNITS.append(GAGE_SYSTEM) 
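# --- usage sketch (editor's addition, not part of the generated code list) ---
# ALL_UNITS is a flat list of UnitDescriptor(name, code, symbol) entries keyed
# by their UN/ECE Recommendation 20 common code. Consumers typically build an
# index by code once; note that a few codes (for example 'DTN' and 'H80')
# appear twice in this table because the source lists more than one symbol for
# them, so a plain dict keeps only the last entry seen for such codes.
# The attribute name `code` below is an assumption about how UnitDescriptor
# exposes its second constructor argument; adjust it to the real field name.
#
#     UNITS_BY_CODE = {unit.code: unit for unit in ALL_UNITS}
#     UNITS_BY_CODE['HUR']   # -> the 'hour' descriptor, symbol 'h'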
HENRY_PER_KILOOHM = UnitDescriptor('henry per kiloohm', 'H03', '''H/kΩ''') ALL_UNITS.append(HENRY_PER_KILOOHM) HENRY_PER_OHM = UnitDescriptor('henry per ohm', 'H04', '''H/Ω''') ALL_UNITS.append(HENRY_PER_OHM) MILLIHENRY_PER_KILOOHM = UnitDescriptor('millihenry per kiloohm', 'H05', '''mH/kΩ''') ALL_UNITS.append(MILLIHENRY_PER_KILOOHM) MILLIHENRY_PER_OHM = UnitDescriptor('millihenry per ohm', 'H06', '''mH/Ω''') ALL_UNITS.append(MILLIHENRY_PER_OHM) PASCAL_SECOND_PER_BAR = UnitDescriptor('pascal second per bar', 'H07', '''Pa·s/bar''') ALL_UNITS.append(PASCAL_SECOND_PER_BAR) MICROBECQUEREL = UnitDescriptor('microbecquerel', 'H08', '''µBq''') ALL_UNITS.append(MICROBECQUEREL) RECIPROCAL_YEAR = UnitDescriptor('reciprocal year', 'H09', '''1/y''') ALL_UNITS.append(RECIPROCAL_YEAR) HALF_PAGE_ELECTRONIC = UnitDescriptor('half page – electronic', 'H1', '''''') ALL_UNITS.append(HALF_PAGE_ELECTRONIC) RECIPROCAL_HOUR = UnitDescriptor('reciprocal hour', 'H10', '''1/h''') ALL_UNITS.append(RECIPROCAL_HOUR) RECIPROCAL_MONTH = UnitDescriptor('reciprocal month', 'H11', '''1/mo''') ALL_UNITS.append(RECIPROCAL_MONTH) DEGREE_CELSIUS_PER_HOUR = UnitDescriptor('degree Celsius per hour', 'H12', '''°C/h''') ALL_UNITS.append(DEGREE_CELSIUS_PER_HOUR) DEGREE_CELSIUS_PER_MINUTE = UnitDescriptor('degree Celsius per minute', 'H13', '''°C/min''') ALL_UNITS.append(DEGREE_CELSIUS_PER_MINUTE) DEGREE_CELSIUS_PER_SECOND = UnitDescriptor('degree Celsius per second', 'H14', '''°C/s''') ALL_UNITS.append(DEGREE_CELSIUS_PER_SECOND) SQUARE_CENTIMETRE_PER_GRAM = UnitDescriptor('square centimetre per gram', 'H15', '''cm²/g''') ALL_UNITS.append(SQUARE_CENTIMETRE_PER_GRAM) SQUARE_DECAMETRE = UnitDescriptor('square decametre', 'H16', '''dam²''') ALL_UNITS.append(SQUARE_DECAMETRE) SQUARE_HECTOMETRE = UnitDescriptor('square hectometre', 'H18', '''hm²''') ALL_UNITS.append(SQUARE_HECTOMETRE) CUBIC_HECTOMETRE = UnitDescriptor('cubic hectometre', 'H19', '''hm³''') ALL_UNITS.append(CUBIC_HECTOMETRE) HALF_LITRE = UnitDescriptor('half litre', 'H2', '''''') ALL_UNITS.append(HALF_LITRE) CUBIC_KILOMETRE = UnitDescriptor('cubic kilometre', 'H20', '''km³''') ALL_UNITS.append(CUBIC_KILOMETRE) BLANK = UnitDescriptor('blank', 'H21', '''''') ALL_UNITS.append(BLANK) VOLT_SQUARE_INCH_PER_POUND_FORCE = UnitDescriptor('volt square inch per pound-force', 'H22', '''V/(lbf/in²)''') ALL_UNITS.append(VOLT_SQUARE_INCH_PER_POUND_FORCE) VOLT_PER_INCH = UnitDescriptor('volt per inch', 'H23', '''V/in''') ALL_UNITS.append(VOLT_PER_INCH) VOLT_PER_MICROSECOND = UnitDescriptor('volt per microsecond', 'H24', '''V/µs''') ALL_UNITS.append(VOLT_PER_MICROSECOND) PERCENT_PER_KELVIN = UnitDescriptor('percent per kelvin', 'H25', '''%/K''') ALL_UNITS.append(PERCENT_PER_KELVIN) OHM_PER_METRE = UnitDescriptor('ohm per metre', 'H26', '''Ω/m''') ALL_UNITS.append(OHM_PER_METRE) DEGREE_PER_METRE = UnitDescriptor('degree per metre', 'H27', '''°/m''') ALL_UNITS.append(DEGREE_PER_METRE) MICROFARAD_PER_KILOMETRE = UnitDescriptor('microfarad per kilometre', 'H28', '''µF/km''') ALL_UNITS.append(MICROFARAD_PER_KILOMETRE) MICROGRAM_PER_LITRE = UnitDescriptor('microgram per litre', 'H29', '''µg/l''') ALL_UNITS.append(MICROGRAM_PER_LITRE) SQUARE_MICROMETRE_SQUARE_MICRON = UnitDescriptor('square micrometre (square micron)', 'H30', '''µm²''') ALL_UNITS.append(SQUARE_MICROMETRE_SQUARE_MICRON) AMPERE_PER_KILOGRAM = UnitDescriptor('ampere per kilogram', 'H31', '''A/kg''') ALL_UNITS.append(AMPERE_PER_KILOGRAM) AMPERE_SQUARED_SECOND = UnitDescriptor('ampere squared second', 'H32', '''A²·s''') 
ALL_UNITS.append(AMPERE_SQUARED_SECOND) FARAD_PER_KILOMETRE = UnitDescriptor('farad per kilometre', 'H33', '''F/km''') ALL_UNITS.append(FARAD_PER_KILOMETRE) HERTZ_METRE = UnitDescriptor('hertz metre', 'H34', '''Hz·m''') ALL_UNITS.append(HERTZ_METRE) KELVIN_METRE_PER_WATT = UnitDescriptor('kelvin metre per watt', 'H35', '''K·m/W''') ALL_UNITS.append(KELVIN_METRE_PER_WATT) MEGAOHM_PER_KILOMETRE = UnitDescriptor('megaohm per kilometre', 'H36', '''MΩ/km''') ALL_UNITS.append(MEGAOHM_PER_KILOMETRE) MEGAOHM_PER_METRE = UnitDescriptor('megaohm per metre', 'H37', '''MΩ/m''') ALL_UNITS.append(MEGAOHM_PER_METRE) MEGAAMPERE = UnitDescriptor('megaampere', 'H38', '''MA''') ALL_UNITS.append(MEGAAMPERE) MEGAHERTZ_KILOMETRE = UnitDescriptor('megahertz kilometre', 'H39', '''MHz·km''') ALL_UNITS.append(MEGAHERTZ_KILOMETRE) NEWTON_PER_AMPERE = UnitDescriptor('newton per ampere', 'H40', '''N/A''') ALL_UNITS.append(NEWTON_PER_AMPERE) NEWTON_METRE_WATT_TO_THE_POWER_MINUS_0_5 = UnitDescriptor('newton metre watt to the power minus 0,5', 'H41', '''N·m·W⁻⁰‧⁵''') ALL_UNITS.append(NEWTON_METRE_WATT_TO_THE_POWER_MINUS_0_5) PASCAL_PER_METRE = UnitDescriptor('pascal per metre', 'H42', '''Pa/m''') ALL_UNITS.append(PASCAL_PER_METRE) SIEMENS_PER_CENTIMETRE = UnitDescriptor('siemens per centimetre', 'H43', '''S/cm''') ALL_UNITS.append(SIEMENS_PER_CENTIMETRE) TERAOHM = UnitDescriptor('teraohm', 'H44', '''TΩ''') ALL_UNITS.append(TERAOHM) VOLT_SECOND_PER_METRE = UnitDescriptor('volt second per metre', 'H45', '''V·s/m''') ALL_UNITS.append(VOLT_SECOND_PER_METRE) VOLT_PER_SECOND = UnitDescriptor('volt per second', 'H46', '''V/s''') ALL_UNITS.append(VOLT_PER_SECOND) WATT_PER_CUBIC_METRE = UnitDescriptor('watt per cubic metre', 'H47', '''W/m³''') ALL_UNITS.append(WATT_PER_CUBIC_METRE) ATTOFARAD = UnitDescriptor('attofarad', 'H48', '''aF''') ALL_UNITS.append(ATTOFARAD) CENTIMETRE_PER_HOUR = UnitDescriptor('centimetre per hour', 'H49', '''cm/h''') ALL_UNITS.append(CENTIMETRE_PER_HOUR) RECIPROCAL_CUBIC_CENTIMETRE = UnitDescriptor('reciprocal cubic centimetre', 'H50', '''cm⁻³''') ALL_UNITS.append(RECIPROCAL_CUBIC_CENTIMETRE) DECIBEL_PER_KILOMETRE = UnitDescriptor('decibel per kilometre', 'H51', '''dB/km''') ALL_UNITS.append(DECIBEL_PER_KILOMETRE) DECIBEL_PER_METRE = UnitDescriptor('decibel per metre', 'H52', '''dB/m''') ALL_UNITS.append(DECIBEL_PER_METRE) KILOGRAM_PER_BAR = UnitDescriptor('kilogram per bar', 'H53', '''kg/bar''') ALL_UNITS.append(KILOGRAM_PER_BAR) KILOGRAM_PER_CUBIC_DECIMETRE_KELVIN = UnitDescriptor('kilogram per cubic decimetre kelvin', 'H54', '''(kg/dm³)/K''') ALL_UNITS.append(KILOGRAM_PER_CUBIC_DECIMETRE_KELVIN) KILOGRAM_PER_CUBIC_DECIMETRE_BAR = UnitDescriptor('kilogram per cubic decimetre bar', 'H55', '''(kg/dm³)/bar''') ALL_UNITS.append(KILOGRAM_PER_CUBIC_DECIMETRE_BAR) KILOGRAM_PER_SQUARE_METRE_SECOND = UnitDescriptor('kilogram per square metre second', 'H56', '''kg/(m²·s)''') ALL_UNITS.append(KILOGRAM_PER_SQUARE_METRE_SECOND) INCH_PER_TWO_PI_RADIANT = UnitDescriptor('inch per two pi radiant', 'H57', '''in/revolution''') ALL_UNITS.append(INCH_PER_TWO_PI_RADIANT) METRE_PER_VOLT_SECOND = UnitDescriptor('metre per volt second', 'H58', '''m/(V·s)''') ALL_UNITS.append(METRE_PER_VOLT_SECOND) SQUARE_METRE_PER_NEWTON = UnitDescriptor('square metre per newton', 'H59', '''m²/N''') ALL_UNITS.append(SQUARE_METRE_PER_NEWTON) CUBIC_METRE_PER_CUBIC_METRE = UnitDescriptor('cubic metre per cubic metre', 'H60', '''m³/m³''') ALL_UNITS.append(CUBIC_METRE_PER_CUBIC_METRE) MILLISIEMENS_PER_CENTIMETRE = UnitDescriptor('millisiemens per 
centimetre', 'H61', '''mS/cm''') ALL_UNITS.append(MILLISIEMENS_PER_CENTIMETRE) MILLIVOLT_PER_MINUTE = UnitDescriptor('millivolt per minute', 'H62', '''mV/min''') ALL_UNITS.append(MILLIVOLT_PER_MINUTE) MILLIGRAM_PER_SQUARE_CENTIMETRE = UnitDescriptor('milligram per square centimetre', 'H63', '''mg/cm²''') ALL_UNITS.append(MILLIGRAM_PER_SQUARE_CENTIMETRE) MILLIGRAM_PER_GRAM = UnitDescriptor('milligram per gram', 'H64', '''mg/g''') ALL_UNITS.append(MILLIGRAM_PER_GRAM) MILLILITRE_PER_CUBIC_METRE = UnitDescriptor('millilitre per cubic metre', 'H65', '''ml/m³''') ALL_UNITS.append(MILLILITRE_PER_CUBIC_METRE) MILLIMETRE_PER_YEAR = UnitDescriptor('millimetre per year', 'H66', '''mm/y''') ALL_UNITS.append(MILLIMETRE_PER_YEAR) MILLIMETRE_PER_HOUR = UnitDescriptor('millimetre per hour', 'H67', '''mm/h''') ALL_UNITS.append(MILLIMETRE_PER_HOUR) MILLIMOLE_PER_GRAM = UnitDescriptor('millimole per gram', 'H68', '''mmol/g''') ALL_UNITS.append(MILLIMOLE_PER_GRAM) PICOPASCAL_PER_KILOMETRE = UnitDescriptor('picopascal per kilometre', 'H69', '''pPa/km''') ALL_UNITS.append(PICOPASCAL_PER_KILOMETRE) PICOSECOND = UnitDescriptor('picosecond', 'H70', '''ps''') ALL_UNITS.append(PICOSECOND) PERCENT_PER_MONTH = UnitDescriptor('percent per month', 'H71', '''%/mo''') ALL_UNITS.append(PERCENT_PER_MONTH) PERCENT_PER_HECTOBAR = UnitDescriptor('percent per hectobar', 'H72', '''%/hbar''') ALL_UNITS.append(PERCENT_PER_HECTOBAR) PERCENT_PER_DECAKELVIN = UnitDescriptor('percent per decakelvin', 'H73', '''%/daK''') ALL_UNITS.append(PERCENT_PER_DECAKELVIN) WATT_PER_METRE = UnitDescriptor('watt per metre', 'H74', '''W/m''') ALL_UNITS.append(WATT_PER_METRE) DECAPASCAL = UnitDescriptor('decapascal', 'H75', '''daPa''') ALL_UNITS.append(DECAPASCAL) GRAM_PER_MILLIMETRE = UnitDescriptor('gram per millimetre', 'H76', '''g/mm''') ALL_UNITS.append(GRAM_PER_MILLIMETRE) MODULE_WIDTH = UnitDescriptor('module width', 'H77', '''MW''') ALL_UNITS.append(MODULE_WIDTH) CONVENTIONAL_CENTIMETRE_OF_WATER = UnitDescriptor('conventional centimetre of water', 'H78', '''cm H₂O''') ALL_UNITS.append(CONVENTIONAL_CENTIMETRE_OF_WATER) FRENCH_GAUGE = UnitDescriptor('French gauge', 'H79', '''Fg''') ALL_UNITS.append(FRENCH_GAUGE) RACK_UNIT = UnitDescriptor('rack unit', 'H80', '''U''') ALL_UNITS.append(RACK_UNIT) RACK_UNIT = UnitDescriptor('rack unit', 'H80', '''RU''') ALL_UNITS.append(RACK_UNIT) MILLIMETRE_PER_MINUTE = UnitDescriptor('millimetre per minute', 'H81', '''mm/min''') ALL_UNITS.append(MILLIMETRE_PER_MINUTE) BIG_POINT = UnitDescriptor('big point', 'H82', '''bp''') ALL_UNITS.append(BIG_POINT) LITRE_PER_KILOGRAM = UnitDescriptor('litre per kilogram', 'H83', '''l/kg''') ALL_UNITS.append(LITRE_PER_KILOGRAM) GRAM_MILLIMETRE = UnitDescriptor('gram millimetre', 'H84', '''g·mm''') ALL_UNITS.append(GRAM_MILLIMETRE) RECIPROCAL_WEEK = UnitDescriptor('reciprocal week', 'H85', '''1/wk''') ALL_UNITS.append(RECIPROCAL_WEEK) PIECE = UnitDescriptor('piece', 'H87', '''''') ALL_UNITS.append(PIECE) MEGAOHM_KILOMETRE = UnitDescriptor('megaohm kilometre', 'H88', '''MΩ·km''') ALL_UNITS.append(MEGAOHM_KILOMETRE) PERCENT_PER_OHM = UnitDescriptor('percent per ohm', 'H89', '''%/Ω''') ALL_UNITS.append(PERCENT_PER_OHM) PERCENT_PER_DEGREE = UnitDescriptor('percent per degree', 'H90', '''%/°''') ALL_UNITS.append(PERCENT_PER_DEGREE) PERCENT_PER_TEN_THOUSAND = UnitDescriptor('percent per ten thousand', 'H91', '''%/10000''') ALL_UNITS.append(PERCENT_PER_TEN_THOUSAND) PERCENT_PER_ONE_HUNDRED_THOUSAND = UnitDescriptor('percent per one hundred thousand', 'H92', '''%/100000''') 
ALL_UNITS.append(PERCENT_PER_ONE_HUNDRED_THOUSAND) PERCENT_PER_HUNDRED = UnitDescriptor('percent per hundred', 'H93', '''%/100''') ALL_UNITS.append(PERCENT_PER_HUNDRED) PERCENT_PER_THOUSAND = UnitDescriptor('percent per thousand', 'H94', '''%/1000''') ALL_UNITS.append(PERCENT_PER_THOUSAND) PERCENT_PER_VOLT = UnitDescriptor('percent per volt', 'H95', '''%/V''') ALL_UNITS.append(PERCENT_PER_VOLT) PERCENT_PER_BAR = UnitDescriptor('percent per bar', 'H96', '''%/bar''') ALL_UNITS.append(PERCENT_PER_BAR) PERCENT_PER_INCH = UnitDescriptor('percent per inch', 'H98', '''%/in''') ALL_UNITS.append(PERCENT_PER_INCH) PERCENT_PER_METRE = UnitDescriptor('percent per metre', 'H99', '''%/m''') ALL_UNITS.append(PERCENT_PER_METRE) HANK = UnitDescriptor('hank', 'HA', '''''') ALL_UNITS.append(HANK) HECTARE = UnitDescriptor('hectare', 'HAR', '''ha''') ALL_UNITS.append(HECTARE) HECTOBAR = UnitDescriptor('hectobar', 'HBA', '''hbar''') ALL_UNITS.append(HECTOBAR) HUNDRED_BOXES = UnitDescriptor('hundred boxes', 'HBX', '''''') ALL_UNITS.append(HUNDRED_BOXES) HUNDRED_COUNT = UnitDescriptor('hundred count', 'HC', '''''') ALL_UNITS.append(HUNDRED_COUNT) HALF_DOZEN = UnitDescriptor('half dozen', 'HD', '''''') ALL_UNITS.append(HALF_DOZEN) HUNDRED_KILOGRAM_DRY_WEIGHT = UnitDescriptor('hundred kilogram, dry weight', 'HDW', '''''') ALL_UNITS.append(HUNDRED_KILOGRAM_DRY_WEIGHT) HUNDREDTH_OF_A_CARAT = UnitDescriptor('hundredth of a carat', 'HE', '''''') ALL_UNITS.append(HUNDREDTH_OF_A_CARAT) HEAD = UnitDescriptor('head', 'HEA', '''''') ALL_UNITS.append(HEAD) HUNDRED_FOOT = UnitDescriptor('hundred foot', 'HF', '''''') ALL_UNITS.append(HUNDRED_FOOT) HECTOGRAM = UnitDescriptor('hectogram', 'HGM', '''hg''') ALL_UNITS.append(HECTOGRAM) HUNDRED_CUBIC_FOOT = UnitDescriptor('hundred cubic foot', 'HH', '''''') ALL_UNITS.append(HUNDRED_CUBIC_FOOT) HUNDRED_SHEET = UnitDescriptor('hundred sheet', 'HI', '''''') ALL_UNITS.append(HUNDRED_SHEET) HUNDRED_INTERNATIONAL_UNIT = UnitDescriptor('hundred international unit', 'HIU', '''''') ALL_UNITS.append(HUNDRED_INTERNATIONAL_UNIT) METRIC_HORSE_POWER = UnitDescriptor('metric horse power', 'HJ', '''metric hp''') ALL_UNITS.append(METRIC_HORSE_POWER) HUNDRED_KILOGRAM = UnitDescriptor('hundred kilogram', 'HK', '''''') ALL_UNITS.append(HUNDRED_KILOGRAM) HUNDRED_KILOGRAM_NET_MASS = UnitDescriptor('hundred kilogram, net mass', 'HKM', '''''') ALL_UNITS.append(HUNDRED_KILOGRAM_NET_MASS) HUNDRED_FOOT_LINEAR = UnitDescriptor('hundred foot (linear)', 'HL', '''''') ALL_UNITS.append(HUNDRED_FOOT_LINEAR) HECTOLITRE = UnitDescriptor('hectolitre', 'HLT', '''hl''') ALL_UNITS.append(HECTOLITRE) MILE_PER_HOUR_STATUTE_MILE = UnitDescriptor('mile per hour (statute mile)', 'HM', '''mile/h''') ALL_UNITS.append(MILE_PER_HOUR_STATUTE_MILE) MILLION_CUBIC_METRE = UnitDescriptor('million cubic metre', 'HMQ', '''Mm³''') ALL_UNITS.append(MILLION_CUBIC_METRE) HECTOMETRE = UnitDescriptor('hectometre', 'HMT', '''hm''') ALL_UNITS.append(HECTOMETRE) CONVENTIONAL_MILLIMETRE_OF_MERCURY = UnitDescriptor('conventional millimetre of mercury', 'HN', '''mm Hg''') ALL_UNITS.append(CONVENTIONAL_MILLIMETRE_OF_MERCURY) HUNDRED_TROY_OUNCE = UnitDescriptor('hundred troy ounce', 'HO', '''''') ALL_UNITS.append(HUNDRED_TROY_OUNCE) CONVENTIONAL_MILLIMETRE_OF_WATER = UnitDescriptor('conventional millimetre of water', 'HP', '''mm H₂O''') ALL_UNITS.append(CONVENTIONAL_MILLIMETRE_OF_WATER) HECTOLITRE_OF_PURE_ALCOHOL = UnitDescriptor('hectolitre of pure alcohol', 'HPA', '''''') ALL_UNITS.append(HECTOLITRE_OF_PURE_ALCOHOL) HUNDRED_SQUARE_FOOT = 
UnitDescriptor('hundred square foot', 'HS', '''''') ALL_UNITS.append(HUNDRED_SQUARE_FOOT) HALF_HOUR = UnitDescriptor('half hour', 'HT', '''''') ALL_UNITS.append(HALF_HOUR) HERTZ = UnitDescriptor('hertz', 'HTZ', '''Hz''') ALL_UNITS.append(HERTZ) HOUR = UnitDescriptor('hour', 'HUR', '''h''') ALL_UNITS.append(HOUR) HUNDRED_YARD = UnitDescriptor('hundred yard', 'HY', '''''') ALL_UNITS.append(HUNDRED_YARD) INCH_POUND_POUND_INCH = UnitDescriptor('inch pound (pound inch)', 'IA', '''in·lb''') ALL_UNITS.append(INCH_POUND_POUND_INCH) COUNT_PER_INCH = UnitDescriptor('count per inch', 'IC', '''''') ALL_UNITS.append(COUNT_PER_INCH) PERSON = UnitDescriptor('person', 'IE', '''''') ALL_UNITS.append(PERSON) INCHES_OF_WATER = UnitDescriptor('inches of water', 'IF', '''''') ALL_UNITS.append(INCHES_OF_WATER) COLUMN_INCH = UnitDescriptor('column inch', 'II', '''''') ALL_UNITS.append(COLUMN_INCH) INCH_PER_MINUTE = UnitDescriptor('inch per minute', 'IL', '''''') ALL_UNITS.append(INCH_PER_MINUTE) IMPRESSION = UnitDescriptor('impression', 'IM', '''''') ALL_UNITS.append(IMPRESSION) INCH = UnitDescriptor('inch', 'INH', '''in''') ALL_UNITS.append(INCH) SQUARE_INCH = UnitDescriptor('square inch', 'INK', '''in²''') ALL_UNITS.append(SQUARE_INCH) CUBIC_INCH = UnitDescriptor('cubic inch', 'INQ', '''in³''') ALL_UNITS.append(CUBIC_INCH) INSURANCE_POLICY = UnitDescriptor('insurance policy', 'IP', '''''') ALL_UNITS.append(INSURANCE_POLICY) INTERNATIONAL_SUGAR_DEGREE = UnitDescriptor('international sugar degree', 'ISD', '''''') ALL_UNITS.append(INTERNATIONAL_SUGAR_DEGREE) COUNT_PER_CENTIMETRE = UnitDescriptor('count per centimetre', 'IT', '''''') ALL_UNITS.append(COUNT_PER_CENTIMETRE) INCH_PER_SECOND = UnitDescriptor('inch per second', 'IU', '''in/s''') ALL_UNITS.append(INCH_PER_SECOND) INTERNATIONAL_UNIT_PER_GRAM = UnitDescriptor('international unit per gram', 'IUG', '''''') ALL_UNITS.append(INTERNATIONAL_UNIT_PER_GRAM) INCH_PER_SECOND_SQUARED = UnitDescriptor('inch per second squared', 'IV', '''in/s²''') ALL_UNITS.append(INCH_PER_SECOND_SQUARED) PERCENT_PER_MILLIMETRE = UnitDescriptor('percent per millimetre', 'J10', '''%/mm''') ALL_UNITS.append(PERCENT_PER_MILLIMETRE) PER_MILLE_PER_PSI = UnitDescriptor('per mille per psi', 'J12', '''‰/psi''') ALL_UNITS.append(PER_MILLE_PER_PSI) DEGREE_API = UnitDescriptor('degree API', 'J13', '''°API''') ALL_UNITS.append(DEGREE_API) DEGREE_BAUME_ORIGIN_SCALE = UnitDescriptor('degree Baume (origin scale)', 'J14', '''°Bé''') ALL_UNITS.append(DEGREE_BAUME_ORIGIN_SCALE) DEGREE_BAUME_US_HEAVY = UnitDescriptor('degree Baume (US heavy)', 'J15', '''°Bé (US heavy)''') ALL_UNITS.append(DEGREE_BAUME_US_HEAVY) DEGREE_BAUME_US_LIGHT = UnitDescriptor('degree Baume (US light)', 'J16', '''°Bé (US light)''') ALL_UNITS.append(DEGREE_BAUME_US_LIGHT) DEGREE_BALLING = UnitDescriptor('degree Balling', 'J17', '''°Balling''') ALL_UNITS.append(DEGREE_BALLING) DEGREE_BRIX = UnitDescriptor('degree Brix', 'J18', '''°Bx''') ALL_UNITS.append(DEGREE_BRIX) DEGREE_FAHRENHEIT_HOUR_SQUARE_FOOT_PER_BRITISH_THERMAL_UNIT_THERMOCHEMICAL = UnitDescriptor('degree Fahrenheit hour square foot per British thermal unit (thermochemical)', 'J19', '''°F·h·ft²/Btuth''') ALL_UNITS.append(DEGREE_FAHRENHEIT_HOUR_SQUARE_FOOT_PER_BRITISH_THERMAL_UNIT_THERMOCHEMICAL) JOULE_PER_KILOGRAM = UnitDescriptor('joule per kilogram', 'J2', '''J/kg''') ALL_UNITS.append(JOULE_PER_KILOGRAM) DEGREE_FAHRENHEIT_PER_KELVIN = UnitDescriptor('degree Fahrenheit per kelvin', 'J20', '''°F/K''') ALL_UNITS.append(DEGREE_FAHRENHEIT_PER_KELVIN) DEGREE_FAHRENHEIT_PER_BAR 
= UnitDescriptor('degree Fahrenheit per bar', 'J21', '''°F/bar''') ALL_UNITS.append(DEGREE_FAHRENHEIT_PER_BAR) DEGREE_FAHRENHEIT_HOUR_SQUARE_FOOT_PER_BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE = UnitDescriptor('degree Fahrenheit hour square foot per British thermal unit (international table)', 'J22', '''°F·h·ft²/BtuIT''') ALL_UNITS.append(DEGREE_FAHRENHEIT_HOUR_SQUARE_FOOT_PER_BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE) DEGREE_FAHRENHEIT_PER_HOUR = UnitDescriptor('degree Fahrenheit per hour', 'J23', '''°F/h''') ALL_UNITS.append(DEGREE_FAHRENHEIT_PER_HOUR) DEGREE_FAHRENHEIT_PER_MINUTE = UnitDescriptor('degree Fahrenheit per minute', 'J24', '''°F/min''') ALL_UNITS.append(DEGREE_FAHRENHEIT_PER_MINUTE) DEGREE_FAHRENHEIT_PER_SECOND = UnitDescriptor('degree Fahrenheit per second', 'J25', '''°F/s''') ALL_UNITS.append(DEGREE_FAHRENHEIT_PER_SECOND) RECIPROCAL_DEGREE_FAHRENHEIT = UnitDescriptor('reciprocal degree Fahrenheit', 'J26', '''1/°F''') ALL_UNITS.append(RECIPROCAL_DEGREE_FAHRENHEIT) DEGREE_OECHSLE = UnitDescriptor('degree Oechsle', 'J27', '''°Oechsle''') ALL_UNITS.append(DEGREE_OECHSLE) DEGREE_RANKINE_PER_HOUR = UnitDescriptor('degree Rankine per hour', 'J28', '''°R/h''') ALL_UNITS.append(DEGREE_RANKINE_PER_HOUR) DEGREE_RANKINE_PER_MINUTE = UnitDescriptor('degree Rankine per minute', 'J29', '''°R/min''') ALL_UNITS.append(DEGREE_RANKINE_PER_MINUTE) DEGREE_RANKINE_PER_SECOND = UnitDescriptor('degree Rankine per second', 'J30', '''°R/s''') ALL_UNITS.append(DEGREE_RANKINE_PER_SECOND) DEGREE_TWADDELL = UnitDescriptor('degree Twaddell', 'J31', '''°Tw''') ALL_UNITS.append(DEGREE_TWADDELL) MICROPOISE = UnitDescriptor('micropoise', 'J32', '''µP''') ALL_UNITS.append(MICROPOISE) MICROGRAM_PER_KILOGRAM = UnitDescriptor('microgram per kilogram', 'J33', '''µg/kg''') ALL_UNITS.append(MICROGRAM_PER_KILOGRAM) MICROGRAM_PER_CUBIC_METRE_KELVIN = UnitDescriptor('microgram per cubic metre kelvin', 'J34', '''(µg/m³)/K''') ALL_UNITS.append(MICROGRAM_PER_CUBIC_METRE_KELVIN) MICROGRAM_PER_CUBIC_METRE_BAR = UnitDescriptor('microgram per cubic metre bar', 'J35', '''(µg/m³)/bar''') ALL_UNITS.append(MICROGRAM_PER_CUBIC_METRE_BAR) MICROLITRE_PER_LITRE = UnitDescriptor('microlitre per litre', 'J36', '''µl/l''') ALL_UNITS.append(MICROLITRE_PER_LITRE) BAUD = UnitDescriptor('baud', 'J38', '''Bd''') ALL_UNITS.append(BAUD) BRITISH_THERMAL_UNIT_MEAN = UnitDescriptor('British thermal unit (mean)', 'J39', '''Btu''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_MEAN) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_FOOT_PER_HOUR_SQUARE_FOOT_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal unit (international table) foot per hour square foot degree Fahrenheit', 'J40', '''BtuIT·ft/(h·ft²·°F)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_FOOT_PER_HOUR_SQUARE_FOOT_DEGREE_FAHRENHEIT) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_INCH_PER_HOUR_SQUARE_FOOT_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal unit (international table) inch per hour square foot degree Fahrenheit', 'J41', '''BtuIT·in/(h·ft²·°F)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_INCH_PER_HOUR_SQUARE_FOOT_DEGREE_FAHRENHEIT) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_INCH_PER_SECOND_SQUARE_FOOT_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal unit (international table) inch per second square foot degree Fahrenheit', 'J42', '''BtuIT·in/(s·ft²·°F)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_INCH_PER_SECOND_SQUARE_FOOT_DEGREE_FAHRENHEIT) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_POUND_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal 
unit (international table) per pound degree Fahrenheit', 'J43', '''BtuIT/(lb·°F)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_POUND_DEGREE_FAHRENHEIT) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_MINUTE = UnitDescriptor('British thermal unit (international table) per minute', 'J44', '''BtuIT/min''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_MINUTE) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SECOND = UnitDescriptor('British thermal unit (international table) per second', 'J45', '''BtuIT/s''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SECOND) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_FOOT_PER_HOUR_SQUARE_FOOT_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal unit (thermochemical) foot per hour square foot degree Fahrenheit', 'J46', '''Btuth·ft/(h·ft²·°F)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_FOOT_PER_HOUR_SQUARE_FOOT_DEGREE_FAHRENHEIT) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_HOUR = UnitDescriptor('British thermal unit (thermochemical) per hour', 'J47', '''Btuth/h''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_HOUR) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_INCH_PER_HOUR_SQUARE_FOOT_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal unit (thermochemical) inch per hour square foot degree Fahrenheit', 'J48', '''Btuth·in/(h·ft²·°F)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_INCH_PER_HOUR_SQUARE_FOOT_DEGREE_FAHRENHEIT) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_INCH_PER_SECOND_SQUARE_FOOT_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal unit (thermochemical) inch per second square foot degree Fahrenheit', 'J49', '''Btuth·in/(s·ft²·°F)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_INCH_PER_SECOND_SQUARE_FOOT_DEGREE_FAHRENHEIT) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_POUND_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal unit (thermochemical) per pound degree Fahrenheit', 'J50', '''Btuth/(lb·°F)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_POUND_DEGREE_FAHRENHEIT) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_MINUTE = UnitDescriptor('British thermal unit (thermochemical) per minute', 'J51', '''Btuth/min''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_MINUTE) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_SECOND = UnitDescriptor('British thermal unit (thermochemical) per second', 'J52', '''Btuth/s''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_SECOND) COULOMB_SQUARE_METRE_PER_KILOGRAM = UnitDescriptor('coulomb square metre per kilogram', 'J53', '''C·m²/kg''') ALL_UNITS.append(COULOMB_SQUARE_METRE_PER_KILOGRAM) MEGABAUD = UnitDescriptor('megabaud', 'J54', '''MBd''') ALL_UNITS.append(MEGABAUD) WATT_SECOND = UnitDescriptor('watt second', 'J55', '''W·s''') ALL_UNITS.append(WATT_SECOND) BAR_PER_BAR = UnitDescriptor('bar per bar', 'J56', '''bar/bar''') ALL_UNITS.append(BAR_PER_BAR) BARREL_UK_PETROLEUM = UnitDescriptor('barrel (UK petroleum)', 'J57', '''bbl (UK liq.)''') ALL_UNITS.append(BARREL_UK_PETROLEUM) BARREL_UK_PETROLEUM_PER_MINUTE = UnitDescriptor('barrel (UK petroleum) per minute', 'J58', '''bbl (UK liq.)/min''') ALL_UNITS.append(BARREL_UK_PETROLEUM_PER_MINUTE) BARREL_UK_PETROLEUM_PER_DAY = UnitDescriptor('barrel (UK petroleum) per day', 'J59', '''bbl (UK liq.)/d''') ALL_UNITS.append(BARREL_UK_PETROLEUM_PER_DAY) BARREL_UK_PETROLEUM_PER_HOUR = UnitDescriptor('barrel (UK petroleum) per hour', 'J60', '''bbl (UK liq.)/h''') ALL_UNITS.append(BARREL_UK_PETROLEUM_PER_HOUR) BARREL_UK_PETROLEUM_PER_SECOND = UnitDescriptor('barrel (UK petroleum) per second', 'J61', 
'''bbl (UK liq.)/s''') ALL_UNITS.append(BARREL_UK_PETROLEUM_PER_SECOND) BARREL_US_PETROLEUM_PER_HOUR = UnitDescriptor('barrel (US petroleum) per hour', 'J62', '''bbl (US)/h''') ALL_UNITS.append(BARREL_US_PETROLEUM_PER_HOUR) BARREL_US_PETROLEUM_PER_SECOND = UnitDescriptor('barrel (US petroleum) per second', 'J63', '''bbl (US)/s''') ALL_UNITS.append(BARREL_US_PETROLEUM_PER_SECOND) BUSHEL_UK_PER_DAY = UnitDescriptor('bushel (UK) per day', 'J64', '''bu (UK)/d''') ALL_UNITS.append(BUSHEL_UK_PER_DAY) BUSHEL_UK_PER_HOUR = UnitDescriptor('bushel (UK) per hour', 'J65', '''bu (UK)/h''') ALL_UNITS.append(BUSHEL_UK_PER_HOUR) BUSHEL_UK_PER_MINUTE = UnitDescriptor('bushel (UK) per minute', 'J66', '''bu (UK)/min''') ALL_UNITS.append(BUSHEL_UK_PER_MINUTE) BUSHEL_UK_PER_SECOND = UnitDescriptor('bushel (UK) per second', 'J67', '''bu (UK)/s''') ALL_UNITS.append(BUSHEL_UK_PER_SECOND) BUSHEL_US_DRY_PER_DAY = UnitDescriptor('bushel (US dry) per day', 'J68', '''bu (US dry)/d''') ALL_UNITS.append(BUSHEL_US_DRY_PER_DAY) BUSHEL_US_DRY_PER_HOUR = UnitDescriptor('bushel (US dry) per hour', 'J69', '''bu (US dry)/h''') ALL_UNITS.append(BUSHEL_US_DRY_PER_HOUR) BUSHEL_US_DRY_PER_MINUTE = UnitDescriptor('bushel (US dry) per minute', 'J70', '''bu (US dry)/min''') ALL_UNITS.append(BUSHEL_US_DRY_PER_MINUTE) BUSHEL_US_DRY_PER_SECOND = UnitDescriptor('bushel (US dry) per second', 'J71', '''bu (US dry)/s''') ALL_UNITS.append(BUSHEL_US_DRY_PER_SECOND) CENTINEWTON_METRE = UnitDescriptor('centinewton metre', 'J72', '''cN·m''') ALL_UNITS.append(CENTINEWTON_METRE) CENTIPOISE_PER_KELVIN = UnitDescriptor('centipoise per kelvin', 'J73', '''cP/K''') ALL_UNITS.append(CENTIPOISE_PER_KELVIN) CENTIPOISE_PER_BAR = UnitDescriptor('centipoise per bar', 'J74', '''cP/bar''') ALL_UNITS.append(CENTIPOISE_PER_BAR) CALORIE_MEAN = UnitDescriptor('calorie (mean)', 'J75', '''cal''') ALL_UNITS.append(CALORIE_MEAN) CALORIE_INTERNATIONAL_TABLE_PER_GRAM_DEGREE_CELSIUS = UnitDescriptor('calorie (international table) per gram degree Celsius', 'J76', '''calIT/(g·°C)''') ALL_UNITS.append(CALORIE_INTERNATIONAL_TABLE_PER_GRAM_DEGREE_CELSIUS) CALORIE_THERMOCHEMICAL_PER_CENTIMETRE_SECOND_DEGREE_CELSIUS = UnitDescriptor('calorie (thermochemical) per centimetre second degree Celsius', 'J78', '''calth/(cm·s·°C)''') ALL_UNITS.append(CALORIE_THERMOCHEMICAL_PER_CENTIMETRE_SECOND_DEGREE_CELSIUS) CALORIE_THERMOCHEMICAL_PER_GRAM_DEGREE_CELSIUS = UnitDescriptor('calorie (thermochemical) per gram degree Celsius', 'J79', '''calth/(g·°C)''') ALL_UNITS.append(CALORIE_THERMOCHEMICAL_PER_GRAM_DEGREE_CELSIUS) CALORIE_THERMOCHEMICAL_PER_MINUTE = UnitDescriptor('calorie (thermochemical) per minute', 'J81', '''calth/min''') ALL_UNITS.append(CALORIE_THERMOCHEMICAL_PER_MINUTE) CALORIE_THERMOCHEMICAL_PER_SECOND = UnitDescriptor('calorie (thermochemical) per second', 'J82', '''calth/s''') ALL_UNITS.append(CALORIE_THERMOCHEMICAL_PER_SECOND) CLO = UnitDescriptor('clo', 'J83', '''clo''') ALL_UNITS.append(CLO) CENTIMETRE_PER_SECOND_KELVIN = UnitDescriptor('centimetre per second kelvin', 'J84', '''(cm/s)/K''') ALL_UNITS.append(CENTIMETRE_PER_SECOND_KELVIN) CENTIMETRE_PER_SECOND_BAR = UnitDescriptor('centimetre per second bar', 'J85', '''(cm/s)/bar''') ALL_UNITS.append(CENTIMETRE_PER_SECOND_BAR) CUBIC_CENTIMETRE_PER_CUBIC_METRE = UnitDescriptor('cubic centimetre per cubic metre', 'J87', '''cm³/m³''') ALL_UNITS.append(CUBIC_CENTIMETRE_PER_CUBIC_METRE) CENTIMETRE_OF_MERCURY = UnitDescriptor('centimetre of mercury', 'J89', '''cm Hg''') ALL_UNITS.append(CENTIMETRE_OF_MERCURY) 
CUBIC_DECIMETRE_PER_DAY = UnitDescriptor('cubic decimetre per day', 'J90', '''dm³/d''') ALL_UNITS.append(CUBIC_DECIMETRE_PER_DAY) CUBIC_DECIMETRE_PER_CUBIC_METRE = UnitDescriptor('cubic decimetre per cubic metre', 'J91', '''dm³/m³''') ALL_UNITS.append(CUBIC_DECIMETRE_PER_CUBIC_METRE) CUBIC_DECIMETRE_PER_MINUTE = UnitDescriptor('cubic decimetre per minute', 'J92', '''dm³/min''') ALL_UNITS.append(CUBIC_DECIMETRE_PER_MINUTE) CUBIC_DECIMETRE_PER_SECOND = UnitDescriptor('cubic decimetre per second', 'J93', '''dm³/s''') ALL_UNITS.append(CUBIC_DECIMETRE_PER_SECOND) DYNE_CENTIMETRE = UnitDescriptor('dyne centimetre', 'J94', '''dyn·cm''') ALL_UNITS.append(DYNE_CENTIMETRE) OUNCE_UK_FLUID_PER_DAY = UnitDescriptor('ounce (UK fluid) per day', 'J95', '''fl oz (UK)/d''') ALL_UNITS.append(OUNCE_UK_FLUID_PER_DAY) OUNCE_UK_FLUID_PER_HOUR = UnitDescriptor('ounce (UK fluid) per hour', 'J96', '''fl oz (UK)/h''') ALL_UNITS.append(OUNCE_UK_FLUID_PER_HOUR) OUNCE_UK_FLUID_PER_MINUTE = UnitDescriptor('ounce (UK fluid) per minute', 'J97', '''fl oz (UK)/min''') ALL_UNITS.append(OUNCE_UK_FLUID_PER_MINUTE) OUNCE_UK_FLUID_PER_SECOND = UnitDescriptor('ounce (UK fluid) per second', 'J98', '''fl oz (UK)/s''') ALL_UNITS.append(OUNCE_UK_FLUID_PER_SECOND) OUNCE_US_FLUID_PER_DAY = UnitDescriptor('ounce (US fluid) per day', 'J99', '''fl oz (US)/d''') ALL_UNITS.append(OUNCE_US_FLUID_PER_DAY) JUMBO = UnitDescriptor('jumbo', 'JB', '''''') ALL_UNITS.append(JUMBO) JOULE_PER_KELVIN = UnitDescriptor('joule per kelvin', 'JE', '''J/K''') ALL_UNITS.append(JOULE_PER_KELVIN) JUG = UnitDescriptor('jug', 'JG', '''''') ALL_UNITS.append(JUG) MEGAJOULE_PER_KILOGRAM = UnitDescriptor('megajoule per kilogram', 'JK', '''MJ/kg''') ALL_UNITS.append(MEGAJOULE_PER_KILOGRAM) MEGAJOULE_PER_CUBIC_METRE = UnitDescriptor('megajoule per cubic metre', 'JM', '''MJ/m³''') ALL_UNITS.append(MEGAJOULE_PER_CUBIC_METRE) PIPELINE_JOINT = UnitDescriptor('pipeline joint', 'JNT', '''''') ALL_UNITS.append(PIPELINE_JOINT) JOINT = UnitDescriptor('joint', 'JO', '''''') ALL_UNITS.append(JOINT) JOULE = UnitDescriptor('joule', 'JOU', '''J''') ALL_UNITS.append(JOULE) HUNDRED_METRE = UnitDescriptor('hundred metre', 'JPS', '''''') ALL_UNITS.append(HUNDRED_METRE) JAR = UnitDescriptor('jar', 'JR', '''''') ALL_UNITS.append(JAR) NUMBER_OF_JEWELS = UnitDescriptor('number of jewels', 'JWL', '''''') ALL_UNITS.append(NUMBER_OF_JEWELS) KILOWATT_DEMAND = UnitDescriptor('kilowatt demand', 'K1', '''''') ALL_UNITS.append(KILOWATT_DEMAND) OUNCE_US_FLUID_PER_HOUR = UnitDescriptor('ounce (US fluid) per hour', 'K10', '''fl oz (US)/h''') ALL_UNITS.append(OUNCE_US_FLUID_PER_HOUR) OUNCE_US_FLUID_PER_MINUTE = UnitDescriptor('ounce (US fluid) per minute', 'K11', '''fl oz (US)/min''') ALL_UNITS.append(OUNCE_US_FLUID_PER_MINUTE) OUNCE_US_FLUID_PER_SECOND = UnitDescriptor('ounce (US fluid) per second', 'K12', '''fl oz (US)/s''') ALL_UNITS.append(OUNCE_US_FLUID_PER_SECOND) FOOT_PER_DEGREE_FAHRENHEIT = UnitDescriptor('foot per degree Fahrenheit', 'K13', '''ft/°F''') ALL_UNITS.append(FOOT_PER_DEGREE_FAHRENHEIT) FOOT_PER_HOUR = UnitDescriptor('foot per hour', 'K14', '''ft/h''') ALL_UNITS.append(FOOT_PER_HOUR) FOOT_POUND_FORCE_PER_HOUR = UnitDescriptor('foot pound-force per hour', 'K15', '''ft·lbf/h''') ALL_UNITS.append(FOOT_POUND_FORCE_PER_HOUR) FOOT_POUND_FORCE_PER_MINUTE = UnitDescriptor('foot pound-force per minute', 'K16', '''ft·lbf/min''') ALL_UNITS.append(FOOT_POUND_FORCE_PER_MINUTE) FOOT_PER_PSI = UnitDescriptor('foot per psi', 'K17', '''ft/psi''') ALL_UNITS.append(FOOT_PER_PSI) 
FOOT_PER_SECOND_DEGREE_FAHRENHEIT = UnitDescriptor('foot per second degree Fahrenheit', 'K18', '''(ft/s)/°F''') ALL_UNITS.append(FOOT_PER_SECOND_DEGREE_FAHRENHEIT) FOOT_PER_SECOND_PSI = UnitDescriptor('foot per second psi', 'K19', '''(ft/s)/psi''') ALL_UNITS.append(FOOT_PER_SECOND_PSI) KILOVOLT_AMPERE_REACTIVE_DEMAND = UnitDescriptor('kilovolt ampere reactive demand', 'K2', '''''') ALL_UNITS.append(KILOVOLT_AMPERE_REACTIVE_DEMAND) RECIPROCAL_CUBIC_FOOT = UnitDescriptor('reciprocal cubic foot', 'K20', '''1/ft³''') ALL_UNITS.append(RECIPROCAL_CUBIC_FOOT) CUBIC_FOOT_PER_DEGREE_FAHRENHEIT = UnitDescriptor('cubic foot per degree Fahrenheit', 'K21', '''ft³/°F''') ALL_UNITS.append(CUBIC_FOOT_PER_DEGREE_FAHRENHEIT) CUBIC_FOOT_PER_DAY = UnitDescriptor('cubic foot per day', 'K22', '''ft³/d''') ALL_UNITS.append(CUBIC_FOOT_PER_DAY) CUBIC_FOOT_PER_PSI = UnitDescriptor('cubic foot per psi', 'K23', '''ft³/psi''') ALL_UNITS.append(CUBIC_FOOT_PER_PSI) FOOT_OF_WATER = UnitDescriptor('foot of water', 'K24', '''ft H₂O''') ALL_UNITS.append(FOOT_OF_WATER) FOOT_OF_MERCURY = UnitDescriptor('foot of mercury', 'K25', '''ft Hg''') ALL_UNITS.append(FOOT_OF_MERCURY) GALLON_UK_PER_DAY = UnitDescriptor('gallon (UK) per day', 'K26', '''gal (UK)/d''') ALL_UNITS.append(GALLON_UK_PER_DAY) GALLON_UK_PER_HOUR = UnitDescriptor('gallon (UK) per hour', 'K27', '''gal (UK)/h''') ALL_UNITS.append(GALLON_UK_PER_HOUR) GALLON_UK_PER_SECOND = UnitDescriptor('gallon (UK) per second', 'K28', '''gal (UK)/s''') ALL_UNITS.append(GALLON_UK_PER_SECOND) KILOVOLT_AMPERE_REACTIVE_HOUR = UnitDescriptor('kilovolt ampere reactive hour', 'K3', '''kvar·h''') ALL_UNITS.append(KILOVOLT_AMPERE_REACTIVE_HOUR) GALLON_US_LIQUID_PER_SECOND = UnitDescriptor('gallon (US liquid) per second', 'K30', '''gal (US liq.)/s''') ALL_UNITS.append(GALLON_US_LIQUID_PER_SECOND) GRAM_FORCE_PER_SQUARE_CENTIMETRE = UnitDescriptor('gram-force per square centimetre', 'K31', '''gf/cm²''') ALL_UNITS.append(GRAM_FORCE_PER_SQUARE_CENTIMETRE) GILL_UK_PER_DAY = UnitDescriptor('gill (UK) per day', 'K32', '''gi (UK)/d''') ALL_UNITS.append(GILL_UK_PER_DAY) GILL_UK_PER_HOUR = UnitDescriptor('gill (UK) per hour', 'K33', '''gi (UK)/h''') ALL_UNITS.append(GILL_UK_PER_HOUR) GILL_UK_PER_MINUTE = UnitDescriptor('gill (UK) per minute', 'K34', '''gi (UK)/min''') ALL_UNITS.append(GILL_UK_PER_MINUTE) GILL_UK_PER_SECOND = UnitDescriptor('gill (UK) per second', 'K35', '''gi (UK)/s''') ALL_UNITS.append(GILL_UK_PER_SECOND) GILL_US_PER_DAY = UnitDescriptor('gill (US) per day', 'K36', '''gi (US)/d''') ALL_UNITS.append(GILL_US_PER_DAY) GILL_US_PER_HOUR = UnitDescriptor('gill (US) per hour', 'K37', '''gi (US)/h''') ALL_UNITS.append(GILL_US_PER_HOUR) GILL_US_PER_MINUTE = UnitDescriptor('gill (US) per minute', 'K38', '''gi (US)/min''') ALL_UNITS.append(GILL_US_PER_MINUTE) GILL_US_PER_SECOND = UnitDescriptor('gill (US) per second', 'K39', '''gi (US)/s''') ALL_UNITS.append(GILL_US_PER_SECOND) STANDARD_ACCELERATION_OF_FREE_FALL = UnitDescriptor('standard acceleration of free fall', 'K40', '''gn''') ALL_UNITS.append(STANDARD_ACCELERATION_OF_FREE_FALL) GRAIN_PER_GALLON_US = UnitDescriptor('grain per gallon (US)', 'K41', '''gr/gal (US)''') ALL_UNITS.append(GRAIN_PER_GALLON_US) HORSEPOWER_BOILER = UnitDescriptor('horsepower (boiler)', 'K42', '''boiler hp''') ALL_UNITS.append(HORSEPOWER_BOILER) HORSEPOWER_ELECTRIC = UnitDescriptor('horsepower (electric)', 'K43', '''electric hp''') ALL_UNITS.append(HORSEPOWER_ELECTRIC) INCH_PER_DEGREE_FAHRENHEIT = UnitDescriptor('inch per degree Fahrenheit', 'K45', '''in/°F''') 
ALL_UNITS.append(INCH_PER_DEGREE_FAHRENHEIT) INCH_PER_PSI = UnitDescriptor('inch per psi', 'K46', '''in/psi''') ALL_UNITS.append(INCH_PER_PSI) INCH_PER_SECOND_DEGREE_FAHRENHEIT = UnitDescriptor('inch per second degree Fahrenheit', 'K47', '''(in/s)/°F''') ALL_UNITS.append(INCH_PER_SECOND_DEGREE_FAHRENHEIT) INCH_PER_SECOND_PSI = UnitDescriptor('inch per second psi', 'K48', '''(in/s)/psi''') ALL_UNITS.append(INCH_PER_SECOND_PSI) RECIPROCAL_CUBIC_INCH = UnitDescriptor('reciprocal cubic inch', 'K49', '''1/in³''') ALL_UNITS.append(RECIPROCAL_CUBIC_INCH) KILOVOLT_AMPERE_REACTIVE = UnitDescriptor('kilovolt ampere (reactive)', 'K5', '''kvar''') ALL_UNITS.append(KILOVOLT_AMPERE_REACTIVE) KILOBAUD = UnitDescriptor('kilobaud', 'K50', '''kBd''') ALL_UNITS.append(KILOBAUD) KILOCALORIE_MEAN = UnitDescriptor('kilocalorie (mean)', 'K51', '''kcal''') ALL_UNITS.append(KILOCALORIE_MEAN) KILOCALORIE_INTERNATIONAL_TABLE_PER_HOUR_METRE_DEGREE_CELSIUS = UnitDescriptor('kilocalorie (international table) per hour metre degree Celsius', 'K52', '''kcal/(m·h·°C)''') ALL_UNITS.append(KILOCALORIE_INTERNATIONAL_TABLE_PER_HOUR_METRE_DEGREE_CELSIUS) KILOCALORIE_THERMOCHEMICAL = UnitDescriptor('kilocalorie (thermochemical)', 'K53', '''kcalth''') ALL_UNITS.append(KILOCALORIE_THERMOCHEMICAL) KILOCALORIE_THERMOCHEMICAL_PER_MINUTE = UnitDescriptor('kilocalorie (thermochemical) per minute', 'K54', '''kcalth/min''') ALL_UNITS.append(KILOCALORIE_THERMOCHEMICAL_PER_MINUTE) KILOCALORIE_THERMOCHEMICAL_PER_SECOND = UnitDescriptor('kilocalorie (thermochemical) per second', 'K55', '''kcalth/s''') ALL_UNITS.append(KILOCALORIE_THERMOCHEMICAL_PER_SECOND) KILOMOLE_PER_HOUR = UnitDescriptor('kilomole per hour', 'K58', '''kmol/h''') ALL_UNITS.append(KILOMOLE_PER_HOUR) KILOMOLE_PER_CUBIC_METRE_KELVIN = UnitDescriptor('kilomole per cubic metre kelvin', 'K59', '''(kmol/m³)/K''') ALL_UNITS.append(KILOMOLE_PER_CUBIC_METRE_KELVIN) KILOLITRE = UnitDescriptor('kilolitre', 'K6', '''kl''') ALL_UNITS.append(KILOLITRE) KILOMOLE_PER_CUBIC_METRE_BAR = UnitDescriptor('kilomole per cubic metre bar', 'K60', '''(kmol/m³)/bar''') ALL_UNITS.append(KILOMOLE_PER_CUBIC_METRE_BAR) KILOMOLE_PER_MINUTE = UnitDescriptor('kilomole per minute', 'K61', '''kmol/min''') ALL_UNITS.append(KILOMOLE_PER_MINUTE) LITRE_PER_LITRE = UnitDescriptor('litre per litre', 'K62', '''l/l''') ALL_UNITS.append(LITRE_PER_LITRE) RECIPROCAL_LITRE = UnitDescriptor('reciprocal litre', 'K63', '''1/l''') ALL_UNITS.append(RECIPROCAL_LITRE) POUND_AVOIRDUPOIS_PER_DEGREE_FAHRENHEIT = UnitDescriptor('pound (avoirdupois) per degree Fahrenheit', 'K64', '''lb/°F''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_DEGREE_FAHRENHEIT) POUND_AVOIRDUPOIS_SQUARE_FOOT = UnitDescriptor('pound (avoirdupois) square foot', 'K65', '''lb·ft²''') ALL_UNITS.append(POUND_AVOIRDUPOIS_SQUARE_FOOT) POUND_AVOIRDUPOIS_PER_DAY = UnitDescriptor('pound (avoirdupois) per day', 'K66', '''lb/d''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_DAY) POUND_PER_FOOT_HOUR = UnitDescriptor('pound per foot hour', 'K67', '''lb/(ft·h)''') ALL_UNITS.append(POUND_PER_FOOT_HOUR) POUND_PER_FOOT_SECOND = UnitDescriptor('pound per foot second', 'K68', '''lb/(ft·s)''') ALL_UNITS.append(POUND_PER_FOOT_SECOND) POUND_AVOIRDUPOIS_PER_CUBIC_FOOT_DEGREE_FAHRENHEIT = UnitDescriptor('pound (avoirdupois) per cubic foot degree Fahrenheit', 'K69', '''(lb/ft³)/°F''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_CUBIC_FOOT_DEGREE_FAHRENHEIT) POUND_AVOIRDUPOIS_PER_CUBIC_FOOT_PSI = UnitDescriptor('pound (avoirdupois) per cubic foot psi', 'K70', '''(lb/ft³)/psi''') 
ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_CUBIC_FOOT_PSI) POUND_AVOIRDUPOIS_PER_GALLON_UK = UnitDescriptor('pound (avoirdupois) per gallon (UK)', 'K71', '''lb/gal (UK)''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_GALLON_UK) POUND_AVOIRDUPOIS_PER_HOUR_DEGREE_FAHRENHEIT = UnitDescriptor('pound (avoirdupois) per hour degree Fahrenheit', 'K73', '''(lb/h)/°F''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_HOUR_DEGREE_FAHRENHEIT) POUND_AVOIRDUPOIS_PER_HOUR_PSI = UnitDescriptor('pound (avoirdupois) per hour psi', 'K74', '''(lb/h)/psi''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_HOUR_PSI) POUND_AVOIRDUPOIS_PER_CUBIC_INCH_DEGREE_FAHRENHEIT = UnitDescriptor('pound (avoirdupois) per cubic inch degree Fahrenheit', 'K75', '''(lb/in³)/°F''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_CUBIC_INCH_DEGREE_FAHRENHEIT) POUND_AVOIRDUPOIS_PER_CUBIC_INCH_PSI = UnitDescriptor('pound (avoirdupois) per cubic inch psi', 'K76', '''(lb/in³)/psi''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_CUBIC_INCH_PSI) POUND_AVOIRDUPOIS_PER_PSI = UnitDescriptor('pound (avoirdupois) per psi', 'K77', '''lb/psi''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_PSI) POUND_AVOIRDUPOIS_PER_MINUTE = UnitDescriptor('pound (avoirdupois) per minute', 'K78', '''lb/min''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_MINUTE) POUND_AVOIRDUPOIS_PER_MINUTE_DEGREE_FAHRENHEIT = UnitDescriptor('pound (avoirdupois) per minute degree Fahrenheit', 'K79', '''lb/(min·°F)''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_MINUTE_DEGREE_FAHRENHEIT) POUND_AVOIRDUPOIS_PER_MINUTE_PSI = UnitDescriptor('pound (avoirdupois) per minute psi', 'K80', '''(lb/min)/psi''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_MINUTE_PSI) POUND_AVOIRDUPOIS_PER_SECOND = UnitDescriptor('pound (avoirdupois) per second', 'K81', '''lb/s''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_SECOND) POUND_AVOIRDUPOIS_PER_SECOND_DEGREE_FAHRENHEIT = UnitDescriptor('pound (avoirdupois) per second degree Fahrenheit', 'K82', '''(lb/s)/°F''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_SECOND_DEGREE_FAHRENHEIT) POUND_AVOIRDUPOIS_PER_SECOND_PSI = UnitDescriptor('pound (avoirdupois) per second psi', 'K83', '''(lb/s)/psi''') ALL_UNITS.append(POUND_AVOIRDUPOIS_PER_SECOND_PSI) POUND_PER_CUBIC_YARD = UnitDescriptor('pound per cubic yard', 'K84', '''lb/yd³''') ALL_UNITS.append(POUND_PER_CUBIC_YARD) POUND_FORCE_PER_SQUARE_FOOT = UnitDescriptor('pound-force per square foot', 'K85', '''lbf/ft²''') ALL_UNITS.append(POUND_FORCE_PER_SQUARE_FOOT) POUND_FORCE_PER_SQUARE_INCH_DEGREE_FAHRENHEIT = UnitDescriptor('pound-force per square inch degree Fahrenheit', 'K86', '''psi/°F''') ALL_UNITS.append(POUND_FORCE_PER_SQUARE_INCH_DEGREE_FAHRENHEIT) PSI_CUBIC_INCH_PER_SECOND = UnitDescriptor('psi cubic inch per second', 'K87', '''psi·in³/s''') ALL_UNITS.append(PSI_CUBIC_INCH_PER_SECOND) PSI_LITRE_PER_SECOND = UnitDescriptor('psi litre per second', 'K88', '''psi·l/s''') ALL_UNITS.append(PSI_LITRE_PER_SECOND) PSI_CUBIC_METRE_PER_SECOND = UnitDescriptor('psi cubic metre per second', 'K89', '''psi·m³/s''') ALL_UNITS.append(PSI_CUBIC_METRE_PER_SECOND) PSI_CUBIC_YARD_PER_SECOND = UnitDescriptor('psi cubic yard per second', 'K90', '''psi·yd³/s''') ALL_UNITS.append(PSI_CUBIC_YARD_PER_SECOND) POUND_FORCE_SECOND_PER_SQUARE_FOOT = UnitDescriptor('pound-force second per square foot', 'K91', '''lbf·s/ft²''') ALL_UNITS.append(POUND_FORCE_SECOND_PER_SQUARE_FOOT) POUND_FORCE_SECOND_PER_SQUARE_INCH = UnitDescriptor('pound-force second per square inch', 'K92', '''lbf·s/in²''') ALL_UNITS.append(POUND_FORCE_SECOND_PER_SQUARE_INCH) RECIPROCAL_PSI = UnitDescriptor('reciprocal psi', 'K93', 
'''1/psi''') ALL_UNITS.append(RECIPROCAL_PSI) QUART_UK_LIQUID_PER_DAY = UnitDescriptor('quart (UK liquid) per day', 'K94', '''qt (UK liq.)/d''') ALL_UNITS.append(QUART_UK_LIQUID_PER_DAY) QUART_UK_LIQUID_PER_HOUR = UnitDescriptor('quart (UK liquid) per hour', 'K95', '''qt (UK liq.)/h''') ALL_UNITS.append(QUART_UK_LIQUID_PER_HOUR) QUART_UK_LIQUID_PER_MINUTE = UnitDescriptor('quart (UK liquid) per minute', 'K96', '''qt (UK liq.)/min''') ALL_UNITS.append(QUART_UK_LIQUID_PER_MINUTE) QUART_UK_LIQUID_PER_SECOND = UnitDescriptor('quart (UK liquid) per second', 'K97', '''qt (UK liq.)/s''') ALL_UNITS.append(QUART_UK_LIQUID_PER_SECOND) QUART_US_LIQUID_PER_DAY = UnitDescriptor('quart (US liquid) per day', 'K98', '''qt (US liq.)/d''') ALL_UNITS.append(QUART_US_LIQUID_PER_DAY) QUART_US_LIQUID_PER_HOUR = UnitDescriptor('quart (US liquid) per hour', 'K99', '''qt (US liq.)/h''') ALL_UNITS.append(QUART_US_LIQUID_PER_HOUR) CAKE = UnitDescriptor('cake', 'KA', '''''') ALL_UNITS.append(CAKE) KATAL = UnitDescriptor('katal', 'KAT', '''kat''') ALL_UNITS.append(KATAL) KILOCHARACTER = UnitDescriptor('kilocharacter', 'KB', '''''') ALL_UNITS.append(KILOCHARACTER) KILOBAR = UnitDescriptor('kilobar', 'KBA', '''kbar''') ALL_UNITS.append(KILOBAR) KILOGRAM_OF_CHOLINE_CHLORIDE = UnitDescriptor('kilogram of choline chloride', 'KCC', '''kg C₅ H₁₄ClNO''') ALL_UNITS.append(KILOGRAM_OF_CHOLINE_CHLORIDE) KILOGRAM_DECIMAL = UnitDescriptor('kilogram decimal', 'KD', '''''') ALL_UNITS.append(KILOGRAM_DECIMAL) KILOGRAM_DRAINED_NET_WEIGHT = UnitDescriptor('kilogram drained net weight', 'KDW', '''kg/net eda''') ALL_UNITS.append(KILOGRAM_DRAINED_NET_WEIGHT) KELVIN = UnitDescriptor('kelvin', 'KEL', '''K''') ALL_UNITS.append(KELVIN) KILOPACKET = UnitDescriptor('kilopacket', 'KF', '''''') ALL_UNITS.append(KILOPACKET) KEG = UnitDescriptor('keg', 'KG', '''''') ALL_UNITS.append(KEG) KILOGRAM = UnitDescriptor('kilogram', 'KGM', '''kg''') ALL_UNITS.append(KILOGRAM) KILOGRAM_PER_SECOND = UnitDescriptor('kilogram per second', 'KGS', '''kg/s''') ALL_UNITS.append(KILOGRAM_PER_SECOND) KILOGRAM_OF_HYDROGEN_PEROXIDE = UnitDescriptor('kilogram of hydrogen peroxide', 'KHY', '''kg H₂O₂''') ALL_UNITS.append(KILOGRAM_OF_HYDROGEN_PEROXIDE) KILOHERTZ = UnitDescriptor('kilohertz', 'KHZ', '''kHz''') ALL_UNITS.append(KILOHERTZ) KILOGRAM_PER_MILLIMETRE_WIDTH = UnitDescriptor('kilogram per millimetre width', 'KI', '''''') ALL_UNITS.append(KILOGRAM_PER_MILLIMETRE_WIDTH) KILOGRAM_INCLUDING_CONTAINER = UnitDescriptor('kilogram, including container', 'KIC', '''''') ALL_UNITS.append(KILOGRAM_INCLUDING_CONTAINER) KILOGRAM_INCLUDING_INNER_PACKAGING = UnitDescriptor('kilogram, including inner packaging', 'KIP', '''''') ALL_UNITS.append(KILOGRAM_INCLUDING_INNER_PACKAGING) KILOSEGMENT = UnitDescriptor('kilosegment', 'KJ', '''''') ALL_UNITS.append(KILOSEGMENT) KILOJOULE = UnitDescriptor('kilojoule', 'KJO', '''kJ''') ALL_UNITS.append(KILOJOULE) KILOGRAM_PER_METRE = UnitDescriptor('kilogram per metre', 'KL', '''kg/m''') ALL_UNITS.append(KILOGRAM_PER_METRE) LACTIC_DRY_MATERIAL_PERCENTAGE = UnitDescriptor('lactic dry material percentage', 'KLK', '''''') ALL_UNITS.append(LACTIC_DRY_MATERIAL_PERCENTAGE) KILOLUX = UnitDescriptor('kilolux', 'KLX', '''klx''') ALL_UNITS.append(KILOLUX) KILOGRAM_OF_METHYLAMINE = UnitDescriptor('kilogram of methylamine', 'KMA', '''kg met.am.''') ALL_UNITS.append(KILOGRAM_OF_METHYLAMINE) KILOMETRE_PER_HOUR = UnitDescriptor('kilometre per hour', 'KMH', '''km/h''') ALL_UNITS.append(KILOMETRE_PER_HOUR) SQUARE_KILOMETRE = UnitDescriptor('square kilometre', 
'KMK', '''km²''') ALL_UNITS.append(SQUARE_KILOMETRE) KILOGRAM_PER_CUBIC_METRE = UnitDescriptor('kilogram per cubic metre', 'KMQ', '''kg/m³''') ALL_UNITS.append(KILOGRAM_PER_CUBIC_METRE) KILOMETRE = UnitDescriptor('kilometre', 'KMT', '''km''') ALL_UNITS.append(KILOMETRE) KILOGRAM_OF_NITROGEN = UnitDescriptor('kilogram of nitrogen', 'KNI', '''kg N''') ALL_UNITS.append(KILOGRAM_OF_NITROGEN) KILONEWTON_PER_SQUARE_METRE = UnitDescriptor('kilonewton per square metre', 'KNM', '''kN/m2''') ALL_UNITS.append(KILONEWTON_PER_SQUARE_METRE) KILOGRAM_NAMED_SUBSTANCE = UnitDescriptor('kilogram named substance', 'KNS', '''''') ALL_UNITS.append(KILOGRAM_NAMED_SUBSTANCE) KNOT = UnitDescriptor('knot', 'KNT', '''kn''') ALL_UNITS.append(KNOT) MILLIEQUIVALENCE_CAUSTIC_POTASH_PER_GRAM_OF_PRODUCT = UnitDescriptor('milliequivalence caustic potash per gram of product', 'KO', '''''') ALL_UNITS.append(MILLIEQUIVALENCE_CAUSTIC_POTASH_PER_GRAM_OF_PRODUCT) KILOPASCAL = UnitDescriptor('kilopascal', 'KPA', '''kPa''') ALL_UNITS.append(KILOPASCAL) KILOGRAM_OF_POTASSIUM_HYDROXIDE_CAUSTIC_POTASH = UnitDescriptor('kilogram of potassium hydroxide (caustic potash)', 'KPH', '''kg KOH''') ALL_UNITS.append(KILOGRAM_OF_POTASSIUM_HYDROXIDE_CAUSTIC_POTASH) KILOGRAM_OF_POTASSIUM_OXIDE = UnitDescriptor('kilogram of potassium oxide', 'KPO', '''kg K₂O''') ALL_UNITS.append(KILOGRAM_OF_POTASSIUM_OXIDE) KILOGRAM_OF_PHOSPHORUS_PENTOXIDE_PHOSPHORIC_ANHYDRIDE = UnitDescriptor('kilogram of phosphorus pentoxide (phosphoric anhydride)', 'KPP', '''''') ALL_UNITS.append(KILOGRAM_OF_PHOSPHORUS_PENTOXIDE_PHOSPHORIC_ANHYDRIDE) KILOROENTGEN = UnitDescriptor('kiloroentgen', 'KR', '''kR''') ALL_UNITS.append(KILOROENTGEN) THOUSAND_POUND_PER_SQUARE_INCH = UnitDescriptor('thousand pound per square inch', 'KS', '''''') ALL_UNITS.append(THOUSAND_POUND_PER_SQUARE_INCH) KILOGRAM_OF_SUBSTANCE_90_PERCENT_DRY = UnitDescriptor('kilogram of substance 90 % dry', 'KSD', '''kg 90 % sdt''') ALL_UNITS.append(KILOGRAM_OF_SUBSTANCE_90_PERCENT_DRY) KILOGRAM_OF_SODIUM_HYDROXIDE_CAUSTIC_SODA = UnitDescriptor('kilogram of sodium hydroxide (caustic soda)', 'KSH', '''kg NaOH''') ALL_UNITS.append(KILOGRAM_OF_SODIUM_HYDROXIDE_CAUSTIC_SODA) KIT = UnitDescriptor('kit', 'KT', '''''') ALL_UNITS.append(KIT) KILOTONNE = UnitDescriptor('kilotonne', 'KTN', '''kt''') ALL_UNITS.append(KILOTONNE) KILOGRAM_OF_URANIUM = UnitDescriptor('kilogram of uranium', 'KUR', '''kg U''') ALL_UNITS.append(KILOGRAM_OF_URANIUM) KILOVOLT_AMPERE = UnitDescriptor('kilovolt - ampere', 'KVA', '''kV·A''') ALL_UNITS.append(KILOVOLT_AMPERE) KILOVAR = UnitDescriptor('kilovar', 'KVR', '''kvar''') ALL_UNITS.append(KILOVAR) KILOVOLT = UnitDescriptor('kilovolt', 'KVT', '''kV''') ALL_UNITS.append(KILOVOLT) KILOGRAM_PER_MILLIMETRE = UnitDescriptor('kilogram per millimetre', 'KW', '''kg/mm''') ALL_UNITS.append(KILOGRAM_PER_MILLIMETRE) KILOWATT_HOUR = UnitDescriptor('kilowatt hour', 'KWH', '''kW·h''') ALL_UNITS.append(KILOWATT_HOUR) KILOWATT_YEAR = UnitDescriptor('kilowatt year', 'KWY', '''kW/year''') ALL_UNITS.append(KILOWATT_YEAR) KILOWATT_HOUR_PER_NORMALIZED_CUBIC_METRE = UnitDescriptor('Kilowatt hour per normalized cubic metre', 'KWN', '''''') ALL_UNITS.append(KILOWATT_HOUR_PER_NORMALIZED_CUBIC_METRE) KILOGRAM_OF_TUNGSTEN_TRIOXIDE = UnitDescriptor('kilogram of tungsten trioxide', 'KWO', '''kg WO₃''') ALL_UNITS.append(KILOGRAM_OF_TUNGSTEN_TRIOXIDE) KILOWATT_HOUR_PER_STANDARD_CUBIC_METRE = UnitDescriptor('Kilowatt hour per standard cubic metre', 'KWS', '''''') ALL_UNITS.append(KILOWATT_HOUR_PER_STANDARD_CUBIC_METRE) KILOWATT 
= UnitDescriptor('kilowatt', 'KWT', '''kW''') ALL_UNITS.append(KILOWATT) MILLILITRE_PER_KILOGRAM = UnitDescriptor('millilitre per kilogram', 'KX', '''ml/kg''') ALL_UNITS.append(MILLILITRE_PER_KILOGRAM) QUART_US_LIQUID_PER_MINUTE = UnitDescriptor('quart (US liquid) per minute', 'L10', '''qt (US liq.)/min''') ALL_UNITS.append(QUART_US_LIQUID_PER_MINUTE) QUART_US_LIQUID_PER_SECOND = UnitDescriptor('quart (US liquid) per second', 'L11', '''qt (US liq.)/s''') ALL_UNITS.append(QUART_US_LIQUID_PER_SECOND) METRE_PER_SECOND_KELVIN = UnitDescriptor('metre per second kelvin', 'L12', '''(m/s)/K''') ALL_UNITS.append(METRE_PER_SECOND_KELVIN) METRE_PER_SECOND_BAR = UnitDescriptor('metre per second bar', 'L13', '''(m/s)/bar''') ALL_UNITS.append(METRE_PER_SECOND_BAR) SQUARE_METRE_HOUR_DEGREE_CELSIUS_PER_KILOCALORIE_INTERNATIONAL_TABLE = UnitDescriptor('square metre hour degree Celsius per kilocalorie (international table)', 'L14', '''m²·h·°C/kcal''') ALL_UNITS.append(SQUARE_METRE_HOUR_DEGREE_CELSIUS_PER_KILOCALORIE_INTERNATIONAL_TABLE) MILLIPASCAL_SECOND_PER_KELVIN = UnitDescriptor('millipascal second per kelvin', 'L15', '''mPa·s/K''') ALL_UNITS.append(MILLIPASCAL_SECOND_PER_KELVIN) MILLIPASCAL_SECOND_PER_BAR = UnitDescriptor('millipascal second per bar', 'L16', '''mPa·s/bar''') ALL_UNITS.append(MILLIPASCAL_SECOND_PER_BAR) MILLIGRAM_PER_CUBIC_METRE_KELVIN = UnitDescriptor('milligram per cubic metre kelvin', 'L17', '''(mg/m³)/K''') ALL_UNITS.append(MILLIGRAM_PER_CUBIC_METRE_KELVIN) MILLIGRAM_PER_CUBIC_METRE_BAR = UnitDescriptor('milligram per cubic metre bar', 'L18', '''(mg/m³)/bar''') ALL_UNITS.append(MILLIGRAM_PER_CUBIC_METRE_BAR) MILLILITRE_PER_LITRE = UnitDescriptor('millilitre per litre', 'L19', '''ml/l''') ALL_UNITS.append(MILLILITRE_PER_LITRE) LITRE_PER_MINUTE = UnitDescriptor('litre per minute', 'L2', '''l/min''') ALL_UNITS.append(LITRE_PER_MINUTE) RECIPROCAL_CUBIC_MILLIMETRE = UnitDescriptor('reciprocal cubic millimetre', 'L20', '''1/mm³''') ALL_UNITS.append(RECIPROCAL_CUBIC_MILLIMETRE) CUBIC_MILLIMETRE_PER_CUBIC_METRE = UnitDescriptor('cubic millimetre per cubic metre', 'L21', '''mm³/m³''') ALL_UNITS.append(CUBIC_MILLIMETRE_PER_CUBIC_METRE) MOLE_PER_HOUR = UnitDescriptor('mole per hour', 'L23', '''mol/h''') ALL_UNITS.append(MOLE_PER_HOUR) MOLE_PER_KILOGRAM_KELVIN = UnitDescriptor('mole per kilogram kelvin', 'L24', '''(mol/kg)/K''') ALL_UNITS.append(MOLE_PER_KILOGRAM_KELVIN) MOLE_PER_KILOGRAM_BAR = UnitDescriptor('mole per kilogram bar', 'L25', '''(mol/kg)/bar''') ALL_UNITS.append(MOLE_PER_KILOGRAM_BAR) MOLE_PER_LITRE_KELVIN = UnitDescriptor('mole per litre kelvin', 'L26', '''(mol/l)/K''') ALL_UNITS.append(MOLE_PER_LITRE_KELVIN) MOLE_PER_LITRE_BAR = UnitDescriptor('mole per litre bar', 'L27', '''(mol/l)/bar''') ALL_UNITS.append(MOLE_PER_LITRE_BAR) MOLE_PER_CUBIC_METRE_KELVIN = UnitDescriptor('mole per cubic metre kelvin', 'L28', '''(mol/m³)/K''') ALL_UNITS.append(MOLE_PER_CUBIC_METRE_KELVIN) MOLE_PER_CUBIC_METRE_BAR = UnitDescriptor('mole per cubic metre bar', 'L29', '''(mol/m³)/bar''') ALL_UNITS.append(MOLE_PER_CUBIC_METRE_BAR) MOLE_PER_MINUTE = UnitDescriptor('mole per minute', 'L30', '''mol/min''') ALL_UNITS.append(MOLE_PER_MINUTE) MILLIROENTGEN_AEQUIVALENT_MEN = UnitDescriptor('milliroentgen aequivalent men', 'L31', '''mrem''') ALL_UNITS.append(MILLIROENTGEN_AEQUIVALENT_MEN) NANOGRAM_PER_KILOGRAM = UnitDescriptor('nanogram per kilogram', 'L32', '''ng/kg''') ALL_UNITS.append(NANOGRAM_PER_KILOGRAM) OUNCE_AVOIRDUPOIS_PER_DAY = UnitDescriptor('ounce (avoirdupois) per day', 'L33', '''oz/d''') 
ALL_UNITS.append(OUNCE_AVOIRDUPOIS_PER_DAY) OUNCE_AVOIRDUPOIS_PER_HOUR = UnitDescriptor('ounce (avoirdupois) per hour', 'L34', '''oz/h''') ALL_UNITS.append(OUNCE_AVOIRDUPOIS_PER_HOUR) OUNCE_AVOIRDUPOIS_PER_MINUTE = UnitDescriptor('ounce (avoirdupois) per minute', 'L35', '''oz/min''') ALL_UNITS.append(OUNCE_AVOIRDUPOIS_PER_MINUTE) OUNCE_AVOIRDUPOIS_PER_SECOND = UnitDescriptor('ounce (avoirdupois) per second', 'L36', '''oz/s''') ALL_UNITS.append(OUNCE_AVOIRDUPOIS_PER_SECOND) OUNCE_AVOIRDUPOIS_PER_GALLON_UK = UnitDescriptor('ounce (avoirdupois) per gallon (UK)', 'L37', '''oz/gal (UK)''') ALL_UNITS.append(OUNCE_AVOIRDUPOIS_PER_GALLON_UK) OUNCE_AVOIRDUPOIS_PER_GALLON_US = UnitDescriptor('ounce (avoirdupois) per gallon (US)', 'L38', '''oz/gal (US)''') ALL_UNITS.append(OUNCE_AVOIRDUPOIS_PER_GALLON_US) OUNCE_AVOIRDUPOIS_PER_CUBIC_INCH = UnitDescriptor('ounce (avoirdupois) per cubic inch', 'L39', '''oz/in³''') ALL_UNITS.append(OUNCE_AVOIRDUPOIS_PER_CUBIC_INCH) OUNCE_AVOIRDUPOIS_FORCE = UnitDescriptor('ounce (avoirdupois)-force', 'L40', '''ozf''') ALL_UNITS.append(OUNCE_AVOIRDUPOIS_FORCE) OUNCE_AVOIRDUPOIS_FORCE_INCH = UnitDescriptor('ounce (avoirdupois)-force inch', 'L41', '''ozf·in''') ALL_UNITS.append(OUNCE_AVOIRDUPOIS_FORCE_INCH) PICOSIEMENS_PER_METRE = UnitDescriptor('picosiemens per metre', 'L42', '''pS/m''') ALL_UNITS.append(PICOSIEMENS_PER_METRE) PECK_UK = UnitDescriptor('peck (UK)', 'L43', '''pk (UK)''') ALL_UNITS.append(PECK_UK) PECK_UK_PER_DAY = UnitDescriptor('peck (UK) per day', 'L44', '''pk (UK)/d''') ALL_UNITS.append(PECK_UK_PER_DAY) PECK_UK_PER_HOUR = UnitDescriptor('peck (UK) per hour', 'L45', '''pk (UK)/h''') ALL_UNITS.append(PECK_UK_PER_HOUR) PECK_UK_PER_MINUTE = UnitDescriptor('peck (UK) per minute', 'L46', '''pk (UK)/min''') ALL_UNITS.append(PECK_UK_PER_MINUTE) PECK_UK_PER_SECOND = UnitDescriptor('peck (UK) per second', 'L47', '''pk (UK)/s''') ALL_UNITS.append(PECK_UK_PER_SECOND) PECK_US_DRY_PER_DAY = UnitDescriptor('peck (US dry) per day', 'L48', '''pk (US dry)/d''') ALL_UNITS.append(PECK_US_DRY_PER_DAY) PECK_US_DRY_PER_HOUR = UnitDescriptor('peck (US dry) per hour', 'L49', '''pk (US dry)/h''') ALL_UNITS.append(PECK_US_DRY_PER_HOUR) PECK_US_DRY_PER_MINUTE = UnitDescriptor('peck (US dry) per minute', 'L50', '''pk (US dry)/min''') ALL_UNITS.append(PECK_US_DRY_PER_MINUTE) PECK_US_DRY_PER_SECOND = UnitDescriptor('peck (US dry) per second', 'L51', '''pk (US dry)/s''') ALL_UNITS.append(PECK_US_DRY_PER_SECOND) PSI_PER_PSI = UnitDescriptor('psi per psi', 'L52', '''psi/psi''') ALL_UNITS.append(PSI_PER_PSI) PINT_UK_PER_DAY = UnitDescriptor('pint (UK) per day', 'L53', '''pt (UK)/d''') ALL_UNITS.append(PINT_UK_PER_DAY) PINT_UK_PER_HOUR = UnitDescriptor('pint (UK) per hour', 'L54', '''pt (UK)/h''') ALL_UNITS.append(PINT_UK_PER_HOUR) PINT_UK_PER_MINUTE = UnitDescriptor('pint (UK) per minute', 'L55', '''pt (UK)/min''') ALL_UNITS.append(PINT_UK_PER_MINUTE) PINT_UK_PER_SECOND = UnitDescriptor('pint (UK) per second', 'L56', '''pt (UK)/s''') ALL_UNITS.append(PINT_UK_PER_SECOND) PINT_US_LIQUID_PER_DAY = UnitDescriptor('pint (US liquid) per day', 'L57', '''pt (US liq.)/d''') ALL_UNITS.append(PINT_US_LIQUID_PER_DAY) PINT_US_LIQUID_PER_HOUR = UnitDescriptor('pint (US liquid) per hour', 'L58', '''pt (US liq.)/h''') ALL_UNITS.append(PINT_US_LIQUID_PER_HOUR) PINT_US_LIQUID_PER_MINUTE = UnitDescriptor('pint (US liquid) per minute', 'L59', '''pt (US liq.)/min''') ALL_UNITS.append(PINT_US_LIQUID_PER_MINUTE) PINT_US_LIQUID_PER_SECOND = UnitDescriptor('pint (US liquid) per second', 'L60', '''pt (US 
liq.)/s''') ALL_UNITS.append(PINT_US_LIQUID_PER_SECOND) PINT_US_DRY = UnitDescriptor('pint (US dry)', 'L61', '''pt (US dry)''') ALL_UNITS.append(PINT_US_DRY) QUART_US_DRY = UnitDescriptor('quart (US dry)', 'L62', '''qt (US dry)''') ALL_UNITS.append(QUART_US_DRY) SLUG_PER_DAY = UnitDescriptor('slug per day', 'L63', '''slug/d''') ALL_UNITS.append(SLUG_PER_DAY) SLUG_PER_FOOT_SECOND = UnitDescriptor('slug per foot second', 'L64', '''slug/(ft·s)''') ALL_UNITS.append(SLUG_PER_FOOT_SECOND) SLUG_PER_CUBIC_FOOT = UnitDescriptor('slug per cubic foot', 'L65', '''slug/ft³''') ALL_UNITS.append(SLUG_PER_CUBIC_FOOT) SLUG_PER_HOUR = UnitDescriptor('slug per hour', 'L66', '''slug/h''') ALL_UNITS.append(SLUG_PER_HOUR) SLUG_PER_MINUTE = UnitDescriptor('slug per minute', 'L67', '''slug/min''') ALL_UNITS.append(SLUG_PER_MINUTE) SLUG_PER_SECOND = UnitDescriptor('slug per second', 'L68', '''slug/s''') ALL_UNITS.append(SLUG_PER_SECOND) TONNE_PER_KELVIN = UnitDescriptor('tonne per kelvin', 'L69', '''t/K''') ALL_UNITS.append(TONNE_PER_KELVIN) TONNE_PER_BAR = UnitDescriptor('tonne per bar', 'L70', '''t/bar''') ALL_UNITS.append(TONNE_PER_BAR) TONNE_PER_DAY = UnitDescriptor('tonne per day', 'L71', '''t/d''') ALL_UNITS.append(TONNE_PER_DAY) TONNE_PER_DAY_KELVIN = UnitDescriptor('tonne per day kelvin', 'L72', '''(t/d)/K''') ALL_UNITS.append(TONNE_PER_DAY_KELVIN) TONNE_PER_DAY_BAR = UnitDescriptor('tonne per day bar', 'L73', '''(t/d)/bar''') ALL_UNITS.append(TONNE_PER_DAY_BAR) TONNE_PER_HOUR_KELVIN = UnitDescriptor('tonne per hour kelvin', 'L74', '''(t/h)/K''') ALL_UNITS.append(TONNE_PER_HOUR_KELVIN) TONNE_PER_HOUR_BAR = UnitDescriptor('tonne per hour bar', 'L75', '''(t/h)/bar''') ALL_UNITS.append(TONNE_PER_HOUR_BAR) TONNE_PER_CUBIC_METRE_KELVIN = UnitDescriptor('tonne per cubic metre kelvin', 'L76', '''(t/m³)/K''') ALL_UNITS.append(TONNE_PER_CUBIC_METRE_KELVIN) TONNE_PER_CUBIC_METRE_BAR = UnitDescriptor('tonne per cubic metre bar', 'L77', '''(t/m³)/bar''') ALL_UNITS.append(TONNE_PER_CUBIC_METRE_BAR) TONNE_PER_MINUTE = UnitDescriptor('tonne per minute', 'L78', '''t/min''') ALL_UNITS.append(TONNE_PER_MINUTE) TONNE_PER_MINUTE_KELVIN = UnitDescriptor('tonne per minute kelvin', 'L79', '''(t/min)/K''') ALL_UNITS.append(TONNE_PER_MINUTE_KELVIN) TONNE_PER_MINUTE_BAR = UnitDescriptor('tonne per minute bar', 'L80', '''(t/min)/bar''') ALL_UNITS.append(TONNE_PER_MINUTE_BAR) TONNE_PER_SECOND = UnitDescriptor('tonne per second', 'L81', '''t/s''') ALL_UNITS.append(TONNE_PER_SECOND) TONNE_PER_SECOND_KELVIN = UnitDescriptor('tonne per second kelvin', 'L82', '''(t/s)/K''') ALL_UNITS.append(TONNE_PER_SECOND_KELVIN) TONNE_PER_SECOND_BAR = UnitDescriptor('tonne per second bar', 'L83', '''(t/s)/bar''') ALL_UNITS.append(TONNE_PER_SECOND_BAR) TON_UK_SHIPPING = UnitDescriptor('ton (UK shipping)', 'L84', '''British shipping ton''') ALL_UNITS.append(TON_UK_SHIPPING) TON_LONG_PER_DAY = UnitDescriptor('ton long per day', 'L85', '''ton (UK)/d''') ALL_UNITS.append(TON_LONG_PER_DAY) TON_US_SHIPPING = UnitDescriptor('ton (US shipping)', 'L86', '''(US) shipping ton''') ALL_UNITS.append(TON_US_SHIPPING) TON_SHORT_PER_DEGREE_FAHRENHEIT = UnitDescriptor('ton short per degree Fahrenheit', 'L87', '''ton (US)/°F''') ALL_UNITS.append(TON_SHORT_PER_DEGREE_FAHRENHEIT) TON_SHORT_PER_DAY = UnitDescriptor('ton short per day', 'L88', '''ton (US)/d''') ALL_UNITS.append(TON_SHORT_PER_DAY) TON_SHORT_PER_HOUR_DEGREE_FAHRENHEIT = UnitDescriptor('ton short per hour degree Fahrenheit', 'L89', '''ton (US)/(h·°F)''') ALL_UNITS.append(TON_SHORT_PER_HOUR_DEGREE_FAHRENHEIT) 
TON_SHORT_PER_HOUR_PSI = UnitDescriptor('ton short per hour psi', 'L90', '''(ton (US)/h)/psi''') ALL_UNITS.append(TON_SHORT_PER_HOUR_PSI) TON_SHORT_PER_PSI = UnitDescriptor('ton short per psi', 'L91', '''ton (US)/psi''') ALL_UNITS.append(TON_SHORT_PER_PSI) TON_UK_LONG_PER_CUBIC_YARD = UnitDescriptor('ton (UK long) per cubic yard', 'L92', '''ton.l/yd³ (UK)''') ALL_UNITS.append(TON_UK_LONG_PER_CUBIC_YARD) TON_US_SHORT_PER_CUBIC_YARD = UnitDescriptor('ton (US short) per cubic yard', 'L93', '''ton.s/yd³ (US)''') ALL_UNITS.append(TON_US_SHORT_PER_CUBIC_YARD) TON_FORCE_US_SHORT = UnitDescriptor('ton-force (US short)', 'L94', '''ton.sh-force''') ALL_UNITS.append(TON_FORCE_US_SHORT) COMMON_YEAR = UnitDescriptor('common year', 'L95', '''y (365 days)''') ALL_UNITS.append(COMMON_YEAR) SIDEREAL_YEAR = UnitDescriptor('sidereal year', 'L96', '''y (sidereal)''') ALL_UNITS.append(SIDEREAL_YEAR) YARD_PER_DEGREE_FAHRENHEIT = UnitDescriptor('yard per degree Fahrenheit', 'L98', '''yd/°F''') ALL_UNITS.append(YARD_PER_DEGREE_FAHRENHEIT) YARD_PER_PSI = UnitDescriptor('yard per psi', 'L99', '''yd/psi''') ALL_UNITS.append(YARD_PER_PSI) POUND_PER_CUBIC_INCH = UnitDescriptor('pound per cubic inch', 'LA', '''lb/in³''') ALL_UNITS.append(POUND_PER_CUBIC_INCH) LACTOSE_EXCESS_PERCENTAGE = UnitDescriptor('lactose excess percentage', 'LAC', '''''') ALL_UNITS.append(LACTOSE_EXCESS_PERCENTAGE) POUND = UnitDescriptor('pound', 'LBR', '''lb''') ALL_UNITS.append(POUND) TROY_POUND_US = UnitDescriptor('troy pound (US)', 'LBT', '''''') ALL_UNITS.append(TROY_POUND_US) LINEAR_CENTIMETRE = UnitDescriptor('linear centimetre', 'LC', '''''') ALL_UNITS.append(LINEAR_CENTIMETRE) LITRE_PER_DAY = UnitDescriptor('litre per day', 'LD', '''l/d''') ALL_UNITS.append(LITRE_PER_DAY) LITE = UnitDescriptor('lite', 'LE', '''''') ALL_UNITS.append(LITE) LEAF = UnitDescriptor('leaf', 'LEF', '''''') ALL_UNITS.append(LEAF) LINEAR_FOOT = UnitDescriptor('linear foot', 'LF', '''''') ALL_UNITS.append(LINEAR_FOOT) LABOUR_HOUR = UnitDescriptor('labour hour', 'LH', '''''') ALL_UNITS.append(LABOUR_HOUR) LINEAR_INCH = UnitDescriptor('linear inch', 'LI', '''''') ALL_UNITS.append(LINEAR_INCH) LARGE_SPRAY = UnitDescriptor('large spray', 'LJ', '''''') ALL_UNITS.append(LARGE_SPRAY) LINK = UnitDescriptor('link', 'LK', '''''') ALL_UNITS.append(LINK) LINEAR_METRE = UnitDescriptor('linear metre', 'LM', '''''') ALL_UNITS.append(LINEAR_METRE) LENGTH = UnitDescriptor('length', 'LN', '''''') ALL_UNITS.append(LENGTH) LOT_UNIT_OF_PROCUREMENT = UnitDescriptor('lot [unit of procurement]', 'LO', '''''') ALL_UNITS.append(LOT_UNIT_OF_PROCUREMENT) LIQUID_POUND = UnitDescriptor('liquid pound', 'LP', '''''') ALL_UNITS.append(LIQUID_POUND) LITRE_OF_PURE_ALCOHOL = UnitDescriptor('litre of pure alcohol', 'LPA', '''''') ALL_UNITS.append(LITRE_OF_PURE_ALCOHOL) LAYER = UnitDescriptor('layer', 'LR', '''''') ALL_UNITS.append(LAYER) LUMP_SUM = UnitDescriptor('lump sum', 'LS', '''''') ALL_UNITS.append(LUMP_SUM) TON_UK_OR_LONG_TON_US = UnitDescriptor('ton (UK) or long ton (US)', 'LTN', '''ton (UK)''') ALL_UNITS.append(TON_UK_OR_LONG_TON_US) LITRE = UnitDescriptor('litre', 'LTR', '''l''') ALL_UNITS.append(LITRE) METRIC_TON_LUBRICATING_OIL = UnitDescriptor('metric ton, lubricating oil', 'LUB', '''''') ALL_UNITS.append(METRIC_TON_LUBRICATING_OIL) LUMEN = UnitDescriptor('lumen', 'LUM', '''lm''') ALL_UNITS.append(LUMEN) LUX = UnitDescriptor('lux', 'LUX', '''lx''') ALL_UNITS.append(LUX) LINEAR_YARD_PER_POUND = UnitDescriptor('linear yard per pound', 'LX', '''''') ALL_UNITS.append(LINEAR_YARD_PER_POUND) 
LINEAR_YARD = UnitDescriptor('linear yard', 'LY', '''''') ALL_UNITS.append(LINEAR_YARD) MAGNETIC_TAPE = UnitDescriptor('magnetic tape', 'M0', '''''') ALL_UNITS.append(MAGNETIC_TAPE) MILLIGRAM_PER_LITRE = UnitDescriptor('milligram per litre', 'M1', '''mg/l''') ALL_UNITS.append(MILLIGRAM_PER_LITRE) RECIPROCAL_CUBIC_YARD = UnitDescriptor('reciprocal cubic yard', 'M10', '''1/yd³''') ALL_UNITS.append(RECIPROCAL_CUBIC_YARD) CUBIC_YARD_PER_DEGREE_FAHRENHEIT = UnitDescriptor('cubic yard per degree Fahrenheit', 'M11', '''yd³/°F''') ALL_UNITS.append(CUBIC_YARD_PER_DEGREE_FAHRENHEIT) CUBIC_YARD_PER_DAY = UnitDescriptor('cubic yard per day', 'M12', '''yd³/d''') ALL_UNITS.append(CUBIC_YARD_PER_DAY) CUBIC_YARD_PER_HOUR = UnitDescriptor('cubic yard per hour', 'M13', '''yd³/h''') ALL_UNITS.append(CUBIC_YARD_PER_HOUR) CUBIC_YARD_PER_PSI = UnitDescriptor('cubic yard per psi', 'M14', '''yd³/psi''') ALL_UNITS.append(CUBIC_YARD_PER_PSI) CUBIC_YARD_PER_MINUTE = UnitDescriptor('cubic yard per minute', 'M15', '''yd³/min''') ALL_UNITS.append(CUBIC_YARD_PER_MINUTE) CUBIC_YARD_PER_SECOND = UnitDescriptor('cubic yard per second', 'M16', '''yd³/s''') ALL_UNITS.append(CUBIC_YARD_PER_SECOND) KILOHERTZ_METRE = UnitDescriptor('kilohertz metre', 'M17', '''kHz·m''') ALL_UNITS.append(KILOHERTZ_METRE) GIGAHERTZ_METRE = UnitDescriptor('gigahertz metre', 'M18', '''GHz·m''') ALL_UNITS.append(GIGAHERTZ_METRE) BEAUFORT = UnitDescriptor('Beaufort', 'M19', '''Bft''') ALL_UNITS.append(BEAUFORT) RECIPROCAL_MEGAKELVIN_OR_MEGAKELVIN_TO_THE_POWER_MINUS_ONE = UnitDescriptor('reciprocal megakelvin or megakelvin to the power minus one', 'M20', '''1/MK''') ALL_UNITS.append(RECIPROCAL_MEGAKELVIN_OR_MEGAKELVIN_TO_THE_POWER_MINUS_ONE) RECIPROCAL_KILOVOLT_AMPERE_RECIPROCAL_HOUR = UnitDescriptor('reciprocal kilovolt - ampere reciprocal hour', 'M21', '''1/kVAh''') ALL_UNITS.append(RECIPROCAL_KILOVOLT_AMPERE_RECIPROCAL_HOUR) MILLILITRE_PER_SQUARE_CENTIMETRE_MINUTE = UnitDescriptor('millilitre per square centimetre minute', 'M22', '''(ml/min)/cm²''') ALL_UNITS.append(MILLILITRE_PER_SQUARE_CENTIMETRE_MINUTE) NEWTON_PER_CENTIMETRE = UnitDescriptor('newton per centimetre', 'M23', '''N/cm''') ALL_UNITS.append(NEWTON_PER_CENTIMETRE) OHM_KILOMETRE = UnitDescriptor('ohm kilometre', 'M24', '''Ω·km''') ALL_UNITS.append(OHM_KILOMETRE) PERCENT_PER_DEGREE_CELSIUS = UnitDescriptor('percent per degree Celsius', 'M25', '''%/°C''') ALL_UNITS.append(PERCENT_PER_DEGREE_CELSIUS) GIGAOHM_PER_METRE = UnitDescriptor('gigaohm per metre', 'M26', '''GΩ/m''') ALL_UNITS.append(GIGAOHM_PER_METRE) MEGAHERTZ_METRE = UnitDescriptor('megahertz metre', 'M27', '''MHz·m''') ALL_UNITS.append(MEGAHERTZ_METRE) KILOGRAM_PER_KILOGRAM = UnitDescriptor('kilogram per kilogram', 'M29', '''kg/kg''') ALL_UNITS.append(KILOGRAM_PER_KILOGRAM) RECIPROCAL_VOLT_AMPERE_RECIPROCAL_SECOND = UnitDescriptor('reciprocal volt - ampere reciprocal second', 'M30', '''1/(V·A·s)''') ALL_UNITS.append(RECIPROCAL_VOLT_AMPERE_RECIPROCAL_SECOND) KILOGRAM_PER_KILOMETRE = UnitDescriptor('kilogram per kilometre', 'M31', '''kg/km''') ALL_UNITS.append(KILOGRAM_PER_KILOMETRE) PASCAL_SECOND_PER_LITRE = UnitDescriptor('pascal second per litre', 'M32', '''Pa·s/l''') ALL_UNITS.append(PASCAL_SECOND_PER_LITRE) MILLIMOLE_PER_LITRE = UnitDescriptor('millimole per litre', 'M33', '''mmol/l''') ALL_UNITS.append(MILLIMOLE_PER_LITRE) NEWTON_METRE_PER_SQUARE_METRE = UnitDescriptor('newton metre per square metre', 'M34', '''N·m/m²''') ALL_UNITS.append(NEWTON_METRE_PER_SQUARE_METRE) MILLIVOLT_AMPERE = UnitDescriptor('millivolt - ampere', 
'M35', '''mV·A''') ALL_UNITS.append(MILLIVOLT_AMPERE) THIRTY_DAY_MONTH = UnitDescriptor('30-day month', 'M36', '''mo (30 days)''') ALL_UNITS.append(THIRTY_DAY_MONTH) ACTUAL_PER_360 = UnitDescriptor('actual/360', 'M37', '''y (360 days)''') ALL_UNITS.append(ACTUAL_PER_360) KILOMETRE_PER_SECOND_SQUARED = UnitDescriptor('kilometre per second squared', 'M38', '''km/s²''') ALL_UNITS.append(KILOMETRE_PER_SECOND_SQUARED) CENTIMETRE_PER_SECOND_SQUARED = UnitDescriptor('centimetre per second squared', 'M39', '''cm/s²''') ALL_UNITS.append(CENTIMETRE_PER_SECOND_SQUARED) MONETARY_VALUE = UnitDescriptor('monetary value', 'M4', '''''') ALL_UNITS.append(MONETARY_VALUE) YARD_PER_SECOND_SQUARED = UnitDescriptor('yard per second squared', 'M40', '''yd/s²''') ALL_UNITS.append(YARD_PER_SECOND_SQUARED) MILLIMETRE_PER_SECOND_SQUARED = UnitDescriptor('millimetre per second squared', 'M41', '''mm/s²''') ALL_UNITS.append(MILLIMETRE_PER_SECOND_SQUARED) MILE_STATUTE_MILE_PER_SECOND_SQUARED = UnitDescriptor('mile (statute mile) per second squared', 'M42', '''mi/s²''') ALL_UNITS.append(MILE_STATUTE_MILE_PER_SECOND_SQUARED) MIL = UnitDescriptor('mil', 'M43', '''mil''') ALL_UNITS.append(MIL) REVOLUTION = UnitDescriptor('revolution', 'M44', '''rev''') ALL_UNITS.append(REVOLUTION) DEGREE_UNIT_OF_ANGLE_PER_SECOND_SQUARED = UnitDescriptor('degree [unit of angle] per second squared', 'M45', '''°/s²''') ALL_UNITS.append(DEGREE_UNIT_OF_ANGLE_PER_SECOND_SQUARED) REVOLUTION_PER_MINUTE = UnitDescriptor('revolution per minute', 'M46', '''r/min''') ALL_UNITS.append(REVOLUTION_PER_MINUTE) CIRCULAR_MIL = UnitDescriptor('circular mil', 'M47', '''cmil''') ALL_UNITS.append(CIRCULAR_MIL) SQUARE_MILE_BASED_ON_U_S_SURVEY_FOOT = UnitDescriptor('square mile (based on U.S. survey foot)', 'M48', '''mi² (US survey)''') ALL_UNITS.append(SQUARE_MILE_BASED_ON_U_S_SURVEY_FOOT) CHAIN_BASED_ON_U_S_SURVEY_FOOT = UnitDescriptor('chain (based on U.S. survey foot)', 'M49', '''ch (US survey) ''') ALL_UNITS.append(CHAIN_BASED_ON_U_S_SURVEY_FOOT) MICROCURIE = UnitDescriptor('microcurie', 'M5', '''µCi''') ALL_UNITS.append(MICROCURIE) FURLONG = UnitDescriptor('furlong', 'M50', '''fur''') ALL_UNITS.append(FURLONG) FOOT_U_S_SURVEY = UnitDescriptor('foot (U.S. survey)', 'M51', '''ft (US survey) ''') ALL_UNITS.append(FOOT_U_S_SURVEY) MILE_BASED_ON_U_S_SURVEY_FOOT = UnitDescriptor('mile (based on U.S. 
survey foot)', 'M52', '''mi (US survey) ''') ALL_UNITS.append(MILE_BASED_ON_U_S_SURVEY_FOOT) METRE_PER_PASCAL = UnitDescriptor('metre per pascal', 'M53', '''m/Pa''') ALL_UNITS.append(METRE_PER_PASCAL) METRE_PER_RADIANT = UnitDescriptor('metre per radiant', 'M55', '''m/rad''') ALL_UNITS.append(METRE_PER_RADIANT) SHAKE = UnitDescriptor('shake', 'M56', '''shake''') ALL_UNITS.append(SHAKE) MILE_PER_MINUTE = UnitDescriptor('mile per minute', 'M57', '''mi/min''') ALL_UNITS.append(MILE_PER_MINUTE) MILE_PER_SECOND = UnitDescriptor('mile per second', 'M58', '''mi/s''') ALL_UNITS.append(MILE_PER_SECOND) METRE_PER_SECOND_PASCAL = UnitDescriptor('metre per second pascal', 'M59', '''(m/s)/Pa''') ALL_UNITS.append(METRE_PER_SECOND_PASCAL) METRE_PER_HOUR = UnitDescriptor('metre per hour', 'M60', '''m/h''') ALL_UNITS.append(METRE_PER_HOUR) INCH_PER_YEAR = UnitDescriptor('inch per year', 'M61', '''in/y''') ALL_UNITS.append(INCH_PER_YEAR) KILOMETRE_PER_SECOND = UnitDescriptor('kilometre per second', 'M62', '''km/s''') ALL_UNITS.append(KILOMETRE_PER_SECOND) YARD_PER_SECOND = UnitDescriptor('yard per second', 'M64', '''yd/s''') ALL_UNITS.append(YARD_PER_SECOND) YARD_PER_MINUTE = UnitDescriptor('yard per minute', 'M65', '''yd/min''') ALL_UNITS.append(YARD_PER_MINUTE) YARD_PER_HOUR = UnitDescriptor('yard per hour', 'M66', '''yd/h''') ALL_UNITS.append(YARD_PER_HOUR) ACRE_FOOT_BASED_ON_U_S_SURVEY_FOOT = UnitDescriptor('acre-foot (based on U.S. survey foot)', 'M67', '''acre-ft (US survey)''') ALL_UNITS.append(ACRE_FOOT_BASED_ON_U_S_SURVEY_FOOT) CORD_12EIGHT_FT3 = UnitDescriptor('cord (128 ft3)', 'M68', '''cord''') ALL_UNITS.append(CORD_12EIGHT_FT3) CUBIC_MILE_UK_STATUTE = UnitDescriptor('cubic mile (UK statute)', 'M69', '''mi³''') ALL_UNITS.append(CUBIC_MILE_UK_STATUTE) MICRO_INCH = UnitDescriptor('micro-inch', 'M7', '''µin''') ALL_UNITS.append(MICRO_INCH) TON_REGISTER = UnitDescriptor('ton, register', 'M70', '''RT''') ALL_UNITS.append(TON_REGISTER) CUBIC_METRE_PER_PASCAL = UnitDescriptor('cubic metre per pascal', 'M71', '''m³/Pa''') ALL_UNITS.append(CUBIC_METRE_PER_PASCAL) BEL = UnitDescriptor('bel', 'M72', '''B''') ALL_UNITS.append(BEL) KILOGRAM_PER_CUBIC_METRE_PASCAL = UnitDescriptor('kilogram per cubic metre pascal', 'M73', '''(kg/m³)/Pa''') ALL_UNITS.append(KILOGRAM_PER_CUBIC_METRE_PASCAL) KILOGRAM_PER_PASCAL = UnitDescriptor('kilogram per pascal', 'M74', '''kg/Pa''') ALL_UNITS.append(KILOGRAM_PER_PASCAL) KILOPOUND_FORCE = UnitDescriptor('kilopound-force', 'M75', '''kip''') ALL_UNITS.append(KILOPOUND_FORCE) POUNDAL = UnitDescriptor('poundal', 'M76', '''pdl''') ALL_UNITS.append(POUNDAL) KILOGRAM_METRE_PER_SECOND_SQUARED = UnitDescriptor('kilogram metre per second squared', 'M77', '''kg·m/s²''') ALL_UNITS.append(KILOGRAM_METRE_PER_SECOND_SQUARED) POND = UnitDescriptor('pond', 'M78', '''p''') ALL_UNITS.append(POND) SQUARE_FOOT_PER_HOUR = UnitDescriptor('square foot per hour', 'M79', '''ft²/h''') ALL_UNITS.append(SQUARE_FOOT_PER_HOUR) STOKES_PER_PASCAL = UnitDescriptor('stokes per pascal', 'M80', '''St/Pa''') ALL_UNITS.append(STOKES_PER_PASCAL) SQUARE_CENTIMETRE_PER_SECOND = UnitDescriptor('square centimetre per second', 'M81', '''cm²/s''') ALL_UNITS.append(SQUARE_CENTIMETRE_PER_SECOND) SQUARE_METRE_PER_SECOND_PASCAL = UnitDescriptor('square metre per second pascal', 'M82', '''(m²/s)/Pa''') ALL_UNITS.append(SQUARE_METRE_PER_SECOND_PASCAL) POUND_PER_YARD = UnitDescriptor('pound per yard', 'M84', '''lb/yd''') ALL_UNITS.append(POUND_PER_YARD) TON_ASSAY = UnitDescriptor('ton, assay', 'M85', '''''') 
ALL_UNITS.append(TON_ASSAY) PFUND = UnitDescriptor('pfund', 'M86', '''pfd''') ALL_UNITS.append(PFUND) KILOGRAM_PER_SECOND_PASCAL = UnitDescriptor('kilogram per second pascal', 'M87', '''(kg/s)/Pa''') ALL_UNITS.append(KILOGRAM_PER_SECOND_PASCAL) TONNE_PER_MONTH = UnitDescriptor('tonne per month', 'M88', '''t/mo''') ALL_UNITS.append(TONNE_PER_MONTH) TONNE_PER_YEAR = UnitDescriptor('tonne per year', 'M89', '''t/y''') ALL_UNITS.append(TONNE_PER_YEAR) MILLION_BTU_PER_1000_CUBIC_FOOT = UnitDescriptor('million Btu per 1000 cubic foot', 'M9', '''MBTU/kft³''') ALL_UNITS.append(MILLION_BTU_PER_1000_CUBIC_FOOT) KILOPOUND_PER_HOUR = UnitDescriptor('kilopound per hour', 'M90', '''klb/h''') ALL_UNITS.append(KILOPOUND_PER_HOUR) POUND_PER_POUND = UnitDescriptor('pound per pound', 'M91', '''lb/lb''') ALL_UNITS.append(POUND_PER_POUND) POUND_FORCE_FOOT = UnitDescriptor('pound-force foot', 'M92', '''lbf·ft''') ALL_UNITS.append(POUND_FORCE_FOOT) NEWTON_METRE_PER_RADIAN = UnitDescriptor('newton metre per radian', 'M93', '''N·m/rad''') ALL_UNITS.append(NEWTON_METRE_PER_RADIAN) KILOGRAM_METRE = UnitDescriptor('kilogram metre', 'M94', '''kg·m''') ALL_UNITS.append(KILOGRAM_METRE) POUNDAL_FOOT = UnitDescriptor('poundal foot', 'M95', '''pdl·ft''') ALL_UNITS.append(POUNDAL_FOOT) POUNDAL_INCH = UnitDescriptor('poundal inch', 'M96', '''pdl·in''') ALL_UNITS.append(POUNDAL_INCH) DYNE_METRE = UnitDescriptor('dyne metre', 'M97', '''dyn·m''') ALL_UNITS.append(DYNE_METRE) KILOGRAM_CENTIMETRE_PER_SECOND = UnitDescriptor('kilogram centimetre per second', 'M98', '''kg·(cm/s)''') ALL_UNITS.append(KILOGRAM_CENTIMETRE_PER_SECOND) GRAM_CENTIMETRE_PER_SECOND = UnitDescriptor('gram centimetre per second', 'M99', '''g·(cm/s)''') ALL_UNITS.append(GRAM_CENTIMETRE_PER_SECOND) MACHINE_PER_UNIT = UnitDescriptor('machine per unit', 'MA', '''''') ALL_UNITS.append(MACHINE_PER_UNIT) MEGAVOLT_AMPERE_REACTIVE_HOUR = UnitDescriptor('megavolt ampere reactive hour', 'MAH', '''Mvar·h''') ALL_UNITS.append(MEGAVOLT_AMPERE_REACTIVE_HOUR) MEGALITRE = UnitDescriptor('megalitre', 'MAL', '''Ml''') ALL_UNITS.append(MEGALITRE) MEGAMETRE = UnitDescriptor('megametre', 'MAM', '''Mm''') ALL_UNITS.append(MEGAMETRE) MEGAVAR = UnitDescriptor('megavar', 'MAR', '''Mvar''') ALL_UNITS.append(MEGAVAR) MEGAWATT = UnitDescriptor('megawatt', 'MAW', '''MW''') ALL_UNITS.append(MEGAWATT) THOUSAND_STANDARD_BRICK_EQUIVALENT = UnitDescriptor('thousand standard brick equivalent', 'MBE', '''''') ALL_UNITS.append(THOUSAND_STANDARD_BRICK_EQUIVALENT) THOUSAND_BOARD_FOOT = UnitDescriptor('thousand board foot', 'MBF', '''''') ALL_UNITS.append(THOUSAND_BOARD_FOOT) MILLIBAR = UnitDescriptor('millibar', 'MBR', '''mbar''') ALL_UNITS.append(MILLIBAR) MICROGRAM = UnitDescriptor('microgram', 'MC', '''µg''') ALL_UNITS.append(MICROGRAM) MILLICURIE = UnitDescriptor('millicurie', 'MCU', '''mCi''') ALL_UNITS.append(MILLICURIE) AIR_DRY_METRIC_TON = UnitDescriptor('air dry metric ton', 'MD', '''''') ALL_UNITS.append(AIR_DRY_METRIC_TON) MILLIGRAM_PER_SQUARE_FOOT_PER_SIDE = UnitDescriptor('milligram per square foot per side', 'MF', '''''') ALL_UNITS.append(MILLIGRAM_PER_SQUARE_FOOT_PER_SIDE) MILLIGRAM = UnitDescriptor('milligram', 'MGM', '''mg''') ALL_UNITS.append(MILLIGRAM) MEGAHERTZ = UnitDescriptor('megahertz', 'MHZ', '''MHz''') ALL_UNITS.append(MEGAHERTZ) SQUARE_MILE_STATUTE_MILE = UnitDescriptor('square mile (statute mile)', 'MIK', '''mi²''') ALL_UNITS.append(SQUARE_MILE_STATUTE_MILE) THOUSAND = UnitDescriptor('thousand', 'MIL', '''''') ALL_UNITS.append(THOUSAND) MINUTE_UNIT_OF_TIME = 
UnitDescriptor('minute [unit of time]', 'MIN', '''min''') ALL_UNITS.append(MINUTE_UNIT_OF_TIME) MILLION = UnitDescriptor('million', 'MIO', '''''') ALL_UNITS.append(MILLION) MILLION_INTERNATIONAL_UNIT = UnitDescriptor('million international unit', 'MIU', '''''') ALL_UNITS.append(MILLION_INTERNATIONAL_UNIT) MILLIGRAM_PER_SQUARE_INCH = UnitDescriptor('milligram per square inch', 'MK', '''mg/in²''') ALL_UNITS.append(MILLIGRAM_PER_SQUARE_INCH) MILLIARD = UnitDescriptor('milliard', 'MLD', '''''') ALL_UNITS.append(MILLIARD) MILLILITRE = UnitDescriptor('millilitre', 'MLT', '''ml''') ALL_UNITS.append(MILLILITRE) SQUARE_MILLIMETRE = UnitDescriptor('square millimetre', 'MMK', '''mm²''') ALL_UNITS.append(SQUARE_MILLIMETRE) CUBIC_MILLIMETRE = UnitDescriptor('cubic millimetre', 'MMQ', '''mm³''') ALL_UNITS.append(CUBIC_MILLIMETRE) MILLIMETRE = UnitDescriptor('millimetre', 'MMT', '''mm''') ALL_UNITS.append(MILLIMETRE) KILOGRAM_DRY_WEIGHT = UnitDescriptor('kilogram, dry weight', 'MND', '''''') ALL_UNITS.append(KILOGRAM_DRY_WEIGHT) MONTH = UnitDescriptor('month', 'MON', '''mo''') ALL_UNITS.append(MONTH) MEGAPASCAL = UnitDescriptor('megapascal', 'MPA', '''MPa''') ALL_UNITS.append(MEGAPASCAL) THOUSAND_METRE = UnitDescriptor('thousand metre', 'MQ', '''''') ALL_UNITS.append(THOUSAND_METRE) CUBIC_METRE_PER_HOUR = UnitDescriptor('cubic metre per hour', 'MQH', '''m³/h''') ALL_UNITS.append(CUBIC_METRE_PER_HOUR) CUBIC_METRE_PER_SECOND = UnitDescriptor('cubic metre per second', 'MQS', '''m³/s''') ALL_UNITS.append(CUBIC_METRE_PER_SECOND) METRE_PER_SECOND_SQUARED = UnitDescriptor('metre per second squared', 'MSK', '''m/s²''') ALL_UNITS.append(METRE_PER_SECOND_SQUARED) MAT = UnitDescriptor('mat', 'MT', '''''') ALL_UNITS.append(MAT) SQUARE_METRE = UnitDescriptor('square metre', 'MTK', '''m²''') ALL_UNITS.append(SQUARE_METRE) CUBIC_METRE = UnitDescriptor('cubic metre', 'MTQ', '''m³''') ALL_UNITS.append(CUBIC_METRE) METRE = UnitDescriptor('metre', 'MTR', '''m''') ALL_UNITS.append(METRE) METRE_PER_SECOND = UnitDescriptor('metre per second', 'MTS', '''m/s''') ALL_UNITS.append(METRE_PER_SECOND) NUMBER_OF_MULTS = UnitDescriptor('number of mults', 'MV', '''''') ALL_UNITS.append(NUMBER_OF_MULTS) MEGAVOLT_AMPERE = UnitDescriptor('megavolt - ampere', 'MVA', '''MV·A''') ALL_UNITS.append(MEGAVOLT_AMPERE) MEGAWATT_HOUR_1000_KW_H = UnitDescriptor('megawatt hour (1000 kW.h)', 'MWH', '''MW·h''') ALL_UNITS.append(MEGAWATT_HOUR_1000_KW_H) PEN_CALORIE = UnitDescriptor('pen calorie', 'N1', '''''') ALL_UNITS.append(PEN_CALORIE) POUND_FOOT_PER_SECOND = UnitDescriptor('pound foot per second', 'N10', '''lb·(ft/s)''') ALL_UNITS.append(POUND_FOOT_PER_SECOND) POUND_INCH_PER_SECOND = UnitDescriptor('pound inch per second', 'N11', '''lb·(in/s)''') ALL_UNITS.append(POUND_INCH_PER_SECOND) PFERDESTAERKE = UnitDescriptor('Pferdestaerke', 'N12', '''PS''') ALL_UNITS.append(PFERDESTAERKE) CENTIMETRE_OF_MERCURY_0_DEG_C = UnitDescriptor('centimetre of mercury (0 ºC)', 'N13', '''cmHg (0 ºC)''') ALL_UNITS.append(CENTIMETRE_OF_MERCURY_0_DEG_C) CENTIMETRE_OF_WATER_4_DEG_C = UnitDescriptor('centimetre of water (4 ºC)', 'N14', '''cmH₂O (4 °C)''') ALL_UNITS.append(CENTIMETRE_OF_WATER_4_DEG_C) FOOT_OF_WATER_39_2_DEG_F = UnitDescriptor('foot of water (39.2 ºF)', 'N15', '''ftH₂O (39,2 ºF)''') ALL_UNITS.append(FOOT_OF_WATER_39_2_DEG_F) INCH_OF_MERCURY_32_DEG_F = UnitDescriptor('inch of mercury (32 ºF)', 'N16', '''inHG (32 ºF)''') ALL_UNITS.append(INCH_OF_MERCURY_32_DEG_F) INCH_OF_MERCURY_60_DEG_F = UnitDescriptor('inch of mercury (60 ºF)', 'N17', '''inHg (60 ºF)''') 
ALL_UNITS.append(INCH_OF_MERCURY_60_DEG_F) INCH_OF_WATER_39_2_DEG_F = UnitDescriptor('inch of water (39.2 ºF)', 'N18', '''inH₂O (39,2 ºF)''') ALL_UNITS.append(INCH_OF_WATER_39_2_DEG_F) INCH_OF_WATER_60_DEG_F = UnitDescriptor('inch of water (60 ºF)', 'N19', '''inH₂O (60 ºF)''') ALL_UNITS.append(INCH_OF_WATER_60_DEG_F) NUMBER_OF_LINES = UnitDescriptor('number of lines', 'N2', '''''') ALL_UNITS.append(NUMBER_OF_LINES) KIP_PER_SQUARE_INCH = UnitDescriptor('kip per square inch', 'N20', '''ksi''') ALL_UNITS.append(KIP_PER_SQUARE_INCH) POUNDAL_PER_SQUARE_FOOT = UnitDescriptor('poundal per square foot', 'N21', '''pdl/ft²''') ALL_UNITS.append(POUNDAL_PER_SQUARE_FOOT) OUNCE_AVOIRDUPOIS_PER_SQUARE_INCH = UnitDescriptor('ounce (avoirdupois) per square inch', 'N22', '''oz/in²''') ALL_UNITS.append(OUNCE_AVOIRDUPOIS_PER_SQUARE_INCH) CONVENTIONAL_METRE_OF_WATER = UnitDescriptor('conventional metre of water', 'N23', '''mH₂O''') ALL_UNITS.append(CONVENTIONAL_METRE_OF_WATER) GRAM_PER_SQUARE_MILLIMETRE = UnitDescriptor('gram per square millimetre', 'N24', '''g/mm²''') ALL_UNITS.append(GRAM_PER_SQUARE_MILLIMETRE) POUND_PER_SQUARE_YARD = UnitDescriptor('pound per square yard', 'N25', '''lb/yd²''') ALL_UNITS.append(POUND_PER_SQUARE_YARD) POUNDAL_PER_SQUARE_INCH = UnitDescriptor('poundal per square inch', 'N26', '''pdl/in²''') ALL_UNITS.append(POUNDAL_PER_SQUARE_INCH) FOOT_TO_THE_FOURTH_POWER = UnitDescriptor('foot to the fourth power', 'N27', '''ft⁴''') ALL_UNITS.append(FOOT_TO_THE_FOURTH_POWER) CUBIC_DECIMETRE_PER_KILOGRAM = UnitDescriptor('cubic decimetre per kilogram', 'N28', '''dm³/kg''') ALL_UNITS.append(CUBIC_DECIMETRE_PER_KILOGRAM) CUBIC_FOOT_PER_POUND = UnitDescriptor('cubic foot per pound', 'N29', '''ft³/lb''') ALL_UNITS.append(CUBIC_FOOT_PER_POUND) PRINT_POINT = UnitDescriptor('print point', 'N3', '''''') ALL_UNITS.append(PRINT_POINT) CUBIC_INCH_PER_POUND = UnitDescriptor('cubic inch per pound', 'N30', '''in³/lb''') ALL_UNITS.append(CUBIC_INCH_PER_POUND) KILONEWTON_PER_METRE = UnitDescriptor('kilonewton per metre', 'N31', '''kN/m''') ALL_UNITS.append(KILONEWTON_PER_METRE) POUNDAL_PER_INCH = UnitDescriptor('poundal per inch', 'N32', '''pdl/in''') ALL_UNITS.append(POUNDAL_PER_INCH) POUND_FORCE_PER_YARD = UnitDescriptor('pound-force per yard', 'N33', '''lbf/yd''') ALL_UNITS.append(POUND_FORCE_PER_YARD) POUNDAL_SECOND_PER_SQUARE_FOOT = UnitDescriptor('poundal second per square foot', 'N34', '''(pdl/ft²)·s''') ALL_UNITS.append(POUNDAL_SECOND_PER_SQUARE_FOOT) POISE_PER_PASCAL = UnitDescriptor('poise per pascal', 'N35', '''P/Pa''') ALL_UNITS.append(POISE_PER_PASCAL) NEWTON_SECOND_PER_SQUARE_METRE = UnitDescriptor('newton second per square metre', 'N36', '''(N/m²)·s''') ALL_UNITS.append(NEWTON_SECOND_PER_SQUARE_METRE) KILOGRAM_PER_METRE_SECOND = UnitDescriptor('kilogram per metre second', 'N37', '''kg/(m·s)''') ALL_UNITS.append(KILOGRAM_PER_METRE_SECOND) KILOGRAM_PER_METRE_MINUTE = UnitDescriptor('kilogram per metre minute', 'N38', '''kg/(m·min)''') ALL_UNITS.append(KILOGRAM_PER_METRE_MINUTE) KILOGRAM_PER_METRE_DAY = UnitDescriptor('kilogram per metre day', 'N39', '''kg/(m·d)''') ALL_UNITS.append(KILOGRAM_PER_METRE_DAY) KILOGRAM_PER_METRE_HOUR = UnitDescriptor('kilogram per metre hour', 'N40', '''kg/(m·h)''') ALL_UNITS.append(KILOGRAM_PER_METRE_HOUR) GRAM_PER_CENTIMETRE_SECOND = UnitDescriptor('gram per centimetre second', 'N41', '''g/(cm·s)''') ALL_UNITS.append(GRAM_PER_CENTIMETRE_SECOND) POUNDAL_SECOND_PER_SQUARE_INCH = UnitDescriptor('poundal second per square inch', 'N42', '''(pdl/in²)·s''') 
ALL_UNITS.append(POUNDAL_SECOND_PER_SQUARE_INCH) POUND_PER_FOOT_MINUTE = UnitDescriptor('pound per foot minute', 'N43', '''lb/(ft·min)''') ALL_UNITS.append(POUND_PER_FOOT_MINUTE) POUND_PER_FOOT_DAY = UnitDescriptor('pound per foot day', 'N44', '''lb/(ft·d)''') ALL_UNITS.append(POUND_PER_FOOT_DAY) CUBIC_METRE_PER_SECOND_PASCAL = UnitDescriptor('cubic metre per second pascal', 'N45', '''(m³/s)/Pa''') ALL_UNITS.append(CUBIC_METRE_PER_SECOND_PASCAL) FOOT_POUNDAL = UnitDescriptor('foot poundal', 'N46', '''ft·pdl''') ALL_UNITS.append(FOOT_POUNDAL) INCH_POUNDAL = UnitDescriptor('inch poundal', 'N47', '''in·pdl''') ALL_UNITS.append(INCH_POUNDAL) WATT_PER_SQUARE_CENTIMETRE = UnitDescriptor('watt per square centimetre', 'N48', '''W/cm²''') ALL_UNITS.append(WATT_PER_SQUARE_CENTIMETRE) WATT_PER_SQUARE_INCH = UnitDescriptor('watt per square inch', 'N49', '''W/in²''') ALL_UNITS.append(WATT_PER_SQUARE_INCH) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SQUARE_FOOT_HOUR = UnitDescriptor('British thermal unit (international table) per square foot hour', 'N50', '''BtuIT/(ft²·h)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SQUARE_FOOT_HOUR) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_SQUARE_FOOT_HOUR = UnitDescriptor('British thermal unit (thermochemical) per square foot hour', 'N51', '''Btuth/(ft²·h)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_SQUARE_FOOT_HOUR) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_SQUARE_FOOT_MINUTE = UnitDescriptor('British thermal unit (thermochemical) per square foot minute', 'N52', '''Btuth/(ft²·min) ''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_SQUARE_FOOT_MINUTE) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SQUARE_FOOT_SECOND = UnitDescriptor('British thermal unit (international table) per square foot second', 'N53', '''BtuIT/(ft²·s)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SQUARE_FOOT_SECOND) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_SQUARE_FOOT_SECOND = UnitDescriptor('British thermal unit (thermochemical) per square foot second', 'N54', '''Btuth/(ft²·s)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_SQUARE_FOOT_SECOND) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SQUARE_INCH_SECOND = UnitDescriptor('British thermal unit (international table) per square inch second', 'N55', '''BtuIT/(in²·s)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SQUARE_INCH_SECOND) CALORIE_THERMOCHEMICAL_PER_SQUARE_CENTIMETRE_MINUTE = UnitDescriptor('calorie (thermochemical) per square centimetre minute', 'N56', '''calth/(cm²·min)''') ALL_UNITS.append(CALORIE_THERMOCHEMICAL_PER_SQUARE_CENTIMETRE_MINUTE) CALORIE_THERMOCHEMICAL_PER_SQUARE_CENTIMETRE_SECOND = UnitDescriptor('calorie (thermochemical) per square centimetre second', 'N57', '''calth/(cm²·s)''') ALL_UNITS.append(CALORIE_THERMOCHEMICAL_PER_SQUARE_CENTIMETRE_SECOND) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_CUBIC_FOOT = UnitDescriptor('British thermal unit (international table) per cubic foot', 'N58', '''BtuIT/ft³''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_CUBIC_FOOT) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_CUBIC_FOOT = UnitDescriptor('British thermal unit (thermochemical) per cubic foot', 'N59', '''Btuth/ft³''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_CUBIC_FOOT) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal unit (international table) per degree Fahrenheit', 'N60', '''BtuIT/ºF''') 
ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_DEGREE_FAHRENHEIT) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal unit (thermochemical) per degree Fahrenheit', 'N61', '''Btuth/ºF''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_DEGREE_FAHRENHEIT) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_DEGREE_RANKINE = UnitDescriptor('British thermal unit (international table) per degree Rankine', 'N62', '''BtuIT/ºR''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_DEGREE_RANKINE) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_DEGREE_RANKINE = UnitDescriptor('British thermal unit (thermochemical) per degree Rankine', 'N63', '''Btuth/ºR''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_DEGREE_RANKINE) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_POUND_DEGREE_RANKINE = UnitDescriptor('British thermal unit (thermochemical) per pound degree Rankine', 'N64', '''(Btuth/°R)/lb''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_POUND_DEGREE_RANKINE) KILOCALORIE_INTERNATIONAL_TABLE_PER_GRAM_KELVIN = UnitDescriptor('kilocalorie (international table) per gram kelvin', 'N65', '''(kcalIT/K)/g''') ALL_UNITS.append(KILOCALORIE_INTERNATIONAL_TABLE_PER_GRAM_KELVIN) BRITISH_THERMAL_UNIT_39_DEG_F = UnitDescriptor('British thermal unit (39 ºF)', 'N66', '''Btu (39 ºF) ''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_39_DEG_F) BRITISH_THERMAL_UNIT_59_DEG_F = UnitDescriptor('British thermal unit (59 ºF)', 'N67', '''Btu (59 ºF)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_59_DEG_F) BRITISH_THERMAL_UNIT_60_DEG_F = UnitDescriptor('British thermal unit (60 ºF)', 'N68', '''Btu (60 ºF) ''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_60_DEG_F) CALORIE_20_DEG_C = UnitDescriptor('calorie (20 ºC)', 'N69', '''cal₂₀''') ALL_UNITS.append(CALORIE_20_DEG_C) QUAD_10FIFTEEN_BTUIT = UnitDescriptor('quad (1015 BtuIT)', 'N70', '''quad''') ALL_UNITS.append(QUAD_10FIFTEEN_BTUIT) THERM_EC = UnitDescriptor('therm (EC)', 'N71', '''thm (EC)''') ALL_UNITS.append(THERM_EC) THERM_U_S_ = UnitDescriptor('therm (U.S.)', 'N72', '''thm (US)''') ALL_UNITS.append(THERM_U_S_) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_POUND = UnitDescriptor('British thermal unit (thermochemical) per pound', 'N73', '''Btuth/lb''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_POUND) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_HOUR_SQUARE_FOOT_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal unit (international table) per hour square foot degree Fahrenheit', 'N74', '''BtuIT/(h·ft²·ºF)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_HOUR_SQUARE_FOOT_DEGREE_FAHRENHEIT) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_HOUR_SQUARE_FOOT_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal unit (thermochemical) per hour square foot degree Fahrenheit', 'N75', '''Btuth/(h·ft²·ºF)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_HOUR_SQUARE_FOOT_DEGREE_FAHRENHEIT) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SECOND_SQUARE_FOOT_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal unit (international table) per second square foot degree Fahrenheit', 'N76', '''BtuIT/(s·ft²·ºF)''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SECOND_SQUARE_FOOT_DEGREE_FAHRENHEIT) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_SECOND_SQUARE_FOOT_DEGREE_FAHRENHEIT = UnitDescriptor('British thermal unit (thermochemical) per second square foot degree Fahrenheit', 'N77', '''Btuth/(s·ft²·ºF) ''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_SECOND_SQUARE_FOOT_DEGREE_FAHRENHEIT) 
KILOWATT_PER_SQUARE_METRE_KELVIN = UnitDescriptor('kilowatt per square metre kelvin', 'N78', '''kW/(m²·K)''') ALL_UNITS.append(KILOWATT_PER_SQUARE_METRE_KELVIN) KELVIN_PER_PASCAL = UnitDescriptor('kelvin per pascal', 'N79', '''K/Pa''') ALL_UNITS.append(KELVIN_PER_PASCAL) WATT_PER_METRE_DEGREE_CELSIUS = UnitDescriptor('watt per metre degree Celsius', 'N80', '''W/(m·°C)''') ALL_UNITS.append(WATT_PER_METRE_DEGREE_CELSIUS) KILOWATT_PER_METRE_KELVIN = UnitDescriptor('kilowatt per metre kelvin', 'N81', '''kW/(m·K)''') ALL_UNITS.append(KILOWATT_PER_METRE_KELVIN) KILOWATT_PER_METRE_DEGREE_CELSIUS = UnitDescriptor('kilowatt per metre degree Celsius', 'N82', '''kW/(m·°C)''') ALL_UNITS.append(KILOWATT_PER_METRE_DEGREE_CELSIUS) METRE_PER_DEGREE_CELCIUS_METRE = UnitDescriptor('metre per degree Celcius metre', 'N83', '''m/(°C·m)''') ALL_UNITS.append(METRE_PER_DEGREE_CELCIUS_METRE) DEGREE_FAHRENHEIT_HOUR_PER_BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE = UnitDescriptor('degree Fahrenheit hour per British thermal unit (international table)', 'N84', '''ºF/(BtuIT/h)''') ALL_UNITS.append(DEGREE_FAHRENHEIT_HOUR_PER_BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE) DEGREE_FAHRENHEIT_HOUR_PER_BRITISH_THERMAL_UNIT_THERMOCHEMICAL = UnitDescriptor('degree Fahrenheit hour per British thermal unit (thermochemical)', 'N85', '''ºF/(Btuth/h)''') ALL_UNITS.append(DEGREE_FAHRENHEIT_HOUR_PER_BRITISH_THERMAL_UNIT_THERMOCHEMICAL) DEGREE_FAHRENHEIT_SECOND_PER_BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE = UnitDescriptor('degree Fahrenheit second per British thermal unit (international table)', 'N86', '''ºF/(BtuIT/s)''') ALL_UNITS.append(DEGREE_FAHRENHEIT_SECOND_PER_BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE) DEGREE_FAHRENHEIT_SECOND_PER_BRITISH_THERMAL_UNIT_THERMOCHEMICAL = UnitDescriptor('degree Fahrenheit second per British thermal unit (thermochemical)', 'N87', '''ºF/(Btuth/s)''') ALL_UNITS.append(DEGREE_FAHRENHEIT_SECOND_PER_BRITISH_THERMAL_UNIT_THERMOCHEMICAL) DEGREE_FAHRENHEIT_HOUR_SQUARE_FOOT_PER_BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_INCH = UnitDescriptor('degree Fahrenheit hour square foot per British thermal unit (international table) inch', 'N88', '''ºF·h·ft²/(BtuIT·in)''') ALL_UNITS.append(DEGREE_FAHRENHEIT_HOUR_SQUARE_FOOT_PER_BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_INCH) DEGREE_FAHRENHEIT_HOUR_SQUARE_FOOT_PER_BRITISH_THERMAL_UNIT_THERMOCHEMICAL_INCH = UnitDescriptor('degree Fahrenheit hour square foot per British thermal unit (thermochemical) inch', 'N89', '''ºF·h·ft²/(Btuth·in)''') ALL_UNITS.append(DEGREE_FAHRENHEIT_HOUR_SQUARE_FOOT_PER_BRITISH_THERMAL_UNIT_THERMOCHEMICAL_INCH) KILOFARAD = UnitDescriptor('kilofarad', 'N90', '''kF''') ALL_UNITS.append(KILOFARAD) RECIPROCAL_JOULE = UnitDescriptor('reciprocal joule', 'N91', '''1/J''') ALL_UNITS.append(RECIPROCAL_JOULE) PICOSIEMENS = UnitDescriptor('picosiemens', 'N92', '''pS''') ALL_UNITS.append(PICOSIEMENS) AMPERE_PER_PASCAL = UnitDescriptor('ampere per pascal', 'N93', '''A/Pa''') ALL_UNITS.append(AMPERE_PER_PASCAL) FRANKLIN = UnitDescriptor('franklin', 'N94', '''Fr''') ALL_UNITS.append(FRANKLIN) AMPERE_MINUTE = UnitDescriptor('ampere minute', 'N95', '''A·min''') ALL_UNITS.append(AMPERE_MINUTE) BIOT = UnitDescriptor('biot', 'N96', '''Bi''') ALL_UNITS.append(BIOT) GILBERT = UnitDescriptor('gilbert', 'N97', '''Gi''') ALL_UNITS.append(GILBERT) VOLT_PER_PASCAL = UnitDescriptor('volt per pascal', 'N98', '''V/Pa''') ALL_UNITS.append(VOLT_PER_PASCAL) PICOVOLT = UnitDescriptor('picovolt', 'N99', '''pV''') ALL_UNITS.append(PICOVOLT) MILLIGRAM_PER_KILOGRAM = UnitDescriptor('milligram 
per kilogram', 'NA', '''mg/kg''') ALL_UNITS.append(MILLIGRAM_PER_KILOGRAM) NUMBER_OF_ARTICLES = UnitDescriptor('number of articles', 'NAR', '''''') ALL_UNITS.append(NUMBER_OF_ARTICLES) BARGE = UnitDescriptor('barge', 'NB', '''''') ALL_UNITS.append(BARGE) NUMBER_OF_BOBBINS = UnitDescriptor('number of bobbins', 'NBB', '''''') ALL_UNITS.append(NUMBER_OF_BOBBINS) CAR = UnitDescriptor('car', 'NC', '''''') ALL_UNITS.append(CAR) NUMBER_OF_CELLS = UnitDescriptor('number of cells', 'NCL', '''''') ALL_UNITS.append(NUMBER_OF_CELLS) NET_BARREL = UnitDescriptor('net barrel', 'ND', '''''') ALL_UNITS.append(NET_BARREL) NET_LITRE = UnitDescriptor('net litre', 'NE', '''''') ALL_UNITS.append(NET_LITRE) NEWTON = UnitDescriptor('newton', 'NEW', '''N''') ALL_UNITS.append(NEWTON) MESSAGE = UnitDescriptor('message', 'NF', '''''') ALL_UNITS.append(MESSAGE) NET_GALLON_US = UnitDescriptor('net gallon (us)', 'NG', '''''') ALL_UNITS.append(NET_GALLON_US) MESSAGE_HOUR = UnitDescriptor('message hour', 'NH', '''''') ALL_UNITS.append(MESSAGE_HOUR) NET_IMPERIAL_GALLON = UnitDescriptor('net imperial gallon', 'NI', '''''') ALL_UNITS.append(NET_IMPERIAL_GALLON) NIL = UnitDescriptor('nil', 'NIL', '''()''') ALL_UNITS.append(NIL) NUMBER_OF_INTERNATIONAL_UNITS = UnitDescriptor('number of international units', 'NIU', '''''') ALL_UNITS.append(NUMBER_OF_INTERNATIONAL_UNITS) NUMBER_OF_SCREENS = UnitDescriptor('number of screens', 'NJ', '''''') ALL_UNITS.append(NUMBER_OF_SCREENS) LOAD = UnitDescriptor('load', 'NL', '''''') ALL_UNITS.append(LOAD) NORMALISED_CUBIC_METRE = UnitDescriptor('Normalised cubic metre', 'NM3', '''''') ALL_UNITS.append(NORMALISED_CUBIC_METRE) NAUTICAL_MILE = UnitDescriptor('nautical mile', 'NMI', '''n mile''') ALL_UNITS.append(NAUTICAL_MILE) NUMBER_OF_PACKS = UnitDescriptor('number of packs', 'NMP', '''''') ALL_UNITS.append(NUMBER_OF_PACKS) TRAIN = UnitDescriptor('train', 'NN', '''''') ALL_UNITS.append(TRAIN) NUMBER_OF_PARCELS = UnitDescriptor('number of parcels', 'NPL', '''''') ALL_UNITS.append(NUMBER_OF_PARCELS) NUMBER_OF_PAIRS = UnitDescriptor('number of pairs', 'NPR', '''''') ALL_UNITS.append(NUMBER_OF_PAIRS) NUMBER_OF_PARTS = UnitDescriptor('number of parts', 'NPT', '''''') ALL_UNITS.append(NUMBER_OF_PARTS) MHO = UnitDescriptor('mho', 'NQ', '''''') ALL_UNITS.append(MHO) MICROMHO = UnitDescriptor('micromho', 'NR', '''''') ALL_UNITS.append(MICROMHO) NUMBER_OF_ROLLS = UnitDescriptor('number of rolls', 'NRL', '''''') ALL_UNITS.append(NUMBER_OF_ROLLS) NET_TON = UnitDescriptor('net ton', 'NT', '''''') ALL_UNITS.append(NET_TON) NET_REGISTER_TON = UnitDescriptor('net register ton', 'NTT', '''''') ALL_UNITS.append(NET_REGISTER_TON) NEWTON_METRE = UnitDescriptor('newton metre', 'NU', '''N·m''') ALL_UNITS.append(NEWTON_METRE) VEHICLE = UnitDescriptor('vehicle', 'NV', '''''') ALL_UNITS.append(VEHICLE) PART_PER_THOUSAND = UnitDescriptor('part per thousand', 'NX', '''‰''') ALL_UNITS.append(PART_PER_THOUSAND) POUND_PER_AIR_DRY_METRIC_TON = UnitDescriptor('pound per air dry metric ton', 'NY', '''''') ALL_UNITS.append(POUND_PER_AIR_DRY_METRIC_TON) PANEL = UnitDescriptor('panel', 'OA', '''''') ALL_UNITS.append(PANEL) OZONE_DEPLETION_EQUIVALENT = UnitDescriptor('ozone depletion equivalent', 'ODE', '''''') ALL_UNITS.append(OZONE_DEPLETION_EQUIVALENT) ODS_GRAMS = UnitDescriptor('ODS Grams', 'ODG', '''''') ALL_UNITS.append(ODS_GRAMS) ODS_KILOGRAMS = UnitDescriptor('ODS Kilograms', 'ODK', '''''') ALL_UNITS.append(ODS_KILOGRAMS) ODS_MILLIGRAMS = UnitDescriptor('ODS Milligrams', 'ODM', '''''') ALL_UNITS.append(ODS_MILLIGRAMS) OHM 
= UnitDescriptor('ohm', 'OHM', '''Ω''') ALL_UNITS.append(OHM) OUNCE_PER_SQUARE_YARD = UnitDescriptor('ounce per square yard', 'ON', '''oz/yd²''') ALL_UNITS.append(OUNCE_PER_SQUARE_YARD) OUNCE_AVOIRDUPOIS = UnitDescriptor('ounce (avoirdupois)', 'ONZ', '''oz''') ALL_UNITS.append(OUNCE_AVOIRDUPOIS) TWO_PACK = UnitDescriptor('two pack', 'OP', '''''') ALL_UNITS.append(TWO_PACK) OSCILLATIONS_PER_MINUTE = UnitDescriptor('oscillations per minute', 'OPM', '''o/min''') ALL_UNITS.append(OSCILLATIONS_PER_MINUTE) OVERTIME_HOUR = UnitDescriptor('overtime hour', 'OT', '''''') ALL_UNITS.append(OVERTIME_HOUR) OUNCE_AV = UnitDescriptor('ounce av', 'OZ', '''''') ALL_UNITS.append(OUNCE_AV) FLUID_OUNCE_US = UnitDescriptor('fluid ounce (US)', 'OZA', '''fl oz (US)''') ALL_UNITS.append(FLUID_OUNCE_US) FLUID_OUNCE_UK = UnitDescriptor('fluid ounce (UK)', 'OZI', '''fl oz (UK)''') ALL_UNITS.append(FLUID_OUNCE_UK) PAGE_ELECTRONIC = UnitDescriptor('page - electronic', 'P0', '''''') ALL_UNITS.append(PAGE_ELECTRONIC) PERCENT = UnitDescriptor('percent', 'P1', '''%''') ALL_UNITS.append(PERCENT) PERCENT = UnitDescriptor('percent', 'P1', '''pct''') ALL_UNITS.append(PERCENT) COULOMB_PER_METRE = UnitDescriptor('coulomb per metre', 'P10', '''C/m''') ALL_UNITS.append(COULOMB_PER_METRE) KILOWEBER = UnitDescriptor('kiloweber', 'P11', '''kWb''') ALL_UNITS.append(KILOWEBER) GAMMA = UnitDescriptor('gamma', 'P12', '''γ''') ALL_UNITS.append(GAMMA) KILOTESLA = UnitDescriptor('kilotesla', 'P13', '''kT''') ALL_UNITS.append(KILOTESLA) JOULE_PER_SECOND = UnitDescriptor('joule per second', 'P14', '''J/s''') ALL_UNITS.append(JOULE_PER_SECOND) JOULE_PER_MINUTE = UnitDescriptor('joule per minute', 'P15', '''J/min''') ALL_UNITS.append(JOULE_PER_MINUTE) JOULE_PER_HOUR = UnitDescriptor('joule per hour', 'P16', '''J/h''') ALL_UNITS.append(JOULE_PER_HOUR) JOULE_PER_DAY = UnitDescriptor('joule per day', 'P17', '''J/d''') ALL_UNITS.append(JOULE_PER_DAY) KILOJOULE_PER_SECOND = UnitDescriptor('kilojoule per second', 'P18', '''kJ/s''') ALL_UNITS.append(KILOJOULE_PER_SECOND) KILOJOULE_PER_MINUTE = UnitDescriptor('kilojoule per minute', 'P19', '''kJ/min''') ALL_UNITS.append(KILOJOULE_PER_MINUTE) POUND_PER_FOOT = UnitDescriptor('pound per foot', 'P2', '''lb/ft''') ALL_UNITS.append(POUND_PER_FOOT) KILOJOULE_PER_HOUR = UnitDescriptor('kilojoule per hour', 'P20', '''kJ/h''') ALL_UNITS.append(KILOJOULE_PER_HOUR) KILOJOULE_PER_DAY = UnitDescriptor('kilojoule per day', 'P21', '''kJ/d''') ALL_UNITS.append(KILOJOULE_PER_DAY) NANOOHM = UnitDescriptor('nanoohm', 'P22', '''nΩ''') ALL_UNITS.append(NANOOHM) OHM_CIRCULAR_MIL_PER_FOOT = UnitDescriptor('ohm circular-mil per foot', 'P23', '''Ω·cmil/ft ''') ALL_UNITS.append(OHM_CIRCULAR_MIL_PER_FOOT) KILOHENRY = UnitDescriptor('kilohenry', 'P24', '''kH''') ALL_UNITS.append(KILOHENRY) LUMEN_PER_SQUARE_FOOT = UnitDescriptor('lumen per square foot', 'P25', '''lm/ft²''') ALL_UNITS.append(LUMEN_PER_SQUARE_FOOT) PHOT = UnitDescriptor('phot', 'P26', '''ph''') ALL_UNITS.append(PHOT) FOOTCANDLE = UnitDescriptor('footcandle', 'P27', '''ftc''') ALL_UNITS.append(FOOTCANDLE) CANDELA_PER_SQUARE_INCH = UnitDescriptor('candela per square inch', 'P28', '''cd/in²''') ALL_UNITS.append(CANDELA_PER_SQUARE_INCH) FOOTLAMBERT = UnitDescriptor('footlambert', 'P29', '''ftL''') ALL_UNITS.append(FOOTLAMBERT) THREE_PACK = UnitDescriptor('three pack', 'P3', '''''') ALL_UNITS.append(THREE_PACK) LAMBERT = UnitDescriptor('lambert', 'P30', '''Lb''') ALL_UNITS.append(LAMBERT) STILB = UnitDescriptor('stilb', 'P31', '''sb''') ALL_UNITS.append(STILB) 
CANDELA_PER_SQUARE_FOOT = UnitDescriptor('candela per square foot', 'P32', '''cd/ft²''') ALL_UNITS.append(CANDELA_PER_SQUARE_FOOT) KILOCANDELA = UnitDescriptor('kilocandela', 'P33', '''kcd''') ALL_UNITS.append(KILOCANDELA) MILLICANDELA = UnitDescriptor('millicandela', 'P34', '''mcd''') ALL_UNITS.append(MILLICANDELA) HEFNER_KERZE = UnitDescriptor('Hefner-Kerze', 'P35', '''HK''') ALL_UNITS.append(HEFNER_KERZE) INTERNATIONAL_CANDLE = UnitDescriptor('international candle', 'P36', '''IK''') ALL_UNITS.append(INTERNATIONAL_CANDLE) BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SQUARE_FOOT = UnitDescriptor('British thermal unit (international table) per square foot', 'P37', '''BtuIT/ft²''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_INTERNATIONAL_TABLE_PER_SQUARE_FOOT) BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_SQUARE_FOOT = UnitDescriptor('British thermal unit (thermochemical) per square foot', 'P38', '''Btuth/ft²''') ALL_UNITS.append(BRITISH_THERMAL_UNIT_THERMOCHEMICAL_PER_SQUARE_FOOT) CALORIE_THERMOCHEMICAL_PER_SQUARE_CENTIMETRE = UnitDescriptor('calorie (thermochemical) per square centimetre', 'P39', '''calth/cm²''') ALL_UNITS.append(CALORIE_THERMOCHEMICAL_PER_SQUARE_CENTIMETRE) FOUR_PACK = UnitDescriptor('four pack', 'P4', '''''') ALL_UNITS.append(FOUR_PACK) LANGLEY = UnitDescriptor('langley', 'P40', '''Ly''') ALL_UNITS.append(LANGLEY) DECADE_LOGARITHMIC = UnitDescriptor('decade (logarithmic)', 'P41', '''dec''') ALL_UNITS.append(DECADE_LOGARITHMIC) PASCAL_SQUARED_SECOND = UnitDescriptor('pascal squared second', 'P42', '''Pa²·s''') ALL_UNITS.append(PASCAL_SQUARED_SECOND) BEL_PER_METRE = UnitDescriptor('bel per metre', 'P43', '''B/m''') ALL_UNITS.append(BEL_PER_METRE) POUND_MOLE = UnitDescriptor('pound mole', 'P44', '''lbmol''') ALL_UNITS.append(POUND_MOLE) POUND_MOLE_PER_SECOND = UnitDescriptor('pound mole per second', 'P45', '''lbmol/s''') ALL_UNITS.append(POUND_MOLE_PER_SECOND) POUND_MOLE_PER_MINUTE = UnitDescriptor('pound mole per minute', 'P46', '''lbmol/h''') ALL_UNITS.append(POUND_MOLE_PER_MINUTE) KILOMOLE_PER_KILOGRAM = UnitDescriptor('kilomole per kilogram', 'P47', '''kmol/kg''') ALL_UNITS.append(KILOMOLE_PER_KILOGRAM) POUND_MOLE_PER_POUND = UnitDescriptor('pound mole per pound', 'P48', '''lbmol/lb''') ALL_UNITS.append(POUND_MOLE_PER_POUND) NEWTON_SQUARE_METRE_PER_AMPERE = UnitDescriptor('newton square metre per ampere', 'P49', '''N·m²/A''') ALL_UNITS.append(NEWTON_SQUARE_METRE_PER_AMPERE) FIVE_PACK = UnitDescriptor('five pack', 'P5', '''''') ALL_UNITS.append(FIVE_PACK) WEBER_METRE = UnitDescriptor('weber metre', 'P50', '''Wb·m''') ALL_UNITS.append(WEBER_METRE) MOL_PER_KILOGRAM_PASCAL = UnitDescriptor('mol per kilogram pascal', 'P51', '''(mol/kg)/Pa''') ALL_UNITS.append(MOL_PER_KILOGRAM_PASCAL) MOL_PER_CUBIC_METRE_PASCAL = UnitDescriptor('mol per cubic metre pascal', 'P52', '''(mol/m³)/Pa''') ALL_UNITS.append(MOL_PER_CUBIC_METRE_PASCAL) UNIT_POLE = UnitDescriptor('unit pole', 'P53', '''unit pole ''') ALL_UNITS.append(UNIT_POLE) MILLIGRAY_PER_SECOND = UnitDescriptor('milligray per second', 'P54', '''mGy/s''') ALL_UNITS.append(MILLIGRAY_PER_SECOND) MICROGRAY_PER_SECOND = UnitDescriptor('microgray per second', 'P55', '''µGy/s''') ALL_UNITS.append(MICROGRAY_PER_SECOND) NANOGRAY_PER_SECOND = UnitDescriptor('nanogray per second', 'P56', '''nGy/s''') ALL_UNITS.append(NANOGRAY_PER_SECOND) GRAY_PER_MINUTE = UnitDescriptor('gray per minute', 'P57', '''Gy/min''') ALL_UNITS.append(GRAY_PER_MINUTE) MILLIGRAY_PER_MINUTE = UnitDescriptor('milligray per minute', 'P58', '''mGy/min''') 
ALL_UNITS.append(MILLIGRAY_PER_MINUTE) MICROGRAY_PER_MINUTE = UnitDescriptor('microgray per minute', 'P59', '''µGy/min''') ALL_UNITS.append(MICROGRAY_PER_MINUTE) SIX_PACK = UnitDescriptor('six pack', 'P6', '''''') ALL_UNITS.append(SIX_PACK) NANOGRAY_PER_MINUTE = UnitDescriptor('nanogray per minute', 'P60', '''nGy/min''') ALL_UNITS.append(NANOGRAY_PER_MINUTE) GRAY_PER_HOUR = UnitDescriptor('gray per hour', 'P61', '''Gy/h''') ALL_UNITS.append(GRAY_PER_HOUR) MILLIGRAY_PER_HOUR = UnitDescriptor('milligray per hour', 'P62', '''mGy/h''') ALL_UNITS.append(MILLIGRAY_PER_HOUR) MICROGRAY_PER_HOUR = UnitDescriptor('microgray per hour', 'P63', '''µGy/h''') ALL_UNITS.append(MICROGRAY_PER_HOUR) NANOGRAY_PER_HOUR = UnitDescriptor('nanogray per hour', 'P64', '''nGy/h''') ALL_UNITS.append(NANOGRAY_PER_HOUR) SIEVERT_PER_SECOND = UnitDescriptor('sievert per second', 'P65', '''Sv/s''') ALL_UNITS.append(SIEVERT_PER_SECOND) MILLISIEVERT_PER_SECOND = UnitDescriptor('millisievert per second', 'P66', '''mSv/s''') ALL_UNITS.append(MILLISIEVERT_PER_SECOND) MICROSIEVERT_PER_SECOND = UnitDescriptor('microsievert per second', 'P67', '''µSv/s''') ALL_UNITS.append(MICROSIEVERT_PER_SECOND) NANOSIEVERT_PER_SECOND = UnitDescriptor('nanosievert per second', 'P68', '''nSv/s''') ALL_UNITS.append(NANOSIEVERT_PER_SECOND) REM_PER_SECOND = UnitDescriptor('rem per second', 'P69', '''rem/s''') ALL_UNITS.append(REM_PER_SECOND) SEVEN_PACK = UnitDescriptor('seven pack', 'P7', '''''') ALL_UNITS.append(SEVEN_PACK) SIEVERT_PER_HOUR = UnitDescriptor('sievert per hour', 'P70', '''Sv/h''') ALL_UNITS.append(SIEVERT_PER_HOUR) MILLISIEVERT_PER_HOUR = UnitDescriptor('millisievert per hour', 'P71', '''mSv/h''') ALL_UNITS.append(MILLISIEVERT_PER_HOUR) MICROSIEVERT_PER_HOUR = UnitDescriptor('microsievert per hour', 'P72', '''µSv/h''') ALL_UNITS.append(MICROSIEVERT_PER_HOUR) NANOSIEVERT_PER_HOUR = UnitDescriptor('nanosievert per hour', 'P73', '''nSv/h''') ALL_UNITS.append(NANOSIEVERT_PER_HOUR) SIEVERT_PER_MINUTE = UnitDescriptor('sievert per minute', 'P74', '''Sv/min''') ALL_UNITS.append(SIEVERT_PER_MINUTE) MILLISIEVERT_PER_MINUTE = UnitDescriptor('millisievert per minute', 'P75', '''mSv/min''') ALL_UNITS.append(MILLISIEVERT_PER_MINUTE) MICROSIEVERT_PER_MINUTE = UnitDescriptor('microsievert per minute', 'P76', '''µSv/min''') ALL_UNITS.append(MICROSIEVERT_PER_MINUTE) NANOSIEVERT_PER_MINUTE = UnitDescriptor('nanosievert per minute', 'P77', '''nSv/min''') ALL_UNITS.append(NANOSIEVERT_PER_MINUTE) RECIPROCAL_SQUARE_INCH = UnitDescriptor('reciprocal square inch', 'P78', '''1/in²''') ALL_UNITS.append(RECIPROCAL_SQUARE_INCH) PASCAL_SQUARE_METRE_PER_KILOGRAM = UnitDescriptor('pascal square metre per kilogram', 'P79', '''Pa/(kg/m²)''') ALL_UNITS.append(PASCAL_SQUARE_METRE_PER_KILOGRAM) EIGHT_PACK = UnitDescriptor('eight pack', 'P8', '''''') ALL_UNITS.append(EIGHT_PACK) MILLIPASCAL_PER_METRE = UnitDescriptor('millipascal per metre', 'P80', '''mPa/m''') ALL_UNITS.append(MILLIPASCAL_PER_METRE) KILOPASCAL_PER_METRE = UnitDescriptor('kilopascal per metre', 'P81', '''kPa/m''') ALL_UNITS.append(KILOPASCAL_PER_METRE) HECTOPASCAL_PER_METRE = UnitDescriptor('hectopascal per metre', 'P82', '''hPa/m''') ALL_UNITS.append(HECTOPASCAL_PER_METRE) STANDARD_ATMOSPHERE_PER_METRE = UnitDescriptor('standard atmosphere per metre', 'P83', '''Atm/m''') ALL_UNITS.append(STANDARD_ATMOSPHERE_PER_METRE) TECHNICAL_ATMOSPHERE_PER_METRE = UnitDescriptor('technical atmosphere per metre', 'P84', '''at/m''') ALL_UNITS.append(TECHNICAL_ATMOSPHERE_PER_METRE) TORR_PER_METRE = 
UnitDescriptor('torr per metre', 'P85', '''Torr/m''') ALL_UNITS.append(TORR_PER_METRE) PSI_PER_INCH = UnitDescriptor('psi per inch', 'P86', '''psi/in''') ALL_UNITS.append(PSI_PER_INCH) CUBIC_METRE_PER_SECOND_SQUARE_METRE = UnitDescriptor('cubic metre per second square metre', 'P87', '''(m³/s)/m²''') ALL_UNITS.append(CUBIC_METRE_PER_SECOND_SQUARE_METRE) RHE = UnitDescriptor('rhe', 'P88', '''rhe''') ALL_UNITS.append(RHE) POUND_FORCE_FOOT_PER_INCH = UnitDescriptor('pound-force foot per inch', 'P89', '''lbf·ft/in''') ALL_UNITS.append(POUND_FORCE_FOOT_PER_INCH) NINE_PACK = UnitDescriptor('nine pack', 'P9', '''''') ALL_UNITS.append(NINE_PACK) POUND_FORCE_INCH_PER_INCH = UnitDescriptor('pound-force inch per inch', 'P90', '''lbf·in/in''') ALL_UNITS.append(POUND_FORCE_INCH_PER_INCH) PERM_0_DEG_C = UnitDescriptor('perm (0 ºC)', 'P91', '''perm (0 ºC) ''') ALL_UNITS.append(PERM_0_DEG_C) PERM_23_DEG_C = UnitDescriptor('perm (23 ºC)', 'P92', '''perm (23 ºC) ''') ALL_UNITS.append(PERM_23_DEG_C) BYTE_PER_SECOND = UnitDescriptor('byte per second', 'P93', '''byte/s''') ALL_UNITS.append(BYTE_PER_SECOND) KILOBYTE_PER_SECOND = UnitDescriptor('kilobyte per second', 'P94', '''kbyte/s''') ALL_UNITS.append(KILOBYTE_PER_SECOND) MEGABYTE_PER_SECOND = UnitDescriptor('megabyte per second', 'P95', '''Mbyte/s''') ALL_UNITS.append(MEGABYTE_PER_SECOND) RECIPROCAL_VOLT = UnitDescriptor('reciprocal volt', 'P96', '''1/V''') ALL_UNITS.append(RECIPROCAL_VOLT) RECIPROCAL_RADIAN = UnitDescriptor('reciprocal radian', 'P97', '''1/rad''') ALL_UNITS.append(RECIPROCAL_RADIAN) PASCAL_TO_THE_POWER_SUM_OF_STOICHIOMETRIC_NUMBERS = UnitDescriptor('pascal to the power sum of stoichiometric numbers', 'P98', '''PaΣνB''') ALL_UNITS.append(PASCAL_TO_THE_POWER_SUM_OF_STOICHIOMETRIC_NUMBERS) MOLE_PER_CUBIV_METRE_TO_THE_POWER_SUM_OF_STOICHIOMETRIC_NUMBERS = UnitDescriptor('mole per cubiv metre to the power sum of stoichiometric numbers', 'P99', '''(mol/m³)∑νB''') ALL_UNITS.append(MOLE_PER_CUBIV_METRE_TO_THE_POWER_SUM_OF_STOICHIOMETRIC_NUMBERS) PACKET = UnitDescriptor('packet', 'PA', '''''') ALL_UNITS.append(PACKET) PASCAL = UnitDescriptor('pascal', 'PAL', '''Pa''') ALL_UNITS.append(PASCAL) PAIR_INCH = UnitDescriptor('pair inch', 'PB', '''''') ALL_UNITS.append(PAIR_INCH) PAD = UnitDescriptor('pad', 'PD', '''''') ALL_UNITS.append(PAD) POUND_EQUIVALENT = UnitDescriptor('pound equivalent', 'PE', '''''') ALL_UNITS.append(POUND_EQUIVALENT) PALLET_LIFT = UnitDescriptor('pallet (lift)', 'PF', '''''') ALL_UNITS.append(PALLET_LIFT) PROOF_LITRE = UnitDescriptor('proof litre', 'PFL', '''''') ALL_UNITS.append(PROOF_LITRE) PLATE = UnitDescriptor('plate', 'PG', '''''') ALL_UNITS.append(PLATE) PROOF_GALLON = UnitDescriptor('proof gallon', 'PGL', '''''') ALL_UNITS.append(PROOF_GALLON) PITCH = UnitDescriptor('pitch', 'PI', '''''') ALL_UNITS.append(PITCH) PACK = UnitDescriptor('pack', 'PK', '''''') ALL_UNITS.append(PACK) PAIL = UnitDescriptor('pail', 'PL', '''''') ALL_UNITS.append(PAIL) DEGREE_PLATO = UnitDescriptor('degree Plato', 'PLA', '''°P''') ALL_UNITS.append(DEGREE_PLATO) POUND_PERCENTAGE = UnitDescriptor('pound percentage', 'PM', '''''') ALL_UNITS.append(POUND_PERCENTAGE) POUND_NET = UnitDescriptor('pound net', 'PN', '''''') ALL_UNITS.append(POUND_NET) POUND_PER_INCH_OF_LENGTH = UnitDescriptor('pound per inch of length', 'PO', '''lb/in''') ALL_UNITS.append(POUND_PER_INCH_OF_LENGTH) PAGE_PER_INCH = UnitDescriptor('page per inch', 'PQ', '''ppi''') ALL_UNITS.append(PAGE_PER_INCH) PAIR = UnitDescriptor('pair', 'PR', '''''') ALL_UNITS.append(PAIR) 
POUND_FORCE_PER_SQUARE_INCH = UnitDescriptor('pound-force per square inch', 'PS', '''lbf/in²''') ALL_UNITS.append(POUND_FORCE_PER_SQUARE_INCH) PINT_US = UnitDescriptor('pint (US)', 'PT', '''pt (US)''') ALL_UNITS.append(PINT_US) DRY_PINT_US = UnitDescriptor('dry pint (US)', 'PTD', '''dry pt (US)''') ALL_UNITS.append(DRY_PINT_US) PINT_UK = UnitDescriptor('pint (UK)', 'PTI', '''pt (UK)''') ALL_UNITS.append(PINT_UK) LIQUID_PINT_US = UnitDescriptor('liquid pint (US)', 'PTL', '''liq pt (US)''') ALL_UNITS.append(LIQUID_PINT_US) PORTION = UnitDescriptor('portion', 'PTN', '''PTN''') ALL_UNITS.append(PORTION) TRAY_PER_TRAY_PACK = UnitDescriptor('tray / tray pack', 'PU', '''''') ALL_UNITS.append(TRAY_PER_TRAY_PACK) HALF_PINT_US = UnitDescriptor('half pint (US)', 'PV', '''''') ALL_UNITS.append(HALF_PINT_US) POUND_PER_INCH_OF_WIDTH = UnitDescriptor('pound per inch of width', 'PW', '''''') ALL_UNITS.append(POUND_PER_INCH_OF_WIDTH) PECK_DRY_US = UnitDescriptor('peck dry (US)', 'PY', '''''') ALL_UNITS.append(PECK_DRY_US) PECK_DRY_UK = UnitDescriptor('peck dry (UK)', 'PZ', '''''') ALL_UNITS.append(PECK_DRY_UK) JOULE_PER_TESLA = UnitDescriptor('joule per tesla', 'Q10', '''J/T''') ALL_UNITS.append(JOULE_PER_TESLA) ERLANG = UnitDescriptor('erlang', 'Q11', '''E''') ALL_UNITS.append(ERLANG) OCTET = UnitDescriptor('octet', 'Q12', '''o''') ALL_UNITS.append(OCTET) OCTET_PER_SECOND = UnitDescriptor('octet per second', 'Q13', '''o/s''') ALL_UNITS.append(OCTET_PER_SECOND) SHANNON = UnitDescriptor('shannon', 'Q14', '''Sh''') ALL_UNITS.append(SHANNON) HARTLEY = UnitDescriptor('hartley', 'Q15', '''Hart''') ALL_UNITS.append(HARTLEY) NATURAL_UNIT_OF_INFORMATION = UnitDescriptor('natural unit of information', 'Q16', '''nat''') ALL_UNITS.append(NATURAL_UNIT_OF_INFORMATION) SHANNON_PER_SECOND = UnitDescriptor('shannon per second', 'Q17', '''Sh/s''') ALL_UNITS.append(SHANNON_PER_SECOND) HARTLEY_PER_SECOND = UnitDescriptor('hartley per second', 'Q18', '''Hart/s''') ALL_UNITS.append(HARTLEY_PER_SECOND) NATURAL_UNIT_OF_INFORMATION_PER_SECOND = UnitDescriptor('natural unit of information per second', 'Q19', '''nat/s''') ALL_UNITS.append(NATURAL_UNIT_OF_INFORMATION_PER_SECOND) SECOND_PER_KILOGRAMM = UnitDescriptor('second per kilogramm', 'Q20', '''s/kg''') ALL_UNITS.append(SECOND_PER_KILOGRAMM) WATT_SQUARE_METRE = UnitDescriptor('watt square metre', 'Q21', '''W·m²''') ALL_UNITS.append(WATT_SQUARE_METRE) SECOND_PER_RADIAN_CUBIC_METRE = UnitDescriptor('second per radian cubic metre', 'Q22', '''1/(Hz·rad·m³)''') ALL_UNITS.append(SECOND_PER_RADIAN_CUBIC_METRE) WEBER_TO_THE_POWER_MINUS_ONE = UnitDescriptor('weber to the power minus one', 'Q23', '''1/Wb''') ALL_UNITS.append(WEBER_TO_THE_POWER_MINUS_ONE) RECIPROCAL_INCH = UnitDescriptor('reciprocal inch', 'Q24', '''1/in''') ALL_UNITS.append(RECIPROCAL_INCH) DIOPTRE = UnitDescriptor('dioptre', 'Q25', '''dpt''') ALL_UNITS.append(DIOPTRE) ONE_PER_ONE = UnitDescriptor('one per one', 'Q26', '''1/1''') ALL_UNITS.append(ONE_PER_ONE) NEWTON_METRE_PER_METRE = UnitDescriptor('newton metre per metre', 'Q27', '''N·m/m²''') ALL_UNITS.append(NEWTON_METRE_PER_METRE) KILOGRAM_PER_SQUARE_METRE_PASCAL_SECOND = UnitDescriptor('kilogram per square metre pascal second', 'Q28', '''kg/(m²·Pa·s)''') ALL_UNITS.append(KILOGRAM_PER_SQUARE_METRE_PASCAL_SECOND) MICROGRAM_PER_HECTOGRAM = UnitDescriptor('microgram per hectogram', 'Q29', '''µg/hg''') ALL_UNITS.append(MICROGRAM_PER_HECTOGRAM) PH_POTENTIAL_OF_HYDROGEN = UnitDescriptor('pH (potential of Hydrogen)', 'Q30', '''pH''') 
ALL_UNITS.append(PH_POTENTIAL_OF_HYDROGEN) KILOJOULE_PER_GRAM = UnitDescriptor('kilojoule per gram', 'Q31', '''kJ/g''') ALL_UNITS.append(KILOJOULE_PER_GRAM) FEMTOLITRE = UnitDescriptor('femtolitre', 'Q32', '''fl''') ALL_UNITS.append(FEMTOLITRE) PICOLITRE = UnitDescriptor('picolitre', 'Q33', '''pl''') ALL_UNITS.append(PICOLITRE) NANOLITRE = UnitDescriptor('nanolitre', 'Q34', '''nl''') ALL_UNITS.append(NANOLITRE) MEGAWATTS_PER_MINUTE = UnitDescriptor('megawatts per minute', 'Q35', '''MW/min''') ALL_UNITS.append(MEGAWATTS_PER_MINUTE) SQUARE_METRE_PER_CUBIC_METRE = UnitDescriptor('square metre per cubic metre', 'Q36', '''m2/m3''') ALL_UNITS.append(SQUARE_METRE_PER_CUBIC_METRE) STANDARD_CUBIC_METRE_PER_DAY = UnitDescriptor('Standard cubic metre per day', 'Q37', '''''') ALL_UNITS.append(STANDARD_CUBIC_METRE_PER_DAY) STANDARD_CUBIC_METRE_PER_HOUR = UnitDescriptor('Standard cubic metre per hour', 'Q38', '''''') ALL_UNITS.append(STANDARD_CUBIC_METRE_PER_HOUR) NORMALIZED_CUBIC_METRE_PER_DAY = UnitDescriptor('Normalized cubic metre per day', 'Q39', '''''') ALL_UNITS.append(NORMALIZED_CUBIC_METRE_PER_DAY) NORMALIZED_CUBIC_METRE_PER_HOUR = UnitDescriptor('Normalized cubic metre per hour', 'Q40', '''''') ALL_UNITS.append(NORMALIZED_CUBIC_METRE_PER_HOUR) JOULE_PER_NORMALISED_CUBIC_METRE = UnitDescriptor('Joule per normalised cubic metre', 'Q41', '''''') ALL_UNITS.append(JOULE_PER_NORMALISED_CUBIC_METRE) JOULE_PER_STANDARD_CUBIC_METRE = UnitDescriptor('Joule per standard cubic metre', 'Q42', '''''') ALL_UNITS.append(JOULE_PER_STANDARD_CUBIC_METRE) MEAL = UnitDescriptor('meal', 'Q3', '''''') ALL_UNITS.append(MEAL) PAGE_FACSIMILE = UnitDescriptor('page - facsimile', 'QA', '''''') ALL_UNITS.append(PAGE_FACSIMILE) QUARTER_OF_A_YEAR = UnitDescriptor('quarter (of a year)', 'QAN', '''''') ALL_UNITS.append(QUARTER_OF_A_YEAR) PAGE_HARDCOPY = UnitDescriptor('page - hardcopy', 'QB', '''''') ALL_UNITS.append(PAGE_HARDCOPY) QUARTER_DOZEN = UnitDescriptor('quarter dozen', 'QD', '''''') ALL_UNITS.append(QUARTER_DOZEN) QUARTER_HOUR = UnitDescriptor('quarter hour', 'QH', '''''') ALL_UNITS.append(QUARTER_HOUR) QUARTER_KILOGRAM = UnitDescriptor('quarter kilogram', 'QK', '''''') ALL_UNITS.append(QUARTER_KILOGRAM) QUIRE = UnitDescriptor('quire', 'QR', '''qr''') ALL_UNITS.append(QUIRE) QUART_US = UnitDescriptor('quart (US)', 'QT', '''qt (US)''') ALL_UNITS.append(QUART_US) DRY_QUART_US = UnitDescriptor('dry quart (US)', 'QTD', '''dry qt (US)''') ALL_UNITS.append(DRY_QUART_US) QUART_UK = UnitDescriptor('quart (UK)', 'QTI', '''qt (UK)''') ALL_UNITS.append(QUART_UK) LIQUID_QUART_US = UnitDescriptor('liquid quart (US)', 'QTL', '''liq qt (US)''') ALL_UNITS.append(LIQUID_QUART_US) QUARTER_UK = UnitDescriptor('quarter (UK)', 'QTR', '''Qr (UK)''') ALL_UNITS.append(QUARTER_UK) PICA = UnitDescriptor('pica', 'R1', '''''') ALL_UNITS.append(PICA) CALORIE = UnitDescriptor('calorie', 'R4', '''cal''') ALL_UNITS.append(CALORIE) THOUSAND_CUBIC_METRE = UnitDescriptor('thousand cubic metre', 'R9', '''''') ALL_UNITS.append(THOUSAND_CUBIC_METRE) RACK = UnitDescriptor('rack', 'RA', '''''') ALL_UNITS.append(RACK) ROD = UnitDescriptor('rod', 'RD', '''''') ALL_UNITS.append(ROD) RING = UnitDescriptor('ring', 'RG', '''''') ALL_UNITS.append(RING) RUNNING_OR_OPERATING_HOUR = UnitDescriptor('running or operating hour', 'RH', '''''') ALL_UNITS.append(RUNNING_OR_OPERATING_HOUR) ROLL_METRIC_MEASURE = UnitDescriptor('roll metric measure', 'RK', '''''') ALL_UNITS.append(ROLL_METRIC_MEASURE) REEL = UnitDescriptor('reel', 'RL', '''''') ALL_UNITS.append(REEL) REAM = 
UnitDescriptor('ream', 'RM', '''''') ALL_UNITS.append(REAM) REAM_METRIC_MEASURE = UnitDescriptor('ream metric measure', 'RN', '''''') ALL_UNITS.append(REAM_METRIC_MEASURE) ROLL = UnitDescriptor('roll', 'RO', '''''') ALL_UNITS.append(ROLL) ROOM = UnitDescriptor('room', 'ROM', '''''') ALL_UNITS.append(ROOM) POUND_PER_REAM = UnitDescriptor('pound per ream', 'RP', '''''') ALL_UNITS.append(POUND_PER_REAM) REVOLUTIONS_PER_MINUTE = UnitDescriptor('revolutions per minute', 'RPM', '''r/min''') ALL_UNITS.append(REVOLUTIONS_PER_MINUTE) REVOLUTIONS_PER_SECOND = UnitDescriptor('revolutions per second', 'RPS', '''r/s''') ALL_UNITS.append(REVOLUTIONS_PER_SECOND) RESET = UnitDescriptor('reset', 'RS', '''''') ALL_UNITS.append(RESET) REVENUE_TON_MILE = UnitDescriptor('revenue ton mile', 'RT', '''''') ALL_UNITS.append(REVENUE_TON_MILE) RUN = UnitDescriptor('run', 'RU', '''''') ALL_UNITS.append(RUN) SQUARE_FOOT_PER_SECOND = UnitDescriptor('square foot per second', 'S3', '''ft²/s''') ALL_UNITS.append(SQUARE_FOOT_PER_SECOND) SQUARE_METRE_PER_SECOND = UnitDescriptor('square metre per second', 'S4', '''m²/s''') ALL_UNITS.append(SQUARE_METRE_PER_SECOND) SIXTY_FOURTHS_OF_AN_INCH = UnitDescriptor('sixty fourths of an inch', 'S5', '''''') ALL_UNITS.append(SIXTY_FOURTHS_OF_AN_INCH) SESSION = UnitDescriptor('session', 'S6', '''''') ALL_UNITS.append(SESSION) STORAGE_UNIT = UnitDescriptor('storage unit', 'S7', '''''') ALL_UNITS.append(STORAGE_UNIT) STANDARD_ADVERTISING_UNIT = UnitDescriptor('standard advertising unit', 'S8', '''''') ALL_UNITS.append(STANDARD_ADVERTISING_UNIT) SACK = UnitDescriptor('sack', 'SA', '''''') ALL_UNITS.append(SACK) HALF_YEAR_6_MONTHS = UnitDescriptor('half year (6 months)', 'SAN', '''''') ALL_UNITS.append(HALF_YEAR_6_MONTHS) SCORE = UnitDescriptor('score', 'SCO', '''''') ALL_UNITS.append(SCORE) SCRUPLE = UnitDescriptor('scruple', 'SCR', '''''') ALL_UNITS.append(SCRUPLE) SOLID_POUND = UnitDescriptor('solid pound', 'SD', '''''') ALL_UNITS.append(SOLID_POUND) SECTION = UnitDescriptor('section', 'SE', '''''') ALL_UNITS.append(SECTION) SECOND_UNIT_OF_TIME = UnitDescriptor('second [unit of time]', 'SEC', '''s''') ALL_UNITS.append(SECOND_UNIT_OF_TIME) SET = UnitDescriptor('set', 'SET', '''''') ALL_UNITS.append(SET) SEGMENT = UnitDescriptor('segment', 'SG', '''''') ALL_UNITS.append(SEGMENT) SHIPPING_TON = UnitDescriptor('shipping ton', 'SHT', '''''') ALL_UNITS.append(SHIPPING_TON) SIEMENS = UnitDescriptor('siemens', 'SIE', '''S''') ALL_UNITS.append(SIEMENS) SPLIT_TANK_TRUCK = UnitDescriptor('split tank truck', 'SK', '''''') ALL_UNITS.append(SPLIT_TANK_TRUCK) SLIPSHEET = UnitDescriptor('slipsheet', 'SL', '''''') ALL_UNITS.append(SLIPSHEET) STANDARD_CUBIC_METRE = UnitDescriptor('Standard cubic metre', 'SM3', '''''') ALL_UNITS.append(STANDARD_CUBIC_METRE) MILE_STATUTE_MILE = UnitDescriptor('mile (statute mile)', 'SMI', '''mile''') ALL_UNITS.append(MILE_STATUTE_MILE) SQUARE_ROD = UnitDescriptor('square rod', 'SN', '''rd²''') ALL_UNITS.append(SQUARE_ROD) SPOOL = UnitDescriptor('spool', 'SO', '''''') ALL_UNITS.append(SPOOL) SHELF_PACKAGE = UnitDescriptor('shelf package', 'SP', '''''') ALL_UNITS.append(SHELF_PACKAGE) SQUARE = UnitDescriptor('square', 'SQ', '''''') ALL_UNITS.append(SQUARE) SQUARE_ROOFING = UnitDescriptor('square, roofing', 'SQR', '''''') ALL_UNITS.append(SQUARE_ROOFING) STRIP = UnitDescriptor('strip', 'SR', '''''') ALL_UNITS.append(STRIP) SHEET_METRIC_MEASURE = UnitDescriptor('sheet metric measure', 'SS', '''''') ALL_UNITS.append(SHEET_METRIC_MEASURE) SHORT_STANDARD_7200_MATCHES = 
UnitDescriptor('short standard (7200 matches)', 'SST', '''''') ALL_UNITS.append(SHORT_STANDARD_7200_MATCHES) SHEET = UnitDescriptor('sheet', 'ST', '''''') ALL_UNITS.append(SHEET) STICK = UnitDescriptor('stick', 'STC', '''''') ALL_UNITS.append(STICK) STONE_UK = UnitDescriptor('stone (UK)', 'STI', '''st''') ALL_UNITS.append(STONE_UK) STICK_CIGARETTE = UnitDescriptor('stick, cigarette', 'STK', '''''') ALL_UNITS.append(STICK_CIGARETTE) STANDARD_LITRE = UnitDescriptor('standard litre', 'STL', '''''') ALL_UNITS.append(STANDARD_LITRE) TON_US_OR_SHORT_TON_UK_PER_US = UnitDescriptor('ton (US) or short ton (UK/US)', 'STN', '''ton (US)''') ALL_UNITS.append(TON_US_OR_SHORT_TON_UK_PER_US) STRAW = UnitDescriptor('straw', 'STW', '''''') ALL_UNITS.append(STRAW) SKID = UnitDescriptor('skid', 'SV', '''''') ALL_UNITS.append(SKID) SKEIN = UnitDescriptor('skein', 'SW', '''''') ALL_UNITS.append(SKEIN) SHIPMENT = UnitDescriptor('shipment', 'SX', '''''') ALL_UNITS.append(SHIPMENT) SYRINGE = UnitDescriptor('syringe', 'SYR', '''''') ALL_UNITS.append(SYRINGE) TELECOMMUNICATION_LINE_IN_SERVICE = UnitDescriptor('telecommunication line in service', 'T0', '''''') ALL_UNITS.append(TELECOMMUNICATION_LINE_IN_SERVICE) THOUSAND_POUND_GROSS = UnitDescriptor('thousand pound gross', 'T1', '''''') ALL_UNITS.append(THOUSAND_POUND_GROSS) THOUSAND_PIECE = UnitDescriptor('thousand piece', 'T3', '''''') ALL_UNITS.append(THOUSAND_PIECE) THOUSAND_BAG = UnitDescriptor('thousand bag', 'T4', '''''') ALL_UNITS.append(THOUSAND_BAG) THOUSAND_CASING = UnitDescriptor('thousand casing', 'T5', '''''') ALL_UNITS.append(THOUSAND_CASING) THOUSAND_GALLON_US = UnitDescriptor('thousand gallon (US)', 'T6', '''''') ALL_UNITS.append(THOUSAND_GALLON_US) THOUSAND_IMPRESSION = UnitDescriptor('thousand impression', 'T7', '''''') ALL_UNITS.append(THOUSAND_IMPRESSION) THOUSAND_LINEAR_INCH = UnitDescriptor('thousand linear inch', 'T8', '''''') ALL_UNITS.append(THOUSAND_LINEAR_INCH) TENTH_CUBIC_FOOT = UnitDescriptor('tenth cubic foot', 'TA', '''''') ALL_UNITS.append(TENTH_CUBIC_FOOT) KILOAMPERE_HOUR_THOUSAND_AMPERE_HOUR = UnitDescriptor('kiloampere hour (thousand ampere hour)', 'TAH', '''kA·h''') ALL_UNITS.append(KILOAMPERE_HOUR_THOUSAND_AMPERE_HOUR) TOTAL_ACID_NUMBER = UnitDescriptor('total acid number', 'TAN', '''TAN''') ALL_UNITS.append(TOTAL_ACID_NUMBER) TRUCKLOAD = UnitDescriptor('truckload', 'TC', '''''') ALL_UNITS.append(TRUCKLOAD) THERM = UnitDescriptor('therm', 'TD', '''''') ALL_UNITS.append(THERM) TOTE = UnitDescriptor('tote', 'TE', '''''') ALL_UNITS.append(TOTE) TEN_SQUARE_YARD = UnitDescriptor('ten square yard', 'TF', '''''') ALL_UNITS.append(TEN_SQUARE_YARD) THOUSAND_SQUARE_INCH = UnitDescriptor('thousand square inch', 'TI', '''''') ALL_UNITS.append(THOUSAND_SQUARE_INCH) METRIC_TON_INCLUDING_CONTAINER = UnitDescriptor('metric ton, including container', 'TIC', '''''') ALL_UNITS.append(METRIC_TON_INCLUDING_CONTAINER) METRIC_TON_INCLUDING_INNER_PACKAGING = UnitDescriptor('metric ton, including inner packaging', 'TIP', '''''') ALL_UNITS.append(METRIC_TON_INCLUDING_INNER_PACKAGING) THOUSAND_SQUARE_CENTIMETRE = UnitDescriptor('thousand square centimetre', 'TJ', '''''') ALL_UNITS.append(THOUSAND_SQUARE_CENTIMETRE) TANK_RECTANGULAR = UnitDescriptor('tank, rectangular', 'TK', '''''') ALL_UNITS.append(TANK_RECTANGULAR) TONNE_KILOMETRE = UnitDescriptor('tonne kilometre', 'TKM', '''t·km''') ALL_UNITS.append(TONNE_KILOMETRE) THOUSAND_FOOT_LINEAR = UnitDescriptor('thousand foot (linear)', 'TL', '''''') ALL_UNITS.append(THOUSAND_FOOT_LINEAR) 
KILOGRAM_OF_IMPORTED_MEAT_LESS_OFFAL = UnitDescriptor('kilogram of imported meat, less offal', 'TMS', '''''') ALL_UNITS.append(KILOGRAM_OF_IMPORTED_MEAT_LESS_OFFAL) TIN = UnitDescriptor('tin', 'TN', '''''') ALL_UNITS.append(TIN) TONNE_METRIC_TON = UnitDescriptor('tonne (metric ton)', 'TNE', '''t''') ALL_UNITS.append(TONNE_METRIC_TON) TEN_PACK = UnitDescriptor('ten pack', 'TP', '''''') ALL_UNITS.append(TEN_PACK) TEETH_PER_INCH = UnitDescriptor('teeth per inch', 'TPI', '''TPI''') ALL_UNITS.append(TEETH_PER_INCH) TEN_PAIR = UnitDescriptor('ten pair', 'TPR', '''''') ALL_UNITS.append(TEN_PAIR) THOUSAND_FOOT = UnitDescriptor('thousand foot', 'TQ', '''''') ALL_UNITS.append(THOUSAND_FOOT) THOUSAND_CUBIC_METRE_PER_DAY = UnitDescriptor('thousand cubic metre per day', 'TQD', '''km³/d''') ALL_UNITS.append(THOUSAND_CUBIC_METRE_PER_DAY) TEN_SQUARE_FOOT = UnitDescriptor('ten square foot', 'TR', '''''') ALL_UNITS.append(TEN_SQUARE_FOOT) TRILLION_EUR = UnitDescriptor('trillion (EUR)', 'TRL', '''''') ALL_UNITS.append(TRILLION_EUR) THOUSAND_SQUARE_FOOT = UnitDescriptor('thousand square foot', 'TS', '''''') ALL_UNITS.append(THOUSAND_SQUARE_FOOT) TONNE_OF_SUBSTANCE_90_PERCENT_DRY = UnitDescriptor('tonne of substance 90 % dry', 'TSD', '''''') ALL_UNITS.append(TONNE_OF_SUBSTANCE_90_PERCENT_DRY) TON_OF_STEAM_PER_HOUR = UnitDescriptor('ton of steam per hour', 'TSH', '''''') ALL_UNITS.append(TON_OF_STEAM_PER_HOUR) TEN_SET = UnitDescriptor('ten set', 'TST', '''''') ALL_UNITS.append(TEN_SET) THOUSAND_LINEAR_METRE = UnitDescriptor('thousand linear metre', 'TT', '''''') ALL_UNITS.append(THOUSAND_LINEAR_METRE) TEN_THOUSAND_STICKS = UnitDescriptor('ten thousand sticks', 'TTS', '''''') ALL_UNITS.append(TEN_THOUSAND_STICKS) TUBE = UnitDescriptor('tube', 'TU', '''''') ALL_UNITS.append(TUBE) THOUSAND_KILOGRAM = UnitDescriptor('thousand kilogram', 'TV', '''''') ALL_UNITS.append(THOUSAND_KILOGRAM) THOUSAND_SHEET = UnitDescriptor('thousand sheet', 'TW', '''''') ALL_UNITS.append(THOUSAND_SHEET) TANK_CYLINDRICAL = UnitDescriptor('tank, cylindrical', 'TY', '''''') ALL_UNITS.append(TANK_CYLINDRICAL) TREATMENT = UnitDescriptor('treatment', 'U1', '''''') ALL_UNITS.append(TREATMENT) TABLET = UnitDescriptor('tablet', 'U2', '''''') ALL_UNITS.append(TABLET) TORR = UnitDescriptor('torr', 'UA', '''Torr''') ALL_UNITS.append(TORR) TELECOMMUNICATION_LINE_IN_SERVICE_AVERAGE = UnitDescriptor('telecommunication line in service average', 'UB', '''''') ALL_UNITS.append(TELECOMMUNICATION_LINE_IN_SERVICE_AVERAGE) TELECOMMUNICATION_PORT = UnitDescriptor('telecommunication port', 'UC', '''''') ALL_UNITS.append(TELECOMMUNICATION_PORT) TENTH_MINUTE = UnitDescriptor('tenth minute', 'UD', '''''') ALL_UNITS.append(TENTH_MINUTE) TENTH_HOUR = UnitDescriptor('tenth hour', 'UE', '''''') ALL_UNITS.append(TENTH_HOUR) USAGE_PER_TELECOMMUNICATION_LINE_AVERAGE = UnitDescriptor('usage per telecommunication line average', 'UF', '''''') ALL_UNITS.append(USAGE_PER_TELECOMMUNICATION_LINE_AVERAGE) TEN_THOUSAND_YARD = UnitDescriptor('ten thousand yard', 'UH', '''''') ALL_UNITS.append(TEN_THOUSAND_YARD) MILLION_UNIT = UnitDescriptor('million unit', 'UM', '''''') ALL_UNITS.append(MILLION_UNIT) VOLT_AMPERE_PER_KILOGRAM = UnitDescriptor('volt - ampere per kilogram', 'VA', '''V·A / kg''') ALL_UNITS.append(VOLT_AMPERE_PER_KILOGRAM) VIAL = UnitDescriptor('vial', 'VI', '''''') ALL_UNITS.append(VIAL) VOLT = UnitDescriptor('volt', 'VLT', '''V''') ALL_UNITS.append(VOLT) PERCENT_VOLUME = UnitDescriptor('percent volume', 'VP', '''''') ALL_UNITS.append(PERCENT_VOLUME) BULK = 
UnitDescriptor('bulk', 'VQ', '''''') ALL_UNITS.append(BULK) VISIT = UnitDescriptor('visit', 'VS', '''''') ALL_UNITS.append(VISIT) WET_KILO = UnitDescriptor('wet kilo', 'W2', '''''') ALL_UNITS.append(WET_KILO) TWO_WEEK = UnitDescriptor('two week', 'W4', '''''') ALL_UNITS.append(TWO_WEEK) WATT_PER_KILOGRAM = UnitDescriptor('watt per kilogram', 'WA', '''W/kg''') ALL_UNITS.append(WATT_PER_KILOGRAM) WET_POUND = UnitDescriptor('wet pound', 'WB', '''''') ALL_UNITS.append(WET_POUND) CORD = UnitDescriptor('cord', 'WCD', '''''') ALL_UNITS.append(CORD) WET_TON = UnitDescriptor('wet ton', 'WE', '''''') ALL_UNITS.append(WET_TON) WEBER = UnitDescriptor('weber', 'WEB', '''Wb''') ALL_UNITS.append(WEBER) WEEK = UnitDescriptor('week', 'WEE', '''wk''') ALL_UNITS.append(WEEK) WINE_GALLON = UnitDescriptor('wine gallon', 'WG', '''''') ALL_UNITS.append(WINE_GALLON) WHEEL = UnitDescriptor('wheel', 'WH', '''''') ALL_UNITS.append(WHEEL) WATT_HOUR = UnitDescriptor('watt hour', 'WHR', '''W·h''') ALL_UNITS.append(WATT_HOUR) WEIGHT_PER_SQUARE_INCH = UnitDescriptor('weight per square inch', 'WI', '''''') ALL_UNITS.append(WEIGHT_PER_SQUARE_INCH) WORKING_MONTH = UnitDescriptor('working month', 'WM', '''''') ALL_UNITS.append(WORKING_MONTH) WRAP = UnitDescriptor('wrap', 'WR', '''''') ALL_UNITS.append(WRAP) STANDARD = UnitDescriptor('standard', 'WSD', '''std''') ALL_UNITS.append(STANDARD) WATT = UnitDescriptor('watt', 'WTT', '''W''') ALL_UNITS.append(WATT) MILLILITRE_OF_WATER = UnitDescriptor('millilitre of water', 'WW', '''''') ALL_UNITS.append(MILLILITRE_OF_WATER) GUNTER_S_CHAIN = UnitDescriptor('Gunter\'s chain', 'X1', '''ch (UK)''') ALL_UNITS.append(GUNTER_S_CHAIN) SQUARE_YARD = UnitDescriptor('square yard', 'YDK', '''yd²''') ALL_UNITS.append(SQUARE_YARD) CUBIC_YARD = UnitDescriptor('cubic yard', 'YDQ', '''yd³''') ALL_UNITS.append(CUBIC_YARD) HUNDRED_LINEAR_YARD = UnitDescriptor('hundred linear yard', 'YL', '''''') ALL_UNITS.append(HUNDRED_LINEAR_YARD) YARD = UnitDescriptor('yard', 'YRD', '''yd''') ALL_UNITS.append(YARD) TEN_YARD = UnitDescriptor('ten yard', 'YT', '''''') ALL_UNITS.append(TEN_YARD) LIFT_VAN = UnitDescriptor('lift van', 'Z1', '''''') ALL_UNITS.append(LIFT_VAN) HANGING_CONTAINER = UnitDescriptor('hanging container', 'Z11', '''''') ALL_UNITS.append(HANGING_CONTAINER) CHEST = UnitDescriptor('chest', 'Z2', '''''') ALL_UNITS.append(CHEST) CASK = UnitDescriptor('cask', 'Z3', '''''') ALL_UNITS.append(CASK) HOGSHEAD = UnitDescriptor('hogshead', 'Z4', '''''') ALL_UNITS.append(HOGSHEAD) LUG = UnitDescriptor('lug', 'Z5', '''''') ALL_UNITS.append(LUG) CONFERENCE_POINT = UnitDescriptor('conference point', 'Z6', '''''') ALL_UNITS.append(CONFERENCE_POINT) NEWSPAGE_AGATE_LINE = UnitDescriptor('newspage agate line', 'Z8', '''''') ALL_UNITS.append(NEWSPAGE_AGATE_LINE) PAGE = UnitDescriptor('page', 'ZP', '''''') ALL_UNITS.append(PAGE) MUTUALLY_DEFINED = UnitDescriptor('mutually defined', 'ZZ', '''''') ALL_UNITS.append(MUTUALLY_DEFINED) METRE_WEEK = UnitDescriptor('Metre Week', 'MRW', '''m·wk''') ALL_UNITS.append(METRE_WEEK) SQUARE_METRE_WEEK = UnitDescriptor('Square Metre Week', 'MKW', '''m²· wk''') ALL_UNITS.append(SQUARE_METRE_WEEK) CUBIC_METRE_WEEK = UnitDescriptor('Cubic Metre Week', 'MQW', '''m³·wk''') ALL_UNITS.append(CUBIC_METRE_WEEK) PIECE_WEEK = UnitDescriptor('Piece Week', 'HWE', '''piece·k''') ALL_UNITS.append(PIECE_WEEK) METRE_DAY = UnitDescriptor('Metre Day', 'MRD', '''m·day''') ALL_UNITS.append(METRE_DAY) SQUARE_METRE_DAY = UnitDescriptor('Square Metre Day', 'MKD', '''m²·d''') 
ALL_UNITS.append(SQUARE_METRE_DAY) CUBIC_METRE_DAY = UnitDescriptor('Cubic Metre Day', 'MQD', '''m³·d''') ALL_UNITS.append(CUBIC_METRE_DAY) PIECE_DAY = UnitDescriptor('Piece Day', 'HAD', '''piece·d''') ALL_UNITS.append(PIECE_DAY) METRE_MONTH = UnitDescriptor('Metre Month', 'MRM', '''m·mo''') ALL_UNITS.append(METRE_MONTH) SQUARE_METRE_MONTH = UnitDescriptor('Square Metre Month', 'MKM', '''m²·mo''') ALL_UNITS.append(SQUARE_METRE_MONTH) CUBIC_METRE_MONTH = UnitDescriptor('Cubic Metre Month', 'MQM', '''m³·mo''') ALL_UNITS.append(CUBIC_METRE_MONTH) PIECE_MONTH = UnitDescriptor('Piece Month', 'HMO', '''piece·mo''') ALL_UNITS.append(PIECE_MONTH) DECIBEL_WATT = UnitDescriptor('Decibel watt', 'DBW', '''dBW''') ALL_UNITS.append(DECIBEL_WATT) DECIBEL_MILLIWATTS = UnitDescriptor('Decibel-milliwatts', 'DBM', '''dBm''') ALL_UNITS.append(DECIBEL_MILLIWATTS) FORMAZIN_NEPHELOMETRIC_UNIT = UnitDescriptor('Formazin nephelometric unit', 'FNU', '''FNU''') ALL_UNITS.append(FORMAZIN_NEPHELOMETRIC_UNIT) NEPHELOMETRIC_TURBIDITY_UNIT = UnitDescriptor('Nephelometric turbidity unit', 'NTU', '''NTU''') ALL_UNITS.append(NEPHELOMETRIC_TURBIDITY_UNIT) # Convenience aliases. MINUTE = MINUTE_UNIT_OF_TIME SECOND = SECOND_UNIT_OF_TIME # pylint: enable=line-too-long class UnitLookup(object): """Facilitates user-friendly access to units.""" def __init__(self, lookup): self._lookup = lookup def __call__(self, name_or_suffix): """Provides instantiation-like access for units module.""" return self._lookup[name_or_suffix] UNITS_BY_NAME = {u.name: u for u in ALL_UNITS} UNITS_BY_SUFFIX = {u.suffix: u for u in ALL_UNITS} del ALL_UNITS UNITS_BY_ALL = {} UNITS_BY_ALL.update(UNITS_BY_NAME) UNITS_BY_ALL.update(UNITS_BY_SUFFIX) UNITS_BY_ALL[None] = NONE Unit = UnitLookup(UNITS_BY_ALL) # pylint: disable=invalid-name
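
# Illustrative usage sketch for the lookup helpers above (a hedged addition,
# not part of the original module). It relies only on names defined here;
# the `name`/`suffix` attribute names are inferred from the UNITS_BY_NAME /
# UNITS_BY_SUFFIX comprehensions.
if __name__ == '__main__':
    # Lookups go through UNITS_BY_ALL, so either the full unit name or its
    # suffix resolves to the same UnitDescriptor instance.
    watt = Unit('watt')
    assert watt is WATT
    assert Unit(None) is NONE      # None maps to the NONE placeholder
    print(watt.name, watt.suffix)  # attribute names per the dict comprehensions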
apache-2.0
PythonSanSebastian/docstamp
docstamp/file_utils.py
1
5624
# coding=utf-8 # ------------------------------------------------------------------------------- # Author: Alexandre Manhaes Savio <[email protected]> # Grupo de Inteligencia Computational <www.ehu.es/ccwintco> # Universidad del Pais Vasco UPV/EHU # # 2015, Alexandre Manhaes Savio # Use this at your own risk! # ------------------------------------------------------------------------------- import os import tempfile import logging from glob import glob from docstamp.config import get_temp_dir log = logging.getLogger(__name__) def get_extension(filepath, check_if_exists=False): """Return the extension of fpath. Parameters ---------- fpath: string File name or path check_if_exists: bool Returns ------- str The extension of the file name or path """ if check_if_exists: if not os.path.exists(filepath): err = 'File not found: ' + filepath log.error(err) raise IOError(err) try: rest, ext = os.path.splitext(filepath) except: raise else: return ext def add_extension_if_needed(filepath, ext, check_if_exists=False): """Add the extension ext to fpath if it doesn't have it. Parameters ---------- filepath: str File name or path ext: str File extension check_if_exists: bool Returns ------- File name or path with extension added, if needed. """ if not filepath.endswith(ext): filepath += ext if check_if_exists: if not os.path.exists(filepath): err = 'File not found: ' + filepath log.error(err) raise IOError(err) return filepath def remove_ext(filepath): """Removes the extension of the file. Parameters ---------- filepath: str File path or name Returns ------- str File path or name without extension """ return filepath[:filepath.rindex(get_extension(filepath))] def get_tempfile(suffix='.txt', dirpath=None): """ Return a temporary file with the given suffix within dirpath. If dirpath is None, will look for a temporary folder in your system. Parameters ---------- suffix: str Temporary file name suffix dirpath: str Folder path where create the temporary file Returns ------- temp_filepath: str The path to the temporary path """ if dirpath is None: dirpath = get_temp_dir() return tempfile.NamedTemporaryFile(suffix=suffix, dir=dirpath) def cleanup(workdir, extension): """ Remove the files in workdir that have the given extension. Parameters ---------- workdir: Folder path from where to clean the files. extension: str File extension without the dot, e.g., 'txt' """ [os.remove(f) for f in glob(os.path.join(workdir, '*.' + extension))] def mkdir(dirpath): """Create a folder in `dirpath` if it does'nt exist.""" if not os.path.exists(dirpath): os.mkdir(dirpath) def csv_to_json(csv_filepath, json_filepath, fieldnames, ignore_first_line=True): """ Convert a CSV file in `csv_filepath` into a JSON file in `json_filepath`. Parameters ---------- csv_filepath: str Path to the input CSV file. json_filepath: str Path to the output JSON file. Will be overwritten if exists. fieldnames: List[str] Names of the fields in the CSV file. ignore_first_line: bool """ import csv import json csvfile = open(csv_filepath, 'r') jsonfile = open(json_filepath, 'w') reader = csv.DictReader(csvfile, fieldnames) rows = [] if ignore_first_line: next(reader) for row in reader: rows.append(row) json.dump(rows, jsonfile) jsonfile.close() csvfile.close() def write_to_file(file_path, content, encoding=None): """ Write `content` inside the file in `file_path` with the given encoding. Parameters ---------- file_path: str Path to the output file. Will be overwritten if exists. content: str The content you want in the file. 
encoding: str The name of the encoding. """ try: # TODO: check if in Python2 this should be this way # it's possible that we have to make this function more complex # to check type(content) and depending on that set 'w' without enconde # or 'wb' with encode. with open(file_path, "wb") as f: f.write(content.encode(encoding)) except: log.exception('Error writing to file in {}'.format(file_path)) raise def replace_file_content(filepath, old, new, max=1): """ Modify the content of `filepath`, replacing `old` for `new`. Parameters ---------- filepath: str Path to the file to be modified. It will be overwritten. old: str This is old substring to be replaced. new: str This is new substring, which would replace old substring. max: int If larger than 0, Only the first `max` occurrences are replaced. """ with open(filepath, 'r') as f: content = f.read() content = content.replace(old, new, max) with open(filepath, 'w') as f: f.write(content) def cleanup_docstamp_output(output_dir=''): """ Remove the 'tmp*.aux', 'tmp*.out' and 'tmp*.log' files in `output_dir`. :param output_dir: """ suffixes = ['aux', 'out', 'log'] files = [f for suf in suffixes for f in glob(os.path.join(output_dir, 'tmp*.{}'.format(suf)))] [os.remove(file) for file in files]
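
if __name__ == '__main__':
    # Minimal, self-contained usage sketch of write_to_file + csv_to_json
    # (a hedged addition, not part of the original module; the file names and
    # sample data are illustrative and are created in the current directory).
    write_to_file('people.csv', 'name,email\nada,ada@example.com\n',
                  encoding='utf-8')
    csv_to_json('people.csv', 'people.json',
                fieldnames=['name', 'email'],  # column names for DictReader
                ignore_first_line=True)        # skip the CSV header row
    print(open('people.json').read())          # e.g. [{"name": "ada", ...}]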
apache-2.0
nirs/vdsm
tests/storage/glustervolume_test.py
2
6517
# # Copyright 2017 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # # Refer to the README and COPYING files for full details of the license # from __future__ import absolute_import from __future__ import division from contextlib import contextmanager from monkeypatch import MonkeyPatchScope from testlib import VdsmTestCase from storage.storagefakelib import FakeStorageDomainCache from vdsm.gluster import exception from vdsm.storage import glusterVolume class FakeSD(object): def getRealPath(self): return "host.example.com:/volume" class FakeSuperVdsm(object): def __init__(self, gluster_volume_info=None): self._gluster_volume_info = gluster_volume_info def getProxy(self): return self def glusterVolumeInfo(self, volname, volFileserver): if self._gluster_volume_info is not None: return self._gluster_volume_info else: raise exception.GlusterException class GlusterVolume(glusterVolume.GlusterVolume): def __init__(self): pass # Allow creating a volume object in a test @property def sdUUID(self): return "sd_id" def getVolumePath(self): return "/rhev/data-center/mnt/glusterSD/host.example.com:_volume/" \ "sd_id/images/img_id/vol_id" class TestVolumeInfo(VdsmTestCase): @contextmanager def make_env(self, gluster_volume_info): sdcache = FakeStorageDomainCache() sdcache.domains['sd_id'] = FakeSD() svdsm = FakeSuperVdsm(gluster_volume_info) with MonkeyPatchScope([(glusterVolume, 'sdCache', sdcache), (glusterVolume, 'svdsm', svdsm)]): yield def test_no_data(self): expected = { "type": "network", "path": "volume/sd_id/images/img_id/vol_id", "protocol": "gluster", "hosts": [ { "name": "host.example.com", "transport": "tcp", "port": "0" } ] } self.check(None, expected) def test_parse_data(self): gluster_volume_info = { "volume": { "bricks": [ "host1.example.com:/rhgs/volume", "host2.example.com:/rhgs/volume", "host3.example.com:/rhgs/volume" ], "transportType": ["TCP"] } } expected = { "type": "network", "path": "volume/sd_id/images/img_id/vol_id", "protocol": "gluster", "hosts": [ { "name": "host.example.com", "transport": "tcp", "port": "0" }, { "name": "host1.example.com", "transport": "tcp", "port": "0" }, { "name": "host2.example.com", "transport": "tcp", "port": "0" }, { "name": "host3.example.com", "transport": "tcp", "port": "0" } ] } self.check(gluster_volume_info, expected) def test_unique_hosts(self): # brick, retrieved from mount path, should be # excluded from list of bricks, retrieved # using call to gluster. 
gluster_volume_info = { "volume": { "bricks": [ "host.example.com:/rhgs/volume", "host2.example.com:/rhgs/volume", "host3.example.com:/rhgs/volume" ], "transportType": ["TCP"] } } expected = { "type": "network", "path": "volume/sd_id/images/img_id/vol_id", "protocol": "gluster", "hosts": [ { "name": "host.example.com", "transport": "tcp", "port": "0" }, { "name": "host2.example.com", "transport": "tcp", "port": "0" }, { "name": "host3.example.com", "transport": "tcp", "port": "0" } ] } self.check(gluster_volume_info, expected) def test_rdma(self): gluster_volume_info = { "volume": { "bricks": [ "host.example.com:/rhgs/volume", "host2.example.com:/rhgs/volume", "host3.example.com:/rhgs/volume" ], "transportType": ["RDMA"] } } expected = { "type": "network", "path": "volume/sd_id/images/img_id/vol_id", "protocol": "gluster", "hosts": [ { "name": "host.example.com", "transport": "rdma", "port": "0" }, { "name": "host2.example.com", "transport": "rdma", "port": "0" }, { "name": "host3.example.com", "transport": "rdma", "port": "0" } ] } self.check(gluster_volume_info, expected) def check(self, gluster_volume_info, expected): with self.make_env(gluster_volume_info): gluster_volume_info = GlusterVolume().getVmVolumeInfo() self.assertEqual(gluster_volume_info, expected)
gpl-2.0
raccoongang/edx-platform
common/djangoapps/microsite_configuration/tests/backends/test_database.py
6
8266
""" Test Microsite database backends. """ import logging from mock import patch from django.conf import settings from microsite_configuration.backends.base import ( BaseMicrositeBackend, BaseMicrositeTemplateBackend, ) from microsite_configuration import microsite from microsite_configuration.models import ( Microsite, MicrositeHistory, MicrositeTemplate, ) from microsite_configuration.tests.tests import ( DatabaseMicrositeTestCase, ) from microsite_configuration.tests.factories import ( SiteFactory, MicrositeFactory, MicrositeTemplateFactory, ) log = logging.getLogger(__name__) @patch( 'microsite_configuration.microsite.BACKEND', microsite.get_backend( 'microsite_configuration.backends.database.DatabaseMicrositeBackend', BaseMicrositeBackend ) ) class DatabaseMicrositeBackendTests(DatabaseMicrositeTestCase): """ Go through and test the DatabaseMicrositeBackend class """ def setUp(self): super(DatabaseMicrositeBackendTests, self).setUp() self.addCleanup(microsite.clear) def test_get_value(self): """ Tests microsite.get_value works as expected. """ microsite.set_by_domain(self.microsite.site.domain) self.assertEqual(microsite.get_value('email_from_address'), self.microsite.values['email_from_address']) def test_is_request_in_microsite(self): """ Tests microsite.is_request_in_microsite works as expected. """ microsite.set_by_domain(self.microsite.site.domain) self.assertTrue(microsite.is_request_in_microsite()) def test_get_dict(self): """ Tests microsite.get_dict works as expected. """ microsite.set_by_domain(self.microsite.site.domain) self.assertEqual(microsite.get_dict('nested_dict'), self.microsite.values['nested_dict']) def test_has_override_value(self): """ Tests microsite.has_override_value works as expected. """ microsite.set_by_domain(self.microsite.site.domain) self.assertTrue(microsite.has_override_value('platform_name')) def test_get_value_for_org(self): """ Tests microsite.get_value_for_org works as expected. """ microsite.set_by_domain(self.microsite.site.domain) self.assertEqual( microsite.get_value_for_org(self.microsite.get_organizations()[0], 'platform_name'), self.microsite.values['platform_name'] ) def test_get_all_orgs(self): """ Tests microsite.get_all_orgs works as expected. """ microsite.set_by_domain(self.microsite.site.domain) self.assertEqual( microsite.get_all_orgs(), set(self.microsite.get_organizations()) ) def test_clear(self): """ Tests microsite.clear works as expected. """ microsite.set_by_domain(self.microsite.site.domain) self.assertEqual( microsite.get_value('platform_name'), self.microsite.values['platform_name'] ) microsite.clear() self.assertIsNone(microsite.get_value('platform_name')) def test_enable_microsites_pre_startup(self): """ Tests microsite.test_enable_microsites_pre_startup works as expected. 
""" # remove microsite root directory paths first settings.DEFAULT_TEMPLATE_ENGINE['DIRS'] = [ path for path in settings.DEFAULT_TEMPLATE_ENGINE['DIRS'] if path != settings.MICROSITE_ROOT_DIR ] with patch.dict('django.conf.settings.FEATURES', {'USE_MICROSITES': False}): microsite.enable_microsites_pre_startup(log) self.assertNotIn(settings.MICROSITE_ROOT_DIR, settings.DEFAULT_TEMPLATE_ENGINE['DIRS']) with patch.dict('django.conf.settings.FEATURES', {'USE_MICROSITES': True}): microsite.enable_microsites_pre_startup(log) self.assertIn(settings.MICROSITE_ROOT_DIR, settings.DEFAULT_TEMPLATE_ENGINE['DIRS']) self.assertIn(settings.MICROSITE_ROOT_DIR, settings.MAKO_TEMPLATES['main']) @patch('edxmako.paths.add_lookup') def test_enable_microsites(self, add_lookup): """ Tests microsite.enable_microsites works as expected. """ # remove microsite root directory paths first settings.STATICFILES_DIRS = [ path for path in settings.STATICFILES_DIRS if path != settings.MICROSITE_ROOT_DIR ] with patch.dict('django.conf.settings.FEATURES', {'USE_MICROSITES': False}): microsite.enable_microsites(log) self.assertNotIn(settings.MICROSITE_ROOT_DIR, settings.STATICFILES_DIRS) add_lookup.assert_not_called() with patch.dict('django.conf.settings.FEATURES', {'USE_MICROSITES': True}): microsite.enable_microsites(log) self.assertIn(settings.MICROSITE_ROOT_DIR, settings.STATICFILES_DIRS) def test_get_all_configs(self): """ Tests microsite.get_all_config works as expected. """ microsite.set_by_domain(self.microsite.site.domain) configs = microsite.get_all_config() self.assertEqual(len(configs.keys()), 1) self.assertEqual(configs[self.microsite.key], self.microsite.values) def test_set_config_by_domain(self): """ Tests microsite.set_config_by_domain works as expected. """ microsite.clear() # if microsite config does not exist microsite.set_by_domain('unknown') self.assertIsNone(microsite.get_value('platform_name')) # if no microsite exists Microsite.objects.all().delete() microsite.clear() microsite.set_by_domain('unknown') self.assertIsNone(microsite.get_value('platform_name')) # if microsite site has no organization it should raise exception new_microsite = MicrositeFactory.create(key="test_microsite2") new_microsite.site = SiteFactory.create(domain='test.microsite2.com') # This would update microsite so we test MicrositeHistory has old microsite new_microsite.save() self.assertEqual(MicrositeHistory.objects.all().count(), 2) with self.assertRaises(Exception): microsite.set_by_domain('test.microsite2.com') def test_has_configuration_set(self): """ Tests microsite.has_configuration_set works as expected on this backend. """ self.assertTrue(microsite.BACKEND.has_configuration_set()) Microsite.objects.all().delete() self.assertFalse(microsite.BACKEND.has_configuration_set()) @patch( 'microsite_configuration.microsite.TEMPLATES_BACKEND', microsite.get_backend( 'microsite_configuration.backends.database.DatabaseMicrositeTemplateBackend', BaseMicrositeTemplateBackend ) ) class DatabaseMicrositeTemplateBackendTests(DatabaseMicrositeTestCase): """ Go through and test the DatabaseMicrositeTemplateBackend class """ def setUp(self): super(DatabaseMicrositeTemplateBackendTests, self).setUp() MicrositeTemplateFactory.create( microsite=self.microsite, template_uri='about.html', template=""" <html> <body> About this microsite. 
</body> </html> """, ) def tearDown(self): super(DatabaseMicrositeTemplateBackendTests, self).tearDown() microsite.clear() def test_microsite_get_template_when_no_template_exists(self): """ Test microsite.get_template return None if there is not template in DB. """ MicrositeTemplate.objects.all().delete() microsite.set_by_domain(self.microsite.site.domain) template = microsite.get_template('about.html') self.assertIsNone(template) def test_microsite_get_template(self): """ Test microsite.get_template return appropriate template. """ microsite.set_by_domain(self.microsite.site.domain) template = microsite.get_template('about.html') self.assertIn('About this microsite', template.render())
agpl-3.0
rbramwell/runbook
src/web/reactionforms/heroku-scale-out/__init__.py
3
1399
######################################################################
# Cloud Routes Web Application
# -------------------------------------------------------------------
# Reaction - Forms Class
######################################################################

from wtforms import Form
from wtforms import TextField, PasswordField, SelectField, SelectMultipleField, HiddenField
from wtforms.validators import DataRequired, ValidationError, Email, Length, Required, URL
from wtforms.validators import IPAddress, NumberRange, EqualTo

from ..base import BaseReactForm


class ReactForm(BaseReactForm):

    ''' Class that creates a Reaction form for the dashboard '''

    apikey = TextField(
        "API Key",
        validators=[DataRequired(message='API Key is a required field')])
    appname = TextField(
        "Application Name",
        validators=[DataRequired(message='Application Name is a required field')])
    call_on = SelectField(
        "Call On",
        choices=[('false', 'False Monitors'), ('true', 'True Monitors')],
        validators=[DataRequired(message='Call On is a required field')])
    dyno_type = TextField(
        "Dyno Type",
        validators=[DataRequired(message='Dyno Type is a required field')])
    max_quantity = TextField(
        "Maximum Dynos",
        validators=[DataRequired(message="Maximum Dyno is a required field"),
                    NumberRange(min=1, max=None,
                                message="Must be a number between 1 - 99999")])


if __name__ == '__main__':
    pass
agpl-3.0
chienlieu2017/it_management
project/it_management/__manifest__.py
1
1817
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright 2009-2017 4Leaf Team
#
##############################################################################
{
    'name': 'IT Management Module',
    'version': '1.0',
    'category': 'IT',
    'description': """
IT Management Module
""",
    'author': '4Leaf Team',
    'website': '',
    'depends': [
        'base',
    ],
    'data': [
        # ============================================================
        # SECURITY SETTING - GROUP - PROFILE
        # ============================================================
        # 'security/',

        # ============================================================
        # DATA
        # ============================================================
        # 'data/',
        'data/res_company_data.xml',

        # WIZARD
        # ============================================================
        # 'wizard/',

        # REPORT
        # ============================================================
        # 'report/',

        # ============================================================
        # VIEWS
        # ============================================================
        # 'view/',

        # ============================================================
        # MENU
        # ============================================================
        # 'menu/',

        # ============================================================
        # FUNCTION USED TO UPDATE DATA LIKE POST OBJECT
        # ============================================================
        # "data/ubiz_spa_update_functions_data.xml",
    ],
    'test': [],
    'demo': [],
    'installable': True,
    'active': False,
    'application': True,
}
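
# For context, the minimal addon layout this manifest implies, inferred only
# from the module path and the single enabled 'data' entry above (a hedged,
# illustrative sketch; the real repository may contain more):
#
#   it_management/
#       __manifest__.py
#       data/
#           res_company_data.xml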
gpl-3.0
sjperkins/tensorflow
tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py
76
42915
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for functional style sequence-to-sequence models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import math import random import numpy as np from tensorflow.contrib.legacy_seq2seq.python.ops import seq2seq as seq2seq_lib from tensorflow.contrib.rnn.python.ops import core_rnn_cell from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import init_ops from tensorflow.python.ops import nn_impl from tensorflow.python.ops import rnn from tensorflow.python.ops import rnn_cell from tensorflow.python.ops import state_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import adam class Seq2SeqTest(test.TestCase): def testRNNDecoder(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): inp = [constant_op.constant(0.5, shape=[2, 2])] * 2 _, enc_state = rnn.static_rnn( rnn_cell.GRUCell(2), inp, dtype=dtypes.float32) dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3 cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4) dec, mem = seq2seq_lib.rnn_decoder(dec_inp, enc_state, cell) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testBasicRNNSeq2Seq(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): inp = [constant_op.constant(0.5, shape=[2, 2])] * 2 dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3 cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4) dec, mem = seq2seq_lib.basic_rnn_seq2seq(inp, dec_inp, cell) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testTiedRNNSeq2Seq(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): inp = [constant_op.constant(0.5, shape=[2, 2])] * 2 dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3 cell = core_rnn_cell.OutputProjectionWrapper(rnn_cell.GRUCell(2), 4) dec, mem = seq2seq_lib.tied_rnn_seq2seq(inp, dec_inp, cell) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) 
self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual(1, len(res)) self.assertEqual((2, 2), res[0].shape) def testEmbeddingRNNDecoder(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): inp = [constant_op.constant(0.5, shape=[2, 2])] * 2 cell_fn = lambda: rnn_cell.BasicLSTMCell(2) cell = cell_fn() _, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32) dec_inp = [ constant_op.constant( i, dtypes.int32, shape=[2]) for i in range(3) ] # Use a new cell instance since the attention decoder uses a # different variable scope. dec, mem = seq2seq_lib.embedding_rnn_decoder( dec_inp, enc_state, cell_fn(), num_symbols=4, embedding_size=2) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 2), res[0].shape) res = sess.run([mem]) self.assertEqual(1, len(res)) self.assertEqual((2, 2), res[0].c.shape) self.assertEqual((2, 2), res[0].h.shape) def testEmbeddingRNNSeq2Seq(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): enc_inp = [ constant_op.constant( 1, dtypes.int32, shape=[2]) for i in range(2) ] dec_inp = [ constant_op.constant( i, dtypes.int32, shape=[2]) for i in range(3) ] cell_fn = lambda: rnn_cell.BasicLSTMCell(2) cell = cell_fn() dec, mem = seq2seq_lib.embedding_rnn_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].c.shape) self.assertEqual((2, 2), res[0].h.shape) # Test with state_is_tuple=False. with variable_scope.variable_scope("no_tuple"): cell_nt = rnn_cell.BasicLSTMCell(2, state_is_tuple=False) dec, mem = seq2seq_lib.embedding_rnn_seq2seq( enc_inp, dec_inp, cell_nt, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 4), res[0].shape) # Test externally provided output projection. w = variable_scope.get_variable("proj_w", [2, 5]) b = variable_scope.get_variable("proj_b", [5]) with variable_scope.variable_scope("proj_seq2seq"): dec, _ = seq2seq_lib.embedding_rnn_seq2seq( enc_inp, dec_inp, cell_fn(), num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, output_projection=(w, b)) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 2), res[0].shape) # Test that previous-feeding model ignores inputs after the first. 
dec_inp2 = [ constant_op.constant( 0, dtypes.int32, shape=[2]) for _ in range(3) ] with variable_scope.variable_scope("other"): d3, _ = seq2seq_lib.embedding_rnn_seq2seq( enc_inp, dec_inp2, cell_fn(), num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, feed_previous=constant_op.constant(True)) with variable_scope.variable_scope("other_2"): d1, _ = seq2seq_lib.embedding_rnn_seq2seq( enc_inp, dec_inp, cell_fn(), num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, feed_previous=True) with variable_scope.variable_scope("other_3"): d2, _ = seq2seq_lib.embedding_rnn_seq2seq( enc_inp, dec_inp2, cell_fn(), num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, feed_previous=True) sess.run([variables.global_variables_initializer()]) res1 = sess.run(d1) res2 = sess.run(d2) res3 = sess.run(d3) self.assertAllClose(res1, res2) self.assertAllClose(res1, res3) def testEmbeddingTiedRNNSeq2Seq(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): enc_inp = [ constant_op.constant( 1, dtypes.int32, shape=[2]) for i in range(2) ] dec_inp = [ constant_op.constant( i, dtypes.int32, shape=[2]) for i in range(3) ] cell = functools.partial(rnn_cell.BasicLSTMCell, 2, state_is_tuple=True) dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell(), num_symbols=5, embedding_size=2) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].c.shape) self.assertEqual((2, 2), res[0].h.shape) # Test when num_decoder_symbols is provided, the size of decoder output # is num_decoder_symbols. with variable_scope.variable_scope("decoder_symbols_seq2seq"): dec, mem = seq2seq_lib.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell(), num_symbols=5, num_decoder_symbols=3, embedding_size=2) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 3), res[0].shape) # Test externally provided output projection. w = variable_scope.get_variable("proj_w", [2, 5]) b = variable_scope.get_variable("proj_b", [5]) with variable_scope.variable_scope("proj_seq2seq"): dec, _ = seq2seq_lib.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell(), num_symbols=5, embedding_size=2, output_projection=(w, b)) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 2), res[0].shape) # Test that previous-feeding model ignores inputs after the first. 
dec_inp2 = [constant_op.constant(0, dtypes.int32, shape=[2])] * 3 with variable_scope.variable_scope("other"): d3, _ = seq2seq_lib.embedding_tied_rnn_seq2seq( enc_inp, dec_inp2, cell(), num_symbols=5, embedding_size=2, feed_previous=constant_op.constant(True)) with variable_scope.variable_scope("other_2"): d1, _ = seq2seq_lib.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell(), num_symbols=5, embedding_size=2, feed_previous=True) with variable_scope.variable_scope("other_3"): d2, _ = seq2seq_lib.embedding_tied_rnn_seq2seq( enc_inp, dec_inp2, cell(), num_symbols=5, embedding_size=2, feed_previous=True) sess.run([variables.global_variables_initializer()]) res1 = sess.run(d1) res2 = sess.run(d2) res3 = sess.run(d3) self.assertAllClose(res1, res2) self.assertAllClose(res1, res3) def testAttentionDecoder1(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): cell_fn = lambda: rnn_cell.GRUCell(2) cell = cell_fn() inp = [constant_op.constant(0.5, shape=[2, 2])] * 2 enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32) attn_states = array_ops.concat([ array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs ], 1) dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3 # Create a new cell instance for the decoder, since it uses a # different variable scope dec, mem = seq2seq_lib.attention_decoder( dec_inp, enc_state, attn_states, cell_fn(), output_size=4) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testAttentionDecoder2(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): cell_fn = lambda: rnn_cell.GRUCell(2) cell = cell_fn() inp = [constant_op.constant(0.5, shape=[2, 2])] * 2 enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32) attn_states = array_ops.concat([ array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs ], 1) dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3 # Use a new cell instance since the attention decoder uses a # different variable scope. dec, mem = seq2seq_lib.attention_decoder( dec_inp, enc_state, attn_states, cell_fn(), output_size=4, num_heads=2) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testDynamicAttentionDecoder1(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): cell_fn = lambda: rnn_cell.GRUCell(2) cell = cell_fn() inp = constant_op.constant(0.5, shape=[2, 2, 2]) enc_outputs, enc_state = rnn.dynamic_rnn( cell, inp, dtype=dtypes.float32) attn_states = enc_outputs dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3 # Use a new cell instance since the attention decoder uses a # different variable scope. 
dec, mem = seq2seq_lib.attention_decoder( dec_inp, enc_state, attn_states, cell_fn(), output_size=4) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testDynamicAttentionDecoder2(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): cell_fn = lambda: rnn_cell.GRUCell(2) cell = cell_fn() inp = constant_op.constant(0.5, shape=[2, 2, 2]) enc_outputs, enc_state = rnn.dynamic_rnn( cell, inp, dtype=dtypes.float32) attn_states = enc_outputs dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3 # Use a new cell instance since the attention decoder uses a # different variable scope. dec, mem = seq2seq_lib.attention_decoder( dec_inp, enc_state, attn_states, cell_fn(), output_size=4, num_heads=2) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testAttentionDecoderStateIsTuple(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): single_cell = lambda: rnn_cell.BasicLSTMCell( # pylint: disable=g-long-lambda 2, state_is_tuple=True) cell_fn = lambda: rnn_cell.MultiRNNCell( # pylint: disable=g-long-lambda cells=[single_cell() for _ in range(2)], state_is_tuple=True) cell = cell_fn() inp = [constant_op.constant(0.5, shape=[2, 2])] * 2 enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32) attn_states = array_ops.concat([ array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs ], 1) dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3 # Use a new cell instance since the attention decoder uses a # different variable scope. dec, mem = seq2seq_lib.attention_decoder( dec_inp, enc_state, attn_states, cell_fn(), output_size=4) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual(2, len(res[0])) self.assertEqual((2, 2), res[0][0].c.shape) self.assertEqual((2, 2), res[0][0].h.shape) self.assertEqual((2, 2), res[0][1].c.shape) self.assertEqual((2, 2), res[0][1].h.shape) def testDynamicAttentionDecoderStateIsTuple(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): cell_fn = lambda: rnn_cell.MultiRNNCell( # pylint: disable=g-long-lambda cells=[rnn_cell.BasicLSTMCell(2) for _ in range(2)]) cell = cell_fn() inp = [constant_op.constant(0.5, shape=[2, 2])] * 2 enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32) attn_states = array_ops.concat([ array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs ], 1) dec_inp = [constant_op.constant(0.4, shape=[2, 2])] * 3 # Use a new cell instance since the attention decoder uses a # different variable scope. 
dec, mem = seq2seq_lib.attention_decoder( dec_inp, enc_state, attn_states, cell_fn(), output_size=4) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 4), res[0].shape) res = sess.run([mem]) self.assertEqual(2, len(res[0])) self.assertEqual((2, 2), res[0][0].c.shape) self.assertEqual((2, 2), res[0][0].h.shape) self.assertEqual((2, 2), res[0][1].c.shape) self.assertEqual((2, 2), res[0][1].h.shape) def testEmbeddingAttentionDecoder(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): inp = [constant_op.constant(0.5, shape=[2, 2])] * 2 cell_fn = lambda: rnn_cell.GRUCell(2) cell = cell_fn() enc_outputs, enc_state = rnn.static_rnn(cell, inp, dtype=dtypes.float32) attn_states = array_ops.concat([ array_ops.reshape(e, [-1, 1, cell.output_size]) for e in enc_outputs ], 1) dec_inp = [ constant_op.constant( i, dtypes.int32, shape=[2]) for i in range(3) ] # Use a new cell instance since the attention decoder uses a # different variable scope. dec, mem = seq2seq_lib.embedding_attention_decoder( dec_inp, enc_state, attn_states, cell_fn(), num_symbols=4, embedding_size=2, output_size=3) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 3), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].shape) def testEmbeddingAttentionSeq2Seq(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): enc_inp = [ constant_op.constant( 1, dtypes.int32, shape=[2]) for i in range(2) ] dec_inp = [ constant_op.constant( i, dtypes.int32, shape=[2]) for i in range(3) ] cell_fn = lambda: rnn_cell.BasicLSTMCell(2) cell = cell_fn() dec, mem = seq2seq_lib.embedding_attention_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 2), res[0].c.shape) self.assertEqual((2, 2), res[0].h.shape) # Test with state_is_tuple=False. with variable_scope.variable_scope("no_tuple"): cell_fn = functools.partial( rnn_cell.BasicLSTMCell, 2, state_is_tuple=False) cell_nt = cell_fn() dec, mem = seq2seq_lib.embedding_attention_seq2seq( enc_inp, dec_inp, cell_nt, num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape) res = sess.run([mem]) self.assertEqual((2, 4), res[0].shape) # Test externally provided output projection. w = variable_scope.get_variable("proj_w", [2, 5]) b = variable_scope.get_variable("proj_b", [5]) with variable_scope.variable_scope("proj_seq2seq"): dec, _ = seq2seq_lib.embedding_attention_seq2seq( enc_inp, dec_inp, cell_fn(), num_encoder_symbols=2, num_decoder_symbols=5, embedding_size=2, output_projection=(w, b)) sess.run([variables.global_variables_initializer()]) res = sess.run(dec) self.assertEqual(3, len(res)) self.assertEqual((2, 2), res[0].shape) # TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse # within a variable scope that already has a weights tensor. # # # Test that previous-feeding model ignores inputs after the first. 
# dec_inp2 = [ # constant_op.constant( # 0, dtypes.int32, shape=[2]) for _ in range(3) # ] # with variable_scope.variable_scope("other"): # d3, _ = seq2seq_lib.embedding_attention_seq2seq( # enc_inp, # dec_inp2, # cell_fn(), # num_encoder_symbols=2, # num_decoder_symbols=5, # embedding_size=2, # feed_previous=constant_op.constant(True)) # sess.run([variables.global_variables_initializer()]) # variable_scope.get_variable_scope().reuse_variables() # cell = cell_fn() # d1, _ = seq2seq_lib.embedding_attention_seq2seq( # enc_inp, # dec_inp, # cell, # num_encoder_symbols=2, # num_decoder_symbols=5, # embedding_size=2, # feed_previous=True) # d2, _ = seq2seq_lib.embedding_attention_seq2seq( # enc_inp, # dec_inp2, # cell, # num_encoder_symbols=2, # num_decoder_symbols=5, # embedding_size=2, # feed_previous=True) # res1 = sess.run(d1) # res2 = sess.run(d2) # res3 = sess.run(d3) # self.assertAllClose(res1, res2) # self.assertAllClose(res1, res3) def testOne2ManyRNNSeq2Seq(self): with self.test_session() as sess: with variable_scope.variable_scope( "root", initializer=init_ops.constant_initializer(0.5)): enc_inp = [ constant_op.constant( 1, dtypes.int32, shape=[2]) for i in range(2) ] dec_inp_dict = {} dec_inp_dict["0"] = [ constant_op.constant( i, dtypes.int32, shape=[2]) for i in range(3) ] dec_inp_dict["1"] = [ constant_op.constant( i, dtypes.int32, shape=[2]) for i in range(4) ] dec_symbols_dict = {"0": 5, "1": 6} def EncCellFn(): return rnn_cell.BasicLSTMCell(2, state_is_tuple=True) def DecCellsFn(): return dict((k, rnn_cell.BasicLSTMCell(2, state_is_tuple=True)) for k in dec_symbols_dict) outputs_dict, state_dict = (seq2seq_lib.one2many_rnn_seq2seq( enc_inp, dec_inp_dict, EncCellFn(), DecCellsFn(), 2, dec_symbols_dict, embedding_size=2)) sess.run([variables.global_variables_initializer()]) res = sess.run(outputs_dict["0"]) self.assertEqual(3, len(res)) self.assertEqual((2, 5), res[0].shape) res = sess.run(outputs_dict["1"]) self.assertEqual(4, len(res)) self.assertEqual((2, 6), res[0].shape) res = sess.run([state_dict["0"]]) self.assertEqual((2, 2), res[0].c.shape) self.assertEqual((2, 2), res[0].h.shape) res = sess.run([state_dict["1"]]) self.assertEqual((2, 2), res[0].c.shape) self.assertEqual((2, 2), res[0].h.shape) # Test that previous-feeding model ignores inputs after the first, i.e. # dec_inp_dict2 has different inputs from dec_inp_dict after the first # time-step. 
dec_inp_dict2 = {} dec_inp_dict2["0"] = [ constant_op.constant( 0, dtypes.int32, shape=[2]) for _ in range(3) ] dec_inp_dict2["1"] = [ constant_op.constant( 0, dtypes.int32, shape=[2]) for _ in range(4) ] with variable_scope.variable_scope("other"): outputs_dict3, _ = seq2seq_lib.one2many_rnn_seq2seq( enc_inp, dec_inp_dict2, EncCellFn(), DecCellsFn(), 2, dec_symbols_dict, embedding_size=2, feed_previous=constant_op.constant(True)) with variable_scope.variable_scope("other_2"): outputs_dict1, _ = seq2seq_lib.one2many_rnn_seq2seq( enc_inp, dec_inp_dict, EncCellFn(), DecCellsFn(), 2, dec_symbols_dict, embedding_size=2, feed_previous=True) with variable_scope.variable_scope("other_3"): outputs_dict2, _ = seq2seq_lib.one2many_rnn_seq2seq( enc_inp, dec_inp_dict2, EncCellFn(), DecCellsFn(), 2, dec_symbols_dict, embedding_size=2, feed_previous=True) sess.run([variables.global_variables_initializer()]) res1 = sess.run(outputs_dict1["0"]) res2 = sess.run(outputs_dict2["0"]) res3 = sess.run(outputs_dict3["0"]) self.assertAllClose(res1, res2) self.assertAllClose(res1, res3) def testSequenceLoss(self): with self.test_session() as sess: logits = [constant_op.constant(i + 0.5, shape=[2, 5]) for i in range(3)] targets = [ constant_op.constant( i, dtypes.int32, shape=[2]) for i in range(3) ] weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)] average_loss_per_example = seq2seq_lib.sequence_loss( logits, targets, weights, average_across_timesteps=True, average_across_batch=True) res = sess.run(average_loss_per_example) self.assertAllClose(1.60944, res) average_loss_per_sequence = seq2seq_lib.sequence_loss( logits, targets, weights, average_across_timesteps=False, average_across_batch=True) res = sess.run(average_loss_per_sequence) self.assertAllClose(4.828314, res) total_loss = seq2seq_lib.sequence_loss( logits, targets, weights, average_across_timesteps=False, average_across_batch=False) res = sess.run(total_loss) self.assertAllClose(9.656628, res) def testSequenceLossByExample(self): with self.test_session() as sess: output_classes = 5 logits = [ constant_op.constant( i + 0.5, shape=[2, output_classes]) for i in range(3) ] targets = [ constant_op.constant( i, dtypes.int32, shape=[2]) for i in range(3) ] weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)] average_loss_per_example = (seq2seq_lib.sequence_loss_by_example( logits, targets, weights, average_across_timesteps=True)) res = sess.run(average_loss_per_example) self.assertAllClose(np.asarray([1.609438, 1.609438]), res) loss_per_sequence = seq2seq_lib.sequence_loss_by_example( logits, targets, weights, average_across_timesteps=False) res = sess.run(loss_per_sequence) self.assertAllClose(np.asarray([4.828314, 4.828314]), res) # TODO(ebrevdo, lukaszkaiser): Re-enable once RNNCells allow reuse # within a variable scope that already has a weights tensor. # # def testModelWithBucketsScopeAndLoss(self): # """Test variable scope reuse is not reset after model_with_buckets.""" # classes = 10 # buckets = [(4, 4), (8, 8)] # with self.test_session(): # # Here comes a sample Seq2Seq model using GRU cells. 
# def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss): # """Example sequence-to-sequence model that uses GRU cells.""" # def GRUSeq2Seq(enc_inp, dec_inp): # cell = rnn_cell.MultiRNNCell( # [rnn_cell.GRUCell(24) for _ in range(2)]) # return seq2seq_lib.embedding_attention_seq2seq( # enc_inp, # dec_inp, # cell, # num_encoder_symbols=classes, # num_decoder_symbols=classes, # embedding_size=24) # targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0] # return seq2seq_lib.model_with_buckets( # enc_inp, # dec_inp, # targets, # weights, # buckets, # GRUSeq2Seq, # per_example_loss=per_example_loss) # # Now we construct the copy model. # inp = [ # array_ops.placeholder( # dtypes.int32, shape=[None]) for _ in range(8) # ] # out = [ # array_ops.placeholder( # dtypes.int32, shape=[None]) for _ in range(8) # ] # weights = [ # array_ops.ones_like( # inp[0], dtype=dtypes.float32) for _ in range(8) # ] # with variable_scope.variable_scope("root"): # _, losses1 = SampleGRUSeq2Seq( # inp, out, weights, per_example_loss=False) # # Now check that we did not accidentally set reuse. # self.assertEqual(False, variable_scope.get_variable_scope().reuse) # with variable_scope.variable_scope("new"): # _, losses2 = SampleGRUSeq2Seq # inp, out, weights, per_example_loss=True) # # First loss is scalar, the second one is a 1-dimensional tensor. # self.assertEqual([], losses1[0].get_shape().as_list()) # self.assertEqual([None], losses2[0].get_shape().as_list()) def testModelWithBuckets(self): """Larger tests that does full sequence-to-sequence model training.""" # We learn to copy 10 symbols in 2 buckets: length 4 and length 8. classes = 10 buckets = [(4, 4), (8, 8)] perplexities = [[], []] # Results for each bucket. random_seed.set_random_seed(111) random.seed(111) np.random.seed(111) with self.test_session() as sess: # We use sampled softmax so we keep output projection separate. w = variable_scope.get_variable("proj_w", [24, classes]) w_t = array_ops.transpose(w) b = variable_scope.get_variable("proj_b", [classes]) # Here comes a sample Seq2Seq model using GRU cells. def SampleGRUSeq2Seq(enc_inp, dec_inp, weights): """Example sequence-to-sequence model that uses GRU cells.""" def GRUSeq2Seq(enc_inp, dec_inp): cell = rnn_cell.MultiRNNCell( [rnn_cell.GRUCell(24) for _ in range(2)], state_is_tuple=True) return seq2seq_lib.embedding_attention_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols=classes, num_decoder_symbols=classes, embedding_size=24, output_projection=(w, b)) targets = [dec_inp[i + 1] for i in range(len(dec_inp) - 1)] + [0] def SampledLoss(labels, logits): labels = array_ops.reshape(labels, [-1, 1]) return nn_impl.sampled_softmax_loss( weights=w_t, biases=b, labels=labels, inputs=logits, num_sampled=8, num_classes=classes) return seq2seq_lib.model_with_buckets( enc_inp, dec_inp, targets, weights, buckets, GRUSeq2Seq, softmax_loss_function=SampledLoss) # Now we construct the copy model. 
batch_size = 8 inp = [ array_ops.placeholder( dtypes.int32, shape=[None]) for _ in range(8) ] out = [ array_ops.placeholder( dtypes.int32, shape=[None]) for _ in range(8) ] weights = [ array_ops.ones_like( inp[0], dtype=dtypes.float32) for _ in range(8) ] with variable_scope.variable_scope("root"): _, losses = SampleGRUSeq2Seq(inp, out, weights) updates = [] params = variables.global_variables() optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5) for i in range(len(buckets)): full_grads = gradients_impl.gradients(losses[i], params) grads, _ = clip_ops.clip_by_global_norm(full_grads, 30.0) update = optimizer.apply_gradients(zip(grads, params)) updates.append(update) sess.run([variables.global_variables_initializer()]) steps = 6 for _ in range(steps): bucket = random.choice(np.arange(len(buckets))) length = buckets[bucket][0] i = [ np.array( [np.random.randint(9) + 1 for _ in range(batch_size)], dtype=np.int32) for _ in range(length) ] # 0 is our "GO" symbol here. o = [np.array([0] * batch_size, dtype=np.int32)] + i feed = {} for i1, i2, o1, o2 in zip(inp[:length], i[:length], out[:length], o[:length]): feed[i1.name] = i2 feed[o1.name] = o2 if length < 8: # For the 4-bucket, we need the 5th as target. feed[out[length].name] = o[length] res = sess.run([updates[bucket], losses[bucket]], feed) perplexities[bucket].append(math.exp(float(res[1]))) for bucket in range(len(buckets)): if len(perplexities[bucket]) > 1: # Assert that perplexity went down. self.assertLess(perplexities[bucket][-1], # 20% margin of error. 1.2 * perplexities[bucket][0]) def testModelWithBooleanFeedPrevious(self): """Test the model behavior when feed_previous is True. For example, the following two cases have the same effect: - Train `embedding_rnn_seq2seq` with `feed_previous=True`, which contains a `embedding_rnn_decoder` with `feed_previous=True` and `update_embedding_for_previous=True`. The decoder is fed with "<Go>" and outputs "A, B, C". - Train `embedding_rnn_seq2seq` with `feed_previous=False`. The decoder is fed with "<Go>, A, B". 
""" num_encoder_symbols = 3 num_decoder_symbols = 5 batch_size = 2 num_enc_timesteps = 2 num_dec_timesteps = 3 def TestModel(seq2seq): with self.test_session(graph=ops.Graph()) as sess: random_seed.set_random_seed(111) random.seed(111) np.random.seed(111) enc_inp = [ constant_op.constant( i + 1, dtypes.int32, shape=[batch_size]) for i in range(num_enc_timesteps) ] dec_inp_fp_true = [ constant_op.constant( i, dtypes.int32, shape=[batch_size]) for i in range(num_dec_timesteps) ] dec_inp_holder_fp_false = [ array_ops.placeholder( dtypes.int32, shape=[batch_size]) for _ in range(num_dec_timesteps) ] targets = [ constant_op.constant( i + 1, dtypes.int32, shape=[batch_size]) for i in range(num_dec_timesteps) ] weights = [ constant_op.constant( 1.0, shape=[batch_size]) for i in range(num_dec_timesteps) ] def ForwardBackward(enc_inp, dec_inp, feed_previous): scope_name = "fp_{}".format(feed_previous) with variable_scope.variable_scope(scope_name): dec_op, _ = seq2seq(enc_inp, dec_inp, feed_previous=feed_previous) net_variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope_name) optimizer = adam.AdamOptimizer(0.03, epsilon=1e-5) update_op = optimizer.minimize( seq2seq_lib.sequence_loss(dec_op, targets, weights), var_list=net_variables) return dec_op, update_op, net_variables dec_op_fp_true, update_fp_true, variables_fp_true = ForwardBackward( enc_inp, dec_inp_fp_true, feed_previous=True) _, update_fp_false, variables_fp_false = ForwardBackward( enc_inp, dec_inp_holder_fp_false, feed_previous=False) sess.run(variables.global_variables_initializer()) # We only check consistencies between the variables existing in both # the models with True and False feed_previous. Variables created by # the loop_function in the model with True feed_previous are ignored. v_false_name_dict = { v.name.split("/", 1)[-1]: v for v in variables_fp_false } matched_variables = [(v, v_false_name_dict[v.name.split("/", 1)[-1]]) for v in variables_fp_true] for v_true, v_false in matched_variables: sess.run(state_ops.assign(v_false, v_true)) # Take the symbols generated by the decoder with feed_previous=True as # the true input symbols for the decoder with feed_previous=False. 
dec_fp_true = sess.run(dec_op_fp_true) output_symbols_fp_true = np.argmax(dec_fp_true, axis=2) dec_inp_fp_false = np.vstack((dec_inp_fp_true[0].eval(), output_symbols_fp_true[:-1])) sess.run(update_fp_true) sess.run(update_fp_false, { holder: inp for holder, inp in zip(dec_inp_holder_fp_false, dec_inp_fp_false) }) for v_true, v_false in matched_variables: self.assertAllClose(v_true.eval(), v_false.eval()) def EmbeddingRNNSeq2SeqF(enc_inp, dec_inp, feed_previous): cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True) return seq2seq_lib.embedding_rnn_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols, num_decoder_symbols, embedding_size=2, feed_previous=feed_previous) def EmbeddingRNNSeq2SeqNoTupleF(enc_inp, dec_inp, feed_previous): cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False) return seq2seq_lib.embedding_rnn_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols, num_decoder_symbols, embedding_size=2, feed_previous=feed_previous) def EmbeddingTiedRNNSeq2Seq(enc_inp, dec_inp, feed_previous): cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True) return seq2seq_lib.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2, feed_previous=feed_previous) def EmbeddingTiedRNNSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous): cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False) return seq2seq_lib.embedding_tied_rnn_seq2seq( enc_inp, dec_inp, cell, num_decoder_symbols, embedding_size=2, feed_previous=feed_previous) def EmbeddingAttentionSeq2Seq(enc_inp, dec_inp, feed_previous): cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=True) return seq2seq_lib.embedding_attention_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols, num_decoder_symbols, embedding_size=2, feed_previous=feed_previous) def EmbeddingAttentionSeq2SeqNoTuple(enc_inp, dec_inp, feed_previous): cell = rnn_cell.BasicLSTMCell(2, state_is_tuple=False) return seq2seq_lib.embedding_attention_seq2seq( enc_inp, dec_inp, cell, num_encoder_symbols, num_decoder_symbols, embedding_size=2, feed_previous=feed_previous) for model in (EmbeddingRNNSeq2SeqF, EmbeddingRNNSeq2SeqNoTupleF, EmbeddingTiedRNNSeq2Seq, EmbeddingTiedRNNSeq2SeqNoTuple, EmbeddingAttentionSeq2Seq, EmbeddingAttentionSeq2SeqNoTuple): TestModel(model) if __name__ == "__main__": test.main()
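
# Worked check for the constants asserted in testSequenceLoss above (a sketch,
# not part of the original test file): each timestep feeds identical logits to
# all 5 classes, so the softmax is uniform and the per-step cross-entropy is
# ln(5) regardless of the target symbol.
import math

assert abs(math.log(5) - 1.609438) < 1e-5          # fully averaged loss
assert abs(3 * math.log(5) - 4.828314) < 1e-5      # summed over 3 timesteps
assert abs(2 * 3 * math.log(5) - 9.656628) < 1e-5  # summed over batch of 2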
apache-2.0
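The feed_previous consistency check in the test above hinges on one data-shaping step: the greedy symbols produced by the decoder run with feed_previous=True are shifted by one timestep and reused as the "true" inputs for the decoder run with feed_previous=False. A minimal NumPy sketch of just that step (the shapes and the GO-style first input are illustrative, not taken from the test):

```python
import numpy as np

# Assumed logits shape: [num_dec_timesteps, batch_size, num_decoder_symbols].
num_dec_timesteps, batch_size, num_decoder_symbols = 3, 2, 5
dec_fp_true = np.random.rand(num_dec_timesteps, batch_size, num_decoder_symbols)

# Greedy symbol at every timestep for every batch element.
output_symbols_fp_true = np.argmax(dec_fp_true, axis=2)        # shape (3, 2)

# First decoder input (a GO-style symbol); hypothetical value here.
first_dec_input = np.zeros((1, batch_size), dtype=np.int64)

# Inputs for the feed_previous=False decoder: the first true input followed by
# the symbols the other decoder emitted, dropping its last timestep.
dec_inp_fp_false = np.vstack((first_dec_input, output_symbols_fp_true[:-1]))
assert dec_inp_fp_false.shape == (num_dec_timesteps, batch_size)
```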
SalesforceEng/LinuxTelemetry
plugins/buddyinfo.py
1
8671
#!/usr/bin/python ########################################################################## # Copyright (c) 2015, Salesforce.com, Inc. # All rights reserved. # # Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # # Neither the name of Salesforce.com nor the names of its # contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT # NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE # GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER # IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ########################################################################## """ **buddyinfo.py** Linux uses buddy allocator for memory management. Pages are allocated in each NUMA node and zones within each node. Within each zones, pages are allocated as contiguous groups of 1, 2, 3, 4, and so on order pages where 1 means 4K pages. Number of free pages in each bucket is exposed through /proc/buddyinfo When this number goes below a threshold in any bucket, kswapd (slowpath for finding free pages) kicks in. It then scans for free pages in all order levels until all of them reach above min limit. This process can take long time and may cause issues for GC latencies. Typical contents of /proc/buddyinfo: - Node 0, zone Normal 1490 4026 12224 8508 4493 1929 849 301 101 45 5257 - Node 1, zone DMA 1 1 1 1 1 0 1 0 1 1 3 - Node 1, zone DMA32 15 3 2 5 8 7 4 4 7 8 681 - Node 1, zone Normal 6061 13681 20887 15188 9097 4546 1948 731 273 125 3976 Here are the fields interpretation in each row: 1. NUMA node (such as 0 or 1) 2. Zone name (Normal, DMA32, DMA, etc.) 3. Col. 
3 to end: page order or buckets on contiguous memory sizes: 4K, 8K, 16K, 32K, 64K, 128K, 256K, 512K, 1024K, and 2048K """ import collectd import platform import os import socket import time import re import sys import traceback os_name = platform.system() host_name = socket.gethostbyaddr(socket.gethostname())[0] host_types = ['app', 'db', 'ffx', 'indexer', 'search', 'other'] host_type = 'other' BUDDY_FNAME = '/proc/buddyinfo' METRIC_PLUGIN = 'buddyinfo' METRIC_TYPE = 'gauge' buddy_fields = ['numa_node', 'zone_name', 'bucket_free_pages' ] buddy_metrics = ['bucket_free_pages_per_sec', 'total_free_pages_per_sec', 'pct_fragment_per_sec' ] white_list = [] node_list = [] zone_list = [] stats_cache = {} stats_current = {} re_buddyinfo=re.compile(r'^\s*Node\s+(?P<node>\d+)' r',\s+zone\s+(?P<zone>\S+)\s+(?P<pages>.*)$') def get_host_type(): for i in host_types: if i in host_name: host_type = i def init_stats_cache(): global white_list try: if os.path.exists(BUDDY_FNAME): num_buckets = 0 with open(BUDDY_FNAME) as f: for line in f: match = re_buddyinfo.search(line) if not match: collectd.error('buddyinfo: unknown line pattern: %s' % (line)) continue; if 'node' in match.groupdict(): node = match.group('node') else: collectd.error('node not found in buddyinfo') return if 'zone' in match.groupdict(): zone = match.group('zone') else: collectd.error('zone not found in buddyinfo') return if 'pages' in match.groupdict(): free_pages = match.group('pages').strip().split() else: collectd.error('pages not found in buddyinfo') return num_buckets = len(free_pages) if node not in node_list: node_list.append(node) if zone not in zone_list: zone_list.append(zone) stats_cache[(node, zone, 'val')] = free_pages stats_cache[(node, zone, 'ts')] = time.time() f.close() for i in range(0, num_buckets): white_list.append('free_pages_' + str(4*2**i) + 'K') collectd.info('buddyinfo: node_list : %s' % (node_list)) collectd.info('buddyinfo: zone_list : %s' % (zone_list)) collectd.info('buddyinfo: white_list: %s' % (white_list)) else: collectd.info('buddyinfo: init_stats_cache: path: %s does not exist' % (BUDDY_FNAME)) except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() collectd.error('Exception during buddyinfo init: %s\n%s' % (str(e), traceback.format_tb(exc_traceback))) def collect_buddyinfo(): try: if os.path.exists(BUDDY_FNAME): with open(BUDDY_FNAME) as f: for line in f: match = re_buddyinfo.search(line) if not match: continue; if 'node' in match.groupdict(): node = match.group('node') else: collectd.error('node not found in buddyinfo') return if 'zone' in match.groupdict(): zone = match.group('zone') else: collectd.error('zone not found in buddyinfo') return if 'pages' in match.groupdict(): free_pages = match.group('pages').strip().split() else: collectd.error('pages not found in buddyinfo') return stats_current[(node, zone, 'val')] = free_pages stats_current[(node, zone, 'ts')] = time.time() key_val = dict(zip(white_list, free_pages)) metric = collectd.Values() metric.host = host_name metric.plugin = METRIC_PLUGIN metric.plugin_instance = node metric.type = METRIC_TYPE for k in range(0, len(white_list)): metric.type_instance = 'zone_' + zone + '.' 
metric.type_instance += white_list[k] metric.values = [free_pages[k]] metric.dispatch() f.close() else: collectd.error('buddyinfo: procfs path: %s does not exist' % (BUDDY_FNAME)) except Exception as e: exc_type, exc_value, exc_traceback = sys.exc_info() collectd.error('Exception during buddyinfo collection: %s\n%s' % (str(e), traceback.format_tb(exc_traceback))) def swap_current_cache(): stats_cache = stats_current.copy() def configer(ObjConfiguration): collectd.info('buddyinfo plugin: configuring host: %s' % (host_name)) def initer(): get_host_type() collectd.info('buddyinfo plugin: host of type: %s' % (host_type)) collectd.info('buddyinfo initer: white list: %s' % (white_list)) init_stats_cache() collectd.info('buddyinfo init: stats_cache: %s' % (stats_cache)) def reader(input_data=None): collect_buddyinfo() swap_current_cache() def writer(metric, data=None): for i in metric.values: collectd.debug('%s (%s): %f' % (metric.plugin, metric.type, i)) def shutdown(): collectd.info('buddyinfo plugin shutting down') #== Callbacks ==# if (os_name == 'Linux'): collectd.register_config(configer) collectd.register_init(initer) collectd.register_read(reader) collectd.register_write(writer) collectd.register_shutdown(shutdown) else: collectd.warning('buddyinfo plugin currently works for Linux only')
bsd-3-clause
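As a companion to the plugin above, here is a minimal, standalone sketch of parsing one /proc/buddyinfo line into the per-order buckets described in its docstring. The sample line and bucket naming mirror the plugin's conventions, but this is an illustration, not the plugin's own code path:

```python
import re

# Sample line in the format documented above.
line = "Node 0, zone   Normal   1490 4026 12224 8508 4493 1929 849 301 101 45 5257"

# Node id, zone name, then one free-page count per allocation order.
pattern = re.compile(r'^\s*Node\s+(?P<node>\d+),\s+zone\s+(?P<zone>\S+)\s+(?P<pages>.*)$')

match = pattern.search(line)
if match:
    node = match.group('node')
    zone = match.group('zone')
    free_pages = [int(p) for p in match.group('pages').split()]
    # Order k holds contiguous blocks of 4K * 2**k memory.
    buckets = {'free_pages_%dK' % (4 * 2 ** k): count
               for k, count in enumerate(free_pages)}
    print(node, zone, buckets)
```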
pomegranited/edx-platform
openedx/core/djangoapps/user_api/accounts/api.py
30
19144
""" Programmatic integration point for User API Accounts sub-application """ from django.utils.translation import ugettext as _ from django.db import transaction, IntegrityError import datetime from pytz import UTC from django.core.exceptions import ObjectDoesNotExist from django.conf import settings from django.core.validators import validate_email, validate_slug, ValidationError from openedx.core.djangoapps.user_api.preferences.api import update_user_preferences from openedx.core.djangoapps.user_api.errors import PreferenceValidationError from student.models import User, UserProfile, Registration from student import views as student_views from util.model_utils import emit_setting_changed_event from openedx.core.lib.api.view_utils import add_serializer_errors from ..errors import ( AccountUpdateError, AccountValidationError, AccountUsernameInvalid, AccountPasswordInvalid, AccountEmailInvalid, AccountUserAlreadyExists, UserAPIInternalError, UserAPIRequestError, UserNotFound, UserNotAuthorized ) from ..forms import PasswordResetFormNoActive from ..helpers import intercept_errors from ..models import UserPreference from . import ( ACCOUNT_VISIBILITY_PREF_KEY, PRIVATE_VISIBILITY, EMAIL_MIN_LENGTH, EMAIL_MAX_LENGTH, PASSWORD_MIN_LENGTH, PASSWORD_MAX_LENGTH, USERNAME_MIN_LENGTH, USERNAME_MAX_LENGTH ) from .serializers import ( AccountLegacyProfileSerializer, AccountUserSerializer, UserReadOnlySerializer ) @intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError]) def get_account_settings(request, username=None, configuration=None, view=None): """Returns account information for a user serialized as JSON. Note: If `request.user.username` != `username`, this method will return differing amounts of information based on who `request.user` is and the privacy settings of the user associated with `username`. Args: request (Request): The request object with account information about the requesting user. Only the user with username `username` or users with "is_staff" privileges can get full account information. Other users will get the account fields that the user has elected to share. username (str): Optional username for the desired account information. If not specified, `request.user.username` is assumed. configuration (dict): an optional configuration specifying which fields in the account can be shared, and the default visibility settings. If not present, the setting value with key ACCOUNT_VISIBILITY_CONFIGURATION is used. view (str): An optional string allowing "is_staff" users and users requesting their own account information to get just the fields that are shared with everyone. If view is "shared", only shared account information will be returned, regardless of `request.user`. Returns: A dict containing account fields. Raises: UserNotFound: no user with username `username` exists (or `request.user.username` if `username` is not specified) UserAPIInternalError: the operation failed due to an unexpected error. 
""" requesting_user = request.user if username is None: username = requesting_user.username try: existing_user = User.objects.select_related('profile').get(username=username) except ObjectDoesNotExist: raise UserNotFound() has_full_access = requesting_user.username == username or requesting_user.is_staff if has_full_access and view != 'shared': admin_fields = settings.ACCOUNT_VISIBILITY_CONFIGURATION.get('admin_fields') else: admin_fields = None return UserReadOnlySerializer( existing_user, configuration=configuration, custom_fields=admin_fields, context={'request': request} ).data @intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError]) def update_account_settings(requesting_user, update, username=None): """Update user account information. Note: It is up to the caller of this method to enforce the contract that this method is only called with the user who made the request. Arguments: requesting_user (User): The user requesting to modify account information. Only the user with username 'username' has permissions to modify account information. update (dict): The updated account field values. username (str): Optional username specifying which account should be updated. If not specified, `requesting_user.username` is assumed. Raises: UserNotFound: no user with username `username` exists (or `requesting_user.username` if `username` is not specified) UserNotAuthorized: the requesting_user does not have access to change the account associated with `username` AccountValidationError: the update was not attempted because validation errors were found with the supplied update AccountUpdateError: the update could not be completed. Note that if multiple fields are updated at the same time, some parts of the update may have been successful, even if an AccountUpdateError is returned; in particular, the user account (not including e-mail address) may have successfully been updated, but then the e-mail change request, which is processed last, may throw an error. UserAPIInternalError: the operation failed due to an unexpected error. """ if username is None: username = requesting_user.username existing_user, existing_user_profile = _get_user_and_profile(username) if requesting_user.username != username: raise UserNotAuthorized() # If user has requested to change email, we must call the multi-step process to handle this. # It is not handled by the serializer (which considers email to be read-only). changing_email = False if "email" in update: changing_email = True new_email = update["email"] del update["email"] # If user has requested to change name, store old name because we must update associated metadata # after the save process is complete. old_name = None if "name" in update: old_name = existing_user_profile.name # Check for fields that are not editable. Marking them read-only causes them to be ignored, but we wish to 400. read_only_fields = set(update.keys()).intersection( AccountUserSerializer.get_read_only_fields() + AccountLegacyProfileSerializer.get_read_only_fields() ) # Build up all field errors, whether read-only, validation, or email errors. 
field_errors = {} if read_only_fields: for read_only_field in read_only_fields: field_errors[read_only_field] = { "developer_message": u"This field is not editable via this API", "user_message": _(u"The '{field_name}' field cannot be edited.").format(field_name=read_only_field) } del update[read_only_field] user_serializer = AccountUserSerializer(existing_user, data=update) legacy_profile_serializer = AccountLegacyProfileSerializer(existing_user_profile, data=update) for serializer in user_serializer, legacy_profile_serializer: field_errors = add_serializer_errors(serializer, update, field_errors) # If the user asked to change email, validate it. if changing_email: try: student_views.validate_new_email(existing_user, new_email) except ValueError as err: field_errors["email"] = { "developer_message": u"Error thrown from validate_new_email: '{}'".format(err.message), "user_message": err.message } # If we have encountered any validation errors, return them to the user. if field_errors: raise AccountValidationError(field_errors) try: # If everything validated, go ahead and save the serializers. # We have not found a way using signals to get the language proficiency changes (grouped by user). # As a workaround, store old and new values here and emit them after save is complete. if "language_proficiencies" in update: old_language_proficiencies = legacy_profile_serializer.data["language_proficiencies"] for serializer in user_serializer, legacy_profile_serializer: serializer.save() # if any exception is raised for user preference (i.e. account_privacy), the entire transaction for user account # patch is rolled back and the data is not saved if 'account_privacy' in update: update_user_preferences( requesting_user, {'account_privacy': update["account_privacy"]}, existing_user ) if "language_proficiencies" in update: new_language_proficiencies = update["language_proficiencies"] emit_setting_changed_event( user=existing_user, db_table=existing_user_profile.language_proficiencies.model._meta.db_table, setting_name="language_proficiencies", old_value=old_language_proficiencies, new_value=new_language_proficiencies, ) # If the name was changed, store information about the change operation. This is outside of the # serializer so that we can store who requested the change. if old_name: meta = existing_user_profile.get_meta() if 'old_names' not in meta: meta['old_names'] = [] meta['old_names'].append([ old_name, u"Name change requested through account API by {0}".format(requesting_user.username), datetime.datetime.now(UTC).isoformat() ]) existing_user_profile.set_meta(meta) existing_user_profile.save() except PreferenceValidationError as err: raise AccountValidationError(err.preference_errors) except Exception as err: raise AccountUpdateError( u"Error thrown when saving account updates: '{}'".format(err.message) ) # And try to send the email change request if necessary. if changing_email: try: student_views.do_email_change_request(existing_user, new_email) except ValueError as err: raise AccountUpdateError( u"Error thrown from do_email_change_request: '{}'".format(err.message), user_message=err.message ) def _get_user_and_profile(username): """ Helper method to return the legacy user and profile objects based on username. 
""" try: existing_user = User.objects.get(username=username) existing_user_profile = UserProfile.objects.get(user=existing_user) except ObjectDoesNotExist: raise UserNotFound() return existing_user, existing_user_profile @intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError]) @transaction.atomic def create_account(username, password, email): """Create a new user account. This will implicitly create an empty profile for the user. WARNING: This function does NOT yet implement all the features in `student/views.py`. Until it does, please use this method ONLY for tests of the account API, not in production code. In particular, these are currently missing: * 3rd party auth * External auth (shibboleth) * Complex password policies (ENFORCE_PASSWORD_POLICY) In addition, we assume that some functionality is handled at higher layers: * Analytics events * Activation email * Terms of service / honor code checking * Recording demographic info (use profile API) * Auto-enrollment in courses (if invited via instructor dash) Args: username (unicode): The username for the new account. password (unicode): The user's password. email (unicode): The email address associated with the account. Returns: unicode: an activation key for the account. Raises: AccountUserAlreadyExists AccountUsernameInvalid AccountEmailInvalid AccountPasswordInvalid UserAPIInternalError: the operation failed due to an unexpected error. """ # Validate the username, password, and email # This will raise an exception if any of these are not in a valid format. _validate_username(username) _validate_password(password, username) _validate_email(email) # Create the user account, setting them to "inactive" until they activate their account. user = User(username=username, email=email, is_active=False) user.set_password(password) try: user.save() except IntegrityError: raise AccountUserAlreadyExists # Create a registration to track the activation process # This implicitly saves the registration. registration = Registration() registration.register(user) # Create an empty user profile with default values UserProfile(user=user).save() # Return the activation key, which the caller should send to the user return registration.activation_key def check_account_exists(username=None, email=None): """Check whether an account with a particular username or email already exists. Keyword Arguments: username (unicode) email (unicode) Returns: list of conflicting fields Example Usage: >>> account_api.check_account_exists(username="bob") [] >>> account_api.check_account_exists(username="ted", email="[email protected]") ["email", "username"] """ conflicts = [] if email is not None and User.objects.filter(email=email).exists(): conflicts.append("email") if username is not None and User.objects.filter(username=username).exists(): conflicts.append("username") return conflicts @intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError]) def activate_account(activation_key): """Activate a user's account. Args: activation_key (unicode): The activation key the user received via email. Returns: None Raises: UserNotAuthorized UserAPIInternalError: the operation failed due to an unexpected error. 
""" try: registration = Registration.objects.get(activation_key=activation_key) except Registration.DoesNotExist: raise UserNotAuthorized else: # This implicitly saves the registration registration.activate() @intercept_errors(UserAPIInternalError, ignore_errors=[UserAPIRequestError]) def request_password_change(email, orig_host, is_secure): """Email a single-use link for performing a password reset. Users must confirm the password change before we update their information. Args: email (str): An email address orig_host (str): An originating host, extracted from a request with get_host is_secure (bool): Whether the request was made with HTTPS Returns: None Raises: UserNotFound AccountRequestError UserAPIInternalError: the operation failed due to an unexpected error. """ # Binding data to a form requires that the data be passed as a dictionary # to the Form class constructor. form = PasswordResetFormNoActive({'email': email}) # Validate that a user exists with the given email address. if form.is_valid(): # Generate a single-use link for performing a password reset # and email it to the user. form.save( from_email=settings.DEFAULT_FROM_EMAIL, domain_override=orig_host, use_https=is_secure ) else: # No user with the provided email address exists. raise UserNotFound def _validate_username(username): """Validate the username. Arguments: username (unicode): The proposed username. Returns: None Raises: AccountUsernameInvalid """ if not isinstance(username, basestring): raise AccountUsernameInvalid(u"Username must be a string") if len(username) < USERNAME_MIN_LENGTH: raise AccountUsernameInvalid( u"Username '{username}' must be at least {min} characters long".format( username=username, min=USERNAME_MIN_LENGTH ) ) if len(username) > USERNAME_MAX_LENGTH: raise AccountUsernameInvalid( u"Username '{username}' must be at most {max} characters long".format( username=username, max=USERNAME_MAX_LENGTH ) ) try: validate_slug(username) except ValidationError: raise AccountUsernameInvalid( u"Username '{username}' must contain only A-Z, a-z, 0-9, -, or _ characters" ) def _validate_password(password, username): """Validate the format of the user's password. Passwords cannot be the same as the username of the account, so we take `username` as an argument. Arguments: password (unicode): The proposed password. username (unicode): The username associated with the user's account. Returns: None Raises: AccountPasswordInvalid """ if not isinstance(password, basestring): raise AccountPasswordInvalid(u"Password must be a string") if len(password) < PASSWORD_MIN_LENGTH: raise AccountPasswordInvalid( u"Password must be at least {min} characters long".format( min=PASSWORD_MIN_LENGTH ) ) if len(password) > PASSWORD_MAX_LENGTH: raise AccountPasswordInvalid( u"Password must be at most {max} characters long".format( max=PASSWORD_MAX_LENGTH ) ) if password == username: raise AccountPasswordInvalid(u"Password cannot be the same as the username") def _validate_email(email): """Validate the format of the email address. Arguments: email (unicode): The proposed email. 
Returns: None Raises: AccountEmailInvalid """ if not isinstance(email, basestring): raise AccountEmailInvalid(u"Email must be a string") if len(email) < EMAIL_MIN_LENGTH: raise AccountEmailInvalid( u"Email '{email}' must be at least {min} characters long".format( email=email, min=EMAIL_MIN_LENGTH ) ) if len(email) > EMAIL_MAX_LENGTH: raise AccountEmailInvalid( u"Email '{email}' must be at most {max} characters long".format( email=email, max=EMAIL_MAX_LENGTH ) ) try: validate_email(email) except ValidationError: raise AccountEmailInvalid( u"Email '{email}' format is not valid".format(email=email) )
agpl-3.0
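One pattern worth calling out from update_account_settings above is how non-editable fields are handled: rather than being silently ignored, each read-only key found in the update is stripped and reported back as a per-field error. A self-contained sketch of that check (function and field names here are illustrative, not part of the edx API):

```python
def reject_read_only_fields(update, read_only_fields):
    """Strip read-only keys from `update` and return per-field errors for them."""
    field_errors = {}
    for field in set(update.keys()) & set(read_only_fields):
        field_errors[field] = {
            "developer_message": u"This field is not editable via this API",
            "user_message": u"The '{field_name}' field cannot be edited.".format(
                field_name=field),
        }
        del update[field]
    return field_errors


update = {"username": "new_name", "bio": "hello"}
errors = reject_read_only_fields(update, read_only_fields=["username", "email"])
# Only 'username' is rejected; 'bio' stays in `update` for normal validation.
print(errors)
print(update)
```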
DimensionDataCBUSydney/libcloud
libcloud/test/storage/test_s3.py
10
39819
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import base64 import hmac import os import sys import unittest from hashlib import sha1 try: from lxml import etree as ET except ImportError: from xml.etree import ElementTree as ET from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse from libcloud.utils.py3 import parse_qs from libcloud.common.types import InvalidCredsError from libcloud.common.types import LibcloudError, MalformedResponseError from libcloud.storage.base import Container, Object from libcloud.storage.types import ContainerDoesNotExistError from libcloud.storage.types import ContainerError from libcloud.storage.types import ContainerIsNotEmptyError from libcloud.storage.types import InvalidContainerNameError from libcloud.storage.types import ObjectDoesNotExistError from libcloud.storage.types import ObjectHashMismatchError from libcloud.storage.drivers.s3 import BaseS3Connection from libcloud.storage.drivers.s3 import S3StorageDriver, S3USWestStorageDriver from libcloud.storage.drivers.s3 import S3EUWestStorageDriver from libcloud.storage.drivers.s3 import S3APSEStorageDriver from libcloud.storage.drivers.s3 import S3APNEStorageDriver from libcloud.storage.drivers.s3 import CHUNK_SIZE from libcloud.storage.drivers.dummy import DummyIterator from libcloud.utils.py3 import b from libcloud.test import StorageMockHttp, MockRawResponse # pylint: disable-msg=E0611 from libcloud.test import MockHttpTestCase # pylint: disable-msg=E0611 from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611 from libcloud.test.secrets import STORAGE_S3_PARAMS class S3MockHttp(StorageMockHttp, MockHttpTestCase): fixtures = StorageFileFixtures('s3') base_headers = {} def _UNAUTHORIZED(self, method, url, body, headers): return (httplib.UNAUTHORIZED, '', self.base_headers, httplib.responses[httplib.OK]) def _DIFFERENT_REGION(self, method, url, body, headers): return (httplib.MOVED_PERMANENTLY, '', self.base_headers, httplib.responses[httplib.OK]) def _list_containers_EMPTY(self, method, url, body, headers): body = self.fixtures.load('list_containers_empty.xml') return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK]) def _list_containers_TOKEN(self, method, url, body, headers): self.assertEqual(headers['x-amz-security-token'], 'asdf') body = self.fixtures.load('list_containers_empty.xml') return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK]) def _list_containers(self, method, url, body, headers): body = self.fixtures.load('list_containers.xml') return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK]) def _test_container_EMPTY(self, method, url, body, headers): body = self.fixtures.load('list_container_objects_empty.xml') return (httplib.OK, body, self.base_headers, 
httplib.responses[httplib.OK]) def _test_container(self, method, url, body, headers): body = self.fixtures.load('list_container_objects.xml') return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK]) def _test_container_ITERATOR(self, method, url, body, headers): if url.find('3.zip') == -1: # First part of the response (first 3 objects) file_name = 'list_container_objects_not_exhausted1.xml' else: file_name = 'list_container_objects_not_exhausted2.xml' body = self.fixtures.load(file_name) return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK]) def _test2_get_object(self, method, url, body, headers): body = self.fixtures.load('list_container_objects.xml') return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK]) def _test2_test_get_object(self, method, url, body, headers): # test_get_object body = self.fixtures.load('list_containers.xml') headers = {'content-type': 'application/zip', 'etag': '"e31208wqsdoj329jd"', 'x-amz-meta-rabbits': 'monkeys', 'content-length': 12345, 'last-modified': 'Thu, 13 Sep 2012 07:13:22 GMT' } return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _new_container_INVALID_NAME(self, method, url, body, headers): # test_create_container return (httplib.BAD_REQUEST, body, headers, httplib.responses[httplib.OK]) def _new_container_ALREADY_EXISTS(self, method, url, body, headers): # test_create_container return (httplib.CONFLICT, body, headers, httplib.responses[httplib.OK]) def _new_container(self, method, url, body, headers): # test_create_container, test_delete_container if method == 'PUT': status = httplib.OK elif method == 'DELETE': status = httplib.NO_CONTENT return (status, body, headers, httplib.responses[httplib.OK]) def _new_container_DOESNT_EXIST(self, method, url, body, headers): # test_delete_container return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.OK]) def _new_container_NOT_EMPTY(self, method, url, body, headers): # test_delete_container return (httplib.CONFLICT, body, headers, httplib.responses[httplib.OK]) def _test1_get_container(self, method, url, body, headers): body = self.fixtures.load('list_container_objects.xml') return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK]) def _container1_get_container(self, method, url, body, headers): return (httplib.NOT_FOUND, '', self.base_headers, httplib.responses[httplib.NOT_FOUND]) def _test_inexistent_get_object(self, method, url, body, headers): return (httplib.NOT_FOUND, '', self.base_headers, httplib.responses[httplib.NOT_FOUND]) def _foo_bar_container(self, method, url, body, headers): # test_delete_container return (httplib.NO_CONTENT, body, headers, httplib.responses[httplib.OK]) def _foo_bar_container_NOT_FOUND(self, method, url, body, headers): # test_delete_container_not_found return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.OK]) def _foo_bar_container_foo_bar_object_NOT_FOUND(self, method, url, body, headers): # test_delete_object_not_found return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.OK]) def _foo_bar_container_foo_bar_object(self, method, url, body, headers): # test_delete_object return (httplib.NO_CONTENT, body, headers, httplib.responses[httplib.OK]) def _foo_bar_container_foo_test_stream_data(self, method, url, body, headers): # test_upload_object_via_stream body = '' headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'} return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def 
_foo_bar_container_foo_test_stream_data_MULTIPART(self, method, url, body, headers): headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'} TEST_ID = 'VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3ZpZS5tMnRzIHVwbG9hZA' query_string = urlparse.urlsplit(url).query query = parse_qs(query_string) if not query.get('uploadId', False): self.fail('Request doesnt contain uploadId query parameter') upload_id = query['uploadId'][0] if upload_id != TEST_ID: self.fail('first uploadId doesnt match TEST_ID') if method == 'PUT': # PUT is used for uploading the part. part number is mandatory if not query.get('partNumber', False): self.fail('Request is missing partNumber query parameter') body = '' return (httplib.OK, body, headers, httplib.responses[httplib.OK]) elif method == 'DELETE': # DELETE is done for aborting the upload body = '' return (httplib.NO_CONTENT, body, headers, httplib.responses[httplib.NO_CONTENT]) else: # POST is done for committing the upload. Parse the XML and # check if the commit is proper (TODO: XML Schema based check?) commit = ET.fromstring(body) count = 0 for part in commit.findall('Part'): count += 1 part_no = part.find('PartNumber').text etag = part.find('ETag').text self.assertEqual(part_no, str(count)) self.assertEqual(etag, headers['etag']) # Make sure that manifest contains at least one part self.assertTrue(count >= 1) body = self.fixtures.load('complete_multipart.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _foo_bar_container_LIST_MULTIPART(self, method, url, body, headers): query_string = urlparse.urlsplit(url).query query = parse_qs(query_string) if 'key-marker' not in query: body = self.fixtures.load('list_multipart_1.xml') else: body = self.fixtures.load('list_multipart_2.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _foo_bar_container_my_divisor_LIST_MULTIPART(self, method, url, body, headers): body = '' return (httplib.NO_CONTENT, body, headers, httplib.responses[httplib.NO_CONTENT]) def _foo_bar_container_my_movie_m2ts_LIST_MULTIPART(self, method, url, body, headers): body = '' return (httplib.NO_CONTENT, body, headers, httplib.responses[httplib.NO_CONTENT]) class S3MockRawResponse(MockRawResponse): fixtures = StorageFileFixtures('s3') def parse_body(self): if len(self.body) == 0 and not self.parse_zero_length_body: return self.body try: try: body = ET.XML(self.body) except ValueError: # lxml wants a bytes and tests are basically hard-coded to str body = ET.XML(self.body.encode('utf-8')) except: raise MalformedResponseError("Failed to parse XML", body=self.body, driver=self.connection.driver) return body def _foo_bar_container_foo_bar_object(self, method, url, body, headers): # test_download_object_success body = self._generate_random_data(1000) return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _foo_bar_container_foo_test_upload_INVALID_HASH1(self, method, url, body, headers): body = '' headers = {} headers['etag'] = '"foobar"' # test_upload_object_invalid_hash1 return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _foo_bar_container_foo_test_upload_INVALID_HASH2(self, method, url, body, headers): # test_upload_object_invalid_hash2 body = '' headers = {'etag': '"hash343hhash89h932439jsaa89"'} return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _foo_bar_container_foo_test_upload(self, method, url, body, headers): # test_upload_object_success body = '' headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'} return (httplib.OK, body, headers, 
httplib.responses[httplib.OK]) def _foo_bar_container_foo_bar_object_INVALID_SIZE(self, method, url, body, headers): # test_upload_object_invalid_file_size body = '' return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _foo_bar_container_foo_test_stream_data(self, method, url, body, headers): # test_upload_object_via_stream body = '' headers = {'etag': '"0cc175b9c0f1b6a831c399e269772661"'} return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _foo_bar_container_foo_test_stream_data_MULTIPART(self, method, url, body, headers): headers = {} # POST is done for initiating multipart upload if method == 'POST': body = self.fixtures.load('initiate_multipart.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) else: body = '' return (httplib.BAD_REQUEST, body, headers, httplib.responses[httplib.BAD_REQUEST]) class S3Tests(unittest.TestCase): driver_type = S3StorageDriver driver_args = STORAGE_S3_PARAMS mock_response_klass = S3MockHttp mock_raw_response_klass = S3MockRawResponse @classmethod def create_driver(self): return self.driver_type(*self.driver_args) def setUp(self): self.driver_type.connectionCls.conn_classes = (None, self.mock_response_klass) self.driver_type.connectionCls.rawResponseCls = \ self.mock_raw_response_klass self.mock_response_klass.type = None self.mock_raw_response_klass.type = None self.driver = self.create_driver() def tearDown(self): self._remove_test_file() def _remove_test_file(self): file_path = os.path.abspath(__file__) + '.temp' try: os.unlink(file_path) except OSError: pass def test_invalid_credentials(self): self.mock_response_klass.type = 'UNAUTHORIZED' try: self.driver.list_containers() except InvalidCredsError: e = sys.exc_info()[1] self.assertEqual(True, isinstance(e, InvalidCredsError)) else: self.fail('Exception was not thrown') def test_token(self): self.mock_response_klass.type = 'list_containers_TOKEN' self.driver = self.driver_type(*self.driver_args, token='asdf') self.driver.list_containers() def test_signature(self): secret_key = 'ssssh!' 
sig = BaseS3Connection.get_auth_signature( method='GET', headers={'foo': 'bar', 'content-type': 'TYPE!', 'x-aws-test': 'test_value'}, params={'hello': 'world'}, expires=None, secret_key=secret_key, path='/', vendor_prefix='x-aws' ) string_to_sign = 'GET\n\nTYPE!\n\nx-aws-test:test_value\n/' b64_hmac = base64.b64encode( hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest() ) expected_sig = b64_hmac.decode('utf-8') self.assertEqual(sig, expected_sig) def test_bucket_is_located_in_different_region(self): self.mock_response_klass.type = 'DIFFERENT_REGION' try: self.driver.list_containers() except LibcloudError: pass else: self.fail('Exception was not thrown') def test_list_containers_empty(self): self.mock_response_klass.type = 'list_containers_EMPTY' containers = self.driver.list_containers() self.assertEqual(len(containers), 0) def test_list_containers_success(self): self.mock_response_klass.type = 'list_containers' containers = self.driver.list_containers() self.assertEqual(len(containers), 2) self.assertTrue('creation_date' in containers[1].extra) def test_list_container_objects_empty(self): self.mock_response_klass.type = 'EMPTY' container = Container(name='test_container', extra={}, driver=self.driver) objects = self.driver.list_container_objects(container=container) self.assertEqual(len(objects), 0) def test_list_container_objects_success(self): self.mock_response_klass.type = None container = Container(name='test_container', extra={}, driver=self.driver) objects = self.driver.list_container_objects(container=container) self.assertEqual(len(objects), 1) obj = [o for o in objects if o.name == '1.zip'][0] self.assertEqual(obj.hash, '4397da7a7649e8085de9916c240e8166') self.assertEqual(obj.size, 1234567) self.assertEqual(obj.container.name, 'test_container') self.assertEqual( obj.extra['last_modified'], '2011-04-09T19:05:18.000Z') self.assertTrue('owner' in obj.meta_data) def test_list_container_objects_iterator_has_more(self): self.mock_response_klass.type = 'ITERATOR' container = Container(name='test_container', extra={}, driver=self.driver) objects = self.driver.list_container_objects(container=container) obj = [o for o in objects if o.name == '1.zip'][0] self.assertEqual(obj.hash, '4397da7a7649e8085de9916c240e8166') self.assertEqual(obj.size, 1234567) self.assertEqual(obj.container.name, 'test_container') self.assertTrue(obj in objects) self.assertEqual(len(objects), 5) def test_list_container_objects_with_prefix(self): self.mock_response_klass.type = None container = Container(name='test_container', extra={}, driver=self.driver) objects = self.driver.list_container_objects(container=container, ex_prefix='test_prefix') self.assertEqual(len(objects), 1) obj = [o for o in objects if o.name == '1.zip'][0] self.assertEqual(obj.hash, '4397da7a7649e8085de9916c240e8166') self.assertEqual(obj.size, 1234567) self.assertEqual(obj.container.name, 'test_container') self.assertTrue('owner' in obj.meta_data) def test_get_container_doesnt_exist(self): self.mock_response_klass.type = 'get_container' try: self.driver.get_container(container_name='container1') except ContainerDoesNotExistError: pass else: self.fail('Exception was not thrown') def test_get_container_success(self): self.mock_response_klass.type = 'get_container' container = self.driver.get_container(container_name='test1') self.assertTrue(container.name, 'test1') def test_get_object_container_doesnt_exist(self): # This method makes two requests which makes mocking the response a bit # trickier self.mock_response_klass.type = 
'get_object' try: self.driver.get_object(container_name='test-inexistent', object_name='test') except ContainerDoesNotExistError: pass else: self.fail('Exception was not thrown') def test_get_object_success(self): # This method makes two requests which makes mocking the response a bit # trickier self.mock_response_klass.type = 'get_object' obj = self.driver.get_object(container_name='test2', object_name='test') self.assertEqual(obj.name, 'test') self.assertEqual(obj.container.name, 'test2') self.assertEqual(obj.size, 12345) self.assertEqual(obj.hash, 'e31208wqsdoj329jd') self.assertEqual(obj.extra['last_modified'], 'Thu, 13 Sep 2012 07:13:22 GMT') self.assertEqual(obj.extra['content_type'], 'application/zip') self.assertEqual(obj.meta_data['rabbits'], 'monkeys') def test_create_container_bad_request(self): # invalid container name, returns a 400 bad request self.mock_response_klass.type = 'INVALID_NAME' try: self.driver.create_container(container_name='new_container') except ContainerError: pass else: self.fail('Exception was not thrown') def test_create_container_already_exists(self): # container with this name already exists self.mock_response_klass.type = 'ALREADY_EXISTS' try: self.driver.create_container(container_name='new-container') except InvalidContainerNameError: pass else: self.fail('Exception was not thrown') def test_create_container_success(self): # success self.mock_response_klass.type = None name = 'new_container' container = self.driver.create_container(container_name=name) self.assertEqual(container.name, name) def test_delete_container_doesnt_exist(self): container = Container(name='new_container', extra=None, driver=self.driver) self.mock_response_klass.type = 'DOESNT_EXIST' try: self.driver.delete_container(container=container) except ContainerDoesNotExistError: pass else: self.fail('Exception was not thrown') def test_delete_container_not_empty(self): container = Container(name='new_container', extra=None, driver=self.driver) self.mock_response_klass.type = 'NOT_EMPTY' try: self.driver.delete_container(container=container) except ContainerIsNotEmptyError: pass else: self.fail('Exception was not thrown') # success self.mock_response_klass.type = None self.assertTrue(self.driver.delete_container(container=container)) def test_delete_container_not_found(self): self.mock_response_klass.type = 'NOT_FOUND' container = Container(name='foo_bar_container', extra={}, driver=self.driver) try: self.driver.delete_container(container=container) except ContainerDoesNotExistError: pass else: self.fail('Container does not exist but an exception was not' + 'thrown') def test_delete_container_success(self): self.mock_response_klass.type = None container = Container(name='new_container', extra=None, driver=self.driver) self.assertTrue(self.driver.delete_container(container=container)) def test_download_object_success(self): container = Container(name='foo_bar_container', extra={}, driver=self.driver) obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, container=container, meta_data=None, driver=self.driver_type) destination_path = os.path.abspath(__file__) + '.temp' result = self.driver.download_object(obj=obj, destination_path=destination_path, overwrite_existing=False, delete_on_failure=True) self.assertTrue(result) def test_download_object_invalid_file_size(self): self.mock_raw_response_klass.type = 'INVALID_SIZE' container = Container(name='foo_bar_container', extra={}, driver=self.driver) obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, 
container=container, meta_data=None, driver=self.driver_type) destination_path = os.path.abspath(__file__) + '.temp' result = self.driver.download_object(obj=obj, destination_path=destination_path, overwrite_existing=False, delete_on_failure=True) self.assertFalse(result) def test_download_object_invalid_file_already_exists(self): self.mock_raw_response_klass.type = 'INVALID_SIZE' container = Container(name='foo_bar_container', extra={}, driver=self.driver) obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, container=container, meta_data=None, driver=self.driver_type) destination_path = os.path.abspath(__file__) try: self.driver.download_object(obj=obj, destination_path=destination_path, overwrite_existing=False, delete_on_failure=True) except LibcloudError: pass else: self.fail('Exception was not thrown') def test_download_object_as_stream_success(self): container = Container(name='foo_bar_container', extra={}, driver=self.driver) obj = Object(name='foo_bar_object', size=1000, hash=None, extra={}, container=container, meta_data=None, driver=self.driver_type) stream = self.driver.download_object_as_stream(obj=obj, chunk_size=None) self.assertTrue(hasattr(stream, '__iter__')) def test_upload_object_invalid_ex_storage_class(self): # Invalid hash is detected on the amazon side and BAD_REQUEST is # returned file_path = os.path.abspath(__file__) container = Container(name='foo_bar_container', extra={}, driver=self.driver) object_name = 'foo_test_upload' try: self.driver.upload_object(file_path=file_path, container=container, object_name=object_name, verify_hash=True, ex_storage_class='invalid-class') except ValueError: e = sys.exc_info()[1] self.assertTrue(str(e).lower().find('invalid storage class') != -1) else: self.fail('Exception was not thrown') def test_upload_object_invalid_hash1(self): # Invalid hash is detected on the amazon side and BAD_REQUEST is # returned def upload_file(self, response, file_path, chunked=False, calculate_hash=True): return True, 'hash343hhash89h932439jsaa89', 1000 self.mock_raw_response_klass.type = 'INVALID_HASH1' old_func = self.driver_type._upload_file self.driver_type._upload_file = upload_file file_path = os.path.abspath(__file__) container = Container(name='foo_bar_container', extra={}, driver=self.driver) object_name = 'foo_test_upload' try: self.driver.upload_object(file_path=file_path, container=container, object_name=object_name, verify_hash=True) except ObjectHashMismatchError: pass else: self.fail( 'Invalid hash was returned but an exception was not thrown') finally: self.driver_type._upload_file = old_func def test_upload_object_invalid_hash2(self): # Invalid hash is detected when comparing hash provided in the response # ETag header def upload_file(self, response, file_path, chunked=False, calculate_hash=True): return True, '0cc175b9c0f1b6a831c399e269772661', 1000 self.mock_raw_response_klass.type = 'INVALID_HASH2' old_func = self.driver_type._upload_file self.driver_type._upload_file = upload_file file_path = os.path.abspath(__file__) container = Container(name='foo_bar_container', extra={}, driver=self.driver) object_name = 'foo_test_upload' try: self.driver.upload_object(file_path=file_path, container=container, object_name=object_name, verify_hash=True) except ObjectHashMismatchError: pass else: self.fail( 'Invalid hash was returned but an exception was not thrown') finally: self.driver_type._upload_file = old_func def test_upload_object_success(self): def upload_file(self, response, file_path, chunked=False, 
calculate_hash=True): return True, '0cc175b9c0f1b6a831c399e269772661', 1000 old_func = self.driver_type._upload_file self.driver_type._upload_file = upload_file file_path = os.path.abspath(__file__) container = Container(name='foo_bar_container', extra={}, driver=self.driver) object_name = 'foo_test_upload' extra = {'meta_data': {'some-value': 'foobar'}} obj = self.driver.upload_object(file_path=file_path, container=container, object_name=object_name, extra=extra, verify_hash=True) self.assertEqual(obj.name, 'foo_test_upload') self.assertEqual(obj.size, 1000) self.assertTrue('some-value' in obj.meta_data) self.driver_type._upload_file = old_func def test_upload_object_with_acl(self): def upload_file(self, response, file_path, chunked=False, calculate_hash=True): return True, '0cc175b9c0f1b6a831c399e269772661', 1000 old_func = self.driver_type._upload_file self.driver_type._upload_file = upload_file file_path = os.path.abspath(__file__) container = Container(name='foo_bar_container', extra={}, driver=self.driver) object_name = 'foo_test_upload' extra = {'acl': 'public-read'} obj = self.driver.upload_object(file_path=file_path, container=container, object_name=object_name, extra=extra, verify_hash=True) self.assertEqual(obj.name, 'foo_test_upload') self.assertEqual(obj.size, 1000) self.assertEqual(obj.extra['acl'], 'public-read') self.driver_type._upload_file = old_func def test_upload_empty_object_via_stream(self): if self.driver.supports_s3_multipart_upload: self.mock_raw_response_klass.type = 'MULTIPART' self.mock_response_klass.type = 'MULTIPART' else: self.mock_raw_response_klass.type = None self.mock_response_klass.type = None container = Container(name='foo_bar_container', extra={}, driver=self.driver) object_name = 'foo_test_stream_data' iterator = DummyIterator(data=['']) extra = {'content_type': 'text/plain'} obj = self.driver.upload_object_via_stream(container=container, object_name=object_name, iterator=iterator, extra=extra) self.assertEqual(obj.name, object_name) self.assertEqual(obj.size, 0) def test_upload_small_object_via_stream(self): if self.driver.supports_s3_multipart_upload: self.mock_raw_response_klass.type = 'MULTIPART' self.mock_response_klass.type = 'MULTIPART' else: self.mock_raw_response_klass.type = None self.mock_response_klass.type = None container = Container(name='foo_bar_container', extra={}, driver=self.driver) object_name = 'foo_test_stream_data' iterator = DummyIterator(data=['2', '3', '5']) extra = {'content_type': 'text/plain'} obj = self.driver.upload_object_via_stream(container=container, object_name=object_name, iterator=iterator, extra=extra) self.assertEqual(obj.name, object_name) self.assertEqual(obj.size, 3) def test_upload_big_object_via_stream(self): if self.driver.supports_s3_multipart_upload: self.mock_raw_response_klass.type = 'MULTIPART' self.mock_response_klass.type = 'MULTIPART' else: self.mock_raw_response_klass.type = None self.mock_response_klass.type = None container = Container(name='foo_bar_container', extra={}, driver=self.driver) object_name = 'foo_test_stream_data' iterator = DummyIterator( data=['2' * CHUNK_SIZE, '3' * CHUNK_SIZE, '5']) extra = {'content_type': 'text/plain'} obj = self.driver.upload_object_via_stream(container=container, object_name=object_name, iterator=iterator, extra=extra) self.assertEqual(obj.name, object_name) self.assertEqual(obj.size, CHUNK_SIZE * 2 + 1) def test_upload_object_via_stream_abort(self): if not self.driver.supports_s3_multipart_upload: return self.mock_raw_response_klass.type = 'MULTIPART' 
self.mock_response_klass.type = 'MULTIPART' def _faulty_iterator(): for i in range(0, 5): yield str(i) raise RuntimeError('Error in fetching data') container = Container(name='foo_bar_container', extra={}, driver=self.driver) object_name = 'foo_test_stream_data' iterator = _faulty_iterator() extra = {'content_type': 'text/plain'} try: self.driver.upload_object_via_stream(container=container, object_name=object_name, iterator=iterator, extra=extra) except Exception: pass return def test_s3_list_multipart_uploads(self): if not self.driver.supports_s3_multipart_upload: return self.mock_response_klass.type = 'LIST_MULTIPART' S3StorageDriver.RESPONSES_PER_REQUEST = 2 container = Container(name='foo_bar_container', extra={}, driver=self.driver) for upload in self.driver.ex_iterate_multipart_uploads(container): self.assertNotEqual(upload.key, None) self.assertNotEqual(upload.id, None) self.assertNotEqual(upload.created_at, None) self.assertNotEqual(upload.owner, None) self.assertNotEqual(upload.initiator, None) def test_s3_abort_multipart_uploads(self): if not self.driver.supports_s3_multipart_upload: return self.mock_response_klass.type = 'LIST_MULTIPART' S3StorageDriver.RESPONSES_PER_REQUEST = 2 container = Container(name='foo_bar_container', extra={}, driver=self.driver) self.driver.ex_cleanup_all_multipart_uploads(container) def test_delete_object_not_found(self): self.mock_response_klass.type = 'NOT_FOUND' container = Container(name='foo_bar_container', extra={}, driver=self.driver) obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None, meta_data=None, container=container, driver=self.driver) try: self.driver.delete_object(obj=obj) except ObjectDoesNotExistError: pass else: self.fail('Exception was not thrown') def test_delete_object_success(self): container = Container(name='foo_bar_container', extra={}, driver=self.driver) obj = Object(name='foo_bar_object', size=1234, hash=None, extra=None, meta_data=None, container=container, driver=self.driver) result = self.driver.delete_object(obj=obj) self.assertTrue(result) class S3USWestTests(S3Tests): driver_type = S3USWestStorageDriver class S3EUWestTests(S3Tests): driver_type = S3EUWestStorageDriver class S3APSETests(S3Tests): driver_type = S3APSEStorageDriver class S3APNETests(S3Tests): driver_tyoe = S3APNEStorageDriver if __name__ == '__main__': sys.exit(unittest.main())
apache-2.0
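The MULTIPART mock handler in the test suite above validates the commit request by walking the <Part> elements of the CompleteMultipartUpload body and checking that part numbers are consecutive and ETags match. A small standalone illustration of that parse (the XML below is a hand-written sample, not a fixture from the test suite):

```python
from xml.etree import ElementTree as ET

body = """
<CompleteMultipartUpload>
  <Part><PartNumber>1</PartNumber><ETag>"0cc175b9c0f1b6a831c399e269772661"</ETag></Part>
  <Part><PartNumber>2</PartNumber><ETag>"0cc175b9c0f1b6a831c399e269772661"</ETag></Part>
</CompleteMultipartUpload>
"""

commit = ET.fromstring(body)
count = 0
for part in commit.findall('Part'):
    count += 1
    # Part numbers must be consecutive, starting at 1.
    assert part.find('PartNumber').text == str(count)
    print(part.find('PartNumber').text, part.find('ETag').text)

# A valid manifest contains at least one part.
assert count >= 1
```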
tensorflow/tensorflow
tensorflow/python/saved_model/method_name_updater_test.py
18
10472
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for method name utils.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile from google.protobuf import text_format from tensorflow.core.protobuf import saved_model_pb2 from tensorflow.python.lib.io import file_io from tensorflow.python.platform import test from tensorflow.python.saved_model import constants from tensorflow.python.saved_model import loader_impl as loader from tensorflow.python.saved_model import method_name_updater from tensorflow.python.util import compat _SAVED_MODEL_PROTO = text_format.Parse(""" saved_model_schema_version: 1 meta_graphs { meta_info_def { tags: "serve" } signature_def: { key: "serving_default" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "predict" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: -1 } dim { size: 100 } } } } } } signature_def: { key: "foo" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "predict" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: 1 } } } } } } } meta_graphs { meta_info_def { tags: "serve" tags: "gpu" } signature_def: { key: "serving_default" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "predict" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: -1 } } } } } } signature_def: { key: "bar" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "predict" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: 1 } } } } } } } """, saved_model_pb2.SavedModel()) class MethodNameUpdaterTest(test.TestCase): def setUp(self): super(MethodNameUpdaterTest, self).setUp() self._saved_model_path = tempfile.mkdtemp(prefix=test.get_temp_dir()) def testBasic(self): path = os.path.join( compat.as_bytes(self._saved_model_path), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB)) file_io.write_string_to_file( path, _SAVED_MODEL_PROTO.SerializeToString(deterministic=True)) updater = method_name_updater.MethodNameUpdater(self._saved_model_path) updater.replace_method_name( signature_key="serving_default", method_name="classify") updater.save() actual = loader.parse_saved_model(self._saved_model_path) self.assertProtoEquals( actual, text_format.Parse( """ saved_model_schema_version: 1 meta_graphs { meta_info_def { tags: "serve" } signature_def: { key: "serving_default" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "classify" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: -1 } dim { size: 100 } } } } } } signature_def: { key: "foo" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "predict" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: 1 } } 
} } } } } meta_graphs { meta_info_def { tags: "serve" tags: "gpu" } signature_def: { key: "serving_default" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "classify" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: -1 } } } } } } signature_def: { key: "bar" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "predict" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: 1 } } } } } } } """, saved_model_pb2.SavedModel())) def testTextFormatAndNewExportDir(self): path = os.path.join( compat.as_bytes(self._saved_model_path), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT)) file_io.write_string_to_file(path, str(_SAVED_MODEL_PROTO)) updater = method_name_updater.MethodNameUpdater(self._saved_model_path) updater.replace_method_name( signature_key="foo", method_name="regress", tags="serve") updater.replace_method_name( signature_key="bar", method_name="classify", tags=["gpu", "serve"]) new_export_dir = tempfile.mkdtemp(prefix=test.get_temp_dir()) updater.save(new_export_dir) self.assertTrue( file_io.file_exists( os.path.join( compat.as_bytes(new_export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT)))) actual = loader.parse_saved_model(new_export_dir) self.assertProtoEquals( actual, text_format.Parse( """ saved_model_schema_version: 1 meta_graphs { meta_info_def { tags: "serve" } signature_def: { key: "serving_default" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "predict" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: -1 } dim { size: 100 } } } } } } signature_def: { key: "foo" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "regress" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: 1 } } } } } } } meta_graphs { meta_info_def { tags: "serve" tags: "gpu" } signature_def: { key: "serving_default" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "predict" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: -1 } } } } } } signature_def: { key: "bar" value: { inputs: { key: "inputs" value { name: "input_node:0" } } method_name: "classify" outputs: { key: "outputs" value { dtype: DT_FLOAT tensor_shape { dim { size: 1 } } } } } } } """, saved_model_pb2.SavedModel())) def testExceptions(self): with self.assertRaises(IOError): updater = method_name_updater.MethodNameUpdater( tempfile.mkdtemp(prefix=test.get_temp_dir())) path = os.path.join( compat.as_bytes(self._saved_model_path), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB)) file_io.write_string_to_file( path, _SAVED_MODEL_PROTO.SerializeToString(deterministic=True)) updater = method_name_updater.MethodNameUpdater(self._saved_model_path) with self.assertRaisesRegex(ValueError, "signature_key must be defined"): updater.replace_method_name( signature_key=None, method_name="classify") with self.assertRaisesRegex(ValueError, "method_name must be defined"): updater.replace_method_name( signature_key="foobar", method_name="") with self.assertRaisesRegex( ValueError, r"MetaGraphDef associated with tags \['gpu'\] could not be found"): updater.replace_method_name( signature_key="bar", method_name="classify", tags=["gpu"]) with self.assertRaisesRegex( ValueError, r"MetaGraphDef associated with tags \['serve'\] does not " r"have a signature_def with key: baz"): updater.replace_method_name( signature_key="baz", method_name="classify", tags=["serve"]) if 
__name__ == "__main__": test.main()
apache-2.0
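For reference, the API surface exercised by the test above boils down to three calls: construct the updater over a SavedModel directory, replace method names per signature (optionally scoped by tags), and save. A minimal usage sketch, with placeholder paths:

```python
from tensorflow.python.saved_model import method_name_updater

# Point the updater at an existing SavedModel directory (placeholder path).
updater = method_name_updater.MethodNameUpdater("/tmp/my_saved_model")

# Rewrite a signature's method name across all MetaGraphDefs...
updater.replace_method_name(signature_key="serving_default",
                            method_name="classify")
# ...or only in the MetaGraphDef matching a specific tag set.
updater.replace_method_name(signature_key="bar",
                            method_name="classify",
                            tags=["serve", "gpu"])

# Save in place, or into a new export directory.
updater.save("/tmp/my_saved_model_updated")
```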