Dataset schema:
  repo_name: string, lengths 5 to 100
  path: string, lengths 4 to 375
  copies: string, 991 distinct values
  size: string, lengths 4 to 7
  content: string, lengths 666 to 1M
  license: string, 15 distinct values
repo_name: jeanlinux/calibre
path: src/calibre/ebooks/oeb/display/webview.py
copies: 13
size: 2192
content:
#!/usr/bin/env python2 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai from __future__ import (unicode_literals, division, absolute_import, print_function) __license__ = 'GPL v3' __copyright__ = '2012, Kovid Goyal <[email protected]>' __docformat__ = 'restructuredtext en' import re from calibre import guess_type class EntityDeclarationProcessor(object): # {{{ def __init__(self, html): self.declared_entities = {} for match in re.finditer(r'<!\s*ENTITY\s+([^>]+)>', html): tokens = match.group(1).split() if len(tokens) > 1: self.declared_entities[tokens[0].strip()] = tokens[1].strip().replace('"', '') self.processed_html = html for key, val in self.declared_entities.iteritems(): self.processed_html = self.processed_html.replace('&%s;'%key, val) # }}} def self_closing_sub(match): tag = match.group(1) if tag.lower().strip() == 'br': return match.group() return '<%s%s></%s>'%(match.group(1), match.group(2), match.group(1)) def load_html(path, view, codec='utf-8', mime_type=None, pre_load_callback=lambda x:None, path_is_html=False, force_as_html=False): from PyQt5.Qt import QUrl, QByteArray if mime_type is None: mime_type = guess_type(path)[0] if not mime_type: mime_type = 'text/html' if path_is_html: html = path else: with open(path, 'rb') as f: html = f.read().decode(codec, 'replace') html = EntityDeclarationProcessor(html).processed_html self_closing_pat = re.compile(r'<\s*([:A-Za-z0-9-]+)([^>]*)/\s*>') html = self_closing_pat.sub(self_closing_sub, html) loading_url = QUrl.fromLocalFile(path) pre_load_callback(loading_url) if force_as_html or re.search(r'<[a-zA-Z0-9-]+:svg', html) is None: view.setHtml(html, loading_url) else: view.setContent(QByteArray(html.encode(codec)), mime_type, loading_url) mf = view.page().mainFrame() elem = mf.findFirstElement('parsererror') if not elem.isNull(): return False return True
license: gpl-3.0
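A minimal standalone sketch of the self-closing-tag normalization used in the webview.py content above; it reuses only the regex logic (no calibre or PyQt5 imports) and the sample HTML string is invented.

import re

# Same pattern as in load_html(): matches <tag .../> style self-closing elements.
self_closing_pat = re.compile(r'<\s*([:A-Za-z0-9-]+)([^>]*)/\s*>')

def self_closing_sub(match):
    tag = match.group(1)
    if tag.lower().strip() == 'br':
        return match.group()  # <br/> is left alone
    # expand <tag attrs/> into <tag attrs></tag>
    return '<%s%s></%s>' % (match.group(1), match.group(2), match.group(1))

html = '<div class="x"/><br/><svg:rect width="1"/>'
print(self_closing_pat.sub(self_closing_sub, html))
# -> <div class="x"></div><br/><svg:rect width="1"></svg:rect>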
repo_name: mathemage/h2o-3
path: h2o-py/tests/testdir_algos/gbm/pyunit_cv_cars_gbm.py
copies: 4
size: 7071
content:
from __future__ import print_function from builtins import range import sys sys.path.insert(1,"../../../") import h2o from tests import pyunit_utils import random from h2o.estimators.gbm import H2OGradientBoostingEstimator def cv_cars_gbm(): # read in the dataset and construct training set (and validation set) cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv")) # choose the type model-building exercise (multinomial classification or regression). 0:regression, 1:binomial, # 2:multinomial problem = 1 #random.sample(list(range(3)),1)[0] # pick the predictors and response column, along with the correct distribution predictors = ["displacement","power","weight","acceleration","year"] if problem == 1 : response_col = "economy_20mpg" distribution = "bernoulli" cars[response_col] = cars[response_col].asfactor() elif problem == 2 : response_col = "cylinders" distribution = "multinomial" cars[response_col] = cars[response_col].asfactor() else : response_col = "economy" distribution = "gaussian" print("Distribution: {0}".format(distribution)) print("Response column: {0}".format(response_col)) ## cross-validation # 1. check that cv metrics are the same over repeated "Modulo" runs nfolds = random.randint(3,10) gbm1 = H2OGradientBoostingEstimator(nfolds=nfolds, distribution=distribution, ntrees=5, fold_assignment="Modulo") gbm1.train(x=predictors, y=response_col, training_frame=cars) gbm2 = H2OGradientBoostingEstimator(nfolds=nfolds, distribution=distribution, ntrees=5, fold_assignment="Modulo") gbm2.train(x=predictors, y=response_col, training_frame=cars) pyunit_utils.check_models(gbm1, gbm2, True) # 2. check that cv metrics are different over repeated "Random" runs nfolds = random.randint(3,10) gbm1 = H2OGradientBoostingEstimator(nfolds=nfolds, distribution=distribution, ntrees=5, fold_assignment="Random") gbm1.train(x=predictors, y=response_col, training_frame=cars) gbm2 = H2OGradientBoostingEstimator(nfolds=nfolds, distribution=distribution, ntrees=5, fold_assignment="Random") gbm2.train(x=predictors, y=response_col, training_frame=cars) try: pyunit_utils.check_models(gbm1, gbm2, True) assert False, "Expected models to be different over repeated Random runs" except AssertionError: assert True # 3. folds_column num_folds = random.randint(2,5) fold_assignments = h2o.H2OFrame([[random.randint(0,num_folds-1)] for f in range(cars.nrow)]) fold_assignments.set_names(["fold_assignments"]) cars = cars.cbind(fold_assignments) gbm = H2OGradientBoostingEstimator(distribution=distribution, ntrees=5, keep_cross_validation_predictions=True) gbm.train(x=predictors, y=response_col, training_frame=cars, fold_column="fold_assignments") num_cv_models = len(gbm._model_json['output']['cross_validation_models']) assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \ "{1}".format(num_folds, num_cv_models) cv_model1 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][0]['name']) cv_model2 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][1]['name']) # 4. keep_cross_validation_predictions cv_predictions = gbm1._model_json['output']['cross_validation_predictions'] assert cv_predictions is None, "Expected cross-validation predictions to be None, but got {0}".format(cv_predictions) cv_predictions = gbm._model_json['output']['cross_validation_predictions'] assert len(cv_predictions)==num_folds, "Expected the same number of cross-validation predictions " \ "as folds, but got {0}".format(len(cv_predictions)) ## boundary cases # 1. 
nfolds = number of observations (leave-one-out cross-validation) gbm = H2OGradientBoostingEstimator(nfolds=cars.nrow, distribution=distribution,ntrees=5, fold_assignment="Modulo") gbm.train(x=predictors, y=response_col, training_frame=cars) # 2. nfolds = 0 gbm1 = H2OGradientBoostingEstimator(nfolds=0, distribution=distribution, ntrees=5) gbm1.train(x=predictors, y=response_col,training_frame=cars) # check that this is equivalent to no nfolds gbm2 = H2OGradientBoostingEstimator(distribution=distribution, ntrees=5) gbm2.train(x=predictors, y=response_col, training_frame=cars) pyunit_utils.check_models(gbm1, gbm2) # 3. cross-validation and regular validation attempted gbm = H2OGradientBoostingEstimator(nfolds=random.randint(3,10), ntrees=5, distribution=distribution) gbm.train(x=predictors, y=response_col, training_frame=cars, validation_frame=cars) ## error cases # 1. nfolds == 1 or < 0 try: gbm = H2OGradientBoostingEstimator(nfolds=random.sample([-1,1],1)[0], ntrees=5, distribution=distribution) gbm.train(x=predictors, y=response_col, training_frame=cars) assert False, "Expected model-build to fail when nfolds is 1 or < 0" except EnvironmentError: assert True # 2. more folds than observations try: gbm = H2OGradientBoostingEstimator(nfolds=cars.nrow+1, distribution=distribution, ntrees=5, fold_assignment="Modulo") gbm.train(x=predictors, y=response_col, training_frame=cars) assert False, "Expected model-build to fail when nfolds > nobs" except EnvironmentError: assert True # 3. fold_column and nfolds both specified try: gbm = H2OGradientBoostingEstimator(nfolds=3, ntrees=5, distribution=distribution) gbm.train(x=predictors, y=response_col, training_frame=cars, fold_column="fold_assignments") assert False, "Expected model-build to fail when fold_column and nfolds both specified" except EnvironmentError: assert True # 4. fold_column and fold_assignment both specified try: gbm = H2OGradientBoostingEstimator(ntrees=5, fold_assignment="Random", distribution=distribution) gbm.train(x=predictors, y=response_col, training_frame=cars, fold_column="fold_assignments") assert False, "Expected model-build to fail when fold_column and fold_assignment both specified" except EnvironmentError: assert True if __name__ == "__main__": pyunit_utils.standalone_test(cv_cars_gbm) else: cv_cars_gbm()
license: apache-2.0
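A condensed sketch of the cross-validation setup that pyunit_cv_cars_gbm.py exercises above; it assumes a reachable H2O cluster and a local copy of cars_20mpg.csv (the path here is hypothetical), and it is not part of the test suite.

import h2o
from h2o.estimators.gbm import H2OGradientBoostingEstimator

h2o.init()
cars = h2o.import_file("smalldata/junit/cars_20mpg.csv")  # hypothetical local path
cars["economy_20mpg"] = cars["economy_20mpg"].asfactor()  # binomial response

predictors = ["displacement", "power", "weight", "acceleration", "year"]
gbm = H2OGradientBoostingEstimator(nfolds=5, ntrees=5, distribution="bernoulli",
                                   fold_assignment="Modulo")
gbm.train(x=predictors, y="economy_20mpg", training_frame=cars)
print(gbm.auc(xval=True))  # cross-validated AUC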
repo_name: yatinag/selfcure
path: lib/werkzeug/contrib/cache.py
copies: 252
size: 27983
content:
# -*- coding: utf-8 -*- """ werkzeug.contrib.cache ~~~~~~~~~~~~~~~~~~~~~~ The main problem with dynamic Web sites is, well, they're dynamic. Each time a user requests a page, the webserver executes a lot of code, queries the database, renders templates until the visitor gets the page he sees. This is a lot more expensive than just loading a file from the file system and sending it to the visitor. For most Web applications, this overhead isn't a big deal but once it becomes, you will be glad to have a cache system in place. How Caching Works ================= Caching is pretty simple. Basically you have a cache object lurking around somewhere that is connected to a remote cache or the file system or something else. When the request comes in you check if the current page is already in the cache and if so, you're returning it from the cache. Otherwise you generate the page and put it into the cache. (Or a fragment of the page, you don't have to cache the full thing) Here is a simple example of how to cache a sidebar for a template:: def get_sidebar(user): identifier = 'sidebar_for/user%d' % user.id value = cache.get(identifier) if value is not None: return value value = generate_sidebar_for(user=user) cache.set(identifier, value, timeout=60 * 5) return value Creating a Cache Object ======================= To create a cache object you just import the cache system of your choice from the cache module and instantiate it. Then you can start working with that object: >>> from werkzeug.contrib.cache import SimpleCache >>> c = SimpleCache() >>> c.set("foo", "value") >>> c.get("foo") 'value' >>> c.get("missing") is None True Please keep in mind that you have to create the cache and put it somewhere you have access to it (either as a module global you can import or you just put it into your WSGI application). :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import os import re import errno import tempfile from hashlib import md5 from time import time try: import cPickle as pickle except ImportError: # pragma: no cover import pickle from werkzeug._compat import iteritems, string_types, text_type, \ integer_types, to_native from werkzeug.posixemulation import rename def _items(mappingorseq): """Wrapper for efficient iteration over mappings represented by dicts or sequences:: >>> for k, v in _items((i, i*i) for i in xrange(5)): ... assert k*k == v >>> for k, v in _items(dict((i, i*i) for i in xrange(5))): ... assert k*k == v """ if hasattr(mappingorseq, 'items'): return iteritems(mappingorseq) return mappingorseq class BaseCache(object): """Baseclass for the cache systems. All the cache systems implement this API or a superset of it. :param default_timeout: the default timeout (in seconds) that is used if no timeout is specified on :meth:`set`. A timeout of 0 indicates that the cache never expires. """ def __init__(self, default_timeout=300): self.default_timeout = default_timeout def get(self, key): """Look up key in the cache and return the value for it. :param key: the key to be looked up. :returns: The value if it exists and is readable, else ``None``. """ return None def delete(self, key): """Delete `key` from the cache. :param key: the key to delete. :returns: Whether the key existed and has been deleted. :rtype: boolean """ return True def get_many(self, *keys): """Returns a list of values for the given keys. 
For each key a item in the list is created:: foo, bar = cache.get_many("foo", "bar") Has the same error handling as :meth:`get`. :param keys: The function accepts multiple keys as positional arguments. """ return map(self.get, keys) def get_dict(self, *keys): """Like :meth:`get_many` but return a dict:: d = cache.get_dict("foo", "bar") foo = d["foo"] bar = d["bar"] :param keys: The function accepts multiple keys as positional arguments. """ return dict(zip(keys, self.get_many(*keys))) def set(self, key, value, timeout=None): """Add a new key/value to the cache (overwrites value, if key already exists in the cache). :param key: the key to set :param value: the value for the key :param timeout: the cache timeout for the key (if not specified, it uses the default timeout). A timeout of 0 idicates that the cache never expires. :returns: ``True`` if key has been updated, ``False`` for backend errors. Pickling errors, however, will raise a subclass of ``pickle.PickleError``. :rtype: boolean """ return True def add(self, key, value, timeout=None): """Works like :meth:`set` but does not overwrite the values of already existing keys. :param key: the key to set :param value: the value for the key :param timeout: the cache timeout for the key or the default timeout if not specified. A timeout of 0 indicates that the cache never expires. :returns: Same as :meth:`set`, but also ``False`` for already existing keys. :rtype: boolean """ return True def set_many(self, mapping, timeout=None): """Sets multiple keys and values from a mapping. :param mapping: a mapping with the keys/values to set. :param timeout: the cache timeout for the key (if not specified, it uses the default timeout). A timeout of 0 indicates tht the cache never expires. :returns: Whether all given keys have been set. :rtype: boolean """ rv = True for key, value in _items(mapping): if not self.set(key, value, timeout): rv = False return rv def delete_many(self, *keys): """Deletes multiple keys at once. :param keys: The function accepts multiple keys as positional arguments. :returns: Whether all given keys have been deleted. :rtype: boolean """ return all(self.delete(key) for key in keys) def has(self, key): """Checks if a key exists in the cache without returning it. This is a cheap operation that bypasses loading the actual data on the backend. This method is optional and may not be implemented on all caches. :param key: the key to check """ raise NotImplementedError( '%s doesn\'t have an efficient implementation of `has`. That ' 'means it is impossible to check whether a key exists without ' 'fully loading the key\'s data. Consider using `self.get` ' 'explicitly if you don\'t care about performance.' ) def clear(self): """Clears the cache. Keep in mind that not all caches support completely clearing the cache. :returns: Whether the cache has been cleared. :rtype: boolean """ return True def inc(self, key, delta=1): """Increments the value of a key by `delta`. If the key does not yet exist it is initialized with `delta`. For supporting caches this is an atomic operation. :param key: the key to increment. :param delta: the delta to add. :returns: The new value or ``None`` for backend errors. """ value = (self.get(key) or 0) + delta return value if self.set(key, value) else None def dec(self, key, delta=1): """Decrements the value of a key by `delta`. If the key does not yet exist it is initialized with `-delta`. For supporting caches this is an atomic operation. :param key: the key to increment. :param delta: the delta to subtract. 
:returns: The new value or `None` for backend errors. """ value = (self.get(key) or 0) - delta return value if self.set(key, value) else None class NullCache(BaseCache): """A cache that doesn't cache. This can be useful for unit testing. :param default_timeout: a dummy parameter that is ignored but exists for API compatibility with other caches. """ class SimpleCache(BaseCache): """Simple memory cache for single process environments. This class exists mainly for the development server and is not 100% thread safe. It tries to use as many atomic operations as possible and no locks for simplicity but it could happen under heavy load that keys are added multiple times. :param threshold: the maximum number of items the cache stores before it starts deleting some. :param default_timeout: the default timeout that is used if no timeout is specified on :meth:`~BaseCache.set`. A timeout of 0 indicates that the cache never expires. """ def __init__(self, threshold=500, default_timeout=300): BaseCache.__init__(self, default_timeout) self._cache = {} self.clear = self._cache.clear self._threshold = threshold def _prune(self): if len(self._cache) > self._threshold: now = time() toremove = [] for idx, (key, (expires, _)) in enumerate(self._cache.items()): if (expires != 0 and expires <= now) or idx % 3 == 0: toremove.append(key) for key in toremove: self._cache.pop(key, None) def _get_expiration(self, timeout): if timeout is None: timeout = self.default_timeout if timeout > 0: timeout = time() + timeout return timeout def get(self, key): try: expires, value = self._cache[key] if expires == 0 or expires > time(): return pickle.loads(value) except (KeyError, pickle.PickleError): return None def set(self, key, value, timeout=None): expires = self._get_expiration(timeout) self._prune() self._cache[key] = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL)) return True def add(self, key, value, timeout=None): expires = self._get_expiration(timeout) self._prune() item = (expires, pickle.dumps(value, pickle.HIGHEST_PROTOCOL)) if key in self._cache: return False self._cache.setdefault(key, item) return True def delete(self, key): return self._cache.pop(key, None) is not None def has(self, key): try: expires, value = self._cache[key] return expires == 0 or expires > time() except KeyError: return False _test_memcached_key = re.compile(r'[^\x00-\x21\xff]{1,250}$').match class MemcachedCache(BaseCache): """A cache that uses memcached as backend. The first argument can either be an object that resembles the API of a :class:`memcache.Client` or a tuple/list of server addresses. In the event that a tuple/list is passed, Werkzeug tries to import the best available memcache library. This cache looks into the following packages/modules to find bindings for memcached: - ``pylibmc`` - ``google.appengine.api.memcached`` - ``memcached`` Implementation notes: This cache backend works around some limitations in memcached to simplify the interface. For example unicode keys are encoded to utf-8 on the fly. Methods such as :meth:`~BaseCache.get_dict` return the keys in the same format as passed. Furthermore all get methods silently ignore key errors to not cause problems when untrusted user data is passed to the get methods which is often the case in web applications. :param servers: a list or tuple of server addresses or alternatively a :class:`memcache.Client` or a compatible client. :param default_timeout: the default timeout that is used if no timeout is specified on :meth:`~BaseCache.set`. 
A timeout of 0 indicates taht the cache never expires. :param key_prefix: a prefix that is added before all keys. This makes it possible to use the same memcached server for different applications. Keep in mind that :meth:`~BaseCache.clear` will also clear keys with a different prefix. """ def __init__(self, servers=None, default_timeout=300, key_prefix=None): BaseCache.__init__(self, default_timeout) if servers is None or isinstance(servers, (list, tuple)): if servers is None: servers = ['127.0.0.1:11211'] self._client = self.import_preferred_memcache_lib(servers) if self._client is None: raise RuntimeError('no memcache module found') else: # NOTE: servers is actually an already initialized memcache # client. self._client = servers self.key_prefix = to_native(key_prefix) def _normalize_key(self, key): key = to_native(key, 'utf-8') if self.key_prefix: key = self.key_prefix + key return key def _normalize_timeout(self, timeout): if timeout is None: timeout = self.default_timeout if timeout > 0: timeout = int(time()) + timeout return timeout def get(self, key): key = self._normalize_key(key) # memcached doesn't support keys longer than that. Because often # checks for so long keys can occur because it's tested from user # submitted data etc we fail silently for getting. if _test_memcached_key(key): return self._client.get(key) def get_dict(self, *keys): key_mapping = {} have_encoded_keys = False for key in keys: encoded_key = self._normalize_key(key) if not isinstance(key, str): have_encoded_keys = True if _test_memcached_key(key): key_mapping[encoded_key] = key d = rv = self._client.get_multi(key_mapping.keys()) if have_encoded_keys or self.key_prefix: rv = {} for key, value in iteritems(d): rv[key_mapping[key]] = value if len(rv) < len(keys): for key in keys: if key not in rv: rv[key] = None return rv def add(self, key, value, timeout=None): key = self._normalize_key(key) timeout = self._normalize_timeout(timeout) return self._client.add(key, value, timeout) def set(self, key, value, timeout=None): key = self._normalize_key(key) timeout = self._normalize_timeout(timeout) return self._client.set(key, value, timeout) def get_many(self, *keys): d = self.get_dict(*keys) return [d[key] for key in keys] def set_many(self, mapping, timeout=None): new_mapping = {} for key, value in _items(mapping): key = self._normalize_key(key) new_mapping[key] = value timeout = self._normalize_timeout(timeout) failed_keys = self._client.set_multi(new_mapping, timeout) return not failed_keys def delete(self, key): key = self._normalize_key(key) if _test_memcached_key(key): return self._client.delete(key) def delete_many(self, *keys): new_keys = [] for key in keys: key = self._normalize_key(key) if _test_memcached_key(key): new_keys.append(key) return self._client.delete_multi(new_keys) def has(self, key): key = self._normalize_key(key) if _test_memcached_key(key): return self._client.append(key, '') return False def clear(self): return self._client.flush_all() def inc(self, key, delta=1): key = self._normalize_key(key) return self._client.incr(key, delta) def dec(self, key, delta=1): key = self._normalize_key(key) return self._client.decr(key, delta) def import_preferred_memcache_lib(self, servers): """Returns an initialized memcache client. 
Used by the constructor.""" try: import pylibmc except ImportError: pass else: return pylibmc.Client(servers) try: from google.appengine.api import memcache except ImportError: pass else: return memcache.Client() try: import memcache except ImportError: pass else: return memcache.Client(servers) # backwards compatibility GAEMemcachedCache = MemcachedCache class RedisCache(BaseCache): """Uses the Redis key-value store as a cache backend. The first argument can be either a string denoting address of the Redis server or an object resembling an instance of a redis.Redis class. Note: Python Redis API already takes care of encoding unicode strings on the fly. .. versionadded:: 0.7 .. versionadded:: 0.8 `key_prefix` was added. .. versionchanged:: 0.8 This cache backend now properly serializes objects. .. versionchanged:: 0.8.3 This cache backend now supports password authentication. .. versionchanged:: 0.10 ``**kwargs`` is now passed to the redis object. :param host: address of the Redis server or an object which API is compatible with the official Python Redis client (redis-py). :param port: port number on which Redis server listens for connections. :param password: password authentication for the Redis server. :param db: db (zero-based numeric index) on Redis Server to connect. :param default_timeout: the default timeout that is used if no timeout is specified on :meth:`~BaseCache.set`. A timeout of 0 indicates that the cache never expires. :param key_prefix: A prefix that should be added to all keys. Any additional keyword arguments will be passed to ``redis.Redis``. """ def __init__(self, host='localhost', port=6379, password=None, db=0, default_timeout=300, key_prefix=None, **kwargs): BaseCache.__init__(self, default_timeout) if isinstance(host, string_types): try: import redis except ImportError: raise RuntimeError('no redis module found') if kwargs.get('decode_responses', None): raise ValueError('decode_responses is not supported by ' 'RedisCache.') self._client = redis.Redis(host=host, port=port, password=password, db=db, **kwargs) else: self._client = host self.key_prefix = key_prefix or '' def _get_expiration(self, timeout): if timeout is None: timeout = self.default_timeout if timeout == 0: timeout = -1 return timeout def dump_object(self, value): """Dumps an object into a string for redis. By default it serializes integers as regular string and pickle dumps everything else. """ t = type(value) if t in integer_types: return str(value).encode('ascii') return b'!' + pickle.dumps(value) def load_object(self, value): """The reversal of :meth:`dump_object`. This might be called with None. """ if value is None: return None if value.startswith(b'!'): try: return pickle.loads(value[1:]) except pickle.PickleError: return None try: return int(value) except ValueError: # before 0.8 we did not have serialization. Still support that. 
return value def get(self, key): return self.load_object(self._client.get(self.key_prefix + key)) def get_many(self, *keys): if self.key_prefix: keys = [self.key_prefix + key for key in keys] return [self.load_object(x) for x in self._client.mget(keys)] def set(self, key, value, timeout=None): timeout = self._get_expiration(timeout) dump = self.dump_object(value) if timeout == -1: result = self._client.set(name=self.key_prefix + key, value=dump) else: result = self._client.setex(name=self.key_prefix + key, value=dump, time=timeout) return result def add(self, key, value, timeout=None): timeout = self._get_expiration(timeout) dump = self.dump_object(value) return ( self._client.setnx(name=self.key_prefix + key, value=dump) and self._client.expire(name=self.key_prefix + key, time=timeout) ) def set_many(self, mapping, timeout=None): timeout = self._get_expiration(timeout) # Use transaction=False to batch without calling redis MULTI # which is not supported by twemproxy pipe = self._client.pipeline(transaction=False) for key, value in _items(mapping): dump = self.dump_object(value) if timeout == -1: pipe.set(name=self.key_prefix + key, value=dump) else: pipe.setex(name=self.key_prefix + key, value=dump, time=timeout) return pipe.execute() def delete(self, key): return self._client.delete(self.key_prefix + key) def delete_many(self, *keys): if not keys: return if self.key_prefix: keys = [self.key_prefix + key for key in keys] return self._client.delete(*keys) def has(self, key): return self._client.exists(self.key_prefix + key) def clear(self): status = False if self.key_prefix: keys = self._client.keys(self.key_prefix + '*') if keys: status = self._client.delete(*keys) else: status = self._client.flushdb() return status def inc(self, key, delta=1): return self._client.incr(name=self.key_prefix + key, amount=delta) def dec(self, key, delta=1): return self._client.decr(name=self.key_prefix + key, amount=delta) class FileSystemCache(BaseCache): """A cache that stores the items on the file system. This cache depends on being the only user of the `cache_dir`. Make absolutely sure that nobody but this cache stores files there or otherwise the cache will randomly delete files therein. :param cache_dir: the directory where cache files are stored. :param threshold: the maximum number of items the cache stores before it starts deleting some. :param default_timeout: the default timeout that is used if no timeout is specified on :meth:`~BaseCache.set`. A timeout of 0 indicates that the cache never expires. 
:param mode: the file mode wanted for the cache files, default 0600 """ #: used for temporary files by the FileSystemCache _fs_transaction_suffix = '.__wz_cache' def __init__(self, cache_dir, threshold=500, default_timeout=300, mode=0o600): BaseCache.__init__(self, default_timeout) self._path = cache_dir self._threshold = threshold self._mode = mode try: os.makedirs(self._path) except OSError as ex: if ex.errno != errno.EEXIST: raise def _list_dir(self): """return a list of (fully qualified) cache filenames """ return [os.path.join(self._path, fn) for fn in os.listdir(self._path) if not fn.endswith(self._fs_transaction_suffix)] def _prune(self): entries = self._list_dir() if len(entries) > self._threshold: now = time() try: for idx, fname in enumerate(entries): remove = False with open(fname, 'rb') as f: expires = pickle.load(f) remove = (expires != 0 and expires <= now) or idx % 3 == 0 if remove: os.remove(fname) except (IOError, OSError): pass def clear(self): for fname in self._list_dir(): try: os.remove(fname) except (IOError, OSError): return False return True def _get_filename(self, key): if isinstance(key, text_type): key = key.encode('utf-8') # XXX unicode review hash = md5(key).hexdigest() return os.path.join(self._path, hash) def get(self, key): filename = self._get_filename(key) try: with open(filename, 'rb') as f: pickle_time = pickle.load(f) if pickle_time == 0 or pickle_time >= time(): return pickle.load(f) else: os.remove(filename) return None except (IOError, OSError, pickle.PickleError): return None def add(self, key, value, timeout=None): filename = self._get_filename(key) if not os.path.exists(filename): return self.set(key, value, timeout) return False def set(self, key, value, timeout=None): if timeout is None: timeout = int(time() + self.default_timeout) elif timeout != 0: timeout = int(time() + timeout) filename = self._get_filename(key) self._prune() try: fd, tmp = tempfile.mkstemp(suffix=self._fs_transaction_suffix, dir=self._path) with os.fdopen(fd, 'wb') as f: pickle.dump(timeout, f, 1) pickle.dump(value, f, pickle.HIGHEST_PROTOCOL) rename(tmp, filename) os.chmod(filename, self._mode) except (IOError, OSError): return False else: return True def delete(self, key): try: os.remove(self._get_filename(key)) except (IOError, OSError): return False else: return True def has(self, key): filename = self._get_filename(key) try: with open(filename, 'rb') as f: pickle_time = pickle.load(f) if pickle_time == 0 or pickle_time >= time(): return True else: os.remove(filename) return False except (IOError, OSError, pickle.PickleError): return False
license: apache-2.0
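A short usage sketch for the cache classes defined in cache.py above; the keys, values, and cache directory are illustrative.

from werkzeug.contrib.cache import SimpleCache, FileSystemCache

cache = SimpleCache(threshold=500, default_timeout=300)
cache.set("sidebar_for/user42", "<ul>...</ul>", timeout=60 * 5)
assert cache.get("sidebar_for/user42") == "<ul>...</ul>"
assert cache.get("missing") is None

fs_cache = FileSystemCache("/tmp/myapp-cache")  # illustrative directory
fs_cache.set("report", {"rows": 10})
print(fs_cache.get("report"))  # {'rows': 10}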
repo_name: sdispater/cleo
path: tests/io/inputs/test_argument.py
copies: 1
size: 1695
content:
import pytest from cleo.exceptions import LogicException from cleo.io.inputs.argument import Argument def test_optional_non_list_argument(): argument = Argument( "foo", required=False, is_list=False, description="Foo description", default="bar", ) assert argument.name == "foo" assert not argument.is_required() assert not argument.is_list() assert argument.description == "Foo description" assert argument.default == "bar" def test_required_non_list_argument(): argument = Argument("foo", is_list=False, description="Foo description") assert argument.name == "foo" assert argument.is_required() assert not argument.is_list() assert argument.description == "Foo description" assert argument.default is None def test_list_argument(): argument = Argument("foo", is_list=True, description="Foo description") assert argument.name == "foo" assert argument.is_required() assert argument.is_list() assert argument.description == "Foo description" assert argument.default == [] def test_required_arguments_do_not_support_default_values(): with pytest.raises( LogicException, match="Cannot set a default value for required arguments" ): Argument("foo", description="Foo description", default="bar") def test_list_arguments_do_not_support_non_list_default_values(): with pytest.raises( LogicException, match="A default value for a list argument must be a list" ): Argument( "foo", required=False, is_list=True, description="Foo description", default="bar", )
license: mit
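A brief usage sketch of the Argument class covered by test_argument.py above; the argument names and descriptions are invented.

from cleo.io.inputs.argument import Argument

files = Argument("files", is_list=True, description="Files to process")
assert files.is_required()
assert files.is_list()
assert files.default == []

name = Argument("name", required=False, description="Optional name", default="world")
assert not name.is_required()
assert name.default == "world"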
repo_name: lmyrefelt/CouchPotatoServer
path: libs/requests/utils.py
copies: 58
size: 16299
content:
# -*- coding: utf-8 -*- """ requests.utils ~~~~~~~~~~~~~~ This module provides utility functions that are used within Requests that are also useful for external consumption. """ import cgi import codecs import collections import os import platform import re import sys from netrc import netrc, NetrcParseError from . import __version__ from . import certs from .compat import parse_http_list as _parse_list_header from .compat import quote, urlparse, bytes, str, OrderedDict, urlunparse from .cookies import RequestsCookieJar, cookiejar_from_dict from .structures import CaseInsensitiveDict _hush_pyflakes = (RequestsCookieJar,) NETRC_FILES = ('.netrc', '_netrc') DEFAULT_CA_BUNDLE_PATH = certs.where() def dict_to_sequence(d): """Returns an internal sequence dictionary update.""" if hasattr(d, 'items'): d = d.items() return d def super_len(o): if hasattr(o, '__len__'): return len(o) if hasattr(o, 'len'): return o.len if hasattr(o, 'fileno'): return os.fstat(o.fileno()).st_size def get_netrc_auth(url): """Returns the Requests tuple auth for a given url from netrc.""" try: locations = (os.path.expanduser('~/{0}'.format(f)) for f in NETRC_FILES) netrc_path = None for loc in locations: if os.path.exists(loc) and not netrc_path: netrc_path = loc # Abort early if there isn't one. if netrc_path is None: return netrc_path ri = urlparse(url) # Strip port numbers from netloc host = ri.netloc.split(':')[0] try: _netrc = netrc(netrc_path).authenticators(host) if _netrc: # Return with login / password login_i = (0 if _netrc[0] else 1) return (_netrc[login_i], _netrc[2]) except (NetrcParseError, IOError): # If there was a parsing error or a permissions issue reading the file, # we'll just skip netrc auth pass # AppEngine hackiness. except (ImportError, AttributeError): pass def guess_filename(obj): """Tries to guess the filename of the given object.""" name = getattr(obj, 'name', None) if name and name[0] != '<' and name[-1] != '>': return os.path.basename(name) def from_key_val_list(value): """Take an object and test to see if it can be represented as a dictionary. Unless it can not be represented as such, return an OrderedDict, e.g., :: >>> from_key_val_list([('key', 'val')]) OrderedDict([('key', 'val')]) >>> from_key_val_list('string') ValueError: need more than 1 value to unpack >>> from_key_val_list({'key': 'val'}) OrderedDict([('key', 'val')]) """ if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') return OrderedDict(value) def to_key_val_list(value): """Take an object and test to see if it can be represented as a dictionary. If it can be, return a list of tuples, e.g., :: >>> to_key_val_list([('key', 'val')]) [('key', 'val')] >>> to_key_val_list({'key': 'val'}) [('key', 'val')] >>> to_key_val_list('string') ValueError: cannot encode objects that are not 2-tuples. """ if value is None: return None if isinstance(value, (str, bytes, bool, int)): raise ValueError('cannot encode objects that are not 2-tuples') if isinstance(value, collections.Mapping): value = value.items() return list(value) # From mitsuhiko/werkzeug (used with permission). def parse_list_header(value): """Parse lists as described by RFC 2068 Section 2. In particular, parse comma-separated lists where the elements of the list may include quoted-strings. A quoted-string could contain a comma. A non-quoted string could have quotes in the middle. Quotes are removed automatically after parsing. 
It basically works like :func:`parse_set_header` just that items may appear multiple times and case sensitivity is preserved. The return value is a standard :class:`list`: >>> parse_list_header('token, "quoted value"') ['token', 'quoted value'] To create a header from the :class:`list` again, use the :func:`dump_header` function. :param value: a string with a list header. :return: :class:`list` """ result = [] for item in _parse_list_header(value): if item[:1] == item[-1:] == '"': item = unquote_header_value(item[1:-1]) result.append(item) return result # From mitsuhiko/werkzeug (used with permission). def parse_dict_header(value): """Parse lists of key, value pairs as described by RFC 2068 Section 2 and convert them into a python dict: >>> d = parse_dict_header('foo="is a fish", bar="as well"') >>> type(d) is dict True >>> sorted(d.items()) [('bar', 'as well'), ('foo', 'is a fish')] If there is no value for a key it will be `None`: >>> parse_dict_header('key_without_value') {'key_without_value': None} To create a header from the :class:`dict` again, use the :func:`dump_header` function. :param value: a string with a dict header. :return: :class:`dict` """ result = {} for item in _parse_list_header(value): if '=' not in item: result[item] = None continue name, value = item.split('=', 1) if value[:1] == value[-1:] == '"': value = unquote_header_value(value[1:-1]) result[name] = value return result # From mitsuhiko/werkzeug (used with permission). def unquote_header_value(value, is_filename=False): r"""Unquotes a header value. (Reversal of :func:`quote_header_value`). This does not use the real unquoting but what browsers are actually using for quoting. :param value: the header value to unquote. """ if value and value[0] == value[-1] == '"': # this is not the real unquoting, but fixing this so that the # RFC is met will result in bugs with internet explorer and # probably some other browsers as well. IE for example is # uploading files with "C:\foo\bar.txt" as filename value = value[1:-1] # if this is a filename and the starting characters look like # a UNC path, then just return the value without quotes. Using the # replace sequence below on a UNC path has the effect of turning # the leading double slash into a single slash and then # _fix_ie_filename() doesn't work correctly. See #458. if not is_filename or value[:2] != '\\\\': return value.replace('\\\\', '\\').replace('\\"', '"') return value def dict_from_cookiejar(cj): """Returns a key/value dictionary from a CookieJar. :param cj: CookieJar object to extract cookies from. """ cookie_dict = {} for cookie in cj: cookie_dict[cookie.name] = cookie.value return cookie_dict def add_dict_to_cookiejar(cj, cookie_dict): """Returns a CookieJar from a key/value dictionary. :param cj: CookieJar to insert cookies into. :param cookie_dict: Dict of key/values to insert into CookieJar. """ cj2 = cookiejar_from_dict(cookie_dict) cj.update(cj2) return cj def get_encodings_from_content(content): """Returns encodings from given content string. :param content: bytestring to extract encodings from. """ charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I) return charset_re.findall(content) def get_encoding_from_headers(headers): """Returns encodings from given HTTP Header Dict. :param headers: dictionary to extract encoding from. 
""" content_type = headers.get('content-type') if not content_type: return None content_type, params = cgi.parse_header(content_type) if 'charset' in params: return params['charset'].strip("'\"") if 'text' in content_type: return 'ISO-8859-1' def stream_decode_response_unicode(iterator, r): """Stream decodes a iterator.""" if r.encoding is None: for item in iterator: yield item return decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace') for chunk in iterator: rv = decoder.decode(chunk) if rv: yield rv rv = decoder.decode('', final=True) if rv: yield rv def iter_slices(string, slice_length): """Iterate over slices of a string.""" pos = 0 while pos < len(string): yield string[pos:pos + slice_length] pos += slice_length def get_unicode_from_response(r): """Returns the requested content back in unicode. :param r: Response object to get unicode content from. Tried: 1. charset from content-type 2. every encodings from ``<meta ... charset=XXX>`` 3. fall back and replace all unicode characters """ tried_encodings = [] # Try charset from content-type encoding = get_encoding_from_headers(r.headers) if encoding: try: return str(r.content, encoding) except UnicodeError: tried_encodings.append(encoding) # Fall back: try: return str(r.content, encoding, errors='replace') except TypeError: return r.content # The unreserved URI characters (RFC 3986) UNRESERVED_SET = frozenset( "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~") def unquote_unreserved(uri): """Un-escape any percent-escape sequences in a URI that are unreserved characters. This leaves all reserved, illegal and non-ASCII bytes encoded. """ parts = uri.split('%') for i in range(1, len(parts)): h = parts[i][0:2] if len(h) == 2 and h.isalnum(): c = chr(int(h, 16)) if c in UNRESERVED_SET: parts[i] = c + parts[i][2:] else: parts[i] = '%' + parts[i] else: parts[i] = '%' + parts[i] return ''.join(parts) def requote_uri(uri): """Re-quote the given URI. This function passes the given URI through an unquote/quote cycle to ensure that it is fully and consistently quoted. """ # Unquote only the unreserved characters # Then quote only illegal characters (do not quote reserved, unreserved, # or '%') return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~") def get_environ_proxies(url): """Return a dict of environment proxies.""" proxy_keys = [ 'all', 'http', 'https', 'ftp', 'socks' ] get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) # First check whether no_proxy is defined. If it is, check that the URL # we're getting isn't in the no_proxy list. no_proxy = get_proxy('no_proxy') if no_proxy: # We need to check whether we match here. We need to see if we match # the end of the netloc, both with and without the port. no_proxy = no_proxy.split(',') netloc = urlparse(url).netloc for host in no_proxy: if netloc.endswith(host) or netloc.split(':')[0].endswith(host): # The URL does match something in no_proxy, so we don't want # to apply the proxies on this URL. return {} # If we get here, we either didn't have no_proxy set or we're not going # anywhere that no_proxy applies to. 
proxies = [(key, get_proxy(key + '_proxy')) for key in proxy_keys] return dict([(key, val) for (key, val) in proxies if val]) def default_user_agent(): """Return a string representing the default user agent.""" _implementation = platform.python_implementation() if _implementation == 'CPython': _implementation_version = platform.python_version() elif _implementation == 'PyPy': _implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major, sys.pypy_version_info.minor, sys.pypy_version_info.micro) if sys.pypy_version_info.releaselevel != 'final': _implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel]) elif _implementation == 'Jython': _implementation_version = platform.python_version() # Complete Guess elif _implementation == 'IronPython': _implementation_version = platform.python_version() # Complete Guess else: _implementation_version = 'Unknown' try: p_system = platform.system() p_release = platform.release() except IOError: p_system = 'Unknown' p_release = 'Unknown' return " ".join(['python-requests/%s' % __version__, '%s/%s' % (_implementation, _implementation_version), '%s/%s' % (p_system, p_release)]) def default_headers(): return CaseInsensitiveDict({ 'User-Agent': default_user_agent(), 'Accept-Encoding': ', '.join(('gzip', 'deflate', 'compress')), 'Accept': '*/*' }) def parse_header_links(value): """Return a dict of parsed link headers proxies. i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg" """ links = [] replace_chars = " '\"" for val in value.split(","): try: url, params = val.split(";", 1) except ValueError: url, params = val, '' link = {} link["url"] = url.strip("<> '\"") for param in params.split(";"): try: key, value = param.split("=") except ValueError: break link[key.strip(replace_chars)] = value.strip(replace_chars) links.append(link) return links # Null bytes; no need to recreate these on each call to guess_json_utf _null = '\x00'.encode('ascii') # encoding to ASCII for Python 3 _null2 = _null * 2 _null3 = _null * 3 def guess_json_utf(data): # JSON always starts with two ASCII characters, so detection is as # easy as counting the nulls and from their location and count # determine the encoding. Also detect a BOM, if present. sample = data[:4] if sample in (codecs.BOM_UTF32_LE, codecs.BOM32_BE): return 'utf-32' # BOM included if sample[:3] == codecs.BOM_UTF8: return 'utf-8-sig' # BOM included, MS style (discouraged) if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): return 'utf-16' # BOM included nullcount = sample.count(_null) if nullcount == 0: return 'utf-8' if nullcount == 2: if sample[::2] == _null2: # 1st and 3rd are null return 'utf-16-be' if sample[1::2] == _null2: # 2nd and 4th are null return 'utf-16-le' # Did not detect 2 valid UTF-16 ascii-range characters if nullcount == 3: if sample[:3] == _null3: return 'utf-32-be' if sample[1:] == _null3: return 'utf-32-le' # Did not detect a valid UTF-32 ascii-range character return None def prepend_scheme_if_needed(url, new_scheme): '''Given a URL that may or may not have a scheme, prepend the given scheme. Does not replace a present scheme with the one provided as an argument.''' scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) # urlparse is a finicky beast, and sometimes decides that there isn't a # netloc present. Assume that it's being over-cautious, and switch netloc # and path if urlparse decided there was no netloc. 
if not netloc: netloc, path = path, netloc return urlunparse((scheme, netloc, path, params, query, fragment)) def get_auth_from_url(url): """Given a url with authentication components, extract them into a tuple of username,password.""" if url: parsed = urlparse(url) return (parsed.username, parsed.password) else: return ('', '')
license: gpl-3.0
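A few illustrative calls to helpers defined in requests/utils.py above; the inputs are made up and the module is assumed to be importable as requests.utils.

from requests.utils import guess_json_utf, parse_dict_header, requote_uri

print(parse_dict_header('foo="is a fish", bar="as well"'))
# {'foo': 'is a fish', 'bar': 'as well'}

print(guess_json_utf(b'\x00{\x00"'))  # 'utf-16-be' (nulls in 1st and 3rd byte)
print(requote_uri('http://example.com/a b?q=1%2B1'))
# 'http://example.com/a%20b?q=1%2B1' (space quoted, existing %2B preserved)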
repo_name: jpopelka/fabric8-analytics-worker
path: f8a_worker/workers/victims.py
copies: 1
size: 8076
content:
"""Task to analyze vulnerable packages and mark them in graph as such.""" import os import requests from f8a_worker.base import BaseTask from f8a_worker.models import Ecosystem from f8a_worker.victims import VictimsDB, FilteredVictimsDB from f8a_worker.graphutils import update_properties, create_nodes from selinon import StoragePool class VictimsCheck(BaseTask): """Victims CVE Check task.""" def execute(self, arguments): """Task to mark vulnerable packages in graph. :param arguments: dictionary with task arguments :return: None """ self._strict_assert(arguments.get('ecosystem')) wanted_cves = set(arguments.get('cve_filter', [])) victims_cls = VictimsDB if not wanted_cves else FilteredVictimsDB rdb = StoragePool.get_connected_storage('BayesianPostgres') ecosystem = Ecosystem.by_name(rdb.session, arguments.get('ecosystem')) with victims_cls.build_from_git(wanted=wanted_cves) as db: self.log.info('Storing the VictimsDB zip on S3') db.store_on_s3() vulnerable_packages = self.get_vulnerable_packages(db, ecosystem) self.create_in_graph(vulnerable_packages, ecosystem) self.mark_in_graph(vulnerable_packages, ecosystem) self.notify_gemini(vulnerable_packages, ecosystem) def init_auth_sa_token(self): """Generate service token for authentication.""" auth_server_url = os.getenv('F8A_AUTH_SERVICE_HOST', '') if auth_server_url: endpoint = '{url}/api/token'.format(url=auth_server_url) client_id = os.getenv('GEMINI_SA_CLIENT_ID', 'id') client_secret = os.getenv('GEMINI_SA_CLIENT_SECRET', 'secret') payload = {"grant_type": "client_credentials", "client_id": client_id.strip(), "client_secret": client_secret.strip()} try: self.log.info('Starting token generation using {url} and {payload}' .format(url=endpoint, payload=payload)) response = requests.post(endpoint, json=payload) self.log.info('Response status is {status_code}' .format(status_code=response.status_code)) except requests.exceptions.RequestException as e: raise e if response.status_code == 200: data = response.json() access_token = data.get('access_token') if access_token: self.log.info('Access token successfully generated') return access_token raise requests.exceptions.RequestException def get_vulnerable_packages(self, db, ecosystem): """Get vulnerable packages. Constructs a dict where keys are package names and values are details about vulnerabilities. :param db: VictimsDB :param ecosystem: f8a_worker.models.Ecosystem, ecosystem object :return: dict, a dict of vulnerable packages with details """ vulnerable_packages = {} for pkg in db.get_details_for_ecosystem(ecosystem): ga = pkg['package'] ga_data = vulnerable_packages.get(ga, []) ga_data.append(pkg) vulnerable_packages[ga] = ga_data return vulnerable_packages def notify_gemini(self, vulnerable_packages, ecosystem): """Notify gemini service about vulnerabilities in packages. 
:param vulnerable_packages: dict, a dict of vulnerable packages with details :param ecosystem: f8a_worker.models.Ecosystem, ecosystem :return: None """ try: access_token = self.init_auth_sa_token() except requests.exceptions.RequestException as e: self.log.error('Access token retrieval failed due to {}' .format(e)) return gemini_url = 'http://{host}:{port}/api/v1/user-repo/notify'.format( host=os.environ.get('F8A_GEMINI_SERVER_SERVICE_HOST'), port=os.environ.get('F8A_GEMINI_SERVER_SERVICE_PORT') ) for package, data in vulnerable_packages.items(): if data: versions = data[0].get('affected', []) + data[0].get('not_affected', []) epv_list = [] for version in versions: epv = { 'ecosystem': ecosystem.name, 'name': package, 'version': version, } epv_list.append(epv) resp = requests.post(gemini_url, json=epv_list, headers={'Authorization': access_token}) if resp.status_code != 200: self.log.error('Failed to notify gemini about vulnerabilities in {e}{p}'.format( e=ecosystem.name, p=package )) def create_in_graph(self, vulnerable_packages, ecosystem): """Make sure we have all packages with known vulnerabilities in graph. We don't need to ingest the packages, we just need to create nodes in graph. :param vulnerable_packages: dict, a dict of vulnerable packages with details :param ecosystem: f8a_worker.models.Ecosystem, ecosystem :return: None """ for ga, data in vulnerable_packages.items(): if data: versions = data[0].get('affected', []) + data[0].get('not_affected', []) epv_list = [] for version in versions: epv = { 'ecosystem': ecosystem.name, 'name': ga, 'version': version, 'source_repo': ecosystem.name } epv_list.append(epv) self.log.info( "Creating nodes in graph for {ga}:{v}, if they don't exist yet".format( ga=ga, v=version ) ) try: create_nodes(epv_list) except RuntimeError: # the error has been logged in the function already; # nothing that we can do here pass def mark_in_graph(self, vulnerable_packages, ecosystem): """Mark vulnerable components in graph. :param vulnerable_packages: dict, a dict of vulnerable packages with details :param ecosystem: f8a_worker.models.Ecosystem, ecosystem :return: None """ packages = {} for ga, data in vulnerable_packages.items(): for vulnerability in data: ga = vulnerability.get('package') versions = packages.get(ga) if versions is None: packages[ga] = versions = {} for version in vulnerability.get('affected', []): vulnerabilities = versions.get(version, []) cve_id = vulnerability.get('cve_id') cvss = vulnerability.get('cvss_v2') or vulnerability.get('cvss_v3') if not cvss: self.log.error('No CVSS for {cveid}'.format(cveid=cve_id)) continue cve_str = "{cveid}:{score}".format(cveid=cve_id, score=cvss) vulnerabilities.append(cve_str) packages[ga][version] = vulnerabilities for ga in packages: for version in packages[ga]: cves = packages[ga][version] properties = [{'name': 'cve_ids', 'value': x} for x in cves] self.log.info('Marking {ga}:{v} as vulnerable in graph: {vulns}'.format( ga=ga, v=version, vulns=str(cves)) ) update_properties(ecosystem.name, ga, version, properties)
license: gpl-3.0
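A plain-Python sketch of the grouping performed by VictimsCheck.get_vulnerable_packages above, bucketing vulnerability records by their 'package' key; the records are invented and no f8a_worker imports are needed.

records = [
    {'package': 'org.example:lib', 'cve_id': 'CVE-2018-0001', 'affected': ['1.0']},
    {'package': 'org.example:lib', 'cve_id': 'CVE-2019-0002', 'affected': ['1.1']},
    {'package': 'com.acme:core', 'cve_id': 'CVE-2020-0003', 'affected': ['2.3']},
]

vulnerable_packages = {}
for pkg in records:
    vulnerable_packages.setdefault(pkg['package'], []).append(pkg)

for ga, vulns in vulnerable_packages.items():
    print(ga, [v['cve_id'] for v in vulns])
# org.example:lib ['CVE-2018-0001', 'CVE-2019-0002']
# com.acme:core ['CVE-2020-0003']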
repo_name: bilke/OpenSG-1.8
path: SConsLocal/scons-local-0.96.1/SCons/Tool/hpc++.py
copies: 2
size: 2432
content:
"""SCons.Tool.hpc++ Tool-specific initialization for c++ on HP/UX. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "/home/scons/scons/branch.0/baseline/src/engine/SCons/Tool/hpc++.py 0.96.1.D001 2004/08/23 09:55:29 knight" import os.path import string import SCons.Util cplusplus = __import__('c++', globals(), locals(), []) acc = None # search for the acc compiler and linker front end try: dirs = os.listdir('/opt') except OSError: dirs = [] for dir in dirs: cc = '/opt/' + dir + '/bin/aCC' if os.path.exists(cc): acc = cc break def generate(env): """Add Builders and construction variables for g++ to an Environment.""" cplusplus.generate(env) if acc: env['CXX'] = acc or 'aCC' # determine version of aCC line = os.popen(acc + ' -V 2>&1').readline().rstrip() if string.find(line, 'aCC: HP ANSI C++') == 0: env['CXXVERSION'] = string.split(line)[-1] if env['PLATFORM'] == 'cygwin': env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS') else: env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS +Z') def exists(env): return acc
license: lgpl-2.1
repo_name: Sbalbp/DIRAC
path: ResourceStatusSystem/Client/Oldtest/Test_Clients.py
copies: 10
size: 11312
content:
#import unittest # #from DIRAC.Core.Base import Script #Script.parseCommandLine() # #from DIRAC.ResourceStatusSystem.Utilities.mock import Mock #from DIRAC.ResourceStatusSystem.Client.JobsClient import JobsClient #from DIRAC.ResourceStatusSystem.Client.PilotsClient import PilotsClient #from DIRAC.ResourceStatusSystem.Client.ResourceStatusClient import ResourceStatusClient #from DIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient # #from DIRAC.ResourceStatusSystem.Utilities import CS # #ValidRes = CS.getTypedDictRootedAt("GeneralConfig")['Resource'] #ValidStatus = CS.getTypedDictRootedAt("GeneralConfig")['Status'] # ############################################################################## # #class ClientsTestCase( unittest.TestCase ): # """ Base class for the clients test cases # """ # def setUp( self ): # # self.mockRSS = Mock() # # self.RSCli = ResourceStatusClient( serviceIn = self.mockRSS ) # self.RMCli = ResourceManagementClient( serviceIn = self.mockRSS ) # self.PilotsCli = PilotsClient() # self.JobsCli = JobsClient() # ############################################################################## # #class ResourceStatusClientSuccess( ClientsTestCase ): # # def test_getPeriods( self ): # self.mockRSS.getPeriods.return_value = {'OK':True, 'Value':[]} # for granularity in ValidRes: # for status in ValidStatus: # res = self.RSCli.getPeriods( granularity, 'XX', status, 20 ) # self.assertEqual(res['OK'], True) # self.assertEqual( res['Value'], [] ) # # def test_getServiceStats( self ): # self.mockRSS.getServiceStats.return_value = {'OK':True, 'Value':[]} # res = self.RSCli.getServiceStats( 'Site', '' ) # self.assertEqual( res['Value'], [] ) # # def test_getResourceStats( self ): # self.mockRSS.getResourceStats.return_value = {'OK':True, 'Value':[]} # res = self.RSCli.getResourceStats( 'Site', '' ) # self.assertEqual( res['Value'], [] ) # res = self.RSCli.getResourceStats( 'Service', '' ) # self.assertEqual( res['Value'], [] ) # # def test_getStorageElementsStats( self ): # self.mockRSS.getStorageElementsStats.return_value = {'OK':True, 'Value':[]} # res = self.RSCli.getStorageElementsStats( 'Site', '', "Read" ) # self.assertEqual( res['Value'], [] ) # res = self.RSCli.getStorageElementsStats( 'Resource', '', "Read") # self.assertEqual( res['Value'], [] ) # # def test_getMonitoredStatus( self ): # self.mockRSS.getSitesStatusWeb.return_value = {'OK':True, 'Value': {'Records': [['', '', '', '', 'Active', '']]}} # self.mockRSS.getServicesStatusWeb.return_value = {'OK':True, 'Value':{'Records': [['', '', '', '', 'Active', '']]}} # self.mockRSS.getResourcesStatusWeb.return_value = {'OK':True, 'Value':{'Records': [['', '', '', '', '', 'Active', '']]}} # self.mockRSS.getStorageElementsStatusWeb.return_value = {'OK':True, 'Value':{'Records': [['', '', '', '', 'Active', '']]}} # for g in ValidRes: # res = self.RSCli.getMonitoredStatus( g, 'a' ) # self.assertEqual( res['Value'], ['Active'] ) # res = self.RSCli.getMonitoredStatus( g, ['a'] ) # self.assertEqual( res['Value'], ['Active'] ) # res = self.RSCli.getMonitoredStatus( g, ['a', 'b'] ) # self.assertEqual( res['Value'], ['Active', 'Active'] ) # # def test_getCachedAccountingResult( self ): # self.mockRSS.getCachedAccountingResult.return_value = {'OK':True, 'Value':[]} # res = self.RMCli.getCachedAccountingResult( 'XX', 'pippo', 'ZZ' ) # self.assertEqual( res['Value'], [] ) # # def test_getCachedResult( self ): # self.mockRSS.getCachedResult.return_value = {'OK':True, 'Value':[]} # res = 
self.RMCli.getCachedResult( 'XX', 'pippo', 'ZZ', 1 ) # self.assertEqual( res['Value'], [] ) # # def test_getCachedIDs( self ): # self.mockRSS.getCachedIDs.return_value = {'OK':True, # 'Value':[78805473L, 78805473L, 78805473L, 78805473L]} # res = self.RMCli.getCachedIDs( 'XX', 'pippo' ) # self.assertEqual( res['Value'], [78805473L, 78805473L, 78805473L, 78805473L] ) # # # ############################################################################## # #class JobsClientSuccess( ClientsTestCase ): # # def test_getJobsSimpleEff( self ): # WMS_Mock = Mock() # WMS_Mock.getSiteSummaryWeb.return_value = {'OK': True, # 'rpcStub': ( ( 'WorkloadManagement/WMSAdministrator', # {'skipCACheck': True, # 'delegatedGroup': 'diracAdmin', # 'delegatedDN': '/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=fstagni/CN=693025/CN=Federico Stagni', 'timeout': 600} ), # 'getSiteSummaryWeb', ( {'Site': 'LCG.CERN.ch'}, [], 0, 500 ) ), # 'Value': {'TotalRecords': 1, # 'ParameterNames': ['Site', 'GridType', 'Country', 'Tier', 'MaskStatus', 'Received', 'Checking', 'Staging', 'Waiting', 'Matched', 'Running', 'Stalled', 'Done', 'Completed', 'Failed', 'Efficiency', 'Status'], # 'Extras': {'ru': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 0, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'fr': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 12L, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'ch': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 4L, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 1L}, 'nl': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 0, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'uk': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 0, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'Unknown': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 0, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'de': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 1L, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'it': {'Received': 0, 'Staging': 0, 'Checking': 1L, 'Completed': 0, 'Waiting': 2L, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'hu': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 0, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'cy': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 0, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'bg': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 0, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'au': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 10L, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'il': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 0, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'br': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 0, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'ie': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 0, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 'pl': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 0, 'Failed': 0, 'Running': 0, 'Done': 0, 'Stalled': 0, 'Matched': 0}, 
'es': {'Received': 0, 'Staging': 0, 'Checking': 0, 'Completed': 0, 'Waiting': 0, 'Failed': 0, 'Running': 0, 'Done': 2L, 'Stalled': 0, 'Matched': 0}}, # 'Records': [['LCG.CERN.ch', 'LCG', 'ch', 'Tier-1', 'Active', 0, 0, 0, 4L, 1L, 0, 0, 0, 0, 0, '0.0', 'Idle']]}} # res = self.JobsCli.getJobsSimpleEff( 'XX', RPCWMSAdmin = WMS_Mock ) # self.assertEqual( res, {'LCG.CERN.ch': 'Idle'} ) # ############################################################################## # #class PilotsClientSuccess( ClientsTestCase ): # ## def test_getPilotsStats(self): ## self.mockRSS.getPeriods.return_value = {'OK':True, 'Value':[]} ## for granularity in ValidRes: ## for status in ValidStatus: ## res = self.RSCli.getPeriods(granularity, 'XX', status, 20) ## self.assertEqual(res['Periods'], []) # # def test_getPilotsSimpleEff( self ): # #self.mockRSS.getPilotsSimpleEff.return_value = {'OK':True, 'Value':{'Records': [['', '', 0, 3L, 0, 0, 0, 283L, 66L, 0, 0, 352L, '1.00', '81.25', 'Fair', 'Yes']]}} # # WMS_Mock = Mock() # WMS_Mock.getPilotSummaryWeb.return_value = {'OK': True, # 'rpcStub': ( ( 'WorkloadManagement/WMSAdministrator', # {'skipCACheck': True, # 'delegatedGroup': 'diracAdmin', # 'delegatedDN': '/DC=ch/DC=cern/OU=Organic Units/OU=Users/CN=fstagni/CN=693025/CN=Federico Stagni', 'timeout': 600} ), # 'getPilotSummaryWeb', ( {'GridSite': 'LCG.Ferrara.it'}, [], 0, 500 ) ), # 'Value': { # 'TotalRecords': 0, # 'ParameterNames': ['Site', 'CE', 'Submitted', 'Ready', 'Scheduled', 'Waiting', 'Running', 'Done', 'Aborted', 'Done_Empty', 'Aborted_Hour', 'Total', 'PilotsPerJob', 'PilotJobEff', 'Status', 'InMask'], # 'Extras': {'Scheduled': 0, 'Status': 'Poor', 'Aborted_Hour': 20L, 'Waiting': 59L, 'Submitted': 6L, 'PilotsPerJob': '1.03', 'Ready': 0, 'Running': 0, 'PilotJobEff': '39.34', 'Done': 328L, 'Aborted': 606L, 'Done_Empty': 9L, 'Total': 999L}, # 'Records': []}} # # res = self.PilotsCli.getPilotsSimpleEff( 'Site', 'LCG.Ferrara.it', RPCWMSAdmin = WMS_Mock ) # self.assertEqual( res, None ) # res = self.PilotsCli.getPilotsSimpleEff( 'Resource', 'grid0.fe.infn.it', 'LCG.Ferrara.it', RPCWMSAdmin = WMS_Mock ) # self.assertEqual( res, None ) # ############################################################################## # #if __name__ == '__main__': # suite = unittest.defaultTestLoader.loadTestsFromTestCase( ClientsTestCase ) # suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( ResourceStatusClientSuccess ) ) # suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( JobsClientSuccess ) ) # suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( PilotsClientSuccess ) ) # testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
gpl-3.0
maxbeth/fail2ban
fail2ban/tests/sockettestcase.py
18
2925
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :

# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

# Author: Steven Hiscocks
#

__author__ = "Steven Hiscocks"
__copyright__ = "Copyright (c) 2013 Steven Hiscocks"
__license__ = "GPL"

import os
import sys
import tempfile
import threading
import time
import unittest

from .. import protocol
from ..server.asyncserver import AsyncServer, AsyncServerException
from ..client.csocket import CSocket


class Socket(unittest.TestCase):

	def setUp(self):
		"""Call before every test case."""
		self.server = AsyncServer(self)
		sock_fd, sock_name = tempfile.mkstemp('fail2ban.sock', 'socket')
		os.close(sock_fd)
		os.remove(sock_name)
		self.sock_name = sock_name

	def tearDown(self):
		"""Call after every test case."""

	@staticmethod
	def proceed(message):
		"""Test transmitter proceed method which just returns first arg"""
		return message

	def testSocket(self):
		serverThread = threading.Thread(
			target=self.server.start, args=(self.sock_name, False))
		serverThread.daemon = True
		serverThread.start()
		time.sleep(1)

		client = CSocket(self.sock_name)
		testMessage = ["A", "test", "message"]
		self.assertEqual(client.send(testMessage), testMessage)

		# test close message
		client.close()
		# 2nd close does nothing
		client.close()

		self.server.stop()
		serverThread.join(1)
		self.assertFalse(os.path.exists(self.sock_name))

	def testSocketForce(self):
		open(self.sock_name, 'w').close()  # Create sock file
		# Try to start without force
		self.assertRaises(
			AsyncServerException, self.server.start, self.sock_name, False)

		# Try again with force set
		serverThread = threading.Thread(
			target=self.server.start, args=(self.sock_name, True))
		serverThread.daemon = True
		serverThread.start()
		time.sleep(1)

		self.server.stop()
		serverThread.join(1)
		self.assertFalse(os.path.exists(self.sock_name))


class ClientMisc(unittest.TestCase):

	def testPrintFormattedAndWiki(self):
		# redirect stdout to devnull
		saved_stdout = sys.stdout
		sys.stdout = open(os.devnull, 'w')
		try:
			protocol.printFormatted()
			protocol.printWiki()
		finally:
			# restore stdout
			sys.stdout = saved_stdout
gpl-2.0
chanceraine/nupic
nupic/datafiles/extra/firstOrder/raw/makeDataset.py
34
3485
#! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc.  Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program.  If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

"""
Generate artificial datasets
"""

import numpy

from nupic.data.file import File


def createFirstOrderModel(numCategories=5, alpha=0.5):
  categoryList = ['cat%02d' % i for i in range(numCategories)]
  initProbability = numpy.ones(numCategories)/numCategories
  transitionTable = numpy.random.dirichlet(alpha=[alpha]*numCategories,
                                           size=numCategories)
  return categoryList, initProbability, transitionTable


def generateFirstOrderData(model, numIterations=10000, seqLength=5,
                           resets=True, suffix='train'):
  print "Creating %d iteration file with seqLength %d" % (numIterations, seqLength)
  print "Filename",
  categoryList, initProbability, transitionTable = model
  initProbability = initProbability.cumsum()
  transitionTable = transitionTable.cumsum(axis=1)

  outputFile = 'fo_%d_%d_%s.csv' % (numIterations, seqLength, suffix)
  print "Filename", outputFile
  fields = [('reset', 'int', 'R'), ('name', 'string', '')]
  o = File(outputFile, fields)

  seqIdx = 0
  rand = numpy.random.rand()
  catIdx = numpy.searchsorted(initProbability, rand)
  for i in xrange(numIterations):
    rand = numpy.random.rand()
    if seqIdx == 0 and resets:
      catIdx = numpy.searchsorted(initProbability, rand)
      reset = 1
    else:
      catIdx = numpy.searchsorted(transitionTable[catIdx], rand)
      reset = 0
    o.write([reset,categoryList[catIdx]])
    seqIdx = (seqIdx+1)%seqLength

  o.close()


if __name__=='__main__':

  numpy.random.seed(1956)
  model = createFirstOrderModel()

  categoryList = model[0]
  categoryFile = open("categories.txt", 'w')
  for category in categoryList:
    categoryFile.write(category+'\n')
  categoryFile.close()

  #import pylab
  #pylab.imshow(model[2], interpolation='nearest')
  #pylab.show()

  for resets in [True, False]:
    for seqLength in [2, 10]:
      for numIterations in [1000, 10000, 100000]:
        generateFirstOrderData(model,
                               numIterations=numIterations,
                               seqLength=seqLength,
                               resets=resets,
                               suffix='train_%s' % ('resets' if resets else 'noresets',))

      generateFirstOrderData(model,
                             numIterations=10000,
                             seqLength=seqLength,
                             resets=resets,
                             suffix='test_%s' % ('resets' if resets else 'noresets',))
agpl-3.0
codevog/cv-qrcode
QRCodeCodevogViewController/QRCodeCodevogViewController/Libs/zbarSDK/test/barcodetest.py
28
11751
#!/usr/bin/env python import sys, re, unittest as UT, xml.etree.ElementTree as ET from os import path, getcwd from errno import EISDIR, EINVAL, EACCES from StringIO import StringIO from subprocess import Popen, PIPE from urllib2 import urlopen, HTTPError from urlparse import urljoin, urlunparse from traceback import format_exception debug = False # program to run - None means we still need to search for it zbarimg = None # arguments to said program zbarimg_args = [ '-q', '--xml' ] # namespace support try: register_namespace = ET.register_namespace except AttributeError: def register_namespace(prefix, uri): ET._namespace_map[uri] = prefix # barcode results BC = 'http://zbar.sourceforge.net/2008/barcode' register_namespace('bc', BC) # testcase extensions TS = 'http://zbar.sourceforge.net/2009/test-spec' register_namespace('test', TS) # printing support def fixtag(node): return str(node.tag).split('}', 1)[-1] def toxml(node): s = StringIO() ET.ElementTree(node).write(s) return s.getvalue() def hexdump(data): print data for i, c in enumerate(data): if i & 0x7 == 0: print '[%04x]' % i, print ' %04x' % ord(c), if i & 0x7 == 0x7: print if len(c) & 0x7 != 0x7: print '\n' # search for a file in the distribution def distdir_search(subdir, base, suffixes=('',)): # start with current dir, # then try script invocation path rundir = path.dirname(sys.argv[0]) search = [ '', rundir ] # finally, attempt to follow VPATH if present try: import re makefile = open('Makefile') for line in makefile: if re.match(r'^VPATH\s*=', line): vpath = line.split('=', 1)[1].strip() if vpath and vpath != rundir: search.append(vpath) break except: pass # poke around for subdir subdirs = tuple((subdir, path.join('..', subdir), '..', '')) for prefix in search: for subdir in subdirs: for suffix in suffixes: file = path.realpath(path.join(prefix, subdir, base + suffix)) if path.isfile(file): return(file) return None def find_zbarimg(): """search for zbarimg program to run. """ global zbarimg # look in dist dir first zbarimg = distdir_search('zbarimg', 'zbarimg', ('', '.exe')) if not zbarimg: # fall back to PATH zbarimg = 'zbarimg' if debug: print 'using zbarimg from PATH' elif debug: print 'using:', zbarimg def run_zbarimg(images): """invoke zbarimg for the specified files. return results as an ET.Element """ args = [ zbarimg ] args.extend(zbarimg_args) args.extend(images) if debug: print 'running:', ' '.join(args) # FIXME should be able to pipe (feed) parser straight from output child = Popen(args, stdout = PIPE, stderr = PIPE) (xml, err) = child.communicate() rc = child.returncode if debug: print 'zbarimg returned', rc # FIXME trim usage from error msg assert rc in (0, 4), \ 'zbarimg returned error status (%d)\n' % rc + err assert not err, err result = ET.XML(xml) assert result.tag == ET.QName(BC, 'barcodes') return result class TestCase(UT.TestCase): """single barcode source test. must have source attribute set to an ET.Element representation of a bc:source tag before test is run. 
""" def shortDescription(self): return self.source.get('href') def setUp(self): if not zbarimg: find_zbarimg() def runTest(self): expect = self.source assert expect is not None assert expect.tag == ET.QName(BC, 'source') actual = run_zbarimg((expect.get('href'),)) self.assertEqual(len(actual), 1) try: compare_sources(expect, actual[0]) except AssertionError: if expect.get(str(ET.QName(TS, 'exception'))) != 'TODO': raise # ignore class BuiltinTestCase(TestCase): def __init__(self, methodName='runTest'): TestCase.__init__(self, methodName) href = distdir_search('examples', 'barcode.png') if not href: href = 'http://zbar.sf.net/test/barcode.png' self.source = src = ET.Element(ET.QName(BC, 'source'), href=href) sym = ET.SubElement(src, ET.QName(BC, 'symbol'), type='EAN-13', orientation='UP') data = ET.SubElement(sym, ET.QName(BC, 'data')) data.text = '9876543210128' def compare_maps(expect, actual, compare_func): errors = [] notes = [] for key, iact in actual.iteritems(): iexp = expect.pop(key, None) if iexp is None: errors.append('bonus unexpected result:\n' + toxml(iact)) continue try: compare_func(iexp, iact) except: errors.append(''.join(format_exception(*sys.exc_info()))) if iexp.get(str(ET.QName(TS, 'exception'))) == 'TODO': notes.append('TODO unexpected result:\n' + toxml(iact)) for key, iexp in expect.iteritems(): if iexp.get(str(ET.QName(TS, 'exception'))) == 'TODO': notes.append('TODO missing expected result:\n' + toxml(iexp)) else: errors.append('missing expected result:\n' + toxml(iexp)) if len(notes) == 1: print >>sys.stderr, '(TODO)', elif notes: print >>sys.stderr, '(%d TODO)' % len(notes), assert not errors, '\n'.join(errors) def compare_sources(expect, actual): assert actual.tag == ET.QName(BC, 'source') assert actual.get('href').endswith(expect.get('href')), \ 'source href mismatch: %s != %s' % (acthref, exphref) # FIXME process/trim test:* contents def map_source(src): if not len(src) or src[0].tag != ET.QName(BC, 'index'): # insert artificial hierarchy syms = src[:] del src[:] idx = ET.SubElement(src, ET.QName(BC, 'index'), num='0') idx[:] = syms exc = src.get(str(ET.QName(TS, 'exception'))) if exc is not None: idx.set(str(ET.QName(TS, 'exception')), exc) return { '0': idx } elif len(src): assert src[0].tag != ET.QName(BC, 'symbol'), \ 'invalid source element: ' + \ 'expecting "index" or "symbol", got "%s"' % fixtag(src[0]) srcmap = { } for idx in src: srcmap[idx.get('num')] = idx return srcmap compare_maps(map_source(expect), map_source(actual), compare_indices) def compare_indices(expect, actual): assert actual.tag == ET.QName(BC, 'index') assert actual.get('num') == expect.get('num') # FIXME process/trim test:* contents def map_index(idx): idxmap = { } for sym in idx: assert sym.tag == ET.QName(BC, 'symbol') typ = sym.get('type') assert typ is not None data = sym.find(str(ET.QName(BC, 'data'))).text idxmap[typ, data] = sym return idxmap try: compare_maps(map_index(expect), map_index(actual), compare_symbols) except AssertionError: if expect.get(str(ET.QName(TS, 'exception'))) != 'TODO': raise def compare_symbols(expect, actual): orient = expect.get('orientation') if orient: assert actual.get('orientation') == orient # override unittest.TestLoader to populate tests from xml description class TestLoader: suiteClass = UT.TestSuite def __init__(self): self.cwd = urlunparse(('file', '', getcwd() + '/', '', '', '')) if debug: print 'cwd =', self.cwd def loadTestsFromModule(self, module): return self.suiteClass([BuiltinTestCase()]) def loadTestsFromNames(self, names, 
module=None): suites = [ self.loadTestsFromName(name, module) for name in names ] return self.suiteClass(suites) def loadTestsFromURL(self, url=None, file=None): if debug: print 'loading url:', url target = None if not file: if not url: return self.suiteClass([BuiltinTestCase()]) content = None url = urljoin(self.cwd, url) # FIXME grok fragment try: if debug: print 'trying:', url file = urlopen(url) content = file.info().get('Content-Type') except HTTPError, e: # possible remote directory pass except IOError, e: if e.errno not in (EISDIR, EINVAL, EACCES): raise # could be local directory if (not file or content == 'text/html' or (isinstance(file, HTTPError) and file.code != 200)): # could be directory, try opening index try: tmp = urljoin(url + '/', 'index.xml') if debug: print 'trying index:', tmp file = urlopen(tmp) content = file.info().get('Content-Type') url = tmp except IOError: raise IOError('no test index found at: %s' % url) if debug: print '\tContent-Type:', content if content not in ('application/xml', 'text/xml'): # assume url is image to test, try containing index # FIXME should be able to keep searching up try: target = url.rsplit('/', 1)[1] index = urljoin(url, 'index.xml') if debug: print 'trying index:', index file = urlopen(index) content = file.info().get('Content-Type') if debug: print '\tContent-Type:', content assert content in ('application/xml', 'text/xml') url = index except IOError: raise IOError('no index found for: %s' % url) index = ET.ElementTree(file=file).getroot() assert index.tag == ET.QName(BC, 'barcodes') suite = self.suiteClass() for src in index: # FIXME trim any meta info href = src.get('href') if target and target != href: continue if src.tag == ET.QName(BC, 'source'): test = TestCase() # convert file URLs to filesystem paths href = urljoin(url, href) href = re.sub(r'^file://', '', href) src.set('href', href) test.source = src suite.addTest(test) elif src.tag == ET.QName(TS, 'index'): suite.addTest(self.loadTestsFromURL(urljoin(url, href))) else: raise AssertionError('malformed test index') # FIXME detail assert suite.countTestCases(), 'empty test index: %s' % url return suite def loadTestsFromName(self, name=None, module=None): return self.loadTestsFromURL(name) def unsupported(self, *args, **kwargs): raise TypeError("unsupported TestLoader API") loadTestsFromTestCase = unsupported getTestCaseNames = unsupported if __name__ == '__main__': if '-d' in sys.argv: debug = True sys.argv.remove('-d') UT.main(module=None, testLoader=TestLoader())
lgpl-2.1
andyzsf/edx
lms/djangoapps/verify_student/tests/test_views.py
3
31400
# encoding: utf-8 """ verify_student/start?course_id=MITx/6.002x/2013_Spring # create /upload_face?course_id=MITx/6.002x/2013_Spring /upload_photo_id /confirm # mark_ready() ---> To Payment """ import json import mock import urllib import decimal from mock import patch, Mock import pytz from datetime import timedelta, datetime from django.test.client import Client from django.test import TestCase from django.test.utils import override_settings from django.conf import settings from django.core.urlresolvers import reverse from django.core.exceptions import ObjectDoesNotExist from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, mixed_store_config from xmodule.modulestore.tests.factories import CourseFactory from opaque_keys.edx.locations import SlashSeparatedCourseKey from student.tests.factories import UserFactory from student.models import CourseEnrollment from course_modes.tests.factories import CourseModeFactory from course_modes.models import CourseMode from shoppingcart.models import Order, CertificateItem from verify_student.views import render_to_response from verify_student.models import SoftwareSecurePhotoVerification from reverification.tests.factories import MidcourseReverificationWindowFactory # Since we don't need any XML course fixtures, use a modulestore configuration # that disables the XML modulestore. MODULESTORE_CONFIG = mixed_store_config(settings.COMMON_TEST_DATA_ROOT, {}, include_xml=False) def mock_render_to_response(*args, **kwargs): return render_to_response(*args, **kwargs) render_mock = Mock(side_effect=mock_render_to_response) class StartView(TestCase): def start_url(self, course_id=""): return "/verify_student/{0}".format(urllib.quote(course_id)) def test_start_new_verification(self): """ Test the case where the user has no pending `PhotoVerficiationAttempts`, but is just starting their first. 
""" user = UserFactory.create(username="rusty", password="test") self.client.login(username="rusty", password="test") def must_be_logged_in(self): self.assertHttpForbidden(self.client.get(self.start_url())) @override_settings(MODULESTORE=MODULESTORE_CONFIG) class TestCreateOrderView(ModuleStoreTestCase): """ Tests for the create_order view of verified course registration process """ def setUp(self): self.user = UserFactory.create(username="rusty", password="test") self.client.login(username="rusty", password="test") self.course_id = 'Robot/999/Test_Course' self.course = CourseFactory.create(org='Robot', number='999', display_name='Test Course') verified_mode = CourseMode( course_id=SlashSeparatedCourseKey("Robot", "999", 'Test_Course'), mode_slug="verified", mode_display_name="Verified Certificate", min_price=50 ) verified_mode.save() course_mode_post_data = { 'certificate_mode': 'Select Certificate', 'contribution': 50, 'contribution-other-amt': '', 'explain': '' } self.client.post( reverse("course_modes_choose", kwargs={'course_id': self.course_id}), course_mode_post_data ) def test_invalid_photos_data(self): """ Test that the invalid photo data cannot be submitted """ create_order_post_data = { 'contribution': 50, 'course_id': self.course_id, 'face_image': '', 'photo_id_image': '' } response = self.client.post(reverse('verify_student_create_order'), create_order_post_data) json_response = json.loads(response.content) self.assertFalse(json_response.get('success')) @patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True}) def test_invalid_amount(self): """ Test that the user cannot give invalid amount """ create_order_post_data = { 'contribution': '1.a', 'course_id': self.course_id, 'face_image': ',', 'photo_id_image': ',' } response = self.client.post(reverse('verify_student_create_order'), create_order_post_data) self.assertEquals(response.status_code, 400) self.assertIn('Selected price is not valid number.', response.content) @patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True}) def test_invalid_mode(self): """ Test that the course without verified mode cannot be processed """ course_id = 'Fake/999/Test_Course' CourseFactory.create(org='Fake', number='999', display_name='Test Course') create_order_post_data = { 'contribution': '50', 'course_id': course_id, 'face_image': ',', 'photo_id_image': ',' } response = self.client.post(reverse('verify_student_create_order'), create_order_post_data) self.assertEquals(response.status_code, 400) self.assertIn('This course doesn\'t support verified certificates', response.content) @patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True}) def test_create_order_fail_with_get(self): """ Test that create_order will not work if wrong http method used """ create_order_post_data = { 'contribution': 50, 'course_id': self.course_id, 'face_image': ',', 'photo_id_image': ',' } response = self.client.get(reverse('verify_student_create_order'), create_order_post_data) self.assertEqual(response.status_code, 405) @patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True}) def test_create_order_success(self): """ Test that the order is created successfully when given valid data """ create_order_post_data = { 'contribution': 50, 'course_id': self.course_id, 'face_image': ',', 'photo_id_image': ',' } response = self.client.post(reverse('verify_student_create_order'), create_order_post_data) json_response = json.loads(response.content) 
self.assertTrue(json_response.get('success')) self.assertIsNotNone(json_response.get('orderNumber')) # Verify that the order exists and is configured correctly order = Order.objects.get(user=self.user) self.assertEqual(order.status, 'paying') item = CertificateItem.objects.get(order=order) self.assertEqual(item.status, 'paying') self.assertEqual(item.course_id, self.course.id) self.assertEqual(item.mode, 'verified') @override_settings(MODULESTORE=MODULESTORE_CONFIG) class TestVerifyView(ModuleStoreTestCase): def setUp(self): self.user = UserFactory.create(username="rusty", password="test") self.client.login(username="rusty", password="test") self.course_key = SlashSeparatedCourseKey('Robot', '999', 'Test_Course') self.course = CourseFactory.create(org='Robot', number='999', display_name='Test Course') verified_mode = CourseMode(course_id=self.course_key, mode_slug="verified", mode_display_name="Verified Certificate", min_price=50, suggested_prices="50.0,100.0") verified_mode.save() def test_invalid_course(self): fake_course_id = "Robot/999/Fake_Course" url = reverse('verify_student_verify', kwargs={"course_id": fake_course_id}) response = self.client.get(url) self.assertEquals(response.status_code, 302) def test_valid_course_enrollment_text(self): url = reverse('verify_student_verify', kwargs={"course_id": unicode(self.course_key)}) response = self.client.get(url) self.assertIn("You are now enrolled in", response.content) # make sure org, name, and number are present self.assertIn(self.course.display_org_with_default, response.content) self.assertIn(self.course.display_name_with_default, response.content) self.assertIn(self.course.display_number_with_default, response.content) def test_valid_course_upgrade_text(self): url = reverse('verify_student_verify', kwargs={"course_id": unicode(self.course_key)}) response = self.client.get(url, {'upgrade': "True"}) self.assertIn("You are upgrading your enrollment for", response.content) def test_show_selected_contribution_amount(self): # Set the donation amount in the client's session session = self.client.session session['donation_for_course'] = { unicode(self.course_key): decimal.Decimal('1.23') } session.save() # Retrieve the page url = reverse('verify_student_verify', kwargs={"course_id": unicode(self.course_key)}) response = self.client.get(url) # Expect that the user's contribution amount is shown on the page self.assertContains(response, '1.23') @override_settings(MODULESTORE=MODULESTORE_CONFIG) class TestVerifiedView(ModuleStoreTestCase): """ Tests for VerifiedView. """ def setUp(self): self.user = UserFactory.create(username="abc", password="test") self.client.login(username="abc", password="test") self.course = CourseFactory.create(org='MITx', number='999.1x', display_name='Verified Course') self.course_id = self.course.id def test_verified_course_mode_none(self): """ Test VerifiedView when there is no active verified mode for course. """ url = reverse('verify_student_verified', kwargs={"course_id": self.course_id.to_deprecated_string()}) verify_mode = CourseMode.mode_for_course(self.course_id, "verified") # Verify mode should be None. self.assertEquals(verify_mode, None) response = self.client.get(url) # Status code should be 302. self.assertTrue(response.status_code, 302) # Location should contains dashboard. 
self.assertIn('dashboard', response._headers.get('location')[1]) def test_show_selected_contribution_amount(self): # Configure the course to have a verified mode for mode in ('audit', 'honor', 'verified'): CourseModeFactory(mode_slug=mode, course_id=self.course.id) # Set the donation amount in the client's session session = self.client.session session['donation_for_course'] = { unicode(self.course_id): decimal.Decimal('1.23') } session.save() # Retrieve the page url = reverse('verify_student_verified', kwargs={"course_id": unicode(self.course_id)}) response = self.client.get(url) # Expect that the user's contribution amount is shown on the page self.assertContains(response, '1.23') @override_settings(MODULESTORE=MODULESTORE_CONFIG) class TestReverifyView(ModuleStoreTestCase): """ Tests for the reverification views """ def setUp(self): self.user = UserFactory.create(username="rusty", password="test") self.client.login(username="rusty", password="test") self.course = CourseFactory.create(org='MITx', number='999', display_name='Robot Super Course') self.course_key = self.course.id @patch('verify_student.views.render_to_response', render_mock) def test_reverify_get(self): url = reverse('verify_student_reverify') response = self.client.get(url) self.assertEquals(response.status_code, 200) ((_template, context), _kwargs) = render_mock.call_args self.assertFalse(context['error']) @patch('verify_student.views.render_to_response', render_mock) def test_reverify_post_failure(self): url = reverse('verify_student_reverify') response = self.client.post(url, {'face_image': '', 'photo_id_image': ''}) self.assertEquals(response.status_code, 200) ((template, context), _kwargs) = render_mock.call_args self.assertIn('photo_reverification', template) self.assertTrue(context['error']) @patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True}) def test_reverify_post_success(self): url = reverse('verify_student_reverify') response = self.client.post(url, {'face_image': ',', 'photo_id_image': ','}) self.assertEquals(response.status_code, 302) try: verification_attempt = SoftwareSecurePhotoVerification.objects.get(user=self.user) self.assertIsNotNone(verification_attempt) except ObjectDoesNotExist: self.fail('No verification object generated') ((template, context), _kwargs) = render_mock.call_args self.assertIn('photo_reverification', template) self.assertTrue(context['error']) @override_settings(MODULESTORE=MODULESTORE_CONFIG) class TestPhotoVerificationResultsCallback(ModuleStoreTestCase): """ Tests for the results_callback view. """ def setUp(self): self.course = CourseFactory.create(org='Robot', number='999', display_name='Test Course') self.course_id = self.course.id self.user = UserFactory.create() self.attempt = SoftwareSecurePhotoVerification( status="submitted", user=self.user ) self.attempt.save() self.receipt_id = self.attempt.receipt_id self.client = Client() def mocked_has_valid_signature(method, headers_dict, body_dict, access_key, secret_key): return True def test_invalid_json(self): """ Test for invalid json being posted by software secure. """ data = {"Testing invalid"} response = self.client.post( reverse('verify_student_results_callback'), data=data, content_type='application/json', HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB: testing', HTTP_DATE='testdate' ) self.assertIn('Invalid JSON', response.content) self.assertEqual(response.status_code, 400) def test_invalid_dict(self): """ Test for invalid dictionary being posted by software secure. 
""" data = '"\\"Test\\tTesting"' response = self.client.post( reverse('verify_student_results_callback'), data=data, content_type='application/json', HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing', HTTP_DATE='testdate' ) self.assertIn('JSON should be dict', response.content) self.assertEqual(response.status_code, 400) @mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature)) def test_invalid_access_key(self): """ Test for invalid access key. """ data = { "EdX-ID": self.receipt_id, "Result": "Testing", "Reason": "Testing", "MessageType": "Testing" } json_data = json.dumps(data) response = self.client.post( reverse('verify_student_results_callback'), data=json_data, content_type='application/json', HTTP_AUTHORIZATION='test testing:testing', HTTP_DATE='testdate' ) self.assertIn('Access key invalid', response.content) self.assertEqual(response.status_code, 400) @mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature)) def test_wrong_edx_id(self): """ Test for wrong id of Software secure verification attempt. """ data = { "EdX-ID": "Invalid-Id", "Result": "Testing", "Reason": "Testing", "MessageType": "Testing" } json_data = json.dumps(data) response = self.client.post( reverse('verify_student_results_callback'), data=json_data, content_type='application/json', HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing', HTTP_DATE='testdate' ) self.assertIn('edX ID Invalid-Id not found', response.content) self.assertEqual(response.status_code, 400) @mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature)) def test_pass_result(self): """ Test for verification passed. """ data = { "EdX-ID": self.receipt_id, "Result": "PASS", "Reason": "", "MessageType": "You have been verified." } json_data = json.dumps(data) response = self.client.post( reverse('verify_student_results_callback'), data=json_data, content_type='application/json', HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing', HTTP_DATE='testdate' ) attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=self.receipt_id) self.assertEqual(attempt.status, u'approved') self.assertEquals(response.content, 'OK!') @mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature)) def test_fail_result(self): """ Test for failed verification. """ data = { "EdX-ID": self.receipt_id, "Result": 'FAIL', "Reason": 'Invalid photo', "MessageType": 'Your photo doesn\'t meet standards.' } json_data = json.dumps(data) response = self.client.post( reverse('verify_student_results_callback'), data=json_data, content_type='application/json', HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing', HTTP_DATE='testdate' ) attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=self.receipt_id) self.assertEqual(attempt.status, u'denied') self.assertEqual(attempt.error_code, u'Your photo doesn\'t meet standards.') self.assertEqual(attempt.error_msg, u'"Invalid photo"') self.assertEquals(response.content, 'OK!') @mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature)) def test_system_fail_result(self): """ Test for software secure result system failure. 
""" data = {"EdX-ID": self.receipt_id, "Result": 'SYSTEM FAIL', "Reason": 'Memory overflow', "MessageType": 'You must retry the verification.'} json_data = json.dumps(data) response = self.client.post( reverse('verify_student_results_callback'), data=json_data, content_type='application/json', HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing', HTTP_DATE='testdate' ) attempt = SoftwareSecurePhotoVerification.objects.get(receipt_id=self.receipt_id) self.assertEqual(attempt.status, u'must_retry') self.assertEqual(attempt.error_code, u'You must retry the verification.') self.assertEqual(attempt.error_msg, u'"Memory overflow"') self.assertEquals(response.content, 'OK!') @mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature)) def test_unknown_result(self): """ test for unknown software secure result """ data = { "EdX-ID": self.receipt_id, "Result": 'Unknown', "Reason": 'Unknown reason', "MessageType": 'Unknown message' } json_data = json.dumps(data) response = self.client.post( reverse('verify_student_results_callback'), data=json_data, content_type='application/json', HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing', HTTP_DATE='testdate' ) self.assertIn('Result Unknown not understood', response.content) @mock.patch('verify_student.ssencrypt.has_valid_signature', mock.Mock(side_effect=mocked_has_valid_signature)) def test_reverification(self): """ Test software secure result for reverification window. """ data = { "EdX-ID": self.receipt_id, "Result": "PASS", "Reason": "", "MessageType": "You have been verified." } window = MidcourseReverificationWindowFactory(course_id=self.course_id) self.attempt.window = window self.attempt.save() json_data = json.dumps(data) self.assertEqual(CourseEnrollment.objects.filter(course_id=self.course_id).count(), 0) response = self.client.post( reverse('verify_student_results_callback'), data=json_data, content_type='application/json', HTTP_AUTHORIZATION='test BBBBBBBBBBBBBBBBBBBB:testing', HTTP_DATE='testdate' ) self.assertEquals(response.content, 'OK!') self.assertIsNotNone(CourseEnrollment.objects.get(course_id=self.course_id)) @override_settings(MODULESTORE=MODULESTORE_CONFIG) class TestMidCourseReverifyView(ModuleStoreTestCase): """ Tests for the midcourse reverification views """ def setUp(self): self.user = UserFactory.create(username="rusty", password="test") self.client.login(username="rusty", password="test") self.course_key = SlashSeparatedCourseKey("Robot", "999", "Test_Course") CourseFactory.create(org='Robot', number='999', display_name='Test Course') patcher = patch('student.models.tracker') self.mock_tracker = patcher.start() self.addCleanup(patcher.stop) @patch('verify_student.views.render_to_response', render_mock) def test_midcourse_reverify_get(self): url = reverse('verify_student_midcourse_reverify', kwargs={"course_id": self.course_key.to_deprecated_string()}) response = self.client.get(url) self.mock_tracker.emit.assert_any_call( # pylint: disable=maybe-no-member 'edx.course.enrollment.mode_changed', { 'user_id': self.user.id, 'course_id': self.course_key.to_deprecated_string(), 'mode': "verified", } ) # Check that user entering the reverify flow was logged, and that it was the last call self.mock_tracker.emit.assert_called_with( # pylint: disable=maybe-no-member 'edx.course.enrollment.reverify.started', { 'user_id': self.user.id, 'course_id': self.course_key.to_deprecated_string(), 'mode': "verified", } ) self.assertTrue(self.mock_tracker.emit.call_count, 2) 
self.mock_tracker.emit.reset_mock() # pylint: disable=maybe-no-member self.assertEquals(response.status_code, 200) ((_template, context), _kwargs) = render_mock.call_args self.assertFalse(context['error']) @patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True}) def test_midcourse_reverify_post_success(self): window = MidcourseReverificationWindowFactory(course_id=self.course_key) url = reverse('verify_student_midcourse_reverify', kwargs={'course_id': self.course_key.to_deprecated_string()}) response = self.client.post(url, {'face_image': ','}) self.mock_tracker.emit.assert_any_call( # pylint: disable=maybe-no-member 'edx.course.enrollment.mode_changed', { 'user_id': self.user.id, 'course_id': self.course_key.to_deprecated_string(), 'mode': "verified", } ) # Check that submission event was logged, and that it was the last call self.mock_tracker.emit.assert_called_with( # pylint: disable=maybe-no-member 'edx.course.enrollment.reverify.submitted', { 'user_id': self.user.id, 'course_id': self.course_key.to_deprecated_string(), 'mode': "verified", } ) self.assertTrue(self.mock_tracker.emit.call_count, 2) self.mock_tracker.emit.reset_mock() # pylint: disable=maybe-no-member self.assertEquals(response.status_code, 302) try: verification_attempt = SoftwareSecurePhotoVerification.objects.get(user=self.user, window=window) self.assertIsNotNone(verification_attempt) except ObjectDoesNotExist: self.fail('No verification object generated') @patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True}) def test_midcourse_reverify_post_failure_expired_window(self): window = MidcourseReverificationWindowFactory( course_id=self.course_key, start_date=datetime.now(pytz.UTC) - timedelta(days=100), end_date=datetime.now(pytz.UTC) - timedelta(days=50), ) url = reverse('verify_student_midcourse_reverify', kwargs={'course_id': self.course_key.to_deprecated_string()}) response = self.client.post(url, {'face_image': ','}) self.assertEquals(response.status_code, 302) with self.assertRaises(ObjectDoesNotExist): SoftwareSecurePhotoVerification.objects.get(user=self.user, window=window) @patch('verify_student.views.render_to_response', render_mock) def test_midcourse_reverify_dash(self): url = reverse('verify_student_midcourse_reverify_dash') response = self.client.get(url) # not enrolled in any courses self.assertEquals(response.status_code, 200) enrollment = CourseEnrollment.get_or_create_enrollment(self.user, self.course_key) enrollment.update_enrollment(mode="verified", is_active=True) MidcourseReverificationWindowFactory(course_id=self.course_key) response = self.client.get(url) # enrolled in a verified course, and the window is open self.assertEquals(response.status_code, 200) @override_settings(MODULESTORE=MODULESTORE_CONFIG) class TestReverificationBanner(ModuleStoreTestCase): """ Tests for the midcourse reverification failed toggle banner off """ @patch.dict(settings.FEATURES, {'AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING': True}) def setUp(self): self.user = UserFactory.create(username="rusty", password="test") self.client.login(username="rusty", password="test") self.course_id = 'Robot/999/Test_Course' CourseFactory.create(org='Robot', number='999', display_name=u'Test Course é') self.window = MidcourseReverificationWindowFactory(course_id=self.course_id) url = reverse('verify_student_midcourse_reverify', kwargs={'course_id': self.course_id}) self.client.post(url, {'face_image': ','}) photo_verification = 
SoftwareSecurePhotoVerification.objects.get(user=self.user, window=self.window) photo_verification.status = 'denied' photo_verification.save() def test_banner_display_off(self): self.client.post(reverse('verify_student_toggle_failed_banner_off')) photo_verification = SoftwareSecurePhotoVerification.objects.get(user=self.user, window=self.window) self.assertFalse(photo_verification.display) @override_settings(MODULESTORE=MODULESTORE_CONFIG) class TestCreateOrder(ModuleStoreTestCase): """ Tests for the create order view. """ def setUp(self): """ Create a user and course. """ self.user = UserFactory.create(username="test", password="test") self.course = CourseFactory.create() for mode in ('audit', 'honor', 'verified'): CourseModeFactory(mode_slug=mode, course_id=self.course.id) self.client.login(username="test", password="test") def test_create_order_already_verified(self): # Verify the student so we don't need to submit photos self._verify_student() # Create an order url = reverse('verify_student_create_order') params = { 'course_id': unicode(self.course.id), } response = self.client.post(url, params) self.assertEqual(response.status_code, 200) # Verify that the information will be sent to the correct callback URL # (configured by test settings) data = json.loads(response.content) self.assertEqual(data['override_custom_receipt_page'], "http://testserver/shoppingcart/postpay_callback/") # Verify that the course ID and transaction type are included in "merchant-defined data" self.assertEqual(data['merchant_defined_data1'], unicode(self.course.id)) self.assertEqual(data['merchant_defined_data2'], "verified") def test_create_order_already_verified_prof_ed(self): # Verify the student so we don't need to submit photos self._verify_student() # Create a prof ed course course = CourseFactory.create() CourseModeFactory(mode_slug="professional", course_id=course.id) # Create an order for a prof ed course url = reverse('verify_student_create_order') params = { 'course_id': unicode(course.id) } response = self.client.post(url, params) self.assertEqual(response.status_code, 200) # Verify that the course ID and transaction type are included in "merchant-defined data" data = json.loads(response.content) self.assertEqual(data['merchant_defined_data1'], unicode(course.id)) self.assertEqual(data['merchant_defined_data2'], "professional") def test_create_order_set_donation_amount(self): # Verify the student so we don't need to submit photos self._verify_student() # Create an order url = reverse('verify_student_create_order') params = { 'course_id': unicode(self.course.id), 'contribution': '1.23' } self.client.post(url, params) # Verify that the client's session contains the new donation amount self.assertIn('donation_for_course', self.client.session) self.assertIn(unicode(self.course.id), self.client.session['donation_for_course']) actual_amount = self.client.session['donation_for_course'][unicode(self.course.id)] expected_amount = decimal.Decimal('1.23') self.assertEqual(actual_amount, expected_amount) def _verify_student(self): """ Simulate that the student's identity has already been verified. """ attempt = SoftwareSecurePhotoVerification.objects.create(user=self.user) attempt.mark_ready() attempt.submit() attempt.approve()
agpl-3.0
geekboxzone/mmallow_external_parameter-framework
test/functional-tests/PfwTestCase/Domains/tDomain_Configuration.py
10
28508
# -*-coding:utf-8 -* # Copyright (c) 2011-2015, Intel Corporation # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation and/or # other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ Creation, renaming and deletion configuration testcases List of tested functions : -------------------------- - [listConfigurations] function - [createConfiguration] function - [deleteConfiguration] function - [renameConfiguration] function Test cases : ------------ - Testing configuration creation error - Testing configuration renaming error - Testing configuration deletion error - Testing nominal case """ import os from Util.PfwUnitTestLib import PfwTestCase from Util import ACTLogging log=ACTLogging.Logger() # Test of Domains - Rename class TestCases(PfwTestCase): def setUp(self): self.pfw.sendCmd("setTuningMode", "on") self.domain_name = "domain_test" self.conf_test = "conf_white" self.conf_test_renamed = "conf_black" self.new_conf_number = 5 def tearDown(self): self.pfw.sendCmd("setTuningMode", "off") def test_Conf_Creation_Error(self): """ Testing configuration creation error ------------------------------------ Test case description : ~~~~~~~~~~~~~~~~~~~~~~~ - Create an already existent configuration - Create a configuration with no name specified - Create a configuration on a wrong domain name Tested commands : ~~~~~~~~~~~~~~~~~ - [createConfiguration] function - [createDomain] function - [listConfigurations] function - [deleteConfiguration] function - [deleteDomain] function Expected result : ~~~~~~~~~~~~~~~~~ - no configuration created - existent configurations not affected by error """ log.D(self.test_Conf_Creation_Error.__doc__) # New domain creation for testing purpose log.I("New domain creation for testing purpose : %s" % (self.domain_name)) log.I("command [createDomain]") out, err = self.pfw.sendCmd("createDomain",self.domain_name, "") assert out == "Done", out assert err == None, "ERROR : command [createDomain] - Error while creating domain %s" % (self.domain_name) log.I("command [createDomain] correctly executed") log.I("Domain %s created" % (self.domain_name)) # New configurations creation for testing purpose for iteration in range 
(self.new_conf_number): new_conf_name = "".join([self.conf_test, "_", str(iteration)]) log.I("New configuration %s creation for domain %s" % (new_conf_name,self.domain_name)) log.I("command [createConfiguration]") out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name) assert out == "Done", out assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name) log.I("command [createConfiguration] correctly executed") log.I("Configuration %s created for domain %s" % (new_conf_name,self.domain_name)) # Domain configurations listing backup log.I("Configurations listing for domain %s" % (self.domain_name)) log.I("command [listConfigurations]") out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "") assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name) log.I("command [listConfigurations] correctly executed") # Saving configurations names f_configurations_backup = open("f_configurations_backup", "w") f_configurations_backup.write(out) f_configurations_backup.close() # New configurations creation error log.I("Creating an already existent configurations names") for iteration in range (self.new_conf_number): new_conf_name = "".join([self.conf_test, "_", str(iteration)]) log.I("Trying to create already existent %s configuration for domain %s" % (new_conf_name,self.domain_name)) log.I("command [createConfiguration]") out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name) assert out != "Done", "ERROR : command [createConfiguration] - Error not detected while creating already existent configuration %s" % (new_conf_name) assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name) log.I("command [createConfiguration] correctly executed") log.I("error correctly detected, no configuration created") log.I("Creating a configuration without specifying a name") out, err = self.pfw.sendCmd("createConfiguration",self.domain_name) assert out != "Done", "ERROR : command [createConfiguration] - Error not detected while creating a configuration without specifying a name" assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration" log.I("error correctly detected") log.I("Creating a configuration on a wrong domain name") new_conf_name = "new_conf" out, err = self.pfw.sendCmd("createConfiguration","wrong_domain_name",new_conf_name) assert out != "Done", "ERROR : command [createConfiguration] - Error not detected while creating a configuration on a wrong domain name" assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration" log.I("error correctly detected") # New domain configurations listing log.I("Configurations listing for domain %s" % (self.domain_name)) log.I("command [listConfigurations]" ) out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "") assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name) log.I("command [listConfigurations] correctly executed") # Saving configurations names f_configurations = open("f_configurations", "w") f_configurations.write(out) f_configurations.close() # Checking configurations names integrity log.I("Configurations listing conformity check") f_configurations = open("f_configurations", "r") f_configurations_backup = open("f_configurations_backup", "r") for iteration in 
range(self.new_conf_number): listed_conf_backup = f_configurations_backup.readline().strip('\n') listed_conf = f_configurations.readline().strip('\n') assert listed_conf==listed_conf_backup, "ERROR : Error while listing configuration %s (found %s)" % (listed_conf_backup, listed_conf) log.I("No change detected, listed configurations names conform to expected values") # New domain deletion log.I("End of test, new domain deletion") log.I("command [deleteDomain]") out, err = self.pfw.sendCmd("deleteDomain",self.domain_name, "") assert out == "Done", "ERROR : %s" % (out) assert err == None, "ERROR : command [deleteDomain] - Error while deleting domain %s" % (self.domain_name) log.I("command [deleteDomain] correctly executed") # Closing and deleting temp files f_configurations_backup.close() os.remove("f_configurations_backup") f_configurations.close() os.remove("f_configurations") def test_Conf_Renaming_Error(self): """ Testing configuration renaming error ------------------------------------ Test case description : ~~~~~~~~~~~~~~~~~~~~~~~ - Rename a configuration with an already used name - Rename a configuration with no name specified - Rename a configuration on a wrong domain name Tested commands : ~~~~~~~~~~~~~~~~~ - [renameConfiguration] function - [createDomain] function - [listConfigurations] function - [createConfiguration] function - [deleteConfiguration] function - [deleteDomain] function Expected result : ~~~~~~~~~~~~~~~~~ - error detected - no configuration created - existent configurations not affected by error """ log.D(self.test_Conf_Renaming_Error.__doc__) # New domain creation for testing purpose log.I("New domain creation for testing purpose : %s" % (self.domain_name)) log.I("command [createDomain]") out, err = self.pfw.sendCmd("createDomain",self.domain_name, "") assert out == "Done", out assert err == None, "ERROR : command [createDomain] - Error while creating domain %s" % (self.domain_name) log.I("command [createDomain] correctly executed") log.I("Domain %s created" % (self.domain_name)) # New configurations creation for testing purpose for iteration in range (self.new_conf_number): new_conf_name = "".join([self.conf_test, "_", str(iteration)]) log.I("New configuration %s creation for domain %s" % (new_conf_name,self.domain_name)) log.I("command [createConfiguration]") out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name) assert out == "Done", out assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name) log.I("command [createConfiguration] correctly executed") log.I("Configuration %s created for domain %s" % (new_conf_name,self.domain_name)) # Domain configurations listing backup log.I("Configurations listing for domain %s" % (self.domain_name)) log.I("command [listConfigurations]") out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "") assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name) log.I("command [listConfigurations] correctly executed") # Saving configurations names f_configurations_backup = open("f_configurations_backup", "w") f_configurations_backup.write(out) f_configurations_backup.close() # New configurations renaming error log.I("renaming a configuration with an already used name") for iteration in range (self.new_conf_number-1): conf_name = "".join([self.conf_test, "_", str(iteration)]) new_conf_name = "".join([self.conf_test, "_", str(iteration+1)]) log.I("Trying to rename %s 
on domain %s with an already used name : %s" % (conf_name,self.domain_name,new_conf_name)) log.I("command [renameConfiguration]" ) out, err = self.pfw.sendCmd("renameConfiguration",self.domain_name,conf_name,new_conf_name) assert out != "Done", "ERROR : command [renameConfiguration] - Error not detected while renaming configuration %s with an already used name" % (new_conf_name) assert err == None, "ERROR : command [renameConfiguration] - Error while renaming configuration %s" % (new_conf_name) log.I("command [renameConfiguration] correctly executed") log.I("error correctly detected, no configuration renamed") log.I("renaming a configuration without specifying a new name") out, err = self.pfw.sendCmd("renameConfiguration",self.domain_name,new_conf_name) assert out != "Done", "ERROR : command [renameConfiguration] - Error not detected while renaming a configuration without specifying a new name" assert err == None, "ERROR : command [renameConfiguration] - Error while renaming configuration" log.I("error correctly detected, no configuration renamed") log.I("renaming a configuration on a wrong domain name") new_conf_name = "new_conf" out, err = self.pfw.sendCmd("renameConfiguration","wrong_domain_name",new_conf_name,"Configuration") assert out != "Done", "ERROR : command [renameConfiguration] - Error not detected while renaming a configuration on a wrong domain name" assert err == None, "ERROR : command [renameConfiguration] - Error while renaming configuration" log.I("error correctly detected, no configuration renamed") # New domain configurations listing log.I("Configurations listing for domain %s" % (self.domain_name)) log.I("command [listConfigurations]") out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "") assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name) log.I("command [listConfigurations] correctly executed") # Saving configurations names f_configurations = open("f_configurations", "w") f_configurations.write(out) f_configurations.close() # Checking configurations names integrity log.I("Configurations listing conformity check") f_configurations = open("f_configurations", "r") f_configurations_backup = open("f_configurations_backup", "r") for iteration in range(self.new_conf_number): listed_conf_backup = f_configurations_backup.readline().strip('\n') listed_conf = f_configurations.readline().strip('\n') assert listed_conf==listed_conf_backup, "ERROR : Error while listing configuration %s (found %s)" % (listed_conf_backup, listed_conf) log.I("No change detected, listed configurations names conform to expected values") # Testing domain deletion log.I("End of test, new domain deletion") log.I("command [deleteDomain]") out, err = self.pfw.sendCmd("deleteDomain",self.domain_name, "") assert out == "Done", "ERROR : %s" % (out) assert err == None, "ERROR : command [deleteDomain] - Error while deleting domain %s" % (self.domain_name) log.I("command [deleteDomain] correctly executed") # Closing and deleting temp files f_configurations_backup.close() os.remove("f_configurations_backup") f_configurations.close() os.remove("f_configurations") def test_Conf_Deletion_Error(self): """ Testing configuration deletion error ------------------------------------ Test case description : ~~~~~~~~~~~~~~~~~~~~~~~ - Delete a configuration with a non existent name - Delete a configuration with no name specified - Delete a configuration on a wrong domain name Tested commands : ~~~~~~~~~~~~~~~~~ - 
[deleteConfiguration] function - [createDomain] function - [listConfigurations] function - [createConfiguration] function - [deleteDomain] function Expected result : ~~~~~~~~~~~~~~~~~ - error detected - no configuration created - existent configurations not affected by error """ print self.test_Conf_Renaming_Error.__doc__ # New domain creation for testing purpose log.I("New domain creation for testing purpose : %s" % (self.domain_name)) log.I("command [createDomain]") out, err = self.pfw.sendCmd("createDomain",self.domain_name, "") assert out == "Done", out assert err == None, "ERROR : command [createDomain] - Error while creating domain %s" % (self.domain_name) log.I("command [createDomain] correctly executed") log.I("Domain %s created" % (self.domain_name)) # New configurations creation for testing purpose for iteration in range (self.new_conf_number): new_conf_name = "".join([self.conf_test, "_", str(iteration)]) log.I("New configuration %s creation for domain %s" % (new_conf_name,self.domain_name)) log.I("command [createConfiguration]") out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name) assert out == "Done", out assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name) log.I("command [createConfiguration] correctly executed") log.I("Configuration %s created for domain %s" % (new_conf_name,self.domain_name)) # Domain configurations listing backup log.I("Configurations listing for domain %s" % (self.domain_name)) log.I("command [listConfigurations]") out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "") assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name) log.I("command [listConfigurations] correctly executed") # Saving configurations names f_configurations_backup = open("f_configurations_backup", "w") f_configurations_backup.write(out) f_configurations_backup.close() # Configurations deletion errors log.I("Trying various deletions error test cases") log.I("Trying to delete a wrong configuration name on domain %s" % (self.domain_name)) log.I("command [deleteConfiguration]") out, err = self.pfw.sendCmd("deleteConfiguration",self.domain_name,"wrong_configuration_name") assert out != "Done", "ERROR : command [deleteConfiguration] - Error not detected while deleting non existent configuration name" assert err == None, "ERROR : command [deleteConfiguration] - Error while deleting configuration" log.I("command [deleteConfiguration] correctly executed") log.I("error correctly detected, no configuration deleted") log.I("deleting a configuration with no name specified") out, err = self.pfw.sendCmd("deleteConfiguration",self.domain_name) assert out != "Done", "ERROR : command [deleteConfiguration] - Error not detected while deleting a configuration without specifying a name" assert err == None, "ERROR : command [deleteConfiguration] - Error while deleting configuration" log.I("error correctly detected, no configuration deleted") log.I("deleting a configuration on a wrong domain name") out, err = self.pfw.sendCmd("deleteConfiguration","wrong_domain_name",new_conf_name) assert out != "Done", "ERROR : command [deleteConfiguration] - Error not detected while deleting a configuration on a wrong domain name" assert err == None, "ERROR : command [deleteConfiguration] - Error while deleting configuration" log.I("error correctly detected, no configuration deleted") # New domain configurations listing log.I("Configurations 
listing for domain %s" % (self.domain_name)) log.I("command [listConfigurations]") out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "") assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name) log.I("command [listConfigurations] correctly executed") # Saving configurations names f_configurations = open("f_configurations", "w") f_configurations.write(out) f_configurations.close() # Checking configurations names integrity log.I("Configurations listing conformity check") f_configurations = open("f_configurations", "r") f_configurations_backup = open("f_configurations_backup", "r") for iteration in range(self.new_conf_number): listed_conf_backup = f_configurations_backup.readline().strip('\n') listed_conf = f_configurations.readline().strip('\n') assert listed_conf==listed_conf_backup, "ERROR : Error while listing configuration %s (found %s)" % (listed_conf_backup, listed_conf) log.I("No change detected, listed configurations names conform to expected values") # Testing domain deletion log.I("End of test, new domain deletion") log.I("command [deleteDomain]") out, err = self.pfw.sendCmd("deleteDomain",self.domain_name, "") assert out == "Done", "ERROR : %s" % (out) assert err == None, "ERROR : command [deleteDomain] - Error while deleting domain %s" % (self.domain_name) log.I("command [deleteDomain] correctly executed") # Closing and deleting temp files f_configurations_backup.close() os.remove("f_configurations_backup") f_configurations.close() os.remove("f_configurations") def test_Nominal_Case(self): """ Testing nominal cases --------------------- Test case description : ~~~~~~~~~~~~~~~~~~~~~~~ - Create new configurations - List domain configurations - Rename configurations - Delete configurations Tested commands : ~~~~~~~~~~~~~~~~~ - [listConfigurations] function - [createConfiguration] function - [renameConfiguration] function - [deleteConfiguration] function - [createDomain] function - [deleteDomain] function Expected result : ~~~~~~~~~~~~~~~~~ - all operations succeed """ log.D(self.test_Nominal_Case.__doc__) # New domain creation log.I("New domain creation for testing purpose : %s" % (self.domain_name)) log.I("command [createDomain]") out, err = self.pfw.sendCmd("createDomain",self.domain_name, "") assert out == "Done", out assert err == None, "ERROR : command [createDomain] - Error while creating domain %s" % (self.domain_name) log.I("command [createDomain] correctly executed") log.I("Domain %s created" % (self.domain_name)) # New configurations creation for iteration in range (self.new_conf_number): new_conf_name = "".join([self.conf_test, "_", str(iteration)]) log.I("New configuration %s creation for domain %s" % (new_conf_name,self.domain_name)) log.I("command [createConfiguration]" ) out, err = self.pfw.sendCmd("createConfiguration",self.domain_name,new_conf_name) assert out == "Done", out assert err == None, "ERROR : command [createConfiguration] - Error while creating configuration %s" % (new_conf_name) log.I("command [createConfiguration] correctly executed") log.I("Configuration %s created for domain %s" % (new_conf_name,self.domain_name)) # Listing domain configurations log.I("Configurations listing for domain %s" % (self.domain_name)) log.I("command [listConfigurations]") out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "") assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name) log.I("command 
[listConfigurations] correctly executed") # Saving configurations names f_configurations = open("f_configurations", "w") f_configurations.write(out) f_configurations.close() # Checking configurations names integrity log.I("Configurations listing conformity check") f_configurations = open("f_configurations", "r") for iteration in range(self.new_conf_number): new_conf_name = "".join([self.conf_test, "_", str(iteration)]) listed_conf = f_configurations.readline().strip('\n') assert listed_conf==new_conf_name, "ERROR : Error while listing configuration %s (found %s)" % (listed_conf, new_conf_name) log.I("Listed configurations names conform to expected values") # Configuration renaming log.I("Configurations renaming") for iteration in range (self.new_conf_number): conf_name = "".join([self.conf_test, "_", str(iteration)]) new_conf_name = "".join([self.conf_test_renamed, "_", str(iteration)]) log.I("Configuration %s renamed to %s in domain %s" % (conf_name,new_conf_name,self.domain_name)) log.I("command [renameConfiguration]") out, err = self.pfw.sendCmd("renameConfiguration",self.domain_name,conf_name,new_conf_name) assert out == "Done", out assert err == None, "ERROR : command [renameConfiguration] - Error while renaming configuration %s to %s" % (conf_name,new_conf_name) log.I("command [renameConfiguration] correctly executed") log.I("Configuration %s renamed to %s for domain %s" % (conf_name,new_conf_name,self.domain_name)) # Listing domain configurations log.I("Configurations listing to check configurations renaming") log.I("command [listConfigurations]") out, err = self.pfw.sendCmd("listConfigurations",self.domain_name, "") assert err == None, "ERROR : command [listConfigurations] - Error while listing configurations for domain %s" % (self.domain_name) log.I("command [listConfigurations] correctly executed") # Saving configurations names f_configurations_renamed = open("f_configurations_renamed", "w") f_configurations_renamed.write(out) f_configurations_renamed.close() # Checking configurations names integrity log.I("Configurations listing conformity check") f_configurations_renamed = open("f_configurations_renamed", "r") for iteration in range(self.new_conf_number): new_conf_name = "".join([self.conf_test_renamed, "_", str(iteration)]) listed_conf = f_configurations_renamed.readline().strip('\n') assert listed_conf==new_conf_name, "ERROR : Error while renaming configuration %s (found %s)" % (new_conf_name,listed_conf) log.I("Listed configurations names conform to expected values, renaming successfull") # New domain deletion log.I("End of test, new domain deletion") log.I("command [deleteDomain]") out, err = self.pfw.sendCmd("deleteDomain",self.domain_name, "") assert out == "Done", "ERROR : %s" % (out) assert err == None, "ERROR : command [deleteDomain] - Error while deleting domain %s" % (self.domain_name) log.I("command [deleteDomain] correctly executed") # Closing and deleting temp file f_configurations.close() os.remove("f_configurations") f_configurations_renamed.close() os.remove("f_configurations_renamed")
bsd-3-clause
prembasumatary/osrm-backend
cmake/cmake_options_script.py
80
1514
# Based on @berenm's pull request https://github.com/quarnster/SublimeClang/pull/135 # Create the database with cmake with for example: cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON .. # or you could have set(CMAKE_EXPORT_COMPILE_COMMANDS ON) in your CMakeLists.txt # Usage within SublimeClang: # "sublimeclang_options_script": "python ${home}/code/cmake_options_script.py ${project_path:build}/compile_commands.json", import re import os import os.path import pickle import sys import json compilation_database_pattern = re.compile('(?<=\s)-[DIOUWfgs][^=\s]+(?:=\\"[^"]+\\"|=[^"]\S+)?') def load_db(filename): compilation_database = {} with open(filename) as compilation_database_file: compilation_database_entries = json.load(compilation_database_file) total = len(compilation_database_entries) entry = 0 for compilation_entry in compilation_database_entries: entry = entry + 1 compilation_database[compilation_entry["file"]] = [ p.strip() for p in compilation_database_pattern.findall(compilation_entry["command"]) ] return compilation_database scriptpath = os.path.dirname(os.path.abspath(sys.argv[1])) cache_file = "%s/cached_options.txt" % (scriptpath) db = None if os.access(cache_file, os.R_OK) == 0: db = load_db(sys.argv[1]) f = open(cache_file, "wb") pickle.dump(db, f) f.close() else: f = open(cache_file) db = pickle.load(f) f.close() if db and sys.argv[2] in db: for option in db[sys.argv[2]]: print option
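For context on the input that cmake_options_script.py above consumes: compile_commands.json, as emitted by CMake when CMAKE_EXPORT_COMPILE_COMMANDS is enabled, is a JSON array whose entries carry "directory", "command" and "file" keys, so after json.load the script's compilation_database_entries is a list of dicts. The sketch below is illustrative only — the paths and flags are made-up placeholders, not taken from the osrm-backend build — and the script's regular expression then keeps just the -D/-I/-O/-U/-W/-f/-g/-s style options found in each "command" string.

    # hypothetical compile_commands.json entry, shown as the Python object json.load returns
    [
        {
            "directory": "/home/user/osrm-backend/build",
            "command": "/usr/bin/c++ -DNDEBUG -I/home/user/osrm-backend/include -O2 -c /home/user/osrm-backend/src/osrm.cpp",
            "file": "/home/user/osrm-backend/src/osrm.cpp"
        }
    ]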
bsd-2-clause
rhdedgar/openshift-tools
openshift/installer/vendored/openshift-ansible-3.0.88/filter_plugins/oo_filters.py
18
39909
#!/usr/bin/python # -*- coding: utf-8 -*- # vim: expandtab:tabstop=4:shiftwidth=4 """ Custom filters for use in openshift-ansible """ from ansible import errors from collections import Mapping from operator import itemgetter import OpenSSL.crypto import os import pdb import re import json import yaml from ansible.utils.unicode import to_unicode # Disabling too-many-public-methods, since filter methods are necessarily # public # pylint: disable=too-many-public-methods class FilterModule(object): """ Custom ansible filters """ @staticmethod def oo_pdb(arg): """ This pops you into a pdb instance where arg is the data passed in from the filter. Ex: "{{ hostvars | oo_pdb }}" """ pdb.set_trace() return arg @staticmethod def get_attr(data, attribute=None): """ This looks up dictionary attributes of the form a.b.c and returns the value. Ex: data = {'a': {'b': {'c': 5}}} attribute = "a.b.c" returns 5 """ if not attribute: raise errors.AnsibleFilterError("|failed expects attribute to be set") ptr = data for attr in attribute.split('.'): ptr = ptr[attr] return ptr @staticmethod def oo_flatten(data): """ This filter plugin will flatten a list of lists """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects to flatten a List") return [item for sublist in data for item in sublist] @staticmethod def oo_merge_dicts(first_dict, second_dict): """ Merge two dictionaries where second_dict values take precedence. Ex: first_dict={'a': 1, 'b': 2} second_dict={'b': 3, 'c': 4} returns {'a': 1, 'b': 3, 'c': 4} """ if not isinstance(first_dict, dict) or not isinstance(second_dict, dict): raise errors.AnsibleFilterError("|failed expects to merge two dicts") merged = first_dict.copy() merged.update(second_dict) return merged @staticmethod def oo_collect(data, attribute=None, filters=None): """ This takes a list of dict and collects all attributes specified into a list. If filter is specified then we will include all items that match _ALL_ of filters. If a dict entry is missing the key in a filter it will be excluded from the match. 
Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return {'a':2, 'z': 'z'}, # True, return {'a':3, 'z': 'z'}, # True, return {'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z'] ] attribute = 'a' filters = {'z': 'z'} returns [1, 2, 3] """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects to filter on a List") if not attribute: raise errors.AnsibleFilterError("|failed expects attribute to be set") if filters is not None: if not isinstance(filters, dict): raise errors.AnsibleFilterError("|failed expects filter to be a" " dict") retval = [FilterModule.get_attr(d, attribute) for d in data if ( all([d.get(key, None) == filters[key] for key in filters]))] else: retval = [FilterModule.get_attr(d, attribute) for d in data] return retval @staticmethod def oo_select_keys_from_list(data, keys): """ This returns a list, which contains the value portions for the keys Ex: data = { 'a':1, 'b':2, 'c':3 } keys = ['a', 'c'] returns [1, 3] """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects to filter on a list") if not isinstance(keys, list): raise errors.AnsibleFilterError("|failed expects first param is a list") # Gather up the values for the list of keys passed in retval = [FilterModule.oo_select_keys(item, keys) for item in data] return FilterModule.oo_flatten(retval) @staticmethod def oo_select_keys(data, keys): """ This returns a list, which contains the value portions for the keys Ex: data = { 'a':1, 'b':2, 'c':3 } keys = ['a', 'c'] returns [1, 3] """ if not isinstance(data, Mapping): raise errors.AnsibleFilterError("|failed expects to filter on a dict or object") if not isinstance(keys, list): raise errors.AnsibleFilterError("|failed expects first param is a list") # Gather up the values for the list of keys passed in retval = [data[key] for key in keys if key in data] return retval @staticmethod def oo_prepend_strings_in_list(data, prepend): """ This takes a list of strings and prepends a string to each item in the list Ex: data = ['cart', 'tree'] prepend = 'apple-' returns ['apple-cart', 'apple-tree'] """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects first param is a list") if not all(isinstance(x, basestring) for x in data): raise errors.AnsibleFilterError("|failed expects first param is a list" " of strings") retval = [prepend + s for s in data] return retval @staticmethod def oo_combine_key_value(data, joiner='='): """Take a list of dict in the form of { 'key': 'value'} and arrange them as a list of strings ['key=value'] """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects first param is a list") rval = [] for item in data: rval.append("%s%s%s" % (item['key'], joiner, item['value'])) return rval @staticmethod def oo_combine_dict(data, in_joiner='=', out_joiner=' '): """Take a dict in the form of { 'key': 'value', 'key': 'value' } and arrange them as a string 'key=value key=value' """ if not isinstance(data, dict): raise errors.AnsibleFilterError("|failed expects first param is a dict") return out_joiner.join([in_joiner.join([k, v]) for k, v in data.items()]) @staticmethod def oo_ami_selector(data, image_name): """ This takes a list of amis and an image name and attempts to return the latest ami. 
""" if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects first param is a list") if not data: return None else: if image_name is None or not image_name.endswith('_*'): ami = sorted(data, key=itemgetter('name'), reverse=True)[0] return ami['ami_id'] else: ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data] ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0] return ami['ami_id'] @staticmethod def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False): """ This takes a dictionary of volume definitions and returns a valid ec2 volume definition based on the host_type and the values in the dictionary. The dictionary should look similar to this: { 'master': { 'root': { 'volume_size': 10, 'device_type': 'gp2', 'iops': 500 }, 'docker': { 'volume_size': 40, 'device_type': 'gp2', 'iops': 500, 'ephemeral': 'true' } }, 'node': { 'root': { 'volume_size': 10, 'device_type': 'io1', 'iops': 1000 }, 'docker': { 'volume_size': 40, 'device_type': 'gp2', 'iops': 500, 'ephemeral': 'true' } } } """ if not isinstance(data, dict): raise errors.AnsibleFilterError("|failed expects first param is a dict") if host_type not in ['master', 'node', 'etcd']: raise errors.AnsibleFilterError("|failed expects etcd, master or node" " as the host type") root_vol = data[host_type]['root'] root_vol['device_name'] = '/dev/sda1' root_vol['delete_on_termination'] = True if root_vol['device_type'] != 'io1': root_vol.pop('iops', None) if host_type in ['master', 'node'] and 'docker' in data[host_type]: docker_vol = data[host_type]['docker'] docker_vol['device_name'] = '/dev/xvdb' docker_vol['delete_on_termination'] = True if docker_vol['device_type'] != 'io1': docker_vol.pop('iops', None) if docker_ephemeral: docker_vol.pop('device_type', None) docker_vol.pop('delete_on_termination', None) docker_vol['ephemeral'] = 'ephemeral0' return [root_vol, docker_vol] elif host_type == 'etcd' and 'etcd' in data[host_type]: etcd_vol = data[host_type]['etcd'] etcd_vol['device_name'] = '/dev/xvdb' etcd_vol['delete_on_termination'] = True if etcd_vol['device_type'] != 'io1': etcd_vol.pop('iops', None) return [root_vol, etcd_vol] return [root_vol] @staticmethod def oo_split(string, separator=','): """ This splits the input string into a list. If the input string is already a list we will return it as is. 
""" if isinstance(string, list): return string return string.split(separator) @staticmethod def oo_haproxy_backend_masters(hosts): """ This takes an array of dicts and returns an array of dicts to be used as a backend for the haproxy role """ servers = [] for idx, host_info in enumerate(hosts): server = dict(name="master%s" % idx) server_ip = host_info['openshift']['common']['ip'] server_port = host_info['openshift']['master']['api_port'] server['address'] = "%s:%s" % (server_ip, server_port) server['opts'] = 'check' servers.append(server) return servers @staticmethod def oo_filter_list(data, filter_attr=None): """ This returns a list, which contains all items where filter_attr evaluates to true Ex: data = [ { a: 1, b: True }, { a: 3, b: False }, { a: 5, b: True } ] filter_attr = 'b' returns [ { a: 1, b: True }, { a: 5, b: True } ] """ if not isinstance(data, list): raise errors.AnsibleFilterError("|failed expects to filter on a list") if not isinstance(filter_attr, basestring): raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode") # Gather up the values for the list of keys passed in return [x for x in data if filter_attr in x and x[filter_attr]] @staticmethod def oo_oc_nodes_matching_selector(nodes, selector): """ Filters a list of nodes by selector. Examples: nodes = [{"kind": "Node", "metadata": {"name": "node1.example.com", "labels": {"kubernetes.io/hostname": "node1.example.com", "color": "green"}}}, {"kind": "Node", "metadata": {"name": "node2.example.com", "labels": {"kubernetes.io/hostname": "node2.example.com", "color": "red"}}}] selector = 'color=green' returns = ['node1.example.com'] nodes = [{"kind": "Node", "metadata": {"name": "node1.example.com", "labels": {"kubernetes.io/hostname": "node1.example.com", "color": "green"}}}, {"kind": "Node", "metadata": {"name": "node2.example.com", "labels": {"kubernetes.io/hostname": "node2.example.com", "color": "red"}}}] selector = 'color=green,color=red' returns = ['node1.example.com','node2.example.com'] Args: nodes (list[dict]): list of node definitions selector (str): "label=value" node selector to filter `nodes` by Returns: list[str]: nodes filtered by selector """ if not isinstance(nodes, list): raise errors.AnsibleFilterError("failed expects nodes to be a list, got {0}".format(type(nodes))) if not isinstance(selector, basestring): raise errors.AnsibleFilterError("failed expects selector to be a string") if not re.match('.*=.*', selector): raise errors.AnsibleFilterError("failed selector does not match \"label=value\" format") node_lists = [] for node_selector in ''.join(selector.split()).split(','): label = node_selector.split('=')[0] value = node_selector.split('=')[1] node_lists.append(FilterModule.oo_oc_nodes_with_label(nodes, label, value)) nodes = set(node_lists[0]) for node_list in node_lists[1:]: nodes.intersection_update(node_list) return list(nodes) @staticmethod def oo_oc_nodes_with_label(nodes, label, value): """ Filters a list of nodes by label, value. 
Examples: nodes = [{"kind": "Node", "metadata": {"name": "node1.example.com", "labels": {"kubernetes.io/hostname": "node1.example.com", "color": "green"}}}, {"kind": "Node", "metadata": {"name": "node2.example.com", "labels": {"kubernetes.io/hostname": "node2.example.com", "color": "red"}}}] label = 'color' value = 'green' returns = ['node1.example.com'] Args: nodes (list[dict]): list of node definitions label (str): label to filter `nodes` by value (str): value of `label` to filter `nodes` by Returns: list[str]: nodes filtered by selector """ if not isinstance(nodes, list): raise errors.AnsibleFilterError("failed expects nodes to be a list") if not isinstance(label, basestring): raise errors.AnsibleFilterError("failed expects label to be a string") if not isinstance(value, basestring): raise errors.AnsibleFilterError("failed expects value to be a string") matching_nodes = [] for node in nodes: if label in node['metadata']['labels']: if node['metadata']['labels'][label] == value: matching_nodes.append(node['metadata']['name']) return matching_nodes @staticmethod def oo_nodes_with_label(nodes, label, value=None): """ Filters a list of nodes by label and value (if provided) It handles labels that are in the following variables by priority: openshift_node_labels, cli_openshift_node_labels, openshift['node']['labels'] Examples: data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}}, 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}, 'c': {'openshift_node_labels': {'size': 'S'}}] label = 'color' returns = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}}, 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}] data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}}, 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}, 'c': {'openshift_node_labels': {'size': 'S'}}] label = 'color' value = 'green' returns = ['b': {'labels': {'color': 'green', 'size': 'L'}}] Args: nodes (list[dict]): list of node to node variables label (str): label to filter `nodes` by value (Optional[str]): value of `label` to filter by Defaults to None. 
Returns: list[dict]: nodes filtered by label and value (if provided) """ if not isinstance(nodes, list): raise errors.AnsibleFilterError("failed expects to filter on a list") if not isinstance(label, basestring): raise errors.AnsibleFilterError("failed expects label to be a string") if value is not None and not isinstance(value, basestring): raise errors.AnsibleFilterError("failed expects value to be a string") def label_filter(node): """ filter function for testing if node should be returned """ if not isinstance(node, dict): raise errors.AnsibleFilterError("failed expects to filter on a list of dicts") if 'openshift_node_labels' in node: labels = node['openshift_node_labels'] elif 'cli_openshift_node_labels' in node: labels = node['cli_openshift_node_labels'] elif 'openshift' in node and 'node' in node['openshift'] and 'labels' in node['openshift']['node']: labels = node['openshift']['node']['labels'] else: return False if isinstance(labels, basestring): labels = yaml.safe_load(labels) if not isinstance(labels, dict): raise errors.AnsibleFilterError( "failed expected node labels to be a dict or serializable to a dict" ) return label in labels and (value is None or labels[label] == value) return [n for n in nodes if label_filter(n)] @staticmethod def oo_parse_heat_stack_outputs(data): """ Formats the HEAT stack output into a usable form The goal is to transform something like this: +---------------+-------------------------------------------------+ | Property | Value | +---------------+-------------------------------------------------+ | capabilities | [] | | | creation_time | 2015-06-26T12:26:26Z | | | description | OpenShift cluster | | | … | … | | outputs | [ | | | { | | | "output_value": "value_A" | | | "description": "This is the value of Key_A" | | | "output_key": "Key_A" | | | }, | | | { | | | "output_value": [ | | | "value_B1", | | | "value_B2" | | | ], | | | "description": "This is the value of Key_B" | | | "output_key": "Key_B" | | | }, | | | ] | | parameters | { | | … | … | +---------------+-------------------------------------------------+ into something like this: { "Key_A": "value_A", "Key_B": [ "value_B1", "value_B2" ] } """ # Extract the “outputs” JSON snippet from the pretty-printed array in_outputs = False outputs = '' line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|') for line in data['stdout_lines']: match = line_regex.match(line) if match: if match.group(1) == 'outputs': in_outputs = True elif match.group(1) != '': in_outputs = False if in_outputs: outputs += match.group(2) outputs = json.loads(outputs) # Revamp the “outputs” to put it in the form of a “Key: value” map revamped_outputs = {} for output in outputs: revamped_outputs[output['output_key']] = output['output_value'] return revamped_outputs @staticmethod # pylint: disable=too-many-branches def oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames): """ Parses names from list of certificate hashes. 
Ex: certificates = [{ "certfile": "/root/custom1.crt", "keyfile": "/root/custom1.key" }, { "certfile": "custom2.crt", "keyfile": "custom2.key" }] returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt", "keyfile": "/etc/origin/master/named_certificates/custom1.key", "names": [ "public-master-host.com", "other-master-host.com" ] }, { "certfile": "/etc/origin/master/named_certificates/custom2.crt", "keyfile": "/etc/origin/master/named_certificates/custom2.key", "names": [ "some-hostname.com" ] }] """ if not isinstance(named_certs_dir, basestring): raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode") if not isinstance(internal_hostnames, list): raise errors.AnsibleFilterError("|failed expects internal_hostnames is list") for certificate in certificates: if 'names' in certificate.keys(): continue else: certificate['names'] = [] if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']): raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" % (certificate['certfile'], certificate['keyfile'])) try: st_cert = open(certificate['certfile'], 'rt').read() cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert) certificate['names'].append(str(cert.get_subject().commonName.decode())) for i in range(cert.get_extension_count()): if cert.get_extension(i).get_short_name() == 'subjectAltName': for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '): certificate['names'].append(name) except: raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] + "please specify certificate names in host inventory")) certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames] certificate['names'] = list(set(certificate['names'])) if not certificate['names']: raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] + "detected a collision with internal hostname, please specify " + "certificate names in host inventory")) for certificate in certificates: # Update paths for configuration certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile'])) certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile'])) return certificates @staticmethod def oo_pretty_print_cluster(data): """ Read a subset of hostvars and build a summary of the cluster in the following layout: "c_id": { "master": { "default": [ { "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1" } ] "node": { "infra": [ { "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2" } ], "compute": [ { "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3" }, ... ] } """ def _get_tag_value(tags, key): """ Extract values of a map implemented as a set. 
Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' } key = 'bar' returns 'value2' """ for tag in tags: if tag[:len(key)+4] == 'tag_' + key: return tag[len(key)+5:] raise KeyError(key) def _add_host(clusters, clusterid, host_type, sub_host_type, host): """ Add a new host in the clusters data structure """ if clusterid not in clusters: clusters[clusterid] = {} if host_type not in clusters[clusterid]: clusters[clusterid][host_type] = {} if sub_host_type not in clusters[clusterid][host_type]: clusters[clusterid][host_type][sub_host_type] = [] clusters[clusterid][host_type][sub_host_type].append(host) clusters = {} for host in data: try: _add_host(clusters=clusters, clusterid=_get_tag_value(host['group_names'], 'clusterid'), host_type=_get_tag_value(host['group_names'], 'host-type'), sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'), host={'name': host['inventory_hostname'], 'public IP': host['ansible_ssh_host'], 'private IP': host['ansible_default_ipv4']['address']}) except KeyError: pass return clusters @staticmethod def oo_generate_secret(num_bytes): """ generate a session secret """ if not isinstance(num_bytes, int): raise errors.AnsibleFilterError("|failed expects num_bytes is int") secret = os.urandom(num_bytes) return secret.encode('base-64').strip() @staticmethod def to_padded_yaml(data, level=0, indent=2, **kw): """ returns a yaml snippet padded to match the indent level you specify """ if data in [None, ""]: return "" try: transformed = yaml.safe_dump(data, indent=indent, allow_unicode=True, default_flow_style=False, **kw) padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()]) return to_unicode("\n{0}".format(padded)) except Exception as my_e: raise errors.AnsibleFilterError('Failed to convert: %s', my_e) @staticmethod def oo_openshift_env(hostvars): ''' Return facts which begin with "openshift_" and translate legacy facts to their openshift_env counterparts. Ex: hostvars = {'openshift_fact': 42, 'theyre_taking_the_hobbits_to': 'isengard'} returns = {'openshift_fact': 42} ''' if not issubclass(type(hostvars), dict): raise errors.AnsibleFilterError("|failed expects hostvars is a dict") facts = {} regex = re.compile('^openshift_.*') for key in hostvars: if regex.match(key): facts[key] = hostvars[key] migrations = {'openshift_router_selector': 'openshift_hosted_router_selector'} for old_fact, new_fact in migrations.iteritems(): if old_fact in facts and new_fact not in facts: facts[new_fact] = facts[old_fact] return facts @staticmethod # pylint: disable=too-many-branches def oo_persistent_volumes(hostvars, groups, persistent_volumes=None): """ Generate list of persistent volumes based on oo_openshift_env storage options set in host variables. 
""" if not issubclass(type(hostvars), dict): raise errors.AnsibleFilterError("|failed expects hostvars is a dict") if not issubclass(type(groups), dict): raise errors.AnsibleFilterError("|failed expects groups is a dict") if persistent_volumes != None and not issubclass(type(persistent_volumes), list): raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list") if persistent_volumes == None: persistent_volumes = [] if 'hosted' in hostvars['openshift']: for component in hostvars['openshift']['hosted']: if 'storage' in hostvars['openshift']['hosted'][component]: kind = hostvars['openshift']['hosted'][component]['storage']['kind'] create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv'] if kind != None and create_pv: if kind == 'nfs': host = hostvars['openshift']['hosted'][component]['storage']['host'] if host == None: if len(groups['oo_nfs_to_config']) > 0: host = groups['oo_nfs_to_config'][0] else: raise errors.AnsibleFilterError("|failed no storage host detected") directory = hostvars['openshift']['hosted'][component]['storage']['nfs']['directory'] volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name'] path = directory + '/' + volume size = hostvars['openshift']['hosted'][component]['storage']['volume']['size'] access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes'] persistent_volume = dict( name="{0}-volume".format(volume), capacity=size, access_modes=access_modes, storage=dict( nfs=dict( server=host, path=path))) persistent_volumes.append(persistent_volume) else: msg = "|failed invalid storage kind '{0}' for component '{1}'".format( kind, component) raise errors.AnsibleFilterError(msg) return persistent_volumes @staticmethod def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None): """ Generate list of persistent volume claims based on oo_openshift_env storage options set in host variables. """ if not issubclass(type(hostvars), dict): raise errors.AnsibleFilterError("|failed expects hostvars is a dict") if persistent_volume_claims != None and not issubclass(type(persistent_volume_claims), list): raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list") if persistent_volume_claims == None: persistent_volume_claims = [] if 'hosted' in hostvars['openshift']: for component in hostvars['openshift']['hosted']: if 'storage' in hostvars['openshift']['hosted'][component]: kind = hostvars['openshift']['hosted'][component]['storage']['kind'] create_pv = hostvars['openshift']['hosted'][component]['storage']['create_pv'] if kind != None and create_pv: volume = hostvars['openshift']['hosted'][component]['storage']['volume']['name'] size = hostvars['openshift']['hosted'][component]['storage']['volume']['size'] access_modes = hostvars['openshift']['hosted'][component]['storage']['access_modes'] persistent_volume_claim = dict( name="{0}-claim".format(volume), capacity=size, access_modes=access_modes) persistent_volume_claims.append(persistent_volume_claim) return persistent_volume_claims @staticmethod def oo_31_rpm_rename_conversion(rpms, openshift_version=None): """ Filters a list of 3.0 rpms and return the corresponding 3.1 rpms names with proper version (if provided) If 3.1 rpms are passed in they will only be augmented with the correct version. This is important for hosts that are running both Masters and Nodes. 
""" if not isinstance(rpms, list): raise errors.AnsibleFilterError("failed expects to filter on a list") if openshift_version is not None and not isinstance(openshift_version, basestring): raise errors.AnsibleFilterError("failed expects openshift_version to be a string") rpms_31 = [] for rpm in rpms: if not 'atomic' in rpm: rpm = rpm.replace("openshift", "atomic-openshift") if openshift_version: rpm = rpm + openshift_version rpms_31.append(rpm) return rpms_31 @staticmethod def oo_pods_match_component(pods, deployment_type, component): """ Filters a list of Pods and returns the ones matching the deployment_type and component """ if not isinstance(pods, list): raise errors.AnsibleFilterError("failed expects to filter on a list") if not isinstance(deployment_type, basestring): raise errors.AnsibleFilterError("failed expects deployment_type to be a string") if not isinstance(component, basestring): raise errors.AnsibleFilterError("failed expects component to be a string") image_prefix = 'openshift/origin-' if deployment_type in ['enterprise', 'online', 'openshift-enterprise']: image_prefix = 'openshift3/ose-' elif deployment_type == 'atomic-enterprise': image_prefix = 'aep3_beta/aep-' matching_pods = [] image_regex = image_prefix + component + r'.*' for pod in pods: for container in pod['spec']['containers']: if re.search(image_regex, container['image']): matching_pods.append(pod) break # stop here, don't add a pod more than once return matching_pods @staticmethod def oo_get_hosts_from_hostvars(hostvars, hosts): """ Return a list of hosts from hostvars """ retval = [] for host in hosts: try: retval.append(hostvars[host]) except errors.AnsibleError as _: # host does not exist pass return retval @staticmethod def oo_image_tag_to_rpm_version(version, include_dash=False): """ Convert an image tag string to an RPM version if necessary Empty strings and strings that are already in rpm version format are ignored. Also remove non semantic version components. Ex. 
v3.2.0.10 -> -3.2.0.10 v1.2.0-rc1 -> -1.2.0 """ if not isinstance(version, basestring): raise errors.AnsibleFilterError("|failed expects a string or unicode") # TODO: Do we need to make this actually convert v1.2.0-rc1 into 1.2.0-0.rc1 # We'd need to be really strict about how we build the RPM Version+Release if version.startswith("v"): version = version.replace("v", "") version = version.split('-')[0] if include_dash: version = "-" + version return version def filters(self): """ returns a mapping of filters to methods """ return { "oo_select_keys": self.oo_select_keys, "oo_select_keys_from_list": self.oo_select_keys_from_list, "oo_collect": self.oo_collect, "oo_flatten": self.oo_flatten, "oo_pdb": self.oo_pdb, "oo_prepend_strings_in_list": self.oo_prepend_strings_in_list, "oo_ami_selector": self.oo_ami_selector, "oo_ec2_volume_definition": self.oo_ec2_volume_definition, "oo_combine_key_value": self.oo_combine_key_value, "oo_combine_dict": self.oo_combine_dict, "oo_split": self.oo_split, "oo_filter_list": self.oo_filter_list, "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs, "oo_parse_named_certificates": self.oo_parse_named_certificates, "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters, "oo_pretty_print_cluster": self.oo_pretty_print_cluster, "oo_generate_secret": self.oo_generate_secret, "to_padded_yaml": self.to_padded_yaml, "oo_nodes_with_label": self.oo_nodes_with_label, "oo_openshift_env": self.oo_openshift_env, "oo_persistent_volumes": self.oo_persistent_volumes, "oo_persistent_volume_claims": self.oo_persistent_volume_claims, "oo_31_rpm_rename_conversion": self.oo_31_rpm_rename_conversion, "oo_pods_match_component": self.oo_pods_match_component, "oo_get_hosts_from_hostvars": self.oo_get_hosts_from_hostvars, "oo_image_tag_to_rpm_version": self.oo_image_tag_to_rpm_version, "oo_merge_dicts": self.oo_merge_dicts, "oo_oc_nodes_matching_selector": self.oo_oc_nodes_matching_selector, "oo_oc_nodes_with_label": self.oo_oc_nodes_with_label }
apache-2.0
taiwanlennon/flask-master
flask/testsuite/reqctx.py
557
5960
# -*- coding: utf-8 -*- """ flask.testsuite.reqctx ~~~~~~~~~~~~~~~~~~~~~~ Tests the request context. :copyright: (c) 2012 by Armin Ronacher. :license: BSD, see LICENSE for more details. """ import flask import unittest try: from greenlet import greenlet except ImportError: greenlet = None from flask.testsuite import FlaskTestCase class RequestContextTestCase(FlaskTestCase): def test_teardown_on_pop(self): buffer = [] app = flask.Flask(__name__) @app.teardown_request def end_of_request(exception): buffer.append(exception) ctx = app.test_request_context() ctx.push() self.assert_equal(buffer, []) ctx.pop() self.assert_equal(buffer, [None]) def test_proper_test_request_context(self): app = flask.Flask(__name__) app.config.update( SERVER_NAME='localhost.localdomain:5000' ) @app.route('/') def index(): return None @app.route('/', subdomain='foo') def sub(): return None with app.test_request_context('/'): self.assert_equal(flask.url_for('index', _external=True), 'http://localhost.localdomain:5000/') with app.test_request_context('/'): self.assert_equal(flask.url_for('sub', _external=True), 'http://foo.localhost.localdomain:5000/') try: with app.test_request_context('/', environ_overrides={'HTTP_HOST': 'localhost'}): pass except Exception as e: self.assert_true(isinstance(e, ValueError)) self.assert_equal(str(e), "the server name provided " + "('localhost.localdomain:5000') does not match the " + \ "server name from the WSGI environment ('localhost')") try: app.config.update(SERVER_NAME='localhost') with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost'}): pass except ValueError as e: raise ValueError( "No ValueError exception should have been raised \"%s\"" % e ) try: app.config.update(SERVER_NAME='localhost:80') with app.test_request_context('/', environ_overrides={'SERVER_NAME': 'localhost:80'}): pass except ValueError as e: raise ValueError( "No ValueError exception should have been raised \"%s\"" % e ) def test_context_binding(self): app = flask.Flask(__name__) @app.route('/') def index(): return 'Hello %s!' % flask.request.args['name'] @app.route('/meh') def meh(): return flask.request.url with app.test_request_context('/?name=World'): self.assert_equal(index(), 'Hello World!') with app.test_request_context('/meh'): self.assert_equal(meh(), 'http://localhost/meh') self.assert_true(flask._request_ctx_stack.top is None) def test_context_test(self): app = flask.Flask(__name__) self.assert_false(flask.request) self.assert_false(flask.has_request_context()) ctx = app.test_request_context() ctx.push() try: self.assert_true(flask.request) self.assert_true(flask.has_request_context()) finally: ctx.pop() def test_manual_context_binding(self): app = flask.Flask(__name__) @app.route('/') def index(): return 'Hello %s!' % flask.request.args['name'] ctx = app.test_request_context('/?name=World') ctx.push() self.assert_equal(index(), 'Hello World!') ctx.pop() try: index() except RuntimeError: pass else: self.assert_true(0, 'expected runtime error') def test_greenlet_context_copying(self): app = flask.Flask(__name__) greenlets = [] @app.route('/') def index(): reqctx = flask._request_ctx_stack.top.copy() def g(): self.assert_false(flask.request) self.assert_false(flask.current_app) with reqctx: self.assert_true(flask.request) self.assert_equal(flask.current_app, app) self.assert_equal(flask.request.path, '/') self.assert_equal(flask.request.args['foo'], 'bar') self.assert_false(flask.request) return 42 greenlets.append(greenlet(g)) return 'Hello World!' 
rv = app.test_client().get('/?foo=bar') self.assert_equal(rv.data, b'Hello World!') result = greenlets[0].run() self.assert_equal(result, 42) def test_greenlet_context_copying_api(self): app = flask.Flask(__name__) greenlets = [] @app.route('/') def index(): reqctx = flask._request_ctx_stack.top.copy() @flask.copy_current_request_context def g(): self.assert_true(flask.request) self.assert_equal(flask.current_app, app) self.assert_equal(flask.request.path, '/') self.assert_equal(flask.request.args['foo'], 'bar') return 42 greenlets.append(greenlet(g)) return 'Hello World!' rv = app.test_client().get('/?foo=bar') self.assert_equal(rv.data, b'Hello World!') result = greenlets[0].run() self.assert_equal(result, 42) # Disable test if we don't have greenlets available if greenlet is None: test_greenlet_context_copying = None test_greenlet_context_copying_api = None def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(RequestContextTestCase)) return suite
bsd-3-clause
orgito/ansible
lib/ansible/modules/network/a10/a10_service_group.py
18
13223
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2014, Mischa Peters <[email protected]>, # Eric Chou <[email protected]> # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: a10_service_group version_added: 1.8 short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' service groups. description: - Manage SLB (Server Load Balancing) service-group objects on A10 Networks devices via aXAPIv2. author: - Eric Chou (@ericchou) - Mischa Peters (@mischapeters) notes: - Requires A10 Networks aXAPI 2.1. - When a server doesn't exist and is added to the service-group the server will be created. extends_documentation_fragment: - a10 - url options: state: description: - If the specified service group should exists. default: present choices: ['present', 'absent'] partition: version_added: "2.3" description: - set active-partition service_group: description: - The SLB (Server Load Balancing) service-group name required: true aliases: ['service', 'pool', 'group'] service_group_protocol: description: - The SLB service-group protocol of TCP or UDP. default: tcp aliases: ['proto', 'protocol'] choices: ['tcp', 'udp'] service_group_method: description: - The SLB service-group load balancing method, such as round-robin or weighted-rr. default: round-robin aliases: ['method'] choices: - 'round-robin' - 'weighted-rr' - 'least-connection' - 'weighted-least-connection' - 'service-least-connection' - 'service-weighted-least-connection' - 'fastest-response' - 'least-request' - 'round-robin-strict' - 'src-ip-only-hash' - 'src-ip-hash' servers: description: - A list of servers to add to the service group. Each list item should be a dictionary which specifies the C(server:) and C(port:), but can also optionally specify the C(status:). See the examples below for details. aliases: ['server', 'member'] validate_certs: description: - If C(no), SSL certificates will not be validated. This should only be used on personally controlled devices using self-signed certificates. 
type: bool default: 'yes' ''' EXAMPLES = ''' # Create a new service-group - a10_service_group: host: a10.mydomain.com username: myadmin password: mypassword partition: mypartition service_group: sg-80-tcp servers: - server: foo1.mydomain.com port: 8080 - server: foo2.mydomain.com port: 8080 - server: foo3.mydomain.com port: 8080 - server: foo4.mydomain.com port: 8080 status: disabled ''' RETURN = ''' content: description: the full info regarding the slb_service_group returned: success type: str sample: "mynewservicegroup" ''' import json from ansible.module_utils.network.a10.a10 import (axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, axapi_enabled_disabled) from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.urls import url_argument_spec VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method'] VALID_SERVER_FIELDS = ['server', 'port', 'status'] def validate_servers(module, servers): for item in servers: for key in item: if key not in VALID_SERVER_FIELDS: module.fail_json(msg="invalid server field (%s), must be one of: %s" % (key, ','.join(VALID_SERVER_FIELDS))) # validate the server name is present if 'server' not in item: module.fail_json(msg="server definitions must define the server field") # validate the port number is present and an integer if 'port' in item: try: item['port'] = int(item['port']) except Exception: module.fail_json(msg="server port definitions must be integers") else: module.fail_json(msg="server definitions must define the port field") # convert the status to the internal API integer value if 'status' in item: item['status'] = axapi_enabled_disabled(item['status']) else: item['status'] = 1 def main(): argument_spec = a10_argument_spec() argument_spec.update(url_argument_spec()) argument_spec.update( dict( state=dict(type='str', default='present', choices=['present', 'absent']), service_group=dict(type='str', aliases=['service', 'pool', 'group'], required=True), service_group_protocol=dict(type='str', default='tcp', aliases=['proto', 'protocol'], choices=['tcp', 'udp']), service_group_method=dict(type='str', default='round-robin', aliases=['method'], choices=['round-robin', 'weighted-rr', 'least-connection', 'weighted-least-connection', 'service-least-connection', 'service-weighted-least-connection', 'fastest-response', 'least-request', 'round-robin-strict', 'src-ip-only-hash', 'src-ip-hash']), servers=dict(type='list', aliases=['server', 'member'], default=[]), partition=dict(type='str', default=[]), ) ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=False ) host = module.params['host'] username = module.params['username'] password = module.params['password'] partition = module.params['partition'] state = module.params['state'] write_config = module.params['write_config'] slb_service_group = module.params['service_group'] slb_service_group_proto = module.params['service_group_protocol'] slb_service_group_method = module.params['service_group_method'] slb_servers = module.params['servers'] if slb_service_group is None: module.fail_json(msg='service_group is required') axapi_base_url = 'https://' + host + '/services/rest/V2.1/?format=json' load_balancing_methods = {'round-robin': 0, 'weighted-rr': 1, 'least-connection': 2, 'weighted-least-connection': 3, 'service-least-connection': 4, 'service-weighted-least-connection': 5, 'fastest-response': 6, 'least-request': 7, 'round-robin-strict': 8, 'src-ip-only-hash': 14, 'src-ip-hash': 15} if not slb_service_group_proto or 
slb_service_group_proto.lower() == 'tcp': protocol = 2 else: protocol = 3 # validate the server data list structure validate_servers(module, slb_servers) json_post = { 'service_group': { 'name': slb_service_group, 'protocol': protocol, 'lb_method': load_balancing_methods[slb_service_group_method], } } # first we authenticate to get a session id session_url = axapi_authenticate(module, axapi_base_url, username, password) # then we select the active-partition axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition})) # then we check to see if the specified group exists slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group})) slb_service_group_exist = not axapi_failure(slb_result) changed = False if state == 'present': # before creating/updating we need to validate that servers # defined in the servers list exist to prevent errors checked_servers = [] for server in slb_servers: result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': server['server']})) if axapi_failure(result): module.fail_json(msg="the server %s specified in the servers list does not exist" % server['server']) checked_servers.append(server['server']) if not slb_service_group_exist: result = axapi_call(module, session_url + '&method=slb.service_group.create', json.dumps(json_post)) if axapi_failure(result): module.fail_json(msg=result['response']['err']['msg']) changed = True else: # check to see if the service group definition without the # server members is different, and update that individually # if it needs it do_update = False for field in VALID_SERVICE_GROUP_FIELDS: if json_post['service_group'][field] != slb_result['service_group'][field]: do_update = True break if do_update: result = axapi_call(module, session_url + '&method=slb.service_group.update', json.dumps(json_post)) if axapi_failure(result): module.fail_json(msg=result['response']['err']['msg']) changed = True # next we pull the defined list of servers out of the returned # results to make it a bit easier to iterate over defined_servers = slb_result.get('service_group', {}).get('member_list', []) # next we add/update new member servers from the user-specified # list if they're different or not on the target device for server in slb_servers: found = False different = False for def_server in defined_servers: if server['server'] == def_server['server']: found = True for valid_field in VALID_SERVER_FIELDS: if server[valid_field] != def_server[valid_field]: different = True break if found or different: break # add or update as required server_data = { "name": slb_service_group, "member": server, } if not found: result = axapi_call(module, session_url + '&method=slb.service_group.member.create', json.dumps(server_data)) changed = True elif different: result = axapi_call(module, session_url + '&method=slb.service_group.member.update', json.dumps(server_data)) changed = True # finally, remove any servers that are on the target # device but were not specified in the list given for server in defined_servers: found = False for slb_server in slb_servers: if server['server'] == slb_server['server']: found = True break # remove if not found server_data = { "name": slb_service_group, "member": server, } if not found: result = axapi_call(module, session_url + '&method=slb.service_group.member.delete', json.dumps(server_data)) changed = True # if we changed things, get the full info regarding # the service group for the return data 
below if changed: result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group})) else: result = slb_result elif state == 'absent': if slb_service_group_exist: result = axapi_call(module, session_url + '&method=slb.service_group.delete', json.dumps({'name': slb_service_group})) changed = True else: result = dict(msg="the service group was not present") # if the config has changed, save the config unless otherwise requested if changed and write_config: write_result = axapi_call(module, session_url + '&method=system.action.write_memory') if axapi_failure(write_result): module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg']) # log out of the session nicely and exit axapi_call(module, session_url + '&method=session.close') module.exit_json(changed=changed, content=result) if __name__ == '__main__': main()
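For illustration, a minimal standalone sketch of the member-reconciliation idea used in main() above: compare the desired server list against what the device reports and split it into create/update/delete sets. The helper name and sample data below are assumptions for illustration only, not part of the module, and nothing here calls the A10 aXAPI.

def diff_members(desired, existing, fields=("server", "port", "status")):
    """Split desired vs. existing members into (create, update, delete) lists."""
    existing_by_name = {m["server"]: m for m in existing}
    desired_names = {m["server"] for m in desired}

    to_create = [m for m in desired if m["server"] not in existing_by_name]
    to_update = [
        m for m in desired
        if m["server"] in existing_by_name
        and any(m.get(f) != existing_by_name[m["server"]].get(f) for f in fields)
    ]
    to_delete = [m for m in existing if m["server"] not in desired_names]
    return to_create, to_update, to_delete


if __name__ == "__main__":
    desired = [{"server": "foo1.mydomain.com", "port": 8080, "status": 1},
               {"server": "foo2.mydomain.com", "port": 8081, "status": 1}]
    existing = [{"server": "foo2.mydomain.com", "port": 8080, "status": 1},
                {"server": "foo3.mydomain.com", "port": 8080, "status": 1}]
    create, update, delete = diff_members(desired, existing)
    # foo1 is new, foo2 changed its port, foo3 is no longer wanted
    print(create, update, delete)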
gpl-3.0
googlei18n/glyphsLib
tests/writer_test.py
1
34927
# coding=UTF-8 # # Copyright 2016 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from textwrap import dedent from collections import OrderedDict import os from fontTools.misc.py23 import UnicodeIO from glyphsLib import classes from glyphsLib.types import parse_datetime, Point, Rect from glyphsLib.writer import dump, dumps from glyphsLib.parser import Parser from . import test_helpers class WriterTest(unittest.TestCase, test_helpers.AssertLinesEqual): def assertWrites(self, glyphs_object, text): """Assert that the given object, when given to the writer, produces the given text. """ expected = text.splitlines() actual = test_helpers.write_to_lines(glyphs_object) self.assertLinesEqual( expected, actual, "The writer has not produced the expected output" ) def assertWritesValue(self, glyphs_value, text): """Assert that the writer produces the given text for the given value.""" expected = ( dedent( """\ {{ writtenValue = {0}; }} """ ) .format(text) .splitlines() ) # We wrap the value in a dict to use the same test helper actual = test_helpers.write_to_lines({"writtenValue": glyphs_value}) self.assertLinesEqual( expected, actual, "The writer has not produced the expected output" ) def test_write_font_attributes(self): """Test the writer on all GSFont attributes""" font = classes.GSFont() # List of properties from https://docu.glyphsapp.com/#gsfont # parent: not handled because it's internal and read-only # masters m1 = classes.GSFontMaster() m1.id = "M1" font.masters.insert(0, m1) m2 = classes.GSFontMaster() m2.id = "M2" font.masters.insert(1, m2) # instances i1 = classes.GSInstance() i1.name = "MuchBold" font.instances.append(i1) # glyphs g1 = classes.GSGlyph() g1.name = "G1" font.glyphs.append(g1) # classes c1 = classes.GSClass() c1.name = "C1" font.classes.append(c1) # features f1 = classes.GSFeature() f1.name = "F1" font.features.append(f1) # featurePrefixes fp1 = classes.GSFeaturePrefix() fp1 = "FP1" font.featurePrefixes.append(fp1) # copyright font.copyright = "Copyright Bob" # designer font.designer = "Bob" # designerURL font.designerURL = "bob.me" # manufacturer font.manufacturer = "Manu" # manufacturerURL font.manufacturerURL = "manu.com" # versionMajor font.versionMajor = 2 # versionMinor font.versionMinor = 104 # date font.date = parse_datetime("2017-10-03 07:35:46 +0000") # familyName font.familyName = "Sans Rien" # upm font.upm = 2000 # note font.note = "Was bored, made this" # kerning font.kerning = OrderedDict( [("M1", OrderedDict([("@MMK_L_G1", OrderedDict([("@MMK_R_G1", 0.1)]))]))] ) # userData font.userData = { "a": "test", "b": [1, {"c": 2}], "d": [1, "1"], "noodleThickness": "106.0", } # grid -> gridLength font.grid = 35 # gridSubDivisions font.gridSubDivisions = 5 # keyboardIncrement font.keyboardIncrement = 1.2 # disablesNiceNames font.disablesNiceNames = True # customParameters font.customParameters["ascender"] = 300 # selection: not written # selectedLayers: not written # selectedFontMaster: not written # masterIndex: not written # 
currentText: not written # tabs: not written # currentTab: not written # filepath: not written # tool: not written # tools: not handled because it is a read-only list of GUI features # .appVersion (extra property that is not in the docs!) font.appVersion = "895" self.assertWrites( font, dedent( """\ { .appVersion = "895"; classes = ( { code = ""; name = C1; } ); copyright = "Copyright Bob"; customParameters = ( { name = note; value = "Was bored, made this"; }, { name = ascender; value = 300; } ); date = "2017-10-03 07:35:46 +0000"; designer = Bob; designerURL = bob.me; disablesNiceNames = 1; familyName = "Sans Rien"; featurePrefixes = ( FP1 ); features = ( { code = ""; name = F1; } ); fontMaster = ( { ascender = 800; capHeight = 700; descender = -200; id = M1; xHeight = 500; }, { ascender = 800; capHeight = 700; descender = -200; id = M2; xHeight = 500; } ); glyphs = ( { glyphname = G1; } ); gridLength = 35; gridSubDivision = 5; instances = ( { name = MuchBold; } ); kerning = { M1 = { "@MMK_L_G1" = { "@MMK_R_G1" = 0.1; }; }; }; keyboardIncrement = 1.2; manufacturer = Manu; manufacturerURL = manu.com; unitsPerEm = 2000; userData = { a = test; b = ( 1, { c = 2; } ); d = ( 1, "1" ); noodleThickness = "106.0"; }; versionMajor = 2; versionMinor = 104; } """ ), ) # Don't write the keyboardIncrement if it's 1 (default) font.keyboardIncrement = 1 written = test_helpers.write_to_lines(font) self.assertFalse(any("keyboardIncrement" in line for line in written)) # Always write versionMajor and versionMinor, even when 0 font.versionMajor = 0 font.versionMinor = 0 written = test_helpers.write_to_lines(font) self.assertIn("versionMajor = 0;", written) self.assertIn("versionMinor = 0;", written) def test_write_font_master_attributes(self): """Test the writer on all GSFontMaster attributes""" master = classes.GSFontMaster() # List of properties from https://docu.glyphsapp.com/#gsfontmaster # id master.id = "MASTER-ID" # name master._name = "Name Hairline Megawide" master.customParameters["Master Name"] = "Param Hairline Megawide" # weight master.weight = "Thin" # width master.width = "Wide" # weightValue master.weightValue = 0.01 # widthValue master.widthValue = 0.99 # customValue # customName master.customName = "Overextended" # A value of 0.0 is not written to the file. 
master.customValue = 0.001 master.customValue1 = 0.1 master.customValue2 = 0.2 master.customValue3 = 0.3 # ascender master.ascender = 234.5 # capHeight master.capHeight = 200.6 # xHeight master.xHeight = 59.1 # descender master.descender = -89.2 # italicAngle master.italicAngle = 12.2 # verticalStems master.verticalStems = [1, 2, 3] # horizontalStems master.horizontalStems = [4, 5, 6] # alignmentZones zone = classes.GSAlignmentZone(0, -30) master.alignmentZones = [zone] # blueValues: not handled because it is read-only # otherBlues: not handled because it is read-only # guides guide = classes.GSGuideLine() guide.name = "middle" master.guides.append(guide) # userData master.userData["rememberToMakeTea"] = True # customParameters master.customParameters["underlinePosition"] = -135 self.assertWrites( master, dedent( """\ { alignmentZones = ( "{0, -30}" ); ascender = 234.5; capHeight = 200.6; custom = Overextended; customValue = 0.001; customValue1 = 0.1; customValue2 = 0.2; customValue3 = 0.3; customParameters = ( { name = "Master Name"; value = "Param Hairline Megawide"; }, { name = underlinePosition; value = -135; } ); descender = -89.2; guideLines = ( { name = middle; } ); horizontalStems = ( 4, 5, 6 ); id = "MASTER-ID"; italicAngle = 12.2; name = "Name Hairline Megawide"; userData = { rememberToMakeTea = 1; }; verticalStems = ( 1, 2, 3 ); weight = Thin; weightValue = 0.01; width = Wide; widthValue = 0.99; xHeight = 59.1; } """ ), ) # Write the capHeight and xHeight even if they are "0" master.xHeight = 0 master.capHeight = 0 written = test_helpers.write_to_lines(master) self.assertIn("xHeight = 0;", written) self.assertIn("capHeight = 0;", written) def test_write_alignment_zone(self): zone = classes.GSAlignmentZone(23, 40) self.assertWritesValue(zone, '"{23, 40}"') def test_write_instance(self): instance = classes.GSInstance() # List of properties from https://docu.glyphsapp.com/#gsinstance # active instance.active = True # name instance.name = "SemiBoldCompressed (name)" # weight instance.weight = "SemiBold (weight)" # width instance.width = "Compressed (width)" # weightValue instance.weightValue = 600 # widthValue instance.widthValue = 200 # customValue instance.customValue = 0.4 # isItalic instance.isItalic = True # isBold instance.isBold = True # linkStyle instance.linkStyle = "linked style value" # familyName instance.familyName = "Sans Rien (familyName)" # preferredFamily instance.preferredFamily = "Sans Rien (preferredFamily)" # preferredSubfamilyName instance.preferredSubfamilyName = ( "Semi Bold Compressed (preferredSubFamilyName)" ) # windowsFamily instance.windowsFamily = "Sans Rien MS (windowsFamily)" # windowsStyle: read only # windowsLinkedToStyle: read only # fontName instance.fontName = "SansRien (fontName)" # fullName instance.fullName = "Sans Rien Semi Bold Compressed (fullName)" # customParameters instance.customParameters["hheaLineGap"] = 10 # instanceInterpolations instance.instanceInterpolations = {"M1": 0.2, "M2": 0.8} # manualInterpolation instance.manualInterpolation = True # interpolatedFont: read only self.assertWrites( instance, dedent( """\ { customParameters = ( { name = familyName; value = "Sans Rien (familyName)"; }, { name = preferredFamily; value = "Sans Rien (preferredFamily)"; }, { name = preferredSubfamilyName; value = "Semi Bold Compressed (preferredSubFamilyName)"; }, { name = styleMapFamilyName; value = "Sans Rien MS (windowsFamily)"; }, { name = postscriptFontName; value = "SansRien (fontName)"; }, { name = postscriptFullName; value = "Sans Rien 
Semi Bold Compressed (fullName)"; }, { name = hheaLineGap; value = 10; } ); interpolationCustom = 0.4; interpolationWeight = 600; interpolationWidth = 200; instanceInterpolations = { M1 = 0.2; M2 = 0.8; }; isBold = 1; isItalic = 1; linkStyle = "linked style value"; manualInterpolation = 1; name = "SemiBoldCompressed (name)"; weightClass = "SemiBold (weight)"; widthClass = "Compressed (width)"; } """ ), ) def test_write_custom_parameter(self): # Name without quotes self.assertWritesValue( classes.GSCustomParameter("myParam", "myValue"), "{\nname = myParam;\nvalue = myValue;\n}", ) # Name with quotes self.assertWritesValue( classes.GSCustomParameter("my param", "myValue"), '{\nname = "my param";\nvalue = myValue;\n}', ) # Value with quotes self.assertWritesValue( classes.GSCustomParameter("myParam", "my value"), '{\nname = myParam;\nvalue = "my value";\n}', ) # Int param (ascender): should convert the value to string self.assertWritesValue( classes.GSCustomParameter("ascender", 12), "{\nname = ascender;\nvalue = 12;\n}", ) # Float param (postscriptBlueScale): should convert the value to string self.assertWritesValue( classes.GSCustomParameter("postscriptBlueScale", 0.125), "{\nname = postscriptBlueScale;\nvalue = 0.125;\n}", ) # Bool param (isFixedPitch): should convert the boolean value to 0/1 self.assertWritesValue( classes.GSCustomParameter("isFixedPitch", True), "{\nname = isFixedPitch;\nvalue = 1;\n}", ) # Intlist param: should map list of int to list of strings self.assertWritesValue( classes.GSCustomParameter("fsType", [1, 2]), "{\nname = fsType;\nvalue = (\n1,\n2\n);\n}", ) def test_write_class(self): class_ = classes.GSClass() class_.name = "e" class_.code = "e eacute egrave" class_.automatic = True self.assertWrites( class_, dedent( """\ { automatic = 1; code = "e eacute egrave"; name = e; } """ ), ) # When the code is an empty string, write an empty string class_.code = "" self.assertWrites( class_, dedent( """\ { automatic = 1; code = ""; name = e; } """ ), ) def test_write_feature_prefix(self): fp = classes.GSFeaturePrefix() fp.name = "Languagesystems" fp.code = "languagesystem DFLT dflt;" fp.automatic = True self.assertWrites( fp, dedent( """\ { automatic = 1; code = "languagesystem DFLT dflt;"; name = Languagesystems; } """ ), ) def test_write_feature(self): feature = classes.GSFeature() feature.name = "sups" feature.code = " sub @standard by @sups;" feature.automatic = True feature.notes = "notes about sups" self.assertWrites( feature, dedent( """\ { automatic = 1; code = " sub @standard by @sups;"; name = sups; notes = "notes about sups"; } """ ), ) def test_write_glyph(self): glyph = classes.GSGlyph() # https://docu.glyphsapp.com/#gsglyph # parent: not written # layers # Put the glyph in a font with at least one master for the magic in # `glyph.layers.append()` to work. 
font = classes.GSFont() master = classes.GSFontMaster() master.id = "MASTER-ID" font.masters.insert(0, master) font.glyphs.append(glyph) layer = classes.GSLayer() layer.layerId = "LAYER-ID" layer.name = "L1" glyph.layers.insert(0, layer) # name glyph.name = "Aacute" # unicode glyph.unicode = "00C1" # string: not written # id: not written # category glyph.category = "Letter" # subCategory glyph.subCategory = "Uppercase" # script glyph.script = "latin" # productionName glyph.productionName = "Aacute.prod" # glyphInfo: not written # leftKerningGroup glyph.leftKerningGroup = "A" # rightKerningGroup glyph.rightKerningGroup = "A" # leftKerningKey: not written # rightKerningKey: not written # leftMetricsKey glyph.leftMetricsKey = "A" # rightMetricsKey glyph.rightMetricsKey = "A" # widthMetricsKey glyph.widthMetricsKey = "A" # export glyph.export = False # color glyph.color = 11 # colorObject: not written # note glyph.note = "Stunning one-bedroom A with renovated acute accent" # selected: not written # mastersCompatible: not stored # userData glyph.userData["rememberToMakeCoffe"] = True # Check that empty collections are written glyph.userData["com.someoneelse.coolsoftware.customdata"] = [ OrderedDict( [("zero", 0), ("emptyList", []), ("emptyDict", {}), ("emptyString", "")] ), [], {}, "", "hey", 0, 1, ] # smartComponentAxes axis = classes.GSSmartComponentAxis() axis.name = "crotchDepth" glyph.smartComponentAxes.append(axis) # lastChange glyph.lastChange = parse_datetime("2017-10-03 07:35:46 +0000") self.assertWrites( glyph, dedent( """\ { color = 11; export = 0; glyphname = Aacute; lastChange = "2017-10-03 07:35:46 +0000"; layers = ( { associatedMasterId = "MASTER-ID"; layerId = "LAYER-ID"; name = L1; width = 600; } ); leftKerningGroup = A; leftMetricsKey = A; widthMetricsKey = A; note = "Stunning one-bedroom A with renovated acute accent"; rightKerningGroup = A; rightMetricsKey = A; unicode = 00C1; script = latin; category = Letter; subCategory = Uppercase; userData = { com.someoneelse.coolsoftware.customdata = ( { zero = 0; emptyList = ( ); emptyDict = { }; emptyString = ""; }, ( ), { }, "", hey, 0, 1 ); rememberToMakeCoffe = 1; }; partsSettings = ( { name = crotchDepth; bottomValue = 0; topValue = 0; } ); } """ ), ) # Write the script even when it's an empty string # Same for category and subCategory glyph.script = "" glyph.category = "" glyph.subCategory = "" written = test_helpers.write_to_lines(glyph) self.assertIn('script = "";', written) self.assertIn('category = "";', written) self.assertIn('subCategory = "";', written) # Write double unicodes glyph.unicodes = ["00C1", "E002"] written = test_helpers.write_to_lines(glyph) self.assertIn('unicode = "00C1,E002";', written) def test_write_layer(self): layer = classes.GSLayer() # http://docu.glyphsapp.com/#gslayer # parent: not written # name layer.name = "{125, 100}" # associatedMasterId layer.associatedMasterId = "M1" # layerId layer.layerId = "L1" # color layer.color = (1, 2, 3, 4) # colorObject: read-only, computed # components component = classes.GSComponent(glyph="glyphName") layer.components.append(component) # guides guide = classes.GSGuideLine() guide.name = "xheight" layer.guides.append(guide) # annotations annotation = classes.GSAnnotation() annotation.type = classes.TEXT annotation.text = "Fuck, this curve is ugly!" 
layer.annotations.append(annotation) # hints hint = classes.GSHint() hint.name = "hintName" layer.hints.append(hint) # anchors anchor = classes.GSAnchor() anchor.name = "top" layer.anchors["top"] = anchor # paths path = classes.GSPath() layer.paths.append(path) # selection: read-only # LSB, RSB, TSB, BSB: not written # width layer.width = 890.4 # leftMetricsKey layer.leftMetricsKey = "A" # rightMetricsKey layer.rightMetricsKey = "A" # widthMetricsKey layer.widthMetricsKey = "A" # bounds: read-only, computed # selectionBounds: read-only, computed # background # XXX bg is unused? bg = layer.background # noqa: F841 # backgroundImage image = classes.GSBackgroundImage("/path/to/file.jpg") layer.backgroundImage = image # bezierPath: read-only, objective-c # openBezierPath: read-only, objective-c # completeOpenBezierPath: read-only, objective-c # isAligned # FIXME: (jany) is this read-only? # is this computed from each component's alignment? # layer.isAligned = False # userData layer.userData["rememberToMakeCoffe"] = True # smartComponentPoleMapping layer.smartComponentPoleMapping["crotchDepth"] = 2 # Top pole layer.smartComponentPoleMapping["shoulderWidth"] = 1 # Bottom pole self.assertWrites( layer, dedent( """\ { anchors = ( { name = top; position = "{0, 0}"; } ); annotations = ( { text = "Fuck, this curve is ugly!"; type = 1; } ); associatedMasterId = M1; background = { }; backgroundImage = { crop = "{{0, 0}, {0, 0}}"; imagePath = "/path/to/file.jpg"; }; color = (1, 2, 3, 4); components = ( { name = glyphName; } ); guideLines = ( { name = xheight; } ); hints = ( { name = hintName; } ); layerId = L1; leftMetricsKey = A; widthMetricsKey = A; rightMetricsKey = A; name = "{125, 100}"; paths = ( { closed = 1; } ); userData = { PartSelection = { crotchDepth = 2; shoulderWidth = 1; }; rememberToMakeCoffe = 1; }; width = 890.4; } """ ), ) # Don't write a blank layer name layer.name = "" written = test_helpers.write_to_lines(layer) self.assertNotIn('name = "";', written) # Write the width even if 0 layer.width = 0 written = test_helpers.write_to_lines(layer) self.assertIn("width = 0;", written) def test_write_anchor(self): anchor = classes.GSAnchor("top", Point(23, 45.5)) self.assertWrites( anchor, dedent( """\ { name = top; position = "{23, 45.5}"; } """ ), ) # Write a position of 0, 0 anchor = classes.GSAnchor("top", Point(0, 0)) self.assertWrites( anchor, dedent( """\ { name = top; position = "{0, 0}"; } """ ), ) def test_write_component(self): component = classes.GSComponent("dieresis") # http://docu.glyphsapp.com/#gscomponent # position component.position = Point(45.5, 250) # scale component.scale = 2.0 # rotation component.rotation = 90 # componentName: already set at init # component: read-only # layer: read-only # transform: already set using scale & position # bounds: read-only, objective-c # automaticAlignment component.automaticAlignment = True # anchor component.anchor = "top" # selected: not written # smartComponentValues component.smartComponentValues = {"crotchDepth": -77} # bezierPath: read-only, objective-c self.assertWrites( component, dedent( """\ { anchor = top; name = dieresis; piece = { crotchDepth = -77; }; transform = "{0, 2, -2, 0, 45.5, 250}"; } """ ), ) def test_write_smart_component_axis(self): axis = classes.GSSmartComponentAxis() # http://docu.glyphsapp.com/#gssmartcomponentaxis axis.name = "crotchDepth" axis.topName = "High" axis.topValue = 0 axis.bottomName = "Low" axis.bottomValue = -100 self.assertWrites( axis, dedent( """\ { name = crotchDepth; bottomName = Low; 
bottomValue = -100; topName = High; topValue = 0; } """ ), ) def test_write_path(self): path = classes.GSPath() # http://docu.glyphsapp.com/#gspath # parent: not written # nodes node = classes.GSNode() path.nodes.append(node) # segments: computed, objective-c # closed path.closed = True # direction: computed # bounds: computed # selected: not written # bezierPath: computed self.assertWrites( path, dedent( """\ { closed = 1; nodes = ( "0 0 LINE" ); } """ ), ) def test_write_node(self): node = classes.GSNode(Point(10, 30), classes.GSNode.CURVE) # http://docu.glyphsapp.com/#gsnode # position: already set # type: already set # smooth node.smooth = True # connection: deprecated # selected: not written # index, nextNode, prevNode: computed # name node.name = "top-left corner" # userData node.userData["rememberToDownloadARealRemindersApp"] = True self.assertWritesValue( node, '"10 30 CURVE SMOOTH {name = \\"top-left corner\\";\\n\ rememberToDownloadARealRemindersApp = 1;}"', ) # Write floating point coordinates node = classes.GSNode(Point(499.99, 512.01), classes.GSNode.OFFCURVE) self.assertWritesValue(node, '"499.99 512.01 OFFCURVE"') # Write userData with special characters test_user_data = { "\nkey\"';\n\n\n": "\"'value\nseveral lines\n;\n", ";": ";\n", "escapeception": "\\\"\\'\\n\\\\n", } node = classes.GSNode(Point(130, 431), classes.GSNode.LINE) for key, value in test_user_data.items(): node.userData[key] = value # This is the output of Glyphs 1089 expected_output = ( '"130 431 LINE {\\"\\012key\\\\"\';\\012\\012\\012\\" ' '= \\"\\\\"\'value\\012several lines\\012;\\012\\"' ';\\n\\";\\" = \\";\\012\\";\\n' 'escapeception = \\"\\\\\\\\"\\\\\'\\\\\\n\\\\\\\\\\n\\";}"' ) self.assertWritesValue(node, expected_output) # Check that we can read the userData back node = Parser(classes.GSNode).parse(expected_output) self.assertEqual(test_user_data, dict(node.userData)) def test_write_guideline(self): line = classes.GSGuideLine() # http://docu.glyphsapp.com/#GSGuideLine line.position = Point(56, 45) line.angle = 11.0 line.name = "italic angle" # selected: not written self.assertWrites( line, dedent( """\ { angle = 11; name = "italic angle"; position = "{56, 45}"; } """ ), ) def test_write_annotation(self): annotation = classes.GSAnnotation() # http://docu.glyphsapp.com/#gsannotation annotation.position = Point(12, 34) annotation.type = classes.TEXT annotation.text = "Look here" annotation.angle = 123.5 annotation.width = 135 self.assertWrites( annotation, dedent( """\ { angle = 123.5; position = "{12, 34}"; text = "Look here"; type = 1; width = 135; } """ ), ) def test_write_hint(self): hint = classes.GSHint() # http://docu.glyphsapp.com/#gshint layer = classes.GSLayer() path1 = classes.GSPath() layer.paths.append(path1) node1 = classes.GSNode(Point(100, 100)) path1.nodes.append(node1) hint.originNode = node1 node2 = classes.GSNode(Point(200, 200)) path1.nodes.append(node2) hint.targetNode = node2 node3 = classes.GSNode(Point(300, 300)) path1.nodes.append(node3) hint.otherNode1 = node3 path2 = classes.GSPath() layer.paths.append(path2) node4 = classes.GSNode(Point(400, 400)) path2.nodes.append(node4) hint.otherNode2 = node4 hint.type = classes.CORNER hint.options = classes.TTROUND | classes.TRIPLE hint.horizontal = True # selected: not written hint.name = "My favourite hint" self.assertWrites( hint, dedent( """\ { horizontal = 1; origin = "{0, 0}"; target = "{0, 1}"; other1 = "{0, 2}"; other2 = "{1, 0}"; type = 16; name = "My favourite hint"; options = 128; } """ ), ) # FIXME: (jany) What about the 
undocumented scale & stem? # -> Add a test for that # Test with target = "up" # FIXME: (jany) what does target = "up" mean? # Is there an official python API to write that? # hint.targetNode = 'up' # written = test_helpers.write_to_lines(hint) # self.assertIn('target = up;', written) def test_write_background_image(self): image = classes.GSBackgroundImage("/tmp/img.jpg") # http://docu.glyphsapp.com/#gsbackgroundimage # path: already set # image: read-only, objective-c image.crop = Rect(Point(0, 10), Point(500, 510)) image.locked = True image.alpha = 70 image.position = Point(40, 90) image.scale = (1.1, 1.2) image.rotation = 0.3 # transform: Already set with scale/rotation self.assertWrites( image, dedent( """\ { alpha = 70; crop = "{{0, 10}, {500, 510}}"; imagePath = "/tmp/img.jpg"; locked = 1; transform = "{1.09998, 0.00576, -0.00628, 1.19998, 40, 90}"; } """ ), ) class WriterDumpInterfaceTest(unittest.TestCase): def test_dump(self): obj = classes.GSFont() fp = UnicodeIO() dump(obj, fp) self.assertTrue(fp.getvalue()) def test_dumps(self): obj = classes.GSFont() string = dumps(obj) self.assertTrue(string) class WriterRoundtripTest(unittest.TestCase, test_helpers.AssertParseWriteRoundtrip): def test_roundtrip_on_file(self): filename = os.path.join( os.path.dirname(__file__), "data/GlyphsUnitTestSans.glyphs" ) self.assertParseWriteRoundtrip(filename) if __name__ == "__main__": unittest.main()
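For a quick look at the writer outside the unittest harness above, the sketch below serializes a bare GSFont with dumps() and checks a couple of lines the tests also assert on (quoting of values that contain spaces, versionMajor/versionMinor always being written). It assumes glyphsLib is importable; the attribute values are illustrative.

from glyphsLib import classes
from glyphsLib.writer import dumps

font = classes.GSFont()
font.familyName = "Sans Rien"
font.versionMajor = 2
font.versionMinor = 104

text = dumps(font)
# Values containing spaces are quoted, and the version keys are always written.
assert 'familyName = "Sans Rien";' in text
assert "versionMajor = 2;" in text
assert "versionMinor = 104;" in text
print(text)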
apache-2.0
sujeet4github/MyLangUtils
LangPython/oreilly-intro-to-flask-video/venv/lib/python3.6/site-packages/sqlalchemy/orm/query.py
22
154548
# orm/query.py # Copyright (C) 2005-2017 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """The Query class and support. Defines the :class:`.Query` class, the central construct used by the ORM to construct database queries. The :class:`.Query` class should not be confused with the :class:`.Select` class, which defines database SELECT operations at the SQL (non-ORM) level. ``Query`` differs from ``Select`` in that it returns ORM-mapped objects and interacts with an ORM session, whereas the ``Select`` construct interacts directly with the database to return iterable result sets. """ from itertools import chain from . import ( attributes, interfaces, object_mapper, persistence, exc as orm_exc, loading ) from .base import _entity_descriptor, _is_aliased_class, \ _is_mapped_class, _orm_columns, _generative, InspectionAttr from .path_registry import PathRegistry from .util import ( AliasedClass, ORMAdapter, join as orm_join, with_parent, aliased ) from .. import sql, util, log, exc as sa_exc, inspect, inspection from ..sql.expression import _interpret_as_from from ..sql import ( util as sql_util, expression, visitors ) from ..sql.base import ColumnCollection from . import properties __all__ = ['Query', 'QueryContext', 'aliased'] _path_registry = PathRegistry.root @inspection._self_inspects @log.class_logger class Query(object): """ORM-level SQL construction object. :class:`.Query` is the source of all SELECT statements generated by the ORM, both those formulated by end-user query operations as well as by high level internal operations such as related collection loading. It features a generative interface whereby successive calls return a new :class:`.Query` object, a copy of the former with additional criteria and options associated with it. :class:`.Query` objects are normally initially generated using the :meth:`~.Session.query` method of :class:`.Session`, and in less common cases by instantiating the :class:`.Query` directly and associating with a :class:`.Session` using the :meth:`.Query.with_session` method. For a full walkthrough of :class:`.Query` usage, see the :ref:`ormtutorial_toplevel`. """ _enable_eagerloads = True _enable_assertions = True _with_labels = False _criterion = None _yield_per = None _order_by = False _group_by = False _having = None _distinct = False _prefixes = None _suffixes = None _offset = None _limit = None _for_update_arg = None _statement = None _correlate = frozenset() _populate_existing = False _invoke_all_eagers = True _version_check = False _autoflush = True _only_load_props = None _refresh_state = None _from_obj = () _join_entities = () _select_from_entity = None _mapper_adapter_map = {} _filter_aliases = None _from_obj_alias = None _joinpath = _joinpoint = util.immutabledict() _execution_options = util.immutabledict() _params = util.immutabledict() _attributes = util.immutabledict() _with_options = () _with_hints = () _enable_single_crit = True _orm_only_adapt = True _orm_only_from_obj_alias = True _current_path = _path_registry _has_mapper_entities = False def __init__(self, entities, session=None): """Construct a :class:`.Query` directly. E.g.:: q = Query([User, Address], session=some_session) The above is equivalent to:: q = some_session.query(User, Address) :param entities: a sequence of entities and/or SQL expressions. :param session: a :class:`.Session` with which the :class:`.Query` will be associated. 
Optional; a :class:`.Query` can be associated with a :class:`.Session` generatively via the :meth:`.Query.with_session` method as well. .. seealso:: :meth:`.Session.query` :meth:`.Query.with_session` """ self.session = session self._polymorphic_adapters = {} self._set_entities(entities) def _set_entities(self, entities, entity_wrapper=None): if entity_wrapper is None: entity_wrapper = _QueryEntity self._entities = [] self._primary_entity = None self._has_mapper_entities = False for ent in util.to_list(entities): entity_wrapper(self, ent) self._set_entity_selectables(self._entities) def _set_entity_selectables(self, entities): self._mapper_adapter_map = d = self._mapper_adapter_map.copy() for ent in entities: for entity in ent.entities: if entity not in d: ext_info = inspect(entity) if not ext_info.is_aliased_class and \ ext_info.mapper.with_polymorphic: if ext_info.mapper.mapped_table not in \ self._polymorphic_adapters: self._mapper_loads_polymorphically_with( ext_info.mapper, sql_util.ColumnAdapter( ext_info.selectable, ext_info.mapper._equivalent_columns ) ) aliased_adapter = None elif ext_info.is_aliased_class: aliased_adapter = ext_info._adapter else: aliased_adapter = None d[entity] = ( ext_info, aliased_adapter ) ent.setup_entity(*d[entity]) def _mapper_loads_polymorphically_with(self, mapper, adapter): for m2 in mapper._with_polymorphic_mappers or [mapper]: self._polymorphic_adapters[m2] = adapter for m in m2.iterate_to_root(): self._polymorphic_adapters[m.local_table] = adapter def _set_select_from(self, obj, set_base_alias): fa = [] select_from_alias = None for from_obj in obj: info = inspect(from_obj) if hasattr(info, 'mapper') and \ (info.is_mapper or info.is_aliased_class): self._select_from_entity = info if set_base_alias and not info.is_aliased_class: raise sa_exc.ArgumentError( "A selectable (FromClause) instance is " "expected when the base alias is being set.") fa.append(info.selectable) elif not info.is_selectable: raise sa_exc.ArgumentError( "argument is not a mapped class, mapper, " "aliased(), or FromClause instance.") else: if isinstance(from_obj, expression.SelectBase): from_obj = from_obj.alias() if set_base_alias: select_from_alias = from_obj fa.append(from_obj) self._from_obj = tuple(fa) if set_base_alias and \ len(self._from_obj) == 1 and \ isinstance(select_from_alias, expression.Alias): equivs = self.__all_equivs() self._from_obj_alias = sql_util.ColumnAdapter( self._from_obj[0], equivs) elif set_base_alias and \ len(self._from_obj) == 1 and \ hasattr(info, "mapper") and \ info.is_aliased_class: self._from_obj_alias = info._adapter def _reset_polymorphic_adapter(self, mapper): for m2 in mapper._with_polymorphic_mappers: self._polymorphic_adapters.pop(m2, None) for m in m2.iterate_to_root(): self._polymorphic_adapters.pop(m.local_table, None) def _adapt_polymorphic_element(self, element): if "parententity" in element._annotations: search = element._annotations['parententity'] alias = self._polymorphic_adapters.get(search, None) if alias: return alias.adapt_clause(element) if isinstance(element, expression.FromClause): search = element elif hasattr(element, 'table'): search = element.table else: return None alias = self._polymorphic_adapters.get(search, None) if alias: return alias.adapt_clause(element) def _adapt_col_list(self, cols): return [ self._adapt_clause( expression._literal_as_label_reference(o), True, True) for o in cols ] @_generative() def _adapt_all_clauses(self): self._orm_only_adapt = False def _adapt_clause(self, clause, as_filter, orm_only): 
"""Adapt incoming clauses to transformations which have been applied within this query.""" adapters = [] # do we adapt all expression elements or only those # tagged as 'ORM' constructs ? if not self._orm_only_adapt: orm_only = False if as_filter and self._filter_aliases: for fa in self._filter_aliases._visitor_iterator: adapters.append( ( orm_only, fa.replace ) ) if self._from_obj_alias: # for the "from obj" alias, apply extra rule to the # 'ORM only' check, if this query were generated from a # subquery of itself, i.e. _from_selectable(), apply adaption # to all SQL constructs. adapters.append( ( orm_only if self._orm_only_from_obj_alias else False, self._from_obj_alias.replace ) ) if self._polymorphic_adapters: adapters.append( ( orm_only, self._adapt_polymorphic_element ) ) if not adapters: return clause def replace(elem): for _orm_only, adapter in adapters: # if 'orm only', look for ORM annotations # in the element before adapting. if not _orm_only or \ '_orm_adapt' in elem._annotations or \ "parententity" in elem._annotations: e = adapter(elem) if e is not None: return e return visitors.replacement_traverse( clause, {}, replace ) def _query_entity_zero(self): """Return the first QueryEntity.""" return self._entities[0] def _mapper_zero(self): """return the Mapper associated with the first QueryEntity.""" return self._entities[0].mapper def _entity_zero(self): """Return the 'entity' (mapper or AliasedClass) associated with the first QueryEntity, or alternatively the 'select from' entity if specified.""" return self._select_from_entity \ if self._select_from_entity is not None \ else self._query_entity_zero().entity_zero @property def _mapper_entities(self): for ent in self._entities: if isinstance(ent, _MapperEntity): yield ent def _joinpoint_zero(self): return self._joinpoint.get( '_joinpoint_entity', self._entity_zero() ) def _bind_mapper(self): ezero = self._entity_zero() if ezero is not None: insp = inspect(ezero) if not insp.is_clause_element: return insp.mapper return None def _only_full_mapper_zero(self, methname): if self._entities != [self._primary_entity]: raise sa_exc.InvalidRequestError( "%s() can only be used against " "a single mapped class." % methname) return self._primary_entity.entity_zero def _only_entity_zero(self, rationale=None): if len(self._entities) > 1: raise sa_exc.InvalidRequestError( rationale or "This operation requires a Query " "against a single mapper." ) return self._entity_zero() def __all_equivs(self): equivs = {} for ent in self._mapper_entities: equivs.update(ent.mapper._equivalent_columns) return equivs def _get_condition(self): return self._no_criterion_condition( "get", order_by=False, distinct=False) def _get_existing_condition(self): self._no_criterion_assertion("get", order_by=False, distinct=False) def _no_criterion_assertion(self, meth, order_by=True, distinct=True): if not self._enable_assertions: return if self._criterion is not None or \ self._statement is not None or self._from_obj or \ self._limit is not None or self._offset is not None or \ self._group_by or (order_by and self._order_by) or \ (distinct and self._distinct): raise sa_exc.InvalidRequestError( "Query.%s() being called on a " "Query with existing criterion. 
" % meth) def _no_criterion_condition(self, meth, order_by=True, distinct=True): self._no_criterion_assertion(meth, order_by, distinct) self._from_obj = () self._statement = self._criterion = None self._order_by = self._group_by = self._distinct = False def _no_clauseelement_condition(self, meth): if not self._enable_assertions: return if self._order_by: raise sa_exc.InvalidRequestError( "Query.%s() being called on a " "Query with existing criterion. " % meth) self._no_criterion_condition(meth) def _no_statement_condition(self, meth): if not self._enable_assertions: return if self._statement is not None: raise sa_exc.InvalidRequestError( ("Query.%s() being called on a Query with an existing full " "statement - can't apply criterion.") % meth) def _no_limit_offset(self, meth): if not self._enable_assertions: return if self._limit is not None or self._offset is not None: raise sa_exc.InvalidRequestError( "Query.%s() being called on a Query which already has LIMIT " "or OFFSET applied. To modify the row-limited results of a " " Query, call from_self() first. " "Otherwise, call %s() before limit() or offset() " "are applied." % (meth, meth) ) def _get_options(self, populate_existing=None, version_check=None, only_load_props=None, refresh_state=None): if populate_existing: self._populate_existing = populate_existing if version_check: self._version_check = version_check if refresh_state: self._refresh_state = refresh_state if only_load_props: self._only_load_props = set(only_load_props) return self def _clone(self): cls = self.__class__ q = cls.__new__(cls) q.__dict__ = self.__dict__.copy() return q @property def statement(self): """The full SELECT statement represented by this Query. The statement by default will not have disambiguating labels applied to the construct unless with_labels(True) is called first. """ stmt = self._compile_context(labels=self._with_labels).\ statement if self._params: stmt = stmt.params(self._params) # TODO: there's no tests covering effects of # the annotation not being there return stmt._annotate({'no_replacement_traverse': True}) def subquery(self, name=None, with_labels=False, reduce_columns=False): """return the full SELECT statement represented by this :class:`.Query`, embedded within an :class:`.Alias`. Eager JOIN generation within the query is disabled. :param name: string name to be assigned as the alias; this is passed through to :meth:`.FromClause.alias`. If ``None``, a name will be deterministically generated at compile time. :param with_labels: if True, :meth:`.with_labels` will be called on the :class:`.Query` first to apply table-qualified labels to all columns. :param reduce_columns: if True, :meth:`.Select.reduce_columns` will be called on the resulting :func:`.select` construct, to remove same-named columns where one also refers to the other via foreign key or WHERE clause equivalence. .. versionchanged:: 0.8 the ``with_labels`` and ``reduce_columns`` keyword arguments were added. """ q = self.enable_eagerloads(False) if with_labels: q = q.with_labels() q = q.statement if reduce_columns: q = q.reduce_columns() return q.alias(name=name) def cte(self, name=None, recursive=False): r"""Return the full SELECT statement represented by this :class:`.Query` represented as a common table expression (CTE). Parameters and usage are the same as those of the :meth:`.SelectBase.cte` method; see that method for further details. Here is the `PostgreSQL WITH RECURSIVE example <http://www.postgresql.org/docs/8.4/static/queries-with.html>`_. 
Note that, in this example, the ``included_parts`` cte and the ``incl_alias`` alias of it are Core selectables, which means the columns are accessed via the ``.c.`` attribute. The ``parts_alias`` object is an :func:`.orm.aliased` instance of the ``Part`` entity, so column-mapped attributes are available directly:: from sqlalchemy.orm import aliased class Part(Base): __tablename__ = 'part' part = Column(String, primary_key=True) sub_part = Column(String, primary_key=True) quantity = Column(Integer) included_parts = session.query( Part.sub_part, Part.part, Part.quantity).\ filter(Part.part=="our part").\ cte(name="included_parts", recursive=True) incl_alias = aliased(included_parts, name="pr") parts_alias = aliased(Part, name="p") included_parts = included_parts.union_all( session.query( parts_alias.sub_part, parts_alias.part, parts_alias.quantity).\ filter(parts_alias.part==incl_alias.c.sub_part) ) q = session.query( included_parts.c.sub_part, func.sum(included_parts.c.quantity). label('total_quantity') ).\ group_by(included_parts.c.sub_part) .. seealso:: :meth:`.HasCTE.cte` """ return self.enable_eagerloads(False).\ statement.cte(name=name, recursive=recursive) def label(self, name): """Return the full SELECT statement represented by this :class:`.Query`, converted to a scalar subquery with a label of the given name. Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.label`. .. versionadded:: 0.6.5 """ return self.enable_eagerloads(False).statement.label(name) def as_scalar(self): """Return the full SELECT statement represented by this :class:`.Query`, converted to a scalar subquery. Analogous to :meth:`sqlalchemy.sql.expression.SelectBase.as_scalar`. .. versionadded:: 0.6.5 """ return self.enable_eagerloads(False).statement.as_scalar() @property def selectable(self): """Return the :class:`.Select` object emitted by this :class:`.Query`. Used for :func:`.inspect` compatibility, this is equivalent to:: query.enable_eagerloads(False).with_labels().statement """ return self.__clause_element__() def __clause_element__(self): return self.enable_eagerloads(False).with_labels().statement @_generative() def enable_eagerloads(self, value): """Control whether or not eager joins and subqueries are rendered. When set to False, the returned Query will not render eager joins regardless of :func:`~sqlalchemy.orm.joinedload`, :func:`~sqlalchemy.orm.subqueryload` options or mapper-level ``lazy='joined'``/``lazy='subquery'`` configurations. This is used primarily when nesting the Query's statement into a subquery or other selectable, or when using :meth:`.Query.yield_per`. """ self._enable_eagerloads = value def _no_yield_per(self, message): raise sa_exc.InvalidRequestError( "The yield_per Query option is currently not " "compatible with %s eager loading. Please " "specify lazyload('*') or query.enable_eagerloads(False) in " "order to " "proceed with query.yield_per()." % message) @_generative() def with_labels(self): """Apply column labels to the return value of Query.statement. Indicates that this Query's `statement` accessor should return a SELECT statement that applies labels to all columns in the form <tablename>_<columnname>; this is commonly used to disambiguate columns from multiple tables which have the same name. When the `Query` actually issues SQL to load rows, it always uses column labeling. .. note:: The :meth:`.Query.with_labels` method *only* applies the output of :attr:`.Query.statement`, and *not* to any of the result-row invoking systems of :class:`.Query` itself, e.g. 
:meth:`.Query.first`, :meth:`.Query.all`, etc. To execute a query using :meth:`.Query.with_labels`, invoke the :attr:`.Query.statement` using :meth:`.Session.execute`:: result = session.execute(query.with_labels().statement) """ self._with_labels = True @_generative() def enable_assertions(self, value): """Control whether assertions are generated. When set to False, the returned Query will not assert its state before certain operations, including that LIMIT/OFFSET has not been applied when filter() is called, no criterion exists when get() is called, and no "from_statement()" exists when filter()/order_by()/group_by() etc. is called. This more permissive mode is used by custom Query subclasses to specify criterion or other modifiers outside of the usual usage patterns. Care should be taken to ensure that the usage pattern is even possible. A statement applied by from_statement() will override any criterion set by filter() or order_by(), for example. """ self._enable_assertions = value @property def whereclause(self): """A readonly attribute which returns the current WHERE criterion for this Query. This returned value is a SQL expression construct, or ``None`` if no criterion has been established. """ return self._criterion @_generative() def _with_current_path(self, path): """indicate that this query applies to objects loaded within a certain path. Used by deferred loaders (see strategies.py) which transfer query options from an originating query to a newly generated query intended for the deferred load. """ self._current_path = path @_generative(_no_clauseelement_condition) def with_polymorphic(self, cls_or_mappers, selectable=None, polymorphic_on=None): """Load columns for inheriting classes. :meth:`.Query.with_polymorphic` applies transformations to the "main" mapped class represented by this :class:`.Query`. The "main" mapped class here means the :class:`.Query` object's first argument is a full class, i.e. ``session.query(SomeClass)``. These transformations allow additional tables to be present in the FROM clause so that columns for a joined-inheritance subclass are available in the query, both for the purposes of load-time efficiency as well as the ability to use these columns at query time. See the documentation section :ref:`with_polymorphic` for details on how this method is used. .. versionchanged:: 0.8 A new and more flexible function :func:`.orm.with_polymorphic` supersedes :meth:`.Query.with_polymorphic`, as it can apply the equivalent functionality to any set of columns or classes in the :class:`.Query`, not just the "zero mapper". See that function for a description of arguments. """ if not self._primary_entity: raise sa_exc.InvalidRequestError( "No primary mapper set up for this Query.") entity = self._entities[0]._clone() self._entities = [entity] + self._entities[1:] entity.set_with_polymorphic(self, cls_or_mappers, selectable=selectable, polymorphic_on=polymorphic_on) @_generative() def yield_per(self, count): r"""Yield only ``count`` rows at a time. The purpose of this method is when fetching very large result sets (> 10K rows), to batch results in sub-collections and yield them out partially, so that the Python interpreter doesn't need to declare very large areas of memory which is both time consuming and leads to excessive memory use. The performance from fetching hundreds of thousands of rows can often double when a suitable yield-per setting (e.g. approximately 1000) is used, even with DBAPIs that buffer rows (which are most). 
The :meth:`.Query.yield_per` method **is not compatible with most eager loading schemes, including subqueryload and joinedload with collections**. For this reason, it may be helpful to disable eager loads, either unconditionally with :meth:`.Query.enable_eagerloads`:: q = sess.query(Object).yield_per(100).enable_eagerloads(False) Or more selectively using :func:`.lazyload`; such as with an asterisk to specify the default loader scheme:: q = sess.query(Object).yield_per(100).\ options(lazyload('*'), joinedload(Object.some_related)) .. warning:: Use this method with caution; if the same instance is present in more than one batch of rows, end-user changes to attributes will be overwritten. In particular, it's usually impossible to use this setting with eagerly loaded collections (i.e. any lazy='joined' or 'subquery') since those collections will be cleared for a new load when encountered in a subsequent result batch. In the case of 'subquery' loading, the full result for all rows is fetched which generally defeats the purpose of :meth:`~sqlalchemy.orm.query.Query.yield_per`. Also note that while :meth:`~sqlalchemy.orm.query.Query.yield_per` will set the ``stream_results`` execution option to True, currently this is only understood by :mod:`~sqlalchemy.dialects.postgresql.psycopg2`, :mod:`~sqlalchemy.dialects.mysql.mysqldb` and :mod:`~sqlalchemy.dialects.mysql.pymysql` dialects which will stream results using server side cursors instead of pre-buffer all rows for this query. Other DBAPIs **pre-buffer all rows** before making them available. The memory use of raw database rows is much less than that of an ORM-mapped object, but should still be taken into consideration when benchmarking. .. seealso:: :meth:`.Query.enable_eagerloads` """ self._yield_per = count self._execution_options = self._execution_options.union( {"stream_results": True, "max_row_buffer": count}) def get(self, ident): """Return an instance based on the given primary key identifier, or ``None`` if not found. E.g.:: my_user = session.query(User).get(5) some_object = session.query(VersionedFoo).get((5, 10)) :meth:`~.Query.get` is special in that it provides direct access to the identity map of the owning :class:`.Session`. If the given primary key identifier is present in the local identity map, the object is returned directly from this collection and no SQL is emitted, unless the object has been marked fully expired. If not present, a SELECT is performed in order to locate the object. :meth:`~.Query.get` also will perform a check if the object is present in the identity map and marked as expired - a SELECT is emitted to refresh the object as well as to ensure that the row is still present. If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. :meth:`~.Query.get` is only used to return a single mapped instance, not multiple instances or individual column constructs, and strictly on a single primary key value. The originating :class:`.Query` must be constructed in this way, i.e. against a single mapped entity, with no additional filtering criterion. Loading options via :meth:`~.Query.options` may be applied however, and will be used if the object is not yet locally present. A lazy-loading, many-to-one attribute configured by :func:`.relationship`, using a simple foreign-key-to-primary-key criterion, will also use an operation equivalent to :meth:`~.Query.get` in order to retrieve the target value from the local identity map before querying the database. 
See :doc:`/orm/loading_relationships` for further details on relationship loading. :param ident: A scalar or tuple value representing the primary key. For a composite primary key, the order of identifiers corresponds in most cases to that of the mapped :class:`.Table` object's primary key columns. For a :func:`.mapper` that was given the ``primary key`` argument during construction, the order of identifiers corresponds to the elements present in this collection. :return: The object instance, or ``None``. """ return self._get_impl(ident, loading.load_on_ident) def _get_impl(self, ident, fallback_fn): # convert composite types to individual args if hasattr(ident, '__composite_values__'): ident = ident.__composite_values__() ident = util.to_list(ident) mapper = self._only_full_mapper_zero("get") if len(ident) != len(mapper.primary_key): raise sa_exc.InvalidRequestError( "Incorrect number of values in identifier to formulate " "primary key for query.get(); primary key columns are %s" % ','.join("'%s'" % c for c in mapper.primary_key)) key = mapper.identity_key_from_primary_key(ident) if not self._populate_existing and \ not mapper.always_refresh and \ self._for_update_arg is None: instance = loading.get_from_identity( self.session, key, attributes.PASSIVE_OFF) if instance is not None: self._get_existing_condition() # reject calls for id in identity map but class # mismatch. if not issubclass(instance.__class__, mapper.class_): return None return instance return fallback_fn(self, key) @_generative() def correlate(self, *args): """Return a :class:`.Query` construct which will correlate the given FROM clauses to that of an enclosing :class:`.Query` or :func:`~.expression.select`. The method here accepts mapped classes, :func:`.aliased` constructs, and :func:`.mapper` constructs as arguments, which are resolved into expression constructs, in addition to appropriate expression constructs. The correlation arguments are ultimately passed to :meth:`.Select.correlate` after coercion to expression constructs. The correlation arguments take effect in such cases as when :meth:`.Query.from_self` is used, or when a subquery as returned by :meth:`.Query.subquery` is embedded in another :func:`~.expression.select` construct. """ for s in args: if s is None: self._correlate = self._correlate.union([None]) else: self._correlate = self._correlate.union( sql_util.surface_selectables(_interpret_as_from(s)) ) @_generative() def autoflush(self, setting): """Return a Query with a specific 'autoflush' setting. Note that a Session with autoflush=False will not autoflush, even if this flag is set to True at the Query level. Therefore this flag is usually used only to disable autoflush for a specific Query. """ self._autoflush = setting @_generative() def populate_existing(self): """Return a :class:`.Query` that will expire and refresh all instances as they are loaded, or reused from the current :class:`.Session`. :meth:`.populate_existing` does not improve behavior when the ORM is used normally - the :class:`.Session` object's usual behavior of maintaining a transaction and expiring all attributes after rollback or commit handles object state automatically. This method is not intended for general use. """ self._populate_existing = True @_generative() def _with_invoke_all_eagers(self, value): """Set the 'invoke all eagers' flag which causes joined- and subquery loaders to traverse into already-loaded related objects and collections. Default is that of :attr:`.Query._invoke_all_eagers`. 
""" self._invoke_all_eagers = value def with_parent(self, instance, property=None): """Add filtering criterion that relates the given instance to a child object or collection, using its attribute state as well as an established :func:`.relationship()` configuration. The method uses the :func:`.with_parent` function to generate the clause, the result of which is passed to :meth:`.Query.filter`. Parameters are the same as :func:`.with_parent`, with the exception that the given property can be None, in which case a search is performed against this :class:`.Query` object's target mapper. """ if property is None: mapper_zero = self._mapper_zero() mapper = object_mapper(instance) for prop in mapper.iterate_properties: if isinstance(prop, properties.RelationshipProperty) and \ prop.mapper is mapper_zero: property = prop break else: raise sa_exc.InvalidRequestError( "Could not locate a property which relates instances " "of class '%s' to instances of class '%s'" % ( self._mapper_zero().class_.__name__, instance.__class__.__name__) ) return self.filter(with_parent(instance, property)) @_generative() def add_entity(self, entity, alias=None): """add a mapped entity to the list of result columns to be returned.""" if alias is not None: entity = aliased(entity, alias) self._entities = list(self._entities) m = _MapperEntity(self, entity) self._set_entity_selectables([m]) @_generative() def with_session(self, session): """Return a :class:`.Query` that will use the given :class:`.Session`. While the :class:`.Query` object is normally instantiated using the :meth:`.Session.query` method, it is legal to build the :class:`.Query` directly without necessarily using a :class:`.Session`. Such a :class:`.Query` object, or any :class:`.Query` already associated with a different :class:`.Session`, can produce a new :class:`.Query` object associated with a target session using this method:: from sqlalchemy.orm import Query query = Query([MyClass]).filter(MyClass.id == 5) result = query.with_session(my_session).one() """ self.session = session def from_self(self, *entities): r"""return a Query that selects from this Query's SELECT statement. :meth:`.Query.from_self` essentially turns the SELECT statement into a SELECT of itself. Given a query such as:: q = session.query(User).filter(User.name.like('e%')) Given the :meth:`.Query.from_self` version:: q = session.query(User).filter(User.name.like('e%')).from_self() This query renders as: .. sourcecode:: sql SELECT anon_1.user_id AS anon_1_user_id, anon_1.user_name AS anon_1_user_name FROM (SELECT "user".id AS user_id, "user".name AS user_name FROM "user" WHERE "user".name LIKE :name_1) AS anon_1 There are lots of cases where :meth:`.Query.from_self` may be useful. A simple one is where above, we may want to apply a row LIMIT to the set of user objects we query against, and then apply additional joins against that row-limited set:: q = session.query(User).filter(User.name.like('e%')).\ limit(5).from_self().\ join(User.addresses).filter(Address.email.like('q%')) The above query joins to the ``Address`` entity but only against the first five results of the ``User`` query: .. 
sourcecode:: sql SELECT anon_1.user_id AS anon_1_user_id, anon_1.user_name AS anon_1_user_name FROM (SELECT "user".id AS user_id, "user".name AS user_name FROM "user" WHERE "user".name LIKE :name_1 LIMIT :param_1) AS anon_1 JOIN address ON anon_1.user_id = address.user_id WHERE address.email LIKE :email_1 **Automatic Aliasing** Another key behavior of :meth:`.Query.from_self` is that it applies **automatic aliasing** to the entities inside the subquery, when they are referenced on the outside. Above, if we continue to refer to the ``User`` entity without any additional aliasing applied to it, those references wil be in terms of the subquery:: q = session.query(User).filter(User.name.like('e%')).\ limit(5).from_self().\ join(User.addresses).filter(Address.email.like('q%')).\ order_by(User.name) The ORDER BY against ``User.name`` is aliased to be in terms of the inner subquery: .. sourcecode:: sql SELECT anon_1.user_id AS anon_1_user_id, anon_1.user_name AS anon_1_user_name FROM (SELECT "user".id AS user_id, "user".name AS user_name FROM "user" WHERE "user".name LIKE :name_1 LIMIT :param_1) AS anon_1 JOIN address ON anon_1.user_id = address.user_id WHERE address.email LIKE :email_1 ORDER BY anon_1.user_name The automatic aliasing feature only works in a **limited** way, for simple filters and orderings. More ambitious constructions such as referring to the entity in joins should prefer to use explicit subquery objects, typically making use of the :meth:`.Query.subquery` method to produce an explicit subquery object. Always test the structure of queries by viewing the SQL to ensure a particular structure does what's expected! **Changing the Entities** :meth:`.Query.from_self` also includes the ability to modify what columns are being queried. In our example, we want ``User.id`` to be queried by the inner query, so that we can join to the ``Address`` entity on the outside, but we only wanted the outer query to return the ``Address.email`` column:: q = session.query(User).filter(User.name.like('e%')).\ limit(5).from_self(Address.email).\ join(User.addresses).filter(Address.email.like('q%')) yielding: .. sourcecode:: sql SELECT address.email AS address_email FROM (SELECT "user".id AS user_id, "user".name AS user_name FROM "user" WHERE "user".name LIKE :name_1 LIMIT :param_1) AS anon_1 JOIN address ON anon_1.user_id = address.user_id WHERE address.email LIKE :email_1 **Looking out for Inner / Outer Columns** Keep in mind that when referring to columns that originate from inside the subquery, we need to ensure they are present in the columns clause of the subquery itself; this is an ordinary aspect of SQL. For example, if we wanted to load from a joined entity inside the subquery using :func:`.contains_eager`, we need to add those columns. Below illustrates a join of ``Address`` to ``User``, then a subquery, and then we'd like :func:`.contains_eager` to access the ``User`` columns:: q = session.query(Address).join(Address.user).\ filter(User.name.like('e%')) q = q.add_entity(User).from_self().\ options(contains_eager(Address.user)) We use :meth:`.Query.add_entity` above **before** we call :meth:`.Query.from_self` so that the ``User`` columns are present in the inner subquery, so that they are available to the :func:`.contains_eager` modifier we are using on the outside, producing: .. 
sourcecode:: sql SELECT anon_1.address_id AS anon_1_address_id, anon_1.address_email AS anon_1_address_email, anon_1.address_user_id AS anon_1_address_user_id, anon_1.user_id AS anon_1_user_id, anon_1.user_name AS anon_1_user_name FROM ( SELECT address.id AS address_id, address.email AS address_email, address.user_id AS address_user_id, "user".id AS user_id, "user".name AS user_name FROM address JOIN "user" ON "user".id = address.user_id WHERE "user".name LIKE :name_1) AS anon_1 If we didn't call ``add_entity(User)``, but still asked :func:`.contains_eager` to load the ``User`` entity, it would be forced to add the table on the outside without the correct join criteria - note the ``anon1, "user"`` phrase at the end: .. sourcecode:: sql -- incorrect query SELECT anon_1.address_id AS anon_1_address_id, anon_1.address_email AS anon_1_address_email, anon_1.address_user_id AS anon_1_address_user_id, "user".id AS user_id, "user".name AS user_name FROM ( SELECT address.id AS address_id, address.email AS address_email, address.user_id AS address_user_id FROM address JOIN "user" ON "user".id = address.user_id WHERE "user".name LIKE :name_1) AS anon_1, "user" :param \*entities: optional list of entities which will replace those being selected. """ fromclause = self.with_labels().enable_eagerloads(False).\ statement.correlate(None) q = self._from_selectable(fromclause) q._enable_single_crit = False q._select_from_entity = self._entity_zero() if entities: q._set_entities(entities) return q @_generative() def _set_enable_single_crit(self, val): self._enable_single_crit = val @_generative() def _from_selectable(self, fromclause): for attr in ( '_statement', '_criterion', '_order_by', '_group_by', '_limit', '_offset', '_joinpath', '_joinpoint', '_distinct', '_having', '_prefixes', '_suffixes' ): self.__dict__.pop(attr, None) self._set_select_from([fromclause], True) # this enables clause adaptation for non-ORM # expressions. self._orm_only_from_obj_alias = False old_entities = self._entities self._entities = [] for e in old_entities: e.adapt_to_selectable(self, self._from_obj[0]) def values(self, *columns): """Return an iterator yielding result tuples corresponding to the given list of columns""" if not columns: return iter(()) q = self._clone() q._set_entities(columns, entity_wrapper=_ColumnEntity) if not q._yield_per: q._yield_per = 10 return iter(q) _values = values def value(self, column): """Return a scalar result corresponding to the given column expression.""" try: return next(self.values(column))[0] except StopIteration: return None @_generative() def with_entities(self, *entities): """Return a new :class:`.Query` replacing the SELECT list with the given entities. e.g.:: # Users, filtered on some arbitrary criterion # and then ordered by related email address q = session.query(User).\ join(User.address).\ filter(User.name.like('%ed%')).\ order_by(Address.email) # given *only* User.id==5, Address.email, and 'q', what # would the *next* User in the result be ? subq = q.with_entities(Address.email).\ order_by(None).\ filter(User.id==5).\ subquery() q = q.join((subq, subq.c.email < Address.email)).\ limit(1) .. 
versionadded:: 0.6.5 """ self._set_entities(entities) @_generative() def add_columns(self, *column): """Add one or more column expressions to the list of result columns to be returned.""" self._entities = list(self._entities) l = len(self._entities) for c in column: _ColumnEntity(self, c) # _ColumnEntity may add many entities if the # given arg is a FROM clause self._set_entity_selectables(self._entities[l:]) @util.pending_deprecation("0.7", ":meth:`.add_column` is superseded " "by :meth:`.add_columns`", False) def add_column(self, column): """Add a column expression to the list of result columns to be returned. Pending deprecation: :meth:`.add_column` will be superseded by :meth:`.add_columns`. """ return self.add_columns(column) def options(self, *args): """Return a new Query object, applying the given list of mapper options. Most supplied options regard changing how column- and relationship-mapped attributes are loaded. See the sections :ref:`deferred` and :doc:`/orm/loading_relationships` for reference documentation. """ return self._options(False, *args) def _conditional_options(self, *args): return self._options(True, *args) @_generative() def _options(self, conditional, *args): # most MapperOptions write to the '_attributes' dictionary, # so copy that as well self._attributes = self._attributes.copy() opts = tuple(util.flatten_iterator(args)) self._with_options = self._with_options + opts if conditional: for opt in opts: opt.process_query_conditionally(self) else: for opt in opts: opt.process_query(self) def with_transformation(self, fn): """Return a new :class:`.Query` object transformed by the given function. E.g.:: def filter_something(criterion): def transform(q): return q.filter(criterion) return transform q = q.with_transformation(filter_something(x==5)) This allows ad-hoc recipes to be created for :class:`.Query` objects. See the example at :ref:`hybrid_transformers`. .. versionadded:: 0.7.4 """ return fn(self) @_generative() def with_hint(self, selectable, text, dialect_name='*'): """Add an indexing or other executional context hint for the given entity or selectable to this :class:`.Query`. Functionality is passed straight through to :meth:`~sqlalchemy.sql.expression.Select.with_hint`, with the addition that ``selectable`` can be a :class:`.Table`, :class:`.Alias`, or ORM entity / mapped class /etc. .. seealso:: :meth:`.Query.with_statement_hint` """ if selectable is not None: selectable = inspect(selectable).selectable self._with_hints += ((selectable, text, dialect_name),) def with_statement_hint(self, text, dialect_name='*'): """add a statement hint to this :class:`.Select`. This method is similar to :meth:`.Select.with_hint` except that it does not require an individual table, and instead applies to the statement as a whole. This feature calls down into :meth:`.Select.with_statement_hint`. .. versionadded:: 1.0.0 .. seealso:: :meth:`.Query.with_hint` """ return self.with_hint(None, text, dialect_name) @_generative() def execution_options(self, **kwargs): """ Set non-SQL options which take effect during execution. The options are the same as those accepted by :meth:`.Connection.execution_options`. Note that the ``stream_results`` execution option is enabled automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()` method is used. 
""" self._execution_options = self._execution_options.union(kwargs) @_generative() def with_lockmode(self, mode): """Return a new :class:`.Query` object with the specified "locking mode", which essentially refers to the ``FOR UPDATE`` clause. .. deprecated:: 0.9.0 superseded by :meth:`.Query.with_for_update`. :param mode: a string representing the desired locking mode. Valid values are: * ``None`` - translates to no lockmode * ``'update'`` - translates to ``FOR UPDATE`` (standard SQL, supported by most dialects) * ``'update_nowait'`` - translates to ``FOR UPDATE NOWAIT`` (supported by Oracle, PostgreSQL 8.1 upwards) * ``'read'`` - translates to ``LOCK IN SHARE MODE`` (for MySQL), and ``FOR SHARE`` (for PostgreSQL) .. seealso:: :meth:`.Query.with_for_update` - improved API for specifying the ``FOR UPDATE`` clause. """ self._for_update_arg = LockmodeArg.parse_legacy_query(mode) @_generative() def with_for_update(self, read=False, nowait=False, of=None, skip_locked=False, key_share=False): """return a new :class:`.Query` with the specified options for the ``FOR UPDATE`` clause. The behavior of this method is identical to that of :meth:`.SelectBase.with_for_update`. When called with no arguments, the resulting ``SELECT`` statement will have a ``FOR UPDATE`` clause appended. When additional arguments are specified, backend-specific options such as ``FOR UPDATE NOWAIT`` or ``LOCK IN SHARE MODE`` can take effect. E.g.:: q = sess.query(User).with_for_update(nowait=True, of=User) The above query on a PostgreSQL backend will render like:: SELECT users.id AS users_id FROM users FOR UPDATE OF users NOWAIT .. versionadded:: 0.9.0 :meth:`.Query.with_for_update` supersedes the :meth:`.Query.with_lockmode` method. .. seealso:: :meth:`.GenerativeSelect.with_for_update` - Core level method with full argument and behavioral description. """ self._for_update_arg = LockmodeArg(read=read, nowait=nowait, of=of, skip_locked=skip_locked, key_share=key_share) @_generative() def params(self, *args, **kwargs): r"""add values for bind parameters which may have been specified in filter(). parameters may be specified using \**kwargs, or optionally a single dictionary as the first positional argument. The reason for both is that \**kwargs is convenient, however some parameter dictionaries contain unicode keys in which case \**kwargs cannot be used. """ if len(args) == 1: kwargs.update(args[0]) elif len(args) > 0: raise sa_exc.ArgumentError( "params() takes zero or one positional argument, " "which is a dictionary.") self._params = self._params.copy() self._params.update(kwargs) @_generative(_no_statement_condition, _no_limit_offset) def filter(self, *criterion): r"""apply the given filtering criterion to a copy of this :class:`.Query`, using SQL expressions. e.g.:: session.query(MyClass).filter(MyClass.name == 'some name') Multiple criteria may be specified as comma separated; the effect is that they will be joined together using the :func:`.and_` function:: session.query(MyClass).\ filter(MyClass.name == 'some name', MyClass.id > 5) The criterion is any SQL expression object applicable to the WHERE clause of a select. String expressions are coerced into SQL expression constructs via the :func:`.text` construct. .. seealso:: :meth:`.Query.filter_by` - filter on keyword expressions. 
""" for criterion in list(criterion): criterion = expression._expression_literal_as_text(criterion) criterion = self._adapt_clause(criterion, True, True) if self._criterion is not None: self._criterion = self._criterion & criterion else: self._criterion = criterion def filter_by(self, **kwargs): r"""apply the given filtering criterion to a copy of this :class:`.Query`, using keyword expressions. e.g.:: session.query(MyClass).filter_by(name = 'some name') Multiple criteria may be specified as comma separated; the effect is that they will be joined together using the :func:`.and_` function:: session.query(MyClass).\ filter_by(name = 'some name', id = 5) The keyword expressions are extracted from the primary entity of the query, or the last entity that was the target of a call to :meth:`.Query.join`. .. seealso:: :meth:`.Query.filter` - filter on SQL expressions. """ clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value for key, value in kwargs.items()] return self.filter(sql.and_(*clauses)) @_generative(_no_statement_condition, _no_limit_offset) def order_by(self, *criterion): """apply one or more ORDER BY criterion to the query and return the newly resulting ``Query`` All existing ORDER BY settings can be suppressed by passing ``None`` - this will suppress any ORDER BY configured on mappers as well. Alternatively, passing False will reset ORDER BY and additionally re-allow default mapper.order_by to take place. Note mapper.order_by is deprecated. """ if len(criterion) == 1: if criterion[0] is False: if '_order_by' in self.__dict__: self._order_by = False return if criterion[0] is None: self._order_by = None return criterion = self._adapt_col_list(criterion) if self._order_by is False or self._order_by is None: self._order_by = criterion else: self._order_by = self._order_by + criterion @_generative(_no_statement_condition, _no_limit_offset) def group_by(self, *criterion): """apply one or more GROUP BY criterion to the query and return the newly resulting :class:`.Query` All existing GROUP BY settings can be suppressed by passing ``None`` - this will suppress any GROUP BY configured on mappers as well. .. versionadded:: 1.1 GROUP BY can be cancelled by passing None, in the same way as ORDER BY. """ if len(criterion) == 1: if criterion[0] is None: self._group_by = False return criterion = list(chain(*[_orm_columns(c) for c in criterion])) criterion = self._adapt_col_list(criterion) if self._group_by is False: self._group_by = criterion else: self._group_by = self._group_by + criterion @_generative(_no_statement_condition, _no_limit_offset) def having(self, criterion): r"""apply a HAVING criterion to the query and return the newly resulting :class:`.Query`. :meth:`~.Query.having` is used in conjunction with :meth:`~.Query.group_by`. 
HAVING criterion makes it possible to use filters on aggregate functions like COUNT, SUM, AVG, MAX, and MIN, eg.:: q = session.query(User.id).\ join(User.addresses).\ group_by(User.id).\ having(func.count(Address.id) > 2) """ criterion = expression._expression_literal_as_text(criterion) if criterion is not None and \ not isinstance(criterion, sql.ClauseElement): raise sa_exc.ArgumentError( "having() argument must be of type " "sqlalchemy.sql.ClauseElement or string") criterion = self._adapt_clause(criterion, True, True) if self._having is not None: self._having = self._having & criterion else: self._having = criterion def _set_op(self, expr_fn, *q): return self._from_selectable( expr_fn(*([self] + list(q))) )._set_enable_single_crit(False) def union(self, *q): """Produce a UNION of this Query against one or more queries. e.g.:: q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar') q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo') q3 = q1.union(q2) The method accepts multiple Query objects so as to control the level of nesting. A series of ``union()`` calls such as:: x.union(y).union(z).all() will nest on each ``union()``, and produces:: SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y) UNION SELECT * FROM Z) Whereas:: x.union(y, z).all() produces:: SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION SELECT * FROM Z) Note that many database backends do not allow ORDER BY to be rendered on a query called within UNION, EXCEPT, etc. To disable all ORDER BY clauses including those configured on mappers, issue ``query.order_by(None)`` - the resulting :class:`.Query` object will not render ORDER BY within its SELECT statement. """ return self._set_op(expression.union, *q) def union_all(self, *q): """Produce a UNION ALL of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples. """ return self._set_op(expression.union_all, *q) def intersect(self, *q): """Produce an INTERSECT of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples. """ return self._set_op(expression.intersect, *q) def intersect_all(self, *q): """Produce an INTERSECT ALL of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples. """ return self._set_op(expression.intersect_all, *q) def except_(self, *q): """Produce an EXCEPT of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples. """ return self._set_op(expression.except_, *q) def except_all(self, *q): """Produce an EXCEPT ALL of this Query against one or more queries. Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See that method for usage examples. """ return self._set_op(expression.except_all, *q) def join(self, *props, **kwargs): r"""Create a SQL JOIN against this :class:`.Query` object's criterion and apply generatively, returning the newly resulting :class:`.Query`. **Simple Relationship Joins** Consider a mapping between two classes ``User`` and ``Address``, with a relationship ``User.addresses`` representing a collection of ``Address`` objects associated with each ``User``. 
The most common usage of :meth:`~.Query.join` is to create a JOIN along this relationship, using the ``User.addresses`` attribute as an indicator for how this should occur:: q = session.query(User).join(User.addresses) Where above, the call to :meth:`~.Query.join` along ``User.addresses`` will result in SQL equivalent to:: SELECT user.* FROM user JOIN address ON user.id = address.user_id In the above example we refer to ``User.addresses`` as passed to :meth:`~.Query.join` as the *on clause*, that is, it indicates how the "ON" portion of the JOIN should be constructed. For a single-entity query such as the one above (i.e. we start by selecting only from ``User`` and nothing else), the relationship can also be specified by its string name:: q = session.query(User).join("addresses") :meth:`~.Query.join` can also accommodate multiple "on clause" arguments to produce a chain of joins, such as below where a join across four related entities is constructed:: q = session.query(User).join("orders", "items", "keywords") The above would be shorthand for three separate calls to :meth:`~.Query.join`, each using an explicit attribute to indicate the source entity:: q = session.query(User).\ join(User.orders).\ join(Order.items).\ join(Item.keywords) **Joins to a Target Entity or Selectable** A second form of :meth:`~.Query.join` allows any mapped entity or core selectable construct as a target. In this usage, :meth:`~.Query.join` will attempt to create a JOIN along the natural foreign key relationship between two entities:: q = session.query(User).join(Address) The above calling form of :meth:`~.Query.join` will raise an error if either there are no foreign keys between the two entities, or if there are multiple foreign key linkages between them. In the above calling form, :meth:`~.Query.join` is called upon to create the "on clause" automatically for us. The target can be any mapped entity or selectable, such as a :class:`.Table`:: q = session.query(User).join(addresses_table) **Joins to a Target with an ON Clause** The third calling form allows both the target entity as well as the ON clause to be passed explicitly. Suppose for example we wanted to join to ``Address`` twice, using an alias the second time. We use :func:`~sqlalchemy.orm.aliased` to create a distinct alias of ``Address``, and join to it using the ``target, onclause`` form, so that the alias can be specified explicitly as the target along with the relationship to instruct how the ON clause should proceed:: a_alias = aliased(Address) q = session.query(User).\ join(User.addresses).\ join(a_alias, User.addresses).\ filter(Address.email_address=='[email protected]').\ filter(a_alias.email_address=='[email protected]') Where above, the generated SQL would be similar to:: SELECT user.* FROM user JOIN address ON user.id = address.user_id JOIN address AS address_1 ON user.id=address_1.user_id WHERE address.email_address = :email_address_1 AND address_1.email_address = :email_address_2 The two-argument calling form of :meth:`~.Query.join` also allows us to construct arbitrary joins with SQL-oriented "on clause" expressions, not relying upon configured relationships at all. Any SQL expression can be passed as the ON clause when using the two-argument form, which should refer to the target entity in some way as well as an applicable source entity:: q = session.query(User).join(Address, User.id==Address.user_id) .. 
versionchanged:: 0.7 In SQLAlchemy 0.6 and earlier, the two argument form of :meth:`~.Query.join` requires the usage of a tuple: ``query(User).join((Address, User.id==Address.user_id))``\ . This calling form is accepted in 0.7 and further, though is not necessary unless multiple join conditions are passed to a single :meth:`~.Query.join` call, which itself is also not generally necessary as it is now equivalent to multiple calls (this wasn't always the case). **Advanced Join Targeting and Adaption** There is a lot of flexibility in what the "target" can be when using :meth:`~.Query.join`. As noted previously, it also accepts :class:`.Table` constructs and other selectables such as :func:`.alias` and :func:`.select` constructs, with either the one or two-argument forms:: addresses_q = select([Address.user_id]).\ where(Address.email_address.endswith("@bar.com")).\ alias() q = session.query(User).\ join(addresses_q, addresses_q.c.user_id==User.id) :meth:`~.Query.join` also features the ability to *adapt* a :meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target selectable. Below we construct a JOIN from ``User`` to a subquery against ``Address``, allowing the relationship denoted by ``User.addresses`` to *adapt* itself to the altered target:: address_subq = session.query(Address).\ filter(Address.email_address == '[email protected]').\ subquery() q = session.query(User).join(address_subq, User.addresses) Producing SQL similar to:: SELECT user.* FROM user JOIN ( SELECT address.id AS id, address.user_id AS user_id, address.email_address AS email_address FROM address WHERE address.email_address = :email_address_1 ) AS anon_1 ON user.id = anon_1.user_id The above form allows one to fall back onto an explicit ON clause at any time:: q = session.query(User).\ join(address_subq, User.id==address_subq.c.user_id) **Controlling what to Join From** While :meth:`~.Query.join` exclusively deals with the "right" side of the JOIN, we can also control the "left" side, in those cases where it's needed, using :meth:`~.Query.select_from`. Below we construct a query against ``Address`` but can still make usage of ``User.addresses`` as our ON clause by instructing the :class:`.Query` to select first from the ``User`` entity:: q = session.query(Address).select_from(User).\ join(User.addresses).\ filter(User.name == 'ed') Which will produce SQL similar to:: SELECT address.* FROM user JOIN address ON user.id=address.user_id WHERE user.name = :name_1 **Constructing Aliases Anonymously** :meth:`~.Query.join` can construct anonymous aliases using the ``aliased=True`` flag. This feature is useful when a query is being joined algorithmically, such as when querying self-referentially to an arbitrary depth:: q = session.query(Node).\ join("children", "children", aliased=True) When ``aliased=True`` is used, the actual "alias" construct is not explicitly available. 
To work with it, methods such as :meth:`.Query.filter` will adapt the incoming entity to the last join point:: q = session.query(Node).\ join("children", "children", aliased=True).\ filter(Node.name == 'grandchild 1') When using automatic aliasing, the ``from_joinpoint=True`` argument can allow a multi-node join to be broken into multiple calls to :meth:`~.Query.join`, so that each path along the way can be further filtered:: q = session.query(Node).\ join("children", aliased=True).\ filter(Node.name='child 1').\ join("children", aliased=True, from_joinpoint=True).\ filter(Node.name == 'grandchild 1') The filtering aliases above can then be reset back to the original ``Node`` entity using :meth:`~.Query.reset_joinpoint`:: q = session.query(Node).\ join("children", "children", aliased=True).\ filter(Node.name == 'grandchild 1').\ reset_joinpoint().\ filter(Node.name == 'parent 1) For an example of ``aliased=True``, see the distribution example :ref:`examples_xmlpersistence` which illustrates an XPath-like query system using algorithmic joins. :param \*props: A collection of one or more join conditions, each consisting of a relationship-bound attribute or string relationship name representing an "on clause", or a single target entity, or a tuple in the form of ``(target, onclause)``. A special two-argument calling form of the form ``target, onclause`` is also accepted. :param aliased=False: If True, indicate that the JOIN target should be anonymously aliased. Subsequent calls to :meth:`~.Query.filter` and similar will adapt the incoming criterion to the target alias, until :meth:`~.Query.reset_joinpoint` is called. :param isouter=False: If True, the join used will be a left outer join, just as if the :meth:`.Query.outerjoin` method were called. This flag is here to maintain consistency with the same flag as accepted by :meth:`.FromClause.join` and other Core constructs. .. versionadded:: 1.0.0 :param full=False: render FULL OUTER JOIN; implies ``isouter``. .. versionadded:: 1.1 :param from_joinpoint=False: When using ``aliased=True``, a setting of True here will cause the join to be from the most recent joined target, rather than starting back from the original FROM clauses of the query. .. seealso:: :ref:`ormtutorial_joins` in the ORM tutorial. :ref:`inheritance_toplevel` for details on how :meth:`~.Query.join` is used for inheritance relationships. :func:`.orm.join` - a standalone ORM-level join function, used internally by :meth:`.Query.join`, which in previous SQLAlchemy versions was the primary ORM-level joining interface. """ aliased, from_joinpoint, isouter, full = kwargs.pop('aliased', False),\ kwargs.pop('from_joinpoint', False),\ kwargs.pop('isouter', False),\ kwargs.pop('full', False) if kwargs: raise TypeError("unknown arguments: %s" % ', '.join(sorted(kwargs))) return self._join(props, outerjoin=isouter, full=full, create_aliases=aliased, from_joinpoint=from_joinpoint) def outerjoin(self, *props, **kwargs): """Create a left outer join against this ``Query`` object's criterion and apply generatively, returning the newly resulting ``Query``. Usage is the same as the ``join()`` method. 
""" aliased, from_joinpoint, full = kwargs.pop('aliased', False), \ kwargs.pop('from_joinpoint', False), \ kwargs.pop('full', False) if kwargs: raise TypeError("unknown arguments: %s" % ', '.join(sorted(kwargs))) return self._join(props, outerjoin=True, full=full, create_aliases=aliased, from_joinpoint=from_joinpoint) def _update_joinpoint(self, jp): self._joinpoint = jp # copy backwards to the root of the _joinpath # dict, so that no existing dict in the path is mutated while 'prev' in jp: f, prev = jp['prev'] prev = prev.copy() prev[f] = jp jp['prev'] = (f, prev) jp = prev self._joinpath = jp @_generative(_no_statement_condition, _no_limit_offset) def _join(self, keys, outerjoin, full, create_aliases, from_joinpoint): """consumes arguments from join() or outerjoin(), places them into a consistent format with which to form the actual JOIN constructs. """ if not from_joinpoint: self._reset_joinpoint() if len(keys) == 2 and \ isinstance(keys[0], (expression.FromClause, type, AliasedClass)) and \ isinstance(keys[1], (str, expression.ClauseElement, interfaces.PropComparator)): # detect 2-arg form of join and # convert to a tuple. keys = (keys,) keylist = util.to_list(keys) for idx, arg1 in enumerate(keylist): if isinstance(arg1, tuple): # "tuple" form of join, multiple # tuples are accepted as well. The simpler # "2-arg" form is preferred. May deprecate # the "tuple" usage. arg1, arg2 = arg1 else: arg2 = None # determine onclause/right_entity. there # is a little bit of legacy behavior still at work here # which means they might be in either order. may possibly # lock this down to (right_entity, onclause) in 0.6. if isinstance( arg1, (interfaces.PropComparator, util.string_types)): right_entity, onclause = arg2, arg1 else: right_entity, onclause = arg1, arg2 left_entity = prop = None if isinstance(onclause, interfaces.PropComparator): of_type = getattr(onclause, '_of_type', None) else: of_type = None if isinstance(onclause, util.string_types): left_entity = self._joinpoint_zero() descriptor = _entity_descriptor(left_entity, onclause) onclause = descriptor # check for q.join(Class.propname, from_joinpoint=True) # and Class is that of the current joinpoint elif from_joinpoint and \ isinstance(onclause, interfaces.PropComparator): left_entity = onclause._parententity info = inspect(self._joinpoint_zero()) left_mapper, left_selectable, left_is_aliased = \ getattr(info, 'mapper', None), \ info.selectable, \ getattr(info, 'is_aliased_class', None) if left_mapper is left_entity: left_entity = self._joinpoint_zero() descriptor = _entity_descriptor(left_entity, onclause.key) onclause = descriptor if isinstance(onclause, interfaces.PropComparator): if right_entity is None: if of_type: right_entity = of_type else: right_entity = onclause.property.mapper left_entity = onclause._parententity prop = onclause.property if not isinstance(onclause, attributes.QueryableAttribute): onclause = prop if not create_aliases: # check for this path already present. # don't render in that case. edge = (left_entity, right_entity, prop.key) if edge in self._joinpoint: # The child's prev reference might be stale -- # it could point to a parent older than the # current joinpoint. If this is the case, # then we need to update it and then fix the # tree's spine with _update_joinpoint. Copy # and then mutate the child, which might be # shared by a different query object. 
jp = self._joinpoint[edge].copy() jp['prev'] = (edge, self._joinpoint) self._update_joinpoint(jp) if idx == len(keylist) - 1: util.warn( "Pathed join target %s has already " "been joined to; skipping" % prop) continue elif onclause is not None and right_entity is None: # TODO: no coverage here raise NotImplementedError("query.join(a==b) not supported.") self._join_left_to_right( left_entity, right_entity, onclause, outerjoin, full, create_aliases, prop) def _join_left_to_right(self, left, right, onclause, outerjoin, full, create_aliases, prop): """append a JOIN to the query's from clause.""" self._polymorphic_adapters = self._polymorphic_adapters.copy() if left is None: if self._from_obj: left = self._from_obj[0] elif self._entities: left = self._entities[0].entity_zero_or_selectable if left is None: if self._entities: problem = "Don't know how to join from %s" % self._entities[0] else: problem = "No entities to join from" raise sa_exc.InvalidRequestError( "%s; please use " "select_from() to establish the left " "entity/selectable of this join" % problem) if left is right and \ not create_aliases: raise sa_exc.InvalidRequestError( "Can't construct a join from %s to %s, they " "are the same entity" % (left, right)) l_info = inspect(left) r_info = inspect(right) overlap = False if not create_aliases: right_mapper = getattr(r_info, "mapper", None) # if the target is a joined inheritance mapping, # be more liberal about auto-aliasing. if right_mapper and ( right_mapper.with_polymorphic or isinstance(right_mapper.mapped_table, expression.Join) ): for from_obj in self._from_obj or [l_info.selectable]: if sql_util.selectables_overlap( l_info.selectable, from_obj) and \ sql_util.selectables_overlap( from_obj, r_info.selectable): overlap = True break if (overlap or not create_aliases) and \ l_info.selectable is r_info.selectable: raise sa_exc.InvalidRequestError( "Can't join table/selectable '%s' to itself" % l_info.selectable) right, onclause = self._prepare_right_side( r_info, right, onclause, create_aliases, prop, overlap) # if joining on a MapperProperty path, # track the path to prevent redundant joins if not create_aliases and prop: self._update_joinpoint({ '_joinpoint_entity': right, 'prev': ((left, right, prop.key), self._joinpoint) }) else: self._joinpoint = {'_joinpoint_entity': right} self._join_to_left(l_info, left, right, onclause, outerjoin, full) def _prepare_right_side(self, r_info, right, onclause, create_aliases, prop, overlap): info = r_info right_mapper, right_selectable, right_is_aliased = \ getattr(info, 'mapper', None), \ info.selectable, \ getattr(info, 'is_aliased_class', False) if right_mapper: self._join_entities += (info, ) if right_mapper and prop and \ not right_mapper.common_parent(prop.mapper): raise sa_exc.InvalidRequestError( "Join target %s does not correspond to " "the right side of join condition %s" % (right, onclause) ) if not right_mapper and prop: right_mapper = prop.mapper need_adapter = False if right_mapper and right is right_selectable: if not right_selectable.is_derived_from( right_mapper.mapped_table): raise sa_exc.InvalidRequestError( "Selectable '%s' is not derived from '%s'" % (right_selectable.description, right_mapper.mapped_table.description)) if isinstance(right_selectable, expression.SelectBase): # TODO: this isn't even covered now! 
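                # a plain SELECT cannot be used directly as a JOIN target;
                # wrap it in an alias() so it acts as a named subquery, and
                # flag that subsequent criteria must be adapted to that alias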
right_selectable = right_selectable.alias() need_adapter = True right = aliased(right_mapper, right_selectable) aliased_entity = right_mapper and \ not right_is_aliased and \ ( right_mapper.with_polymorphic and isinstance( right_mapper._with_polymorphic_selectable, expression.Alias) or overlap # test for overlap: # orm/inheritance/relationships.py # SelfReferentialM2MTest ) if not need_adapter and (create_aliases or aliased_entity): right = aliased(right, flat=True) need_adapter = True # if an alias() of the right side was generated here, # apply an adapter to all subsequent filter() calls # until reset_joinpoint() is called. if need_adapter: self._filter_aliases = ORMAdapter( right, equivalents=right_mapper and right_mapper._equivalent_columns or {}, chain_to=self._filter_aliases) # if the onclause is a ClauseElement, adapt it with any # adapters that are in place right now if isinstance(onclause, expression.ClauseElement): onclause = self._adapt_clause(onclause, True, True) # if an alias() on the right side was generated, # which is intended to wrap a the right side in a subquery, # ensure that columns retrieved from this target in the result # set are also adapted. if aliased_entity and not create_aliases: self._mapper_loads_polymorphically_with( right_mapper, ORMAdapter( right, equivalents=right_mapper._equivalent_columns ) ) return right, onclause def _join_to_left(self, l_info, left, right, onclause, outerjoin, full): info = l_info left_mapper = getattr(info, 'mapper', None) left_selectable = info.selectable if self._from_obj: replace_clause_index, clause = sql_util.find_join_source( self._from_obj, left_selectable) if clause is not None: try: clause = orm_join(clause, right, onclause, isouter=outerjoin, full=full) except sa_exc.ArgumentError as ae: raise sa_exc.InvalidRequestError( "Could not find a FROM clause to join from. " "Tried joining to %s, but got: %s" % (right, ae)) self._from_obj = \ self._from_obj[:replace_clause_index] + \ (clause, ) + \ self._from_obj[replace_clause_index + 1:] return if left_mapper: for ent in self._entities: if ent.corresponds_to(left): clause = ent.selectable break else: clause = left else: clause = left_selectable assert clause is not None try: clause = orm_join( clause, right, onclause, isouter=outerjoin, full=full) except sa_exc.ArgumentError as ae: raise sa_exc.InvalidRequestError( "Could not find a FROM clause to join from. " "Tried joining to %s, but got: %s" % (right, ae)) self._from_obj = self._from_obj + (clause,) def _reset_joinpoint(self): self._joinpoint = self._joinpath self._filter_aliases = None @_generative(_no_statement_condition) def reset_joinpoint(self): """Return a new :class:`.Query`, where the "join point" has been reset back to the base FROM entities of the query. This method is usually used in conjunction with the ``aliased=True`` feature of the :meth:`~.Query.join` method. See the example in :meth:`~.Query.join` for how this is used. """ self._reset_joinpoint() @_generative(_no_clauseelement_condition) def select_from(self, *from_obj): r"""Set the FROM clause of this :class:`.Query` explicitly. :meth:`.Query.select_from` is often used in conjunction with :meth:`.Query.join` in order to control which entity is selected from on the "left" side of the join. 
The entity or selectable object here effectively replaces the "left edge" of any calls to :meth:`~.Query.join`, when no joinpoint is otherwise established - usually, the default "join point" is the leftmost entity in the :class:`~.Query` object's list of entities to be selected. A typical example:: q = session.query(Address).select_from(User).\ join(User.addresses).\ filter(User.name == 'ed') Which produces SQL equivalent to:: SELECT address.* FROM user JOIN address ON user.id=address.user_id WHERE user.name = :name_1 :param \*from_obj: collection of one or more entities to apply to the FROM clause. Entities can be mapped classes, :class:`.AliasedClass` objects, :class:`.Mapper` objects as well as core :class:`.FromClause` elements like subqueries. .. versionchanged:: 0.9 This method no longer applies the given FROM object to be the selectable from which matching entities select from; the :meth:`.select_entity_from` method now accomplishes this. See that method for a description of this behavior. .. seealso:: :meth:`~.Query.join` :meth:`.Query.select_entity_from` """ self._set_select_from(from_obj, False) @_generative(_no_clauseelement_condition) def select_entity_from(self, from_obj): r"""Set the FROM clause of this :class:`.Query` to a core selectable, applying it as a replacement FROM clause for corresponding mapped entities. The :meth:`.Query.select_entity_from` method supplies an alternative approach to the use case of applying an :func:`.aliased` construct explicitly throughout a query. Instead of referring to the :func:`.aliased` construct explicitly, :meth:`.Query.select_entity_from` automatically *adapts* all occurences of the entity to the target selectable. Given a case for :func:`.aliased` such as selecting ``User`` objects from a SELECT statement:: select_stmt = select([User]).where(User.id == 7) user_alias = aliased(User, select_stmt) q = session.query(user_alias).\ filter(user_alias.name == 'ed') Above, we apply the ``user_alias`` object explicitly throughout the query. When it's not feasible for ``user_alias`` to be referenced explicitly in many places, :meth:`.Query.select_entity_from` may be used at the start of the query to adapt the existing ``User`` entity:: q = session.query(User).\ select_entity_from(select_stmt).\ filter(User.name == 'ed') Above, the generated SQL will show that the ``User`` entity is adapted to our statement, even in the case of the WHERE clause: .. sourcecode:: sql SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM (SELECT "user".id AS id, "user".name AS name FROM "user" WHERE "user".id = :id_1) AS anon_1 WHERE anon_1.name = :name_1 The :meth:`.Query.select_entity_from` method is similar to the :meth:`.Query.select_from` method, in that it sets the FROM clause of the query. The difference is that it additionally applies adaptation to the other parts of the query that refer to the primary entity. If above we had used :meth:`.Query.select_from` instead, the SQL generated would have been: .. sourcecode:: sql -- uses plain select_from(), not select_entity_from() SELECT "user".id AS user_id, "user".name AS user_name FROM "user", (SELECT "user".id AS id, "user".name AS name FROM "user" WHERE "user".id = :id_1) AS anon_1 WHERE "user".name = :name_1 To supply textual SQL to the :meth:`.Query.select_entity_from` method, we can make use of the :func:`.text` construct. 
However, the :func:`.text` construct needs to be aligned with the columns of our entity, which is achieved by making use of the :meth:`.TextClause.columns` method:: text_stmt = text("select id, name from user").columns( User.id, User.name) q = session.query(User).select_entity_from(text_stmt) :meth:`.Query.select_entity_from` itself accepts an :func:`.aliased` object, so that the special options of :func:`.aliased` such as :paramref:`.aliased.adapt_on_names` may be used within the scope of the :meth:`.Query.select_entity_from` method's adaptation services. Suppose a view ``user_view`` also returns rows from ``user``. If we reflect this view into a :class:`.Table`, this view has no relationship to the :class:`.Table` to which we are mapped, however we can use name matching to select from it:: user_view = Table('user_view', metadata, autoload_with=engine) user_view_alias = aliased( User, user_view, adapt_on_names=True) q = session.query(User).\ select_entity_from(user_view_alias).\ order_by(User.name) .. versionchanged:: 1.1.7 The :meth:`.Query.select_entity_from` method now accepts an :func:`.aliased` object as an alternative to a :class:`.FromClause` object. :param from_obj: a :class:`.FromClause` object that will replace the FROM clause of this :class:`.Query`. It also may be an instance of :func:`.aliased`. .. seealso:: :meth:`.Query.select_from` """ self._set_select_from([from_obj], True) def __getitem__(self, item): if isinstance(item, slice): start, stop, step = util.decode_slice(item) if isinstance(stop, int) and \ isinstance(start, int) and \ stop - start <= 0: return [] # perhaps we should execute a count() here so that we # can still use LIMIT/OFFSET ? elif (isinstance(start, int) and start < 0) \ or (isinstance(stop, int) and stop < 0): return list(self)[item] res = self.slice(start, stop) if step is not None: return list(res)[None:None:item.step] else: return list(res) else: if item == -1: return list(self)[-1] else: return list(self[item:item + 1])[0] @_generative(_no_statement_condition) def slice(self, start, stop): """Computes the "slice" of the :class:`.Query` represented by the given indices and returns the resulting :class:`.Query`. The start and stop indices behave like the argument to Python's built-in :func:`range` function. This method provides an alternative to using ``LIMIT``/``OFFSET`` to get a slice of the query. For example, :: session.query(User).order_by(User.id).slice(1, 3) renders as .. sourcecode:: sql SELECT users.id AS users_id, users.name AS users_name FROM users ORDER BY users.id LIMIT ? OFFSET ? (2, 1) .. seealso:: :meth:`.Query.limit` :meth:`.Query.offset` """ if start is not None and stop is not None: self._offset = (self._offset or 0) + start self._limit = stop - start elif start is None and stop is not None: self._limit = stop elif start is not None and stop is None: self._offset = (self._offset or 0) + start if self._offset == 0: self._offset = None @_generative(_no_statement_condition) def limit(self, limit): """Apply a ``LIMIT`` to the query and return the newly resulting ``Query``. """ self._limit = limit @_generative(_no_statement_condition) def offset(self, offset): """Apply an ``OFFSET`` to the query and return the newly resulting ``Query``. """ self._offset = offset @_generative(_no_statement_condition) def distinct(self, *criterion): r"""Apply a ``DISTINCT`` to the query and return the newly resulting ``Query``. .. 
note:: The :meth:`.distinct` call includes logic that will automatically add columns from the ORDER BY of the query to the columns clause of the SELECT statement, to satisfy the common need of the database backend that ORDER BY columns be part of the SELECT list when DISTINCT is used. These columns *are not* added to the list of columns actually fetched by the :class:`.Query`, however, so would not affect results. The columns are passed through when using the :attr:`.Query.statement` accessor, however. :param \*expr: optional column expressions. When present, the PostgreSQL dialect will render a ``DISTINCT ON (<expressions>>)`` construct. """ if not criterion: self._distinct = True else: criterion = self._adapt_col_list(criterion) if isinstance(self._distinct, list): self._distinct += criterion else: self._distinct = criterion @_generative() def prefix_with(self, *prefixes): r"""Apply the prefixes to the query and return the newly resulting ``Query``. :param \*prefixes: optional prefixes, typically strings, not using any commas. In particular is useful for MySQL keywords. e.g.:: query = sess.query(User.name).\ prefix_with('HIGH_PRIORITY').\ prefix_with('SQL_SMALL_RESULT', 'ALL') Would render:: SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name FROM users .. versionadded:: 0.7.7 .. seealso:: :meth:`.HasPrefixes.prefix_with` """ if self._prefixes: self._prefixes += prefixes else: self._prefixes = prefixes @_generative() def suffix_with(self, *suffixes): r"""Apply the suffix to the query and return the newly resulting ``Query``. :param \*suffixes: optional suffixes, typically strings, not using any commas. .. versionadded:: 1.0.0 .. seealso:: :meth:`.Query.prefix_with` :meth:`.HasSuffixes.suffix_with` """ if self._suffixes: self._suffixes += suffixes else: self._suffixes = suffixes def all(self): """Return the results represented by this ``Query`` as a list. This results in an execution of the underlying query. """ return list(self) @_generative(_no_clauseelement_condition) def from_statement(self, statement): """Execute the given SELECT statement and return results. This method bypasses all internal statement compilation, and the statement is executed without modification. The statement is typically either a :func:`~.expression.text` or :func:`~.expression.select` construct, and should return the set of columns appropriate to the entity class represented by this :class:`.Query`. .. seealso:: :ref:`orm_tutorial_literal_sql` - usage examples in the ORM tutorial """ statement = expression._expression_literal_as_text(statement) if not isinstance(statement, (expression.TextClause, expression.SelectBase)): raise sa_exc.ArgumentError( "from_statement accepts text(), select(), " "and union() objects only.") self._statement = statement def first(self): """Return the first result of this ``Query`` or None if the result doesn't contain any row. first() applies a limit of one within the generated SQL, so that only one primary entity row is generated on the server side (note this may consist of multiple result rows if join-loaded collections are present). Calling :meth:`.Query.first` results in an execution of the underlying query. .. seealso:: :meth:`.Query.one` :meth:`.Query.one_or_none` """ if self._statement is not None: ret = list(self)[0:1] else: ret = list(self[0:1]) if len(ret) > 0: return ret[0] else: return None def one_or_none(self): """Return at most one result or raise an exception. Returns ``None`` if the query selects no rows. 
Raises ``sqlalchemy.orm.exc.MultipleResultsFound`` if multiple object identities are returned, or if multiple rows are returned for a query that returns only scalar values as opposed to full identity-mapped entities. Calling :meth:`.Query.one_or_none` results in an execution of the underlying query. .. versionadded:: 1.0.9 Added :meth:`.Query.one_or_none` .. seealso:: :meth:`.Query.first` :meth:`.Query.one` """ ret = list(self) l = len(ret) if l == 1: return ret[0] elif l == 0: return None else: raise orm_exc.MultipleResultsFound( "Multiple rows were found for one_or_none()") def one(self): """Return exactly one result or raise an exception. Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects no rows. Raises ``sqlalchemy.orm.exc.MultipleResultsFound`` if multiple object identities are returned, or if multiple rows are returned for a query that returns only scalar values as opposed to full identity-mapped entities. Calling :meth:`.one` results in an execution of the underlying query. .. seealso:: :meth:`.Query.first` :meth:`.Query.one_or_none` """ try: ret = self.one_or_none() except orm_exc.MultipleResultsFound: raise orm_exc.MultipleResultsFound( "Multiple rows were found for one()") else: if ret is None: raise orm_exc.NoResultFound("No row was found for one()") return ret def scalar(self): """Return the first element of the first result or None if no rows present. If multiple rows are returned, raises MultipleResultsFound. >>> session.query(Item).scalar() <Item> >>> session.query(Item.id).scalar() 1 >>> session.query(Item.id).filter(Item.id < 0).scalar() None >>> session.query(Item.id, Item.name).scalar() 1 >>> session.query(func.count(Parent.id)).scalar() 20 This results in an execution of the underlying query. """ try: ret = self.one() if not isinstance(ret, tuple): return ret return ret[0] except orm_exc.NoResultFound: return None def __iter__(self): context = self._compile_context() context.statement.use_labels = True if self._autoflush and not self._populate_existing: self.session._autoflush() return self._execute_and_instances(context) def __str__(self): context = self._compile_context() try: bind = self._get_bind_args( context, self.session.get_bind) if self.session else None except sa_exc.UnboundExecutionError: bind = None return str(context.statement.compile(bind)) def _connection_from_session(self, **kw): conn = self.session.connection(**kw) if self._execution_options: conn = conn.execution_options(**self._execution_options) return conn def _execute_and_instances(self, querycontext): conn = self._get_bind_args( querycontext, self._connection_from_session, close_with_result=True) result = conn.execute(querycontext.statement, self._params) return loading.instances(querycontext.query, result, querycontext) def _get_bind_args(self, querycontext, fn, **kw): return fn( mapper=self._bind_mapper(), clause=querycontext.statement, **kw ) @property def column_descriptions(self): """Return metadata about the columns which would be returned by this :class:`.Query`. 
Format is a list of dictionaries:: user_alias = aliased(User, name='user2') q = sess.query(User, User.id, user_alias) # this expression: q.column_descriptions # would return: [ { 'name':'User', 'type':User, 'aliased':False, 'expr':User, 'entity': User }, { 'name':'id', 'type':Integer(), 'aliased':False, 'expr':User.id, 'entity': User }, { 'name':'user2', 'type':User, 'aliased':True, 'expr':user_alias, 'entity': user_alias } ] """ return [ { 'name': ent._label_name, 'type': ent.type, 'aliased': getattr(insp_ent, 'is_aliased_class', False), 'expr': ent.expr, 'entity': getattr(insp_ent, "entity", None) if ent.entity_zero is not None and not insp_ent.is_clause_element else None } for ent, insp_ent in [ ( _ent, (inspect(_ent.entity_zero) if _ent.entity_zero is not None else None) ) for _ent in self._entities ] ] def instances(self, cursor, __context=None): """Given a ResultProxy cursor as returned by connection.execute(), return an ORM result as an iterator. e.g.:: result = engine.execute("select * from users") for u in session.query(User).instances(result): print u """ context = __context if context is None: context = QueryContext(self) return loading.instances(self, cursor, context) def merge_result(self, iterator, load=True): """Merge a result into this :class:`.Query` object's Session. Given an iterator returned by a :class:`.Query` of the same structure as this one, return an identical iterator of results, with all mapped instances merged into the session using :meth:`.Session.merge`. This is an optimized method which will merge all mapped instances, preserving the structure of the result rows and unmapped columns with less method overhead than that of calling :meth:`.Session.merge` explicitly for each value. The structure of the results is determined based on the column list of this :class:`.Query` - if these do not correspond, unchecked errors will occur. The 'load' argument is the same as that of :meth:`.Session.merge`. For an example of how :meth:`~.Query.merge_result` is used, see the source code for the example :ref:`examples_caching`, where :meth:`~.Query.merge_result` is used to efficiently restore state from a cache back into a target :class:`.Session`. """ return loading.merge_result(self, iterator, load) @property def _select_args(self): return { 'limit': self._limit, 'offset': self._offset, 'distinct': self._distinct, 'prefixes': self._prefixes, 'suffixes': self._suffixes, 'group_by': self._group_by or None, 'having': self._having } @property def _should_nest_selectable(self): kwargs = self._select_args return (kwargs.get('limit') is not None or kwargs.get('offset') is not None or kwargs.get('distinct', False)) def exists(self): """A convenience method that turns a query into an EXISTS subquery of the form EXISTS (SELECT 1 FROM ... WHERE ...). e.g.:: q = session.query(User).filter(User.name == 'fred') session.query(q.exists()) Producing SQL similar to:: SELECT EXISTS ( SELECT 1 FROM users WHERE users.name = :name_1 ) AS anon_1 The EXISTS construct is usually used in the WHERE clause:: session.query(User.id).filter(q.exists()).scalar() Note that some databases such as SQL Server don't allow an EXISTS expression to be present in the columns clause of a SELECT. To select a simple boolean value based on the exists as a WHERE, use :func:`.literal`:: from sqlalchemy import literal session.query(literal(True)).filter(q.exists()).scalar() .. 
versionadded:: 0.8.1 """ # .add_columns() for the case that we are a query().select_from(X), # so that ".statement" can be produced (#2995) but also without # omitting the FROM clause from a query(X) (#2818); # .with_only_columns() after we have a core select() so that # we get just "SELECT 1" without any entities. return sql.exists(self.add_columns('1').with_labels(). statement.with_only_columns([1])) def count(self): r"""Return a count of rows this Query would return. This generates the SQL for this Query as follows:: SELECT count(1) AS count_1 FROM ( SELECT <rest of query follows...> ) AS anon_1 .. versionchanged:: 0.7 The above scheme is newly refined as of 0.7b3. For fine grained control over specific columns to count, to skip the usage of a subquery or otherwise control of the FROM clause, or to use other aggregate functions, use :attr:`~sqlalchemy.sql.expression.func` expressions in conjunction with :meth:`~.Session.query`, i.e.:: from sqlalchemy import func # count User records, without # using a subquery. session.query(func.count(User.id)) # return count of user "id" grouped # by "name" session.query(func.count(User.id)).\ group_by(User.name) from sqlalchemy import distinct # count distinct "name" values session.query(func.count(distinct(User.name))) """ col = sql.func.count(sql.literal_column('*')) return self.from_self(col).scalar() def delete(self, synchronize_session='evaluate'): r"""Perform a bulk delete query. Deletes rows matched by this query from the database. E.g.:: sess.query(User).filter(User.age == 25).\ delete(synchronize_session=False) sess.query(User).filter(User.age == 25).\ delete(synchronize_session='evaluate') .. warning:: The :meth:`.Query.delete` method is a "bulk" operation, which bypasses ORM unit-of-work automation in favor of greater performance. **Please read all caveats and warnings below.** :param synchronize_session: chooses the strategy for the removal of matched objects from the session. Valid values are: ``False`` - don't synchronize the session. This option is the most efficient and is reliable once the session is expired, which typically occurs after a commit(), or explicitly using expire_all(). Before the expiration, objects may still remain in the session which were in fact deleted which can lead to confusing results if they are accessed via get() or already loaded collections. ``'fetch'`` - performs a select query before the delete to find objects that are matched by the delete query and need to be removed from the session. Matched objects are removed from the session. ``'evaluate'`` - Evaluate the query's criteria in Python straight on the objects in the session. If evaluation of the criteria isn't implemented, an error is raised. The expression evaluator currently doesn't account for differing string collations between the database and Python. :return: the count of rows matched as returned by the database's "row count" feature. .. warning:: **Additional Caveats for bulk query deletes** * This method does **not work for joined inheritance mappings**, since the **multiple table deletes are not supported by SQL** as well as that the **join condition of an inheritance mapper is not automatically rendered**. Care must be taken in any multiple-table delete to first accommodate via some other means how the related table will be deleted, as well as to explicitly include the joining condition between those tables, even in mappings where this is normally automatic. E.g. 
if a class ``Engineer`` subclasses ``Employee``, a DELETE against the ``Employee`` table would look like:: session.query(Engineer).\ filter(Engineer.id == Employee.id).\ filter(Employee.name == 'dilbert').\ delete() However the above SQL will not delete from the Engineer table, unless an ON DELETE CASCADE rule is established in the database to handle it. Short story, **do not use this method for joined inheritance mappings unless you have taken the additional steps to make this feasible**. * The polymorphic identity WHERE criteria is **not** included for single- or joined- table updates - this must be added **manually** even for single table inheritance. * The method does **not** offer in-Python cascading of relationships - it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured for any foreign key references which require it, otherwise the database may emit an integrity violation if foreign key references are being enforced. After the DELETE, dependent objects in the :class:`.Session` which were impacted by an ON DELETE may not contain the current state, or may have been deleted. This issue is resolved once the :class:`.Session` is expired, which normally occurs upon :meth:`.Session.commit` or can be forced by using :meth:`.Session.expire_all`. Accessing an expired object whose row has been deleted will invoke a SELECT to locate the row; when the row is not found, an :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. * The ``'fetch'`` strategy results in an additional SELECT statement emitted and will significantly reduce performance. * The ``'evaluate'`` strategy performs a scan of all matching objects within the :class:`.Session`; if the contents of the :class:`.Session` are expired, such as via a proceeding :meth:`.Session.commit` call, **this will result in SELECT queries emitted for every matching object**. * The :meth:`.MapperEvents.before_delete` and :meth:`.MapperEvents.after_delete` events **are not invoked** from this method. Instead, the :meth:`.SessionEvents.after_bulk_delete` method is provided to act upon a mass DELETE of entity rows. .. seealso:: :meth:`.Query.update` :ref:`inserts_and_updates` - Core SQL tutorial """ delete_op = persistence.BulkDelete.factory( self, synchronize_session) delete_op.exec_() return delete_op.rowcount def update(self, values, synchronize_session='evaluate', update_args=None): r"""Perform a bulk update query. Updates rows matched by this query in the database. E.g.:: sess.query(User).filter(User.age == 25).\ update({User.age: User.age - 10}, synchronize_session=False) sess.query(User).filter(User.age == 25).\ update({"age": User.age - 10}, synchronize_session='evaluate') .. warning:: The :meth:`.Query.update` method is a "bulk" operation, which bypasses ORM unit-of-work automation in favor of greater performance. **Please read all caveats and warnings below.** :param values: a dictionary with attributes names, or alternatively mapped attributes or SQL expressions, as keys, and literal values or sql expressions as values. If :ref:`parameter-ordered mode <updates_order_parameters>` is desired, the values can be passed as a list of 2-tuples; this requires that the :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag is passed to the :paramref:`.Query.update.update_args` dictionary as well. .. versionchanged:: 1.0.0 - string names in the values dictionary are now resolved against the mapped entity; previously, these strings were passed as literal column names with no mapper-level translation. 
:param synchronize_session: chooses the strategy to update the attributes on objects in the session. Valid values are: ``False`` - don't synchronize the session. This option is the most efficient and is reliable once the session is expired, which typically occurs after a commit(), or explicitly using expire_all(). Before the expiration, updated objects may still remain in the session with stale values on their attributes, which can lead to confusing results. ``'fetch'`` - performs a select query before the update to find objects that are matched by the update query. The updated attributes are expired on matched objects. ``'evaluate'`` - Evaluate the Query's criteria in Python straight on the objects in the session. If evaluation of the criteria isn't implemented, an exception is raised. The expression evaluator currently doesn't account for differing string collations between the database and Python. :param update_args: Optional dictionary, if present will be passed to the underlying :func:`.update` construct as the ``**kw`` for the object. May be used to pass dialect-specific arguments such as ``mysql_limit``, as well as other special arguments such as :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`. .. versionadded:: 1.0.0 :return: the count of rows matched as returned by the database's "row count" feature. .. warning:: **Additional Caveats for bulk query updates** * The method does **not** offer in-Python cascading of relationships - it is assumed that ON UPDATE CASCADE is configured for any foreign key references which require it, otherwise the database may emit an integrity violation if foreign key references are being enforced. After the UPDATE, dependent objects in the :class:`.Session` which were impacted by an ON UPDATE CASCADE may not contain the current state; this issue is resolved once the :class:`.Session` is expired, which normally occurs upon :meth:`.Session.commit` or can be forced by using :meth:`.Session.expire_all`. * The ``'fetch'`` strategy results in an additional SELECT statement emitted and will significantly reduce performance. * The ``'evaluate'`` strategy performs a scan of all matching objects within the :class:`.Session`; if the contents of the :class:`.Session` are expired, such as via a proceeding :meth:`.Session.commit` call, **this will result in SELECT queries emitted for every matching object**. * The method supports multiple table updates, as detailed in :ref:`multi_table_updates`, and this behavior does extend to support updates of joined-inheritance and other multiple table mappings. However, the **join condition of an inheritance mapper is not automatically rendered**. Care must be taken in any multiple-table update to explicitly include the joining condition between those tables, even in mappings where this is normally automatic. E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of the ``Engineer`` local table using criteria against the ``Employee`` local table might look like:: session.query(Engineer).\ filter(Engineer.id == Employee.id).\ filter(Employee.name == 'dilbert').\ update({"engineer_type": "programmer"}) * The polymorphic identity WHERE criteria is **not** included for single- or joined- table updates - this must be added **manually**, even for single table inheritance. * The :meth:`.MapperEvents.before_update` and :meth:`.MapperEvents.after_update` events **are not invoked from this method**. 
Instead, the :meth:`.SessionEvents.after_bulk_update` method is provided to act upon a mass UPDATE of entity rows. .. seealso:: :meth:`.Query.delete` :ref:`inserts_and_updates` - Core SQL tutorial """ update_args = update_args or {} update_op = persistence.BulkUpdate.factory( self, synchronize_session, values, update_args) update_op.exec_() return update_op.rowcount def _compile_context(self, labels=True): if self.dispatch.before_compile: for fn in self.dispatch.before_compile: new_query = fn(self) if new_query is not None: self = new_query context = QueryContext(self) if context.statement is not None: return context context.labels = labels context._for_update_arg = self._for_update_arg for entity in self._entities: entity.setup_context(self, context) for rec in context.create_eager_joins: strategy = rec[0] strategy(*rec[1:]) if context.from_clause: # "load from explicit FROMs" mode, # i.e. when select_from() or join() is used context.froms = list(context.from_clause) # else "load from discrete FROMs" mode, # i.e. when each _MappedEntity has its own FROM if self._enable_single_crit: self._adjust_for_single_inheritance(context) if not context.primary_columns: if self._only_load_props: raise sa_exc.InvalidRequestError( "No column-based properties specified for " "refresh operation. Use session.expire() " "to reload collections and related items.") else: raise sa_exc.InvalidRequestError( "Query contains no columns with which to " "SELECT from.") if context.multi_row_eager_loaders and self._should_nest_selectable: context.statement = self._compound_eager_statement(context) else: context.statement = self._simple_statement(context) return context def _compound_eager_statement(self, context): # for eager joins present and LIMIT/OFFSET/DISTINCT, # wrap the query inside a select, # then append eager joins onto that if context.order_by: order_by_col_expr = \ sql_util.expand_column_list_from_order_by( context.primary_columns, context.order_by ) else: context.order_by = None order_by_col_expr = [] inner = sql.select( context.primary_columns + order_by_col_expr, context.whereclause, from_obj=context.froms, use_labels=context.labels, # TODO: this order_by is only needed if # LIMIT/OFFSET is present in self._select_args, # else the application on the outside is enough order_by=context.order_by, **self._select_args ) for hint in self._with_hints: inner = inner.with_hint(*hint) if self._correlate: inner = inner.correlate(*self._correlate) inner = inner.alias() equivs = self.__all_equivs() context.adapter = sql_util.ColumnAdapter(inner, equivs) statement = sql.select( [inner] + context.secondary_columns, use_labels=context.labels) statement._for_update_arg = context._for_update_arg from_clause = inner for eager_join in context.eager_joins.values(): # EagerLoader places a 'stop_on' attribute on the join, # giving us a marker as to where the "splice point" of # the join should be from_clause = sql_util.splice_joins( from_clause, eager_join, eager_join.stop_on) statement.append_from(from_clause) if context.order_by: statement.append_order_by( *context.adapter.copy_and_process( context.order_by ) ) statement.append_order_by(*context.eager_order_by) return statement def _simple_statement(self, context): if not context.order_by: context.order_by = None if self._distinct is True and context.order_by: context.primary_columns += \ sql_util.expand_column_list_from_order_by( context.primary_columns, context.order_by ) context.froms += tuple(context.eager_joins.values()) statement = sql.select( context.primary_columns 
+ context.secondary_columns, context.whereclause, from_obj=context.froms, use_labels=context.labels, order_by=context.order_by, **self._select_args ) statement._for_update_arg = context._for_update_arg for hint in self._with_hints: statement = statement.with_hint(*hint) if self._correlate: statement = statement.correlate(*self._correlate) if context.eager_order_by: statement.append_order_by(*context.eager_order_by) return statement def _adjust_for_single_inheritance(self, context): """Apply single-table-inheritance filtering. For all distinct single-table-inheritance mappers represented in the columns clause of this query, add criterion to the WHERE clause of the given QueryContext such that only the appropriate subtypes are selected from the total results. """ for (ext_info, adapter) in set(self._mapper_adapter_map.values()): if ext_info in self._join_entities: continue single_crit = ext_info.mapper._single_table_criterion if single_crit is not None: if adapter: single_crit = adapter.traverse(single_crit) single_crit = self._adapt_clause(single_crit, False, False) context.whereclause = sql.and_( sql.True_._ifnone(context.whereclause), single_crit) from ..sql.selectable import ForUpdateArg class LockmodeArg(ForUpdateArg): @classmethod def parse_legacy_query(self, mode): if mode in (None, False): return None if mode == "read": read = True nowait = False elif mode == "update": read = nowait = False elif mode == "update_nowait": nowait = True read = False else: raise sa_exc.ArgumentError( "Unknown with_lockmode argument: %r" % mode) return LockmodeArg(read=read, nowait=nowait) class _QueryEntity(object): """represent an entity column returned within a Query result.""" def __new__(cls, *args, **kwargs): if cls is _QueryEntity: entity = args[1] if not isinstance(entity, util.string_types) and \ _is_mapped_class(entity): cls = _MapperEntity elif isinstance(entity, Bundle): cls = _BundleEntity else: cls = _ColumnEntity return object.__new__(cls) def _clone(self): q = self.__class__.__new__(self.__class__) q.__dict__ = self.__dict__.copy() return q class _MapperEntity(_QueryEntity): """mapper/class/AliasedClass entity""" def __init__(self, query, entity): if not query._primary_entity: query._primary_entity = self query._entities.append(self) query._has_mapper_entities = True self.entities = [entity] self.expr = entity supports_single_entity = True use_id_for_hash = True def setup_entity(self, ext_info, aliased_adapter): self.mapper = ext_info.mapper self.aliased_adapter = aliased_adapter self.selectable = ext_info.selectable self.is_aliased_class = ext_info.is_aliased_class self._with_polymorphic = ext_info.with_polymorphic_mappers self._polymorphic_discriminator = \ ext_info.polymorphic_on self.entity_zero = ext_info if ext_info.is_aliased_class: self._label_name = self.entity_zero.name else: self._label_name = self.mapper.class_.__name__ self.path = self.entity_zero._path_registry def set_with_polymorphic(self, query, cls_or_mappers, selectable, polymorphic_on): """Receive an update from a call to query.with_polymorphic(). Note the newer style of using a free standing with_polymporphic() construct doesn't make use of this method. """ if self.is_aliased_class: # TODO: invalidrequest ? 
raise NotImplementedError( "Can't use with_polymorphic() against " "an Aliased object" ) if cls_or_mappers is None: query._reset_polymorphic_adapter(self.mapper) return mappers, from_obj = self.mapper._with_polymorphic_args( cls_or_mappers, selectable) self._with_polymorphic = mappers self._polymorphic_discriminator = polymorphic_on self.selectable = from_obj query._mapper_loads_polymorphically_with( self.mapper, sql_util.ColumnAdapter( from_obj, self.mapper._equivalent_columns)) @property def type(self): return self.mapper.class_ @property def entity_zero_or_selectable(self): return self.entity_zero def corresponds_to(self, entity): if entity.is_aliased_class: if self.is_aliased_class: if entity._base_alias is self.entity_zero._base_alias: return True return False elif self.is_aliased_class: if self.entity_zero._use_mapper_path: return entity in self._with_polymorphic else: return entity is self.entity_zero return entity.common_parent(self.entity_zero) def adapt_to_selectable(self, query, sel): query._entities.append(self) def _get_entity_clauses(self, query, context): adapter = None if not self.is_aliased_class: if query._polymorphic_adapters: adapter = query._polymorphic_adapters.get(self.mapper, None) else: adapter = self.aliased_adapter if adapter: if query._from_obj_alias: ret = adapter.wrap(query._from_obj_alias) else: ret = adapter else: ret = query._from_obj_alias return ret def row_processor(self, query, context, result): adapter = self._get_entity_clauses(query, context) if context.adapter and adapter: adapter = adapter.wrap(context.adapter) elif not adapter: adapter = context.adapter # polymorphic mappers which have concrete tables in # their hierarchy usually # require row aliasing unconditionally. if not adapter and self.mapper._requires_row_aliasing: adapter = sql_util.ColumnAdapter( self.selectable, self.mapper._equivalent_columns) if query._primary_entity is self: only_load_props = query._only_load_props refresh_state = context.refresh_state else: only_load_props = refresh_state = None _instance = loading._instance_processor( self.mapper, context, result, self.path, adapter, only_load_props=only_load_props, refresh_state=refresh_state, polymorphic_discriminator=self._polymorphic_discriminator ) return _instance, self._label_name def setup_context(self, query, context): adapter = self._get_entity_clauses(query, context) # if self._adapted_selectable is None: context.froms += (self.selectable,) if context.order_by is False and self.mapper.order_by: context.order_by = self.mapper.order_by # apply adaptation to the mapper's order_by if needed. if adapter: context.order_by = adapter.adapt_list( util.to_list( context.order_by ) ) loading._setup_entity_query( context, self.mapper, self, self.path, adapter, context.primary_columns, with_polymorphic=self._with_polymorphic, only_load_props=query._only_load_props, polymorphic_discriminator=self._polymorphic_discriminator) def __str__(self): return str(self.mapper) @inspection._self_inspects class Bundle(InspectionAttr): """A grouping of SQL expressions that are returned by a :class:`.Query` under one namespace. The :class:`.Bundle` essentially allows nesting of the tuple-based results returned by a column-oriented :class:`.Query` object. It also is extensible via simple subclassing, where the primary capability to override is that of how the set of expressions should be returned, allowing post-processing as well as custom return types, without involving ORM identity-mapped classes. .. versionadded:: 0.9.0 .. 
seealso:: :ref:`bundles` """ single_entity = False """If True, queries for a single Bundle will be returned as a single entity, rather than an element within a keyed tuple.""" is_clause_element = False is_mapper = False is_aliased_class = False def __init__(self, name, *exprs, **kw): r"""Construct a new :class:`.Bundle`. e.g.:: bn = Bundle("mybundle", MyClass.x, MyClass.y) for row in session.query(bn).filter( bn.c.x == 5).filter(bn.c.y == 4): print(row.mybundle.x, row.mybundle.y) :param name: name of the bundle. :param \*exprs: columns or SQL expressions comprising the bundle. :param single_entity=False: if True, rows for this :class:`.Bundle` can be returned as a "single entity" outside of any enclosing tuple in the same manner as a mapped entity. """ self.name = self._label = name self.exprs = exprs self.c = self.columns = ColumnCollection() self.columns.update((getattr(col, "key", col._label), col) for col in exprs) self.single_entity = kw.pop('single_entity', self.single_entity) columns = None """A namespace of SQL expressions referred to by this :class:`.Bundle`. e.g.:: bn = Bundle("mybundle", MyClass.x, MyClass.y) q = sess.query(bn).filter(bn.c.x == 5) Nesting of bundles is also supported:: b1 = Bundle("b1", Bundle('b2', MyClass.a, MyClass.b), Bundle('b3', MyClass.x, MyClass.y) ) q = sess.query(b1).filter( b1.c.b2.c.a == 5).filter(b1.c.b3.c.y == 9) .. seealso:: :attr:`.Bundle.c` """ c = None """An alias for :attr:`.Bundle.columns`.""" def _clone(self): cloned = self.__class__.__new__(self.__class__) cloned.__dict__.update(self.__dict__) return cloned def __clause_element__(self): return expression.ClauseList(group=False, *self.c) @property def clauses(self): return self.__clause_element__().clauses def label(self, name): """Provide a copy of this :class:`.Bundle` passing a new label.""" cloned = self._clone() cloned.name = name return cloned def create_row_processor(self, query, procs, labels): """Produce the "row processing" function for this :class:`.Bundle`. May be overridden by subclasses. .. seealso:: :ref:`bundles` - includes an example of subclassing. 
""" keyed_tuple = util.lightweight_named_tuple('result', labels) def proc(row): return keyed_tuple([proc(row) for proc in procs]) return proc class _BundleEntity(_QueryEntity): use_id_for_hash = False def __init__(self, query, bundle, setup_entities=True): query._entities.append(self) self.bundle = self.expr = bundle self.type = type(bundle) self._label_name = bundle.name self._entities = [] if setup_entities: for expr in bundle.exprs: if isinstance(expr, Bundle): _BundleEntity(self, expr) else: _ColumnEntity(self, expr, namespace=self) self.supports_single_entity = self.bundle.single_entity @property def entities(self): entities = [] for ent in self._entities: entities.extend(ent.entities) return entities @property def entity_zero(self): for ent in self._entities: ezero = ent.entity_zero if ezero is not None: return ezero else: return None def corresponds_to(self, entity): # TODO: this seems to have no effect for # _ColumnEntity either return False @property def entity_zero_or_selectable(self): for ent in self._entities: ezero = ent.entity_zero_or_selectable if ezero is not None: return ezero else: return None def adapt_to_selectable(self, query, sel): c = _BundleEntity(query, self.bundle, setup_entities=False) # c._label_name = self._label_name # c.entity_zero = self.entity_zero # c.entities = self.entities for ent in self._entities: ent.adapt_to_selectable(c, sel) def setup_entity(self, ext_info, aliased_adapter): for ent in self._entities: ent.setup_entity(ext_info, aliased_adapter) def setup_context(self, query, context): for ent in self._entities: ent.setup_context(query, context) def row_processor(self, query, context, result): procs, labels = zip( *[ent.row_processor(query, context, result) for ent in self._entities] ) proc = self.bundle.create_row_processor(query, procs, labels) return proc, self._label_name class _ColumnEntity(_QueryEntity): """Column/expression based entity.""" def __init__(self, query, column, namespace=None): self.expr = column self.namespace = namespace search_entities = True check_column = False if isinstance(column, util.string_types): column = sql.literal_column(column) self._label_name = column.name search_entities = False check_column = True _entity = None elif isinstance(column, ( attributes.QueryableAttribute, interfaces.PropComparator )): _entity = getattr(column, '_parententity', None) if _entity is not None: search_entities = False self._label_name = column.key column = column._query_clause_element() check_column = True if isinstance(column, Bundle): _BundleEntity(query, column) return if not isinstance(column, sql.ColumnElement): if hasattr(column, '_select_iterable'): # break out an object like Table into # individual columns for c in column._select_iterable: if c is column: break _ColumnEntity(query, c, namespace=column) else: return raise sa_exc.InvalidRequestError( "SQL expression, column, or mapped entity " "expected - got '%r'" % (column, ) ) elif not check_column: self._label_name = getattr(column, 'key', None) search_entities = True self.type = type_ = column.type self.use_id_for_hash = not type_.hashable # If the Column is unnamed, give it a # label() so that mutable column expressions # can be located in the result even # if the expression's identity has been changed # due to adaption. if not column._label and not getattr(column, 'is_literal', False): column = column.label(self._label_name) query._entities.append(self) self.column = column self.froms = set() # look for ORM entities represented within the # given expression. 
Try to count only entities # for columns whose FROM object is in the actual list # of FROMs for the overall expression - this helps # subqueries which were built from ORM constructs from # leaking out their entities into the main select construct self.actual_froms = actual_froms = set(column._from_objects) if not search_entities: self.entity_zero = _entity if _entity: self.entities = [_entity] self.mapper = _entity.mapper else: self.entities = [] self.mapper = None self._from_entities = set(self.entities) else: all_elements = [ elem for elem in sql_util.surface_column_elements(column) if 'parententity' in elem._annotations ] self.entities = util.unique_list([ elem._annotations['parententity'] for elem in all_elements if 'parententity' in elem._annotations ]) self._from_entities = set([ elem._annotations['parententity'] for elem in all_elements if 'parententity' in elem._annotations and actual_froms.intersection(elem._from_objects) ]) if self.entities: self.entity_zero = self.entities[0] self.mapper = self.entity_zero.mapper elif self.namespace is not None: self.entity_zero = self.namespace self.mapper = None else: self.entity_zero = None self.mapper = None supports_single_entity = False @property def entity_zero_or_selectable(self): if self.entity_zero is not None: return self.entity_zero elif self.actual_froms: return list(self.actual_froms)[0] else: return None def adapt_to_selectable(self, query, sel): c = _ColumnEntity(query, sel.corresponding_column(self.column)) c._label_name = self._label_name c.entity_zero = self.entity_zero c.entities = self.entities def setup_entity(self, ext_info, aliased_adapter): if 'selectable' not in self.__dict__: self.selectable = ext_info.selectable if self.actual_froms.intersection(ext_info.selectable._from_objects): self.froms.add(ext_info.selectable) def corresponds_to(self, entity): # TODO: just returning False here, # no tests fail if self.entity_zero is None: return False elif _is_aliased_class(entity): # TODO: polymorphic subclasses ? 
return entity is self.entity_zero else: return not _is_aliased_class(self.entity_zero) and \ entity.common_parent(self.entity_zero) def row_processor(self, query, context, result): if ('fetch_column', self) in context.attributes: column = context.attributes[('fetch_column', self)] else: column = query._adapt_clause(self.column, False, True) if context.adapter: column = context.adapter.columns[column] getter = result._getter(column) return getter, self._label_name def setup_context(self, query, context): column = query._adapt_clause(self.column, False, True) context.froms += tuple(self.froms) context.primary_columns.append(column) context.attributes[('fetch_column', self)] = column def __str__(self): return str(self.column) class QueryContext(object): __slots__ = ( 'multi_row_eager_loaders', 'adapter', 'froms', 'for_update', 'query', 'session', 'autoflush', 'populate_existing', 'invoke_all_eagers', 'version_check', 'refresh_state', 'primary_columns', 'secondary_columns', 'eager_order_by', 'eager_joins', 'create_eager_joins', 'propagate_options', 'attributes', 'statement', 'from_clause', 'whereclause', 'order_by', 'labels', '_for_update_arg', 'runid', 'partials' ) def __init__(self, query): if query._statement is not None: if isinstance(query._statement, expression.SelectBase) and \ not query._statement._textual and \ not query._statement.use_labels: self.statement = query._statement.apply_labels() else: self.statement = query._statement else: self.statement = None self.from_clause = query._from_obj self.whereclause = query._criterion self.order_by = query._order_by self.multi_row_eager_loaders = False self.adapter = None self.froms = () self.for_update = None self.query = query self.session = query.session self.autoflush = query._autoflush self.populate_existing = query._populate_existing self.invoke_all_eagers = query._invoke_all_eagers self.version_check = query._version_check self.refresh_state = query._refresh_state self.primary_columns = [] self.secondary_columns = [] self.eager_order_by = [] self.eager_joins = {} self.create_eager_joins = [] self.propagate_options = set(o for o in query._with_options if o.propagate_to_loaders) self.attributes = query._attributes.copy() class AliasOption(interfaces.MapperOption): def __init__(self, alias): r"""Return a :class:`.MapperOption` that will indicate to the :class:`.Query` that the main table has been aliased. This is a seldom-used option to suit the very rare case that :func:`.contains_eager` is being used in conjunction with a user-defined SELECT statement that aliases the parent table. E.g.:: # define an aliased UNION called 'ulist' ulist = users.select(users.c.user_id==7).\ union(users.select(users.c.user_id>7)).\ alias('ulist') # add on an eager load of "addresses" statement = ulist.outerjoin(addresses).\ select().apply_labels() # create query, indicating "ulist" will be an # alias for the main table, "addresses" # property should be eager loaded query = session.query(User).options( contains_alias(ulist), contains_eager(User.addresses)) # then get results via the statement results = query.from_statement(statement).all() :param alias: is the string name of an alias, or a :class:`~.sql.expression.Alias` object representing the alias. """ self.alias = alias def process_query(self, query): if isinstance(self.alias, util.string_types): alias = query._mapper_zero().mapped_table.alias(self.alias) else: alias = self.alias query._from_obj_alias = sql_util.ColumnAdapter(alias)
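# ---------------------------------------------------------------------------
# Editor's usage sketch -- NOT part of the original query.py source.  It
# illustrates the Bundle and bulk update()/delete() APIs documented above
# against a small, hypothetical ``User`` mapping (the class, its columns and
# the in-memory SQLite engine below are assumptions made for the example
# only, not anything defined by this module).
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Bundle, sessionmaker

Base = declarative_base()


class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    age = Column(Integer)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([User(name='ed', age=25), User(name='wendy', age=30)])
session.commit()

# A Bundle groups several columns under one namespace in each result row.
bn = Bundle('demo', User.name, User.age)
for row in session.query(bn).filter(bn.c.age > 20):
    print(row.demo.name, row.demo.age)

# Bulk UPDATE/DELETE bypass unit-of-work mechanics; the return value is the
# database's matched row count, as described in the docstrings above.
updated = session.query(User).filter(User.age == 25).update(
    {User.age: User.age + 1}, synchronize_session='evaluate')
deleted = session.query(User).filter(User.age > 29).delete(
    synchronize_session='fetch')
session.commit()
print(updated, deleted)
# ---------------------------------------------------------------------------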
gpl-3.0
patrickstocklin/chattR
lib/python2.7/site-packages/requests/packages/chardet/langthaimodel.py
2930
11275
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # The following result for thai was collected from a limited sample (1M). # Character Mapping Table: TIS620CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40 188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50 253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60 96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70 209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222, 223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235, 236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57, 49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54, 45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63, 22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244, 11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247, 68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253, ) # Model Table: # total sequences: 100% # first 512 sequences: 92.6386% # first 1024 sequences:7.3177% # rest sequences: 1.0230% # negative sequences: 0.0436% ThaiLangModel = ( 0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3, 0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2, 3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3, 0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1, 3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2, 3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1, 3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2, 3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1, 3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1, 3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0, 3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1, 2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1, 3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1, 
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0, 3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1, 0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0, 3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2, 1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0, 3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3, 3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0, 1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2, 0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0, 2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3, 0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0, 3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1, 2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0, 3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2, 0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2, 3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0, 3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0, 2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2, 3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1, 2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1, 3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1, 3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0, 3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1, 3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1, 3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1, 1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2, 0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3, 0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1, 3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0, 3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1, 1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0, 3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1, 3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2, 0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0, 0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0, 1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1, 1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1, 3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1, 0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0, 
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, 3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0, 3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0, 0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1, 0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0, 0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1, 0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1, 0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0, 0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1, 0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0, 3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0, 0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0, 0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0, 3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1, 2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1, 0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0, 3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0, 0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0, 1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0, 1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0, 1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ) TIS620ThaiModel = { 'charToOrderMap': TIS620CharToOrderMap, 'precedenceMatrix': ThaiLangModel, 'mTypicalPositiveRatio': 0.926386, 'keepEnglishLetter': False, 'charsetName': "TIS-620" } # flake8: noqa
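# ---------------------------------------------------------------------------
# Editor's sketch -- not part of the upstream chardet source.  It shows, in a
# minimal form, how a sequence-based prober consumes the tables above: each
# byte is mapped to a frequency "order" via TIS620CharToOrderMap, and
# consecutive orders below 64 index ThaiLangModel (treated here as a 64x64
# matrix flattened row-major) to rate the bigram from 0 (negative) to 3
# (positive).
def _bigram_categories(data):
    """Yield the language-model rating for each pair of frequent bytes."""
    prev_order = None
    for byte in bytearray(data):
        order = TIS620CharToOrderMap[byte]
        if order < 64 and prev_order is not None and prev_order < 64:
            yield ThaiLangModel[prev_order * 64 + order]
        prev_order = order
# ---------------------------------------------------------------------------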
gpl-2.0
cnsoft/kbengine-cocos2dx
kbe/src/lib/python/Lib/nturl2path.py
56
2396
"""Convert a NT pathname to a file URL and vice versa.""" def url2pathname(url): """OS-specific conversion from a relative URL of the 'file' scheme to a file system path; not recommended for general use.""" # e.g. # ///C|/foo/bar/spam.foo # becomes # C:\foo\bar\spam.foo import string, urllib.parse # Windows itself uses ":" even in URLs. url = url.replace(':', '|') if not '|' in url: # No drive specifier, just convert slashes if url[:4] == '////': # path is something like ////host/path/on/remote/host # convert this to \\host\path\on\remote\host # (notice halving of slashes at the start of the path) url = url[2:] components = url.split('/') # make sure not to convert quoted slashes :-) return urllib.parse.unquote('\\'.join(components)) comp = url.split('|') if len(comp) != 2 or comp[0][-1] not in string.ascii_letters: error = 'Bad URL: ' + url raise IOError(error) drive = comp[0][-1].upper() components = comp[1].split('/') path = drive + ':' for comp in components: if comp: path = path + '\\' + urllib.parse.unquote(comp) # Issue #11474 - handing url such as |c/| if path.endswith(':') and url.endswith('/'): path += '\\' return path def pathname2url(p): """OS-specific conversion from a file system path to a relative URL of the 'file' scheme; not recommended for general use.""" # e.g. # C:\foo\bar\spam.foo # becomes # ///C|/foo/bar/spam.foo import urllib.parse if not ':' in p: # No drive specifier, just convert slashes and quote the name if p[:2] == '\\\\': # path is something like \\host\path\on\remote\host # convert this to ////host/path/on/remote/host # (notice doubling of slashes at the start of the path) p = '\\\\' + p components = p.split('\\') return urllib.parse.quote('/'.join(components)) comp = p.split(':') if len(comp) != 2 or len(comp[0]) > 1: error = 'Bad path: ' + p raise IOError(error) drive = urllib.parse.quote(comp[0].upper()) components = comp[1].split('\\') path = '///' + drive + ':' for comp in components: if comp: path = path + '/' + urllib.parse.quote(comp) return path
lgpl-3.0
alxgu/ansible
test/units/modules/network/fortimanager/test_fmgr_fwobj_vip.py
39
24644
# Copyright 2018 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <https://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler import pytest try: from ansible.modules.network.fortimanager import fmgr_fwobj_vip except ImportError: pytest.skip("Could not load required modules for testing", allow_module_level=True) def load_fixtures(): fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') + "/{filename}.json".format( filename=os.path.splitext(os.path.basename(__file__))[0]) try: with open(fixture_path, "r") as fixture_file: fixture_data = json.load(fixture_file) except IOError: return [] return [fixture_data] @pytest.fixture(autouse=True) def module_mock(mocker): connection_class_mock = mocker.patch('ansible.module_utils.basic.AnsibleModule') return connection_class_mock @pytest.fixture(autouse=True) def connection_mock(mocker): connection_class_mock = mocker.patch('ansible.modules.network.fortimanager.fmgr_fwobj_vip.Connection') return connection_class_mock @pytest.fixture(scope="function", params=load_fixtures()) def fixture_data(request): func_name = request.function.__name__.replace("test_", "") return request.param.get(func_name, None) fmg_instance = FortiManagerHandler(connection_mock, module_mock) def test_fmgr_firewall_vip_modify(fixture_data, mocker): mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request", side_effect=fixture_data) # Fixture sets used:########################### ################################################## # comment: Created by Ansible # ssl-send-empty-frags: None # srcintf-filter: None # ssl-max-version: None # ssl-server-session-state-max: None # ssl-hpkp: None # mapped-addr: None # ssl-client-session-state-timeout: None # src-filter: None # server-type: None # ssl-hpkp-include-subdomains: None # ssl-http-location-conversion: None # https-cookie-secure: None # mappedip: 10.7.220.25 # ssl-server-cipher-suites: {'priority': None, 'cipher': None, 'versions': None} # protocol: tcp # ssl-hpkp-backup: None # ssl-dh-bits: None # dns-mapping-ttl: None # ssl-hsts-age: None # extaddr: None # ssl-client-renegotiation: None # monitor: None # service: None # ssl-hpkp-age: None # http-cookie-age: None # weblogic-server: None # http-cookie-share: None # color: 17 # ssl-mode: None # portforward: enable # http-multiplex: None # http-cookie-generation: None # ssl-client-fallback: None # extip: 82.72.192.185 # extintf: any # persistence: None # websphere-server: None # nat-source-vip: None # portmapping-type: None # adom: ansible # ssl-client-session-state-max: None # http-ip-header: None # http-ip-header-name: None # ssl-certificate: None # ssl-hsts: None # arp-reply: None # ssl-hsts-include-subdomains: None # ssl-min-version: None # ldb-method: None # 
ssl-server-session-state-timeout: None # ssl-server-min-version: None # http-cookie-domain: None # mappedport: 443 # name: Basic PNAT Map Port 10443 # ssl-cipher-suites: {'cipher': None, 'versions': None} # ssl-hpkp-primary: None # outlook-web-access: None # ssl-server-session-state-type: None # ssl-client-session-state-type: None # ssl-http-match-host: None # ssl-server-max-version: None # ssl-hpkp-report-uri: None # http-cookie-domain-from-host: None # ssl-algorithm: None # gratuitous-arp-interval: None # extport: 10443 # max-embryonic-connections: None # mode: set # http-cookie-path: None # ssl-pfs: None # ssl-server-algorithm: None ################################################## ################################################## # comment: Created by Ansible # ssl-send-empty-frags: None # srcintf-filter: None # ssl-max-version: None # ssl-server-session-state-max: None # ssl-hpkp: None # ssl-hsts-include-subdomains: None # mapped-addr: None # src-filter: None # server-type: None # mode: set # ssl-hpkp-include-subdomains: None # ssl-http-location-conversion: None # https-cookie-secure: None # mappedip: 3.3.3.0/24, 4.0.0.0/24 # ssl-server-cipher-suites: {'priority': None, 'cipher': None, 'versions': None} # protocol: None # ssl-hpkp-backup: None # ssl-dh-bits: None # dns-mapping-ttl: None # ssl-hsts-age: None # ssl-client-renegotiation: None # monitor: None # service: None # ssl-hpkp-age: None # http-cookie-age: None # adom: ansible # http-cookie-share: None # ssl-server-session-state-timeout: None # color: 12 # ssl-mode: None # portforward: None # http-cookie-generation: None # max-embryonic-connections: None # ssl-client-fallback: None # ssl-hpkp-report-uri: None # extip: 192.168.0.1-192.168.0.100 # extintf: dmz # persistence: None # websphere-server: None # nat-source-vip: None # portmapping-type: None # http-ip-header-name: None # weblogic-server: None # ssl-client-session-state-max: None # http-ip-header: None # ssl-hsts: None # arp-reply: None # extaddr: None # ssl-min-version: None # ldb-method: None # ssl-certificate: None # ssl-server-min-version: None # http-cookie-domain: None # mappedport: None # outlook-web-access: None # ssl-cipher-suites: {'cipher': None, 'versions': None} # ssl-hpkp-primary: None # name: Basic DNS Translation # ssl-server-session-state-type: None # ssl-client-session-state-type: None # ssl-http-match-host: None # ssl-pfs: None # ssl-server-max-version: None # ssl-client-session-state-timeout: None # http-cookie-domain-from-host: None # extport: None # ssl-server-algorithm: None # gratuitous-arp-interval: None # http-cookie-path: None # ssl-algorithm: None # http-multiplex: None ################################################## ################################################## # comment: Created by Ansible # ssl-send-empty-frags: None # srcintf-filter: None # ssl-max-version: None # ssl-server-session-state-max: None # ssl-hpkp: None # mapped-addr: google-play # ssl-client-session-state-timeout: None # src-filter: None # ldb-method: None # server-type: None # ssl-hpkp-include-subdomains: None # ssl-client-renegotiation: None # ssl-http-location-conversion: None # https-cookie-secure: None # mappedip: None # ssl-server-cipher-suites: {'priority': None, 'cipher': None, 'versions': None} # protocol: None # ssl-hpkp-backup: None # ssl-dh-bits: None # dns-mapping-ttl: None # ssl-hsts-age: None # extaddr: None # monitor: None # service: None # ssl-hpkp-age: None # http-cookie-age: None # weblogic-server: None # http-cookie-share: None # color: 5 # ssl-mode: 
None # portforward: None # http-cookie-generation: None # ssl-client-fallback: None # extip: None # extintf: None # persistence: None # websphere-server: None # nat-source-vip: None # portmapping-type: None # adom: ansible # ssl-client-session-state-max: None # http-ip-header: None # http-ip-header-name: None # ssl-certificate: None # ssl-hsts: None # arp-reply: None # extport: None # ssl-min-version: None # ssl-server-algorithm: None # ssl-server-session-state-timeout: None # ssl-server-min-version: None # http-cookie-domain: None # mappedport: None # name: Basic FQDN Translation # ssl-cipher-suites: {'cipher': None, 'versions': None} # ssl-hpkp-primary: None # outlook-web-access: None # ssl-server-session-state-type: None # ssl-client-session-state-type: None # ssl-http-match-host: None # ssl-server-max-version: None # ssl-hpkp-report-uri: None # http-cookie-domain-from-host: None # ssl-algorithm: None # gratuitous-arp-interval: None # ssl-hsts-include-subdomains: None # max-embryonic-connections: None # mode: set # http-cookie-path: None # ssl-pfs: None # http-multiplex: None ################################################## ################################################## # comment: Created by Ansible # ssl-send-empty-frags: None # srcintf-filter: None # ssl-max-version: None # ssl-server-session-state-max: None # ssl-hpkp: None # mapped-addr: None # src-filter: None # server-type: None # mode: set # ssl-hpkp-include-subdomains: None # extport: None # ssl-http-location-conversion: None # https-cookie-secure: None # mappedip: 10.7.220.25 # ssl-server-cipher-suites: {'priority': None, 'cipher': None, 'versions': None} # protocol: None # ssl-hpkp-backup: None # ssl-dh-bits: None # dns-mapping-ttl: None # ssl-hsts-age: None # ssl-server-algorithm: None # extaddr: None # monitor: None # service: None # ssl-hpkp-age: None # http-cookie-age: None # adom: ansible # http-cookie-share: None # ssl-server-session-state-timeout: None # color: 17 # ssl-mode: None # portforward: None # http-cookie-generation: None # max-embryonic-connections: None # ssl-client-fallback: None # ssl-hpkp-report-uri: None # extip: 82.72.192.185 # extintf: any # persistence: None # websphere-server: None # nat-source-vip: None # portmapping-type: None # http-ip-header-name: None # weblogic-server: None # ssl-client-session-state-max: None # http-ip-header: None # ssl-hsts: None # arp-reply: None # ssl-client-renegotiation: None # ssl-min-version: None # ldb-method: None # ssl-certificate: None # ssl-server-min-version: None # http-cookie-domain: None # mappedport: None # outlook-web-access: None # ssl-cipher-suites: {'cipher': None, 'versions': None} # ssl-hpkp-primary: None # name: Basic StaticNAT Map # ssl-server-session-state-type: None # ssl-client-session-state-type: None # ssl-http-match-host: None # ssl-pfs: None # ssl-client-session-state-timeout: None # http-cookie-domain-from-host: None # ssl-hsts-include-subdomains: None # ssl-server-max-version: None # gratuitous-arp-interval: None # http-cookie-path: None # ssl-algorithm: None # http-multiplex: None ################################################## ################################################## # comment: Created by Ansible # ssl-send-empty-frags: None # srcintf-filter: None # ssl-max-version: None # ssl-server-session-state-max: None # ssl-hpkp: None # mapped-addr: None # ssl-client-session-state-timeout: None # src-filter: None # server-type: None # ssl-hpkp-include-subdomains: None # ssl-client-renegotiation: None # ssl-http-location-conversion: 
None # https-cookie-secure: None # mappedip: 10.7.220.25 # ssl-server-cipher-suites: {'priority': None, 'cipher': None, 'versions': None} # protocol: tcp # ssl-hpkp-backup: None # ssl-dh-bits: None # dns-mapping-ttl: None # ssl-hsts-age: None # extaddr: None # monitor: None # service: None # ssl-hpkp-age: None # http-cookie-age: None # weblogic-server: None # http-cookie-share: None # color: 17 # ssl-mode: None # portforward: enable # http-cookie-generation: None # ssl-client-fallback: None # extip: 82.72.192.185 # extintf: any # persistence: None # websphere-server: None # nat-source-vip: None # portmapping-type: None # adom: ansible # ssl-client-session-state-max: None # http-ip-header: None # http-ip-header-name: None # ssl-min-version: None # ssl-certificate: None # ssl-hsts: None # arp-reply: None # ssl-hsts-include-subdomains: None # http-multiplex: None # ldb-method: None # ssl-server-session-state-timeout: None # ssl-server-min-version: None # http-cookie-domain: None # mappedport: 443 # name: Basic PNAT Map Port 10443 # ssl-cipher-suites: {'cipher': None, 'versions': None} # ssl-hpkp-primary: None # outlook-web-access: None # ssl-server-session-state-type: None # ssl-client-session-state-type: None # ssl-http-match-host: None # ssl-server-max-version: None # ssl-hpkp-report-uri: None # http-cookie-domain-from-host: None # ssl-algorithm: None # gratuitous-arp-interval: None # extport: 10443 # max-embryonic-connections: None # mode: set # http-cookie-path: None # ssl-pfs: None # ssl-server-algorithm: None ################################################## ################################################## # comment: None # ssl-send-empty-frags: None # srcintf-filter: None # ssl-max-version: None # ssl-server-session-state-max: None # ssl-hpkp: None # ssl-hsts-include-subdomains: None # mapped-addr: None # src-filter: None # server-type: None # mode: delete # ssl-hpkp-include-subdomains: None # ssl-http-location-conversion: None # https-cookie-secure: None # mappedip: None # ssl-server-cipher-suites: {'priority': None, 'cipher': None, 'versions': None} # protocol: None # ssl-hpkp-backup: None # ssl-dh-bits: None # dns-mapping-ttl: None # ssl-hsts-age: None # extaddr: None # monitor: None # service: None # ssl-hpkp-age: None # http-cookie-age: None # adom: ansible # http-cookie-share: None # ssl-server-session-state-timeout: None # color: None # ssl-mode: None # portforward: None # http-cookie-generation: None # max-embryonic-connections: None # ssl-client-fallback: None # ssl-hpkp-report-uri: None # extip: None # extintf: None # persistence: None # websphere-server: None # nat-source-vip: None # portmapping-type: None # http-ip-header-name: None # weblogic-server: None # ssl-client-session-state-max: None # http-ip-header: None # ssl-hsts: None # arp-reply: None # ssl-client-renegotiation: None # http-multiplex: None # ldb-method: None # ssl-certificate: None # ssl-server-min-version: None # http-cookie-domain: None # mappedport: None # outlook-web-access: None # ssl-cipher-suites: {'cipher': None, 'versions': None} # ssl-hpkp-primary: None # name: Basic PNAT Map Port 10443 # ssl-server-session-state-type: None # ssl-client-session-state-type: None # ssl-http-match-host: None # ssl-pfs: None # ssl-server-max-version: None # ssl-client-session-state-timeout: None # http-cookie-domain-from-host: None # extport: None # ssl-server-algorithm: None # gratuitous-arp-interval: None # http-cookie-path: None # ssl-algorithm: None # ssl-min-version: None 
################################################## ################################################## # comment: None # ssl-send-empty-frags: None # srcintf-filter: None # ssl-max-version: None # ssl-server-session-state-max: None # mappedip: None # mapped-addr: None # ssl-client-session-state-timeout: None # src-filter: None # ldb-method: None # server-type: None # ssl-hpkp-include-subdomains: None # ssl-http-location-conversion: None # https-cookie-secure: None # ssl-hpkp: None # ssl-server-cipher-suites: {'priority': None, 'cipher': None, 'versions': None} # protocol: None # ssl-hpkp-backup: None # ssl-dh-bits: None # dns-mapping-ttl: None # ssl-hsts-age: None # extaddr: None # ssl-client-renegotiation: None # monitor: None # service: None # ssl-hpkp-age: None # http-cookie-age: None # weblogic-server: None # http-cookie-share: None # color: None # ssl-mode: None # portforward: None # http-cookie-generation: None # ssl-client-fallback: None # extip: None # extintf: None # persistence: None # websphere-server: None # nat-source-vip: None # portmapping-type: None # adom: ansible # ssl-client-session-state-max: None # http-ip-header: None # http-ip-header-name: None # ssl-certificate: None # ssl-hsts: None # arp-reply: None # extport: None # http-multiplex: None # ssl-server-algorithm: None # ssl-server-session-state-timeout: None # ssl-server-min-version: None # http-cookie-domain: None # mappedport: None # name: Basic StaticNAT Map # ssl-cipher-suites: {'cipher': None, 'versions': None} # ssl-hpkp-primary: None # outlook-web-access: None # ssl-server-session-state-type: None # ssl-client-session-state-type: None # ssl-http-match-host: None # ssl-server-max-version: None # ssl-hpkp-report-uri: None # http-cookie-domain-from-host: None # ssl-algorithm: None # gratuitous-arp-interval: None # ssl-hsts-include-subdomains: None # max-embryonic-connections: None # mode: delete # http-cookie-path: None # ssl-pfs: None # ssl-min-version: None ################################################## ################################################## # comment: None # ssl-send-empty-frags: None # srcintf-filter: None # ssl-max-version: None # ssl-server-session-state-max: None # mappedip: None # mapped-addr: None # src-filter: None # server-type: None # mode: delete # ssl-hpkp-include-subdomains: None # extport: None # ssl-http-location-conversion: None # https-cookie-secure: None # ssl-hpkp: None # ssl-server-cipher-suites: {'priority': None, 'cipher': None, 'versions': None} # protocol: None # ssl-hpkp-backup: None # ssl-dh-bits: None # dns-mapping-ttl: None # ssl-hsts-age: None # ssl-server-algorithm: None # ssl-client-renegotiation: None # monitor: None # service: None # ssl-hpkp-age: None # http-cookie-age: None # adom: ansible # http-cookie-share: None # ssl-server-session-state-timeout: None # color: None # ssl-mode: None # portforward: None # http-multiplex: None # http-cookie-generation: None # max-embryonic-connections: None # ssl-client-fallback: None # ssl-hpkp-report-uri: None # extip: None # extintf: None # persistence: None # websphere-server: None # nat-source-vip: None # portmapping-type: None # http-ip-header-name: None # weblogic-server: None # ssl-client-session-state-max: None # http-ip-header: None # ssl-hsts: None # arp-reply: None # extaddr: None # ssl-hpkp-primary: None # ldb-method: None # ssl-certificate: None # ssl-server-min-version: None # http-cookie-domain: None # mappedport: None # outlook-web-access: None # ssl-cipher-suites: {'cipher': None, 'versions': None} # 
ssl-client-session-state-type: None # name: Basic DNS Translation # ssl-server-session-state-type: None # ssl-http-match-host: None # ssl-pfs: None # ssl-client-session-state-timeout: None # http-cookie-domain-from-host: None # ssl-hsts-include-subdomains: None # ssl-server-max-version: None # gratuitous-arp-interval: None # http-cookie-path: None # ssl-algorithm: None # ssl-min-version: None ################################################## ################################################## # ldb-method: None # ssl-send-empty-frags: None # srcintf-filter: None # ssl-max-version: None # ssl-server-session-state-max: None # mappedip: None # ssl-hsts: None # mapped-addr: None # src-filter: None # server-type: None # ssl-hpkp-include-subdomains: None # ssl-client-renegotiation: None # ssl-http-location-conversion: None # https-cookie-secure: None # extip: None # ssl-hpkp: None # ssl-server-cipher-suites: {'priority': None, 'cipher': None, 'versions': None} # protocol: None # ssl-hpkp-backup: None # ssl-dh-bits: None # dns-mapping-ttl: None # ssl-hsts-age: None # extaddr: None # ssl-hpkp-primary: None # monitor: None # service: None # ssl-hpkp-age: None # http-cookie-age: None # weblogic-server: None # http-cookie-share: None # name: Basic FQDN Translation # color: None # ssl-mode: None # portforward: None # http-cookie-generation: None # ssl-client-fallback: None # http-ip-header: None # persistence: None # websphere-server: None # nat-source-vip: None # portmapping-type: None # adom: ansible # ssl-client-session-state-max: None # extintf: None # ssl-server-max-version: None # http-ip-header-name: None # ssl-certificate: None # ssl-server-session-state-type: None # arp-reply: None # ssl-hsts-include-subdomains: None # ssl-min-version: None # ssl-server-algorithm: None # ssl-server-session-state-timeout: None # ssl-server-min-version: None # http-cookie-domain: None # mappedport: None # outlook-web-access: None # ssl-cipher-suites: {'cipher': None, 'versions': None} # ssl-client-session-state-type: None # ssl-http-match-host: None # ssl-client-session-state-timeout: None # comment: None # ssl-hpkp-report-uri: None # http-cookie-domain-from-host: None # ssl-algorithm: None # gratuitous-arp-interval: None # extport: None # max-embryonic-connections: None # mode: delete # http-cookie-path: None # ssl-pfs: None # http-multiplex: None ################################################## # Test using fixture 1 # output = fmgr_fwobj_vip.fmgr_firewall_vip_modify(fmg_instance, fixture_data[0]['paramgram_used']) assert output['raw_response']['status']['code'] == 0 # Test using fixture 2 # output = fmgr_fwobj_vip.fmgr_firewall_vip_modify(fmg_instance, fixture_data[1]['paramgram_used']) assert output['raw_response']['status']['code'] == -10131 # Test using fixture 3 # output = fmgr_fwobj_vip.fmgr_firewall_vip_modify(fmg_instance, fixture_data[2]['paramgram_used']) assert output['raw_response']['status']['code'] == 0 # Test using fixture 4 # output = fmgr_fwobj_vip.fmgr_firewall_vip_modify(fmg_instance, fixture_data[3]['paramgram_used']) assert output['raw_response']['status']['code'] == 0 # Test using fixture 5 # output = fmgr_fwobj_vip.fmgr_firewall_vip_modify(fmg_instance, fixture_data[4]['paramgram_used']) assert output['raw_response']['status']['code'] == 0 # Test using fixture 6 # output = fmgr_fwobj_vip.fmgr_firewall_vip_modify(fmg_instance, fixture_data[5]['paramgram_used']) assert output['raw_response']['status']['code'] == 0 # Test using fixture 7 # output = 
fmgr_fwobj_vip.fmgr_firewall_vip_modify(fmg_instance, fixture_data[6]['paramgram_used']) assert output['raw_response']['status']['code'] == 0 # Test using fixture 8 # output = fmgr_fwobj_vip.fmgr_firewall_vip_modify(fmg_instance, fixture_data[7]['paramgram_used']) assert output['raw_response']['status']['code'] == -3 # Test using fixture 9 # output = fmgr_fwobj_vip.fmgr_firewall_vip_modify(fmg_instance, fixture_data[8]['paramgram_used']) assert output['raw_response']['status']['code'] == 0
gpl-3.0
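The fixture dumps above list every paramgram key, nearly all of them None; only a handful carry values. A minimal sketch of the non-None portion of the first fixture, with the assertion pattern from the record shown as comments because fmg_instance and the fmgr_fwobj_vip import come from the surrounding test harness, which is assumed here rather than verified.

# Non-None portion of the first fixture dump above ("Basic StaticNAT Map", delete in adom "ansible").
# Every key omitted here is None in the fixture; this dict is illustrative, not a verified fixture file.
paramgram = {
    "name": "Basic StaticNAT Map",
    "mode": "delete",
    "adom": "ansible",
    # all remaining VIP attributes (extip, mappedip, ssl-* options, ...) are None in the dump
}

# The assertions in the record follow this shape; the call stays commented out because
# fmg_instance and fmgr_fwobj_vip belong to the test harness:
# output = fmgr_fwobj_vip.fmgr_firewall_vip_modify(fmg_instance, paramgram)
# assert output["raw_response"]["status"]["code"] == 0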
waltervh/BornAgain-tutorial
talks/day_2/python_api_G/pyapi04_lamellar_vertical_solution2.py
3
2390
import bornagain as ba from bornagain import deg, nm def get_vertical_lamellar(): mat_a = ba.HomogeneousMaterial("PTFE", 5.20508729E-6, 1.96944292E-8) mat_b = ba.HomogeneousMaterial("HMDSO", 2.0888308E-6, 1.32605651E-8) length = 30*nm width_a = 4*nm width_b = 8*nm height = 30*nm nstack = 5 stack = ba.ParticleComposition() for i in range(0, nstack): box_a = ba.Particle(mat_a, ba.FormFactorBox(length, width_a, height)) box_b = ba.Particle(mat_b, ba.FormFactorBox(length, width_b, height)) stack.addParticle(box_a, ba.kvector_t(0.0, i*(width_a+width_b), 0.0)) stack.addParticle(box_b, ba.kvector_t(0.0, (width_a + width_b)/2. + i*(width_a+width_b), 0.0)) stack.rotate(ba.RotationEuler(45.0*deg, 90.*deg, 0.0)) # Defining particles with parameter following a distribution gate = ba.DistributionGate(0.0*deg, 180.0*deg) par_distr = ba.ParameterDistribution("/ParticleComposition/EulerRotation/Alpha", gate, 60, 0.0) particles = ba.ParticleDistribution(stack, par_distr) stack.setPosition(0.0, 0.0, width_a/2.) return particles def get_sample(): # Defining Materials m_air = ba.HomogeneousMaterial("Air", 0.0, 0.0) m_substrate = ba.HomogeneousMaterial("Substrate", 6e-06, 2e-08) # Defining Layers air = ba.Layer(m_air) substrate = ba.Layer(m_substrate) # Defining Particle Layouts and adding Particles layout = ba.ParticleLayout() layout.addParticle(get_vertical_lamellar(), 1.0, ba.kvector_t(0.0, 0.0, 10.0)) layout.setTotalParticleSurfaceDensity(1e-4) # Adding layouts to layers air.addLayout(layout) # Defining Multilayer multiLayer = ba.MultiLayer() multiLayer.addLayer(air) multiLayer.addLayer(substrate) return multiLayer def get_simulation(): simulation = ba.GISASSimulation() simulation.setDetectorParameters(200, -3.0 * deg, 3.0 * deg, 200, 0.0 * deg, 6.0 * deg) simulation.setBeamParameters(0.134 * nm, 0.2 * deg, 0.0 * deg) simulation.setBeamIntensity(1.0e+08) return simulation def run_simulation(): sample = get_sample() simulation = get_simulation() simulation.setSample(sample) simulation.runSimulation() return simulation.result() if __name__ == '__main__': result = run_simulation() ba.plot_simulation_result(result, units=ba.AxesUnits.QSPACE)
gpl-3.0
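The stacking arithmetic in get_vertical_lamellar above can be checked without BornAgain: the loop places a PTFE box every width_a + width_b nanometres along y and an HMDSO box half a period later. A pure-Python sketch of those offsets, with the numbers taken from the record and no bornagain import needed:

# Reproduce the y-offsets used when stacking box_a (PTFE) and box_b (HMDSO) above.
width_a = 4.0   # nm, PTFE box width
width_b = 8.0   # nm, HMDSO box width
nstack = 5

period = width_a + width_b            # 12 nm repeat distance of the lamellar stack
for i in range(nstack):
    y_a = i * period                  # PTFE box position in repeat i
    y_b = period / 2.0 + i * period   # HMDSO box, shifted by half a period
    print(f"repeat {i}: PTFE at y={y_a:.1f} nm, HMDSO at y={y_b:.1f} nm")
# Total extent along y is nstack * period = 60 nm before the 45/90 degree rotation is applied.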
tumbl3w33d/ansible
lib/ansible/modules/network/opx/opx_cps.py
38
12542
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2018 Dell Inc. or its subsidiaries. All Rights Reserved. # # This file is part of Ansible by Red Hat # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = """ --- module: opx_cps version_added: "2.7" author: "Senthil Kumar Ganesan (@skg-net)" short_description: CPS operations on networking device running Openswitch (OPX) description: - Executes the given operation on the YANG object, using CPS API in the networking device running OpenSwitch (OPX). It uses the YANG models provided in https://github.com/open-switch/opx-base-model. options: module_name: description: - Yang path to be configured. attr_type: description: - Attribute Yang type. attr_data: description: - Attribute Yang path and their corresponding data. operation: description: - Operation to be performed on the object. default: create choices: ['delete', 'create', 'set', 'action', 'get'] db: description: - Queries/Writes the specified yang path from/to the db. type: bool default: 'no' qualifier: description: - A qualifier provides the type of object data to retrieve or act on. default: target choices: ['target', 'observed', 'proposed', 'realtime', 'registration', 'running', 'startup'] commit_event: description: - Attempts to force the auto-commit event to the specified yang object. type: bool default: 'no' requirements: - "cps" - "cps_object" - "cps_utils" """ EXAMPLES = """ - name: Create VLAN opx_cps: module_name: "dell-base-if-cmn/if/interfaces/interface" attr_data: { "base-if-vlan/if/interfaces/interface/id": 230, "if/interfaces/interface/name": "br230", "if/interfaces/interface/type": "ianaift:l2vlan" } operation: "create" - name: Get VLAN opx_cps: module_name: "dell-base-if-cmn/if/interfaces/interface" attr_data: { "if/interfaces/interface/name": "br230", } operation: "get" - name: Modify some attributes in VLAN opx_cps: module_name: "dell-base-if-cmn/if/interfaces/interface" attr_data: { "cps/key_data": { "if/interfaces/interface/name": "br230" }, "dell-if/if/interfaces/interface/untagged-ports": ["e101-008-0"], } operation: "set" - name: Delete VLAN opx_cps: module_name: "dell-base-if-cmn/if/interfaces/interface" attr_data: { "if/interfaces/interface/name": "br230", } operation: "delete" """ RETURN = """ response: description: Output from the CPS transaction. Output of CPS Get operation if CPS set/create/delete not done. returned: when a CPS transaction is successfully performed. 
type: list sample: [{ "data": { "base-if-vlan/if/interfaces/interface/id": 230, "cps/object-group/return-code": 0, "dell-base-if-cmn/if/interfaces/interface/if-index": 46, "if/interfaces/interface/name": "br230", "if/interfaces/interface/type": "ianaift:l2vlan" }, "key": "target/dell-base-if-cmn/if/interfaces/interface" }] cps_curr_config: description: Returns the CPS Get output i.e. the running configuration before CPS operation of set/delete is performed returned: when CPS operations set, delete type: dict sample: [{ "data": { "base-if-vlan/if/interfaces/interface/id": 230, "cps/key_data": { "if/interfaces/interface/name": "br230" }, "dell-base-if-cmn/if/interfaces/interface/if-index": 44, "dell-if/if/interfaces/interface/learning-mode": 1, "dell-if/if/interfaces/interface/mtu": 1532, "dell-if/if/interfaces/interface/phys-address": "", "dell-if/if/interfaces/interface/vlan-type": 1, "if/interfaces/interface/enabled": 0, "if/interfaces/interface/type": "ianaift:l2vlan" }, "key": "target/dell-base-if-cmn/if/interfaces/interface" }] diff: description: The actual configuration that will be pushed comparing the running configuration and input attributes returned: when CPS operations set, delete type: dict sample: { "cps/key_data": { "if/interfaces/interface/name": "br230" }, "dell-if/if/interfaces/interface/untagged-ports": [ "e101-007-0" ] } db: description: Denotes if CPS DB transaction was performed returned: when db is set to True in module options type: bool sample: True commit_event: description: Denotes if auto-commit event is set returned: when commit_event is set to True in module options type: bool sample: True """ from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six import iteritems from ansible.module_utils.network.common.utils import dict_diff try: import cps import cps_object import cps_utils HAS_CPS = True except ImportError: HAS_CPS = False def convert_cps_raw_list(raw_list): resp_list = [] if raw_list: for raw_elem in raw_list: processed_element = convert_cps_raw_data(raw_elem) if processed_element: raw_key = raw_elem['key'] individual_element = {} individual_element['data'] = processed_element individual_element['key'] = (cps.qual_from_key(raw_key) + "/" + cps.name_from_key(raw_key, 1)) resp_list.append(individual_element) return resp_list def convert_cps_raw_data(raw_elem): d = {} obj = cps_object.CPSObject(obj=raw_elem) for attr in raw_elem['data']: d[attr] = obj.get_attr_data(attr) return d def parse_cps_parameters(module_name, qualifier, attr_type, attr_data, operation=None, db=None, commit_event=None): obj = cps_object.CPSObject(module=module_name, qual=qualifier) if operation: obj.set_property('oper', operation) if attr_type: for key, val in iteritems(attr_type): cps_utils.cps_attr_types_map.add_type(key, val) for key, val in iteritems(attr_data): embed_attrs = key.split(',') embed_attrs_len = len(embed_attrs) if embed_attrs_len >= 3: obj.add_embed_attr(embed_attrs, val, embed_attrs_len - 2) else: if isinstance(val, str): val_list = val.split(',') # Treat as list if value contains ',' but is not # enclosed within {} if len(val_list) == 1 or val.startswith('{'): obj.add_attr(key, val) else: obj.add_attr(key, val_list) else: obj.add_attr(key, val) if db: cps.set_ownership_type(obj.get_key(), 'db') obj.set_property('db', True) else: obj.set_property('db', False) if commit_event: cps.set_auto_commit_event(obj.get_key(), True) obj.set_property('commit-event', True) return obj def cps_get(obj): RESULT = dict() key = obj.get() l = [] 
cps.get([key], l) resp_list = convert_cps_raw_list(l) RESULT["response"] = resp_list return RESULT def cps_transaction(obj): RESULT = dict() ch = {'operation': obj.get_property('oper'), 'change': obj.get()} if cps.transaction([ch]): RESULT["response"] = convert_cps_raw_list([ch['change']]) RESULT["changed"] = True else: error_msg = "Transaction error while " + obj.get_property('oper') raise RuntimeError(error_msg) return RESULT def parse_key_data(attrs): res = dict() for key, val in iteritems(attrs): if key == 'cps/key_data': res.update(val) else: res[key] = val return res def main(): """ main entry point for module execution """ argument_spec = dict( qualifier=dict(required=False, default="target", type='str', choices=['target', 'observed', 'proposed', 'realtime', 'registration', 'running', 'startup']), module_name=dict(required=True, type='str'), attr_type=dict(required=False, type='dict'), attr_data=dict(required=True, type='dict'), operation=dict(required=False, default="create", type='str', choices=['delete', 'create', 'set', 'action', 'get']), db=dict(required=False, default=False, type='bool'), commit_event=dict(required=False, default=False, type='bool') ) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False) if not HAS_CPS: module.fail_json(msg='CPS library required for this module') qualifier = module.params['qualifier'] module_name = module.params['module_name'] attr_type = module.params["attr_type"] attr_data = module.params["attr_data"] operation = module.params['operation'] db = module.params["db"] commit_event = module.params["commit_event"] RESULT = dict(changed=False, db=False, commit_event=False) if db: RESULT['db'] = True if commit_event: RESULT['commit_event'] = True try: # First do a CPS get operation get_obj = parse_cps_parameters(module_name, qualifier, attr_type, attr_data, 'get', db, commit_event) curr_config = cps_get(get_obj) if operation == 'get': RESULT.update(curr_config) else: diff = attr_data # Evaluate the changes in the attributes cfg = dict() if curr_config and curr_config['response']: cfg = curr_config['response'][0]['data'] key_d = 'cps/key_data' # diff computation is not needed for delete if operation != 'delete': configs = parse_key_data(cfg) attributes = parse_key_data(attr_data) diff = dict_diff(configs, attributes) # Append diff with any 'cps/key_data' from attr_data if diff and key_d in attr_data: diff[key_d] = attr_data[key_d] # Append diff with any 'cps/key_data' from curr_config # Needed for all operations including delete if diff and key_d in cfg: if key_d in diff: diff[key_d].update(cfg[key_d]) else: diff[key_d] = cfg[key_d] RESULT.update({"diff": diff}) # Create object for cps operation obj = parse_cps_parameters(module_name, qualifier, attr_type, diff, operation, db, commit_event) res = dict() if operation == "delete": if cfg: res = cps_transaction(obj) else: if diff: res = cps_transaction(obj) if not res and cfg: res.update({"response": curr_config['response']}) else: res.update({"cps_curr_config": curr_config['response']}) RESULT.update(res) except Exception as e: module.fail_json(msg=str(type(e).__name__) + ": " + str(e)) module.exit_json(**RESULT) if __name__ == '__main__': main()
gpl-3.0
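Before a set/create transaction the module above computes a minimal change set: parse_key_data folds 'cps/key_data' into the flat attribute dict, then dict_diff keeps only attributes that differ from the running configuration. A standalone sketch of that flow; the small diff helper below is a stand-in for ansible.module_utils.network.common.utils.dict_diff and only approximates its behaviour.

def parse_key_data(attrs):
    # Same idea as the module's parse_key_data: fold 'cps/key_data' into the flat dict.
    res = {}
    for key, val in attrs.items():
        if key == 'cps/key_data':
            res.update(val)
        else:
            res[key] = val
    return res

def simple_dict_diff(base, comparable):
    # Stand-in for dict_diff: keep only keys whose value differs from (or is absent in) base.
    return {k: v for k, v in comparable.items() if base.get(k) != v}

current = {
    'cps/key_data': {'if/interfaces/interface/name': 'br230'},
    'dell-if/if/interfaces/interface/untagged-ports': ['e101-007-0'],
}
desired = {
    'cps/key_data': {'if/interfaces/interface/name': 'br230'},
    'dell-if/if/interfaces/interface/untagged-ports': ['e101-008-0'],
}
diff = simple_dict_diff(parse_key_data(current), parse_key_data(desired))
print(diff)  # only the untagged-ports change would be pushed; the module re-attaches cps/key_data afterwards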
nttks/edx-platform
lms/djangoapps/bulk_email/tests/test_err_handling.py
30
16895
# -*- coding: utf-8 -*- """ Unit tests for handling email sending errors """ from itertools import cycle from celery.states import SUCCESS, RETRY # pylint: disable=no-name-in-module, import-error from django.conf import settings from django.core.management import call_command from django.core.urlresolvers import reverse from django.db import DatabaseError import json from mock import patch, Mock from nose.plugins.attrib import attr from smtplib import SMTPDataError, SMTPServerDisconnected, SMTPConnectError from bulk_email.models import CourseEmail, SEND_TO_ALL from bulk_email.tasks import perform_delegate_email_batches, send_course_email from instructor_task.models import InstructorTask from instructor_task.subtasks import ( initialize_subtask_info, SubtaskStatus, check_subtask_is_valid, update_subtask_status, DuplicateTaskException, MAX_DATABASE_LOCK_RETRIES, ) from opaque_keys.edx.locations import SlashSeparatedCourseKey from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory class EmailTestException(Exception): """Mock exception for email testing.""" pass @attr('shard_1') @patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message', autospec=True)) @patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False}) class TestEmailErrors(ModuleStoreTestCase): """ Test that errors from sending email are handled properly. """ def setUp(self): super(TestEmailErrors, self).setUp() course_title = u"ẗëṡẗ title イ乇丂イ ᄊ乇丂丂ムg乇 キo尺 ムレレ тэѕт мэѕѕаБэ" self.course = CourseFactory.create(display_name=course_title) self.instructor = AdminFactory.create() self.client.login(username=self.instructor.username, password="test") # load initial content (since we don't run migrations as part of tests): call_command("loaddata", "course_email_template.json") self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}) self.send_mail_url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()}) self.success_content = { 'course_id': self.course.id.to_deprecated_string(), 'success': True, } @patch('bulk_email.tasks.get_connection', autospec=True) @patch('bulk_email.tasks.send_course_email.retry') def test_data_err_retry(self, retry, get_conn): """ Test that celery handles transient SMTPDataErrors by retrying. """ get_conn.return_value.send_messages.side_effect = SMTPDataError(455, "Throttling: Sending rate exceeded") test_email = { 'action': 'Send email', 'send_to': 'myself', 'subject': 'test subject for myself', 'message': 'test message for myself' } response = self.client.post(self.send_mail_url, test_email) self.assertEquals(json.loads(response.content), self.success_content) # Test that we retry upon hitting a 4xx error self.assertTrue(retry.called) (__, kwargs) = retry.call_args exc = kwargs['exc'] self.assertIsInstance(exc, SMTPDataError) @patch('bulk_email.tasks.get_connection', autospec=True) @patch('bulk_email.tasks.update_subtask_status') @patch('bulk_email.tasks.send_course_email.retry') def test_data_err_fail(self, retry, result, get_conn): """ Test that celery handles permanent SMTPDataErrors by failing and not retrying. 
""" # have every fourth email fail due to blacklisting: get_conn.return_value.send_messages.side_effect = cycle([SMTPDataError(554, "Email address is blacklisted"), None, None, None]) students = [UserFactory() for _ in xrange(settings.BULK_EMAIL_EMAILS_PER_TASK)] for student in students: CourseEnrollmentFactory.create(user=student, course_id=self.course.id) test_email = { 'action': 'Send email', 'send_to': 'all', 'subject': 'test subject for all', 'message': 'test message for all' } response = self.client.post(self.send_mail_url, test_email) self.assertEquals(json.loads(response.content), self.success_content) # We shouldn't retry when hitting a 5xx error self.assertFalse(retry.called) # Test that after the rejected email, the rest still successfully send ((_entry_id, _current_task_id, subtask_status), _kwargs) = result.call_args self.assertEquals(subtask_status.skipped, 0) expected_fails = int((settings.BULK_EMAIL_EMAILS_PER_TASK + 3) / 4.0) self.assertEquals(subtask_status.failed, expected_fails) self.assertEquals(subtask_status.succeeded, settings.BULK_EMAIL_EMAILS_PER_TASK - expected_fails) @patch('bulk_email.tasks.get_connection', autospec=True) @patch('bulk_email.tasks.send_course_email.retry') def test_disconn_err_retry(self, retry, get_conn): """ Test that celery handles SMTPServerDisconnected by retrying. """ get_conn.return_value.open.side_effect = SMTPServerDisconnected(425, "Disconnecting") test_email = { 'action': 'Send email', 'send_to': 'myself', 'subject': 'test subject for myself', 'message': 'test message for myself' } response = self.client.post(self.send_mail_url, test_email) self.assertEquals(json.loads(response.content), self.success_content) self.assertTrue(retry.called) (__, kwargs) = retry.call_args exc = kwargs['exc'] self.assertIsInstance(exc, SMTPServerDisconnected) @patch('bulk_email.tasks.get_connection', autospec=True) @patch('bulk_email.tasks.send_course_email.retry') def test_conn_err_retry(self, retry, get_conn): """ Test that celery handles SMTPConnectError by retrying. 
""" get_conn.return_value.open.side_effect = SMTPConnectError(424, "Bad Connection") test_email = { 'action': 'Send email', 'send_to': 'myself', 'subject': 'test subject for myself', 'message': 'test message for myself' } response = self.client.post(self.send_mail_url, test_email) self.assertEquals(json.loads(response.content), self.success_content) self.assertTrue(retry.called) (__, kwargs) = retry.call_args exc = kwargs['exc'] self.assertIsInstance(exc, SMTPConnectError) @patch('bulk_email.tasks.SubtaskStatus.increment') @patch('bulk_email.tasks.log') def test_nonexistent_email(self, mock_log, result): """ Tests retries when the email doesn't exist """ # create an InstructorTask object to pass through course_id = self.course.id entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor) task_input = {"email_id": -1} with self.assertRaises(CourseEmail.DoesNotExist): perform_delegate_email_batches(entry.id, course_id, task_input, "action_name") ((log_str, __, email_id), __) = mock_log.warning.call_args self.assertTrue(mock_log.warning.called) self.assertIn('Failed to get CourseEmail with id', log_str) self.assertEqual(email_id, -1) self.assertFalse(result.called) def test_nonexistent_course(self): """ Tests exception when the course in the email doesn't exist """ course_id = SlashSeparatedCourseKey("I", "DONT", "EXIST") email = CourseEmail(course_id=course_id) email.save() entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor) task_input = {"email_id": email.id} # (?i) is a regex for ignore case with self.assertRaisesRegexp(ValueError, r"(?i)course not found"): perform_delegate_email_batches(entry.id, course_id, task_input, "action_name") def test_nonexistent_to_option(self): """ Tests exception when the to_option in the email doesn't exist """ email = CourseEmail(course_id=self.course.id, to_option="IDONTEXIST") email.save() entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor) task_input = {"email_id": email.id} with self.assertRaisesRegexp(Exception, 'Unexpected bulk email TO_OPTION found: IDONTEXIST'): perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") def test_wrong_course_id_in_task(self): """ Tests exception when the course_id in task is not the same as one explicitly passed in. """ email = CourseEmail(course_id=self.course.id, to_option=SEND_TO_ALL) email.save() entry = InstructorTask.create("bogus/task/id", "task_type", "task_key", "task_input", self.instructor) task_input = {"email_id": email.id} with self.assertRaisesRegexp(ValueError, 'does not match task value'): perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") def test_wrong_course_id_in_email(self): """ Tests exception when the course_id in CourseEmail is not the same as one explicitly passed in. """ email = CourseEmail(course_id=SlashSeparatedCourseKey("bogus", "course", "id"), to_option=SEND_TO_ALL) email.save() entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor) task_input = {"email_id": email.id} with self.assertRaisesRegexp(ValueError, 'does not match email value'): perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") def test_send_email_undefined_subtask(self): # test at a lower level, to ensure that the course gets checked down below too. 
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor) entry_id = entry.id to_list = ['[email protected]'] global_email_context = {'course_title': 'dummy course'} subtask_id = "subtask-id-value" subtask_status = SubtaskStatus.create(subtask_id) email_id = 1001 with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find subtasks of instructor task'): send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status.to_dict()) def test_send_email_missing_subtask(self): # test at a lower level, to ensure that the course gets checked down below too. entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor) entry_id = entry.id to_list = ['[email protected]'] global_email_context = {'course_title': 'dummy course'} subtask_id = "subtask-id-value" initialize_subtask_info(entry, "emailed", 100, [subtask_id]) different_subtask_id = "bogus-subtask-id-value" subtask_status = SubtaskStatus.create(different_subtask_id) bogus_email_id = 1001 with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find status for subtask of instructor task'): send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict()) def test_send_email_completed_subtask(self): # test at a lower level, to ensure that the course gets checked down below too. entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor) entry_id = entry.id subtask_id = "subtask-id-value" initialize_subtask_info(entry, "emailed", 100, [subtask_id]) subtask_status = SubtaskStatus.create(subtask_id, state=SUCCESS) update_subtask_status(entry_id, subtask_id, subtask_status) bogus_email_id = 1001 to_list = ['[email protected]'] global_email_context = {'course_title': 'dummy course'} new_subtask_status = SubtaskStatus.create(subtask_id) with self.assertRaisesRegexp(DuplicateTaskException, 'already completed'): send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict()) def test_send_email_running_subtask(self): # test at a lower level, to ensure that the course gets checked down below too. entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor) entry_id = entry.id subtask_id = "subtask-id-value" initialize_subtask_info(entry, "emailed", 100, [subtask_id]) subtask_status = SubtaskStatus.create(subtask_id) update_subtask_status(entry_id, subtask_id, subtask_status) check_subtask_is_valid(entry_id, subtask_id, subtask_status) bogus_email_id = 1001 to_list = ['[email protected]'] global_email_context = {'course_title': 'dummy course'} with self.assertRaisesRegexp(DuplicateTaskException, 'already being executed'): send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict()) def test_send_email_retried_subtask(self): # test at a lower level, to ensure that the course gets checked down below too. 
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor) entry_id = entry.id subtask_id = "subtask-id-value" initialize_subtask_info(entry, "emailed", 100, [subtask_id]) subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=2) update_subtask_status(entry_id, subtask_id, subtask_status) bogus_email_id = 1001 to_list = ['[email protected]'] global_email_context = {'course_title': 'dummy course'} # try running with a clean subtask: new_subtask_status = SubtaskStatus.create(subtask_id) with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'): send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict()) # try again, with a retried subtask with lower count: new_subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=1) with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'): send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict()) def test_send_email_with_locked_instructor_task(self): # test at a lower level, to ensure that the course gets checked down below too. entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor) entry_id = entry.id subtask_id = "subtask-id-locked-model" initialize_subtask_info(entry, "emailed", 100, [subtask_id]) subtask_status = SubtaskStatus.create(subtask_id) bogus_email_id = 1001 to_list = ['[email protected]'] global_email_context = {'course_title': 'dummy course'} with patch('instructor_task.subtasks.InstructorTask.save') as mock_task_save: mock_task_save.side_effect = DatabaseError with self.assertRaises(DatabaseError): send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict()) self.assertEquals(mock_task_save.call_count, MAX_DATABASE_LOCK_RETRIES) def test_send_email_undefined_email(self): # test at a lower level, to ensure that the course gets checked down below too. entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor) entry_id = entry.id to_list = ['[email protected]'] global_email_context = {'course_title': 'dummy course'} subtask_id = "subtask-id-undefined-email" initialize_subtask_info(entry, "emailed", 100, [subtask_id]) subtask_status = SubtaskStatus.create(subtask_id) bogus_email_id = 1001 with self.assertRaises(CourseEmail.DoesNotExist): # we skip the call that updates subtask status, since we've not set up the InstructorTask # for the subtask, and it's not important to the test. with patch('bulk_email.tasks.update_subtask_status'): send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
agpl-3.0
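The expected_fails arithmetic in test_data_err_fail above can be checked in isolation: with cycle([SMTPDataError, None, None, None]) every fourth send raises, so for N e-mails the failure count is int((N + 3) / 4.0). A quick pure-Python verification, independent of Django and celery:

from itertools import cycle

def simulated_failures(n_emails):
    # True marks the positions where the cycled side effect would raise SMTPDataError.
    side_effects = cycle([True, False, False, False])
    return sum(1 for _, fails in zip(range(n_emails), side_effects) if fails)

for n in (1, 4, 5, 100):
    expected = int((n + 3) / 4.0)   # formula used in the test above
    assert simulated_failures(n) == expected
    print(n, expected)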
julietbravo/microhh
cases/rcemip/rcemip_coarse_input.py
5
5604
import numpy as np import netCDF4 as nc float_type = "f8" #T_0 = 295. #q_0 = 0.01200 # for 295 K SST. T_0 = 300. q_0 = 0.01864 # for 300 K SST. #T_0 = 305. #q_0 = 0.02400 # for 305 K SST. def q_sat(T, p): Tc = T - 273.15 # Arden-Buck equation. e_sat = 611.21 * np.exp(17.502 * Tc / (240.97 + Tc)) Rd, Rv = 287.04, 461.5 return Rd/Rv * e_sat / (p - (1. - Rd/Rv)*e_sat) def calc_p_q_T_thl_o3(z): z_q1 = 4.0e3 z_q2 = 7.5e3 z_t = 15.e3 q_t = 1.e-14 q = q_0 * np.exp(-z /z_q1) * np.exp(-(z /z_q2)**2) # CvH hack to remove moisture jump. q_tb = q_0 * np.exp(-z_t/z_q1) * np.exp(-(z_t/z_q2)**2) q -= q_tb + q_t i_above_zt = np.where(z > z_t) q[i_above_zt] = q_t gamma = 6.7e-3 Tv_0 = (1. + 0.608*q_0)*T_0 Tv = Tv_0 - gamma*z Tv_t = Tv_0 - gamma*z_t Tv[i_above_zt] = Tv_t T = Tv / (1. + 0.608*q) g = 9.79764 Rd = 287.04 cp = 1005. p0 = 101480. print("q_sat at T_0 = ", q_sat(T_0, p0)) p = p0 * (Tv / Tv_0)**(g/(Rd*gamma)) p_tmp = p0 * (Tv_t/Tv_0)**(g/(Rd*gamma)) \ * np.exp( -( (g*(z-z_t)) / (Rd*Tv_t) ) ) p[i_above_zt] = p_tmp[i_above_zt] p00 = 1e5 thl = T*(p00/p)**(Rd/cp) g1 = 3.6478 g2 = 0.83209 g3 = 11.3515 p_hpa = p/100. o3 = g1 * p_hpa**g2 * np.exp(-p_hpa/g3) * 1e-6 return p, q, T, thl, o3 nc_file = nc.Dataset("rcemip_input.nc", mode="w", datamodel="NETCDF4", clobber=True) ### RADIATION INIT ### # Radiation profiles. z_top = 100.e3 dz = 500. z = np.arange(dz/2, z_top, dz) zh = np.arange( 0, z_top-dz/2, dz) zh = np.append(zh, z_top) p_lay, h2o, T_lay, _, o3 = calc_p_q_T_thl_o3( z) p_lev, _, T_lev, _, _ = calc_p_q_T_thl_o3(zh) co2 = 348.e-6 ch4 = 1650.e-9 n2o = 306.e-9 n2 = 0.7808 o2 = 0.2095 g1 = 3.6478 g2 = 0.83209 g3 = 11.3515 p_hpa = p_lay/100. o3 = g1 * p_hpa**g2 * np.exp(-p_hpa/g3) * 1e-6 nc_group_rad = nc_file.createGroup("radiation") nc_group_rad.createDimension("lay", p_lay.size) nc_group_rad.createDimension("lev", p_lev.size) nc_z_lay = nc_group_rad.createVariable("z_lay", float_type, ("lay")) nc_z_lev = nc_group_rad.createVariable("z_lev", float_type, ("lev")) nc_z_lay[:] = z [:] nc_z_lev[:] = zh[:] nc_p_lay = nc_group_rad.createVariable("p_lay", float_type, ("lay")) nc_p_lev = nc_group_rad.createVariable("p_lev", float_type, ("lev")) nc_p_lay[:] = p_lay[:] nc_p_lev[:] = p_lev[:] nc_T_lay = nc_group_rad.createVariable("t_lay", float_type, ("lay")) nc_T_lev = nc_group_rad.createVariable("t_lev", float_type, ("lev")) nc_T_lay[:] = T_lay[:] nc_T_lev[:] = T_lev[:] nc_CO2 = nc_group_rad.createVariable("co2", float_type, ("lay")) nc_CH4 = nc_group_rad.createVariable("ch4", float_type, ("lay")) nc_N2O = nc_group_rad.createVariable("n2o", float_type, ("lay")) nc_O3 = nc_group_rad.createVariable("o3" , float_type, ("lay")) nc_H2O = nc_group_rad.createVariable("h2o", float_type, ("lay")) nc_N2 = nc_group_rad.createVariable("n2" , float_type, ("lay")) nc_O2 = nc_group_rad.createVariable("o2" , float_type, ("lay")) nc_CFC11 = nc_group_rad.createVariable("cfc11", float_type, ("lay")) nc_CFC12 = nc_group_rad.createVariable("cfc12", float_type, ("lay")) nc_CFC22 = nc_group_rad.createVariable("cfc22", float_type, ("lay")) nc_CCL4 = nc_group_rad.createVariable("ccl4" , float_type, ("lay")) nc_CO2[:] = co2 nc_CH4[:] = ch4 nc_N2O[:] = n2o nc_O3 [:] = o3 [:] nc_H2O[:] = h2o[:] nc_N2 [:] = n2 nc_O2 [:] = o2 nc_CFC11[:] = 0. nc_CFC12[:] = 0. nc_CFC22[:] = 0. nc_CCL4 [:] = 0. 
### INITIAL PROFILES ### # Get number of vertical levels and size from .ini file with open('rcemip.ini') as f: for line in f: if(line.split('=')[0]=='ktot'): kmax = int(line.split('=')[1]) if(line.split('=')[0]=='zsize'): zsize = float(line.split('=')[1]) # set the height # dz = zsize / kmax # z = np.linspace(0.5*dz, zsize-0.5*dz, kmax) # Official RCEMIP coarse vertical resolution. z1 = np.array([37., 112., 194., 288., 395., 520., 667., 843., 1062., 1331., 1664., 2055., 2505., 3000.]); z2 = np.arange(3500., 33001., 500.) z = np.concatenate((z1, z2)) z = z[:-2] zh = 0.5*(z[:-1] + z[1:]) zh = np.append(0., zh) zh = np.append(zh, zsize) if (z.size != kmax): raise RuntimeError("kmax does not match the RCEMIP profile") _, qt, _, thl, o3 = calc_p_q_T_thl_o3(z) nc_file.createDimension("z", kmax) nc_z = nc_file.createVariable("z" , float_type, ("z")) nc_z[:] = z[:] # Initial profiles. nc_group_init = nc_file.createGroup("init"); nc_thl = nc_group_init.createVariable("thl", float_type, ("z")) nc_qt = nc_group_init.createVariable("qt" , float_type, ("z")) nc_CO2 = nc_group_init.createVariable("co2", float_type, ("z")) nc_CH4 = nc_group_init.createVariable("ch4", float_type, ("z")) nc_N2O = nc_group_init.createVariable("n2o", float_type, ("z")) nc_O3 = nc_group_init.createVariable("o3" , float_type, ("z")) nc_H2O = nc_group_init.createVariable("h2o", float_type, ("z")) nc_N2 = nc_group_init.createVariable("n2" , float_type, ("z")) nc_O2 = nc_group_init.createVariable("o2" , float_type, ("z")) nc_CFC11 = nc_group_init.createVariable("cfc11", float_type, ("z")) nc_CFC12 = nc_group_init.createVariable("cfc12", float_type, ("z")) nc_CFC22 = nc_group_init.createVariable("cfc22", float_type, ("z")) nc_CCL4 = nc_group_init.createVariable("ccl4" , float_type, ("z")) nc_thl[:] = thl[:] nc_qt [:] = qt [:] nc_CO2[:] = co2 nc_CH4[:] = ch4 nc_N2O[:] = n2o nc_O3 [:] = o3 nc_H2O[:] = qt[:] nc_N2 [:] = n2 nc_O2 [:] = o2 nc_CFC11[:] = 0. nc_CFC12[:] = 0. nc_CFC22[:] = 0. nc_CCL4 [:] = 0. nc_file.close()
gpl-3.0
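The analytic profiles in calc_p_q_T_thl_o3 above can be evaluated on their own; the numpy sketch below restates the q(z) and virtual-temperature expressions for the 300 K case at a few heights, without the tropopause jump-removal hack or the above-z_t branches, so the numbers are only for orientation.

import numpy as np

T_0, q_0 = 300.0, 0.01864          # 300 K SST constants from the script above
z_q1, z_q2 = 4.0e3, 7.5e3
gamma = 6.7e-3

z = np.array([0.0, 2.0e3, 10.0e3, 15.0e3])
q = q_0 * np.exp(-z / z_q1) * np.exp(-(z / z_q2) ** 2)   # specific humidity profile below z_t
Tv_0 = (1.0 + 0.608 * q_0) * T_0
Tv = Tv_0 - gamma * z                                     # virtual temperature, linear below z_t
for zi, qi, Tvi in zip(z, q, Tv):
    print(f"z = {zi/1e3:5.1f} km   q = {qi:.5f} kg/kg   Tv = {Tvi:6.1f} K")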
mapr/sahara
sahara/tests/unit/service/test_ops.py
2
4622
# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from sahara.plugins import base as base_plugins from sahara.service import ops from sahara.tests.unit import base class FakeNodeGroup(): id = 'id' count = 2 instances = [1, 2] class FakePlugin(mock.Mock): node_groups = [FakeNodeGroup()] def update_infra(self, cluster): TestOPS.SEQUENCE.append('update_infra') def configure_cluster(self, cluster): TestOPS.SEQUENCE.append('configure_cluster') def start_cluster(self, cluster): TestOPS.SEQUENCE.append('start_cluster') def on_terminate_cluster(self, cluster): TestOPS.SEQUENCE.append('on_terminate_cluster') def decommission_nodes(self, cluster, instances_to_delete): TestOPS.SEQUENCE.append('decommission_nodes') def scale_cluster(self, cluster, node_group_id_map): TestOPS.SEQUENCE.append('plugin.scale_cluster') def cluster_destroy(self, ctx, cluster): TestOPS.SEQUENCE.append('cluster_destroy') class FakeINFRA(): def create_cluster(self, cluster): TestOPS.SEQUENCE.append('create_cluster') def scale_cluster(self, cluster, node_group_id_map): TestOPS.SEQUENCE.append('INFRA.scale_cluster') return True def shutdown_cluster(self, cluster): TestOPS.SEQUENCE.append('shutdown_cluster') def rollback_cluster(self, cluster, reason): TestOPS.SEQUENCE.append('rollback_cluster') class TestOPS(base.SaharaTestCase): SEQUENCE = [] @mock.patch('sahara.service.ops._update_sahara_info') @mock.patch('sahara.service.ops._prepare_provisioning', return_value=(mock.Mock(), mock.Mock(), FakePlugin())) @mock.patch('sahara.utils.general.change_cluster_status') @mock.patch('sahara.conductor.API.cluster_get') @mock.patch('sahara.service.ops.CONF') @mock.patch('sahara.service.trusts.create_trust_for_cluster') @mock.patch('sahara.conductor.API.job_execution_get_all') @mock.patch('sahara.service.edp.job_manager.run_job') def test_provision_cluster(self, p_run_job, p_job_exec, p_create_trust, p_conf, p_cluster_get, p_change_status, p_prep_provisioning, p_update_sahara_info): del self.SEQUENCE[:] ops.INFRA = FakeINFRA() ops._provision_cluster('123') # checking that order of calls is right self.assertEqual(['update_infra', 'create_cluster', 'configure_cluster', 'start_cluster'], self.SEQUENCE, 'Order of calls is wrong') @mock.patch('sahara.service.ops._prepare_provisioning', return_value=(mock.Mock(), mock.Mock(), FakePlugin())) @mock.patch('sahara.utils.general.change_cluster_status', return_value=FakePlugin()) @mock.patch('sahara.utils.general.get_instances') def test_provision_scaled_cluster(self, p_get_instances, p_change_status, p_prep_provisioning): del self.SEQUENCE[:] ops.INFRA = FakeINFRA() ops._provision_scaled_cluster('123', {'id': 1}) # checking that order of calls is right self.assertEqual(['decommission_nodes', 'INFRA.scale_cluster', 'plugin.scale_cluster'], self.SEQUENCE, 'Order of calls is wrong') @mock.patch('sahara.service.ops.CONF') @mock.patch('sahara.service.trusts.delete_trust_from_cluster') @mock.patch('sahara.context.ctx') def test_terminate_cluster(self, p_ctx, p_delete_trust, 
p_conf): del self.SEQUENCE[:] base_plugins.PLUGINS = FakePlugin() base_plugins.PLUGINS.get_plugin.return_value = FakePlugin() ops.INFRA = FakeINFRA() ops.conductor = FakePlugin() ops.terminate_cluster('123') # checking that order of calls is right self.assertEqual(['on_terminate_cluster', 'shutdown_cluster', 'cluster_destroy'], self.SEQUENCE, 'Order of calls is wrong')
apache-2.0
dims/python-k8sclient
k8sclient/client/models/v1_resource_quota_list.py
5
5429
# coding: utf-8 """ Copyright 2015 SmartBear Software Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from pprint import pformat from six import iteritems class V1ResourceQuotaList(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self): """ Swagger model :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. """ self.swagger_types = { 'kind': 'str', 'api_version': 'str', 'metadata': 'V1ListMeta', 'items': 'list[V1ResourceQuota]' } self.attribute_map = { 'kind': 'kind', 'api_version': 'apiVersion', 'metadata': 'metadata', 'items': 'items' } self._kind = None self._api_version = None self._metadata = None self._items = None @property def kind(self): """ Gets the kind of this V1ResourceQuotaList. kind of object, in CamelCase; cannot be updated; see http://releases.k8s.io/v1.0.4/docs/api-conventions.md#types-kinds :return: The kind of this V1ResourceQuotaList. :rtype: str """ return self._kind @kind.setter def kind(self, kind): """ Sets the kind of this V1ResourceQuotaList. kind of object, in CamelCase; cannot be updated; see http://releases.k8s.io/v1.0.4/docs/api-conventions.md#types-kinds :param kind: The kind of this V1ResourceQuotaList. :type: str """ self._kind = kind @property def api_version(self): """ Gets the api_version of this V1ResourceQuotaList. version of the schema the object should have; see http://releases.k8s.io/v1.0.4/docs/api-conventions.md#resources :return: The api_version of this V1ResourceQuotaList. :rtype: str """ return self._api_version @api_version.setter def api_version(self, api_version): """ Sets the api_version of this V1ResourceQuotaList. version of the schema the object should have; see http://releases.k8s.io/v1.0.4/docs/api-conventions.md#resources :param api_version: The api_version of this V1ResourceQuotaList. :type: str """ self._api_version = api_version @property def metadata(self): """ Gets the metadata of this V1ResourceQuotaList. standard list metadata; see http://releases.k8s.io/v1.0.4/docs/api-conventions.md#metadata :return: The metadata of this V1ResourceQuotaList. :rtype: V1ListMeta """ return self._metadata @metadata.setter def metadata(self, metadata): """ Sets the metadata of this V1ResourceQuotaList. standard list metadata; see http://releases.k8s.io/v1.0.4/docs/api-conventions.md#metadata :param metadata: The metadata of this V1ResourceQuotaList. :type: V1ListMeta """ self._metadata = metadata @property def items(self): """ Gets the items of this V1ResourceQuotaList. items is a list of ResourceQuota objects; see http://releases.k8s.io/v1.0.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota :return: The items of this V1ResourceQuotaList. :rtype: list[V1ResourceQuota] """ return self._items @items.setter def items(self, items): """ Sets the items of this V1ResourceQuotaList. 
items is a list of ResourceQuota objects; see http://releases.k8s.io/v1.0.4/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota :param items: The items of this V1ResourceQuotaList. :type: list[V1ResourceQuota] """ self._items = items def to_dict(self): """ Return model properties dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() else: result[attr] = value return result def to_str(self): """ Return model properties str """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str()
apache-2.0
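The generated model above is used by setting attributes through its properties and serialising with to_dict(), which walks swagger_types. A minimal usage sketch; the import path is inferred from the record's file path and may differ in an installed distribution.

# Import path inferred from k8sclient/client/models/v1_resource_quota_list.py (assumption).
from k8sclient.client.models.v1_resource_quota_list import V1ResourceQuotaList

quota_list = V1ResourceQuotaList()
quota_list.kind = "ResourceQuotaList"
quota_list.api_version = "v1"
quota_list.items = []            # would normally hold V1ResourceQuota objects

print(quota_list.to_dict())
# e.g. {'kind': 'ResourceQuotaList', 'api_version': 'v1', 'metadata': None, 'items': []}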
blindroot/django
django/db/migrations/optimizer.py
127
2872
from __future__ import unicode_literals class MigrationOptimizer(object): """ Powers the optimization process, where you provide a list of Operations and you are returned a list of equal or shorter length - operations are merged into one if possible. For example, a CreateModel and an AddField can be optimized into a new CreateModel, and CreateModel and DeleteModel can be optimized into nothing. """ def optimize(self, operations, app_label=None): """ Main optimization entry point. Pass in a list of Operation instances, get out a new list of Operation instances. Unfortunately, due to the scope of the optimization (two combinable operations might be separated by several hundred others), this can't be done as a peephole optimization with checks/output implemented on the Operations themselves; instead, the optimizer looks at each individual operation and scans forwards in the list to see if there are any matches, stopping at boundaries - operations which can't be optimized over (RunSQL, operations on the same field/model, etc.) The inner loop is run until the starting list is the same as the result list, and then the result is returned. This means that operation optimization must be stable and always return an equal or shorter list. The app_label argument is optional, but if you pass it you'll get more efficient optimization. """ # Internal tracking variable for test assertions about # of loops self._iterations = 0 while True: result = self.optimize_inner(operations, app_label) self._iterations += 1 if result == operations: return result operations = result def optimize_inner(self, operations, app_label=None): """ Inner optimization loop. """ new_operations = [] for i, operation in enumerate(operations): # Compare it to each operation after it for j, other in enumerate(operations[i + 1:]): in_between = operations[i + 1:i + j + 1] result = operation.reduce(other, in_between, app_label) if isinstance(result, list): # Optimize! Add result, then remaining others, then return new_operations.extend(result) new_operations.extend(in_between) new_operations.extend(operations[i + j + 2:]) return new_operations if not result: # We can't optimize across `other`. new_operations.append(operation) break else: new_operations.append(operation) return new_operations
bsd-3-clause
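A sketch of the optimizer collapsing a CreateModel followed by an AddField on the same model into a single CreateModel. It assumes a Django tree matching this optimizer (reduce() with the in_between argument) and that the field classes can be instantiated without a configured settings module; in practice this logic runs inside squashmigrations rather than being called directly.

from django.db import migrations, models

operations = [
    migrations.CreateModel(
        name="Author",
        fields=[("id", models.AutoField(primary_key=True))],
    ),
    migrations.AddField(
        model_name="Author",
        name="name",
        field=models.CharField(max_length=100),
    ),
]

optimizer = MigrationOptimizer()
optimized = optimizer.optimize(operations, app_label="library")
# Expected: one CreateModel("Author", [id, name]) with the AddField folded in.
print(optimized)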
benjamindeleener/odoo
addons/crm_partner_assign/wizard/crm_forward_to_partner.py
46
9295
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from openerp.osv import fields, osv from openerp.tools.translate import _ from openerp.exceptions import UserError class crm_lead_forward_to_partner(osv.TransientModel): """ Forward info history to partners. """ _name = 'crm.lead.forward.to.partner' def _convert_to_assignation_line(self, cr, uid, lead, partner, context=None): lead_location = [] partner_location = [] if lead.country_id: lead_location.append(lead.country_id.name) if lead.city: lead_location.append(lead.city) if partner: if partner.country_id: partner_location.append(partner.country_id.name) if partner.city: partner_location.append(partner.city) return {'lead_id': lead.id, 'lead_location': ", ".join(lead_location), 'partner_assigned_id': partner and partner.id or False, 'partner_location': ", ".join(partner_location), 'lead_link': self.get_lead_portal_url(cr, uid, lead.id, lead.type, context=context), } def default_get(self, cr, uid, fields, context=None): if context is None: context = {} lead_obj = self.pool.get('crm.lead') email_template_obj = self.pool.get('mail.template') try: template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'email_template_lead_forward_mail')[1] except ValueError: template_id = False res = super(crm_lead_forward_to_partner, self).default_get(cr, uid, fields, context=context) active_ids = context.get('active_ids') default_composition_mode = context.get('default_composition_mode') res['assignation_lines'] = [] if template_id: res['body'] = email_template_obj.get_email_template(cr, uid, template_id, 0).body_html if active_ids: lead_ids = lead_obj.browse(cr, uid, active_ids, context=context) if default_composition_mode == 'mass_mail': partner_assigned_ids = lead_obj.search_geo_partner(cr, uid, active_ids, context=context) else: partner_assigned_ids = dict((lead.id, lead.partner_assigned_id and lead.partner_assigned_id.id or False) for lead in lead_ids) res['partner_id'] = lead_ids[0].partner_assigned_id.id for lead in lead_ids: partner_id = partner_assigned_ids.get(lead.id) or False partner = False if partner_id: partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context) res['assignation_lines'].append((0, 0, self._convert_to_assignation_line(cr, uid, lead, partner))) return res def action_forward(self, cr, uid, ids, context=None): lead_obj = self.pool.get('crm.lead') record = self.browse(cr, uid, ids[0], context=context) email_template_obj = self.pool.get('mail.template') try: template_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'email_template_lead_forward_mail')[1] except ValueError: raise UserError(_('The Forward Email Template is not in the database')) try: portal_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_portal')[1] except ValueError: raise UserError(_('The Portal group cannot be found')) local_context = context.copy() if not (record.forward_type == 'single'): no_email = set() for lead in record.assignation_lines: if lead.partner_assigned_id and not lead.partner_assigned_id.email: no_email.add(lead.partner_assigned_id.name) if no_email: raise UserError(_('Set an email address for the partner(s): %s') % ", ".join(no_email)) if record.forward_type == 'single' and not record.partner_id.email: raise UserError(_('Set an email address for the partner %s') % record.partner_id.name) partners_leads = {} for lead in record.assignation_lines: partner = 
record.forward_type == 'single' and record.partner_id or lead.partner_assigned_id lead_details = { 'lead_link': lead.lead_link, 'lead_id': lead.lead_id, } if partner: partner_leads = partners_leads.get(partner.id) if partner_leads: partner_leads['leads'].append(lead_details) else: partners_leads[partner.id] = {'partner': partner, 'leads': [lead_details]} stage_id = False if record.assignation_lines and record.assignation_lines[0].lead_id.type == 'lead': try: stage_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'stage_portal_lead_assigned')[1] except ValueError: pass for partner_id, partner_leads in partners_leads.items(): in_portal = False for contact in (partner.child_ids or [partner]): if contact.user_ids: in_portal = portal_id in [g.id for g in contact.user_ids[0].groups_id] local_context['partner_id'] = partner_leads['partner'] local_context['partner_leads'] = partner_leads['leads'] local_context['partner_in_portal'] = in_portal email_template_obj.send_mail(cr, uid, template_id, ids[0], context=local_context) lead_ids = [lead['lead_id'].id for lead in partner_leads['leads']] values = {'partner_assigned_id': partner_id, 'user_id': partner_leads['partner'].user_id.id} if stage_id: values['stage_id'] = stage_id lead_obj.write(cr, uid, lead_ids, values) self.pool.get('crm.lead').message_subscribe(cr, uid, lead_ids, [partner_id], context=context) return True def get_lead_portal_url(self, cr, uid, lead_id, type, context=None): action = type == 'opportunity' and 'action_portal_opportunities' or 'action_portal_leads' try: action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', action)[1] except ValueError: action_id = False portal_link = "%s/?db=%s#id=%s&action=%s&view_type=form" % (self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url'), cr.dbname, lead_id, action_id) return portal_link def get_portal_url(self, cr, uid, ids, context=None): portal_link = "%s/?db=%s" % (self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url'), cr.dbname) return portal_link _columns = { 'forward_type': fields.selection([('single', 'a single partner: manual selection of partner'), ('assigned', "several partners: automatic assignation, using GPS coordinates and partner's grades"), ], 'Forward selected leads to'), 'partner_id': fields.many2one('res.partner', 'Forward Leads To'), 'assignation_lines': fields.one2many('crm.lead.assignation', 'forward_id', 'Partner Assignation'), 'body': fields.html('Contents', help='Automatically sanitized HTML contents'), } _defaults = { 'forward_type': lambda self, cr, uid, c: c.get('forward_type') or 'single', } class crm_lead_assignation (osv.TransientModel): _name = 'crm.lead.assignation' _columns = { 'forward_id': fields.many2one('crm.lead.forward.to.partner', 'Partner Assignation'), 'lead_id': fields.many2one('crm.lead', 'Lead'), 'lead_location': fields.char('Lead Location', size=128), 'partner_assigned_id': fields.many2one('res.partner', 'Assigned Partner'), 'partner_location': fields.char('Partner Location', size=128), 'lead_link': fields.char('Lead Single Links', size=128), } def on_change_lead_id(self, cr, uid, ids, lead_id, context=None): if not context: context = {} if not lead_id: return {'value': {'lead_location': False}} lead = self.pool.get('crm.lead').browse(cr, uid, lead_id, context=context) lead_location = [] if lead.country_id: lead_location.append(lead.country_id.name) if lead.city: lead_location.append(lead.city) return {'value': {'lead_location': ", 
".join(lead_location)}} def on_change_partner_assigned_id(self, cr, uid, ids, partner_assigned_id, context=None): if not context: context = {} if not partner_assigned_id: return {'value': {'lead_location': False}} partner = self.pool.get('res.partner').browse(cr, uid, partner_assigned_id, context=context) partner_location = [] if partner.country_id: partner_location.append(partner.country_id.name) if partner.city: partner_location.append(partner.city) return {'value': {'partner_location': ", ".join(partner_location)}}
gpl-3.0
gabrielfalcao/lettuce
tests/integration/lib/Django-1.3/tests/regressiontests/m2m_through_regress/models.py
86
2347
from datetime import datetime from django.contrib.auth.models import User from django.core import management from django.db import models # Forward declared intermediate model class Membership(models.Model): person = models.ForeignKey('Person') group = models.ForeignKey('Group') price = models.IntegerField(default=100) def __unicode__(self): return "%s is a member of %s" % (self.person.name, self.group.name) # using custom id column to test ticket #11107 class UserMembership(models.Model): id = models.AutoField(db_column='usermembership_id', primary_key=True) user = models.ForeignKey(User) group = models.ForeignKey('Group') price = models.IntegerField(default=100) def __unicode__(self): return "%s is a user and member of %s" % (self.user.username, self.group.name) class Person(models.Model): name = models.CharField(max_length=128) def __unicode__(self): return self.name class Group(models.Model): name = models.CharField(max_length=128) # Membership object defined as a class members = models.ManyToManyField(Person, through=Membership) user_members = models.ManyToManyField(User, through='UserMembership') def __unicode__(self): return self.name # A set of models that use a non-abstract inherited model as the 'through' model. class A(models.Model): a_text = models.CharField(max_length=20) class ThroughBase(models.Model): a = models.ForeignKey(A) b = models.ForeignKey('B') class Through(ThroughBase): extra = models.CharField(max_length=20) class B(models.Model): b_text = models.CharField(max_length=20) a_list = models.ManyToManyField(A, through=Through) # Using to_field on the through model class Car(models.Model): make = models.CharField(max_length=20, unique=True) drivers = models.ManyToManyField('Driver', through='CarDriver') def __unicode__(self, ): return self.make class Driver(models.Model): name = models.CharField(max_length=20, unique=True) def __unicode__(self, ): return self.name class CarDriver(models.Model): car = models.ForeignKey('Car', to_field='make') driver = models.ForeignKey('Driver', to_field='name') def __unicode__(self, ): return u"pk=%s car=%s driver=%s" % (str(self.pk), self.car, self.driver)
gpl-3.0
zifeo/nest-simulator
testsuite/manualtests/test_tsodyks_depr_fac.py
13
1136
# -*- coding: utf-8 -*- # # test_tsodyks_depr_fac.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. from scipy import * from matplotlib.pylab import * from matplotlib.mlab import * def plot_spikes(): dt = 0.1 # time resolution nbins = 1000 N = 500 # number of neurons vm = load('voltmeter-0-0-4.dat') figure(1) clf() plot(vm[:,0], vm[:,1], 'r') xlabel('time / ms') ylabel('$V_m [mV]$') savefig('test_tsodyks_depressing.png') plot_spikes() show()
gpl-2.0
WebSpider/headphones
lib/unidecode/x075.py
253
4675
data = ( 'Zhui ', # 0x00 'Ping ', # 0x01 'Bian ', # 0x02 'Zhou ', # 0x03 'Zhen ', # 0x04 'Senchigura ', # 0x05 'Ci ', # 0x06 'Ying ', # 0x07 'Qi ', # 0x08 'Xian ', # 0x09 'Lou ', # 0x0a 'Di ', # 0x0b 'Ou ', # 0x0c 'Meng ', # 0x0d 'Zhuan ', # 0x0e 'Peng ', # 0x0f 'Lin ', # 0x10 'Zeng ', # 0x11 'Wu ', # 0x12 'Pi ', # 0x13 'Dan ', # 0x14 'Weng ', # 0x15 'Ying ', # 0x16 'Yan ', # 0x17 'Gan ', # 0x18 'Dai ', # 0x19 'Shen ', # 0x1a 'Tian ', # 0x1b 'Tian ', # 0x1c 'Han ', # 0x1d 'Chang ', # 0x1e 'Sheng ', # 0x1f 'Qing ', # 0x20 'Sheng ', # 0x21 'Chan ', # 0x22 'Chan ', # 0x23 'Rui ', # 0x24 'Sheng ', # 0x25 'Su ', # 0x26 'Sen ', # 0x27 'Yong ', # 0x28 'Shuai ', # 0x29 'Lu ', # 0x2a 'Fu ', # 0x2b 'Yong ', # 0x2c 'Beng ', # 0x2d 'Feng ', # 0x2e 'Ning ', # 0x2f 'Tian ', # 0x30 'You ', # 0x31 'Jia ', # 0x32 'Shen ', # 0x33 'Zha ', # 0x34 'Dian ', # 0x35 'Fu ', # 0x36 'Nan ', # 0x37 'Dian ', # 0x38 'Ping ', # 0x39 'Ting ', # 0x3a 'Hua ', # 0x3b 'Ting ', # 0x3c 'Quan ', # 0x3d 'Zi ', # 0x3e 'Meng ', # 0x3f 'Bi ', # 0x40 'Qi ', # 0x41 'Liu ', # 0x42 'Xun ', # 0x43 'Liu ', # 0x44 'Chang ', # 0x45 'Mu ', # 0x46 'Yun ', # 0x47 'Fan ', # 0x48 'Fu ', # 0x49 'Geng ', # 0x4a 'Tian ', # 0x4b 'Jie ', # 0x4c 'Jie ', # 0x4d 'Quan ', # 0x4e 'Wei ', # 0x4f 'Fu ', # 0x50 'Tian ', # 0x51 'Mu ', # 0x52 'Tap ', # 0x53 'Pan ', # 0x54 'Jiang ', # 0x55 'Wa ', # 0x56 'Da ', # 0x57 'Nan ', # 0x58 'Liu ', # 0x59 'Ben ', # 0x5a 'Zhen ', # 0x5b 'Chu ', # 0x5c 'Mu ', # 0x5d 'Mu ', # 0x5e 'Ce ', # 0x5f 'Cen ', # 0x60 'Gai ', # 0x61 'Bi ', # 0x62 'Da ', # 0x63 'Zhi ', # 0x64 'Lue ', # 0x65 'Qi ', # 0x66 'Lue ', # 0x67 'Pan ', # 0x68 'Kesa ', # 0x69 'Fan ', # 0x6a 'Hua ', # 0x6b 'Yu ', # 0x6c 'Yu ', # 0x6d 'Mu ', # 0x6e 'Jun ', # 0x6f 'Yi ', # 0x70 'Liu ', # 0x71 'Yu ', # 0x72 'Die ', # 0x73 'Chou ', # 0x74 'Hua ', # 0x75 'Dang ', # 0x76 'Chuo ', # 0x77 'Ji ', # 0x78 'Wan ', # 0x79 'Jiang ', # 0x7a 'Sheng ', # 0x7b 'Chang ', # 0x7c 'Tuan ', # 0x7d 'Lei ', # 0x7e 'Ji ', # 0x7f 'Cha ', # 0x80 'Liu ', # 0x81 'Tatamu ', # 0x82 'Tuan ', # 0x83 'Lin ', # 0x84 'Jiang ', # 0x85 'Jiang ', # 0x86 'Chou ', # 0x87 'Bo ', # 0x88 'Die ', # 0x89 'Die ', # 0x8a 'Pi ', # 0x8b 'Nie ', # 0x8c 'Dan ', # 0x8d 'Shu ', # 0x8e 'Shu ', # 0x8f 'Zhi ', # 0x90 'Yi ', # 0x91 'Chuang ', # 0x92 'Nai ', # 0x93 'Ding ', # 0x94 'Bi ', # 0x95 'Jie ', # 0x96 'Liao ', # 0x97 'Gong ', # 0x98 'Ge ', # 0x99 'Jiu ', # 0x9a 'Zhou ', # 0x9b 'Xia ', # 0x9c 'Shan ', # 0x9d 'Xu ', # 0x9e 'Nue ', # 0x9f 'Li ', # 0xa0 'Yang ', # 0xa1 'Chen ', # 0xa2 'You ', # 0xa3 'Ba ', # 0xa4 'Jie ', # 0xa5 'Jue ', # 0xa6 'Zhi ', # 0xa7 'Xia ', # 0xa8 'Cui ', # 0xa9 'Bi ', # 0xaa 'Yi ', # 0xab 'Li ', # 0xac 'Zong ', # 0xad 'Chuang ', # 0xae 'Feng ', # 0xaf 'Zhu ', # 0xb0 'Pao ', # 0xb1 'Pi ', # 0xb2 'Gan ', # 0xb3 'Ke ', # 0xb4 'Ci ', # 0xb5 'Xie ', # 0xb6 'Qi ', # 0xb7 'Dan ', # 0xb8 'Zhen ', # 0xb9 'Fa ', # 0xba 'Zhi ', # 0xbb 'Teng ', # 0xbc 'Ju ', # 0xbd 'Ji ', # 0xbe 'Fei ', # 0xbf 'Qu ', # 0xc0 'Dian ', # 0xc1 'Jia ', # 0xc2 'Xian ', # 0xc3 'Cha ', # 0xc4 'Bing ', # 0xc5 'Ni ', # 0xc6 'Zheng ', # 0xc7 'Yong ', # 0xc8 'Jing ', # 0xc9 'Quan ', # 0xca 'Chong ', # 0xcb 'Tong ', # 0xcc 'Yi ', # 0xcd 'Kai ', # 0xce 'Wei ', # 0xcf 'Hui ', # 0xd0 'Duo ', # 0xd1 'Yang ', # 0xd2 'Chi ', # 0xd3 'Zhi ', # 0xd4 'Hen ', # 0xd5 'Ya ', # 0xd6 'Mei ', # 0xd7 'Dou ', # 0xd8 'Jing ', # 0xd9 'Xiao ', # 0xda 'Tong ', # 0xdb 'Tu ', # 0xdc 'Mang ', # 0xdd 'Pi ', # 0xde 'Xiao ', # 0xdf 'Suan ', # 0xe0 'Pu ', # 0xe1 'Li ', # 0xe2 'Zhi ', # 0xe3 'Cuo ', # 0xe4 'Duo ', # 0xe5 'Wu ', # 0xe6 'Sha ', # 0xe7 'Lao ', # 
0xe8 'Shou ', # 0xe9 'Huan ', # 0xea 'Xian ', # 0xeb 'Yi ', # 0xec 'Peng ', # 0xed 'Zhang ', # 0xee 'Guan ', # 0xef 'Tan ', # 0xf0 'Fei ', # 0xf1 'Ma ', # 0xf2 'Lin ', # 0xf3 'Chi ', # 0xf4 'Ji ', # 0xf5 'Dian ', # 0xf6 'An ', # 0xf7 'Chi ', # 0xf8 'Bi ', # 0xf9 'Bei ', # 0xfa 'Min ', # 0xfb 'Gu ', # 0xfc 'Dui ', # 0xfd 'E ', # 0xfe 'Wei ', # 0xff )
gpl-3.0
flt/FitFinder
lib/jinja2/constants.py
1169
1626
# -*- coding: utf-8 -*- """ jinja.constants ~~~~~~~~~~~~~~~ Various constants. :copyright: (c) 2010 by the Jinja Team. :license: BSD, see LICENSE for more details. """ #: list of lorem ipsum words used by the lipsum() helper function LOREM_IPSUM_WORDS = u'''\ a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at auctor augue bibendum blandit class commodo condimentum congue consectetuer consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque penatibus per pharetra phasellus placerat platea porta porttitor posuere potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus viverra volutpat vulputate'''
apache-2.0
fbradyirl/home-assistant
tests/components/tplink/test_common.py
7
2372
"""Common code tests.""" from datetime import timedelta from unittest.mock import MagicMock from pyHS100 import SmartDeviceException from homeassistant.components.tplink.common import async_add_entities_retry from homeassistant.helpers.typing import HomeAssistantType async def test_async_add_entities_retry(hass: HomeAssistantType): """Test interval callback.""" async_add_entities_callback = MagicMock() # The objects that will be passed to async_add_entities_callback. objects = ["Object 1", "Object 2", "Object 3", "Object 4"] # For each call to async_add_entities_callback, the following side effects # will be triggered in order. This set of side effects accuratley simulates # 3 attempts to add all entities while also handling several return types. # To help understand what's going on, a comment exists describing what the # object list looks like throughout the iterations. callback_side_effects = [ # OB1, OB2, OB3, OB4 False, False, True, # Object 3 False, # OB1, OB2, OB4 True, # Object 1 SmartDeviceException("My error"), False, # OB2, OB4 True, # Object 2 True, # Object 4 ] callback = MagicMock(side_effect=callback_side_effects) await async_add_entities_retry( hass, async_add_entities_callback, objects, callback, interval=timedelta(milliseconds=100), ) await hass.async_block_till_done() assert callback.call_count == len(callback_side_effects) async def test_async_add_entities_retry_cancel(hass: HomeAssistantType): """Test interval callback.""" async_add_entities_callback = MagicMock() callback_side_effects = [ False, False, True, # Object 1 False, True, # Object 2 SmartDeviceException("My error"), False, True, # Object 3 True, # Object 4 ] callback = MagicMock(side_effect=callback_side_effects) objects = ["Object 1", "Object 2", "Object 3", "Object 4"] cancel = await async_add_entities_retry( hass, async_add_entities_callback, objects, callback, interval=timedelta(milliseconds=100), ) cancel() await hass.async_block_till_done() assert callback.call_count == 4
apache-2.0
sejros/PyInference
examples/infer.py
1
26060
# -*- coding: UTF-8 -*- """Модуль для работы с механизмом нечеткого вывода. Позволяет строить простейшие нечеткие экспертные системы (FES) путем построения нечетких подмножеств на деревьях особого типа. Синтаксис: >>> from FuzzySet import * >>> # создаем классификаторы: >>> C1=std_5_Classificator(name='Classifier 1') >>> C2=std_3_Classificator(name='Classifier 2', begin=0.0, end=100.0) >>> C3=std_2_Classificator(name='Classifier 3', begin=10.0, end=30.0) >>> # создаем дерево >>> T=Ruled(name='Rule system', clas=C1) >>> T.add(Ruled(name='Factor 1', clas=C2)) >>> T.add(Ruled(name='Factor 2', clas=C3)) >>> # добавляем правила >>> T.add_rule({'Factor 1':'I', 'Factor 2':'I', }, concl='I' ) >>> T.add_rule({'Factor 1':'I', 'Factor 2':'II',}, concl='II' ) >>> T.add_rule({'Factor 1':'II', 'Factor 2':'I', }, concl='II' ) >>> T.add_rule({'Factor 1':'II', 'Factor 2':'II',}, concl='III') >>> T.add_rule({'Factor 1':'III', 'Factor 2':'I', }, concl='III') >>> T.add_rule({'Factor 1':'III', 'Factor 2':'II',}, concl='III') >>> # добавляем оценки >>> T['Factor 1'].set_estim(6.5) >>> T['Factor 2'].set_estim(12.6) >>> print T.get_estim() 9.55 >>> # получаем результат с использованием разных t-норм >>> T.tnorm=min_max() >>> T.calculate() #doctest: +SKIP <FuzzySubset.Subset instance at 0x0292EF80> >>> print T.get_estim() 9.55 >>> T.tnorm=sum_prod() >>> T.calculate() #doctest: +SKIP <FuzzySubset.Subset instance at 0x02B3A3F0> >>> print T.get_estim() 9.55 >>> T.tnorm=margin() >>> T.calculate() #doctest: +SKIP <FuzzySubset.Subset instance at 0x02B3A788> >>> print T.get_estim() 9.55 """ from fuzzy.subset import Interval from fuzzy.domain import Domain from fuzzy.tnorm import MinMax class AggregationMetod(object): """ Класс определяет интерфейс к различным методам агрегации частных показателей в интегральный. Подклассы данного класса реализуют алгоритмы интеграции различных типов нечетких контроллеров. """ def calculate(self, host): """ Метод возвращает агрегированное значение """ pass class Simple(AggregationMetod): """ Метод агрегации показателей, в котором интегральный показатель расчитывается как среднее арифметическое частных. """ def calculate(self, host): est = 0.0 weight = 0.0 for child in host.childs.values(): try: est += float(child.get_estim()) except TypeError: return None weight += 1.0 if weight == 0.0: return None else: host.estimation = est/weight return host.estimation class Rules(AggregationMetod): """ Данный класс объединяет группу методов расчета интегральных показетелей, основанных на использовании системы нечетких правил. Все типы нечеткого вывода, использующие правила вывода реализуются классами, дочерними от данного. """ def __init__(self): self.rules = [] def add_rule(self, ant=None, concl='', name=''): """ Данный метод позволяет добавить систему правил, согласно которой будет вычисляться оценка текущего узла дерева (к которому применен метод) в зависимости от оценок его потомков. Нечеткое правило состоит из посылки и заключения. Посылка описывает при каких значениях факторов результирующая оценка принимает значение, описанное в заключении. В посылке перечислены имена факторов и имена термов соответствующих им классификаторов; а в заключении - имя терма результирующего параметра. Синтаксис: >>> T=Ruled() >>> T.add_rule(name='rule 1', ant={'factor': 'value'}, concl='value') #doctest: +SKIP Параметры: ant Посылка нечеткого правила. Ассоциативный массив, в ключах которого задаются имена факторов, а в значениях - соответствующие имена значений лингвистической переменной. 
Предполагается, что в данном массиве перечислены посылки нечеткого правила, связанные логическим И. Для ввода правил, в посылке которых встречается союз ИЛИ используйте разбиение на несколько правил. concl Заключение нечеткого правила Имя терма классификатора, соответствующее данной посылке. name имя правила, используемое опционально для удобства. """ ## # проверяем првильность параметров: ## for param, value in ant.iteritems(): ## # param должен быть среди потомков ## host[param] ## # value должен быть среди термов его классификатора ## host[param].classifier[value] ## # concl должен быть среди термов собственного классификатора ## host.classifier[concl] if not ant: ant = {} self.rules.append(Rule(ant=ant, concl=concl, name=name)) class Mamdani(Rules): """ Данный класс реализует функциональность контроллера Мамдани, то есть классический алгоритм нечеткого композитного вывода. Для каждого правила из системы правил считается его вес: как t-норма принадлежности фактических значений показателей термам, упомянутым в посылке правила. Затем терм, соответствующий заключению правила обрезается на уровне, равном весу показателей. Таким образом, для каждого правила выводится НПМ на области определения результирующего показателя. Они объединяются путем применения к ним t-конормы и получившееся НПМ и бдет являться конечным результатом процесса нечеткого вывода. """ def __init__(self): super(Mamdani, self).__init__(self) def calculate(self, host): # Начальное значение итогового НПМ. Для конормы это 0 уровень res = Interval(host.classifier.begin, host.classifier.end, tnorm=host.tnorm) * 0.0 # для каждого правила вычисляем его альфу for rule in self.rules: alpha = 1.0 # Для t-нормы начальным значением будет 1 # для каждого фактора в правиле for param, value in rule.ant.iteritems(): # значение фактора fact = host[param].get_estim() # его принадлежность в классификаторе mem = host[param].classifier[value].value(fact) alpha = host.tnorm.t_norm(alpha, mem) rule.alpha = alpha # обрезаем терм собственного классификатора уровнем альфа # и прибавляем его к существующим, используя конорму res = res.t_conorm(Interval(host.classifier.begin, host.classifier.end) * rule.alpha & \ host.classifier[rule.concl]) return res class RulesAccurate(Rules): """ Данный алгоритм нечеткого вывода в общем аналогичен контроллеру Мамдани, однако, результат определяется как среднне арифметическое взвешенное по дефаззифицированным термам заключения каждого правила, причем весами являются веса соответствующего правила. 
""" def calculate(self, host): sum_a = 0.0 summ = 0.0 # для каждого правила вычисляем его альфу for rule in self.rules: alpha = 1.0 # Для t-нормы начальным значением будет 1 # для каждого фактора в правиле for param, value in rule.ant.iteritems(): # значение фактора fact = host[param].get_estim() # его принадлежность в классификаторе mem = host[param].classifier[value].value(fact) alpha = host.tnorm.t_norm(alpha, mem) rule.alpha = alpha sum_a += alpha summ += host.classifier[rule.concl].centr()*alpha if sum_a == 0: return 0.0 return summ/sum_a class Rule(object): """ Описание Синтаксис: >>> Attributes: ant concl name """ def __init__(self, ant=None, concl='', name=''): if not ant: ant = {} self.concl = concl self.name = name self.ant = ant def __str__(self): res = str(self.name)+': ' for (name, value) in self.ant.iteritems(): res += str(name)+'='+value+' ' res += ' -> '+str(self.concl) return res class Tree(Domain): """ Представляет собой носитель нечеткого множества в виде иерархической структуры (дерева), в которой оценка данного узла зависит определенным образом от оценок его потомков. Конструктор данного класса создает как само дерево, так и его потомков. Листовой элемент дерева - это тот, для которого не создано ни одного потомка. Синтаксис: >>> A=Tree('tree') >>> A.add(Tree('branch 1')) >>> A.add(Tree('branch 2')) >>> A.add(Tree('branch 3')) >>> A['branch 1'].add(Tree('branch 1 1')) >>> A['branch 1'].add(Tree('branch 1 2')) >>> A['branch 1'].add(Tree('branch 1 3')) >>> A['branch 1']['branch 1 2'].add(Tree('leaf 1 2 1')) >>> A['branch 1']['branch 1 2'].add(Tree('leaf 1 2 2')) >>> A['branch 1']['branch 1 2'].add(Tree('leaf 1 2 3')) >>> A.char() branch 1 3 - None (1.0) leaf 1 2 1 - None (1.0) leaf 1 2 3 - None (1.0) leaf 1 2 2 - None (1.0) branch 1 2 - None (1.0) branch 1 1 - None (1.0) branch 1 - None (1.0) branch 2 - None (1.0) branch 3 - None (1.0) tree - None (1.0) >>> A=Tree('name', estim=2.5, weight=0.23, clas=std_3_Classifier(), tnorm=sum_prod) Параметры: name задает имя узла, по которому к нему можно будет обращаться estim степень принадлежности данного узла clas классификатор, используемый для оценки уровня данного параметра tnorm задает используемые при интеграции t-нормы и кономры. Подробнее см. FuzzyDomain.t_norm agg метод аггрегации частных показателей в интегральный. См. FuzzyDomain.AggregationMethod Переменные класса: childs name estimation weight classifier tnorm """ # TODO отделить МАИ от иерархического носителя # TODO реализовать в интерфейсе Subset иерархический носитель. # Без изъебов типа весов и классификаторов. Но с A.value() def __init__(self, name='', estim=None, agg=Simple(), clas=None, tnorm=MinMax()): self.name = name self.estimation = estim self.childs = {} self.agg = agg self.classifier = clas self.tnorm = tnorm def __str__(self): """ Для быстрого вывода основной информации о дереве, поддереве или листе, можно использовать процедуру преобразования к строковому типу. Синтаксис: >>> T=Tree('tree') >>> T.add(Tree('branch 1', 2)) >>> T.add(Tree('branch 2', 3)) >>> print T tree - 2.5 (1.0) >>> print T['branch 1'] branch 1 - 2 (1.0) Данный синтаксис можно комбинировать с синтаксисом __iter__ для вывода более полной информации о всех узлах дерева: >>> for i in T: ... print i ... """ return self.name+' - '+ str(self.get_estim()) def __iter__(self): """ Для быстрого перебора всех дочерних элементов дерева можно использовать объект данного класса как итератор. 
Порядок, в котором возвращаются узлы дерева соответствует алгоритму postorder traversal, то есть сначала перечисляются все дочерние узлы, затем родительский узел. и так для каждого узла, начиная с вершины. Синтаксис: >>> T=Tree('tree') >>> T.add(Tree('branch 1', 2)) >>> T.add(Tree('branch 2', 3)) >>> for i in T: ... print i.name ... branch 1 branch 2 tree """ for leaf in self.childs.values(): for i in leaf: yield i yield self def add(self, addition): """ Описание Синтаксис: Параметры: Параметр описание """ self.childs[addition.name] = addition def get_estim(self): """ Описание Синтаксис: Параметры: Параметр описание """ if self.estimation or self.estimation == 0.0: return self.estimation else: if self.childs == []: return None return self.agg.calculate(self) def set_estim(self, val): """ Описание Синтаксис: Параметры: Параметр описание """ self.estimation = val def __getitem__(self, param): """ Для быстрого доступа к любому из дочерних узлов дерева (не обязательно прямых потомков) по названию можно использовать следующий синтаксис: >>> T=Tree('tree') >>> T.add(Tree('branch 1', 2)) >>> T.add(Tree('branch 2', 3)) >>> T.char() branch 1 - 2 (1.0) branch 2 - 3 (1.0) tree - 2.5 (1.0) >>> print T['branch 1'] branch 1 - 2 (1.0) """ ## print 'im here!' return self.childs[param] # TODO интерфейс FES с модельными параметрами и возможностью задания пользовательских и изменения на лету. class Controller(object): """ Данный класс представляет интерфейс для создания нечеткого контроллера со множественными входами и выходами. Входом нечеткого контроллера называется лингвистическая переменная, имеющая имя и множество терм-значений (классификатор), которой присваивается четкое, нечеткое или лингвистическое значение. Выходом контроллера называется лингвистическая переменная, имеющая имя и множество терм-значений, значение которой рассчитывается, исходя из значений входных переменных по определенному алгоритму, который называется тип контроллера (см. FuzzyDomain.AggregationMethod). Синтаксис: >>> Параметры конструктора: input_ C помощью этого параметра задаются входные переменные контроллера. В этот параметр следует передать ассоциативный массив, ключами которого являются строки-имена входных переменных, а значениями - соответствующие классификаторы, задающие терм-множество каждой переменной. out Подобным же образом задаются и выходные переменные классификатора. rules В данный параметр передаются нечеткие правила вывода (если они требуются). Следует передать массив, в котором каждый элемент это правило, представленное в виде пары (tuple) посылки и заключения, каждая из которых представлена в виде ассоциативного массива с ключами - именами переменны и значениями - именами термов. method Данный параметр определяет тип контроллера. Фактически он задает метод (алгоритм) сводки частных показателей в интегральный. Может принимать в качестве значения имя любого подкласса FuzzyDomain.AggregationMethod. tnorm Определяет пару треугольных норм и конорм Поля класса: trees Ассоциативный массив, ключами которого являются имена выходных переменных контроллера, а значениями - соответствующие им деревья (см. FuzzyDomain.Tree) inputs Ассоциативный массив, ключами которого являются имена входных переменных контроллера, а значениями - соответствующие им множества терм-значений (классификаторы). См. 
FuzzySet.FuzzySet method tnorm """ def __init__(self, input_=None, out=None, method=Simple(), tnorm=MinMax()): """ Описание Синтаксис: >>> """ if not input_: input_ = {} if not out: out = {} self.method = method self.tnorm = tnorm self.trees = {} self.inputs = {} self.define_input(input_) self.define_output(out) def _char(self): """ Функция выводит данные о нечетком контроллере в человеко-читаемом виде. """ print "<Fuzzy controller>" for name in self.trees.itervalues(): for i in name: print i print print 'Rules:' for tree in self.trees.itervalues(): if isinstance(tree.agg, Rules): for rule in tree.agg.rules: print rule print print '</Fuzzy controller>' def define_input(self, input_): """ Описание Синтаксис: >>> """ self.inputs = {} for name in input_.iterkeys(): self.inputs[name] = Tree(name=name, clas=input_[name]) return self def define_output(self, out): """ Описание Синтаксис: >>> """ self.trees = {} for name in out.iterkeys(): tree = Tree(name=name, clas=out[name], agg=self.method(), tnorm=self.tnorm) for branch in self.inputs.itervalues(): tree.add(branch) self.trees[name] = tree return self def add_input(self, name, clas): """ Описание Синтаксис: >>> """ self.inputs[name] = Tree(name=name, clas=clas) def add_output(self, name, clas): """ Описание Синтаксис: >>> """ tree = Tree(name=name, clas=clas, agg=self.method(), tnorm=self.tnorm) for branch in self.inputs.itervalues(): tree.add(branch) self.trees[name] = tree def define_rules(self, rules): """ Описание Синтаксис: >>> """ if isinstance(self.method(), rules): i = 0 for rule in rules: ant, conc = rule for name in conc.iterkeys(): self.trees[name].agg.add_rule(name='rule '+str(i), ant=ant, concl=conc[name]) i += 1 return self def add_rule(self, rule, name=''): """ Описание Синтаксис: >>> """ # TODO pass def set(self, input_values): """ Описание Синтаксис: >>> """ for name in input_values.iterkeys(): self.inputs[name].set_estim(input_values[name]) def get(self): """ Описание Синтаксис: >>> """ res = {} for tree in self.trees.itervalues(): res[tree.name] = tree.get_estim() return res #TODO вывод классификаторов входов #TODO вывод классификаторов выходов #TODO вывод двумерных графиков if __name__ == "__main__": import doctest doctest.testmod(verbose=False) #~ doctest.testmod(verbose=True)
gpl-3.0
ennoborg/gramps
gramps/gen/filters/rules/_hassourceofbase.py
5
2736
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2002-2006 Donald N. Allingham # Copyright (C) 2011 Tim G L Lyons # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # #------------------------------------------------------------------------- # # Standard Python modules # #------------------------------------------------------------------------- from ...const import GRAMPS_LOCALE as glocale _ = glocale.translation.gettext #------------------------------------------------------------------------- # # Gramps modules # #------------------------------------------------------------------------- from . import Rule #------------------------------------------------------------------------- # # HasSourceOf # #------------------------------------------------------------------------- class HasSourceOfBase(Rule): """Rule that checks for objects that have a particular source.""" labels = [ _('Source ID:') ] name = 'Object with the <source>' category = _('Citation/source filters') description = 'Matches objects who have a particular source' def prepare(self, db, user): if self.list[0] == '': self.source_handle = None self.nosource = True return self.nosource = False try: self.source_handle = db.get_source_from_gramps_id( self.list[0]).get_handle() except: self.source_handle = None def apply(self, db, object): if not self.source_handle: if self.nosource: # check whether the citation list is empty as a proxy for # there being no sources return len(object.get_all_citation_lists()) == 0 else: return False else: for citation_handle in object.get_all_citation_lists(): citation = db.get_citation_from_handle(citation_handle) if citation.get_reference_handle() == self.source_handle: return True return False
gpl-2.0
kmonsoor/python-for-android
python-build/python-libs/gdata/build/lib/atom/token_store.py
280
4048
#!/usr/bin/python # # Copyright (C) 2008 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module provides a TokenStore class which is designed to manage auth tokens required for different services. Each token is valid for a set of scopes which is the start of a URL. An HTTP client will use a token store to find a valid Authorization header to send in requests to the specified URL. If the HTTP client determines that a token has expired or been revoked, it can remove the token from the store so that it will not be used in future requests. """ __author__ = 'api.jscudder (Jeff Scudder)' import atom.http_interface import atom.url SCOPE_ALL = 'http' class TokenStore(object): """Manages Authorization tokens which will be sent in HTTP headers.""" def __init__(self, scoped_tokens=None): self._tokens = scoped_tokens or {} def add_token(self, token): """Adds a new token to the store (replaces tokens with the same scope). Args: token: A subclass of http_interface.GenericToken. The token object is responsible for adding the Authorization header to the HTTP request. The scopes defined in the token are used to determine if the token is valid for a requested scope when find_token is called. Returns: True if the token was added, False if the token was not added because no scopes were provided. """ if not hasattr(token, 'scopes') or not token.scopes: return False for scope in token.scopes: self._tokens[str(scope)] = token return True def find_token(self, url): """Selects an Authorization header token which can be used for the URL. Args: url: str or atom.url.Url or a list containing the same. The URL which is going to be requested. All tokens are examined to see if any scopes match the beginning of the URL. The first match found is returned. Returns: The token object which should execute the HTTP request. If there was no token for the url (the url did not begin with any of the token scopes available), then the atom.http_interface.GenericToken will be returned because the GenericToken calls through to the http client without adding an Authorization header. """ if url is None: return None if isinstance(url, (str, unicode)): url = atom.url.parse_url(url) if url in self._tokens: token = self._tokens[url] if token.valid_for_scope(url): return token else: del self._tokens[url] for scope, token in self._tokens.iteritems(): if token.valid_for_scope(url): return token return atom.http_interface.GenericToken() def remove_token(self, token): """Removes the token from the token_store. This method is used when a token is determined to be invalid. If the token was found by find_token, but resulted in a 401 or 403 error stating that the token was invalid, then the token should be removed to prevent future use. Returns: True if a token was found and then removed from the token store. False if the token was not in the TokenStore.
""" token_found = False scopes_to_delete = [] for scope, stored_token in self._tokens.iteritems(): if stored_token == token: scopes_to_delete.append(scope) token_found = True for scope in scopes_to_delete: del self._tokens[scope] return token_found def remove_all_tokens(self): self._tokens = {}
apache-2.0
IRSO/irsosav
node-v4/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input_test.py
1841
3207
#!/usr/bin/env python # Copyright 2013 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unit tests for the input.py file.""" import gyp.input import unittest import sys class TestFindCycles(unittest.TestCase): def setUp(self): self.nodes = {} for x in ('a', 'b', 'c', 'd', 'e'): self.nodes[x] = gyp.input.DependencyGraphNode(x) def _create_dependency(self, dependent, dependency): dependent.dependencies.append(dependency) dependency.dependents.append(dependent) def test_no_cycle_empty_graph(self): for label, node in self.nodes.iteritems(): self.assertEquals([], node.FindCycles()) def test_no_cycle_line(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['c']) self._create_dependency(self.nodes['c'], self.nodes['d']) for label, node in self.nodes.iteritems(): self.assertEquals([], node.FindCycles()) def test_no_cycle_dag(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['a'], self.nodes['c']) self._create_dependency(self.nodes['b'], self.nodes['c']) for label, node in self.nodes.iteritems(): self.assertEquals([], node.FindCycles()) def test_cycle_self_reference(self): self._create_dependency(self.nodes['a'], self.nodes['a']) self.assertEquals([[self.nodes['a'], self.nodes['a']]], self.nodes['a'].FindCycles()) def test_cycle_two_nodes(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['a']) self.assertEquals([[self.nodes['a'], self.nodes['b'], self.nodes['a']]], self.nodes['a'].FindCycles()) self.assertEquals([[self.nodes['b'], self.nodes['a'], self.nodes['b']]], self.nodes['b'].FindCycles()) def test_two_cycles(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['a']) self._create_dependency(self.nodes['b'], self.nodes['c']) self._create_dependency(self.nodes['c'], self.nodes['b']) cycles = self.nodes['a'].FindCycles() self.assertTrue( [self.nodes['a'], self.nodes['b'], self.nodes['a']] in cycles) self.assertTrue( [self.nodes['b'], self.nodes['c'], self.nodes['b']] in cycles) self.assertEquals(2, len(cycles)) def test_big_cycle(self): self._create_dependency(self.nodes['a'], self.nodes['b']) self._create_dependency(self.nodes['b'], self.nodes['c']) self._create_dependency(self.nodes['c'], self.nodes['d']) self._create_dependency(self.nodes['d'], self.nodes['e']) self._create_dependency(self.nodes['e'], self.nodes['a']) self.assertEquals([[self.nodes['a'], self.nodes['b'], self.nodes['c'], self.nodes['d'], self.nodes['e'], self.nodes['a']]], self.nodes['a'].FindCycles()) if __name__ == '__main__': unittest.main()
gpl-3.0
urda/mrbutler-bot
tests/unit/mrb_common/commanding/test_command.py
1
1391
""" Copyright 2017 Peter Urda Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from unittest import TestCase from unittest.mock import MagicMock from discord import Message from mrb_common.commanding import Command class TestCommand(TestCase): def setUp(self): self.mock_function = MagicMock() self.mock_message = MagicMock(spec=Message) # type: Message def test_exec(self): """Verify that calling exec on a command runs the stored function""" command = Command(self.mock_function) command.execute(self.mock_message) self.assertEqual(self.mock_function.call_count, 1) def test_exec_called_with_message(self): """Verify that calling exec passes a message along to the function""" command = Command(self.mock_function) command.execute(self.mock_message) self.mock_function.assert_called_with(self.mock_message)
apache-2.0
adeepkit01/networks
.waf-1.8.13-4da49748f68a49832130c7ef124357f6/waflib/Tools/flex.py
21
1036
#! /usr/bin/env python # encoding: utf-8 # WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file import waflib.TaskGen,os,re def decide_ext(self,node): if'cxx'in self.features: return['.lex.cc'] return['.lex.c'] def flexfun(tsk): env=tsk.env bld=tsk.generator.bld wd=bld.variant_dir def to_list(xx): if isinstance(xx,str):return[xx] return xx tsk.last_cmd=lst=[] lst.extend(to_list(env['FLEX'])) lst.extend(to_list(env['FLEXFLAGS'])) inputs=[a.path_from(bld.bldnode)for a in tsk.inputs] if env.FLEX_MSYS: inputs=[x.replace(os.sep,'/')for x in inputs] lst.extend(inputs) lst=[x for x in lst if x] txt=bld.cmd_and_log(lst,cwd=wd,env=env.env or None,quiet=0) tsk.outputs[0].write(txt.replace('\r\n','\n').replace('\r','\n')) waflib.TaskGen.declare_chain(name='flex',rule=flexfun,ext_in='.l',decider=decide_ext,) def configure(conf): conf.find_program('flex',var='FLEX') conf.env.FLEXFLAGS=['-t'] if re.search(r"\\msys\\[0-9.]+\\bin\\flex.exe$",conf.env.FLEX[0]): conf.env.FLEX_MSYS=True
gpl-2.0
jakew02/android_kernel_lge_msm8992
tools/perf/tests/attr.py
3174
9441
#! /usr/bin/python import os import sys import glob import optparse import tempfile import logging import shutil import ConfigParser class Fail(Exception): def __init__(self, test, msg): self.msg = msg self.test = test def getMsg(self): return '\'%s\' - %s' % (self.test.path, self.msg) class Unsup(Exception): def __init__(self, test): self.test = test def getMsg(self): return '\'%s\'' % self.test.path class Event(dict): terms = [ 'cpu', 'flags', 'type', 'size', 'config', 'sample_period', 'sample_type', 'read_format', 'disabled', 'inherit', 'pinned', 'exclusive', 'exclude_user', 'exclude_kernel', 'exclude_hv', 'exclude_idle', 'mmap', 'comm', 'freq', 'inherit_stat', 'enable_on_exec', 'task', 'watermark', 'precise_ip', 'mmap_data', 'sample_id_all', 'exclude_host', 'exclude_guest', 'exclude_callchain_kernel', 'exclude_callchain_user', 'wakeup_events', 'bp_type', 'config1', 'config2', 'branch_sample_type', 'sample_regs_user', 'sample_stack_user', ] def add(self, data): for key, val in data: log.debug(" %s = %s" % (key, val)) self[key] = val def __init__(self, name, data, base): log.debug(" Event %s" % name); self.name = name; self.group = '' self.add(base) self.add(data) def compare_data(self, a, b): # Allow multiple values in assignment separated by '|' a_list = a.split('|') b_list = b.split('|') for a_item in a_list: for b_item in b_list: if (a_item == b_item): return True elif (a_item == '*') or (b_item == '*'): return True return False def equal(self, other): for t in Event.terms: log.debug(" [%s] %s %s" % (t, self[t], other[t])); if not self.has_key(t) or not other.has_key(t): return False if not self.compare_data(self[t], other[t]): return False return True def diff(self, other): for t in Event.terms: if not self.has_key(t) or not other.has_key(t): continue if not self.compare_data(self[t], other[t]): log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) # Test file description needs to have following sections: # [config] # - just single instance in file # - needs to specify: # 'command' - perf command name # 'args' - special command arguments # 'ret' - expected command return value (0 by default) # # [eventX:base] # - one or multiple instances in file # - expected values assignments class Test(object): def __init__(self, path, options): parser = ConfigParser.SafeConfigParser() parser.read(path) log.warning("running '%s'" % path) self.path = path self.test_dir = options.test_dir self.perf = options.perf self.command = parser.get('config', 'command') self.args = parser.get('config', 'args') try: self.ret = parser.get('config', 'ret') except: self.ret = 0 self.expect = {} self.result = {} log.debug(" loading expected events"); self.load_events(path, self.expect) def is_event(self, name): if name.find("event") == -1: return False else: return True def load_events(self, path, events): parser_event = ConfigParser.SafeConfigParser() parser_event.read(path) # The event record section header contains 'event' word, # optionaly followed by ':' allowing to load 'parent # event' first as a base for section in filter(self.is_event, parser_event.sections()): parser_items = parser_event.items(section); base_items = {} # Read parent event if there's any if (':' in section): base = section[section.index(':') + 1:] parser_base = ConfigParser.SafeConfigParser() parser_base.read(self.test_dir + '/' + base) base_items = parser_base.items('event') e = Event(section, parser_items, base_items) events[section] = e def run_cmd(self, tempdir): cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir, 
self.perf, self.command, tempdir, self.args) ret = os.WEXITSTATUS(os.system(cmd)) log.info(" '%s' ret %d " % (cmd, ret)) if ret != int(self.ret): raise Unsup(self) def compare(self, expect, result): match = {} log.debug(" compare"); # For each expected event find all matching # events in result. Fail if there's not any. for exp_name, exp_event in expect.items(): exp_list = [] log.debug(" matching [%s]" % exp_name) for res_name, res_event in result.items(): log.debug(" to [%s]" % res_name) if (exp_event.equal(res_event)): exp_list.append(res_name) log.debug(" ->OK") else: log.debug(" ->FAIL"); log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list))) # we did not any matching event - fail if (not exp_list): exp_event.diff(res_event) raise Fail(self, 'match failure'); match[exp_name] = exp_list # For each defined group in the expected events # check we match the same group in the result. for exp_name, exp_event in expect.items(): group = exp_event.group if (group == ''): continue for res_name in match[exp_name]: res_group = result[res_name].group if res_group not in match[group]: raise Fail(self, 'group failure') log.debug(" group: [%s] matches group leader %s" % (exp_name, str(match[group]))) log.debug(" matched") def resolve_groups(self, events): for name, event in events.items(): group_fd = event['group_fd']; if group_fd == '-1': continue; for iname, ievent in events.items(): if (ievent['fd'] == group_fd): event.group = iname log.debug('[%s] has group leader [%s]' % (name, iname)) break; def run(self): tempdir = tempfile.mkdtemp(); try: # run the test script self.run_cmd(tempdir); # load events expectation for the test log.debug(" loading result events"); for f in glob.glob(tempdir + '/event*'): self.load_events(f, self.result); # resolve group_fd to event names self.resolve_groups(self.expect); self.resolve_groups(self.result); # do the expectation - results matching - both ways self.compare(self.expect, self.result) self.compare(self.result, self.expect) finally: # cleanup shutil.rmtree(tempdir) def run_tests(options): for f in glob.glob(options.test_dir + '/' + options.test): try: Test(f, options).run() except Unsup, obj: log.warning("unsupp %s" % obj.getMsg()) def setup_log(verbose): global log level = logging.CRITICAL if verbose == 1: level = logging.WARNING if verbose == 2: level = logging.INFO if verbose >= 3: level = logging.DEBUG log = logging.getLogger('test') log.setLevel(level) ch = logging.StreamHandler() ch.setLevel(level) formatter = logging.Formatter('%(message)s') ch.setFormatter(formatter) log.addHandler(ch) USAGE = '''%s [OPTIONS] -d dir # tests dir -p path # perf binary -t test # single test -v # verbose level ''' % sys.argv[0] def main(): parser = optparse.OptionParser(usage=USAGE) parser.add_option("-t", "--test", action="store", type="string", dest="test") parser.add_option("-d", "--test-dir", action="store", type="string", dest="test_dir") parser.add_option("-p", "--perf", action="store", type="string", dest="perf") parser.add_option("-v", "--verbose", action="count", dest="verbose") options, args = parser.parse_args() if args: parser.error('FAILED wrong arguments %s' % ' '.join(args)) return -1 setup_log(options.verbose) if not options.test_dir: print 'FAILED no -d option specified' sys.exit(-1) if not options.test: options.test = 'test*' try: run_tests(options) except Fail, obj: print "FAILED %s" % obj.getMsg(); sys.exit(-1) sys.exit(0) if __name__ == '__main__': main()
gpl-2.0
jorik041/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/style/filter.py
196
11782
# Copyright (C) 2010 Chris Jerdonek ([email protected]) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Contains filter-related code.""" def validate_filter_rules(filter_rules, all_categories): """Validate the given filter rules, and raise a ValueError if not valid. Args: filter_rules: A list of boolean filter rules, for example-- ["-whitespace", "+whitespace/braces"] all_categories: A list of all available category names, for example-- ["whitespace/tabs", "whitespace/braces"] Raises: ValueError: An error occurs if a filter rule does not begin with "+" or "-" or if a filter rule does not match the beginning of some category name in the list of all available categories. """ for rule in filter_rules: if not (rule.startswith('+') or rule.startswith('-')): raise ValueError('Invalid filter rule "%s": every rule ' "must start with + or -." % rule) for category in all_categories: if category.startswith(rule[1:]): break else: raise ValueError('Suspected incorrect filter rule "%s": ' "the rule does not match the beginning " "of any category name." % rule) class _CategoryFilter(object): """Filters whether to check style categories.""" def __init__(self, filter_rules=None): """Create a category filter. Args: filter_rules: A list of strings that are filter rules, which are strings beginning with the plus or minus symbol (+/-). The list should include any default filter rules at the beginning. Defaults to the empty list. Raises: ValueError: Invalid filter rule if a rule does not start with plus ("+") or minus ("-"). """ if filter_rules is None: filter_rules = [] self._filter_rules = filter_rules self._should_check_category = {} # Cached dictionary of category to True/False def __str__(self): return ",".join(self._filter_rules) # Useful for unit testing. def __eq__(self, other): """Return whether this CategoryFilter instance is equal to another.""" return self._filter_rules == other._filter_rules # Useful for unit testing. def __ne__(self, other): # Python does not automatically deduce from __eq__(). return not (self == other) def should_check(self, category): """Return whether the category should be checked. The rules for determining whether a category should be checked are as follows. By default all categories should be checked. 
Then apply the filter rules in order from first to last, with later flags taking precedence. A filter rule applies to a category if the string after the leading plus/minus (+/-) matches the beginning of the category name. A plus (+) means the category should be checked, while a minus (-) means the category should not be checked. """ if category in self._should_check_category: return self._should_check_category[category] should_check = True # All categories checked by default. for rule in self._filter_rules: if not category.startswith(rule[1:]): continue should_check = rule.startswith('+') self._should_check_category[category] = should_check # Update cache. return should_check class FilterConfiguration(object): """Supports filtering with path-specific and user-specified rules.""" def __init__(self, base_rules=None, path_specific=None, user_rules=None): """Create a FilterConfiguration instance. Args: base_rules: The starting list of filter rules to use for processing. The default is the empty list, which by itself would mean that all categories should be checked. path_specific: A list of (sub_paths, path_rules) pairs that stores the path-specific filter rules for appending to the base rules. The "sub_paths" value is a list of path substrings. If a file path contains one of the substrings, then the corresponding path rules are appended. The first substring match takes precedence, i.e. only the first match triggers an append. The "path_rules" value is a list of filter rules that can be appended to the base rules. user_rules: A list of filter rules that is always appended to the base rules and any path rules. In other words, the user rules take precedence over the everything. In practice, the user rules are provided by the user from the command line. """ if base_rules is None: base_rules = [] if path_specific is None: path_specific = [] if user_rules is None: user_rules = [] self._base_rules = base_rules self._path_specific = path_specific self._path_specific_lower = None """The backing store for self._get_path_specific_lower().""" self._user_rules = user_rules self._path_rules_to_filter = {} """Cached dictionary of path rules to CategoryFilter instance.""" # The same CategoryFilter instance can be shared across # multiple keys in this dictionary. This allows us to take # greater advantage of the caching done by # CategoryFilter.should_check(). self._path_to_filter = {} """Cached dictionary of file path to CategoryFilter instance.""" # Useful for unit testing. def __eq__(self, other): """Return whether this FilterConfiguration is equal to another.""" if self._base_rules != other._base_rules: return False if self._path_specific != other._path_specific: return False if self._user_rules != other._user_rules: return False return True # Useful for unit testing. def __ne__(self, other): # Python does not automatically deduce this from __eq__(). return not self.__eq__(other) # We use the prefix "_get" since the name "_path_specific_lower" # is already taken up by the data attribute backing store. def _get_path_specific_lower(self): """Return a copy of self._path_specific with the paths lower-cased.""" if self._path_specific_lower is None: self._path_specific_lower = [] for (sub_paths, path_rules) in self._path_specific: sub_paths = map(str.lower, sub_paths) self._path_specific_lower.append((sub_paths, path_rules)) return self._path_specific_lower def _path_rules_from_path(self, path): """Determine the path-specific rules to use, and return as a tuple. 
This method returns a tuple rather than a list so the return value can be passed to _filter_from_path_rules() without change. """ path = path.lower() for (sub_paths, path_rules) in self._get_path_specific_lower(): for sub_path in sub_paths: if path.find(sub_path) > -1: return tuple(path_rules) return () # Default to the empty tuple. def _filter_from_path_rules(self, path_rules): """Return the CategoryFilter associated to the given path rules. Args: path_rules: A tuple of path rules. We require a tuple rather than a list so the value can be used as a dictionary key in self._path_rules_to_filter. """ # We reuse the same CategoryFilter where possible to take # advantage of the caching they do. if path_rules not in self._path_rules_to_filter: rules = list(self._base_rules) # Make a copy rules.extend(path_rules) rules.extend(self._user_rules) self._path_rules_to_filter[path_rules] = _CategoryFilter(rules) return self._path_rules_to_filter[path_rules] def _filter_from_path(self, path): """Return the CategoryFilter associated to a path.""" if path not in self._path_to_filter: path_rules = self._path_rules_from_path(path) filter = self._filter_from_path_rules(path_rules) self._path_to_filter[path] = filter return self._path_to_filter[path] def should_check(self, category, path): """Return whether the given category should be checked. This method determines whether a category should be checked by checking the category name against the filter rules for the given path. For a given path, the filter rules are the combination of the base rules, the path-specific rules, and the user-provided rules -- in that order. As we will describe below, later rules in the list take precedence. The path-specific rules are the rules corresponding to the first element of the "path_specific" parameter that contains a string case-insensitively matching some substring of the path. If there is no such element, there are no path-specific rules for that path. Given a list of filter rules, the logic for determining whether a category should be checked is as follows. By default all categories should be checked. Then apply the filter rules in order from first to last, with later flags taking precedence. A filter rule applies to a category if the string after the leading plus/minus (+/-) matches the beginning of the category name. A plus (+) means the category should be checked, while a minus (-) means the category should not be checked. Args: category: The category name. path: The path of the file being checked. """ return self._filter_from_path(path).should_check(category)
bsd-3-clause
Syncano/syncano-cli
syncano_cli/base/formatters.py
1
3355
# -*- coding: utf-8 -*- import click import six from syncano_cli.base.options import BottomSpacedOpt, ColorSchema, DefaultOpt, OptionsBase, WarningOpt class Formatter(object): indent = ' ' not_set = '-- not set --' def write(self, single_line, *options): option = self.get_options(options) styles = option.map_click() single_line = self._indent(single_line, option) self._write(single_line, option, **styles) def write_lines(self, lines, *options): lines = lines.splitlines() if isinstance(lines, six.string_types) else lines for line in lines: self.write(line, *options) def write_block(self, lines, *options): self.write_lines(lines, *options) self.separator() @classmethod def empty_line(cls): click.echo() def separator(self, size=70, indent=1): self.write(size * '-', DefaultOpt(indent=indent), WarningOpt(), BottomSpacedOpt()) def display_config(self, config): for name, value in six.iteritems(config): self.write('{}{:20} {}'.format( self.indent, click.style(name, fg=ColorSchema.PROMPT), click.style(value, fg=ColorSchema.INFO) )) if not config: self.write('No config specified yet.', DefaultOpt(indent=2)) self.empty_line() def get_options(self, options): option_list = list(options) or [] option_list.insert(0, DefaultOpt()) return OptionsBase.build_options(option_list) def format_object(self, dictionary, indent=1, skip_fields=None): skip_fields = skip_fields or [] indent += 1 for key, value in six.iteritems(dictionary): if isinstance(value, dict): self.write('{}:'.format(click.style(key, fg=ColorSchema.PROMPT)), DefaultOpt(indent=indent)) self.format_object(value, indent=indent, skip_fields=skip_fields) elif isinstance(value, list): self.format_list(value, key=key, indent=indent) else: if key in skip_fields: continue self.write('{}: {}'.format( click.style(key, fg=ColorSchema.PROMPT), click.style(value, fg=ColorSchema.INFO) ), DefaultOpt(indent=indent)) def format_list(self, data_list, key=None, indent=2, skip_fields=None): skip_fields = skip_fields or [] for el in data_list: if isinstance(el, dict): self.format_object(el, indent=indent, skip_fields=skip_fields) else: if key: self.write('{}: {}'.format( click.style(key, fg=ColorSchema.PROMPT), click.style(el, fg=ColorSchema.INFO) ), DefaultOpt(indent=indent)) else: self.write('{}'.format(el), DefaultOpt(indent=indent)) self.empty_line() def _write(self, line, options, **styles): if options.space_top: self.empty_line() click.echo(click.style(line, **styles)) if options.space_bottom: self.empty_line() def _indent(self, line, options): return '{}{}'.format(self.indent * options.indent, line)
mit
DeepRNN/visual_question_answering
episodic_memory.py
1
3592
import tensorflow as tf from utils.nn import NN class AttnGRU(object): """ Attention-based GRU (used by the Episodic Memory Module). """ def __init__(self, config): self.nn = NN(config) self.num_units = config.num_gru_units def __call__(self, inputs, state, attention): with tf.variable_scope('attn_gru'): r_input = tf.concat([inputs, state], axis = 1) r_input = self.nn.dropout(r_input) r = self.nn.dense(r_input, units = self.num_units, activation = None, use_bias = False, name = 'fc1') b = tf.get_variable('fc1/bias', shape = [self.num_units], initializer = tf.constant_initializer(1.0)) r = tf.nn.bias_add(r, b) r = tf.sigmoid(r) c_input = tf.concat([inputs, r*state], axis = 1) c_input = self.nn.dropout(c_input) c = self.nn.dense(c_input, units = self.num_units, activation = tf.tanh, name = 'fc2') new_state = attention * c + (1 - attention) * state return new_state class EpisodicMemory(object): """ Episodic Memory Module. """ def __init__(self, config, num_facts, question, facts): self.nn = NN(config) self.num_units = config.num_gru_units self.num_facts = num_facts self.question = question self.facts = facts self.attention = config.attention if self.attention == 'gru': self.attn_gru = AttnGRU(config) def new_fact(self, memory): """ Get the context vector by using either soft attention or attention-based GRU. """ fact_list = tf.unstack(self.facts, axis = 1) mixed_fact = tf.zeros_like(fact_list[0]) with tf.variable_scope('attend'): attentions = self.attend(memory) if self.attention == 'gru': with tf.variable_scope('attn_gate') as scope: attentions = tf.unstack(attentions, axis = 1) for ctx, att in zip(fact_list, attentions): mixed_fact = self.attn_gru(ctx, mixed_fact, tf.expand_dims(att, 1)) scope.reuse_variables() else: mixed_fact = tf.reduce_sum(self.facts*tf.expand_dims(attentions, 2), axis = 1) return mixed_fact def attend(self, memory): """ Get the attention weights. """ c = self.facts q = tf.tile(tf.expand_dims(self.question, 1), [1, self.num_facts, 1]) m = tf.tile(tf.expand_dims(memory, 1), [1, self.num_facts, 1]) z = tf.concat([c*q, c*m, tf.abs(c-q), tf.abs(c-m)], 2) z = tf.reshape(z, [-1, 4*self.num_units]) z = self.nn.dropout(z) z1 = self.nn.dense(z, units = self.num_units, activation = tf.tanh, name = 'fc1') z1 = self.nn.dropout(z1) z2 = self.nn.dense(z1, units = 1, activation = None, use_bias = False, name = 'fc2') z2 = tf.reshape(z2, [-1, self.num_facts]) attentions = tf.nn.softmax(z2) return attentions
mit
marcore/pok-eco
xapi/patterns/video.py
1
1417
import json from tincan import ( Activity, ActivityDefinition, LanguageMap ) from xapi.patterns.base import BasePattern from xapi.patterns.eco_verbs import ( LoadVideoVerb, PlayVideoVerb ) from xapi.utils import get_usage_key class BaseVideoRule(BasePattern): # pylint: disable=abstract-method def convert(self, evt, course_id): verb = self.get_verb() # pylint: disable=no-member module_id = None try: module_id = evt['event']['id'] except TypeError: internal_event = json.loads(evt['event']) module_id = internal_event['id'] obj = Activity( id=self.get_block_id(course_id, module_id), definition=ActivityDefinition( name=LanguageMap({'en-US': module_id}), type="http://activitystrea.ms/schema/1.0/video" ) ) return verb, obj @staticmethod def get_block_id(course_id, module_id): return get_usage_key(course_id, module_id) class PlayVideoRule(BaseVideoRule, PlayVideoVerb): def match(self, evt, course_id): return (evt['event_type'] == 'play_video' and evt['event_source'] == 'browser') class LoadVideoRule(BaseVideoRule, LoadVideoVerb): def match(self, evt, course_id): return (evt['event_type'] == 'load_video' and evt['event_source'] == 'browser')
agpl-3.0
dhermes/gcloud-python
videointelligence/google/cloud/videointelligence_v1p2beta1/types.py
2
1763
# -*- coding: utf-8 -*- # # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import import sys from google.api_core.protobuf_helpers import get_messages from google.api import http_pb2 from google.cloud.videointelligence_v1p2beta1.proto import video_intelligence_pb2 from google.longrunning import operations_pb2 from google.protobuf import any_pb2 from google.protobuf import descriptor_pb2 from google.protobuf import duration_pb2 from google.protobuf import empty_pb2 from google.protobuf import timestamp_pb2 from google.rpc import status_pb2 _shared_modules = [ http_pb2, operations_pb2, any_pb2, descriptor_pb2, duration_pb2, empty_pb2, timestamp_pb2, status_pb2, ] _local_modules = [video_intelligence_pb2] names = [] for module in _shared_modules: for name, message in get_messages(module).items(): setattr(sys.modules[__name__], name, message) names.append(name) for module in _local_modules: for name, message in get_messages(module).items(): message.__module__ = "google.cloud.videointelligence_v1p2beta1.types" setattr(sys.modules[__name__], name, message) names.append(name) __all__ = tuple(sorted(names))
apache-2.0
lucafavatella/intellij-community
python/testData/inspections/PyPropertyDefinitionInspection26/src/prop_test.py
44
1129
class A(object): def __init__(self): self._x = 1 @property def foo(self): return self._x @foo.setter def foo(self, x): self._x = x @foo.deleter def foo(self): pass @property def boo(self): return self._x @boo.setter # name mismatch def boo1(self, x): self._x = x @boo.deleter # name mismatch def boo2(self): pass @property def moo(self): # should return pass @moo.setter def moo(self, x): # shouldn't return return 1 @moo.deleter def moo(self): # shouldn't return return self._x @qoo.setter # unknown qoo is reported in ref inspection def qoo(self, v): self._x = v @property def futuroo(self): raise NotImplementedError("Override!") # ok though no return @property def futuroo(self): """Docstring.""" raise NotImplementedError("Override!") # ok though no return @property def xoo(self): return self._x @xoo.setter def xoo(self, x): self._x = x return get_foo2 = lambda self: 'foo2' foo2 = property(get_foo2) @property @abstractproperty def abstract_property(self): pass
apache-2.0
mykonosbiennale/mykonosbiennale.github.io
festival/management/commands/rename_images.py
1
32746
# -*- coding: utf-8 -*- # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 import time, traceback import collections import re import os import sys import csv import pprint import optparse from django.core.files.base import ContentFile from nameparser import HumanName from django.core.management.base import BaseCommand from django.conf import settings from django.utils.text import slugify from festivaly import models as festivaly_models from festival import models as festival_models from filmfestival import models as filmffestival_models class Command(BaseCommand): help = '''rename all images''' def handle(self, *args, **kwargs): for film in filmffestival_models.Film.objects.all(): # self.rename_poster(film) self.rename_stills(film) def rename_poster(self, film): year = film.project.festival.year festival = film.project.festival.slug if film.poster: print film.poster.name ext = os.path.splitext(film.poster.name)[-1] poster_name = "images/mykonos-biennale-{}-{}-{}-{}-poster{}".format( year, festival, film.project.slug, film.slug.strip('-'), ext ) poster = ContentFile(film.poster.read()) poster.name = poster_name film.poster = poster film.save() print film.poster.name def rename_stills(self, film): print film.slug for i, image in enumerate(film.filmfestival_image_related.all()): print i, image.image.name # # for festival in festivaly_models.Festival.objects.all(): # # print (festival, festival.slug, festival.get_absolute_url()) # # for project in festivaly_models.Project.objects.all(): # # print (project, project.slug, project.get_absolute_url()) # # for festival_project in festivaly_models.FestivalProject.objects.all(): # # festival_project.save() # # print (festival_project, festival_project.slug, festival_project.get_absolute_url()) # # # f_2017 = festivaly_models.Festival.objects.get(year=2017) # # for project in festivaly_models.Project.objects.all(): # # festival_project,_ = festivaly_models.FestivalProject.objects.get_or_create( # # festival= f_2017, # # project = project, # # name = project.name, # # text = project.text) # # # for festival in festival_models.Festival.objects.all(): # # for projectx in festival.projectx_set.all(): # # print festival, projectx # # def rec_2(self): # for artist in festival_models.Artist.objects.all(): # try: # festivaly_models.Participant.objects.get(name=artist.name) # except: # self.mirgate_artist(artist) # # # for festival_project in festivaly_models.FestivalProject.objects.filter(festival__year=2015): # # ps = festival_models.ProjectSeason.objects.filter(project__title = festival_project.name).first() # # if ps: # # print('found', ps, 'matches',festival_project) # # # for artist in set([art.artist for art in ps.art_set.all()]): # # def purge(self): # print festivaly_models.FilmDirector.objects.count() # pees = [fd.participant for fd in festivaly_models.FilmDirector.objects.all()] # map(lambda x: x.delete(), pees) # # def process_names(self, text): # for name in re.split("\s*[\/&,]\s*", text): # yield HumanName(name) # # # build album for each art piece # # # album = { # # 'festivalproject':festivalproject, # # 'name': '{} - {}'.format(artist.name, work), # # 'text': (works[work][0].description + '\n\n'+ works[work][0].text).strip(), # # 'media':[] # # } # # artwork = festivaly_models.Art.objects.create( # # name = album['name'], # # text = album['text'] # # ) # # def migrate_film(self, old_film): # # image = ContentFile(old_film.poster.read()) # dir_list = [str(name) for name in self.process_names(old_film.dir_by)] # slug = slugify(old_film.title 
+ '-' + ' '.join(dir_list)) # festivalproject = festivaly_models.FestivalProject.objects.get(festival__year=2015, project__name={ # 'Dramatic Nights': 'Dramatic Nights', # 'Video Graffiti': 'Video Graffiti', # 'Dance': 'Video Graffiti', # 'Video Grafitti': 'Video Graffiti', # 'Documentary': 'Dramatic Nights', # }[old_film.film_type]) # poster = None # if old_film.poster: # poster = ContentFile(old_film.poster.read()) # poster_name = 'mykonos-biennale-{}-{}-{}-post{}'.format( # festivalproject.festival.slug, # festivalproject.project.slug, # old_film.slug, # os.path.splitext(old_film.poster.name)[1] # ) # # # if old_film.trailer_video: # # #trailer_video = ContentFile(old_film.trailer_video.read()) # # trailer_video_name = 'mykonos-biennale-{}-{}-{}-trailer{}'.format( # # festivalproject.festival.slug, # # festivalproject.project.slug, # # old_film.slug, # # os.path.splitext(old_film.poster.name)[1] # # ) # def synopsis(film): # if film.log_line: # return film.log_line # elif film.synopsis_125: # return film.synopsis_125 # elif film.synopsis_250: # return film.synopsis_250 # else: # return film.synopsis # # stills, _ = festivaly_models.Album.objects.get_or_create( # name="{} dir. by {}".format(old_film.title, ', '.join(dir_list)), # defaults=dict( # text=synopsis(old_film), # ) # ) # # film, created = festivaly_models.Film.objects.get_or_create(ref=old_film.ref, # defaults=dict( # film_source=old_film.source.lower(), # ref=old_film.ref, # entry_status=old_film.status.lower(), # film_type={ # 'Dramatic Nights': 'short', # 'Video Graffiti': 'art_video', # 'Video Grafitti': 'art_video', # 'Dance': 'dance', # 'Documentary': 'documentary', # }.get(old_film.film_type), # name=old_film.title, # original_title=old_film.original_title, # sub_by=old_film.sub_by, # contact_email=old_film.contact_email, # contact_phone=old_film.contact_phone, # posted_on_facebook=old_film.posted_on_facebook, # subtitles=old_film.subtitles, # language=old_film.language, # actors=old_film.actors, # year=old_film.year, # runtime=old_film.runtime, # country=old_film.country, # projection_copy=old_film.projection_copy, # projection_copy_url=old_film.projection_copy_url, # coming=False, # present=old_film.present, # when=old_film.when, # log_line=old_film.log_line, # synopsis=old_film.synopsis, # synopsis_125=old_film.synopsis_125, # synopsis_250=old_film.synopsis_250, # first_time=old_film.first_time, # twitter=old_film.twitter, # facebook=old_film.facebook, # other_social_media=old_film.other_social_media, # url=old_film.url, # screenwriters=old_film.screenwriters, # producers=old_film.producers, # exec_producers=old_film.exec_producers, # co_producers=old_film.co_producers, # cinematographers=old_film.cinematographers, # product_designers=old_film.product_designers, # art_directors=old_film.art_directors, # editors=old_film.editors, # sound_editors=old_film.sound_editors, # composers=old_film.composers, # crew=old_film.crew, # screenings=old_film.screenings, # genres=old_film.genres, # niches=old_film.niches, # info=old_film.info, # directors_statement=old_film.directors_statement, # production_notes=old_film.production_notes, # poster=poster, # trailer_url=old_film.trailer_url, # trailer_embed=old_film.trailer_embed, # stills=stills # ) # ) # # for i, image in enumerate(old_film.filmfestival_image_related.all()): # # new_image = ContentFile(image.image.read()) # image_name = 'mykonos-biennale-{}-{}-{}-still{}{}'.format( # festivalproject.festival.slug, # festivalproject.project.slug, # old_film.slug, # ('-%d' % i) if i 
else '', # os.path.splitext(image.image.name)[1] # ) # media, created = festivaly_models.Media.objects.get_or_create( # name='still of {}{}'.format(film.name, (' (%d)' % i) if i else ''), # defaults=dict( # text=stills.text, # image=new_image, # ) # ) # if created: film.stills.media.add(media) # return film # # def process_films(self): # for director in festivaly_models.FilmDirector.objects.all(): # print director.participant.name # # def process_filmdirectors(self): # def process_names(text): # for name in re.split("\s*[\/&,]\s*", text): # yield HumanName(name) # # for film in filmffestival_models.Film.objects.filter(status=filmffestival_models.Film.SELECTED): # if festivaly_models.Film.objects.filter(ref=film.ref).first(): # continue # print film, film.film_type, film.dir_by # new_film = self.migrate_film(film) # file_type = 'Video Graffiti' if film.film_type == 'Video Grafitti' else 'Dramatic Nights' # # festivalproject = festivaly_models.FestivalProject.objects.get(festival__year=2015, project__name=file_type) # directors = [str(name) for name in process_names(film.dir_by)] # # print '\t directors:', map(str, directors) # submitter = HumanName(film.sub_by) if film.sub_by.strip() else None # director_submitter = None # if not submitter: # director_submitter = directors[0] # elif submitter in directors: # # print 'MATCH' # director_submitter = directors[directors.index(submitter)] # else: # pass # print 'NO MATCH', submitter # if director_submitter: # pass # # print '\t\t director submitted', director_submitter # # print '\t\t ', film.contact_email # # print '\t\t ', film.contact_phone # for director in directors: # if director == director_submitter: # participant, _ = festivaly_models.Participant.objects.get_or_create(name=str(director), # defaults=dict( # phone=film.contact_phone, # email=film.contact_email # )) # else: # participant, _ = festivaly_models.Participant.objects.get_or_create(name=str(director)) # film_director, created = festivaly_models.FilmDirector.objects.get_or_create(participant=participant, # festival_project=festivalproject) # film_director.films.add(new_film) # # return # # # if film.actors: print '\t actors:', [str(name) for name in process_names(film.actors)] # # if film.producers: print '\t producers:', [str(name) for name in process_names(film.producers)] # # if film.exec_producers: print '\t exec_producers:', [str(name) for name in process_names(film.exec_producers)] # # if film.co_producers: print '\t co_producers:', [str(name) for name in process_names(film.co_producers)] # # if film.cinematographers: print '\t cinematographers:', [str(name) for name in process_names(film.cinematographers)] # # if film.screenwriters: print '\t screenwriters:', [str(name) for name in process_names(film.screenwriters)] # # if film.editors: print '\t editors:', [str(name) for name in process_names(film.editors)] # # if film.sound_editors: print '\t sound_editors:', [str(name) for name in process_names(film.sound_editors)] # # if film.composers: print '\t composers:', [str(name) for name in process_names(film.composers)] # # if film.art_directors: print '\t art_directors:', [str(name) for name in process_names(film.art_directors)] # # if film.crew: print '\t crew:', [str(name) for name in process_names(film.crew)] # # def list_festivals(self): # for festival in festivaly_models.Festival.objects.all(): # print (festival) # # def mirgate_artist(self, old_artist): # participant = self.add_participant(old_artist) # print participant # artworks = collections.defaultdict(list) # # 
collect the art by project # for art in old_artist.art_set.all(): # festivalproject = festivaly_models.FestivalProject.objects.get(festival__year=2015, # project__name=art.project_x.project.title) # artworks[festivalproject].append(art) # for festivalproject in artworks: # participation, _ = festivaly_models.Artist.objects.get_or_create(festival_project=festivalproject, # participant=participant) # # collect the images by art piece # works = collections.defaultdict(list) # for photo in artworks[festivalproject]: # works[photo.title].append(photo) # # create album for each art piece # for work in works: # print '{} - {}'.format(participant.name, work) # artwork, created = festivaly_models.Art.objects.get_or_create( # name='{} - {}'.format(participant.name, work), # text=(works[work][0].description + '\n\n' + works[work][0].text).strip(), # ) # print artwork, created # if created: # participation.artwork.add(artwork) # for i, photo in enumerate(works[work]): # image = ContentFile(photo.photo.read()) # image.name = 'mykonos-biennale-{}-{}-{}-{}{}{}'.format( # festivalproject.festival.slug, # festivalproject.project.slug, # photo.artist.slug, # photo.slug, # ('-%d' % i) if i else '', # os.path.splitext(photo.photo.name)[1] # ) # media, created = festivaly_models.Media.objects.get_or_create( # name='{} - {}{}'.format(participant.name, photo.title, ('(%d)' % i) if i else ''), # defaults=dict( # image=image, # text=(photo.description + '\n\n' + photo.text).strip(), # ) # ) # print media, created # if created: artwork.media.add(media) # # # def rec_art(self): # # for artist in festivaly_models.Artist.objects.all(): # # if artist.artwork.first() == None: # # print artist # # old_artist = festival_models.Artist.objects.get(name=artist.participant.name) # # print 'old_artist:', old_artist # # projects = collections.defaultdict(list) # # for art in old_artist.art_set.all(): # # projects[art.project_x].append(art) # # for project in projects: # # old_festival = project.festival # # festivalproject = festivaly_models.FestivalProject.objects.get(festival__year=old_festival.year, project__name=project.project.title) # # works = collections.defaultdict(list) # # for work in projects[project]: # # works[work.title].append(work) # # for work in works: # # # album = { # # 'festivalproject':festivalproject, # # 'name': '{} - {}'.format(artist.name, work), # # 'text': (works[work][0].description + '\n\n'+ works[work][0].text).strip(), # # 'media':[] # # } # # artwork = festivaly_models.Art.objects.create( # # name = album['name'], # # text = album['text'] # # ) # # artist.artwork.add(artwork) # # print 'artwork', artwork.pk # # for i,p in enumerate(works[work]): # # image = ContentFile(p.photo.read()) # # image.name = 'mykonos-biennale-{}-{}-{}-{}{}{}'.format( # # festivalproject.festival.slug, # # festivalproject.project.slug, # # p.artist.slug, # # p.slug, # # ('-%d' % i) if i else '', # # os.path.splitext(p.photo.name)[1] # # ) # # media = festivaly_models.Media.objects.create( # # image = image, # # name = '{} - {}'.format(artist.name, p.title), # # text = (p.description + '\n\n'+ p.text).strip(), # # ) # # artwork.media.add(media) # # print 'media', media.pk # # print 'album', album # # # print 'project- art:', project, projects[project] # # print '\t old project:', art, art.project_x # # festival = art.project_x.festival # # print '\t new project:', festivaly_models.FestivalProject.objects.get(festival__year=festival.year, project__name=art.project_x.project.title) # # # break # # # for i, art in 
enumerate(festivaly_models.Art.objects.all()): # # print i, 'art', art, 'artist', [a.participant.name for a in art.artist.all()] # # # def migrate_2015_art(self): # # artists = collections.defaultdict(list) # # for ps in festival_models.ProjectSeason.objects.all(): # # for art in ps.art_set.all(): # # artists[art.artist.name].append((ps, art)) # # for a in artists: # # print a, len(artists[a]) # # work = {} # # work_images = collections.defaultdict(list) # # if 'XXVenieri' not in a: # # for ips, art in artists[a]: # # # # print """ # # # title: {title} # # # slug: {slug} # # # show: {show} # # # leader: {leader} # # # description: {description} # # # text: {text} # # # photo: {photo} # # # """.format(**vars(art)) # # fp = festivaly_models.FestivalProject.objects.get(festival__year=2015, name=ips.project.title) # # participant = festivaly_models.Participant.objects.get(name=a) # # artistp,_ = festivaly_models.Artist.objects.get_or_create(festival_project=fp, participant=participant) # # work[(artistp, art.title)] = art # # work_images[(artistp, art.title)].append(art) # # #continue # # for k in work: # # artistp = k[0] # # art = work[k] # # text = art.description + '\n\n'+ art.text # # artwork,created = festivaly_models.Art.objects.get_or_create( # # name=art.title, # # defaults={ 'text': text.strip()} # # ) # # print artwork,created # # if created: # # artistp.artwork.add(artwork) # # for i, img in enumerate(work_images[k]): # # image = ContentFile(img.photo.read()) # # image.name = 'mykonos-biennale-{}-{}-{}-{}{}{}'.format( # # k[0].festival_project.festival.slug, # # k[0].festival_project.project.slug, # # art.artist.slug, # # art.slug, # # ('-%d' % i) if i else '', # # os.path.splitext(img.photo.name)[1] # # ) # # media = festivaly_models.Media.objects.create( # # image = image, # # name = artwork.name, # # text = artwork.text # # ) # # artwork.media.add(media) # # # def rec_2015_artists(self): # # for artist in festival_models.Artist.objects.filter(visible=True): # # try: # # found = festivaly_models.Participant.objects.get(name=artist.name) # # except: # # print "not Found", artist.name # # for art in artist.art_set.all(): # # print '\t', art, art.project_x.pk # # if not 'Lommel' in artist.name: # # self.add_participant(artist) # # def add_participant(self, artist): # print ('\t %s' % artist) # if "The" in artist.name: # sort_by = artist.name[4:].strip() # else: # name = HumanName(artist.name) # sort_by = "{} {}".format(name.last, name.first).strip() # headshot = artist.headshot # participant, created = festivaly_models.Participant.objects.get_or_create( # name=artist.name, # defaults=dict( # sort_by=sort_by, # text=artist.bio, # statement=artist.statement, # email=artist.email, # country=artist.country, # phone=artist.phone, # homepage=artist.homepage, # ) # ) # if created: # if artist.headshot: # participant.headshot = ContentFile(artist.headshot.read()) # participant.headshot.name = 'mykonos-biennale-artist-{}{}'.format(artist.slug, os.path.splitext( # artist.headshot.name)[1]) # participant.save() # print participant # return participant # # # def add_artist(self, artist): # # print ('\t %s' % artist) # # if "The" in artist.name: # # sort_by = artist.name[4:].strip() # # else: # # name = HumanName(artist.name) # # sort_by = "{} {}".format(name.last, name.first).strip() # # headshot = artist.headshot # # if artist.headshot: # # headshot = ContentFile(artist.headshot.read()) # # headshot.name = 'mykonos-biennale-artist-{}{}'.format(artist.slug, 
os.path.splitext(artist.headshot.name)[1]) # # new_artist = festivaly_models.Artist.objects.get_or_create( # # festival_project = festival_project, # # participant = festivaly_models.Participant.objects.get_or_create( # # name = artist.name, # # defaults = dict( # # sort_by = sort_by, # # text = artist.bio, # # statement = artist.statement, # # email = artist.email, # # country = artist.country, # # phone = artist.phone, # # homepage = artist.homepage, # # headshot = headshot # # ) # # )[0] # # ) # # print new_artist # # return new_artist # # # def mirgate_2015_artists(self): # # for festival_project in festivaly_models.FestivalProject.objects.filter(festival__year=2015): # # ps = festival_models.ProjectSeason.objects.filter(project__title = festival_project.name).first() # # if ps: # # print('found', ps, 'matches',festival_project) # # # for artist in set([art.artist for art in ps.art_set.all()]): # # print ('\t %s' % artist) # # if "The" in artist.name: # # sort_by = artist.name[4:].strip() # # else: # # name = HumanName(artist.name) # # sort_by = "{} {}".format(name.last, name.first).strip() # # headshot = artist.headshot # # if artist.headshot: # # headshot = ContentFile(artist.headshot.read()) # # headshot.name = 'mykonos-biennale-artist-{}{}'.format(artist.slug, os.path.splitext(artist.headshot.name)[1]) # # artist = festivaly_models.Artist.objects.get_or_create( # # festival_project = festival_project, # # participant = festivaly_models.Participant.objects.get_or_create( # # name = artist.name, # # defaults = dict( # # sort_by = sort_by, # # text = artist.bio, # # statement = artist.statement, # # email = artist.email, # # country = artist.country, # # phone = artist.phone, # # homepage = artist.homepage, # # headshot = headshot # # ) # # )[0] # # ) # # print artist # # else: # # print('no match for',festival_project) # # #
apache-2.0
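The live part of the management command above (rename_poster) relies on one Django idiom: read the stored file, wrap the bytes in a ContentFile under a new name, and re-assign it to the field so the storage backend writes a fresh copy on save. A generic sketch of that idiom, with a hypothetical model instance and field name:

import os

from django.core.files.base import ContentFile


def rename_file_field(instance, field_name, new_basename):
    # Re-save the file stored in `field_name` under `new_basename`,
    # keeping the original extension.
    field = getattr(instance, field_name)
    if not field:
        return
    ext = os.path.splitext(field.name)[-1]
    renamed = ContentFile(field.read())
    renamed.name = '{}{}'.format(new_basename, ext)
    setattr(instance, field_name, renamed)
    instance.save()  # the storage backend writes the copy under the new name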
autvincere/bureau-veritas-food
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/common.py
1292
20063
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import with_statement import collections import errno import filecmp import os.path import re import tempfile import sys # A minimal memoizing decorator. It'll blow up if the args aren't immutable, # among other "problems". class memoize(object): def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): try: return self.cache[args] except KeyError: result = self.func(*args) self.cache[args] = result return result class GypError(Exception): """Error class representing an error, which is to be presented to the user. The main entry point will catch and display this. """ pass def ExceptionAppend(e, msg): """Append a message to the given exception's message.""" if not e.args: e.args = (msg,) elif len(e.args) == 1: e.args = (str(e.args[0]) + ' ' + msg,) else: e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:] def FindQualifiedTargets(target, qualified_list): """ Given a list of qualified targets, return the qualified targets for the specified |target|. """ return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target] def ParseQualifiedTarget(target): # Splits a qualified target into a build file, target name and toolset. # NOTE: rsplit is used to disambiguate the Windows drive letter separator. target_split = target.rsplit(':', 1) if len(target_split) == 2: [build_file, target] = target_split else: build_file = None target_split = target.rsplit('#', 1) if len(target_split) == 2: [target, toolset] = target_split else: toolset = None return [build_file, target, toolset] def ResolveTarget(build_file, target, toolset): # This function resolves a target into a canonical form: # - a fully defined build file, either absolute or relative to the current # directory # - a target name # - a toolset # # build_file is the file relative to which 'target' is defined. # target is the qualified target. # toolset is the default toolset for that target. [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target) if parsed_build_file: if build_file: # If a relative path, parsed_build_file is relative to the directory # containing build_file. If build_file is not in the current directory, # parsed_build_file is not a usable path as-is. Resolve it by # interpreting it as relative to build_file. If parsed_build_file is # absolute, it is usable as a path regardless of the current directory, # and os.path.join will return it as-is. build_file = os.path.normpath(os.path.join(os.path.dirname(build_file), parsed_build_file)) # Further (to handle cases like ../cwd), make it relative to cwd) if not os.path.isabs(build_file): build_file = RelativePath(build_file, '.') else: build_file = parsed_build_file if parsed_toolset: toolset = parsed_toolset return [build_file, target, toolset] def BuildFile(fully_qualified_target): # Extracts the build file from the fully qualified target. 
return ParseQualifiedTarget(fully_qualified_target)[0] def GetEnvironFallback(var_list, default): """Look up a key in the environment, with fallback to secondary keys and finally falling back to a default value.""" for var in var_list: if var in os.environ: return os.environ[var] return default def QualifiedTarget(build_file, target, toolset): # "Qualified" means the file that a target was defined in and the target # name, separated by a colon, suffixed by a # and the toolset name: # /path/to/file.gyp:target_name#toolset fully_qualified = build_file + ':' + target if toolset: fully_qualified = fully_qualified + '#' + toolset return fully_qualified @memoize def RelativePath(path, relative_to, follow_path_symlink=True): # Assuming both |path| and |relative_to| are relative to the current # directory, returns a relative path that identifies path relative to # relative_to. # If |follow_symlink_path| is true (default) and |path| is a symlink, then # this method returns a path to the real file represented by |path|. If it is # false, this method returns a path to the symlink. If |path| is not a # symlink, this option has no effect. # Convert to normalized (and therefore absolute paths). if follow_path_symlink: path = os.path.realpath(path) else: path = os.path.abspath(path) relative_to = os.path.realpath(relative_to) # On Windows, we can't create a relative path to a different drive, so just # use the absolute path. if sys.platform == 'win32': if (os.path.splitdrive(path)[0].lower() != os.path.splitdrive(relative_to)[0].lower()): return path # Split the paths into components. path_split = path.split(os.path.sep) relative_to_split = relative_to.split(os.path.sep) # Determine how much of the prefix the two paths share. prefix_len = len(os.path.commonprefix([path_split, relative_to_split])) # Put enough ".." components to back up out of relative_to to the common # prefix, and then append the part of path_split after the common prefix. relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \ path_split[prefix_len:] if len(relative_split) == 0: # The paths were the same. return '' # Turn it back into a string and we're done. return os.path.join(*relative_split) @memoize def InvertRelativePath(path, toplevel_dir=None): """Given a path like foo/bar that is relative to toplevel_dir, return the inverse relative path back to the toplevel_dir. E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path))) should always produce the empty string, unless the path contains symlinks. """ if not path: return path toplevel_dir = '.' if toplevel_dir is None else toplevel_dir return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path)) def FixIfRelativePath(path, relative_to): # Like RelativePath but returns |path| unchanged if it is absolute. if os.path.isabs(path): return path return RelativePath(path, relative_to) def UnrelativePath(path, relative_to): # Assuming that |relative_to| is relative to the current directory, and |path| # is a path relative to the dirname of |relative_to|, returns a path that # identifies |path| relative to the current directory. rel_dir = os.path.dirname(relative_to) return os.path.normpath(os.path.join(rel_dir, path)) # re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at # http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02 # and the documentation for various shells. # _quote is a pattern that should match any argument that needs to be quoted # with double-quotes by EncodePOSIXShellArgument. 
It matches the following # characters appearing anywhere in an argument: # \t, \n, space parameter separators # # comments # $ expansions (quoted to always expand within one argument) # % called out by IEEE 1003.1 XCU.2.2 # & job control # ' quoting # (, ) subshell execution # *, ?, [ pathname expansion # ; command delimiter # <, >, | redirection # = assignment # {, } brace expansion (bash) # ~ tilde expansion # It also matches the empty string, because "" (or '') is the only way to # represent an empty string literal argument to a POSIX shell. # # This does not match the characters in _escape, because those need to be # backslash-escaped regardless of whether they appear in a double-quoted # string. _quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$') # _escape is a pattern that should match any character that needs to be # escaped with a backslash, whether or not the argument matched the _quote # pattern. _escape is used with re.sub to backslash anything in _escape's # first match group, hence the (parentheses) in the regular expression. # # _escape matches the following characters appearing anywhere in an argument: # " to prevent POSIX shells from interpreting this character for quoting # \ to prevent POSIX shells from interpreting this character for escaping # ` to prevent POSIX shells from interpreting this character for command # substitution # Missing from this list is $, because the desired behavior of # EncodePOSIXShellArgument is to permit parameter (variable) expansion. # # Also missing from this list is !, which bash will interpret as the history # expansion character when history is enabled. bash does not enable history # by default in non-interactive shells, so this is not thought to be a problem. # ! was omitted from this list because bash interprets "\!" as a literal string # including the backslash character (avoiding history expansion but retaining # the backslash), which would not be correct for argument encoding. Handling # this case properly would also be problematic because bash allows the history # character to be changed with the histchars shell variable. Fortunately, # as history is not enabled in non-interactive shells and # EncodePOSIXShellArgument is only expected to encode for non-interactive # shells, there is no room for error here by ignoring !. _escape = re.compile(r'(["\\`])') def EncodePOSIXShellArgument(argument): """Encodes |argument| suitably for consumption by POSIX shells. argument may be quoted and escaped as necessary to ensure that POSIX shells treat the returned value as a literal representing the argument passed to this function. Parameter (variable) expansions beginning with $ are allowed to remain intact without escaping the $, to allow the argument to contain references to variables to be expanded by the shell. """ if not isinstance(argument, str): argument = str(argument) if _quote.search(argument): quote = '"' else: quote = '' encoded = quote + re.sub(_escape, r'\\\1', argument) + quote return encoded def EncodePOSIXShellList(list): """Encodes |list| suitably for consumption by POSIX shells. Returns EncodePOSIXShellArgument for each item in list, and joins them together using the space character as an argument separator. """ encoded_arguments = [] for argument in list: encoded_arguments.append(EncodePOSIXShellArgument(argument)) return ' '.join(encoded_arguments) def DeepDependencyTargets(target_dicts, roots): """Returns the recursive list of target dependencies.""" dependencies = set() pending = set(roots) while pending: # Pluck out one. 
r = pending.pop() # Skip if visited already. if r in dependencies: continue # Add it. dependencies.add(r) # Add its children. spec = target_dicts[r] pending.update(set(spec.get('dependencies', []))) pending.update(set(spec.get('dependencies_original', []))) return list(dependencies - set(roots)) def BuildFileTargets(target_list, build_file): """From a target_list, returns the subset from the specified build_file. """ return [p for p in target_list if BuildFile(p) == build_file] def AllTargets(target_list, target_dicts, build_file): """Returns all targets (direct and dependencies) for the specified build_file. """ bftargets = BuildFileTargets(target_list, build_file) deptargets = DeepDependencyTargets(target_dicts, bftargets) return bftargets + deptargets def WriteOnDiff(filename): """Write to a file only if the new contents differ. Arguments: filename: name of the file to potentially write to. Returns: A file like object which will write to temporary file and only overwrite the target if it differs (on close). """ class Writer(object): """Wrapper around file which only covers the target if it differs.""" def __init__(self): # Pick temporary file. tmp_fd, self.tmp_path = tempfile.mkstemp( suffix='.tmp', prefix=os.path.split(filename)[1] + '.gyp.', dir=os.path.split(filename)[0]) try: self.tmp_file = os.fdopen(tmp_fd, 'wb') except Exception: # Don't leave turds behind. os.unlink(self.tmp_path) raise def __getattr__(self, attrname): # Delegate everything else to self.tmp_file return getattr(self.tmp_file, attrname) def close(self): try: # Close tmp file. self.tmp_file.close() # Determine if different. same = False try: same = filecmp.cmp(self.tmp_path, filename, False) except OSError, e: if e.errno != errno.ENOENT: raise if same: # The new file is identical to the old one, just get rid of the new # one. os.unlink(self.tmp_path) else: # The new file is different from the old one, or there is no old one. # Rename the new file to the permanent name. # # tempfile.mkstemp uses an overly restrictive mode, resulting in a # file that can only be read by the owner, regardless of the umask. # There's no reason to not respect the umask here, which means that # an extra hoop is required to fetch it and reset the new file's mode. # # No way to get the umask without setting a new one? Set a safe one # and then set it back to the old value. umask = os.umask(077) os.umask(umask) os.chmod(self.tmp_path, 0666 & ~umask) if sys.platform == 'win32' and os.path.exists(filename): # NOTE: on windows (but not cygwin) rename will not replace an # existing file, so it must be preceded with a remove. Sadly there # is no way to make the switch atomic. os.remove(filename) os.rename(self.tmp_path, filename) except Exception: # Don't leave turds behind. 
os.unlink(self.tmp_path) raise return Writer() def EnsureDirExists(path): """Make sure the directory for |path| exists.""" try: os.makedirs(os.path.dirname(path)) except OSError: pass def GetFlavor(params): """Returns |params.flavor| if it's set, the system's default flavor else.""" flavors = { 'cygwin': 'win', 'win32': 'win', 'darwin': 'mac', } if 'flavor' in params: return params['flavor'] if sys.platform in flavors: return flavors[sys.platform] if sys.platform.startswith('sunos'): return 'solaris' if sys.platform.startswith('freebsd'): return 'freebsd' if sys.platform.startswith('openbsd'): return 'openbsd' if sys.platform.startswith('netbsd'): return 'netbsd' if sys.platform.startswith('aix'): return 'aix' return 'linux' def CopyTool(flavor, out_path): """Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it to |out_path|.""" # aix and solaris just need flock emulation. mac and win use more complicated # support scripts. prefix = { 'aix': 'flock', 'solaris': 'flock', 'mac': 'mac', 'win': 'win' }.get(flavor, None) if not prefix: return # Slurp input file. source_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix) with open(source_path) as source_file: source = source_file.readlines() # Add header and write it out. tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix) with open(tool_path, 'w') as tool_file: tool_file.write( ''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:])) # Make file executable. os.chmod(tool_path, 0755) # From Alex Martelli, # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560 # ASPN: Python Cookbook: Remove duplicates from a sequence # First comment, dated 2001/10/13. # (Also in the printed Python Cookbook.) def uniquer(seq, idfun=None): if idfun is None: idfun = lambda x: x seen = {} result = [] for item in seq: marker = idfun(item) if marker in seen: continue seen[marker] = 1 result.append(item) return result # Based on http://code.activestate.com/recipes/576694/. class OrderedSet(collections.MutableSet): def __init__(self, iterable=None): self.end = end = [] end += [None, end, end] # sentinel node for doubly linked list self.map = {} # key --> [key, prev, next] if iterable is not None: self |= iterable def __len__(self): return len(self.map) def __contains__(self, key): return key in self.map def add(self, key): if key not in self.map: end = self.end curr = end[1] curr[2] = end[1] = self.map[key] = [key, curr, end] def discard(self, key): if key in self.map: key, prev_item, next_item = self.map.pop(key) prev_item[2] = next_item next_item[1] = prev_item def __iter__(self): end = self.end curr = end[2] while curr is not end: yield curr[0] curr = curr[2] def __reversed__(self): end = self.end curr = end[1] while curr is not end: yield curr[0] curr = curr[1] # The second argument is an addition that causes a pylint warning. def pop(self, last=True): # pylint: disable=W0221 if not self: raise KeyError('set is empty') key = self.end[1][0] if last else self.end[2][0] self.discard(key) return key def __repr__(self): if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self)) def __eq__(self, other): if isinstance(other, OrderedSet): return len(self) == len(other) and list(self) == list(other) return set(self) == set(other) # Extensions to the recipe. 
def update(self, iterable): for i in iterable: if i not in self: self.add(i) class CycleError(Exception): """An exception raised when an unexpected cycle is detected.""" def __init__(self, nodes): self.nodes = nodes def __str__(self): return 'CycleError: cycle involving: ' + str(self.nodes) def TopologicallySorted(graph, get_edges): r"""Topologically sort based on a user provided edge definition. Args: graph: A list of node names. get_edges: A function mapping from node name to a hashable collection of node names which this node has outgoing edges to. Returns: A list containing all of the node in graph in topological order. It is assumed that calling get_edges once for each node and caching is cheaper than repeatedly calling get_edges. Raises: CycleError in the event of a cycle. Example: graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'} def GetEdges(node): return re.findall(r'\$\(([^))]\)', graph[node]) print TopologicallySorted(graph.keys(), GetEdges) ==> ['a', 'c', b'] """ get_edges = memoize(get_edges) visited = set() visiting = set() ordered_nodes = [] def Visit(node): if node in visiting: raise CycleError(visiting) if node in visited: return visited.add(node) visiting.add(node) for neighbor in get_edges(node): Visit(neighbor) visiting.remove(node) ordered_nodes.insert(0, node) for node in sorted(graph): Visit(node) return ordered_nodes def CrossCompileRequested(): # TODO: figure out how to not build extra host objects in the # non-cross-compile case when this is enabled, and enable unconditionally. return (os.environ.get('GYP_CROSSCOMPILE') or os.environ.get('AR_host') or os.environ.get('CC_host') or os.environ.get('CXX_host') or os.environ.get('AR_target') or os.environ.get('CC_target') or os.environ.get('CXX_target'))
mit
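Among the helpers in gyp/common.py above, TopologicallySorted is the least obvious from its signature alone: get_edges maps a node to the nodes it depends on, and the result lists each node before the nodes it has edges to. A tiny standalone usage (the graph here is made up; real callers pass gyp target dicts):

from gyp.common import TopologicallySorted  # assumes gyp's pylib directory is on sys.path

graph = {'a': ['b', 'c'], 'b': [], 'c': ['b']}


def get_edges(node):
    return graph[node]


print(TopologicallySorted(list(graph), get_edges))
# -> ['a', 'c', 'b']: 'a' precedes both of its dependencies, and 'c' precedes 'b'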
mrjaydee82/SinLessKerne1-m8-GPE
toolchains/a8483/share/gdb/python/gdb/command/explore.py
137
26824
# GDB 'explore' command. # Copyright (C) 2012-2013 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """Implementation of the GDB 'explore' command using the GDB Python API.""" import gdb import sys if sys.version_info[0] > 2: # Python 3 renamed raw_input to input raw_input = input class Explorer(object): """Internal class which invokes other explorers.""" # This map is filled by the Explorer.init_env() function type_code_to_explorer_map = { } _SCALAR_TYPE_LIST = ( gdb.TYPE_CODE_CHAR, gdb.TYPE_CODE_INT, gdb.TYPE_CODE_BOOL, gdb.TYPE_CODE_FLT, gdb.TYPE_CODE_VOID, gdb.TYPE_CODE_ENUM, ) @staticmethod def guard_expr(expr): length = len(expr) guard = False if expr[0] == '(' and expr[length-1] == ')': pass else: i = 0 while i < length: c = expr[i] if (c == '_' or ('a' <= c and c <= 'z') or ('A' <= c and c <= 'Z') or ('0' <= c and c <= '9')): pass else: guard = True break i += 1 if guard: return "(" + expr + ")" else: return expr @staticmethod def explore_expr(expr, value, is_child): """Main function to explore an expression value. Arguments: expr: The expression string that is being explored. value: The gdb.Value value of the expression. is_child: Boolean value to indicate if the expression is a child. An expression is a child if it is derived from the main expression entered by the user. For example, if the user entered an expression which evaluates to a struct, then when exploring the fields of the struct, is_child is set to True internally. Returns: No return value. """ type_code = value.type.code if type_code in Explorer.type_code_to_explorer_map: explorer_class = Explorer.type_code_to_explorer_map[type_code] while explorer_class.explore_expr(expr, value, is_child): pass else: print ("Explorer for type '%s' not yet available.\n" % str(value.type)) @staticmethod def explore_type(name, datatype, is_child): """Main function to explore a data type. Arguments: name: The string representing the path to the data type being explored. datatype: The gdb.Type value of the data type being explored. is_child: Boolean value to indicate if the name is a child. A name is a child if it is derived from the main name entered by the user. For example, if the user entered the name of struct type, then when exploring the fields of the struct, is_child is set to True internally. Returns: No return value. """ type_code = datatype.code if type_code in Explorer.type_code_to_explorer_map: explorer_class = Explorer.type_code_to_explorer_map[type_code] while explorer_class.explore_type(name, datatype, is_child): pass else: print ("Explorer for type '%s' not yet available.\n" % str(datatype)) @staticmethod def init_env(): """Initializes the Explorer environment. This function should be invoked before starting any exploration. If invoked before an exploration, it need not be invoked for subsequent explorations. 
""" Explorer.type_code_to_explorer_map = { gdb.TYPE_CODE_CHAR : ScalarExplorer, gdb.TYPE_CODE_INT : ScalarExplorer, gdb.TYPE_CODE_BOOL : ScalarExplorer, gdb.TYPE_CODE_FLT : ScalarExplorer, gdb.TYPE_CODE_VOID : ScalarExplorer, gdb.TYPE_CODE_ENUM : ScalarExplorer, gdb.TYPE_CODE_STRUCT : CompoundExplorer, gdb.TYPE_CODE_UNION : CompoundExplorer, gdb.TYPE_CODE_PTR : PointerExplorer, gdb.TYPE_CODE_REF : ReferenceExplorer, gdb.TYPE_CODE_TYPEDEF : TypedefExplorer, gdb.TYPE_CODE_ARRAY : ArrayExplorer } @staticmethod def is_scalar_type(type): """Checks whether a type is a scalar type. A type is a scalar type of its type is gdb.TYPE_CODE_CHAR or gdb.TYPE_CODE_INT or gdb.TYPE_CODE_BOOL or gdb.TYPE_CODE_FLT or gdb.TYPE_CODE_VOID or gdb.TYPE_CODE_ENUM. Arguments: type: The type to be checked. Returns: 'True' if 'type' is a scalar type. 'False' otherwise. """ return type.code in Explorer._SCALAR_TYPE_LIST @staticmethod def return_to_parent_value(): """A utility function which prints that the current exploration session is returning to the parent value. Useful when exploring values. """ print ("\nReturning to parent value...\n") @staticmethod def return_to_parent_value_prompt(): """A utility function which prompts the user to press the 'enter' key so that the exploration session can shift back to the parent value. Useful when exploring values. """ raw_input("\nPress enter to return to parent value: ") @staticmethod def return_to_enclosing_type(): """A utility function which prints that the current exploration session is returning to the enclosing type. Useful when exploring types. """ print ("\nReturning to enclosing type...\n") @staticmethod def return_to_enclosing_type_prompt(): """A utility function which prompts the user to press the 'enter' key so that the exploration session can shift back to the enclosing type. Useful when exploring types. """ raw_input("\nPress enter to return to enclosing type: ") class ScalarExplorer(object): """Internal class used to explore scalar values.""" @staticmethod def explore_expr(expr, value, is_child): """Function to explore scalar values. See Explorer.explore_expr and Explorer.is_scalar_type for more information. """ print ("'%s' is a scalar value of type '%s'." % (expr, value.type)) print ("%s = %s" % (expr, str(value))) if is_child: Explorer.return_to_parent_value_prompt() Explorer.return_to_parent_value() return False @staticmethod def explore_type(name, datatype, is_child): """Function to explore scalar types. See Explorer.explore_type and Explorer.is_scalar_type for more information. """ if datatype.code == gdb.TYPE_CODE_ENUM: if is_child: print ("%s is of an enumerated type '%s'." % (name, str(datatype))) else: print ("'%s' is an enumerated type." % name) else: if is_child: print ("%s is of a scalar type '%s'." % (name, str(datatype))) else: print ("'%s' is a scalar type." % name) if is_child: Explorer.return_to_enclosing_type_prompt() Explorer.return_to_enclosing_type() return False class PointerExplorer(object): """Internal class used to explore pointer values.""" @staticmethod def explore_expr(expr, value, is_child): """Function to explore pointer values. See Explorer.explore_expr for more information. """ print ("'%s' is a pointer to a value of type '%s'" % (expr, str(value.type.target()))) option = raw_input("Continue exploring it as a pointer to a single " "value [y/n]: ") if option == "y": deref_value = None try: deref_value = value.dereference() str(deref_value) except gdb.MemoryError: print ("'%s' a pointer pointing to an invalid memory " "location." 
% expr) if is_child: Explorer.return_to_parent_value_prompt() return False Explorer.explore_expr("*%s" % Explorer.guard_expr(expr), deref_value, is_child) return False option = raw_input("Continue exploring it as a pointer to an " "array [y/n]: ") if option == "y": while True: index = 0 try: index = int(raw_input("Enter the index of the element you " "want to explore in '%s': " % expr)) except ValueError: break element_expr = "%s[%d]" % (Explorer.guard_expr(expr), index) element = value[index] try: str(element) except gdb.MemoryError: print ("Cannot read value at index %d." % index) continue Explorer.explore_expr(element_expr, element, True) return False if is_child: Explorer.return_to_parent_value() return False @staticmethod def explore_type(name, datatype, is_child): """Function to explore pointer types. See Explorer.explore_type for more information. """ target_type = datatype.target() print ("\n%s is a pointer to a value of type '%s'." % (name, str(target_type))) Explorer.explore_type("the pointee type of %s" % name, target_type, is_child) return False class ReferenceExplorer(object): """Internal class used to explore reference (TYPE_CODE_REF) values.""" @staticmethod def explore_expr(expr, value, is_child): """Function to explore array values. See Explorer.explore_expr for more information. """ referenced_value = value.referenced_value() Explorer.explore_expr(expr, referenced_value, is_child) return False @staticmethod def explore_type(name, datatype, is_child): """Function to explore pointer types. See Explorer.explore_type for more information. """ target_type = datatype.target() Explorer.explore_type(name, target_type, is_child) return False class ArrayExplorer(object): """Internal class used to explore arrays.""" @staticmethod def explore_expr(expr, value, is_child): """Function to explore array values. See Explorer.explore_expr for more information. """ target_type = value.type.target() print ("'%s' is an array of '%s'." % (expr, str(target_type))) index = 0 try: index = int(raw_input("Enter the index of the element you want to " "explore in '%s': " % expr)) except ValueError: if is_child: Explorer.return_to_parent_value() return False element = None try: element = value[index] str(element) except gdb.MemoryError: print ("Cannot read value at index %d." % index) raw_input("Press enter to continue... ") return True Explorer.explore_expr("%s[%d]" % (Explorer.guard_expr(expr), index), element, True) return True @staticmethod def explore_type(name, datatype, is_child): """Function to explore array types. See Explorer.explore_type for more information. """ target_type = datatype.target() print ("%s is an array of '%s'." % (name, str(target_type))) Explorer.explore_type("the array element of %s" % name, target_type, is_child) return False class CompoundExplorer(object): """Internal class used to explore struct, classes and unions.""" @staticmethod def _print_fields(print_list): """Internal function which prints the fields of a struct/class/union. """ max_field_name_length = 0 for pair in print_list: if max_field_name_length < len(pair[0]): max_field_name_length = len(pair[0]) for pair in print_list: print (" %*s = %s" % (max_field_name_length, pair[0], pair[1])) @staticmethod def _get_real_field_count(fields): real_field_count = 0; for field in fields: if not field.artificial: real_field_count = real_field_count + 1 return real_field_count @staticmethod def explore_expr(expr, value, is_child): """Function to explore structs/classes and union values. 
See Explorer.explore_expr for more information. """ datatype = value.type type_code = datatype.code fields = datatype.fields() if type_code == gdb.TYPE_CODE_STRUCT: type_desc = "struct/class" else: type_desc = "union" if CompoundExplorer._get_real_field_count(fields) == 0: print ("The value of '%s' is a %s of type '%s' with no fields." % (expr, type_desc, str(value.type))) if is_child: Explorer.return_to_parent_value_prompt() return False print ("The value of '%s' is a %s of type '%s' with the following " "fields:\n" % (expr, type_desc, str(value.type))) has_explorable_fields = False choice_to_compound_field_map = { } current_choice = 0 print_list = [ ] for field in fields: if field.artificial: continue field_full_name = Explorer.guard_expr(expr) + "." + field.name if field.is_base_class: field_value = value.cast(field.type) else: field_value = value[field.name] literal_value = "" if type_code == gdb.TYPE_CODE_UNION: literal_value = ("<Enter %d to explore this field of type " "'%s'>" % (current_choice, str(field.type))) has_explorable_fields = True else: if Explorer.is_scalar_type(field.type): literal_value = ("%s .. (Value of type '%s')" % (str(field_value), str(field.type))) else: if field.is_base_class: field_desc = "base class" else: field_desc = "field" literal_value = ("<Enter %d to explore this %s of type " "'%s'>" % (current_choice, field_desc, str(field.type))) has_explorable_fields = True choice_to_compound_field_map[str(current_choice)] = ( field_full_name, field_value) current_choice = current_choice + 1 print_list.append((field.name, literal_value)) CompoundExplorer._print_fields(print_list) print ("") if has_explorable_fields: choice = raw_input("Enter the field number of choice: ") if choice in choice_to_compound_field_map: Explorer.explore_expr(choice_to_compound_field_map[choice][0], choice_to_compound_field_map[choice][1], True) return True else: if is_child: Explorer.return_to_parent_value() else: if is_child: Explorer.return_to_parent_value_prompt() return False @staticmethod def explore_type(name, datatype, is_child): """Function to explore struct/class and union types. See Explorer.explore_type for more information. """ type_code = datatype.code type_desc = "" if type_code == gdb.TYPE_CODE_STRUCT: type_desc = "struct/class" else: type_desc = "union" fields = datatype.fields() if CompoundExplorer._get_real_field_count(fields) == 0: if is_child: print ("%s is a %s of type '%s' with no fields." % (name, type_desc, str(datatype))) Explorer.return_to_enclosing_type_prompt() else: print ("'%s' is a %s with no fields." 
% (name, type_desc)) return False if is_child: print ("%s is a %s of type '%s' " "with the following fields:\n" % (name, type_desc, str(datatype))) else: print ("'%s' is a %s with the following " "fields:\n" % (name, type_desc)) has_explorable_fields = False current_choice = 0 choice_to_compound_field_map = { } print_list = [ ] for field in fields: if field.artificial: continue if field.is_base_class: field_desc = "base class" else: field_desc = "field" rhs = ("<Enter %d to explore this %s of type '%s'>" % (current_choice, field_desc, str(field.type))) print_list.append((field.name, rhs)) choice_to_compound_field_map[str(current_choice)] = ( field.name, field.type, field_desc) current_choice = current_choice + 1 CompoundExplorer._print_fields(print_list) print ("") if len(choice_to_compound_field_map) > 0: choice = raw_input("Enter the field number of choice: ") if choice in choice_to_compound_field_map: if is_child: new_name = ("%s '%s' of %s" % (choice_to_compound_field_map[choice][2], choice_to_compound_field_map[choice][0], name)) else: new_name = ("%s '%s' of '%s'" % (choice_to_compound_field_map[choice][2], choice_to_compound_field_map[choice][0], name)) Explorer.explore_type(new_name, choice_to_compound_field_map[choice][1], True) return True else: if is_child: Explorer.return_to_enclosing_type() else: if is_child: Explorer.return_to_enclosing_type_prompt() return False class TypedefExplorer(object): """Internal class used to explore values whose type is a typedef.""" @staticmethod def explore_expr(expr, value, is_child): """Function to explore typedef values. See Explorer.explore_expr for more information. """ actual_type = value.type.strip_typedefs() print ("The value of '%s' is of type '%s' " "which is a typedef of type '%s'" % (expr, str(value.type), str(actual_type))) Explorer.explore_expr(expr, value.cast(actual_type), is_child) return False @staticmethod def explore_type(name, datatype, is_child): """Function to explore typedef types. See Explorer.explore_type for more information. """ actual_type = datatype.strip_typedefs() if is_child: print ("The type of %s is a typedef of type '%s'." % (name, str(actual_type))) else: print ("The type '%s' is a typedef of type '%s'." % (name, str(actual_type))) Explorer.explore_type(name, actual_type, is_child) return False class ExploreUtils(object): """Internal class which provides utilities for the main command classes.""" @staticmethod def check_args(name, arg_str): """Utility to check if adequate number of arguments are passed to an explore command. Arguments: name: The name of the explore command. arg_str: The argument string passed to the explore command. Returns: True if adequate arguments are passed, false otherwise. Raises: gdb.GdbError if adequate arguments are not passed. """ if len(arg_str) < 1: raise gdb.GdbError("ERROR: '%s' requires an argument." % name) return False else: return True @staticmethod def get_type_from_str(type_str): """A utility function to deduce the gdb.Type value from a string representing the type. Arguments: type_str: The type string from which the gdb.Type value should be deduced. Returns: The deduced gdb.Type value if possible, None otherwise. """ try: # Assume the current language to be C/C++ and make a try. return gdb.parse_and_eval("(%s *)0" % type_str).type.target() except RuntimeError: # If assumption of current language to be C/C++ was wrong, then # lookup the type using the API. 
try: return gdb.lookup_type(type_str) except RuntimeError: return None @staticmethod def get_value_from_str(value_str): """A utility function to deduce the gdb.Value value from a string representing the value. Arguments: value_str: The value string from which the gdb.Value value should be deduced. Returns: The deduced gdb.Value value if possible, None otherwise. """ try: return gdb.parse_and_eval(value_str) except RuntimeError: return None class ExploreCommand(gdb.Command): """Explore a value or a type valid in the current context. Usage: explore ARG - ARG is either a valid expression or a type name. - At any stage of exploration, hit the return key (instead of a choice, if any) to return to the enclosing type or value. """ def __init__(self): super(ExploreCommand, self).__init__(name = "explore", command_class = gdb.COMMAND_DATA, prefix = True) def invoke(self, arg_str, from_tty): if ExploreUtils.check_args("explore", arg_str) == False: return # Check if it is a value value = ExploreUtils.get_value_from_str(arg_str) if value is not None: Explorer.explore_expr(arg_str, value, False) return # If it is not a value, check if it is a type datatype = ExploreUtils.get_type_from_str(arg_str) if datatype is not None: Explorer.explore_type(arg_str, datatype, False) return # If it is neither a value nor a type, raise an error. raise gdb.GdbError( ("'%s' neither evaluates to a value nor is a type " "in the current context." % arg_str)) class ExploreValueCommand(gdb.Command): """Explore value of an expression valid in the current context. Usage: explore value ARG - ARG is a valid expression. - At any stage of exploration, hit the return key (instead of a choice, if any) to return to the enclosing value. """ def __init__(self): super(ExploreValueCommand, self).__init__( name = "explore value", command_class = gdb.COMMAND_DATA) def invoke(self, arg_str, from_tty): if ExploreUtils.check_args("explore value", arg_str) == False: return value = ExploreUtils.get_value_from_str(arg_str) if value is None: raise gdb.GdbError( (" '%s' does not evaluate to a value in the current " "context." % arg_str)) return Explorer.explore_expr(arg_str, value, False) class ExploreTypeCommand(gdb.Command): """Explore a type or the type of an expression valid in the current context. Usage: explore type ARG - ARG is a valid expression or a type name. - At any stage of exploration, hit the return key (instead of a choice, if any) to return to the enclosing type. """ def __init__(self): super(ExploreTypeCommand, self).__init__( name = "explore type", command_class = gdb.COMMAND_DATA) def invoke(self, arg_str, from_tty): if ExploreUtils.check_args("explore type", arg_str) == False: return datatype = ExploreUtils.get_type_from_str(arg_str) if datatype is not None: Explorer.explore_type(arg_str, datatype, False) return value = ExploreUtils.get_value_from_str(arg_str) if value is not None: print ("'%s' is of type '%s'." % (arg_str, str(value.type))) Explorer.explore_type(str(value.type), value.type, False) return raise gdb.GdbError(("'%s' is not a type or value in the current " "context." % arg_str)) Explorer.init_env() ExploreCommand() ExploreValueCommand() ExploreTypeCommand()
gpl-2.0
attakei/readthedocs-oauth
readthedocs/vcs_support/backends/git.py
29
6852
import re import logging import csv import os from StringIO import StringIO from readthedocs.projects.exceptions import ProjectImportError from readthedocs.vcs_support.base import BaseVCS, VCSVersion log = logging.getLogger(__name__) class Backend(BaseVCS): supports_tags = True supports_branches = True fallback_branch = 'master' # default branch def __init__(self, *args, **kwargs): super(Backend, self).__init__(*args, **kwargs) self.token = kwargs.get('token', None) self.repo_url = self._get_clone_url() def _get_clone_url(self): if '://' in self.repo_url: hacked_url = self.repo_url.split('://')[1] hacked_url = re.sub('.git$', '', hacked_url) clone_url = 'https://%s' % hacked_url if self.token: clone_url = 'https://%s@%s' % (self.token, hacked_url) return clone_url # Don't edit URL because all hosts aren't the same # else: # clone_url = 'git://%s' % (hacked_url) return self.repo_url def set_remote_url(self, url): return self.run('git', 'remote', 'set-url', 'origin', url) def update(self): # Use checkout() to update repo self.checkout() def repo_exists(self): code, out, err = self.run('git', 'status') return code == 0 def fetch(self): code, out, err = self.run('git', 'fetch', '--tags', '--prune') if code != 0: raise ProjectImportError( "Failed to get code from '%s' (git fetch): %s\n\nStderr:\n\n%s\n\n" % ( self.repo_url, code, err) ) def checkout_revision(self, revision=None): if not revision: branch = self.default_branch or self.fallback_branch revision = 'origin/%s' % branch code, out, err = self.run('git', 'checkout', '--force', '--quiet', revision) if code != 0: log.warning("Failed to checkout revision '%s': %s" % ( revision, code)) return [code, out, err] def clone(self): code, out, err = self.run('git', 'clone', '--recursive', '--quiet', self.repo_url, '.') if code != 0: raise ProjectImportError( "Failed to get code from '%s' (git clone): %s" % ( self.repo_url, code) ) @property def tags(self): retcode, stdout, err = self.run('git', 'show-ref', '--tags') # error (or no tags found) if retcode != 0: return [] return self.parse_tags(stdout) def parse_tags(self, data): """ Parses output of show-ref --tags, eg: 3b32886c8d3cb815df3793b3937b2e91d0fb00f1 refs/tags/2.0.0 bd533a768ff661991a689d3758fcfe72f455435d refs/tags/2.0.1 c0288a17899b2c6818f74e3a90b77e2a1779f96a refs/tags/2.0.2 a63a2de628a3ce89034b7d1a5ca5e8159534eef0 refs/tags/2.1.0.beta2 c7fc3d16ed9dc0b19f0d27583ca661a64562d21e refs/tags/2.1.0.rc1 edc0a2d02a0cc8eae8b67a3a275f65cd126c05b1 refs/tags/2.1.0.rc2 Into VCSTag objects with the tag name as verbose_name and the commit hash as identifier. 
""" # parse the lines into a list of tuples (commit-hash, tag ref name) raw_tags = csv.reader(StringIO(data), delimiter=' ') vcs_tags = [] for row in raw_tags: row = filter(lambda f: f != '', row) if row == []: continue commit_hash, name = row clean_name = name.split('/')[-1] vcs_tags.append(VCSVersion(self, commit_hash, clean_name)) return vcs_tags @property def branches(self): # Only show remote branches retcode, stdout, err = self.run('git', 'branch', '-r') # error (or no tags found) if retcode != 0: return [] return self.parse_branches(stdout) def parse_branches(self, data): """ Parse output of git branch -r, eg: origin/2.0.X origin/HEAD -> origin/master origin/develop origin/master origin/release/2.0.0 origin/release/2.1.0 """ clean_branches = [] raw_branches = csv.reader(StringIO(data), delimiter=' ') for branch in raw_branches: branch = filter(lambda f: f != '' and f != '*', branch) # Handle empty branches if len(branch): branch = branch[0] if branch.startswith('origin/'): cut_len = len('origin/') slug = branch[cut_len:].replace('/', '-') if slug in ['HEAD']: continue clean_branches.append(VCSVersion(self, branch, slug)) else: # Believe this is dead code. slug = branch.replace('/', '-') clean_branches.append(VCSVersion(self, branch, slug)) return clean_branches @property def commit(self): retcode, stdout, err = self.run('git', 'rev-parse', 'HEAD') return stdout.strip() def checkout(self, identifier=None): self.check_working_dir() # Clone or update repository if self.repo_exists(): self.set_remote_url(self.repo_url) self.fetch() else: self.make_clean_working_dir() self.clone() # Find proper identifier if not identifier: identifier = self.default_branch or self.fallback_branch identifier = self.find_ref(identifier) # Checkout the correct identifier for this branch. code, out, err = self.checkout_revision(identifier) if code != 0: return code, out, err # Clean any remains of previous checkouts self.run('git', 'clean', '-d', '-f', '-f') # Update submodules self.run('git', 'submodule', 'sync') self.run('git', 'submodule', 'update', '--init', '--recursive', '--force') return code, out, err def find_ref(self, ref): # Check if ref starts with 'origin/' if ref.startswith('origin/'): return ref # Check if ref is a branch of the origin remote if self.ref_exists('remotes/origin/' + ref): return 'origin/' + ref return ref def ref_exists(self, ref): code, out, err = self.run('git', 'show-ref', ref) return code == 0 @property def env(self): env = super(Backend, self).env env['GIT_DIR'] = os.path.join(self.working_dir, '.git') return env
mit
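The parse_tags helper in the file above turns `git show-ref --tags` output into (commit hash, tag name) pairs. A minimal standalone sketch of just that parsing step, free of the Backend/VCSVersion machinery and using a made-up sample string rather than real git output:

def parse_show_ref_tags(data):
    """Turn `git show-ref --tags` output into (commit_hash, tag_name) tuples."""
    tags = []
    for line in data.splitlines():
        parts = line.split()
        if len(parts) != 2:
            continue  # skip blank or malformed lines
        commit_hash, ref = parts
        tags.append((commit_hash, ref.split('/')[-1]))  # drop the refs/tags/ prefix
    return tags

sample = ("3b32886c8d3cb815df3793b3937b2e91d0fb00f1 refs/tags/2.0.0\n"
          "bd533a768ff661991a689d3758fcfe72f455435d refs/tags/2.0.1")
print(parse_show_ref_tags(sample))
# [('3b32886c8d3cb815df3793b3937b2e91d0fb00f1', '2.0.0'),
#  ('bd533a768ff661991a689d3758fcfe72f455435d', '2.0.1')]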
jrversteegh/flexx
flexxamples/testers/mouse_and_touch.py
2
2009
""" This example is intended to test mouse/touch events. """ from time import time from flexx import flx class Test(flx.Widget): def init(self): self.t = time() with flx.HFix(): self.label1 = flx.Label(flex=2, style='overflow-y:scroll; font-size:60%;') flx.Widget(flex=1) with flx.VFix(flex=2): flx.Widget(flex=1) test_widget1 = flx.Widget(flex=2, style='background: #afa;') flx.Widget(flex=1) test_widget2 = flx.Widget(flex=2, style='background: #faa;') flx.Widget(flex=1) flx.Widget(flex=1) self.label2 = flx.Label(flex=1, style='overflow-y:scroll; font-size:60%;') for name in ['pointerdown', 'pointermove', 'pointerup', 'pointercancel', 'mousedown', 'mousemove', 'mouseup', 'click', 'dblclick', 'touchstart', 'touchmove', 'touchend', 'touchcancel' ]: test_widget1.node.addEventListener(name, lambda e: self.show_event1(e.type)) def reaction(*events): for ev in events: self.show_event2(ev.type) test_widget2.reaction(reaction, 'pointer_down', 'pointer_move', 'pointer_up', 'pointer_cancel', 'pointer_click', 'pointer_double_click', ) @flx.action def show_event1(self, name): dt = time() - self.t lines = self.label1.html.split('<br>') lines = lines[:200] lines.insert(0, f'{dt:.1f} {name}') self.label1.set_html('<br>'.join(lines)) @flx.action def show_event2(self, name): dt = time() - self.t lines = self.label2.html.split('<br>') lines = lines[:200] lines.insert(0, f'{dt:.1f} {name}') self.label2.set_html('<br>'.join(lines)) a = flx.App(Test) m = a.launch() flx.run()
bsd-2-clause
bloyl/mne-python
mne/datasets/sleep_physionet/_utils.py
7
8646
# -*- coding: utf-8 -*- # Authors: Alexandre Gramfort <[email protected]> # Joan Massich <[email protected]> # # License: BSD Style. import os import os.path as op import numpy as np from distutils.version import LooseVersion from ...utils import (_fetch_file, verbose, _TempDir, _check_pandas_installed, _on_missing) from ..utils import _get_path AGE_SLEEP_RECORDS = op.join(op.dirname(__file__), 'age_records.csv') TEMAZEPAM_SLEEP_RECORDS = op.join(op.dirname(__file__), 'temazepam_records.csv') TEMAZEPAM_RECORDS_URL = 'https://physionet.org/physiobank/database/sleep-edfx/ST-subjects.xls' # noqa: E501 TEMAZEPAM_RECORDS_URL_SHA1 = 'f52fffe5c18826a2bd4c5d5cb375bb4a9008c885' AGE_RECORDS_URL = 'https://physionet.org/physiobank/database/sleep-edfx/SC-subjects.xls' # noqa: E501 AGE_RECORDS_URL_SHA1 = '0ba6650892c5d33a8e2b3f62ce1cc9f30438c54f' sha1sums_fname = op.join(op.dirname(__file__), 'SHA1SUMS') def _fetch_one(fname, hashsum, path, force_update, base_url): # Fetch the file url = base_url + '/' + fname destination = op.join(path, fname) if not op.isfile(destination) or force_update: if op.isfile(destination): os.remove(destination) if not op.isdir(op.dirname(destination)): os.makedirs(op.dirname(destination)) _fetch_file(url, destination, print_destination=False, hash_=hashsum, hash_type='sha1') return destination @verbose def _data_path(path=None, force_update=False, update_path=None, verbose=None): """Get path to local copy of EEG Physionet age Polysomnography dataset URL. This is a low-level function useful for getting a local copy of a remote Polysomnography dataset :footcite:`KempEtAl2000` which is available at PhysioNet :footcite:`GoldbergerEtAl2000`. Parameters ---------- path : None | str Location of where to look for the data storing location. If None, the environment variable or config parameter ``MNE_DATASETS_PHYSIONET_SLEEP_PATH`` is used. If it doesn't exist, the "~/mne_data" directory is used. If the dataset is not found under the given path, the data will be automatically downloaded to the specified folder. force_update : bool Force update of the dataset even if a local copy exists. update_path : bool | None If True, set the MNE_DATASETS_PHYSIONET_SLEEP_PATH in mne-python config to the given path. If None, the user is prompted. %(verbose)s Returns ------- path : list of str Local path to the given data file. This path is contained inside a list of length one, for compatibility. References ---------- .. footbibliography:: """ # noqa: E501 key = 'PHYSIONET_SLEEP_PATH' name = 'PHYSIONET_SLEEP' path = _get_path(path, key, name) return op.join(path, 'physionet-sleep-data') def _update_sleep_temazepam_records(fname=TEMAZEPAM_SLEEP_RECORDS): """Help function to download Physionet's temazepam dataset records.""" pd = _check_pandas_installed() tmp = _TempDir() # Download subjects info. subjects_fname = op.join(tmp, 'ST-subjects.xls') _fetch_file(url=TEMAZEPAM_RECORDS_URL, file_name=subjects_fname, hash_=TEMAZEPAM_RECORDS_URL_SHA1, hash_type='sha1') # Load and Massage the checksums. sha1_df = pd.read_csv(sha1sums_fname, sep=' ', header=None, names=['sha', 'fname'], engine='python') select_age_records = (sha1_df.fname.str.startswith('ST') & sha1_df.fname.str.endswith('edf')) sha1_df = sha1_df[select_age_records] sha1_df['id'] = [name[:6] for name in sha1_df.fname] # Load and massage the data. 
data = pd.read_excel(subjects_fname, header=[0, 1]) if LooseVersion(pd.__version__) >= LooseVersion('0.24.0'): data = data.set_index(('Subject - age - sex', 'Nr')) data.index.name = 'subject' data.columns.names = [None, None] data = (data.set_index([('Subject - age - sex', 'Age'), ('Subject - age - sex', 'M1/F2')], append=True) .stack(level=0).reset_index()) data = data.rename(columns={('Subject - age - sex', 'Age'): 'age', ('Subject - age - sex', 'M1/F2'): 'sex', 'level_3': 'drug'}) data['id'] = ['ST7{:02d}{:1d}'.format(s, n) for s, n in zip(data.subject, data['night nr'])] data = pd.merge(sha1_df, data, how='outer', on='id') data['record type'] = (data.fname.str.split('-', expand=True)[1] .str.split('.', expand=True)[0] .astype('category')) data = data.set_index(['id', 'subject', 'age', 'sex', 'drug', 'lights off', 'night nr', 'record type']).unstack() data.columns = [l1 + '_' + l2 for l1, l2 in data.columns] if LooseVersion(pd.__version__) < LooseVersion('0.21.0'): data = data.reset_index().drop(labels=['id'], axis=1) else: data = data.reset_index().drop(columns=['id']) data['sex'] = (data.sex.astype('category') .cat.rename_categories({1: 'male', 2: 'female'})) data['drug'] = data['drug'].str.split(expand=True)[0] data['subject_orig'] = data['subject'] data['subject'] = data.index // 2 # to make sure index is from 0 to 21 # Save the data. data.to_csv(fname, index=False) def _update_sleep_age_records(fname=AGE_SLEEP_RECORDS): """Help function to download Physionet's age dataset records.""" pd = _check_pandas_installed() tmp = _TempDir() # Download subjects info. subjects_fname = op.join(tmp, 'SC-subjects.xls') _fetch_file(url=AGE_RECORDS_URL, file_name=subjects_fname, hash_=AGE_RECORDS_URL_SHA1, hash_type='sha1') # Load and Massage the checksums. sha1_df = pd.read_csv(sha1sums_fname, sep=' ', header=None, names=['sha', 'fname'], engine='python') select_age_records = (sha1_df.fname.str.startswith('SC') & sha1_df.fname.str.endswith('edf')) sha1_df = sha1_df[select_age_records] sha1_df['id'] = [name[:6] for name in sha1_df.fname] # Load and massage the data. data = pd.read_excel(subjects_fname) data = data.rename(index=str, columns={'sex (F=1)': 'sex', 'LightsOff': 'lights off'}) data['sex'] = (data.sex.astype('category') .cat.rename_categories({1: 'female', 2: 'male'})) data['id'] = ['SC4{:02d}{:1d}'.format(s, n) for s, n in zip(data.subject, data.night)] data = data.set_index('id').join(sha1_df.set_index('id')).dropna() data['record type'] = (data.fname.str.split('-', expand=True)[1] .str.split('.', expand=True)[0] .astype('category')) if LooseVersion(pd.__version__) < LooseVersion('0.21.0'): data = data.reset_index().drop(labels=['id'], axis=1) else: data = data.reset_index().drop(columns=['id']) data = data[['subject', 'night', 'record type', 'age', 'sex', 'lights off', 'sha', 'fname']] # Save the data. data.to_csv(fname, index=False) def _check_subjects(subjects, n_subjects, missing=None, on_missing='raise'): """Check whether subjects are available. Parameters ---------- subjects : list Subject numbers to be checked. n_subjects : int Number of subjects available. missing : list | None Subject numbers that are missing. on_missing : 'raise' | 'warn' | 'ignore' What to do if one or several subjects are not available. Valid keys are 'raise' | 'warn' | 'ignore'. Default is 'error'. If on_missing is 'warn' it will proceed but warn, if 'ignore' it will proceed silently. 
""" valid_subjects = np.arange(n_subjects) if missing is not None: valid_subjects = np.setdiff1d(valid_subjects, missing) unknown_subjects = np.setdiff1d(subjects, valid_subjects) if unknown_subjects.size > 0: subjects_list = ', '.join([str(s) for s in unknown_subjects]) msg = (f'This dataset contains subjects 0 to {n_subjects - 1} with ' f'missing subjects {missing}. Unknown subjects: ' f'{subjects_list}.') _on_missing(on_missing, msg)
bsd-3-clause
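The _check_subjects helper at the end of the file above reduces to two numpy.setdiff1d calls. A tiny self-contained sketch of that validation logic, with illustrative subject numbers rather than the dataset's real ones:

import numpy as np

# Illustrative numbers only: pretend the dataset has 83 subjects, some missing.
subjects = [0, 5, 90]
n_subjects = 83
missing = [39, 68, 69, 78, 79]

valid_subjects = np.setdiff1d(np.arange(n_subjects), missing)
unknown_subjects = np.setdiff1d(subjects, valid_subjects)
print(unknown_subjects)  # -> [90]; only 0..82 (minus the missing ones) are available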
mspark93/VTK
ThirdParty/Twisted/twisted/python/test/pullpipe.py
41
1238
#!/usr/bin/python # -*- test-case-name: twisted.python.test.test_sendmsg -*- # Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. import sys, os from struct import unpack # This makes me sad. Why aren't things nice? sys.path.insert(0, __file__.rsplit('/', 4)[0]) from twisted.python.sendmsg import recv1msg def recvfd(socketfd): """ Receive a file descriptor from a L{send1msg} message on the given C{AF_UNIX} socket. @param socketfd: An C{AF_UNIX} socket, attached to another process waiting to send sockets via the ancillary data mechanism in L{send1msg}. @param fd: C{int} @return: a 2-tuple of (new file descriptor, description). @rtype: 2-tuple of (C{int}, C{str}) """ data, flags, ancillary = recv1msg(socketfd) [(cmsg_level, cmsg_type, packedFD)] = ancillary # cmsg_level and cmsg_type really need to be SOL_SOCKET / SCM_RIGHTS, but # since those are the *only* standard values, there's not much point in # checking. [unpackedFD] = unpack("i", packedFD) return (unpackedFD, data) if __name__ == '__main__': fd, description = recvfd(int(sys.argv[1])) os.write(fd, "Test fixture data: %s.\n" % (description,)) os.close(fd)
bsd-3-clause
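The fixture above receives a file descriptor over an AF_UNIX socket through SCM_RIGHTS ancillary data using twisted.python.sendmsg. A rough standard-library sketch of the same mechanism, assuming Python 3.9+ for socket.send_fds/recv_fds; it illustrates the idea but is not a drop-in replacement for the Twisted test fixture:

import os
import socket

# Pass the write end of a pipe across a socket pair within one process.
parent, child = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
read_end, write_end = os.pipe()

socket.send_fds(parent, [b"test fixture data"], [write_end])  # fd rides as ancillary data
msg, fds, flags, addr = socket.recv_fds(child, 1024, 1)

os.write(fds[0], b"written through the received descriptor\n")
print(msg)                      # b'test fixture data'
print(os.read(read_end, 1024))  # b'written through the received descriptor\n'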
danielgoncalves/PyNFe
pynfe/entidades/base.py
1
1483
# -*- coding: utf-8 -*- # # PyNFe/pynfe/entidades/base.py # # Projeto PyNFe # Copyright (C) 2010 Marinho Brandão et al # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 3 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # class Entidade(object): _fonte_dados = None def __init__(self, **kwargs): # Codigo para dinamizar a criacao de instancias de entidade, # aplicando os valores dos atributos na instanciacao for k, v in kwargs.items(): setattr(self, k, v) # Adiciona o objeto à fonte de dados informada if not self._fonte_dados: from fonte_dados import _fonte_dados self._fonte_dados = _fonte_dados self._fonte_dados.adicionar_objeto(self) def __repr__(self): return '<%s %s>'%(self.__class__.__name__, str(self)) class Lote(object): pass
lgpl-3.0
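Entidade.__init__ in the file above applies every keyword argument as an instance attribute before registering the object with the data source. A standalone sketch of just that kwargs-to-attributes pattern; the class name and field names below are placeholders, not real PyNFe entities:

class Registro(object):
    """Hypothetical stand-in for Entidade, without the fonte_dados registration."""
    def __init__(self, **kwargs):
        for chave, valor in kwargs.items():
            setattr(self, chave, valor)  # every keyword becomes an attribute

r = Registro(cnpj='00000000000000', razao_social='Exemplo Ltda')
print(r.cnpj, r.razao_social)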
lucaceresoli/linux
arch/ia64/scripts/unwcheck.py
13143
1714
#!/usr/bin/python # # Usage: unwcheck.py FILE # # This script checks the unwind info of each function in file FILE # and verifies that the sum of the region-lengths matches the total # length of the function. # # Based on a shell/awk script originally written by Harish Patil, # which was converted to Perl by Matthew Chapman, which was converted # to Python by David Mosberger. # import os import re import sys if len(sys.argv) != 2: print "Usage: %s FILE" % sys.argv[0] sys.exit(2) readelf = os.getenv("READELF", "readelf") start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]") rlen_pattern = re.compile(".*rlen=([0-9]+)") def check_func (func, slots, rlen_sum): if slots != rlen_sum: global num_errors num_errors += 1 if not func: func = "[%#x-%#x]" % (start, end) print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum) return num_funcs = 0 num_errors = 0 func = False slots = 0 rlen_sum = 0 for line in os.popen("%s -u %s" % (readelf, sys.argv[1])): m = start_pattern.match(line) if m: check_func(func, slots, rlen_sum) func = m.group(1) start = long(m.group(2), 16) end = long(m.group(3), 16) slots = 3 * (end - start) / 16 rlen_sum = 0L num_funcs += 1 else: m = rlen_pattern.match(line) if m: rlen_sum += long(m.group(1)) check_func(func, slots, rlen_sum) if num_errors == 0: print "No errors detected in %u functions." % num_funcs else: if num_errors > 1: err="errors" else: err="error" print "%u %s detected in %u functions." % (num_errors, err, num_funcs) sys.exit(1)
gpl-2.0
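The script above is Python 2 only (print statements, long, 0L); its core is two regular expressions applied to readelf -u output. A small Python 3 sketch of what they extract, run against made-up lines rather than real readelf output:

import re

start_pattern = re.compile(r"<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(r".*rlen=([0-9]+)")

line = "<my_func>: [0x4000000000000510-0x4000000000000550]"  # made-up sample
m = start_pattern.match(line)
func, start, end = m.group(1), int(m.group(2), 16), int(m.group(3), 16)
slots = 3 * (end - start) // 16  # ia64: three instruction slots per 16-byte bundle
print(func, hex(start), hex(end), slots)  # my_func 0x4000000000000510 0x4000000000000550 12

print(rlen_pattern.match("    prologue region, rlen=12").group(1))  # '12'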
flyfei/python-for-android
python3-alpha/python3-src/Lib/distutils/file_util.py
54
7810
"""distutils.file_util Utility functions for operating on single files. """ import os from distutils.errors import DistutilsFileError from distutils import log # for generating verbose output in 'copy_file()' _copy_action = { None: 'copying', 'hard': 'hard linking', 'sym': 'symbolically linking' } def _copy_file_contents(src, dst, buffer_size=16*1024): """Copy the file 'src' to 'dst'; both must be filenames. Any error opening either file, reading from 'src', or writing to 'dst', raises DistutilsFileError. Data is read/written in chunks of 'buffer_size' bytes (default 16k). No attempt is made to handle anything apart from regular files. """ # Stolen from shutil module in the standard library, but with # custom error-handling added. fsrc = None fdst = None try: try: fsrc = open(src, 'rb') except os.error as e: raise DistutilsFileError("could not open '%s': %s" % (src, e.strerror)) if os.path.exists(dst): try: os.unlink(dst) except os.error as e: raise DistutilsFileError( "could not delete '%s': %s" % (dst, e.strerror)) try: fdst = open(dst, 'wb') except os.error as e: raise DistutilsFileError( "could not create '%s': %s" % (dst, e.strerror)) while True: try: buf = fsrc.read(buffer_size) except os.error as e: raise DistutilsFileError( "could not read from '%s': %s" % (src, e.strerror)) if not buf: break try: fdst.write(buf) except os.error as e: raise DistutilsFileError( "could not write to '%s': %s" % (dst, e.strerror)) finally: if fdst: fdst.close() if fsrc: fsrc.close() def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0, link=None, verbose=1, dry_run=0): """Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is copied there with the same name; otherwise, it must be a filename. (If the file exists, it will be ruthlessly clobbered.) If 'preserve_mode' is true (the default), the file's mode (type and permission bits, or whatever is analogous on the current platform) is copied. If 'preserve_times' is true (the default), the last-modified and last-access times are copied as well. If 'update' is true, 'src' will only be copied if 'dst' does not exist, or if 'dst' does exist but is older than 'src'. 'link' allows you to make hard links (os.link) or symbolic links (os.symlink) instead of copying: set it to "hard" or "sym"; if it is None (the default), files are copied. Don't set 'link' on systems that don't support it: 'copy_file()' doesn't check if hard or symbolic linking is available. Under Mac OS, uses the native file copy function in macostools; on other systems, uses '_copy_file_contents()' to copy file contents. Return a tuple (dest_name, copied): 'dest_name' is the actual name of the output file, and 'copied' is true if the file was copied (or would have been copied, if 'dry_run' true). """ # XXX if the destination file already exists, we clobber it if # copying, but blow up if linking. Hmmm. And I don't know what # macostools.copyfile() does. Should definitely be consistent, and # should probably blow up if destination exists and we would be # changing it (ie. it's not already a hard/soft link to src OR # (not update) and (src newer than dst). 
from distutils.dep_util import newer from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE if not os.path.isfile(src): raise DistutilsFileError( "can't copy '%s': doesn't exist or not a regular file" % src) if os.path.isdir(dst): dir = dst dst = os.path.join(dst, os.path.basename(src)) else: dir = os.path.dirname(dst) if update and not newer(src, dst): if verbose >= 1: log.debug("not copying %s (output up-to-date)", src) return (dst, 0) try: action = _copy_action[link] except KeyError: raise ValueError("invalid value '%s' for 'link' argument" % link) if verbose >= 1: if os.path.basename(dst) == os.path.basename(src): log.info("%s %s -> %s", action, src, dir) else: log.info("%s %s -> %s", action, src, dst) if dry_run: return (dst, 1) # If linking (hard or symbolic), use the appropriate system call # (Unix only, of course, but that's the caller's responsibility) elif link == 'hard': if not (os.path.exists(dst) and os.path.samefile(src, dst)): os.link(src, dst) elif link == 'sym': if not (os.path.exists(dst) and os.path.samefile(src, dst)): os.symlink(src, dst) # Otherwise (non-Mac, not linking), copy the file contents and # (optionally) copy the times and mode. else: _copy_file_contents(src, dst) if preserve_mode or preserve_times: st = os.stat(src) # According to David Ascher <[email protected]>, utime() should be done # before chmod() (at least under NT). if preserve_times: os.utime(dst, (st[ST_ATIME], st[ST_MTIME])) if preserve_mode: os.chmod(dst, S_IMODE(st[ST_MODE])) return (dst, 1) # XXX I suspect this is Unix-specific -- need porting help! def move_file (src, dst, verbose=1, dry_run=0): """Move a file 'src' to 'dst'. If 'dst' is a directory, the file will be moved into it with the same name; otherwise, 'src' is just renamed to 'dst'. Return the new full name of the file. Handles cross-device moves on Unix using 'copy_file()'. What about other systems??? """ from os.path import exists, isfile, isdir, basename, dirname import errno if verbose >= 1: log.info("moving %s -> %s", src, dst) if dry_run: return dst if not isfile(src): raise DistutilsFileError("can't move '%s': not a regular file" % src) if isdir(dst): dst = os.path.join(dst, basename(src)) elif exists(dst): raise DistutilsFileError( "can't move '%s': destination '%s' already exists" % (src, dst)) if not isdir(dirname(dst)): raise DistutilsFileError( "can't move '%s': destination '%s' not a valid path" % (src, dst)) copy_it = False try: os.rename(src, dst) except os.error as e: (num, msg) = e if num == errno.EXDEV: copy_it = True else: raise DistutilsFileError( "couldn't move '%s' to '%s': %s" % (src, dst, msg)) if copy_it: copy_file(src, dst, verbose=verbose) try: os.unlink(src) except os.error as e: (num, msg) = e try: os.unlink(dst) except os.error: pass raise DistutilsFileError( "couldn't move '%s' to '%s' by copy/delete: " "delete '%s' failed: %s" % (src, dst, src, msg)) return dst def write_file (filename, contents): """Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it. """ f = open(filename, "w") try: for line in contents: f.write(line + "\n") finally: f.close()
apache-2.0
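copy_file in the module above returns a (dest_name, copied) tuple and honours the update flag. A minimal usage sketch with hypothetical file names (it requires a notes.txt in the working directory); note that distutils is deprecated in current Python, so this only mirrors how this particular module is meant to be called:

from distutils.dir_util import mkpath
from distutils.file_util import copy_file

mkpath('build')  # make sure the destination directory exists
dest, copied = copy_file('notes.txt', 'build/', update=1, verbose=0)
print(dest, copied)  # build/notes.txt 1 on the first run; copied == 0 when up to date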
paour/weblate
weblate/trans/decorators.py
2
1236
# -*- coding: utf-8 -*- # # Copyright © 2012 - 2014 Michal Čihař <[email protected]> # # This file is part of Weblate <http://weblate.org/> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from django.contrib.auth.decorators import user_passes_test def any_permission_required(*args): ''' A decorator which checks user has any of the given permissions. permission required can not be used in its place as that takes only a single permission. ''' def test_func(user): for perm in args: if user.has_perm(perm): return True return False return user_passes_test(test_func)
gpl-3.0
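any_permission_required above wraps Django's user_passes_test with an OR over several permissions. A hypothetical usage sketch, assuming a configured Django project and using placeholder permission strings and a placeholder view rather than Weblate's real ones:

from django.http import HttpResponse
from weblate.trans.decorators import any_permission_required

@any_permission_required('trans.save_translation', 'trans.author_translation')
def edit(request):
    # Reached only if request.user has at least one of the two permissions;
    # otherwise user_passes_test redirects to the login page.
    return HttpResponse('ok')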
garethsaxby/python_koans
python3/koans/about_tuples.py
1
2290
#!/usr/bin/env python # -*- coding: utf-8 -*- from runner.koan import * class AboutTuples(Koan): def test_creating_a_tuple(self): count_of_three = (1, 2, 5) self.assertEqual(5, count_of_three[2]) def test_tuples_are_immutable_so_item_assignment_is_not_possible(self): count_of_three = (1, 2, 5) try: count_of_three[2] = "three" except TypeError as ex: msg = ex.args[0] # Note, assertRegex() uses regular expression pattern matching, # so you don't have to copy the whole message. self.assertRegex(msg, "assign") def test_tuples_are_immutable_so_appending_is_not_possible(self): count_of_three = (1, 2, 5) with self.assertRaises(AttributeError): count_of_three.append("boom") # Tuples are less flexible than lists, but faster. def test_tuples_can_only_be_changed_through_replacement(self): count_of_three = (1, 2, 5) list_count = list(count_of_three) list_count.append("boom") count_of_three = tuple(list_count) self.assertEqual((1, 2, 5, 'boom'), count_of_three) def test_tuples_of_one_look_peculiar(self): self.assertEqual(int, (1).__class__) self.assertEqual(tuple, (1,).__class__) self.assertEqual(("Hello comma!",), ("Hello comma!", )) def test_tuple_constructor_can_be_surprising(self): self.assertEqual(('S', 'u', 'r', 'p', 'r', 'i', 's', 'e', '!'), tuple("Surprise!")) def test_creating_empty_tuples(self): self.assertEqual(() , ()) self.assertEqual(() , tuple()) #Sometimes less confusing def test_tuples_can_be_embedded(self): lat = (37, 14, 6, 'N') lon = (115, 48, 40, 'W') place = ('Area 51', lat, lon) self.assertEqual(('Area 51', (37, 14, 6, 'N'), (115, 48, 40, 'W')), place) def test_tuples_are_good_for_representing_records(self): locations = [ ("Illuminati HQ", (38, 52, 15.56, 'N'), (77, 3, 21.46, 'W')), ("Stargate B", (41, 10, 43.92, 'N'), (1, 49, 34.29, 'W')), ] locations.append( ("Cthulu", (26, 40, 1, 'N'), (70, 45, 7, 'W')) ) self.assertEqual("Cthulu", locations[2][0]) self.assertEqual(15.56, locations[0][1][2])
mit
Buggaboo/gimp-plugin-export-layers
export_layers/pygimplib/tee.py
1
5139
#------------------------------------------------------------------------------- # # This file is part of pygimplib. # # Copyright (C) 2014, 2015 khalim19 <[email protected]> # # pygimplib is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # pygimplib is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with pygimplib. If not, see <http://www.gnu.org/licenses/>. # #------------------------------------------------------------------------------- """ This module defines a class to log "stdout" and "stderr" output to the specified file, much like the Unix "tee" command. """ #=============================================================================== from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals from __future__ import division str = unicode #=============================================================================== import sys from datetime import datetime #=============================================================================== def get_log_header(log_header_title): return '\n'.join(('', '=' * 80, log_header_title, str(datetime.now()), '\n')) # Original version: # http://mail.python.org/pipermail/python-list/2007-May/438106.html # Author: Peter Otten class Tee(object): """ This class copies stdout or stderr output to a specified file, much like the Unix "tee" command. Attributes: * `stream` - Either `sys.stdout` or `sys.stderr`. Other objects are invalid and raise `ValueError`. * `log_header_title` - Header text to write when writing into the file for the first time. """ __STATES = _RUNNING_FIRST_TIME, _RUNNING, _NOT_RUNNING = (0, 1, 2) def __init__(self, stream, file_object, log_header_title=None, start=True, flush_file=False): """ Parameters: * `file_object` - File or file-like object to write to. * `start` - If True, start `Tee` upon instantiation, otherwise don't. To start later, pass `start=False` and call the `start()` method when desired. * `flush_file` - If True, flush the file after each write. """ self._streams = {sys.stdout: 'stdout', sys.stderr: 'stderr'} self.log_header_title = log_header_title if log_header_title is not None else "" self.flush_file = flush_file self._file = None self._state = self._NOT_RUNNING self._orig_stream = None self._stream_name = "" self._stream = None self.stream = stream if start: self.start(file_object) def __del__(self): if self.is_running(): self.stop() @property def stream(self): return self._stream @stream.setter def stream(self, value): self._stream = value if value in self._streams: self._stream_name = self._streams[value] else: raise ValueError("invalid stream; must be sys.stdout or sys.stderr") def start(self, file_object): """ Start `Tee` if not started during the object instantiation. Parameters: * `file_object` - File or file-like object to write to. """ self._orig_stream = self.stream setattr(sys, self._stream_name, self) self._file = file_object self._state = self._RUNNING_FIRST_TIME def stop(self): """ Stop `Tee`, i.e. stop writing to the file. 
""" setattr(sys, self._stream_name, self._orig_stream) self._file.close() self._file = None self._state = self._NOT_RUNNING def is_running(self): """ True if `Tee` is running (i.e. writing to file), False otherwise. """ return self._state != self._NOT_RUNNING def write(self, data): """ Write output to the stream and the file specified in this object. This is a method normally used by `sys.stdout`, `sys.stderr` and file-like objects to write output. """ if self._state == self._RUNNING_FIRST_TIME: self._file.write(get_log_header(self.log_header_title).encode()) self._write_with_flush(data + b'\n') self._state = self._RUNNING else: if not self.flush_file: self.write = self._write else: self.write = self._write_with_flush def _write(self, data): self._file.write(data) self._stream.write(data) def _write_with_flush(self, data): self._file.write(data) self._file.flush() self._stream.write(data) def flush(self): """ Flush output. This is a method implemented in `sys.stdout`, `sys.stderr` and file-like objects to flush the internal buffer and force writing output immediately. """ self._file.flush() self._stream.flush()
gpl-3.0
enitihas/SAC-Website
venv/bin/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/hebrewprober.py
2929
13359
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Shy Shalom # Portions created by the Initial Developer are Copyright (C) 2005 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .charsetprober import CharSetProber from .constants import eNotMe, eDetecting from .compat import wrap_ord # This prober doesn't actually recognize a language or a charset. # It is a helper prober for the use of the Hebrew model probers ### General ideas of the Hebrew charset recognition ### # # Four main charsets exist in Hebrew: # "ISO-8859-8" - Visual Hebrew # "windows-1255" - Logical Hebrew # "ISO-8859-8-I" - Logical Hebrew # "x-mac-hebrew" - ?? Logical Hebrew ?? # # Both "ISO" charsets use a completely identical set of code points, whereas # "windows-1255" and "x-mac-hebrew" are two different proper supersets of # these code points. windows-1255 defines additional characters in the range # 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific # diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6. # x-mac-hebrew defines similar additional code points but with a different # mapping. # # As far as an average Hebrew text with no diacritics is concerned, all four # charsets are identical with respect to code points. Meaning that for the # main Hebrew alphabet, all four map the same values to all 27 Hebrew letters # (including final letters). # # The dominant difference between these charsets is their directionality. # "Visual" directionality means that the text is ordered as if the renderer is # not aware of a BIDI rendering algorithm. The renderer sees the text and # draws it from left to right. The text itself when ordered naturally is read # backwards. A buffer of Visual Hebrew generally looks like so: # "[last word of first line spelled backwards] [whole line ordered backwards # and spelled backwards] [first word of first line spelled backwards] # [end of line] [last word of second line] ... etc' " # adding punctuation marks, numbers and English text to visual text is # naturally also "visual" and from left to right. # # "Logical" directionality means the text is ordered "naturally" according to # the order it is read. It is the responsibility of the renderer to display # the text from right to left. A BIDI algorithm is used to place general # punctuation marks, numbers and English text in the text. # # Texts in x-mac-hebrew are almost impossible to find on the Internet. From # what little evidence I could find, it seems that its general directionality # is Logical. 
# # To sum up all of the above, the Hebrew probing mechanism knows about two # charsets: # Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are # backwards while line order is natural. For charset recognition purposes # the line order is unimportant (In fact, for this implementation, even # word order is unimportant). # Logical Hebrew - "windows-1255" - normal, naturally ordered text. # # "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be # specifically identified. # "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew # that contain special punctuation marks or diacritics is displayed with # some unconverted characters showing as question marks. This problem might # be corrected using another model prober for x-mac-hebrew. Due to the fact # that x-mac-hebrew texts are so rare, writing another model prober isn't # worth the effort and performance hit. # #### The Prober #### # # The prober is divided between two SBCharSetProbers and a HebrewProber, # all of which are managed, created, fed data, inquired and deleted by the # SBCSGroupProber. The two SBCharSetProbers identify that the text is in # fact some kind of Hebrew, Logical or Visual. The final decision about which # one is it is made by the HebrewProber by combining final-letter scores # with the scores of the two SBCharSetProbers to produce a final answer. # # The SBCSGroupProber is responsible for stripping the original text of HTML # tags, English characters, numbers, low-ASCII punctuation characters, spaces # and new lines. It reduces any sequence of such characters to a single space. # The buffer fed to each prober in the SBCS group prober is pure text in # high-ASCII. # The two SBCharSetProbers (model probers) share the same language model: # Win1255Model. # The first SBCharSetProber uses the model normally as any other # SBCharSetProber does, to recognize windows-1255, upon which this model was # built. The second SBCharSetProber is told to make the pair-of-letter # lookup in the language model backwards. This in practice exactly simulates # a visual Hebrew model using the windows-1255 logical Hebrew model. # # The HebrewProber is not using any language model. All it does is look for # final-letter evidence suggesting the text is either logical Hebrew or visual # Hebrew. Disjointed from the model probers, the results of the HebrewProber # alone are meaningless. HebrewProber always returns 0.00 as confidence # since it never identifies a charset by itself. Instead, the pointer to the # HebrewProber is passed to the model probers as a helper "Name Prober". # When the Group prober receives a positive identification from any prober, # it asks for the name of the charset identified. If the prober queried is a # Hebrew model prober, the model prober forwards the call to the # HebrewProber to make the final decision. In the HebrewProber, the # decision is made according to the final-letters scores maintained and Both # model probers scores. The answer is returned in the form of the name of the # charset identified, either "windows-1255" or "ISO-8859-8". # windows-1255 / ISO-8859-8 code points of interest FINAL_KAF = 0xea NORMAL_KAF = 0xeb FINAL_MEM = 0xed NORMAL_MEM = 0xee FINAL_NUN = 0xef NORMAL_NUN = 0xf0 FINAL_PE = 0xf3 NORMAL_PE = 0xf4 FINAL_TSADI = 0xf5 NORMAL_TSADI = 0xf6 # Minimum Visual vs Logical final letter score difference. # If the difference is below this, don't rely solely on the final letter score # distance. 
MIN_FINAL_CHAR_DISTANCE = 5 # Minimum Visual vs Logical model score difference. # If the difference is below this, don't rely at all on the model score # distance. MIN_MODEL_DISTANCE = 0.01 VISUAL_HEBREW_NAME = "ISO-8859-8" LOGICAL_HEBREW_NAME = "windows-1255" class HebrewProber(CharSetProber): def __init__(self): CharSetProber.__init__(self) self._mLogicalProber = None self._mVisualProber = None self.reset() def reset(self): self._mFinalCharLogicalScore = 0 self._mFinalCharVisualScore = 0 # The two last characters seen in the previous buffer, # mPrev and mBeforePrev are initialized to space in order to simulate # a word delimiter at the beginning of the data self._mPrev = ' ' self._mBeforePrev = ' ' # These probers are owned by the group prober. def set_model_probers(self, logicalProber, visualProber): self._mLogicalProber = logicalProber self._mVisualProber = visualProber def is_final(self, c): return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE, FINAL_TSADI] def is_non_final(self, c): # The normal Tsadi is not a good Non-Final letter due to words like # 'lechotet' (to chat) containing an apostrophe after the tsadi. This # apostrophe is converted to a space in FilterWithoutEnglishLetters # causing the Non-Final tsadi to appear at an end of a word even # though this is not the case in the original text. # The letters Pe and Kaf rarely display a related behavior of not being # a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak' # for example legally end with a Non-Final Pe or Kaf. However, the # benefit of these letters as Non-Final letters outweighs the damage # since these words are quite rare. return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE] def feed(self, aBuf): # Final letter analysis for logical-visual decision. # Look for evidence that the received buffer is either logical Hebrew # or visual Hebrew. # The following cases are checked: # 1) A word longer than 1 letter, ending with a final letter. This is # an indication that the text is laid out "naturally" since the # final letter really appears at the end. +1 for logical score. # 2) A word longer than 1 letter, ending with a Non-Final letter. In # normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi, # should not end with the Non-Final form of that letter. Exceptions # to this rule are mentioned above in isNonFinal(). This is an # indication that the text is laid out backwards. +1 for visual # score # 3) A word longer than 1 letter, starting with a final letter. Final # letters should not appear at the beginning of a word. This is an # indication that the text is laid out backwards. +1 for visual # score. # # The visual score and logical score are accumulated throughout the # text and are finally checked against each other in GetCharSetName(). # No checking for final letters in the middle of words is done since # that case is not an indication for either Logical or Visual text. # # We automatically filter out all 7-bit characters (replace them with # spaces) so the word boundary detection works properly. [MAP] if self.get_state() == eNotMe: # Both model probers say it's not them. No reason to continue. 
return eNotMe aBuf = self.filter_high_bit_only(aBuf) for cur in aBuf: if cur == ' ': # We stand on a space - a word just ended if self._mBeforePrev != ' ': # next-to-last char was not a space so self._mPrev is not a # 1 letter word if self.is_final(self._mPrev): # case (1) [-2:not space][-1:final letter][cur:space] self._mFinalCharLogicalScore += 1 elif self.is_non_final(self._mPrev): # case (2) [-2:not space][-1:Non-Final letter][ # cur:space] self._mFinalCharVisualScore += 1 else: # Not standing on a space if ((self._mBeforePrev == ' ') and (self.is_final(self._mPrev)) and (cur != ' ')): # case (3) [-2:space][-1:final letter][cur:not space] self._mFinalCharVisualScore += 1 self._mBeforePrev = self._mPrev self._mPrev = cur # Forever detecting, till the end or until both model probers return # eNotMe (handled above) return eDetecting def get_charset_name(self): # Make the decision: is it Logical or Visual? # If the final letter score distance is dominant enough, rely on it. finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore if finalsub >= MIN_FINAL_CHAR_DISTANCE: return LOGICAL_HEBREW_NAME if finalsub <= -MIN_FINAL_CHAR_DISTANCE: return VISUAL_HEBREW_NAME # It's not dominant enough, try to rely on the model scores instead. modelsub = (self._mLogicalProber.get_confidence() - self._mVisualProber.get_confidence()) if modelsub > MIN_MODEL_DISTANCE: return LOGICAL_HEBREW_NAME if modelsub < -MIN_MODEL_DISTANCE: return VISUAL_HEBREW_NAME # Still no good, back to final letter distance, maybe it'll save the # day. if finalsub < 0.0: return VISUAL_HEBREW_NAME # (finalsub > 0 - Logical) or (don't know what to do) default to # Logical. return LOGICAL_HEBREW_NAME def get_state(self): # Remain active as long as any of the model probers are active. if (self._mLogicalProber.get_state() == eNotMe) and \ (self._mVisualProber.get_state() == eNotMe): return eNotMe return eDetecting
apache-2.0
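HebrewProber above never names a charset on its own; in practice it is exercised indirectly through the top-level chardet API. A hedged sketch of that entry point, with an arbitrary sample sentence and an expected (not guaranteed) result:

import chardet

# Encode some logical (naturally ordered) Hebrew text as windows-1255.
text = u'שלום עולם, זהו משפט בעברית. ' * 20
raw = text.encode('windows-1255')

result = chardet.detect(raw)
print(result)  # expected something like {'encoding': 'windows-1255', 'confidence': ...}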
pyfisch/servo
tests/wpt/web-platform-tests/tools/third_party/h2/test/test_invalid_headers.py
25
34188
# -*- coding: utf-8 -*- """ test_invalid_headers.py ~~~~~~~~~~~~~~~~~~~~~~~ This module contains tests that use invalid header blocks, and validates that they fail appropriately. """ import itertools import pytest import h2.config import h2.connection import h2.errors import h2.events import h2.exceptions import h2.settings import h2.utilities import hyperframe.frame from hypothesis import given from hypothesis.strategies import binary, lists, tuples HEADERS_STRATEGY = lists(tuples(binary(min_size=1), binary())) class TestInvalidFrameSequences(object): """ Invalid header sequences cause ProtocolErrors to be thrown when received. """ base_request_headers = [ (':authority', 'example.com'), (':path', '/'), (':scheme', 'https'), (':method', 'GET'), ('user-agent', 'someua/0.0.1'), ] invalid_header_blocks = [ base_request_headers + [('Uppercase', 'name')], base_request_headers + [(':late', 'pseudo-header')], [(':path', 'duplicate-pseudo-header')] + base_request_headers, base_request_headers + [('connection', 'close')], base_request_headers + [('proxy-connection', 'close')], base_request_headers + [('keep-alive', 'close')], base_request_headers + [('transfer-encoding', 'gzip')], base_request_headers + [('upgrade', 'super-protocol/1.1')], base_request_headers + [('te', 'chunked')], base_request_headers + [('host', 'notexample.com')], base_request_headers + [(' name', 'name with leading space')], base_request_headers + [('name ', 'name with trailing space')], base_request_headers + [('name', ' value with leading space')], base_request_headers + [('name', 'value with trailing space ')], [header for header in base_request_headers if header[0] != ':authority'], ] server_config = h2.config.H2Configuration( client_side=False, header_encoding='utf-8' ) @pytest.mark.parametrize('headers', invalid_header_blocks) def test_headers_event(self, frame_factory, headers): """ Test invalid headers are rejected with PROTOCOL_ERROR. """ c = h2.connection.H2Connection(config=self.server_config) c.receive_data(frame_factory.preamble()) c.clear_outbound_data_buffer() f = frame_factory.build_headers_frame(headers) data = f.serialize() with pytest.raises(h2.exceptions.ProtocolError): c.receive_data(data) expected_frame = frame_factory.build_goaway_frame( last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR ) assert c.data_to_send() == expected_frame.serialize() @pytest.mark.parametrize('headers', invalid_header_blocks) def test_push_promise_event(self, frame_factory, headers): """ If a PUSH_PROMISE header frame is received with an invalid header block it is rejected with a PROTOCOL_ERROR. """ c = h2.connection.H2Connection() c.initiate_connection() c.send_headers( stream_id=1, headers=self.base_request_headers, end_stream=True ) c.clear_outbound_data_buffer() f = frame_factory.build_push_promise_frame( stream_id=1, promised_stream_id=2, headers=headers ) data = f.serialize() with pytest.raises(h2.exceptions.ProtocolError): c.receive_data(data) expected_frame = frame_factory.build_goaway_frame( last_stream_id=0, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR ) assert c.data_to_send() == expected_frame.serialize() @pytest.mark.parametrize('headers', invalid_header_blocks) def test_push_promise_skipping_validation(self, frame_factory, headers): """ If we have ``validate_inbound_headers`` disabled, then invalid header blocks in push promise frames are allowed to pass. 
""" config = h2.config.H2Configuration( client_side=True, validate_inbound_headers=False, header_encoding='utf-8' ) c = h2.connection.H2Connection(config=config) c.initiate_connection() c.send_headers( stream_id=1, headers=self.base_request_headers, end_stream=True ) c.clear_outbound_data_buffer() f = frame_factory.build_push_promise_frame( stream_id=1, promised_stream_id=2, headers=headers ) data = f.serialize() events = c.receive_data(data) assert len(events) == 1 pp_event = events[0] assert pp_event.headers == headers @pytest.mark.parametrize('headers', invalid_header_blocks) def test_headers_event_skipping_validation(self, frame_factory, headers): """ If we have ``validate_inbound_headers`` disabled, then all of these invalid header blocks are allowed to pass. """ config = h2.config.H2Configuration( client_side=False, validate_inbound_headers=False, header_encoding='utf-8' ) c = h2.connection.H2Connection(config=config) c.receive_data(frame_factory.preamble()) f = frame_factory.build_headers_frame(headers) data = f.serialize() events = c.receive_data(data) assert len(events) == 1 request_event = events[0] assert request_event.headers == headers def test_transfer_encoding_trailers_is_valid(self, frame_factory): """ Transfer-Encoding trailers is allowed by the filter. """ headers = ( self.base_request_headers + [('te', 'trailers')] ) c = h2.connection.H2Connection(config=self.server_config) c.receive_data(frame_factory.preamble()) f = frame_factory.build_headers_frame(headers) data = f.serialize() events = c.receive_data(data) assert len(events) == 1 request_event = events[0] assert request_event.headers == headers def test_pseudo_headers_rejected_in_trailer(self, frame_factory): """ Ensure we reject pseudo headers included in trailers """ trailers = [(':path', '/'), ('extra', 'value')] c = h2.connection.H2Connection(config=self.server_config) c.receive_data(frame_factory.preamble()) c.clear_outbound_data_buffer() header_frame = frame_factory.build_headers_frame( self.base_request_headers ) trailer_frame = frame_factory.build_headers_frame( trailers, flags=["END_STREAM"] ) head = header_frame.serialize() trailer = trailer_frame.serialize() c.receive_data(head) # Raise exception if pseudo header in trailer with pytest.raises(h2.exceptions.ProtocolError) as e: c.receive_data(trailer) assert "pseudo-header in trailer" in str(e) # Test appropriate response frame is generated expected_frame = frame_factory.build_goaway_frame( last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR ) assert c.data_to_send() == expected_frame.serialize() class TestSendingInvalidFrameSequences(object): """ Trying to send invalid header sequences cause ProtocolErrors to be thrown. 
""" base_request_headers = [ (':authority', 'example.com'), (':path', '/'), (':scheme', 'https'), (':method', 'GET'), ('user-agent', 'someua/0.0.1'), ] invalid_header_blocks = [ base_request_headers + [(':late', 'pseudo-header')], [(':path', 'duplicate-pseudo-header')] + base_request_headers, base_request_headers + [('te', 'chunked')], base_request_headers + [('host', 'notexample.com')], [header for header in base_request_headers if header[0] != ':authority'], ] strippable_header_blocks = [ base_request_headers + [('connection', 'close')], base_request_headers + [('proxy-connection', 'close')], base_request_headers + [('keep-alive', 'close')], base_request_headers + [('transfer-encoding', 'gzip')], base_request_headers + [('upgrade', 'super-protocol/1.1')] ] all_header_blocks = invalid_header_blocks + strippable_header_blocks server_config = h2.config.H2Configuration(client_side=False) @pytest.mark.parametrize('headers', invalid_header_blocks) def test_headers_event(self, frame_factory, headers): """ Test sending invalid headers raise a ProtocolError. """ c = h2.connection.H2Connection() c.initiate_connection() # Clear the data, then try to send headers. c.clear_outbound_data_buffer() with pytest.raises(h2.exceptions.ProtocolError): c.send_headers(1, headers) @pytest.mark.parametrize('headers', invalid_header_blocks) def test_send_push_promise(self, frame_factory, headers): """ Sending invalid headers in a push promise raises a ProtocolError. """ c = h2.connection.H2Connection(config=self.server_config) c.initiate_connection() c.receive_data(frame_factory.preamble()) header_frame = frame_factory.build_headers_frame( self.base_request_headers ) c.receive_data(header_frame.serialize()) # Clear the data, then try to send a push promise. c.clear_outbound_data_buffer() with pytest.raises(h2.exceptions.ProtocolError): c.push_stream( stream_id=1, promised_stream_id=2, request_headers=headers ) @pytest.mark.parametrize('headers', all_header_blocks) def test_headers_event_skipping_validation(self, frame_factory, headers): """ If we have ``validate_outbound_headers`` disabled, then all of these invalid header blocks are allowed to pass. """ config = h2.config.H2Configuration( validate_outbound_headers=False ) c = h2.connection.H2Connection(config=config) c.initiate_connection() # Clear the data, then send headers. c.clear_outbound_data_buffer() c.send_headers(1, headers) # Ensure headers are still normalized. norm_headers = h2.utilities.normalize_outbound_headers(headers, None) f = frame_factory.build_headers_frame(norm_headers) assert c.data_to_send() == f.serialize() @pytest.mark.parametrize('headers', all_header_blocks) def test_push_promise_skipping_validation(self, frame_factory, headers): """ If we have ``validate_outbound_headers`` disabled, then all of these invalid header blocks are allowed to pass. """ config = h2.config.H2Configuration( client_side=False, validate_outbound_headers=False, ) c = h2.connection.H2Connection(config=config) c.initiate_connection() c.receive_data(frame_factory.preamble()) header_frame = frame_factory.build_headers_frame( self.base_request_headers ) c.receive_data(header_frame.serialize()) # Create push promise frame with normalized headers. frame_factory.refresh_encoder() norm_headers = h2.utilities.normalize_outbound_headers(headers, None) pp_frame = frame_factory.build_push_promise_frame( stream_id=1, promised_stream_id=2, headers=norm_headers ) # Clear the data, then send a push promise. 
c.clear_outbound_data_buffer() c.push_stream( stream_id=1, promised_stream_id=2, request_headers=headers ) assert c.data_to_send() == pp_frame.serialize() @pytest.mark.parametrize('headers', all_header_blocks) def test_headers_event_skip_normalization(self, frame_factory, headers): """ If we have ``normalize_outbound_headers`` disabled, then all of these invalid header blocks are sent through unmodified. """ config = h2.config.H2Configuration( validate_outbound_headers=False, normalize_outbound_headers=False ) c = h2.connection.H2Connection(config=config) c.initiate_connection() f = frame_factory.build_headers_frame( headers, stream_id=1, ) # Clear the data, then send headers. c.clear_outbound_data_buffer() c.send_headers(1, headers) assert c.data_to_send() == f.serialize() @pytest.mark.parametrize('headers', all_header_blocks) def test_push_promise_skip_normalization(self, frame_factory, headers): """ If we have ``normalize_outbound_headers`` disabled, then all of these invalid header blocks are allowed to pass unmodified. """ config = h2.config.H2Configuration( client_side=False, validate_outbound_headers=False, normalize_outbound_headers=False, ) c = h2.connection.H2Connection(config=config) c.initiate_connection() c.receive_data(frame_factory.preamble()) header_frame = frame_factory.build_headers_frame( self.base_request_headers ) c.receive_data(header_frame.serialize()) frame_factory.refresh_encoder() pp_frame = frame_factory.build_push_promise_frame( stream_id=1, promised_stream_id=2, headers=headers ) # Clear the data, then send a push promise. c.clear_outbound_data_buffer() c.push_stream( stream_id=1, promised_stream_id=2, request_headers=headers ) assert c.data_to_send() == pp_frame.serialize() @pytest.mark.parametrize('headers', strippable_header_blocks) def test_strippable_headers(self, frame_factory, headers): """ Test connection related headers are removed before sending. """ c = h2.connection.H2Connection() c.initiate_connection() # Clear the data, then try to send headers. c.clear_outbound_data_buffer() c.send_headers(1, headers) f = frame_factory.build_headers_frame(self.base_request_headers) assert c.data_to_send() == f.serialize() class TestFilter(object): """ Test the filter function directly. These tests exists to confirm the behaviour of the filter function in a wide range of scenarios. Many of these scenarios may not be legal for HTTP/2 and so may never hit the function, but it's worth validating that it behaves as expected anyway. """ validation_functions = [ h2.utilities.validate_headers, h2.utilities.validate_outbound_headers ] hdr_validation_combos = [ h2.utilities.HeaderValidationFlags( is_client, is_trailer, is_response_header, is_push_promise ) for is_client, is_trailer, is_response_header, is_push_promise in ( itertools.product([True, False], repeat=4) ) ] hdr_validation_response_headers = [ flags for flags in hdr_validation_combos if flags.is_response_header ] hdr_validation_request_headers_no_trailer = [ flags for flags in hdr_validation_combos if not (flags.is_trailer or flags.is_response_header) ] invalid_request_header_blocks_bytes = ( # First, missing :method ( (b':authority', b'google.com'), (b':path', b'/'), (b':scheme', b'https'), ), # Next, missing :path ( (b':authority', b'google.com'), (b':method', b'GET'), (b':scheme', b'https'), ), # Next, missing :scheme ( (b':authority', b'google.com'), (b':method', b'GET'), (b':path', b'/'), ), # Finally, path present but empty. 
( (b':authority', b'google.com'), (b':method', b'GET'), (b':scheme', b'https'), (b':path', b''), ), ) invalid_request_header_blocks_unicode = ( # First, missing :method ( (u':authority', u'google.com'), (u':path', u'/'), (u':scheme', u'https'), ), # Next, missing :path ( (u':authority', u'google.com'), (u':method', u'GET'), (u':scheme', u'https'), ), # Next, missing :scheme ( (u':authority', u'google.com'), (u':method', u'GET'), (u':path', u'/'), ), # Finally, path present but empty. ( (u':authority', u'google.com'), (u':method', u'GET'), (u':scheme', u'https'), (u':path', u''), ), ) # All headers that are forbidden from either request or response blocks. forbidden_request_headers_bytes = (b':status',) forbidden_request_headers_unicode = (u':status',) forbidden_response_headers_bytes = ( b':path', b':scheme', b':authority', b':method' ) forbidden_response_headers_unicode = ( u':path', u':scheme', u':authority', u':method' ) @pytest.mark.parametrize('validation_function', validation_functions) @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) @given(headers=HEADERS_STRATEGY) def test_range_of_acceptable_outputs(self, headers, validation_function, hdr_validation_flags): """ The header validation functions either return the data unchanged or throw a ProtocolError. """ try: assert headers == list(validation_function( headers, hdr_validation_flags)) except h2.exceptions.ProtocolError: assert True @pytest.mark.parametrize('hdr_validation_flags', hdr_validation_combos) def test_invalid_pseudo_headers(self, hdr_validation_flags): headers = [(b':custom', b'value')] with pytest.raises(h2.exceptions.ProtocolError): list(h2.utilities.validate_headers(headers, hdr_validation_flags)) @pytest.mark.parametrize('validation_function', validation_functions) @pytest.mark.parametrize( 'hdr_validation_flags', hdr_validation_request_headers_no_trailer ) def test_matching_authority_host_headers(self, validation_function, hdr_validation_flags): """ If a header block has :authority and Host headers and they match, the headers should pass through unchanged. 
""" headers = [ (b':authority', b'example.com'), (b':path', b'/'), (b':scheme', b'https'), (b':method', b'GET'), (b'host', b'example.com'), ] assert headers == list(h2.utilities.validate_headers( headers, hdr_validation_flags )) @pytest.mark.parametrize( 'hdr_validation_flags', hdr_validation_response_headers ) def test_response_header_without_status(self, hdr_validation_flags): headers = [(b'content-length', b'42')] with pytest.raises(h2.exceptions.ProtocolError): list(h2.utilities.validate_headers(headers, hdr_validation_flags)) @pytest.mark.parametrize( 'hdr_validation_flags', hdr_validation_request_headers_no_trailer ) @pytest.mark.parametrize( 'header_block', ( invalid_request_header_blocks_bytes + invalid_request_header_blocks_unicode ) ) def test_outbound_req_header_missing_pseudo_headers(self, hdr_validation_flags, header_block): with pytest.raises(h2.exceptions.ProtocolError): list( h2.utilities.validate_outbound_headers( header_block, hdr_validation_flags ) ) @pytest.mark.parametrize( 'hdr_validation_flags', hdr_validation_request_headers_no_trailer ) @pytest.mark.parametrize( 'header_block', invalid_request_header_blocks_bytes ) def test_inbound_req_header_missing_pseudo_headers(self, hdr_validation_flags, header_block): with pytest.raises(h2.exceptions.ProtocolError): list( h2.utilities.validate_headers( header_block, hdr_validation_flags ) ) @pytest.mark.parametrize( 'hdr_validation_flags', hdr_validation_request_headers_no_trailer ) @pytest.mark.parametrize( 'invalid_header', forbidden_request_headers_bytes + forbidden_request_headers_unicode ) def test_outbound_req_header_extra_pseudo_headers(self, hdr_validation_flags, invalid_header): """ Outbound request header blocks containing the forbidden request headers fail validation. """ headers = [ (b':path', b'/'), (b':scheme', b'https'), (b':authority', b'google.com'), (b':method', b'GET'), ] headers.append((invalid_header, b'some value')) with pytest.raises(h2.exceptions.ProtocolError): list( h2.utilities.validate_outbound_headers( headers, hdr_validation_flags ) ) @pytest.mark.parametrize( 'hdr_validation_flags', hdr_validation_request_headers_no_trailer ) @pytest.mark.parametrize( 'invalid_header', forbidden_request_headers_bytes ) def test_inbound_req_header_extra_pseudo_headers(self, hdr_validation_flags, invalid_header): """ Inbound request header blocks containing the forbidden request headers fail validation. """ headers = [ (b':path', b'/'), (b':scheme', b'https'), (b':authority', b'google.com'), (b':method', b'GET'), ] headers.append((invalid_header, b'some value')) with pytest.raises(h2.exceptions.ProtocolError): list(h2.utilities.validate_headers(headers, hdr_validation_flags)) @pytest.mark.parametrize( 'hdr_validation_flags', hdr_validation_response_headers ) @pytest.mark.parametrize( 'invalid_header', forbidden_response_headers_bytes + forbidden_response_headers_unicode ) def test_outbound_resp_header_extra_pseudo_headers(self, hdr_validation_flags, invalid_header): """ Outbound response header blocks containing the forbidden response headers fail validation. 
""" headers = [(b':status', b'200')] headers.append((invalid_header, b'some value')) with pytest.raises(h2.exceptions.ProtocolError): list( h2.utilities.validate_outbound_headers( headers, hdr_validation_flags ) ) @pytest.mark.parametrize( 'hdr_validation_flags', hdr_validation_response_headers ) @pytest.mark.parametrize( 'invalid_header', forbidden_response_headers_bytes ) def test_inbound_resp_header_extra_pseudo_headers(self, hdr_validation_flags, invalid_header): """ Inbound response header blocks containing the forbidden response headers fail validation. """ headers = [(b':status', b'200')] headers.append((invalid_header, b'some value')) with pytest.raises(h2.exceptions.ProtocolError): list(h2.utilities.validate_headers(headers, hdr_validation_flags)) class TestOversizedHeaders(object): """ Tests that oversized header blocks are correctly rejected. This replicates the "HPACK Bomb" attack, and confirms that we're resistant against it. """ request_header_block = [ (b':method', b'GET'), (b':authority', b'example.com'), (b':scheme', b'https'), (b':path', b'/'), ] response_header_block = [ (b':status', b'200'), ] # The first header block contains a single header that fills the header # table. To do that, we'll give it a single-character header name and a # 4063 byte header value. This will make it exactly the size of the header # table. It must come last, so that it evicts all other headers. # This block must be appended to either a request or response block. first_header_block = [ (b'a', b'a' * 4063), ] # The second header "block" is actually a custom HEADERS frame body that # simply repeatedly refers to the first entry for 16kB. Each byte has the # high bit set (0x80), and then uses the remaining 7 bits to encode the # number 62 (0x3e), leading to a repeat of the byte 0xbe. second_header_block = b'\xbe' * 2**14 server_config = h2.config.H2Configuration(client_side=False) def test_hpack_bomb_request(self, frame_factory): """ A HPACK bomb request causes the connection to be torn down with the error code ENHANCE_YOUR_CALM. """ c = h2.connection.H2Connection(config=self.server_config) c.receive_data(frame_factory.preamble()) c.clear_outbound_data_buffer() f = frame_factory.build_headers_frame( self.request_header_block + self.first_header_block ) data = f.serialize() c.receive_data(data) # Build the attack payload. attack_frame = hyperframe.frame.HeadersFrame(stream_id=3) attack_frame.data = self.second_header_block attack_frame.flags.add('END_HEADERS') data = attack_frame.serialize() with pytest.raises(h2.exceptions.DenialOfServiceError): c.receive_data(data) expected_frame = frame_factory.build_goaway_frame( last_stream_id=1, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM ) assert c.data_to_send() == expected_frame.serialize() def test_hpack_bomb_response(self, frame_factory): """ A HPACK bomb response causes the connection to be torn down with the error code ENHANCE_YOUR_CALM. """ c = h2.connection.H2Connection() c.initiate_connection() c.send_headers( stream_id=1, headers=self.request_header_block ) c.send_headers( stream_id=3, headers=self.request_header_block ) c.clear_outbound_data_buffer() f = frame_factory.build_headers_frame( self.response_header_block + self.first_header_block ) data = f.serialize() c.receive_data(data) # Build the attack payload. 
attack_frame = hyperframe.frame.HeadersFrame(stream_id=3) attack_frame.data = self.second_header_block attack_frame.flags.add('END_HEADERS') data = attack_frame.serialize() with pytest.raises(h2.exceptions.DenialOfServiceError): c.receive_data(data) expected_frame = frame_factory.build_goaway_frame( last_stream_id=0, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM ) assert c.data_to_send() == expected_frame.serialize() def test_hpack_bomb_push(self, frame_factory): """ A HPACK bomb push causes the connection to be torn down with the error code ENHANCE_YOUR_CALM. """ c = h2.connection.H2Connection() c.initiate_connection() c.send_headers( stream_id=1, headers=self.request_header_block ) c.clear_outbound_data_buffer() f = frame_factory.build_headers_frame( self.response_header_block + self.first_header_block ) data = f.serialize() c.receive_data(data) # Build the attack payload. We need to shrink it by four bytes because # the promised_stream_id consumes four bytes of body. attack_frame = hyperframe.frame.PushPromiseFrame(stream_id=3) attack_frame.promised_stream_id = 2 attack_frame.data = self.second_header_block[:-4] attack_frame.flags.add('END_HEADERS') data = attack_frame.serialize() with pytest.raises(h2.exceptions.DenialOfServiceError): c.receive_data(data) expected_frame = frame_factory.build_goaway_frame( last_stream_id=0, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM ) assert c.data_to_send() == expected_frame.serialize() def test_reject_headers_when_list_size_shrunk(self, frame_factory): """ When we've shrunk the header list size, we reject new header blocks that violate the new size. """ c = h2.connection.H2Connection(config=self.server_config) c.receive_data(frame_factory.preamble()) c.clear_outbound_data_buffer() # Receive the first request, which causes no problem. f = frame_factory.build_headers_frame( stream_id=1, headers=self.request_header_block ) data = f.serialize() c.receive_data(data) # Now, send a settings change. It's un-ACKed at this time. A new # request arrives, also without incident. c.update_settings({h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 50}) c.clear_outbound_data_buffer() f = frame_factory.build_headers_frame( stream_id=3, headers=self.request_header_block ) data = f.serialize() c.receive_data(data) # We get a SETTINGS ACK. f = frame_factory.build_settings_frame({}, ack=True) data = f.serialize() c.receive_data(data) # Now a third request comes in. This explodes. f = frame_factory.build_headers_frame( stream_id=5, headers=self.request_header_block ) data = f.serialize() with pytest.raises(h2.exceptions.DenialOfServiceError): c.receive_data(data) expected_frame = frame_factory.build_goaway_frame( last_stream_id=3, error_code=h2.errors.ErrorCodes.ENHANCE_YOUR_CALM ) assert c.data_to_send() == expected_frame.serialize() def test_reject_headers_when_table_size_shrunk(self, frame_factory): """ When we've shrunk the header table size, we reject header blocks that do not respect the change. """ c = h2.connection.H2Connection(config=self.server_config) c.receive_data(frame_factory.preamble()) c.clear_outbound_data_buffer() # Receive the first request, which causes no problem. f = frame_factory.build_headers_frame( stream_id=1, headers=self.request_header_block ) data = f.serialize() c.receive_data(data) # Now, send a settings change. It's un-ACKed at this time. A new # request arrives, also without incident. 
c.update_settings({h2.settings.SettingCodes.HEADER_TABLE_SIZE: 128}) c.clear_outbound_data_buffer() f = frame_factory.build_headers_frame( stream_id=3, headers=self.request_header_block ) data = f.serialize() c.receive_data(data) # We get a SETTINGS ACK. f = frame_factory.build_settings_frame({}, ack=True) data = f.serialize() c.receive_data(data) # Now a third request comes in. This explodes, as it does not contain # a dynamic table size update. f = frame_factory.build_headers_frame( stream_id=5, headers=self.request_header_block ) data = f.serialize() with pytest.raises(h2.exceptions.ProtocolError): c.receive_data(data) expected_frame = frame_factory.build_goaway_frame( last_stream_id=3, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR ) assert c.data_to_send() == expected_frame.serialize() def test_reject_headers_exceeding_table_size(self, frame_factory): """ When the remote peer sends a dynamic table size update that exceeds our setting, we reject it. """ c = h2.connection.H2Connection(config=self.server_config) c.receive_data(frame_factory.preamble()) c.clear_outbound_data_buffer() # Receive the first request, which causes no problem. f = frame_factory.build_headers_frame( stream_id=1, headers=self.request_header_block ) data = f.serialize() c.receive_data(data) # Now a second request comes in that sets the table size too high. # This explodes. frame_factory.change_table_size(c.local_settings.header_table_size + 1) f = frame_factory.build_headers_frame( stream_id=5, headers=self.request_header_block ) data = f.serialize() with pytest.raises(h2.exceptions.ProtocolError): c.receive_data(data) expected_frame = frame_factory.build_goaway_frame( last_stream_id=1, error_code=h2.errors.ErrorCodes.PROTOCOL_ERROR ) assert c.data_to_send() == expected_frame.serialize()
mpl-2.0
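The test module in the preceding record exercises h2's outbound header validation (invalid pseudo-headers, connection-specific headers) and its HPACK-bomb defenses. As a hedged illustration only, and not part of the archived test file, the following minimal client-side sketch drives the same send_headers() validation path through public h2 APIs; the hostname and stream ids are arbitrary.

import h2.config
import h2.connection
import h2.exceptions

# Build a client connection with default (validating) configuration.
config = h2.config.H2Configuration(client_side=True)
conn = h2.connection.H2Connection(config=config)
conn.initiate_connection()

request_headers = [
    (':method', 'GET'),
    (':authority', 'example.com'),
    (':scheme', 'https'),
    (':path', '/'),
]
conn.send_headers(stream_id=1, headers=request_headers, end_stream=True)

try:
    # 'te' may only carry the value 'trailers' in HTTP/2, so outbound
    # validation is expected to reject this header block.
    conn.send_headers(stream_id=3, headers=request_headers + [('te', 'chunked')])
except h2.exceptions.ProtocolError as exc:
    print('rejected as expected:', exc)

wire_bytes = conn.data_to_send()  # bytes destined for the socket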
Trois-Six/ansible-modules-core
cloud/openstack/_quantum_network.py
37
10270
#!/usr/bin/python #coding: utf-8 -*- # (c) 2013, Benno Joy <[email protected]> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. try: try: from neutronclient.neutron import client except ImportError: from quantumclient.quantum import client from keystoneclient.v2_0 import client as ksclient HAVE_DEPS = True except ImportError: HAVE_DEPS = False DOCUMENTATION = ''' --- module: quantum_network version_added: "1.4" deprecated: Deprecated in 2.0. Use os_network instead short_description: Creates/Removes networks from OpenStack description: - Add or Remove network from OpenStack. options: login_username: description: - login username to authenticate to keystone required: true default: admin login_password: description: - Password of login user required: true default: 'yes' login_tenant_name: description: - The tenant name of the login user required: true default: 'yes' tenant_name: description: - The name of the tenant for whom the network is created required: false default: None auth_url: description: - The keystone url for authentication required: false default: 'http://127.0.0.1:35357/v2.0/' region_name: description: - Name of the region required: false default: None state: description: - Indicate desired state of the resource choices: ['present', 'absent'] default: present name: description: - Name to be assigned to the nework required: true default: None provider_network_type: description: - The type of the network to be created, gre, vlan, local. Available types depend on the plugin. The Quantum service decides if not specified. required: false default: None provider_physical_network: description: - The physical network which would realize the virtual network for flat and vlan networks. required: false default: None provider_segmentation_id: description: - The id that has to be assigned to the network, in case of vlan networks that would be vlan id and for gre the tunnel id required: false default: None router_external: description: - If 'yes', specifies that the virtual network is a external network (public). 
required: false default: false shared: description: - Whether this network is shared or not required: false default: false admin_state_up: description: - Whether the state should be marked as up or down required: false default: true requirements: - "python >= 2.6" - "python-neutronclient or python-quantumclient" - "python-keystoneclient" ''' EXAMPLES = ''' # Create a GRE backed Quantum network with tunnel id 1 for tenant1 - quantum_network: name=t1network tenant_name=tenant1 state=present provider_network_type=gre provider_segmentation_id=1 login_username=admin login_password=admin login_tenant_name=admin # Create an external network - quantum_network: name=external_network state=present provider_network_type=local router_external=yes login_username=admin login_password=admin login_tenant_name=admin ''' _os_keystone = None _os_tenant_id = None def _get_ksclient(module, kwargs): try: kclient = ksclient.Client(username=kwargs.get('login_username'), password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) except Exception, e: module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message) global _os_keystone _os_keystone = kclient return kclient def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') except Exception, e: module.fail_json(msg = "Error getting network endpoint: %s " %e.message) return endpoint def _get_neutron_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) token = _ksclient.auth_token endpoint = _get_endpoint(module, _ksclient) kwargs = { 'token': token, 'endpoint_url': endpoint } try: neutron = client.Client('2.0', **kwargs) except Exception, e: module.fail_json(msg = " Error in connecting to neutron: %s " %e.message) return neutron def _set_tenant_id(module): global _os_tenant_id if not module.params['tenant_name']: _os_tenant_id = _os_keystone.tenant_id else: tenant_name = module.params['tenant_name'] for tenant in _os_keystone.tenants.list(): if tenant.name == tenant_name: _os_tenant_id = tenant.id break if not _os_tenant_id: module.fail_json(msg = "The tenant id cannot be found, please check the parameters") def _get_net_id(neutron, module): kwargs = { 'tenant_id': _os_tenant_id, 'name': module.params['name'], } try: networks = neutron.list_networks(**kwargs) except Exception, e: module.fail_json(msg = "Error in listing neutron networks: %s" % e.message) if not networks['networks']: return None return networks['networks'][0]['id'] def _create_network(module, neutron): neutron.format = 'json' network = { 'name': module.params.get('name'), 'tenant_id': _os_tenant_id, 'provider:network_type': module.params.get('provider_network_type'), 'provider:physical_network': module.params.get('provider_physical_network'), 'provider:segmentation_id': module.params.get('provider_segmentation_id'), 'router:external': module.params.get('router_external'), 'shared': module.params.get('shared'), 'admin_state_up': module.params.get('admin_state_up'), } if module.params['provider_network_type'] == 'local': network.pop('provider:physical_network', None) network.pop('provider:segmentation_id', None) if module.params['provider_network_type'] == 'flat': network.pop('provider:segmentation_id', None) if module.params['provider_network_type'] == 'gre': network.pop('provider:physical_network', None) if module.params['provider_network_type'] is None: network.pop('provider:network_type', None) network.pop('provider:physical_network', 
None) network.pop('provider:segmentation_id', None) try: net = neutron.create_network({'network':network}) except Exception, e: module.fail_json(msg = "Error in creating network: %s" % e.message) return net['network']['id'] def _delete_network(module, net_id, neutron): try: id = neutron.delete_network(net_id) except Exception, e: module.fail_json(msg = "Error in deleting the network: %s" % e.message) return True def main(): argument_spec = openstack_argument_spec() argument_spec.update(dict( name = dict(required=True), tenant_name = dict(default=None), provider_network_type = dict(default=None, choices=['local', 'vlan', 'flat', 'gre']), provider_physical_network = dict(default=None), provider_segmentation_id = dict(default=None), router_external = dict(default=False, type='bool'), shared = dict(default=False, type='bool'), admin_state_up = dict(default=True, type='bool'), state = dict(default='present', choices=['absent', 'present']) )) module = AnsibleModule(argument_spec=argument_spec) if not HAVE_DEPS: module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required') if module.params['provider_network_type'] in ['vlan' , 'flat']: if not module.params['provider_physical_network']: module.fail_json(msg = " for vlan and flat networks, variable provider_physical_network should be set.") if module.params['provider_network_type'] in ['vlan', 'gre']: if not module.params['provider_segmentation_id']: module.fail_json(msg = " for vlan & gre networks, variable provider_segmentation_id should be set.") neutron = _get_neutron_client(module, module.params) _set_tenant_id(module) if module.params['state'] == 'present': network_id = _get_net_id(neutron, module) if not network_id: network_id = _create_network(module, neutron) module.exit_json(changed = True, result = "Created", id = network_id) else: module.exit_json(changed = False, result = "Success", id = network_id) if module.params['state'] == 'absent': network_id = _get_net_id(neutron, module) if not network_id: module.exit_json(changed = False, result = "Success") else: _delete_network(module, network_id, neutron) module.exit_json(changed = True, result = "Deleted") # this is magic, see lib/ansible/module_common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main()
gpl-3.0
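The _create_network() function in the module above prunes the provider attributes that a given provider_network_type does not accept before calling neutron.create_network(). The standalone sketch below restates that pruning rule so it can be exercised in isolation; the helper name is an assumption for illustration, not something the module defines.

def prune_provider_keys(network, network_type):
    """Drop provider attributes that the given network type does not accept."""
    if network_type == 'local':
        network.pop('provider:physical_network', None)
        network.pop('provider:segmentation_id', None)
    elif network_type == 'flat':
        network.pop('provider:segmentation_id', None)
    elif network_type == 'gre':
        network.pop('provider:physical_network', None)
    elif network_type is None:
        for key in ('provider:network_type',
                    'provider:physical_network',
                    'provider:segmentation_id'):
            network.pop(key, None)
    return network

net = {'name': 't1network', 'provider:network_type': 'gre',
       'provider:physical_network': None, 'provider:segmentation_id': 1}
print(prune_provider_keys(net, 'gre'))  # physical_network removed, segmentation_id kept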
EmbodiedCognition/pagoda
setup.py
1
1082
import os import setuptools README = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'README.rst') setuptools.setup( name='pagoda', version='0.1.0', packages=setuptools.find_packages(), package_data={'': ['*.peg']}, author='UT Vision, Cognition, and Action Lab', author_email='[email protected]', description='pyglet + ode + numpy: a simulation framework', long_description=open(README).read(), license='MIT', url='http://github.com/EmbodiedCognition/pagoda/', keywords=('simulation ' 'physics ' 'ode ' 'visualization ' ), install_requires=['click', 'numpy', 'parsimonious', 'pyglet'], classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Physics', 'Topic :: Scientific/Engineering :: Visualization', ], )
mit
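The setup.py above ships *.peg grammar files via package_data. As a hedged sketch (the grammar file name below is illustrative, not taken from the package), such a data file can be read from the installed package at runtime with pkgutil:

import pkgutil

# 'pagoda' is the installed package; 'skeleton.peg' is a hypothetical resource name.
raw = pkgutil.get_data('pagoda', 'skeleton.peg')
grammar = raw.decode('utf-8') if raw is not None else ''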
kmoocdev2/edx-platform
lms/envs/test.py
1
20878
# -*- coding: utf-8 -*- """ This config file runs the simplest dev environment using sqlite, and db-based sessions. Assumes structure: /envroot/ /db # This is where it'll write the database file /edx-platform # The location of this repo /log # Where we're going to write log files """ # We intentionally define lots of variables that aren't used, and # want to import all variables from base settings files # pylint: disable=wildcard-import, unused-wildcard-import # Pylint gets confused by path.py instances, which report themselves as class # objects. As a result, pylint applies the wrong regex in validating names, # and throws spurious errors. Therefore, we disable invalid-name checking. # pylint: disable=invalid-name from .common import * import os from path import Path as path from uuid import uuid4 from util.db import NoOpMigrationModules from openedx.core.lib.derived import derive_settings from openedx.core.lib.tempdir import mkdtemp_clean # This patch disables the commit_on_success decorator during tests # in TestCase subclasses. from util.testing import patch_testcase, patch_sessions patch_testcase() patch_sessions() # Allow all hosts during tests, we use a lot of different ones all over the codebase. ALLOWED_HOSTS = [ '*' ] # Silence noisy logs to make troubleshooting easier when tests fail. import logging LOG_OVERRIDES = [ ('factory.generate', logging.ERROR), ('factory.containers', logging.ERROR), ] for log_name, log_level in LOG_OVERRIDES: logging.getLogger(log_name).setLevel(log_level) # mongo connection settings MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017')) MONGO_HOST = os.environ.get('EDXAPP_TEST_MONGO_HOST', 'localhost') os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = 'localhost:8000-9000' THIS_UUID = uuid4().hex[:5] # can't test start dates with this True, but on the other hand, # can test everything else :) FEATURES['DISABLE_START_DATES'] = True # Most tests don't use the discussion service, so we turn it off to speed them up. # Tests that do can enable this flag, but must use the UrlResetMixin class to force urls.py # to reload. For consistency in user-experience, keep the value of this setting in sync with # the one in cms/envs/test.py FEATURES['ENABLE_DISCUSSION_SERVICE'] = False FEATURES['ENABLE_SERVICE_STATUS'] = True FEATURES['ENABLE_SHOPPING_CART'] = True FEATURES['ENABLE_VERIFIED_CERTIFICATES'] = True # Toggles embargo on for testing FEATURES['EMBARGO'] = True FEATURES['ENABLE_COMBINED_LOGIN_REGISTRATION'] = True # Enable the milestones app in tests to be consistent with it being enabled in production FEATURES['MILESTONES_APP'] = True FEATURES['ENABLE_ENROLLMENT_TRACK_USER_PARTITION'] = True FEATURES['ENABLE_BULK_ENROLLMENT_VIEW'] = True DEFAULT_MOBILE_AVAILABLE = True # Need wiki for courseware views to work. TODO (vshnayder): shouldn't need it. WIKI_ENABLED = True # Enable a parental consent age limit for testing PARENTAL_CONSENT_AGE_LIMIT = 13 # Local Directories TEST_ROOT = path("test_root") # Want static files in the same dir for running on jenkins. STATIC_ROOT = TEST_ROOT / "staticfiles" WEBPACK_LOADER['DEFAULT']['STATS_FILE'] = STATIC_ROOT / "webpack-stats.json" STATUS_MESSAGE_PATH = TEST_ROOT / "status_message.json" COURSES_ROOT = TEST_ROOT / "data" DATA_DIR = COURSES_ROOT COMMON_TEST_DATA_ROOT = COMMON_ROOT / "test" / "data" # Where the content data is checked out. This may not exist on jenkins. GITHUB_REPO_ROOT = ENV_ROOT / "data" USE_I18N = True LANGUAGE_CODE = 'en' # tests assume they will get English. 
XQUEUE_INTERFACE = { "url": "http://sandbox-xqueue.edx.org", "django_auth": { "username": "lms", "password": "***REMOVED***" }, "basic_auth": ('anant', 'agarwal'), } XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds # Don't rely on a real staff grading backend MOCK_STAFF_GRADING = True MOCK_PEER_GRADING = True ############################ STATIC FILES ############################# # TODO (cpennington): We need to figure out how envs/test.py can inject things # into common.py so that we don't have to repeat this sort of thing STATICFILES_DIRS = [ COMMON_ROOT / "static", PROJECT_ROOT / "static", ] STATICFILES_DIRS += [ (course_dir, COMMON_TEST_DATA_ROOT / course_dir) for course_dir in os.listdir(COMMON_TEST_DATA_ROOT) if os.path.isdir(COMMON_TEST_DATA_ROOT / course_dir) ] # Avoid having to run collectstatic before the unit test suite # If we don't add these settings, then Django templates that can't # find pipelined assets will raise a ValueError. # http://stackoverflow.com/questions/12816941/unit-testing-with-django-pipeline STATICFILES_STORAGE = 'pipeline.storage.NonPackagingPipelineStorage' # Don't use compression during tests PIPELINE_JS_COMPRESSOR = None update_module_store_settings( MODULESTORE, module_store_options={ 'fs_root': TEST_ROOT / "data", }, xml_store_options={ 'data_dir': mkdtemp_clean(dir=TEST_ROOT), # never inadvertently load all the XML courses }, doc_store_settings={ 'host': MONGO_HOST, 'port': MONGO_PORT_NUM, 'db': 'test_xmodule_{}'.format(THIS_UUID), 'collection': 'test_modulestore', }, ) CONTENTSTORE = { 'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore', 'DOC_STORE_CONFIG': { 'host': MONGO_HOST, 'db': 'test_xcontent_{}'.format(THIS_UUID), 'port': MONGO_PORT_NUM, } } DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'ATOMIC_REQUESTS': True, }, 'student_module_history': { 'ENGINE': 'django.db.backends.sqlite3', }, } if os.environ.get('DISABLE_MIGRATIONS'): # Create tables directly from apps' models. This can be removed once we upgrade # to Django 1.9, which allows setting MIGRATION_MODULES to None in order to skip migrations. MIGRATION_MODULES = NoOpMigrationModules() # Make sure we test with the extended history table FEATURES['ENABLE_CSMH_EXTENDED'] = True INSTALLED_APPS.append('coursewarehistoryextended') CACHES = { # This is the cache used for most things. # In staging/prod envs, the sessions also live here. 'default': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', }, # The general cache is what you get if you use our util.cache. It's used for # things like caching the course.xml file for different A/B test groups. # We set it to be a DummyCache to force reloading of course.xml in dev. # In staging environments, we would grab VERSION from data uploaded by the # push process. 
'general': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', }, 'mongo_metadata_inheritance': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', }, 'loc_cache': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', }, 'course_structure_cache': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', }, } # Dummy secret key for dev SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd' ############################# SECURITY SETTINGS ################################ # Default to advanced security in common.py, so tests can reset here to use # a simpler security model FEATURES['ENFORCE_PASSWORD_POLICY'] = False FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False FEATURES['SQUELCH_PII_IN_LOGS'] = False FEATURES['PREVENT_CONCURRENT_LOGINS'] = False FEATURES['ADVANCED_SECURITY'] = False PASSWORD_MIN_LENGTH = None PASSWORD_COMPLEXITY = {} ######### Third-party auth ########## FEATURES['ENABLE_THIRD_PARTY_AUTH'] = True AUTHENTICATION_BACKENDS = [ 'social_core.backends.google.GoogleOAuth2', 'social_core.backends.linkedin.LinkedinOAuth2', 'social_core.backends.facebook.FacebookOAuth2', 'social_core.backends.azuread.AzureADOAuth2', 'social_core.backends.twitter.TwitterOAuth', 'third_party_auth.dummy.DummyBackend', 'third_party_auth.saml.SAMLAuthBackend', 'third_party_auth.lti.LTIAuthBackend', ] + AUTHENTICATION_BACKENDS THIRD_PARTY_AUTH_CUSTOM_AUTH_FORMS = { 'custom1': { 'secret_key': 'opensesame', 'url': '/misc/my-custom-registration-form', 'error_url': '/misc/my-custom-sso-error-page' }, } ################################## OPENID ##################################### FEATURES['AUTH_USE_OPENID'] = True FEATURES['AUTH_USE_OPENID_PROVIDER'] = True ################################## SHIB ####################################### FEATURES['AUTH_USE_SHIB'] = True FEATURES['SHIB_DISABLE_TOS'] = True FEATURES['RESTRICT_ENROLL_BY_REG_METHOD'] = True OPENID_CREATE_USERS = False OPENID_UPDATE_DETAILS_FROM_SREG = True OPENID_USE_AS_ADMIN_LOGIN = False OPENID_PROVIDER_TRUSTED_ROOTS = ['*'] ############################## OAUTH2 Provider ################################ FEATURES['ENABLE_OAUTH2_PROVIDER'] = True # don't cache courses for testing OIDC_COURSE_HANDLER_CACHE_TIMEOUT = 0 ########################### Settings for JWTs ################################## RESTRICTED_APPLICATION_JWT_ISSUER = { 'ISSUER': 'restricted-app', 'SECRET_KEY': 'restricted-secret', 'AUDIENCE': 'restricted-app', } JWT_AUTH.update({ 'JWT_ISSUERS': [ DEFAULT_JWT_ISSUER, RESTRICTED_APPLICATION_JWT_ISSUER, ], }) ########################### External REST APIs ################################# FEATURES['ENABLE_MOBILE_REST_API'] = True FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True ###################### Payment ##############################3 # Enable fake payment processing page FEATURES['ENABLE_PAYMENT_FAKE'] = True # Configure the payment processor to use the fake processing page # Since both the fake payment page and the shoppingcart app are using # the same settings, we can generate this randomly and guarantee # that they are using the same secret. 
from random import choice import secrets from string import letters, digits, punctuation RANDOM_SHARED_SECRET = ''.join( secrets.choice(letters + digits + punctuation) for x in range(250) ) CC_PROCESSOR_NAME = 'CyberSource2' CC_PROCESSOR['CyberSource2']['SECRET_KEY'] = RANDOM_SHARED_SECRET CC_PROCESSOR['CyberSource2']['ACCESS_KEY'] = "0123456789012345678901" CC_PROCESSOR['CyberSource2']['PROFILE_ID'] = "edx" CC_PROCESSOR['CyberSource2']['PURCHASE_ENDPOINT'] = "/shoppingcart/payment_fake" FEATURES['STORE_BILLING_INFO'] = True ########################### SYSADMIN DASHBOARD ################################ FEATURES['ENABLE_SYSADMIN_DASHBOARD'] = True GIT_REPO_DIR = TEST_ROOT / "course_repos" ################################# CELERY ###################################### CELERY_ALWAYS_EAGER = True CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend' CLEAR_REQUEST_CACHE_ON_TASK_COMPLETION = False ######################### MARKETING SITE ############################### MKTG_URL_LINK_MAP = { 'ABOUT': 'about', 'CONTACT': 'contact', 'HELP_CENTER': 'help-center', 'COURSES': 'courses', 'ROOT': 'root', 'TOS': 'tos', 'HONOR': 'honor', 'PRIVACY': 'privacy', 'CAREERS': 'careers', 'NEWS': 'news', 'PRESS': 'press', 'BLOG': 'blog', 'DONATE': 'donate', 'SITEMAP.XML': 'sitemap_xml', # Verified Certificates 'WHAT_IS_VERIFIED_CERT': 'verified-certificate', } SUPPORT_SITE_LINK = 'https://support.example.com' PASSWORD_RESET_SUPPORT_LINK = 'https://support.example.com/password-reset-help.html' ACTIVATION_EMAIL_SUPPORT_LINK = 'https://support.example.com/activation-email-help.html' ############################ STATIC FILES ############################# DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage' MEDIA_ROOT = TEST_ROOT / "uploads" MEDIA_URL = "/static/uploads/" STATICFILES_DIRS.append(("uploads", MEDIA_ROOT)) _NEW_STATICFILES_DIRS = [] # Strip out any static files that aren't in the repository root # so that the tests can run with only the edx-platform directory checked out for static_dir in STATICFILES_DIRS: # Handle both tuples and non-tuple directory definitions try: _, data_dir = static_dir except ValueError: data_dir = static_dir if data_dir.startswith(REPO_ROOT): _NEW_STATICFILES_DIRS.append(static_dir) STATICFILES_DIRS = _NEW_STATICFILES_DIRS FILE_UPLOAD_TEMP_DIR = TEST_ROOT / "uploads" FILE_UPLOAD_HANDLERS = [ 'django.core.files.uploadhandler.MemoryFileUploadHandler', 'django.core.files.uploadhandler.TemporaryFileUploadHandler', ] BLOCK_STRUCTURES_SETTINGS['PRUNING_ACTIVE'] = True ########################### Server Ports ################################### # These ports are carefully chosen so that if the browser needs to # access them, they will be available through the SauceLabs SSH tunnel LETTUCE_SERVER_PORT = 8003 XQUEUE_PORT = 8040 YOUTUBE_PORT = 8031 LTI_PORT = 8765 VIDEO_SOURCE_PORT = 8777 FEATURES['PREVIEW_LMS_BASE'] = "preview.localhost" ############### Module Store Items ########## PREVIEW_DOMAIN = FEATURES['PREVIEW_LMS_BASE'].split(':')[0] HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS = { PREVIEW_DOMAIN: 'draft-preferred' } ################### Make tests faster PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.SHA1PasswordHasher', 'django.contrib.auth.hashers.MD5PasswordHasher', ] ### This enables the Metrics tab for the Instructor dashboard ########### FEATURES['CLASS_DASHBOARD'] = True ################### Make tests quieter # OpenID spews messages like this to stderr, we don't need to see them: # Generated checkid_setup request to 
http://testserver/openid/provider/login/ with assocication {HMAC-SHA1}{51d49995}{s/kRmA==} import openid.oidutil openid.oidutil.log = lambda message, level=0: None # Include a non-ascii character in PLATFORM_NAME to uncover possible UnicodeEncodeErrors in tests. PLATFORM_NAME = u"édX" SITE_NAME = "edx.org" # set up some testing for microsites FEATURES['USE_MICROSITES'] = True MICROSITE_ROOT_DIR = COMMON_ROOT / 'test' / 'test_sites' MICROSITE_CONFIGURATION = { "test_site": { "domain_prefix": "test-site", "university": "test_site", "platform_name": "Test Site", "logo_image_url": "test_site/images/header-logo.png", "email_from_address": "[email protected]", "ACTIVATION_EMAIL_FROM_ADDRESS": "[email protected]", "payment_support_email": "[email protected]", "ENABLE_MKTG_SITE": False, "SITE_NAME": "test_site.localhost", "course_org_filter": "TestSiteX", "course_about_show_social_links": False, "css_overrides_file": "test_site/css/test_site.css", "show_partners": False, "show_homepage_promo_video": False, "course_index_overlay_text": "This is a Test Site Overlay Text.", "course_index_overlay_logo_file": "test_site/images/header-logo.png", "homepage_overlay_html": "<h1>This is a Test Site Overlay HTML</h1>", "ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER": False, "COURSE_CATALOG_VISIBILITY_PERMISSION": "see_in_catalog", "COURSE_ABOUT_VISIBILITY_PERMISSION": "see_about_page", "ENABLE_SHOPPING_CART": True, "ENABLE_PAID_COURSE_REGISTRATION": True, "SESSION_COOKIE_DOMAIN": "test_site.localhost", "LINKEDIN_COMPANY_ID": "test", "FACEBOOK_APP_ID": "12345678908", "urls": { 'ABOUT': 'test-site/about', 'PRIVACY': 'test-site/privacy', 'TOS_AND_HONOR': 'test-site/tos-and-honor', }, }, "site_with_logistration": { "domain_prefix": "logistration", "university": "logistration", "platform_name": "Test logistration", "logo_image_url": "test_site/images/header-logo.png", "email_from_address": "[email protected]", "ACTIVATION_EMAIL_FROM_ADDRESS": "[email protected]", "payment_support_email": "[email protected]", "ENABLE_MKTG_SITE": False, "ENABLE_COMBINED_LOGIN_REGISTRATION": True, "SITE_NAME": "test_site.localhost", "course_org_filter": "LogistrationX", "course_about_show_social_links": False, "css_overrides_file": "test_site/css/test_site.css", "show_partners": False, "show_homepage_promo_video": False, "course_index_overlay_text": "Logistration.", "course_index_overlay_logo_file": "test_site/images/header-logo.png", "homepage_overlay_html": "<h1>This is a Logistration HTML</h1>", "ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER": False, "COURSE_CATALOG_VISIBILITY_PERMISSION": "see_in_catalog", "COURSE_ABOUT_VISIBILITY_PERMISSION": "see_about_page", "ENABLE_SHOPPING_CART": True, "ENABLE_PAID_COURSE_REGISTRATION": True, "SESSION_COOKIE_DOMAIN": "test_logistration.localhost", }, "default": { "university": "default_university", "domain_prefix": "www", } } MICROSITE_TEST_HOSTNAME = 'test-site.testserver' MICROSITE_LOGISTRATION_HOSTNAME = 'logistration.testserver' TEST_THEME = COMMON_ROOT / "test" / "test-theme" # add extra template directory for test-only templates MAKO_TEMPLATE_DIRS_BASE.extend([ COMMON_ROOT / 'test' / 'templates', COMMON_ROOT / 'test' / 'test_sites', REPO_ROOT / 'openedx' / 'core' / 'djangolib' / 'tests' / 'templates', ]) # Setting for the testing of Software Secure Result Callback VERIFY_STUDENT["SOFTWARE_SECURE"] = { "API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB", "API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", } VIDEO_CDN_URL = { 'CN': 
'http://api.xuetangx.com/edx/video?s3_url=' } ######### dashboard git log settings ######### MONGODB_LOG = { 'host': MONGO_HOST, 'port': MONGO_PORT_NUM, 'user': '', 'password': '', 'db': 'xlog', } NOTES_DISABLED_TABS = [] # Enable EdxNotes for tests. FEATURES['ENABLE_EDXNOTES'] = True # Enable courseware search for tests FEATURES['ENABLE_COURSEWARE_SEARCH'] = True # Enable dashboard search for tests FEATURES['ENABLE_DASHBOARD_SEARCH'] = True # Use MockSearchEngine as the search engine for test scenario SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine" FACEBOOK_APP_SECRET = "Test" FACEBOOK_APP_ID = "Test" FACEBOOK_API_VERSION = "v2.8" ######### custom courses ######### INSTALLED_APPS += ['lms.djangoapps.ccx', 'openedx.core.djangoapps.ccxcon.apps.CCXConnectorConfig'] FEATURES['CUSTOM_COURSES_EDX'] = True # Set dummy values for profile image settings. PROFILE_IMAGE_BACKEND = { 'class': 'storages.backends.overwrite.OverwriteStorage', 'options': { 'location': MEDIA_ROOT, 'base_url': 'http://example-storage.com/profile-images/', }, } PROFILE_IMAGE_DEFAULT_FILENAME = 'default' PROFILE_IMAGE_DEFAULT_FILE_EXTENSION = 'png' PROFILE_IMAGE_SECRET_KEY = 'secret' PROFILE_IMAGE_MAX_BYTES = 1024 * 1024 PROFILE_IMAGE_MIN_BYTES = 100 # Enable the LTI provider feature for testing FEATURES['ENABLE_LTI_PROVIDER'] = True INSTALLED_APPS.append('lti_provider.apps.LtiProviderConfig') AUTHENTICATION_BACKENDS.append('lti_provider.users.LtiBackend') # ORGANIZATIONS FEATURES['ORGANIZATIONS_APP'] = True # Financial assistance page FEATURES['ENABLE_FINANCIAL_ASSISTANCE_FORM'] = True COURSE_CATALOG_API_URL = 'https://catalog.example.com/api/v1' COMPREHENSIVE_THEME_DIRS = [REPO_ROOT / "themes", REPO_ROOT / "common/test"] COMPREHENSIVE_THEME_LOCALE_PATHS = [REPO_ROOT / "themes/conf/locale", ] LMS_ROOT_URL = "http://localhost:8000" ECOMMERCE_API_URL = 'https://ecommerce.example.com/api/v2/' ENTERPRISE_API_URL = 'http://enterprise.example.com/enterprise/api/v1/' ENTERPRISE_CONSENT_API_URL = 'http://enterprise.example.com/consent/api/v1/' ACTIVATION_EMAIL_FROM_ADDRESS = '[email protected]' TEMPLATES[0]['OPTIONS']['debug'] = True ########################### DRF default throttle rates ############################ # Increasing rates to enable test cases hitting registration view succesfully. # Lower rate is causing view to get blocked, causing test case failure. REST_FRAMEWORK['DEFAULT_THROTTLE_RATES']['registration_validation'] = '100/minute' ########################## VIDEO TRANSCRIPTS STORAGE ############################ VIDEO_TRANSCRIPTS_SETTINGS = dict( VIDEO_TRANSCRIPTS_MAX_BYTES=3 * 1024 * 1024, # 3 MB STORAGE_KWARGS=dict( location=MEDIA_ROOT, base_url=MEDIA_URL, ), DIRECTORY_PREFIX='video-transcripts/', ) ####################### Plugin Settings ########################## from openedx.core.djangoapps.plugins import plugin_settings, constants as plugin_constants plugin_settings.add_plugins(__name__, plugin_constants.ProjectType.LMS, plugin_constants.SettingsType.TEST) ########################## Derive Any Derived Settings ####################### derive_settings(__name__)
agpl-3.0
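The test settings above build a 250-character shared secret for the fake CyberSource processor out of letters, digits and punctuation. The snippet below is a sketch of the equivalent construction on Python 3, where string.letters no longer exists and the stdlib secrets module is the usual tool; it is an illustration, not part of the archived settings file.

import secrets
import string

alphabet = string.ascii_letters + string.digits + string.punctuation
random_shared_secret = ''.join(secrets.choice(alphabet) for _ in range(250))
assert len(random_shared_secret) == 250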
xombiemp/CouchPotatoServer
libs/rtorrent/compat.py
180
1258
# Copyright (c) 2013 Chris Lucas, <[email protected]> # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import sys def is_py3(): return sys.version_info[0] == 3 if is_py3(): import xmlrpc.client as xmlrpclib else: import xmlrpclib
gpl-3.0
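compat.py above exposes a single xmlrpclib name that resolves to xmlrpc.client on Python 3 and to the legacy xmlrpclib on Python 2. A hedged usage sketch follows; the import path assumes the file is importable as rtorrent.compat, and the URL is purely illustrative.

from rtorrent.compat import is_py3, xmlrpclib

proxy = xmlrpclib.ServerProxy('http://localhost:5000/RPC2')  # no network I/O until a method is called
print('running under Python 3:', is_py3())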
renzon/appengineepython
backend/apps/curso_app/curso_facade.py
1
1664
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals from gaegraph.business_base import NodeSearch, DeleteNode from curso_app.curso_commands import ListCursoCommand, SaveCursoCommand, UpdateCursoCommand, CursoForm,\ GetCursoCommand, DeleteCursoCommand def save_curso_cmd(**curso_properties): """ Command to save Curso entity :param curso_properties: a dict of properties to save on model :return: a Command that save Curso, validating and localizing properties received as strings """ return SaveCursoCommand(**curso_properties) def update_curso_cmd(curso_id, **curso_properties): """ Command to update Curso entity with id equals 'curso_id' :param curso_properties: a dict of properties to update model :return: a Command that update Curso, validating and localizing properties received as strings """ return UpdateCursoCommand(curso_id, **curso_properties) def list_cursos_cmd(): """ Command to list Curso entities ordered by their creation dates :return: a Command proceed the db operations when executed """ return ListCursoCommand() def curso_form(**kwargs): """ Function to get Curso's detail form. :param kwargs: form properties :return: Form """ return CursoForm(**kwargs) def get_curso_cmd(curso_id): """ Find curso by her id :param curso_id: the curso id :return: Command """ return GetCursoCommand(curso_id) def delete_curso_cmd(curso_id): """ Construct a command to delete a Curso :param curso_id: curso's id :return: Command """ return DeleteCursoCommand(curso_id)
mit
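curso_facade.py above is a thin facade that returns Command objects instead of touching the datastore directly. The sketch below shows how calling code might obtain those commands; the property name 'nome' and the execute()/result execution protocol are assumptions about the surrounding gaegraph-style project, not facts taken from this file.

from curso_app import curso_facade

save_cmd = curso_facade.save_curso_cmd(nome='Algebra Linear')  # 'nome' is an assumed Curso property
list_cmd = curso_facade.list_cursos_cmd()
form = curso_facade.curso_form()
# Execution is deferred to the project's Command protocol,
# e.g. save_cmd.execute().result (assumed API, shown only for orientation).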
abilian/abilian-core
src/abilian/services/viewtracker/models.py
1
1611
from datetime import datetime from sqlalchemy.orm import relationship from sqlalchemy.schema import Column, ForeignKey from sqlalchemy.types import DateTime, Integer from abilian.core.entities import Entity, db from abilian.core.models.subjects import User # TODO: remove duplicate def _default_from(column): """Helper for default and onupdates parameters in a Column definitions. Returns a `context-sensitive default function <http://docs.sqlalchemy.org/en/rel_0_8/core/defaults.html#context- sensitive-default-functions>`_ to set value from another column. """ def _default_value(context): return context.current_parameters[column] return _default_value class View(db.Model): __tablename__ = "view" id = Column(Integer, primary_key=True, autoincrement=True, nullable=False) #: viewed entity id entity_id = Column(Integer, default=_default_from("_fk_entity_id"), nullable=False) _fk_entity_id = Column(Integer, ForeignKey(Entity.id, ondelete="SET NULL")) entity = relationship(Entity, foreign_keys=_fk_entity_id) #: user id user_id = Column(Integer, ForeignKey(User.id), nullable=False) user = relationship(User, foreign_keys=user_id) hits = db.relationship( "Hit", backref="view", order_by="Hit.viewed_at", lazy="dynamic" ) class Hit(db.Model): __tablename__ = "hit" id = Column(Integer, primary_key=True, autoincrement=True, nullable=False) view_id = db.Column(db.Integer, db.ForeignKey(View.id)) #: time of view viewed_at = Column(DateTime, default=datetime.utcnow, nullable=True)
lgpl-2.1
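The View model above uses a context-sensitive column default (_default_from) so that entity_id is filled from the _fk_entity_id parameter of the same INSERT. The self-contained sketch below reproduces that pattern with plain SQLAlchemy (1.4+ API assumed); the table and column names are illustrative only.

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

def default_from(column_name):
    """Return a context-sensitive default that copies another column's value."""
    def _default_value(context):
        return context.get_current_parameters()[column_name]
    return _default_value

class Example(Base):
    __tablename__ = 'example'
    id = Column(Integer, primary_key=True)
    source = Column(String, nullable=False)
    copy_of_source = Column(String, default=default_from('source'))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Example(source='hello'))
    session.commit()
    print(session.query(Example.copy_of_source).scalar())  # prints 'hello'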
grlee77/nipype
nipype/workflows/dmri/fsl/artifacts.py
9
39638
# coding: utf-8 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: import os import nipype.pipeline.engine as pe from nipype.interfaces.io import JSONFileGrabber from nipype.interfaces import utility as niu from nipype.interfaces import freesurfer as fs from nipype.interfaces import ants from nipype.interfaces import fsl from .utils import * def all_fmb_pipeline(name='hmc_sdc_ecc', fugue_params=dict(smooth3d=2.0)): """ Builds a pipeline including three artifact corrections: head-motion correction (HMC), susceptibility-derived distortion correction (SDC), and Eddy currents-derived distortion correction (ECC). The displacement fields from each kind of distortions are combined. Thus, only one interpolation occurs between input data and result. .. warning:: this workflow rotates the gradients table (*b*-vectors) [Leemans09]_. Examples -------- >>> from nipype.workflows.dmri.fsl.artifacts import all_fmb_pipeline >>> allcorr = all_fmb_pipeline() >>> allcorr.inputs.inputnode.in_file = 'epi.nii' >>> allcorr.inputs.inputnode.in_bval = 'diffusion.bval' >>> allcorr.inputs.inputnode.in_bvec = 'diffusion.bvec' >>> allcorr.inputs.inputnode.bmap_mag = 'magnitude.nii' >>> allcorr.inputs.inputnode.bmap_pha = 'phase.nii' >>> allcorr.inputs.inputnode.epi_param = 'epi_param.txt' >>> allcorr.run() # doctest: +SKIP """ inputnode = pe.Node(niu.IdentityInterface( fields=['in_file', 'in_bvec', 'in_bval', 'bmap_pha', 'bmap_mag', 'epi_param']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface( fields=['out_file', 'out_mask', 'out_bvec']), name='outputnode') list_b0 = pe.Node(niu.Function( input_names=['in_bval'], output_names=['out_idx'], function=b0_indices), name='B0indices') avg_b0_0 = pe.Node(niu.Function( input_names=['in_file', 'index'], output_names=['out_file'], function=time_avg), name='b0_avg_pre') avg_b0_1 = pe.Node(niu.Function( input_names=['in_file', 'index'], output_names=['out_file'], function=time_avg), name='b0_avg_post') bet_dwi0 = pe.Node(fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_pre') bet_dwi1 = pe.Node(fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_post') hmc = hmc_pipeline() sdc = sdc_fmb(fugue_params=fugue_params) ecc = ecc_pipeline() unwarp = apply_all_corrections() wf = pe.Workflow(name=name) wf.connect([ (inputnode, hmc, [('in_file', 'inputnode.in_file'), ('in_bvec', 'inputnode.in_bvec'), ('in_bval', 'inputnode.in_bval')]), (inputnode, list_b0, [('in_bval', 'in_bval')]), (inputnode, avg_b0_0, [('in_file', 'in_file')]), (list_b0, avg_b0_0, [('out_idx', 'index')]), (avg_b0_0, bet_dwi0, [('out_file', 'in_file')]), (bet_dwi0, hmc, [('mask_file', 'inputnode.in_mask')]), (hmc, sdc, [ ('outputnode.out_file', 'inputnode.in_file')]), (bet_dwi0, sdc, [('mask_file', 'inputnode.in_mask')]), (inputnode, sdc, [('bmap_pha', 'inputnode.bmap_pha'), ('bmap_mag', 'inputnode.bmap_mag'), ('epi_param', 'inputnode.settings')]), (list_b0, sdc, [('out_idx', 'inputnode.in_ref')]), (hmc, ecc, [ ('outputnode.out_xfms', 'inputnode.in_xfms')]), (inputnode, ecc, [('in_file', 'inputnode.in_file'), ('in_bval', 'inputnode.in_bval')]), (bet_dwi0, ecc, [('mask_file', 'inputnode.in_mask')]), (ecc, avg_b0_1, [('outputnode.out_file', 'in_file')]), (list_b0, avg_b0_1, [('out_idx', 'index')]), (avg_b0_1, bet_dwi1, [('out_file', 'in_file')]), (inputnode, unwarp, [('in_file', 'inputnode.in_dwi')]), (hmc, unwarp, [('outputnode.out_xfms', 'inputnode.in_hmc')]), (ecc, unwarp, [('outputnode.out_xfms', 'inputnode.in_ecc')]), (sdc, unwarp, 
[('outputnode.out_warp', 'inputnode.in_sdc')]), (hmc, outputnode, [('outputnode.out_bvec', 'out_bvec')]), (unwarp, outputnode, [('outputnode.out_file', 'out_file')]), (bet_dwi1, outputnode, [('mask_file', 'out_mask')]) ]) return wf def all_peb_pipeline(name='hmc_sdc_ecc', epi_params=dict(echospacing=0.77e-3, acc_factor=3, enc_dir='y-', epi_factor=1), altepi_params=dict(echospacing=0.77e-3, acc_factor=3, enc_dir='y', epi_factor=1)): """ Builds a pipeline including three artifact corrections: head-motion correction (HMC), susceptibility-derived distortion correction (SDC), and Eddy currents-derived distortion correction (ECC). .. warning:: this workflow rotates the gradients table (*b*-vectors) [Leemans09]_. Examples -------- >>> from nipype.workflows.dmri.fsl.artifacts import all_peb_pipeline >>> allcorr = all_peb_pipeline() >>> allcorr.inputs.inputnode.in_file = 'epi.nii' >>> allcorr.inputs.inputnode.alt_file = 'epi_rev.nii' >>> allcorr.inputs.inputnode.in_bval = 'diffusion.bval' >>> allcorr.inputs.inputnode.in_bvec = 'diffusion.bvec' >>> allcorr.run() # doctest: +SKIP """ inputnode = pe.Node(niu.IdentityInterface( fields=['in_file', 'in_bvec', 'in_bval', 'alt_file']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface( fields=['out_file', 'out_mask', 'out_bvec']), name='outputnode') avg_b0_0 = pe.Node(niu.Function( input_names=['in_dwi', 'in_bval'], output_names=['out_file'], function=b0_average), name='b0_avg_pre') avg_b0_1 = pe.Node(niu.Function( input_names=['in_dwi', 'in_bval'], output_names=['out_file'], function=b0_average), name='b0_avg_post') bet_dwi0 = pe.Node(fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_pre') bet_dwi1 = pe.Node(fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_post') hmc = hmc_pipeline() sdc = sdc_peb(epi_params=epi_params, altepi_params=altepi_params) ecc = ecc_pipeline() unwarp = apply_all_corrections() wf = pe.Workflow(name=name) wf.connect([ (inputnode, hmc, [('in_file', 'inputnode.in_file'), ('in_bvec', 'inputnode.in_bvec'), ('in_bval', 'inputnode.in_bval')]), (inputnode, avg_b0_0, [('in_file', 'in_dwi'), ('in_bval', 'in_bval')]), (avg_b0_0, bet_dwi0, [('out_file', 'in_file')]), (bet_dwi0, hmc, [('mask_file', 'inputnode.in_mask')]), (hmc, sdc, [ ('outputnode.out_file', 'inputnode.in_file')]), (bet_dwi0, sdc, [('mask_file', 'inputnode.in_mask')]), (inputnode, sdc, [('in_bval', 'inputnode.in_bval'), ('alt_file', 'inputnode.alt_file')]), (inputnode, ecc, [('in_file', 'inputnode.in_file'), ('in_bval', 'inputnode.in_bval')]), (bet_dwi0, ecc, [('mask_file', 'inputnode.in_mask')]), (hmc, ecc, [ ('outputnode.out_xfms', 'inputnode.in_xfms')]), (ecc, avg_b0_1, [('outputnode.out_file', 'in_dwi')]), (inputnode, avg_b0_1, [('in_bval', 'in_bval')]), (avg_b0_1, bet_dwi1, [('out_file', 'in_file')]), (inputnode, unwarp, [('in_file', 'inputnode.in_dwi')]), (hmc, unwarp, [('outputnode.out_xfms', 'inputnode.in_hmc')]), (ecc, unwarp, [('outputnode.out_xfms', 'inputnode.in_ecc')]), (sdc, unwarp, [('outputnode.out_warp', 'inputnode.in_sdc')]), (hmc, outputnode, [('outputnode.out_bvec', 'out_bvec')]), (unwarp, outputnode, [('outputnode.out_file', 'out_file')]), (bet_dwi1, outputnode, [('mask_file', 'out_mask')]) ]) return wf def all_fsl_pipeline(name='fsl_all_correct', epi_params=dict(echospacing=0.77e-3, acc_factor=3, enc_dir='y-'), altepi_params=dict(echospacing=0.77e-3, acc_factor=3, enc_dir='y')): """ Workflow that integrates FSL ``topup`` and ``eddy``. .. warning:: this workflow rotates the gradients table (*b*-vectors) [Leemans09]_. .. 
warning:: this workflow does not perform jacobian modulation of each *DWI* [Jones10]_. Examples -------- >>> from nipype.workflows.dmri.fsl.artifacts import all_fsl_pipeline >>> allcorr = all_fsl_pipeline() >>> allcorr.inputs.inputnode.in_file = 'epi.nii' >>> allcorr.inputs.inputnode.alt_file = 'epi_rev.nii' >>> allcorr.inputs.inputnode.in_bval = 'diffusion.bval' >>> allcorr.inputs.inputnode.in_bvec = 'diffusion.bvec' >>> allcorr.run() # doctest: +SKIP """ inputnode = pe.Node(niu.IdentityInterface( fields=['in_file', 'in_bvec', 'in_bval', 'alt_file']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface( fields=['out_file', 'out_mask', 'out_bvec']), name='outputnode') def _gen_index(in_file): import numpy as np import nibabel as nb import os out_file = os.path.abspath('index.txt') vols = nb.load(in_file).get_data().shape[-1] np.savetxt(out_file, np.ones((vols,)).T) return out_file avg_b0_0 = pe.Node(niu.Function( input_names=['in_dwi', 'in_bval'], output_names=['out_file'], function=b0_average), name='b0_avg_pre') bet_dwi0 = pe.Node(fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_pre') sdc = sdc_peb(epi_params=epi_params, altepi_params=altepi_params) ecc = pe.Node(fsl.Eddy(method='jac'), name='fsl_eddy') rot_bvec = pe.Node(niu.Function( input_names=['in_bvec', 'eddy_params'], output_names=['out_file'], function=eddy_rotate_bvecs), name='Rotate_Bvec') avg_b0_1 = pe.Node(niu.Function( input_names=['in_dwi', 'in_bval'], output_names=['out_file'], function=b0_average), name='b0_avg_post') bet_dwi1 = pe.Node(fsl.BET(frac=0.3, mask=True, robust=True), name='bet_dwi_post') wf = pe.Workflow(name=name) wf.connect([ (inputnode, avg_b0_0, [('in_file', 'in_dwi'), ('in_bval', 'in_bval')]), (avg_b0_0, bet_dwi0, [('out_file', 'in_file')]), (bet_dwi0, sdc, [('mask_file', 'inputnode.in_mask')]), (inputnode, sdc, [('in_file', 'inputnode.in_file'), ('alt_file', 'inputnode.alt_file'), ('in_bval', 'inputnode.in_bval')]), (sdc, ecc, [('topup.out_enc_file', 'in_acqp'), ('topup.out_fieldcoef', 'in_topup_fieldcoef'), ('topup.out_movpar', 'in_topup_movpar')]), (bet_dwi0, ecc, [('mask_file', 'in_mask')]), (inputnode, ecc, [('in_file', 'in_file'), (('in_file', _gen_index), 'in_index'), ('in_bval', 'in_bval'), ('in_bvec', 'in_bvec')]), (inputnode, rot_bvec, [('in_bvec', 'in_bvec')]), (ecc, rot_bvec, [('out_parameter', 'eddy_params')]), (ecc, avg_b0_1, [('out_corrected', 'in_dwi')]), (inputnode, avg_b0_1, [('in_bval', 'in_bval')]), (avg_b0_1, bet_dwi1, [('out_file', 'in_file')]), (ecc, outputnode, [('out_corrected', 'out_file')]), (rot_bvec, outputnode, [('out_file', 'out_bvec')]), (bet_dwi1, outputnode, [('mask_file', 'out_mask')]) ]) return wf def hmc_pipeline(name='motion_correct'): """ HMC stands for head-motion correction. Creates a pipeline that corrects for head motion artifacts in dMRI sequences. It takes a series of diffusion weighted images and rigidly co-registers them to one reference image. Finally, the `b`-matrix is rotated accordingly [Leemans09]_ making use of the rotation matrix obtained by FLIRT. Search angles have been limited to 4 degrees, based on results in [Yendiki13]_. A list of rigid transformation matrices is provided, so that transforms can be chained. This is useful to correct for artifacts with only one interpolation process (as previously discussed `here <https://github.com/nipy/nipype/pull/530#issuecomment-14505042>`_), and also to compute nuisance regressors as proposed by [Yendiki13]_. .. 
warning:: This workflow rotates the `b`-vectors, so please be advised that not all the dicom converters ensure the consistency between the resulting nifti orientation and the gradients table (e.g. dcm2nii checks it). .. admonition:: References .. [Leemans09] Leemans A, and Jones DK, `The B-matrix must be rotated when correcting for subject motion in DTI data <http://dx.doi.org/10.1002/mrm.21890>`_, Magn Reson Med. 61(6):1336-49. 2009. doi: 10.1002/mrm.21890. .. [Yendiki13] Yendiki A et al., `Spurious group differences due to head motion in a diffusion MRI study <http://dx.doi.org/10.1016/j.neuroimage.2013.11.027>`_. Neuroimage. 21(88C):79-90. 2013. doi: 10.1016/j.neuroimage.2013.11.027 Example ------- >>> from nipype.workflows.dmri.fsl.artifacts import hmc_pipeline >>> hmc = hmc_pipeline() >>> hmc.inputs.inputnode.in_file = 'diffusion.nii' >>> hmc.inputs.inputnode.in_bvec = 'diffusion.bvec' >>> hmc.inputs.inputnode.in_bval = 'diffusion.bval' >>> hmc.inputs.inputnode.in_mask = 'mask.nii' >>> hmc.run() # doctest: +SKIP Inputs:: inputnode.in_file - input dwi file inputnode.in_mask - weights mask of reference image (a file with data \ range in [0.0, 1.0], indicating the weight of each voxel when computing the \ metric. inputnode.in_bvec - gradients file (b-vectors) inputnode.ref_num (optional, default=0) index of the b0 volume that \ should be taken as reference Outputs:: outputnode.out_file - corrected dwi file outputnode.out_bvec - rotated gradient vectors table outputnode.out_xfms - list of transformation matrices """ from nipype.workflows.data import get_flirt_schedule params = dict(dof=6, bgvalue=0, save_log=True, no_search=True, # cost='mutualinfo', cost_func='mutualinfo', bins=64, schedule=get_flirt_schedule('hmc')) inputnode = pe.Node(niu.IdentityInterface( fields=['in_file', 'ref_num', 'in_bvec', 'in_bval', 'in_mask']), name='inputnode') split = pe.Node(niu.Function( output_names=['out_ref', 'out_mov', 'out_bval', 'volid'], input_names=['in_file', 'in_bval', 'ref_num'], function=hmc_split), name='SplitDWI') flirt = dwi_flirt(flirt_param=params) insmat = pe.Node(niu.Function(input_names=['inlist', 'volid'], output_names=['out'], function=insert_mat), name='InsertRefmat') rot_bvec = pe.Node(niu.Function( function=rotate_bvecs, input_names=['in_bvec', 'in_matrix'], output_names=['out_file']), name='Rotate_Bvec') outputnode = pe.Node(niu.IdentityInterface( fields=['out_file', 'out_bvec', 'out_xfms']), name='outputnode') wf = pe.Workflow(name=name) wf.connect([ (inputnode, split, [('in_file', 'in_file'), ('in_bval', 'in_bval'), ('ref_num', 'ref_num')]), (inputnode, flirt, [('in_mask', 'inputnode.ref_mask')]), (split, flirt, [('out_ref', 'inputnode.reference'), ('out_mov', 'inputnode.in_file'), ('out_bval', 'inputnode.in_bval')]), (flirt, insmat, [('outputnode.out_xfms', 'inlist')]), (split, insmat, [('volid', 'volid')]), (inputnode, rot_bvec, [('in_bvec', 'in_bvec')]), (insmat, rot_bvec, [('out', 'in_matrix')]), (rot_bvec, outputnode, [('out_file', 'out_bvec')]), (flirt, outputnode, [('outputnode.out_file', 'out_file')]), (insmat, outputnode, [('out', 'out_xfms')]) ]) return wf def ecc_pipeline(name='eddy_correct'): """ ECC stands for Eddy currents correction. Creates a pipeline that corrects for artifacts induced by Eddy currents in dMRI sequences. It takes a series of diffusion weighted images and linearly co-registers them to one reference image (the average of all b0s in the dataset). DWIs are also modulated by the determinant of the Jacobian as indicated by [Jones10]_ and [Rohde04]_. 
A list of rigid transformation matrices can be provided, sourcing from a :func:`.hmc_pipeline` workflow, to initialize registrations in a *motion free* framework. A list of affine transformation matrices is available as output, so that transforms can be chained (discussion `here <https://github.com/nipy/nipype/pull/530#issuecomment-14505042>`_). .. admonition:: References .. [Jones10] Jones DK, `The signal intensity must be modulated by the determinant of the Jacobian when correcting for eddy currents in diffusion MRI <http://cds.ismrm.org/protected/10MProceedings/files/1644_129.pdf>`_, Proc. ISMRM 18th Annual Meeting, (2010). .. [Rohde04] Rohde et al., `Comprehensive Approach for Correction of Motion and Distortion in Diffusion-Weighted MRI <http://stbb.nichd.nih.gov/pdf/com_app_cor_mri04.pdf>`_, MRM 51:103-114 (2004). Example ------- >>> from nipype.workflows.dmri.fsl.artifacts import ecc_pipeline >>> ecc = ecc_pipeline() >>> ecc.inputs.inputnode.in_file = 'diffusion.nii' >>> ecc.inputs.inputnode.in_bval = 'diffusion.bval' >>> ecc.inputs.inputnode.in_mask = 'mask.nii' >>> ecc.run() # doctest: +SKIP Inputs:: inputnode.in_file - input dwi file inputnode.in_mask - weights mask of reference image (a file with data \ range sin [0.0, 1.0], indicating the weight of each voxel when computing the \ metric. inputnode.in_bval - b-values table inputnode.in_xfms - list of matrices to initialize registration (from \ head-motion correction) Outputs:: outputnode.out_file - corrected dwi file outputnode.out_xfms - list of transformation matrices """ from nipype.workflows.data import get_flirt_schedule params = dict(dof=12, no_search=True, interp='spline', bgvalue=0, schedule=get_flirt_schedule('ecc')) # cost='normmi', cost_func='normmi', bins=64, inputnode = pe.Node(niu.IdentityInterface( fields=['in_file', 'in_bval', 'in_mask', 'in_xfms']), name='inputnode') avg_b0 = pe.Node(niu.Function( input_names=['in_dwi', 'in_bval'], output_names=['out_file'], function=b0_average), name='b0_avg') pick_dws = pe.Node(niu.Function( input_names=['in_dwi', 'in_bval', 'b'], output_names=['out_file'], function=extract_bval), name='ExtractDWI') pick_dws.inputs.b = 'diff' flirt = dwi_flirt(flirt_param=params, excl_nodiff=True) mult = pe.MapNode(fsl.BinaryMaths(operation='mul'), name='ModulateDWIs', iterfield=['in_file', 'operand_value']) thres = pe.MapNode(fsl.Threshold(thresh=0.0), iterfield=['in_file'], name='RemoveNegative') split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs') get_mat = pe.Node(niu.Function( input_names=['in_bval', 'in_xfms'], output_names=['out_files'], function=recompose_xfm), name='GatherMatrices') merge = pe.Node(niu.Function( input_names=['in_dwi', 'in_bval', 'in_corrected'], output_names=['out_file'], function=recompose_dwi), name='MergeDWIs') outputnode = pe.Node(niu.IdentityInterface( fields=['out_file', 'out_xfms']), name='outputnode') wf = pe.Workflow(name=name) wf.connect([ (inputnode, avg_b0, [('in_file', 'in_dwi'), ('in_bval', 'in_bval')]), (inputnode, pick_dws, [('in_file', 'in_dwi'), ('in_bval', 'in_bval')]), (inputnode, merge, [('in_file', 'in_dwi'), ('in_bval', 'in_bval')]), (inputnode, flirt, [('in_mask', 'inputnode.ref_mask'), ('in_xfms', 'inputnode.in_xfms'), ('in_bval', 'inputnode.in_bval')]), (inputnode, get_mat, [('in_bval', 'in_bval')]), (avg_b0, flirt, [('out_file', 'inputnode.reference')]), (pick_dws, flirt, [('out_file', 'inputnode.in_file')]), (flirt, get_mat, [('outputnode.out_xfms', 'in_xfms')]), (flirt, mult, [(('outputnode.out_xfms', _xfm_jacobian), 
'operand_value')]), (flirt, split, [('outputnode.out_file', 'in_file')]), (split, mult, [('out_files', 'in_file')]), (mult, thres, [('out_file', 'in_file')]), (thres, merge, [('out_file', 'in_corrected')]), (get_mat, outputnode, [('out_files', 'out_xfms')]), (merge, outputnode, [('out_file', 'out_file')]) ]) return wf def sdc_fmb(name='fmb_correction', interp='Linear', fugue_params=dict(smooth3d=2.0)): """ SDC stands for susceptibility distortion correction. FMB stands for fieldmap-based. The fieldmap based (FMB) method implements SDC by using a mapping of the B0 field as proposed by [Jezzard95]_. This workflow uses the implementation of FSL (`FUGUE <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FUGUE>`_). Phase unwrapping is performed using `PRELUDE <http://fsl.fmrib.ox.ac.uk/fsl/fsl-4.1.9/fugue/prelude.html>`_ [Jenkinson03]_. Preparation of the fieldmap is performed reproducing the script in FSL `fsl_prepare_fieldmap <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/FUGUE/Guide#SIEMENS_data>`_. Example ------- >>> from nipype.workflows.dmri.fsl.artifacts import sdc_fmb >>> fmb = sdc_fmb() >>> fmb.inputs.inputnode.in_file = 'diffusion.nii' >>> fmb.inputs.inputnode.in_ref = range(0, 30, 6) >>> fmb.inputs.inputnode.in_mask = 'mask.nii' >>> fmb.inputs.inputnode.bmap_mag = 'magnitude.nii' >>> fmb.inputs.inputnode.bmap_pha = 'phase.nii' >>> fmb.inputs.inputnode.settings = 'epi_param.txt' >>> fmb.run() # doctest: +SKIP .. warning:: Only SIEMENS format fieldmaps are supported. .. admonition:: References .. [Jezzard95] Jezzard P, and Balaban RS, `Correction for geometric distortion in echo planar images from B0 field variations <http://dx.doi.org/10.1002/mrm.1910340111>`_, MRM 34(1):65-73. (1995). doi: 10.1002/mrm.1910340111. .. [Jenkinson03] Jenkinson M., `Fast, automated, N-dimensional phase-unwrapping algorithm <http://dx.doi.org/10.1002/mrm.10354>`_, MRM 49(1):193-197, 2003, doi: 10.1002/mrm.10354. 
""" epi_defaults = {'delta_te': 2.46e-3, 'echospacing': 0.77e-3, 'acc_factor': 2, 'enc_dir': u'AP'} inputnode = pe.Node(niu.IdentityInterface( fields=['in_file', 'in_ref', 'in_mask', 'bmap_pha', 'bmap_mag', 'settings']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface( fields=['out_file', 'out_vsm', 'out_warp']), name='outputnode') r_params = pe.Node(JSONFileGrabber(defaults=epi_defaults), name='SettingsGrabber') eff_echo = pe.Node(niu.Function(function=_eff_t_echo, input_names=['echospacing', 'acc_factor'], output_names=['eff_echo']), name='EffEcho') firstmag = pe.Node(fsl.ExtractROI(t_min=0, t_size=1), name='GetFirst') n4 = pe.Node(ants.N4BiasFieldCorrection(dimension=3), name='Bias') bet = pe.Node(fsl.BET(frac=0.4, mask=True), name='BrainExtraction') dilate = pe.Node(fsl.maths.MathsCommand( nan2zeros=True, args='-kernel sphere 5 -dilM'), name='MskDilate') pha2rads = pe.Node(niu.Function( input_names=['in_file'], output_names=['out_file'], function=siemens2rads), name='PreparePhase') prelude = pe.Node(fsl.PRELUDE(process3d=True), name='PhaseUnwrap') rad2rsec = pe.Node(niu.Function( input_names=['in_file', 'delta_te'], output_names=['out_file'], function=rads2radsec), name='ToRadSec') baseline = pe.Node(niu.Function( input_names=['in_file', 'index'], output_names=['out_file'], function=time_avg), name='Baseline') fmm2b0 = pe.Node(ants.Registration(output_warped_image=True), name="FMm_to_B0") fmm2b0.inputs.transforms = ['Rigid'] * 2 fmm2b0.inputs.transform_parameters = [(1.0,)] * 2 fmm2b0.inputs.number_of_iterations = [[50], [20]] fmm2b0.inputs.dimension = 3 fmm2b0.inputs.metric = ['Mattes', 'Mattes'] fmm2b0.inputs.metric_weight = [1.0] * 2 fmm2b0.inputs.radius_or_number_of_bins = [64, 64] fmm2b0.inputs.sampling_strategy = ['Regular', 'Random'] fmm2b0.inputs.sampling_percentage = [None, 0.2] fmm2b0.inputs.convergence_threshold = [1.e-5, 1.e-8] fmm2b0.inputs.convergence_window_size = [20, 10] fmm2b0.inputs.smoothing_sigmas = [[6.0], [2.0]] fmm2b0.inputs.sigma_units = ['vox'] * 2 fmm2b0.inputs.shrink_factors = [[6], [1]] # ,[1] ] fmm2b0.inputs.use_estimate_learning_rate_once = [True] * 2 fmm2b0.inputs.use_histogram_matching = [True] * 2 fmm2b0.inputs.initial_moving_transform_com = 0 fmm2b0.inputs.collapse_output_transforms = True fmm2b0.inputs.winsorize_upper_quantile = 0.995 applyxfm = pe.Node(ants.ApplyTransforms( dimension=3, interpolation=interp), name='FMp_to_B0') pre_fugue = pe.Node(fsl.FUGUE(save_fmap=True), name='PreliminaryFugue') demean = pe.Node(niu.Function( input_names=['in_file', 'in_mask'], output_names=['out_file'], function=demean_image), name='DemeanFmap') cleanup = cleanup_edge_pipeline() addvol = pe.Node(niu.Function( input_names=['in_file'], output_names=['out_file'], function=add_empty_vol), name='AddEmptyVol') vsm = pe.Node(fsl.FUGUE(save_shift=True, **fugue_params), name="ComputeVSM") split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs') merge = pe.Node(fsl.Merge(dimension='t'), name='MergeDWIs') unwarp = pe.MapNode(fsl.FUGUE(icorr=True, forward_warping=False), iterfield=['in_file'], name='UnwarpDWIs') thres = pe.MapNode(fsl.Threshold(thresh=0.0), iterfield=['in_file'], name='RemoveNegative') vsm2dfm = vsm2warp() vsm2dfm.inputs.inputnode.scaling = 1.0 wf = pe.Workflow(name=name) wf.connect([ (inputnode, r_params, [('settings', 'in_file')]), (r_params, eff_echo, [('echospacing', 'echospacing'), ('acc_factor', 'acc_factor')]), (inputnode, pha2rads, [('bmap_pha', 'in_file')]), (inputnode, firstmag, [('bmap_mag', 'in_file')]), (inputnode, baseline, 
[('in_file', 'in_file'), ('in_ref', 'index')]), (firstmag, n4, [('roi_file', 'input_image')]), (n4, bet, [('output_image', 'in_file')]), (bet, dilate, [('mask_file', 'in_file')]), (pha2rads, prelude, [('out_file', 'phase_file')]), (n4, prelude, [('output_image', 'magnitude_file')]), (dilate, prelude, [('out_file', 'mask_file')]), (r_params, rad2rsec, [('delta_te', 'delta_te')]), (prelude, rad2rsec, [('unwrapped_phase_file', 'in_file')]), (baseline, fmm2b0, [('out_file', 'fixed_image')]), (n4, fmm2b0, [('output_image', 'moving_image')]), (inputnode, fmm2b0, [('in_mask', 'fixed_image_mask')]), (dilate, fmm2b0, [('out_file', 'moving_image_mask')]), (baseline, applyxfm, [('out_file', 'reference_image')]), (rad2rsec, applyxfm, [('out_file', 'input_image')]), (fmm2b0, applyxfm, [ ('forward_transforms', 'transforms'), ('forward_invert_flags', 'invert_transform_flags')]), (applyxfm, pre_fugue, [('output_image', 'fmap_in_file')]), (inputnode, pre_fugue, [('in_mask', 'mask_file')]), (pre_fugue, demean, [('fmap_out_file', 'in_file')]), (inputnode, demean, [('in_mask', 'in_mask')]), (demean, cleanup, [('out_file', 'inputnode.in_file')]), (inputnode, cleanup, [('in_mask', 'inputnode.in_mask')]), (cleanup, addvol, [('outputnode.out_file', 'in_file')]), (inputnode, vsm, [('in_mask', 'mask_file')]), (addvol, vsm, [('out_file', 'fmap_in_file')]), (r_params, vsm, [('delta_te', 'asym_se_time')]), (eff_echo, vsm, [('eff_echo', 'dwell_time')]), (inputnode, split, [('in_file', 'in_file')]), (split, unwarp, [('out_files', 'in_file')]), (vsm, unwarp, [('shift_out_file', 'shift_in_file')]), (r_params, unwarp, [ (('enc_dir', _fix_enc_dir), 'unwarp_direction')]), (unwarp, thres, [('unwarped_file', 'in_file')]), (thres, merge, [('out_file', 'in_files')]), (r_params, vsm2dfm, [ (('enc_dir', _fix_enc_dir), 'inputnode.enc_dir')]), (merge, vsm2dfm, [('merged_file', 'inputnode.in_ref')]), (vsm, vsm2dfm, [('shift_out_file', 'inputnode.in_vsm')]), (merge, outputnode, [('merged_file', 'out_file')]), (vsm, outputnode, [('shift_out_file', 'out_vsm')]), (vsm2dfm, outputnode, [('outputnode.out_warp', 'out_warp')]) ]) return wf def sdc_peb(name='peb_correction', epi_params=dict(echospacing=0.77e-3, acc_factor=3, enc_dir='y-', epi_factor=1), altepi_params=dict(echospacing=0.77e-3, acc_factor=3, enc_dir='y', epi_factor=1)): """ SDC stands for susceptibility distortion correction. PEB stands for phase-encoding-based. The phase-encoding-based (PEB) method implements SDC by acquiring diffusion images with two different enconding directions [Andersson2003]_. The most typical case is acquiring with opposed phase-gradient blips (e.g. *A>>>P* and *P>>>A*, or equivalently, *-y* and *y*) as in [Chiou2000]_, but it is also possible to use orthogonal configurations [Cordes2000]_ (e.g. *A>>>P* and *L>>>R*, or equivalently *-y* and *x*). This workflow uses the implementation of FSL (`TOPUP <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/TOPUP>`_). Example ------- >>> from nipype.workflows.dmri.fsl.artifacts import sdc_peb >>> peb = sdc_peb() >>> peb.inputs.inputnode.in_file = 'epi.nii' >>> peb.inputs.inputnode.alt_file = 'epi_rev.nii' >>> peb.inputs.inputnode.in_bval = 'diffusion.bval' >>> peb.inputs.inputnode.in_mask = 'mask.nii' >>> peb.run() # doctest: +SKIP .. admonition:: References .. [Andersson2003] Andersson JL et al., `How to correct susceptibility distortions in spin-echo echo-planar images: application to diffusion tensor imaging <http://dx.doi.org/10.1016/S1053-8119(03)00336-7>`_. Neuroimage. 2003 Oct;20(2):870-88. 
doi: 10.1016/S1053-8119(03)00336-7 .. [Cordes2000] Cordes D et al., Geometric distortion correction in EPI using two images with orthogonal phase-encoding directions, in Proc. ISMRM (8), p.1712, Denver, US, 2000. .. [Chiou2000] Chiou JY, and Nalcioglu O, A simple method to correct off-resonance related distortion in echo planar imaging, in Proc. ISMRM (8), p.1712, Denver, US, 2000. """ inputnode = pe.Node(niu.IdentityInterface( fields=['in_file', 'in_bval', 'in_mask', 'alt_file', 'ref_num']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface( fields=['out_file', 'out_vsm', 'out_warp']), name='outputnode') b0_ref = pe.Node(fsl.ExtractROI(t_size=1), name='b0_ref') b0_alt = pe.Node(fsl.ExtractROI(t_size=1), name='b0_alt') b0_comb = pe.Node(niu.Merge(2), name='b0_list') b0_merge = pe.Node(fsl.Merge(dimension='t'), name='b0_merged') topup = pe.Node(fsl.TOPUP(), name='topup') topup.inputs.encoding_direction = [epi_params['enc_dir'], altepi_params['enc_dir']] readout = compute_readout(epi_params) topup.inputs.readout_times = [readout, compute_readout(altepi_params)] unwarp = pe.Node(fsl.ApplyTOPUP(in_index=[1], method='jac'), name='unwarp') # scaling = pe.Node(niu.Function(input_names=['in_file', 'enc_dir'], # output_names=['factor'], function=_get_zoom), # name='GetZoom') # scaling.inputs.enc_dir = epi_params['enc_dir'] vsm2dfm = vsm2warp() vsm2dfm.inputs.inputnode.enc_dir = epi_params['enc_dir'] vsm2dfm.inputs.inputnode.scaling = readout wf = pe.Workflow(name=name) wf.connect([ (inputnode, b0_ref, [('in_file', 'in_file'), (('ref_num', _checkrnum), 't_min')]), (inputnode, b0_alt, [('alt_file', 'in_file'), (('ref_num', _checkrnum), 't_min')]), (b0_ref, b0_comb, [('roi_file', 'in1')]), (b0_alt, b0_comb, [('roi_file', 'in2')]), (b0_comb, b0_merge, [('out', 'in_files')]), (b0_merge, topup, [('merged_file', 'in_file')]), (topup, unwarp, [('out_fieldcoef', 'in_topup_fieldcoef'), ('out_movpar', 'in_topup_movpar'), ('out_enc_file', 'encoding_file')]), (inputnode, unwarp, [('in_file', 'in_files')]), (unwarp, outputnode, [('out_corrected', 'out_file')]), # (b0_ref, scaling, [('roi_file', 'in_file')]), # (scaling, vsm2dfm, [('factor', 'inputnode.scaling')]), (b0_ref, vsm2dfm, [('roi_file', 'inputnode.in_ref')]), (topup, vsm2dfm, [('out_field', 'inputnode.in_vsm')]), (topup, outputnode, [('out_field', 'out_vsm')]), (vsm2dfm, outputnode, [('outputnode.out_warp', 'out_warp')]) ]) return wf def remove_bias(name='bias_correct'): """ This workflow estimates a single multiplicative bias field from the averaged *b0* image, as suggested in [Jeurissen2014]_. .. admonition:: References .. [Jeurissen2014] Jeurissen B. et al., `Multi-tissue constrained spherical deconvolution for improved analysis of multi-shell diffusion MRI data <http://dx.doi.org/10.1016/j.neuroimage.2014.07.061>`_. NeuroImage (2014). 
doi: 10.1016/j.neuroimage.2014.07.061 Example ------- >>> from nipype.workflows.dmri.fsl.artifacts import remove_bias >>> bias = remove_bias() >>> bias.inputs.inputnode.in_file = 'epi.nii' >>> bias.inputs.inputnode.in_bval = 'diffusion.bval' >>> bias.inputs.inputnode.in_mask = 'mask.nii' >>> bias.run() # doctest: +SKIP """ inputnode = pe.Node(niu.IdentityInterface( fields=['in_file', 'in_bval', 'in_mask']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface(fields=['out_file']), name='outputnode') avg_b0 = pe.Node(niu.Function( input_names=['in_dwi', 'in_bval'], output_names=['out_file'], function=b0_average), name='b0_avg') n4 = pe.Node(ants.N4BiasFieldCorrection( dimension=3, save_bias=True, bspline_fitting_distance=600), name='Bias_b0') split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs') mult = pe.MapNode(fsl.MultiImageMaths(op_string='-div %s'), iterfield=['in_file'], name='RemoveBiasOfDWIs') thres = pe.MapNode(fsl.Threshold(thresh=0.0), iterfield=['in_file'], name='RemoveNegative') merge = pe.Node(fsl.utils.Merge(dimension='t'), name='MergeDWIs') wf = pe.Workflow(name=name) wf.connect([ (inputnode, avg_b0, [('in_file', 'in_dwi'), ('in_bval', 'in_bval')]), (avg_b0, n4, [('out_file', 'input_image')]), (inputnode, n4, [('in_mask', 'mask_image')]), (inputnode, split, [('in_file', 'in_file')]), (n4, mult, [('bias_image', 'operand_files')]), (split, mult, [('out_files', 'in_file')]), (mult, thres, [('out_file', 'in_file')]), (thres, merge, [('out_file', 'in_files')]), (merge, outputnode, [('merged_file', 'out_file')]) ]) return wf def _eff_t_echo(echospacing, acc_factor): eff_echo = echospacing / (1.0 * acc_factor) return eff_echo def _fix_enc_dir(enc_dir): enc_dir = enc_dir.lower() if enc_dir == 'lr': return 'x-' if enc_dir == 'rl': return 'x' if enc_dir == 'ap': return 'y-' if enc_dir == 'pa': return 'y' return enc_dir def _checkrnum(ref_num): from nipype.interfaces.base import isdefined if (ref_num is None) or not isdefined(ref_num): return 0 return ref_num def _nonb0(in_bval): import numpy as np bvals = np.loadtxt(in_bval) return np.where(bvals != 0)[0].tolist() def _xfm_jacobian(in_xfm): import numpy as np from math import fabs return [fabs(np.linalg.det(np.loadtxt(xfm))) for xfm in in_xfm] def _get_zoom(in_file, enc_dir): import nibabel as nb zooms = nb.load(in_file).get_header().get_zooms() if 'y' in enc_dir: return zooms[1] elif 'x' in enc_dir: return zooms[0] elif 'z' in enc_dir: return zooms[2] else: raise ValueError('Wrong encoding direction string')
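# --- Illustrative sketch (not part of the original module) ---
# A minimal, hedged example of two helper functions defined above:
# _eff_t_echo() simply divides the echo spacing by the parallel-imaging
# acceleration factor, and _fix_enc_dir() normalises encoding-direction
# strings (e.g. 'AP' -> 'y-') before they are handed to FUGUE. The numeric
# values below mirror the defaults used by sdc_peb() and are only examples.
if __name__ == '__main__':
    # 0.77e-3 s echo spacing with an acceleration factor of 3
    print(_eff_t_echo(echospacing=0.77e-3, acc_factor=3))  # ~2.57e-04
    print(_fix_enc_dir('AP'))   # 'y-'
    print(_fix_enc_dir('lr'))   # 'x-'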
bsd-3-clause
Electrex/Electroactive-N6
arch/ia64/scripts/unwcheck.py
13143
1714
#!/usr/bin/python # # Usage: unwcheck.py FILE # # This script checks the unwind info of each function in file FILE # and verifies that the sum of the region-lengths matches the total # length of the function. # # Based on a shell/awk script originally written by Harish Patil, # which was converted to Perl by Matthew Chapman, which was converted # to Python by David Mosberger. # import os import re import sys if len(sys.argv) != 2: print "Usage: %s FILE" % sys.argv[0] sys.exit(2) readelf = os.getenv("READELF", "readelf") start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]") rlen_pattern = re.compile(".*rlen=([0-9]+)") def check_func (func, slots, rlen_sum): if slots != rlen_sum: global num_errors num_errors += 1 if not func: func = "[%#x-%#x]" % (start, end) print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum) return num_funcs = 0 num_errors = 0 func = False slots = 0 rlen_sum = 0 for line in os.popen("%s -u %s" % (readelf, sys.argv[1])): m = start_pattern.match(line) if m: check_func(func, slots, rlen_sum) func = m.group(1) start = long(m.group(2), 16) end = long(m.group(3), 16) slots = 3 * (end - start) / 16 rlen_sum = 0L num_funcs += 1 else: m = rlen_pattern.match(line) if m: rlen_sum += long(m.group(1)) check_func(func, slots, rlen_sum) if num_errors == 0: print "No errors detected in %u functions." % num_funcs else: if num_errors > 1: err="errors" else: err="error" print "%u %s detected in %u functions." % (num_errors, err, num_funcs) sys.exit(1)
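# Worked example (added for illustration, not part of the original script):
# on ia64 every 16-byte bundle holds 3 instruction slots, hence the
# "slots = 3 * (end - start) / 16" computation above. A function reported
# as <foo>: [0x4000-0x4080] spans 0x80 = 128 bytes, i.e. 3 * 128 / 16 = 24
# slots, so the rlen= values of its unwind regions must sum to 24 or
# check_func() prints an ERROR line for that function.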
gpl-2.0
fuentesdt/InsightToolkit-dev
Wrapping/Generators/Python/Tests/notYetUsable/ImageRegistration4.py
4
3959
#========================================================================== # # Copyright Insight Software Consortium # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #==========================================================================*/ from InsightToolkit import * from sys import argv # # Read the fixed and moving images using filenames # from the command line arguments # fixedImageReader = itkImageFileReaderF2_New() movingImageReader = itkImageFileReaderF2_New() fixedImageReader.SetFileName( argv[1] ) movingImageReader.SetFileName( argv[2] ) fixedImageReader.Update() movingImageReader.Update() fixedImage = fixedImageReader.GetOutput() movingImage = movingImageReader.GetOutput() # # Instantiate the classes for the registration framework # registration = itkImageRegistrationMethodF2F2_New() imageMetric = itkMattesMutualInformationImageToImageMetricF2F2_New() transform = itkTranslationTransform2_New() optimizer = itkRegularStepGradientDescentOptimizer_New() interpolator = itkLinearInterpolateImageFunctionF2D_New() imageMetric.SetNumberOfHistogramBins( 20 ); imageMetric.SetNumberOfSpatialSamples( 10000 ); registration.SetOptimizer( optimizer.GetPointer() ) registration.SetTransform( transform.GetPointer() ) registration.SetInterpolator( interpolator.GetPointer() ) registration.SetMetric( imageMetric.GetPointer() ) registration.SetFixedImage( fixedImage ) registration.SetMovingImage( movingImage ) registration.SetFixedImageRegion( fixedImage.GetBufferedRegion() ) transform.SetIdentity() initialParameters = transform.GetParameters() registration.SetInitialTransformParameters( initialParameters ) # # Iteration Observer # def iterationUpdate(): currentParameter = transform.GetParameters() print "M: %f P: %f %f " % ( optimizer.GetValue(), currentParameter.GetElement(0), currentParameter.GetElement(1) ) iterationCommand = itkPyCommand_New() iterationCommand.SetCommandCallable( iterationUpdate ) optimizer.AddObserver( itkIterationEvent(), iterationCommand.GetPointer() ) # # Define optimizer parameters # optimizer.SetMaximumStepLength( 4.00 ) optimizer.SetMinimumStepLength( 0.01 ) optimizer.SetNumberOfIterations( 200 ) print "Starting registration" # # Start the registration process # registration.Update() # # Get the final parameters of the transformation # finalParameters = registration.GetLastTransformParameters() print "Final Registration Parameters " print "Translation X = %f" % (finalParameters.GetElement(0),) print "Translation Y = %f" % (finalParameters.GetElement(1),) # # Now, we use the final transform for resampling the # moving image. 
# resampler = itkResampleImageFilterF2F2_New() resampler.SetTransform( transform.GetPointer() ) resampler.SetInput( movingImage ) region = fixedImage.GetLargestPossibleRegion() resampler.SetSize( region.GetSize() ) resampler.SetOutputSpacing( fixedImage.GetSpacing() ) resampler.SetOutputOrigin( fixedImage.GetOrigin() ) resampler.SetDefaultPixelValue( 100 ) outputCast = itkRescaleIntensityImageFilterF2US2_New() outputCast.SetOutputMinimum( 0 ) outputCast.SetOutputMaximum( 65535 ) outputCast.SetInput(resampler.GetOutput()) # # Write the resampled image # writer = itkImageFileWriterUS2_New() writer.SetFileName( argv[3] ) writer.SetInput( outputCast.GetOutput() ) writer.Update()
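# Illustrative invocation (added as a sketch; the file names below are
# placeholders, not part of the original example):
#
#   python ImageRegistration4.py fixedImage.png movingImage.png registered.png
#
# argv[1] and argv[2] are the fixed and moving images read at the top of the
# script; argv[3] is the resampled, intensity-rescaled result written by the
# final writer.Update() call.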
apache-2.0
cfenoy/easybuild-easyblocks
easybuild/easyblocks/t/tbb.py
5
4276
## # Copyright 2009-2015 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en), # the Hercules foundation (http://www.herculesstichting.be/in_English) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # http://github.com/hpcugent/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. ## """ EasyBuild support for installing the Intel Threading Building Blocks (TBB) library, implemented as an easyblock @author: Stijn De Weirdt (Ghent University) @author: Dries Verdegem (Ghent University) @author: Kenneth Hoste (Ghent University) @author: Pieter De Baets (Ghent University) @author: Jens Timmerman (Ghent University) """ import os import shutil import glob from distutils.version import LooseVersion from easybuild.easyblocks.generic.intelbase import IntelBase, ACTIVATION_NAME_2012, LICENSE_FILE_NAME_2012 from easybuild.tools.build_log import EasyBuildError class EB_tbb(IntelBase): """EasyBlock for tbb, threading building blocks""" def __init__(self, *args, **kwargs): """Initialisation of custom class variables for tbb""" super(EB_tbb, self).__init__(*args, **kwargs) self.libpath = 'UNKNOWN' def install_step(self): """Custom install step, to add extra symlinks""" silent_cfg_names_map = None if LooseVersion(self.version) < LooseVersion('4.2'): silent_cfg_names_map = { 'activation_name': ACTIVATION_NAME_2012, 'license_file_name': LICENSE_FILE_NAME_2012, } super(EB_tbb, self).install_step(silent_cfg_names_map=silent_cfg_names_map) # save libdir os.chdir(self.installdir) if LooseVersion(self.version) < LooseVersion('4.1.0'): libglob = 'tbb/lib/intel64/cc*libc*_kernel*' else: libglob = 'tbb/lib/intel64/gcc*' libs = sorted(glob.glob(libglob), key=LooseVersion) if len(libs): libdir = libs[-1] # take the last one, should be ordered by cc get_version. # we're only interested in the last bit libdir = libdir.split('/')[-1] else: raise EasyBuildError("No libs found using %s in %s", libglob, self.installdir) self.libdir = libdir self.libpath = os.path.join('tbb', 'libs', 'intel64', libdir) self.log.debug("self.libpath: %s" % self.libpath) # applications go looking into tbb/lib so we move what's in there to libs # and symlink the right lib from /tbb/libs/intel64/... to lib install_libpath = os.path.join(self.installdir, 'tbb', 'lib') shutil.move(install_libpath, os.path.join(self.installdir, 'tbb', 'libs')) os.symlink(os.path.join(self.installdir, self.libpath), install_libpath) def sanity_check_step(self): custom_paths = { 'files': [], 'dirs': ['tbb/bin', 'tbb/lib', 'tbb/libs'], } super(EB_tbb, self).sanity_check_step(custom_paths=custom_paths) def make_module_extra(self): """Add correct path to lib to LD_LIBRARY_PATH. 
and intel license file""" txt = super(EB_tbb, self).make_module_extra() txt += self.module_generator.prepend_paths('LD_LIBRARY_PATH', [self.libpath]) txt += self.module_generator.prepend_paths('LIBRARY_PATH', [self.libpath]) txt += self.module_generator.prepend_paths('CPATH', [os.path.join('tbb', 'include')]) txt += self.module_generator.set_environment('TBBROOT', os.path.join(self.installdir, 'tbb')) return txt
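# Illustrative layout sketch (added for clarity, not part of the original
# easyblock; the gcc4.4 directory and library name are hypothetical): after
# install_step() the install directory looks roughly like
#
#   <installdir>/tbb/libs/intel64/gcc4.4/libtbb.so   <- real libraries, moved from tbb/lib
#   <installdir>/tbb/lib -> <installdir>/tbb/libs/intel64/gcc4.4   <- symlink created above
#
# so applications that look into tbb/lib transparently pick up the newest
# compiler-specific library directory selected by the glob in install_step().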
gpl-2.0
gkotton/neutron
neutron/services/firewall/agents/firewall_agent_api.py
10
2533
# Copyright (c) 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo.config import cfg from oslo import messaging from neutron.common import rpc as n_rpc from neutron.openstack.common import log as logging LOG = logging.getLogger(__name__) FWaaSOpts = [ cfg.StrOpt( 'driver', default='', help=_("Name of the FWaaS Driver")), cfg.BoolOpt( 'enabled', default=False, help=_("Enable FWaaS")), ] cfg.CONF.register_opts(FWaaSOpts, 'fwaas') class FWaaSPluginApiMixin(object): """Agent side of the FWaaS agent to FWaaS Plugin RPC API.""" def __init__(self, topic, host): self.host = host target = messaging.Target(topic=topic, version='1.0') self.client = n_rpc.get_client(target) def set_firewall_status(self, context, firewall_id, status): """Make a RPC to set the status of a firewall.""" cctxt = self.client.prepare() return cctxt.call(context, 'set_firewall_status', host=self.host, firewall_id=firewall_id, status=status) def firewall_deleted(self, context, firewall_id): """Make a RPC to indicate that the firewall resources are deleted.""" cctxt = self.client.prepare() return cctxt.call(context, 'firewall_deleted', host=self.host, firewall_id=firewall_id) class FWaaSAgentRpcCallbackMixin(object): """Mixin for FWaaS agent Implementations.""" def __init__(self, host): super(FWaaSAgentRpcCallbackMixin, self).__init__(host) def create_firewall(self, context, firewall, host): """Handle RPC cast from plugin to create a firewall.""" pass def update_firewall(self, context, firewall, host): """Handle RPC cast from plugin to update a firewall.""" pass def delete_firewall(self, context, firewall, host): """Handle RPC cast from plugin to delete a firewall.""" pass
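# --- Illustrative sketch (not part of the original module) ---
# A hedged example of how an agent-side class might combine the two classes
# defined above: it answers plugin casts and reports results back over RPC.
# The class name, the plugin_topic argument and the elided driver work are
# placeholders.
class ExampleFirewallAgent(FWaaSAgentRpcCallbackMixin):
    def __init__(self, host, plugin_topic):
        super(ExampleFirewallAgent, self).__init__(host)
        self.fwplugin_rpc = FWaaSPluginApiMixin(plugin_topic, host)

    def delete_firewall(self, context, firewall, host):
        # ... tear the rules down with the configured FWaaS driver here,
        # then tell the plugin the firewall resources are gone:
        self.fwplugin_rpc.firewall_deleted(context, firewall['id'])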
apache-2.0
atul-bhouraskar/django
django/core/checks/messages.py
319
2383
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.utils.encoding import force_str, python_2_unicode_compatible # Levels DEBUG = 10 INFO = 20 WARNING = 30 ERROR = 40 CRITICAL = 50 @python_2_unicode_compatible class CheckMessage(object): def __init__(self, level, msg, hint=None, obj=None, id=None): assert isinstance(level, int), "The first argument should be level." self.level = level self.msg = msg self.hint = hint self.obj = obj self.id = id def __eq__(self, other): return all(getattr(self, attr) == getattr(other, attr) for attr in ['level', 'msg', 'hint', 'obj', 'id']) def __ne__(self, other): return not (self == other) def __str__(self): from django.db import models if self.obj is None: obj = "?" elif isinstance(self.obj, models.base.ModelBase): # We need to hardcode ModelBase and Field cases because its __str__ # method doesn't return "applabel.modellabel" and cannot be changed. obj = self.obj._meta.label else: obj = force_str(self.obj) id = "(%s) " % self.id if self.id else "" hint = "\n\tHINT: %s" % self.hint if self.hint else '' return "%s: %s%s%s" % (obj, id, self.msg, hint) def __repr__(self): return "<%s: level=%r, msg=%r, hint=%r, obj=%r, id=%r>" % \ (self.__class__.__name__, self.level, self.msg, self.hint, self.obj, self.id) def is_serious(self): return self.level >= ERROR def is_silenced(self): from django.conf import settings return self.id in settings.SILENCED_SYSTEM_CHECKS class Debug(CheckMessage): def __init__(self, *args, **kwargs): super(Debug, self).__init__(DEBUG, *args, **kwargs) class Info(CheckMessage): def __init__(self, *args, **kwargs): super(Info, self).__init__(INFO, *args, **kwargs) class Warning(CheckMessage): def __init__(self, *args, **kwargs): super(Warning, self).__init__(WARNING, *args, **kwargs) class Error(CheckMessage): def __init__(self, *args, **kwargs): super(Error, self).__init__(ERROR, *args, **kwargs) class Critical(CheckMessage): def __init__(self, *args, **kwargs): super(Critical, self).__init__(CRITICAL, *args, **kwargs)
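# --- Illustrative sketch (not part of the original module) ---
# A hedged example of how a system check might use one of the message classes
# defined above; the check name and the 'myapp.W001' id are made-up placeholders.
def example_check(app_configs, **kwargs):
    return [
        Warning(
            "'EXAMPLE_SETTING' is deprecated.",
            hint="Use 'NEW_EXAMPLE_SETTING' instead.",
            obj=None,
            id='myapp.W001',
        ),
    ]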
bsd-3-clause
s40523242/2016fallcp_hw
course/week2.py
19
12340
# 開始透過 ggame 程式庫進行網際繪圖 # ggame 手冊 # http://brythonserver.github.io/ggame/ ''' from ggame import App, Color, LineStyle, Sprite from ggame import RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset # Three primary colors with no transparency (alpha = 1.0) red = Color(0xff0000, 1.0) green = Color(0x00ff00, 1.0) blue = Color(0x0000ff, 1.0) black = Color(0x000000, 1.0) # Define a line style that is a thin (1 pixel) wide black line thinline = LineStyle(1, black) # A graphics asset that represents a rectangle rectangle = RectangleAsset(50, 20, thinline, blue) # Now display a rectangle Sprite(rectangle) myapp = App() myapp.run() ''' ''' ############################################### from ggame import App, ImageAsset, Sprite # Create a displayed object at 0,0 using an image asset Sprite(ImageAsset("ggame/bunny.jpg"), (0,0)) # Create the app, with a 500x500 pixel stage app = App(500,500) # Run the app app.run() ''' ''' ############################################### from ggame import App, Color, LineStyle, Sprite from ggame import RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset # Three primary colors with no transparency (alpha = 1.0) white = Color(0xE3E3E3, 1.0) yellow = Color(0xEDF55F, 1.0) blue = Color(0x0000FF, 1.0) green = Color(0x84F564, 1.0) black = Color(0x4D4843, 1.0) # Define a line style that is a thin (1 pixel) wide black line thinline = LineStyle(1, black) # A graphics asset that represents a rectangle rectangle = RectangleAsset(400, 200, thinline, green) ellipse = EllipseAsset(30, 30, thinline, yellow) ellipse2 = EllipseAsset(70, 40, thinline, blue) polygon = PolygonAsset([(-85, 100), (25, -75), (100, 125), (-85, 100)], thinline, black) polygon2 = PolygonAsset([(100, 100), (225, -125), (350, 125), (100, 100)], thinline, black) # Now display a rectangle Sprite(rectangle, (100, 200)) Sprite(ellipse, (75, 40)) Sprite(ellipse2, (325, 325)) Sprite(polygon, (150, 150)) Sprite(polygon2, (150, 150)) myapp = App() myapp.run() ''' ############################################# """ picture.py Author: <Hayden Hatfield> Credit: <list sources used, if any> Assignment:Picture Use the ggame library to "paint" a graphical picture of something (e.g. a house, a face or landscape). Use at least: 1. Three different Color objects. 2. Ten different Sprite objects. 3. One (or more) RectangleAsset objects. 4. One (or more) CircleAsset objects. 5. One (or more) EllipseAsset objects. 6. One (or more) PolygonAsset objects. See: https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics for general information on how to use ggame. See: http://brythonserver.github.io/ggame/ for detailed information on ggame. 
""" ''' from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset # add your code here \/ \/ \/ red = Color(0xff0000, 1.0) green = Color(0x00ff00, 1.0) blue = Color(0x0000ff, 1.0) black = Color(0x000000, 1.0) darkgreen = Color(0x0e270f, 1.0) nightsky = Color(0x16152b, 1.0) ground = Color(0x06471a, 1.0) wood = Color(0x542a0c, 1.0) moonwhite = Color(0xfff3aa, 1.0) grey = Color(0x818181, 1.0) line = LineStyle(2, black) trunk = RectangleAsset( 20, 50, line, wood ) tri13 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) tri14 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) tri15 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) tri16 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) trunk = RectangleAsset( 20, 50, line, wood ) tri9 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) tri10 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) tri11 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) tri12 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) trunk = RectangleAsset( 20, 50, line, wood ) tri5 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) tri6 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) tri7 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) tri8 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) trunk = RectangleAsset( 20, 50, line, wood ) tri1 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) tri2 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) tri3 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) tri4 = PolygonAsset([(0,100),(200,100),(100,0)], line, darkgreen ) moon = EllipseAsset(40, 40, line, moonwhite ) rock = CircleAsset(75, line, grey) background = RectangleAsset( 1000, 1000, line, nightsky ) ground = RectangleAsset( 1000, 100, line, ground ) Sprite(background, (0, 0)) Sprite(tri9, (450, 575)) Sprite(tri10, (450, 525)) Sprite(tri11, (450, 475)) Sprite(tri12, (450, 425)) Sprite(trunk, (540, 675)) Sprite(tri13, (250, 550)) Sprite(tri14, (250, 500)) Sprite(tri15, (250, 450)) Sprite(tri16, (250, 400)) Sprite(trunk, (340, 650)) Sprite(tri5, (150, 575)) Sprite(tri6, (150, 525)) Sprite(tri7, (150, 475)) Sprite(tri8, (150, 425)) Sprite(trunk, (240, 675)) Sprite(tri1, (20, 550)) Sprite(tri2, (20, 500)) Sprite(tri3, (20, 450)) Sprite(tri4, (20, 400)) Sprite(trunk, (110, 650)) Sprite(moon, (900, 50)) Sprite(rock, (925, 700)) Sprite(ground, (0, 700)) # add your code here /\ /\ /\ myapp = App() myapp.run() ''' ############################################### """ picture.py Author: Payton Credit: Andreas and colorpicker.com Assignment: Use the ggame library to "paint" a graphical picture of something (e.g. a house, a face or landscape). Use at least: 1. Three different Color objects. 2. Ten different Sprite objects. 3. One (or more) RectangleAsset objects. 4. One (or more) CircleAsset objects. 5. One (or more) EllipseAsset objects. 6. One (or more) PolygonAsset objects. See: https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics for general information on how to use ggame. See: http://brythonserver.github.io/ggame/ for detailed information on ggame. 
""" from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset # add your code here \/ \/ \/ """ picture.py Author: <your name here> Credit: <list sources used, if any> Assignment: Use the ggame library to "paint" a graphical picture of something (e.g. a house, a face or landscape). Use at least: 1. Three different Color objects. 2. Ten different Sprite objects. 3. One (or more) RectangleAsset objects. 4. One (or more) CircleAsset objects. 5. One (or more) EllipseAsset objects. 6. One (or more) PolygonAsset objects. See: https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics for general information on how to use ggame. See: http://brythonserver.github.io/ggame/ for detailed information on ggame. """ ''' from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset # add your code here \/ \/ \/ red = Color(0xff0000, 1.0) green = Color(0x00ff00, 1.0) blue = Color(0x0000ff, 1.0) black = Color(0x000000, 1.0) teal = Color(0x24B5B5, 1.0) yellow = Color(0xFFF700, 1.0) purple = Color(0x6F00FF, 1.0) orange = Color(0xFF6A00, 1.0) white = Color(0xFFFFFF, 1.0) #lines thinline = LineStyle(1, black) thickerline = LineStyle(10, green) greenline = LineStyle(1, green) redline = LineStyle(1, red) blueline = LineStyle(1, blue) tealline = LineStyle(1, teal) yellowline = LineStyle(1, yellow) purpleline = LineStyle(1, purple) orangeline = LineStyle(1, orange) #shapes house = RectangleAsset(300, 300, thinline, teal) grassyhill = CircleAsset(600, greenline, green) sun = CircleAsset(150, yellowline, yellow) roof = PolygonAsset([(5,120), (165,1), (335,120)], tealline, purple) windowa = RectangleAsset(85, 85, thinline, blue) windowb = RectangleAsset(85, 85, thinline, blue) windowc = RectangleAsset(85, 85, thinline, blue) windowd = RectangleAsset(85, 85, thinline, blue) door = RectangleAsset(70, 90, thinline, red) knob = CircleAsset(10, thinline, black) sandyhill = CircleAsset(600, orangeline, orange) moon = EllipseAsset(150, 150, thinline, white) #sprites Sprite(sandyhill, (300, 1100)) Sprite(grassyhill, (680, 1000)) Sprite(house, (520, 240)) Sprite(sun, (1020, 150)) Sprite(roof, (500, 119)) Sprite(windowa, (540, 270)) Sprite(windowb, (720, 270)) Sprite(windowc, (540, 400)) Sprite(windowd, (720, 400)) Sprite(door, (640, 440)) Sprite(knob, (700, 490)) Sprite(moon, (320, 150)) # add your code here /\ /\ /\ myapp = App() myapp.run() # add your code here /\ /\ /\ myapp = App() myapp.run() ''' ########################################## """ picture.py Author: Avery Wallis Credit: None so far Assignment: Use the ggame library to "paint" a graphical picture of something (e.g. a house, a face or landscape). Use at least: 1. Three different Color objects. 2. Ten different Sprite objects. 3. One (or more) RectangleAsset objects. 4. One (or more) CircleAsset objects. 5. One (or more) EllipseAsset objects. 6. One (or more) PolygonAsset objects. See: https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics for general information on how to use ggame. See: http://brythonserver.github.io/ggame/ for detailed information on ggame. 
""" ################################## ''' from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, LineAsset, PolygonAsset red = Color(0xff0000, 1.0) green = Color(0x00ff00, 1.0) blue = Color(0x0000ff, 1.0) black = Color(0x000000, 1.0) orange = Color(0xffa500, 1.0) skin =Color(0xFCD15B, 1.0) wall=Color(0xE8E8E8, 1.0) orange=Color(0xFFa500,1.0) plat=Color(0xB9BDBB,1.0) gooy=Color(0xCDF238,1.0) white=Color(0xFFFFFF,1.0) darkblue=Color(0x052099,1.0) thinline= LineStyle(1, black) thickline= LineStyle(5, black) thickishline= LineStyle(2.5, black) noline=LineStyle(0, black) portalline=LineStyle(1, blue) portalline2=LineStyle(1, orange) wall=RectangleAsset(500,500, noline, wall) blueportal=EllipseAsset(27, 60, noline, blue) orangeportal=EllipseAsset(27, 60, noline, orange) innerportal=EllipseAsset(24, 57, noline, white) exit=CircleAsset(70, thinline, plat) exit2=CircleAsset(20, thinline, plat) plat=RectangleAsset(250, 50, noline, plat) # 決定 x 方向的 delta 與 y 方向的 delta, 這裡為各 300 doorline=LineAsset(300, 300, thinline) goo=PolygonAsset([(0,500),(800,500),(800,600,),(0,600)],noline,gooy) Sprite(wall, (400,20)) Sprite(wall, (100,20)) Sprite(exit, (800,100)) Sprite(exit2, (800, 100)) # 決定線的起點 line1 = Sprite(doorline, (0, 0)) Sprite(doorline, (10, 100)) line1.x = 100 Sprite(plat, (100,400)) Sprite(plat, (650, 150)) Sprite(orangeportal, (700,90)) Sprite(innerportal, (700,90)) Sprite(blueportal, (200,340)) Sprite(innerportal, (200,340)) Sprite(goo, (100,0)) myapp = App() myapp.run() # 請問, 如何畫方格紙上的線, 每個小方塊為 20 x 20 個 pixels ''' ######################################## ''' from ggame import App, Color, LineStyle, Sprite from ggame import CircleAsset red = Color(0xff0000, 1.0) green = Color(0x00ff00, 1.0) blue = Color(0x0000ff, 1.0) black = Color(0x000000, 1.0) thinline = LineStyle(1, black) mycircle = CircleAsset(5, thinline, blue) xcoordinates = range(100, 600, 10) # Generate a list of sprites that form a line! sprites = [Sprite(mycircle, (x, x*0.5 + 100)) for x in xcoordinates] myapp = App() myapp.run() ''' ################################# """ multiplication-table.py Author: Hayden Hatfield Credit: Mr. Denison Assignment: Write and submit a Python program that prints a multiplication table. The user must be able to determine the width and height of the table before it is printed. The final multiplication table should look like this: Width of multiplication table: 10 Height of multiplication table: 8 1 2 3 4 5 6 7 8 9 10 2 4 6 8 10 12 14 16 18 20 3 6 9 12 15 18 21 24 27 30 4 8 12 16 20 24 28 32 36 40 5 10 15 20 25 30 35 40 45 50 6 12 18 24 30 36 42 48 54 60 7 14 21 28 35 42 49 56 63 70 8 16 24 32 40 48 56 64 72 80 """ Width = input("Width of multiplication table: ") Height = input("Height of multiplication table: ") for y in range(1, int(Height)+1 ): for x in range(1, int(Width)+1 ): print (y*x, end = " ") pass print()
agpl-3.0
calfonso/ansible
lib/ansible/parsing/dataloader.py
11
18574
# (c) 2012-2014, Michael DeHaan <[email protected]> # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import copy import os import os.path import re import tempfile from ansible import constants as C from ansible.errors import AnsibleFileNotFound, AnsibleParserError from ansible.module_utils.basic import is_executable from ansible.module_utils.six import binary_type, text_type from ansible.module_utils._text import to_bytes, to_native, to_text from ansible.parsing.quoting import unquote from ansible.parsing.utils.yaml import from_yaml from ansible.parsing.vault import VaultLib, b_HEADER, is_encrypted, is_encrypted_file, parse_vaulttext_envelope from ansible.utils.path import unfrackpath try: from __main__ import display except ImportError: from ansible.utils.display import Display display = Display() # Tries to determine if a path is inside a role, last dir must be 'tasks' # this is not perfect but people should really avoid 'tasks' dirs outside roles when using Ansible. RE_TASKS = re.compile(u'(?:^|%s)+tasks%s?$' % (os.path.sep, os.path.sep)) class DataLoader: ''' The DataLoader class is used to load and parse YAML or JSON content, either from a given file name or from a string that was previously read in through other means. A Vault password can be specified, and any vault-encrypted files will be decrypted. Data read from files will also be cached, so the file will never be read from disk more than once. Usage: dl = DataLoader() # optionally: dl.set_vault_password('foo') ds = dl.load('...') ds = dl.load_from_file('/path/to/file') ''' def __init__(self): self._basedir = '.' self._FILE_CACHE = dict() self._tempfiles = set() # initialize the vault stuff with an empty password # TODO: replace with a ref to something that can get the password # a creds/auth provider # self.set_vault_password(None) self._vaults = {} self._vault = VaultLib() self.set_vault_secrets(None) # TODO: since we can query vault_secrets late, we could provide this to DataLoader init def set_vault_secrets(self, vault_secrets): self._vault.secrets = vault_secrets def load(self, data, file_name='<string>', show_content=True): '''Backwards compat for now''' return from_yaml(data, file_name, show_content, self._vault.secrets) def load_from_file(self, file_name, cache=True, unsafe=False): ''' Loads data from a file, which can contain either JSON or YAML. 
''' file_name = self.path_dwim(file_name) display.debug("Loading data from %s" % file_name) # if the file has already been read in and cached, we'll # return those results to avoid more file/vault operations if cache and file_name in self._FILE_CACHE: parsed_data = self._FILE_CACHE[file_name] else: # read the file contents and load the data structure from them (b_file_data, show_content) = self._get_file_contents(file_name) file_data = to_text(b_file_data, errors='surrogate_or_strict') parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content) # cache the file contents for next time self._FILE_CACHE[file_name] = parsed_data if unsafe: return parsed_data else: # return a deep copy here, so the cache is not affected return copy.deepcopy(parsed_data) def path_exists(self, path): path = self.path_dwim(path) return os.path.exists(to_bytes(path, errors='surrogate_or_strict')) def is_file(self, path): path = self.path_dwim(path) return os.path.isfile(to_bytes(path, errors='surrogate_or_strict')) or path == os.devnull def is_directory(self, path): path = self.path_dwim(path) return os.path.isdir(to_bytes(path, errors='surrogate_or_strict')) def list_directory(self, path): path = self.path_dwim(path) return os.listdir(path) def is_executable(self, path): '''is the given path executable?''' path = self.path_dwim(path) return is_executable(path) def _decrypt_if_vault_data(self, b_vault_data, b_file_name=None): '''Decrypt b_vault_data if encrypted and return b_data and the show_content flag''' if not is_encrypted(b_vault_data): show_content = True return b_vault_data, show_content b_ciphertext, b_version, cipher_name, vault_id = parse_vaulttext_envelope(b_vault_data) b_data = self._vault.decrypt(b_vault_data, filename=b_file_name) show_content = False return b_data, show_content def _get_file_contents(self, file_name): ''' Reads the file contents from the given file name If the contents are vault-encrypted, it will decrypt them and return the decrypted data :arg file_name: The name of the file to read. If this is a relative path, it will be expanded relative to the basedir :raises AnsibleFileNotFOund: if the file_name does not refer to a file :raises AnsibleParserError: if we were unable to read the file :return: Returns a byte string of the file contents ''' if not file_name or not isinstance(file_name, (binary_type, text_type)): raise AnsibleParserError("Invalid filename: '%s'" % str(file_name)) b_file_name = to_bytes(self.path_dwim(file_name)) # This is what we really want but have to fix unittests to make it pass # if not os.path.exists(b_file_name) or not os.path.isfile(b_file_name): if not self.path_exists(b_file_name) or not self.is_file(b_file_name): raise AnsibleFileNotFound("Unable to retrieve file contents", file_name=file_name) try: with open(b_file_name, 'rb') as f: data = f.read() return self._decrypt_if_vault_data(data, b_file_name) except (IOError, OSError) as e: raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, str(e)), orig_exc=e) def get_basedir(self): ''' returns the current basedir ''' return self._basedir def set_basedir(self, basedir): ''' sets the base directory, used to find files when a relative path is given ''' if basedir is not None: self._basedir = to_text(basedir) def path_dwim(self, given): ''' make relative paths work like folks expect. 
''' given = unquote(given) given = to_text(given, errors='surrogate_or_strict') if given.startswith(to_text(os.path.sep)) or given.startswith(u'~'): path = given else: basedir = to_text(self._basedir, errors='surrogate_or_strict') path = os.path.join(basedir, given) return unfrackpath(path, follow=False) def _is_role(self, path): ''' imperfect role detection, roles are still valid w/o tasks|meta/main.yml|yaml|etc ''' b_path = to_bytes(path, errors='surrogate_or_strict') b_upath = to_bytes(unfrackpath(path, follow=False), errors='surrogate_or_strict') for b_finddir in (b'meta', b'tasks'): for b_suffix in (b'.yml', b'.yaml', b''): b_main = b'main%s' % (b_suffix) b_tasked = os.path.join(b_finddir, b_main) if ( RE_TASKS.search(path) and os.path.exists(os.path.join(b_path, b_main)) or os.path.exists(os.path.join(b_upath, b_tasked)) or os.path.exists(os.path.join(os.path.dirname(b_path), b_tasked)) ): return True return False def path_dwim_relative(self, path, dirname, source, is_role=False): ''' find one file in either a role or playbook dir with or without explicitly named dirname subdirs Used in action plugins and lookups to find supplemental files that could be in either place. ''' search = [] source = to_text(source, errors='surrogate_or_strict') # I have full path, nothing else needs to be looked at if source.startswith(to_text(os.path.sep)) or source.startswith(u'~'): search.append(unfrackpath(source, follow=False)) else: # base role/play path + templates/files/vars + relative filename search.append(os.path.join(path, dirname, source)) basedir = unfrackpath(path, follow=False) # not told if role, but detect if it is a role and if so make sure you get correct base path if not is_role: is_role = self._is_role(path) if is_role and RE_TASKS.search(path): basedir = unfrackpath(os.path.dirname(path), follow=False) cur_basedir = self._basedir self.set_basedir(basedir) # resolved base role/play path + templates/files/vars + relative filename search.append(unfrackpath(os.path.join(basedir, dirname, source), follow=False)) self.set_basedir(cur_basedir) if is_role and not source.endswith(dirname): # look in role's tasks dir w/o dirname search.append(unfrackpath(os.path.join(basedir, 'tasks', source), follow=False)) # try to create absolute path for loader basedir + templates/files/vars + filename search.append(unfrackpath(os.path.join(dirname, source), follow=False)) # try to create absolute path for loader basedir search.append(unfrackpath(os.path.join(basedir, source), follow=False)) # try to create absolute path for dirname + filename search.append(self.path_dwim(os.path.join(dirname, source))) # try to create absolute path for filename search.append(self.path_dwim(source)) for candidate in search: if os.path.exists(to_bytes(candidate, errors='surrogate_or_strict')): break return candidate def path_dwim_relative_stack(self, paths, dirname, source, is_role=False): ''' find one file in first path in stack taking roles into account and adding play basedir as fallback :arg paths: A list of text strings which are the paths to look for the filename in. :arg dirname: A text string representing a directory. The directory is prepended to the source to form the path to search for. 
:arg source: A text string which is the filename to search for :rtype: A text string :returns: An absolute path to the filename ``source`` if found :raises: An AnsibleFileNotFound Exception if the file is found to exist in the search paths ''' b_dirname = to_bytes(dirname) b_source = to_bytes(source) result = None search = [] if source is None: display.warning('Invalid request to find a file that matches a "null" value') elif source and (source.startswith('~') or source.startswith(os.path.sep)): # path is absolute, no relative needed, check existence and return source test_path = unfrackpath(b_source, follow=False) if os.path.exists(to_bytes(test_path, errors='surrogate_or_strict')): result = test_path else: display.debug(u'evaluation_path:\n\t%s' % '\n\t'.join(paths)) for path in paths: upath = unfrackpath(path, follow=False) b_upath = to_bytes(upath, errors='surrogate_or_strict') b_mydir = os.path.dirname(b_upath) # if path is in role and 'tasks' not there already, add it into the search if (is_role or self._is_role(path)) and b_mydir.endswith(b'tasks'): search.append(os.path.join(os.path.dirname(b_mydir), b_dirname, b_source)) search.append(os.path.join(b_mydir, b_source)) else: # don't add dirname if user already is using it in source if b_source.split(b'/')[0] != dirname: search.append(os.path.join(b_upath, b_dirname, b_source)) search.append(os.path.join(b_upath, b_source)) # always append basedir as last resort # don't add dirname if user already is using it in source if b_source.split(b'/')[0] != dirname: search.append(os.path.join(to_bytes(self.get_basedir()), b_dirname, b_source)) search.append(os.path.join(to_bytes(self.get_basedir()), b_source)) display.debug(u'search_path:\n\t%s' % to_text(b'\n\t'.join(search))) for b_candidate in search: display.vvvvv(u'looking for "%s" at "%s"' % (source, to_text(b_candidate))) if os.path.exists(b_candidate): result = to_text(b_candidate) break if result is None: raise AnsibleFileNotFound(file_name=source, paths=[to_text(p) for p in search]) return result def _create_content_tempfile(self, content): ''' Create a tempfile containing defined content ''' fd, content_tempfile = tempfile.mkstemp() f = os.fdopen(fd, 'wb') content = to_bytes(content) try: f.write(content) except Exception as err: os.remove(content_tempfile) raise Exception(err) finally: f.close() return content_tempfile def get_real_file(self, file_path, decrypt=True): """ If the file is vault encrypted return a path to a temporary decrypted file If the file is not encrypted then the path is returned Temporary files are cleanup in the destructor """ if not file_path or not isinstance(file_path, (binary_type, text_type)): raise AnsibleParserError("Invalid filename: '%s'" % to_native(file_path)) b_file_path = to_bytes(file_path, errors='surrogate_or_strict') if not self.path_exists(b_file_path) or not self.is_file(b_file_path): raise AnsibleFileNotFound(file_name=file_path) real_path = self.path_dwim(file_path) try: if decrypt: with open(to_bytes(real_path), 'rb') as f: # Limit how much of the file is read since we do not know # whether this is a vault file and therefore it could be very # large. 
if is_encrypted_file(f, count=len(b_HEADER)): # if the file is encrypted and no password was specified, # the decrypt call would throw an error, but we check first # since the decrypt function doesn't know the file name data = f.read() if not self._vault.secrets: raise AnsibleParserError("A vault password or secret must be specified to decrypt %s" % to_native(file_path)) data = self._vault.decrypt(data, filename=real_path) # Make a temp file real_path = self._create_content_tempfile(data) self._tempfiles.add(real_path) return real_path except (IOError, OSError) as e: raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (to_native(real_path), to_native(e)), orig_exc=e) def cleanup_tmp_file(self, file_path): """ Removes any temporary files created from a previous call to get_real_file. file_path must be the path returned from a previous call to get_real_file. """ if file_path in self._tempfiles: os.unlink(file_path) self._tempfiles.remove(file_path) def cleanup_all_tmp_files(self): for f in self._tempfiles: try: self.cleanup_tmp_file(f) except Exception as e: display.warning("Unable to cleanup temp files: %s" % to_native(e)) def find_vars_files(self, path, name, extensions=None, allow_dir=True): """ Find vars files in a given path with specified name. This will find files in a dir named <name>/ or a file called <name> ending in known extensions. """ b_path = to_bytes(os.path.join(path, name)) found = [] if extensions is None: # Look for file with no extension first to find dir before file extensions = [''] + C.YAML_FILENAME_EXTENSIONS # add valid extensions to name for ext in extensions: if '.' in ext: full_path = b_path + to_bytes(ext) elif ext: full_path = b'.'.join([b_path, to_bytes(ext)]) else: full_path = b_path if self.path_exists(full_path): if self.is_directory(full_path): if allow_dir: found.extend(self._get_dir_vars_files(to_text(full_path), extensions)) else: next else: found.append(full_path) break return found def _get_dir_vars_files(self, path, extensions): found = [] for spath in sorted(self.list_directory(path)): if not spath.startswith(u'.') and not spath.endswith(u'~'): # skip hidden and backups ext = os.path.splitext(spath)[-1] full_spath = os.path.join(path, spath) if self.is_directory(full_spath) and not ext: # recursive search if dir found.extend(self._get_dir_vars_files(full_spath, extensions)) elif self.is_file(full_spath) and (not ext or to_text(ext) in extensions): # only consider files with valid extensions or no extension found.append(full_spath) return found
gpl-3.0
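A hedged usage sketch for the path-resolution helpers defined above. It assumes the methods belong to Ansible's DataLoader class in ansible.parsing.dataloader (neither the class name nor the import path appears in the excerpt), that an Ansible installation is available, and that the file paths are placeholders.

from ansible.parsing.dataloader import DataLoader   # assumed import path, not shown above

loader = DataLoader()
loader.set_basedir('/srv/playbooks')

# '~' and absolute paths pass straight through path_dwim(); anything else is
# joined onto the basedir and normalized with unfrackpath().
print(loader.path_dwim('group_vars/all.yml'))        # /srv/playbooks/group_vars/all.yml

# Resolve a template relative to a role: the candidate list built above is
# walked in order (role templates/, tasks/, basedir, ...) and the first
# existing path wins; the last candidate is returned if none exist.
print(loader.path_dwim_relative('/srv/playbooks/roles/web/tasks',
                                'templates', 'nginx.conf.j2'))

# get_real_file() hands back a temporary decrypted copy for vault files and
# the original path otherwise; pair it with cleanup_tmp_file().
real_path = loader.get_real_file('group_vars/all.yml')
try:
    with open(real_path) as handle:
        print(len(handle.read()))
finally:
    loader.cleanup_tmp_file(real_path)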
robinro/ansible
lib/ansible/utils/module_docs_fragments/ios.py
101
4360
# # (c) 2015, Peter Sprygada <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. class ModuleDocFragment(object): # Standard files documentation fragment DOCUMENTATION = """ options: authorize: description: - Instructs the module to enter privileged mode on the remote device before sending any commands. If not specified, the device will attempt to execute all commands in non-privileged mode. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be used instead. default: no choices: ['yes', 'no'] auth_pass: description: - Specifies the password to use if required to enter privileged mode on the remote device. If I(authorize) is false, then this argument does nothing. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead. default: none provider: description: - A dict object containing connection details. default: null suboptions: host: description: - Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value of host is used as the destination address for the transport. required: true port: description: - Specifies the port to use when building the connection to the remote device. default: 22 username: description: - Configures the username to use to authenticate the connection to the remote device. This value is used to authenticate the SSH session. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead. password: description: - Specifies the password to use to authenticate the connection to the remote device. This value is used to authenticate the SSH session. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead. default: null timeout: description: - Specifies the timeout in seconds for communicating with the network device for either connecting or sending commands. If the timeout is exceeded before the operation is completed, the module will error. default: 10 ssh_keyfile: description: - Specifies the SSH key to use to authenticate the connection to the remote device. This value is the path to the key used to authenticate the SSH session. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead. authorize: description: - Instructs the module to enter privileged mode on the remote device before sending any commands. If not specified, the device will attempt to execute all commands in non-privileged mode. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be used instead. default: no choices: ['yes', 'no'] auth_pass: description: - Specifies the password to use if required to enter privileged mode on the remote device. 
If I(authorize) is false, then this argument does nothing. If the value is not specified in the task, the value of environment variable C(ANSIBLE_NET_AUTH_PASS) will be used instead. default: none """
gpl-3.0
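The fragment above documents the connection arguments shared by the ios_* modules; the sketch below shows what a matching provider dictionary could look like in Python. The host, credentials and enable password are invented placeholders; only the keys and defaults come from the fragment.

ios_provider = {
    'host': '192.0.2.10',          # destination address for the transport (required)
    'port': 22,                    # documented default
    'username': 'netops',          # falls back to ANSIBLE_NET_USERNAME if omitted
    'password': 'example-pass',    # falls back to ANSIBLE_NET_PASSWORD if omitted
    'timeout': 10,                 # documented default, in seconds
    'authorize': True,             # enter privileged mode before sending commands
    'auth_pass': 'enable-secret',  # only consulted when authorize is true
}

Values left out of such a dict are taken from the listed ANSIBLE_NET_* environment variables, as the option descriptions above state.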
JVenberg/PokemonGo-Bot-Desktop
pywin/Lib/encodings/koi8_u.py
593
14018
""" Python Character Mapping Codec koi8_u generated from 'python-mappings/KOI8-U.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='koi8-u', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE u'\x1c' # 0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' 
# 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z u'[' # 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL u'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL u'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT u'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT u'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT u'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT u'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT u'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT u'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL u'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL u'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL u'\u2580' # 0x8B -> UPPER HALF BLOCK u'\u2584' # 0x8C -> LOWER HALF BLOCK u'\u2588' # 0x8D -> FULL BLOCK u'\u258c' # 0x8E -> LEFT HALF BLOCK u'\u2590' # 0x8F -> RIGHT HALF BLOCK u'\u2591' # 0x90 -> LIGHT SHADE u'\u2592' # 0x91 -> MEDIUM SHADE u'\u2593' # 0x92 -> DARK SHADE u'\u2320' # 0x93 -> TOP HALF INTEGRAL u'\u25a0' # 0x94 -> BLACK SQUARE u'\u2219' # 0x95 -> BULLET OPERATOR u'\u221a' # 0x96 -> SQUARE ROOT u'\u2248' # 0x97 -> ALMOST EQUAL TO u'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO u'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO u'\xa0' # 0x9A -> NO-BREAK SPACE u'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL u'\xb0' # 0x9C -> DEGREE SIGN 
u'\xb2' # 0x9D -> SUPERSCRIPT TWO u'\xb7' # 0x9E -> MIDDLE DOT u'\xf7' # 0x9F -> DIVISION SIGN u'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL u'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL u'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE u'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO u'\u0454' # 0xA4 -> CYRILLIC SMALL LETTER UKRAINIAN IE u'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT u'\u0456' # 0xA6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I u'\u0457' # 0xA7 -> CYRILLIC SMALL LETTER YI (UKRAINIAN) u'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT u'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE u'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE u'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT u'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE u'\u0491' # 0xAD -> CYRILLIC SMALL LETTER UKRAINIAN GHE WITH UPTURN u'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT u'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE u'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE u'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT u'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE u'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO u'\u0404' # 0xB4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE u'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT u'\u0406' # 0xB6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I u'\u0407' # 0xB7 -> CYRILLIC CAPITAL LETTER YI (UKRAINIAN) u'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL u'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE u'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE u'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL u'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE u'\u0490' # 0xBD -> CYRILLIC CAPITAL LETTER UKRAINIAN GHE WITH UPTURN u'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL u'\xa9' # 0xBF -> COPYRIGHT SIGN u'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU u'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A u'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE u'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE u'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE u'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE u'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF u'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE u'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA u'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I u'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I u'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA u'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL u'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM u'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN u'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O u'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE u'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA u'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER u'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES u'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE u'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE u'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE u'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN u'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU u'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE u'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA u'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E u'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA u'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE u'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN u'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU u'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A u'\u0411' # 0xE2 -> 
CYRILLIC CAPITAL LETTER BE u'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE u'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE u'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE u'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF u'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE u'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA u'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I u'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I u'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA u'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL u'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM u'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN u'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O u'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE u'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA u'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER u'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES u'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE u'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U u'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE u'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE u'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN u'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU u'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE u'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA u'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E u'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA u'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE u'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
mit
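A minimal round-trip sketch for the codec defined above. It relies on the interpreter's built-in 'koi8-u' codec, which is generated from this same mapping, rather than importing the module directly; the sample word is arbitrary Cyrillic text covered by the table.

import codecs

text = u'\u0406\u0441\u043f\u0438\u0442'       # Cyrillic letters present in the table above
raw = text.encode('koi8-u')                    # goes through encoding_table
assert raw.decode('koi8-u') == text            # back through decoding_table

# The incremental classes returned by getregentry() behave the same way.
decoder = codecs.getincrementaldecoder('koi8-u')()
assert decoder.decode(raw, final=True) == text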
rue89-tech/edx-platform
lms/djangoapps/survey/tests/test_models.py
63
7761
""" Python tests for the Survey models """ from collections import OrderedDict from django.test import TestCase from django.test.client import Client from django.contrib.auth.models import User from survey.exceptions import SurveyFormNotFound, SurveyFormNameAlreadyExists from django.core.exceptions import ValidationError from survey.models import SurveyForm class SurveyModelsTests(TestCase): """ All tests for the Survey models.py file """ def setUp(self): """ Set up the test data used in the specific tests """ super(SurveyModelsTests, self).setUp() self.client = Client() # Create two accounts self.password = 'abc' self.student = User.objects.create_user('student', '[email protected]', self.password) self.student2 = User.objects.create_user('student2', '[email protected]', self.password) self.test_survey_name = 'TestForm' self.test_form = '<li><input name="field1" /></li><li><input name="field2" /></li><li><select name="ddl"><option>1</option></select></li>' self.test_form_update = '<input name="field1" />' self.student_answers = OrderedDict({ 'field1': 'value1', 'field2': 'value2', }) self.student2_answers = OrderedDict({ 'field1': 'value3' }) def _create_test_survey(self): """ Helper method to set up test form """ return SurveyForm.create(self.test_survey_name, self.test_form) def test_form_not_found_raise_exception(self): """ Asserts that when looking up a form that does not exist """ with self.assertRaises(SurveyFormNotFound): SurveyForm.get(self.test_survey_name) def test_form_not_found_none(self): """ Asserts that when looking up a form that does not exist """ self.assertIsNone(SurveyForm.get(self.test_survey_name, throw_if_not_found=False)) def test_create_new_form(self): """ Make sure we can create a new form a look it up """ survey = self._create_test_survey() self.assertIsNotNone(survey) new_survey = SurveyForm.get(self.test_survey_name) self.assertIsNotNone(new_survey) self.assertEqual(new_survey.form, self.test_form) def test_unicode_rendering(self): """ See if the survey form returns the expected unicode string """ survey = self._create_test_survey() self.assertIsNotNone(survey) self.assertEquals(unicode(survey), self.test_survey_name) def test_create_form_with_malformed_html(self): """ Make sure that if a SurveyForm is saved with unparseable html an exception is thrown """ with self.assertRaises(ValidationError): SurveyForm.create('badform', '<input name="oops" /><<<>') def test_create_form_with_no_fields(self): """ Make sure that if a SurveyForm is saved without any named fields an exception is thrown """ with self.assertRaises(ValidationError): SurveyForm.create('badform', '<p>no input fields here</p>') with self.assertRaises(ValidationError): SurveyForm.create('badform', '<input id="input_without_name" />') def test_create_form_already_exists(self): """ Make sure we can't create two surveys of the same name """ self._create_test_survey() with self.assertRaises(SurveyFormNameAlreadyExists): self._create_test_survey() def test_create_form_update_existing(self): """ Make sure we can update an existing form """ survey = self._create_test_survey() self.assertIsNotNone(survey) survey = SurveyForm.create(self.test_survey_name, self.test_form_update, update_if_exists=True) self.assertIsNotNone(survey) survey = SurveyForm.get(self.test_survey_name) self.assertIsNotNone(survey) self.assertEquals(survey.form, self.test_form_update) def test_survey_has_no_answers(self): """ Create a new survey and assert that there are no answers to that survey """ survey = self._create_test_survey() 
self.assertEquals(len(survey.get_answers()), 0) def test_user_has_no_answers(self): """ Create a new survey with no answers in it and check that a user is determined to not have answered it """ survey = self._create_test_survey() self.assertFalse(survey.has_user_answered_survey(self.student)) self.assertEquals(len(survey.get_answers()), 0) def test_single_user_answers(self): """ Create a new survey and add answers to it """ survey = self._create_test_survey() self.assertIsNotNone(survey) survey.save_user_answers(self.student, self.student_answers) self.assertTrue(survey.has_user_answered_survey(self.student)) all_answers = survey.get_answers() self.assertEquals(len(all_answers.keys()), 1) self.assertTrue(self.student.id in all_answers) self.assertEquals(all_answers[self.student.id], self.student_answers) answers = survey.get_answers(self.student) self.assertEquals(len(answers.keys()), 1) self.assertTrue(self.student.id in answers) self.assertEquals(all_answers[self.student.id], self.student_answers) def test_multiple_user_answers(self): """ Create a new survey and add answers to it """ survey = self._create_test_survey() self.assertIsNotNone(survey) survey.save_user_answers(self.student, self.student_answers) survey.save_user_answers(self.student2, self.student2_answers) self.assertTrue(survey.has_user_answered_survey(self.student)) all_answers = survey.get_answers() self.assertEquals(len(all_answers.keys()), 2) self.assertTrue(self.student.id in all_answers) self.assertTrue(self.student2.id in all_answers) self.assertEquals(all_answers[self.student.id], self.student_answers) self.assertEquals(all_answers[self.student2.id], self.student2_answers) answers = survey.get_answers(self.student) self.assertEquals(len(answers.keys()), 1) self.assertTrue(self.student.id in answers) self.assertEquals(all_answers[self.student.id], self.student_answers) answers = survey.get_answers(self.student2) self.assertEquals(len(answers.keys()), 1) self.assertTrue(self.student2.id in answers) self.assertEquals(all_answers[self.student2.id], self.student2_answers) def test_limit_num_users(self): """ Verify that the limit_num_users parameter to get_answers() works as intended """ survey = self._create_test_survey() survey.save_user_answers(self.student, self.student_answers) survey.save_user_answers(self.student2, self.student2_answers) # even though we have 2 users submitted answers # limit the result set to just 1 all_answers = survey.get_answers(limit_num_users=1) self.assertEquals(len(all_answers.keys()), 1) def test_get_field_names(self): """ Create a new survey and add answers to it """ survey = self._create_test_survey() self.assertIsNotNone(survey) survey.save_user_answers(self.student, self.student_answers) survey.save_user_answers(self.student2, self.student2_answers) names = survey.get_field_names() self.assertEqual(sorted(names), ['ddl', 'field1', 'field2'])
agpl-3.0
tom-slick/pygooglevoice
googlevoice/util.py
40
8838
import re from sys import stdout from xml.parsers.expat import ParserCreate from time import gmtime from datetime import datetime from pprint import pprint try: from urllib2 import build_opener,install_opener, \ HTTPCookieProcessor,Request,urlopen from urllib import urlencode,quote except ImportError: from urllib.request import build_opener,install_opener, \ HTTPCookieProcessor,Request,urlopen from urllib.parse import urlencode,quote try: from http.cookiejar import LWPCookieJar as CookieJar except ImportError: from cookielib import LWPCookieJar as CookieJar try: from json import loads except ImportError: from simplejson import loads try: input = raw_input except NameError: input = input sha1_re = re.compile(r'^[a-fA-F0-9]{40}$') def print_(*values, **kwargs): """ Implementation of Python3's print function Prints the values to a stream, or to sys.stdout by default. Optional keyword arguments: file: a file-like object (stream); defaults to the current sys.stdout. sep: string inserted between values, default a space. end: string appended after the last value, default a newline. """ fo = kwargs.pop('file', stdout) fo.write(kwargs.pop('sep', ' ').join(map(str, values))) fo.write(kwargs.pop('end', '\n')) fo.flush() def is_sha1(s): """ Returns ``True`` if the string is a SHA1 hash """ return bool(sha1_re.match(s)) def validate_response(response): """ Validates that the JSON response is A-OK """ try: assert 'ok' in response and response['ok'] except AssertionError: raise ValidationError('There was a problem with GV: %s' % response) def load_and_validate(response): """ Loads JSON data from http response then validates """ validate_response(loads(response.read())) class ValidationError(Exception): """ Bombs when response code back from Voice 500s """ class LoginError(Exception): """ Occurs when login credentials are incorrect """ class ParsingError(Exception): """ Happens when XML feed parsing fails """ class JSONError(Exception): """ Failed JSON deserialization """ class DownloadError(Exception): """ Cannot download message, probably not in voicemail/recorded """ class ForwardingError(Exception): """ Forwarding number given was incorrect """ class AttrDict(dict): def __getattr__(self, attr): if attr in self: return self[attr] class Phone(AttrDict): """ Wrapper for phone objects used for phone specific methods Attributes are: * id: int * phoneNumber: i18n phone number * formattedNumber: humanized phone number string * we: data dict * wd: data dict * verified: bool * name: strign label * smsEnabled: bool * scheduleSet: bool * policyBitmask: int * weekdayTimes: list * dEPRECATEDDisabled: bool * weekdayAllDay: bool * telephonyVerified * weekendTimes: list * active: bool * weekendAllDay: bool * enabledForOthers: bool * type: int (1 - Home, 2 - Mobile, 3 - Work, 4 - Gizmo) """ def __init__(self, voice, data): self.voice = voice super(Phone, self).__init__(data) def enable(self,): """ Enables this phone for usage """ return self.__call_forwarding() def disable(self): """ Disables this phone """ return self.__call_forwarding('0') def __call_forwarding(self, enabled='1'): """ Enables or disables this phone """ self.voice.__validate_special_page('default_forward', {'enabled':enabled, 'phoneId': self.id}) def __str__(self): return self.phoneNumber def __repr__(self): return '<Phone %s>' % self.phoneNumber class Message(AttrDict): """ Wrapper for all call/sms message instances stored in Google Voice Attributes are: * id: SHA1 identifier * isTrash: bool * displayStartDateTime: datetime * star: bool * isSpam: 
bool * startTime: gmtime * labels: list * displayStartTime: time * children: str * note: str * isRead: bool * displayNumber: str * relativeStartTime: str * phoneNumber: str * type: int """ def __init__(self, folder, id, data): assert is_sha1(id), 'Message id not a SHA1 hash' self.folder = folder self.id = id super(AttrDict, self).__init__(data) self['startTime'] = gmtime(int(self['startTime'])/1000) self['displayStartDateTime'] = datetime.strptime( self['displayStartDateTime'], '%m/%d/%y %I:%M %p') self['displayStartTime'] = self['displayStartDateTime'].time() def delete(self, trash=1): """ Moves this message to the Trash. Use ``message.delete(0)`` to move it out of the Trash. """ self.folder.voice.__messages_post('delete', self.id, trash=trash) def star(self, star=1): """ Star this message. Use ``message.star(0)`` to unstar it. """ self.folder.voice.__messages_post('star', self.id, star=star) def mark(self, read=1): """ Mark this message as read. Use ``message.mark(0)`` to mark it as unread. """ self.folder.voice.__messages_post('mark', self.id, read=read) def download(self, adir=None): """ Download the message MP3 (if any). Saves files to ``adir`` (defaults to current directory). Message hashes can be found in ``self.voicemail().messages`` for example. Returns location of saved file. """ return self.folder.voice.download(self, adir) def __str__(self): return self.id def __repr__(self): return '<Message #%s (%s)>' % (self.id, self.phoneNumber) class Folder(AttrDict): """ Folder wrapper for feeds from Google Voice Attributes are: * totalSize: int (aka ``__len__``) * unreadCounts: dict * resultsPerPage: int * messages: list of Message instances """ def __init__(self, voice, name, data): self.voice = voice self.name = name super(AttrDict, self).__init__(data) def messages(self): """ Returns a list of all messages in this folder """ return [Message(self, *i) for i in self['messages'].items()] messages = property(messages) def __len__(self): return self['totalSize'] def __repr__(self): return '<Folder %s (%s)>' % (self.name, len(self)) class XMLParser(object): """ XML Parser helper that can dig json and html out of the feeds. The parser takes a ``Voice`` instance, page name, and function to grab data from. Calling the parser calls the data function once, sets up the ``json`` and ``html`` attributes and returns a ``Folder`` instance for the given page:: >>> o = XMLParser(voice, 'voicemail', lambda: 'some xml payload') >>> o() ... <Folder ...> >>> o.json ... 'some json payload' >>> o.data ... 'loaded json payload' >>> o.html ... 
'some html payload' """ attr = None def start_element(self, name, attrs): if name in ('json','html'): self.attr = name def end_element(self, name): self.attr = None def char_data(self, data): if self.attr and data: setattr(self, self.attr, getattr(self, self.attr) + data) def __init__(self, voice, name, datafunc): self.json, self.html = '','' self.datafunc = datafunc self.voice = voice self.name = name def __call__(self): self.json, self.html = '','' parser = ParserCreate() parser.StartElementHandler = self.start_element parser.EndElementHandler = self.end_element parser.CharacterDataHandler = self.char_data try: data = self.datafunc() parser.Parse(data, 1) except: raise ParsingError return self.folder def folder(self): """ Returns associated ``Folder`` instance for given page (``self.name``) """ return Folder(self.voice, self.name, self.data) folder = property(folder) def data(self): """ Returns the parsed json information after calling the XMLParser """ try: return loads(self.json) except: raise JSONError data = property(data)
bsd-3-clause
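A small, self-contained sketch of the AttrDict helper that the Phone, Message and Folder wrappers above build on; the class body is restated so the snippet runs on its own, and the phone number is made up.

class AttrDict(dict):
    # same definition as above: dictionary keys double as attributes
    def __getattr__(self, attr):
        if attr in self:
            return self[attr]

info = AttrDict({'phoneNumber': '+15551230000', 'verified': True})
print(info.phoneNumber)      # '+15551230000', attribute access
print(info['verified'])      # True, normal dict access still works
print(info.missing)          # None, __getattr__ falls through silently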
friedrich420/N910G-AEL-Kernel-Lollipop-Sources
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
4653
3596
# EventClass.py # # This is a library defining some events types classes, which could # be used by other scripts to analyzing the perf samples. # # Currently there are just a few classes defined for examples, # PerfEvent is the base class for all perf event sample, PebsEvent # is a HW base Intel x86 PEBS event, and user could add more SW/HW # event classes based on requirements. import struct # Event types, user could add more here EVTYPE_GENERIC = 0 EVTYPE_PEBS = 1 # Basic PEBS event EVTYPE_PEBS_LL = 2 # PEBS event with load latency info EVTYPE_IBS = 3 # # Currently we don't have good way to tell the event type, but by # the size of raw buffer, raw PEBS event with load latency data's # size is 176 bytes, while the pure PEBS event's size is 144 bytes. # def create_event(name, comm, dso, symbol, raw_buf): if (len(raw_buf) == 144): event = PebsEvent(name, comm, dso, symbol, raw_buf) elif (len(raw_buf) == 176): event = PebsNHM(name, comm, dso, symbol, raw_buf) else: event = PerfEvent(name, comm, dso, symbol, raw_buf) return event class PerfEvent(object): event_num = 0 def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC): self.name = name self.comm = comm self.dso = dso self.symbol = symbol self.raw_buf = raw_buf self.ev_type = ev_type PerfEvent.event_num += 1 def show(self): print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso) # # Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer # contains the context info when that event happened: the EFLAGS and # linear IP info, as well as all the registers. # class PebsEvent(PerfEvent): pebs_num = 0 def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS): tmp_buf=raw_buf[0:80] flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf) self.flags = flags self.ip = ip self.ax = ax self.bx = bx self.cx = cx self.dx = dx self.si = si self.di = di self.bp = bp self.sp = sp PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type) PebsEvent.pebs_num += 1 del tmp_buf # # Intel Nehalem and Westmere support PEBS plus Load Latency info which lie # in the four 64 bit words write after the PEBS data: # Status: records the IA32_PERF_GLOBAL_STATUS register value # DLA: Data Linear Address (EIP) # DSE: Data Source Encoding, where the latency happens, hit or miss # in L1/L2/L3 or IO operations # LAT: the actual latency in cycles # class PebsNHM(PebsEvent): pebs_nhm_num = 0 def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL): tmp_buf=raw_buf[144:176] status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf) self.status = status self.dla = dla self.dse = dse self.lat = lat PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type) PebsNHM.pebs_nhm_num += 1 del tmp_buf
gpl-2.0
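A hedged sketch of the size-based dispatch in create_event() above: a fabricated 144-byte buffer is routed to PebsEvent, whose constructor unpacks the first 80 bytes as ten 64-bit registers. The import line assumes the module above is importable as EventClass, and every register value below is fake.

import struct
from EventClass import create_event   # assumed module name / import path

regs = struct.pack('QQQQQQQQQQ', *range(10))    # flags=0, ip=1, ax=2, ... sp=9 (80 bytes)
pebs_buf = regs + b'\x00' * (144 - len(regs))   # pad to the 144-byte PEBS record size

event = create_event('cycles:pp', 'bash', '/bin/bash', 'main', pebs_buf)
print(type(event).__name__)   # PebsEvent, chosen purely by buffer length
print(hex(event.ip))          # 0x1, the second packed value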
AlexanderDolgan/sputnik
wp-content/themes/node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/win_tool.py
395
12634
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Utility functions for Windows builds. These functions are executed via gyp-win-tool when using the ninja generator. """ import os import re import shutil import subprocess import stat import string import sys BASE_DIR = os.path.dirname(os.path.abspath(__file__)) # A regex matching an argument corresponding to the output filename passed to # link.exe. _LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE) def main(args): executor = WinTool() exit_code = executor.Dispatch(args) if exit_code is not None: sys.exit(exit_code) class WinTool(object): """This class performs all the Windows tooling steps. The methods can either be executed directly, or dispatched from an argument list.""" def _UseSeparateMspdbsrv(self, env, args): """Allows to use a unique instance of mspdbsrv.exe per linker instead of a shared one.""" if len(args) < 1: raise Exception("Not enough arguments") if args[0] != 'link.exe': return # Use the output filename passed to the linker to generate an endpoint name # for mspdbsrv.exe. endpoint_name = None for arg in args: m = _LINK_EXE_OUT_ARG.match(arg) if m: endpoint_name = re.sub(r'\W+', '', '%s_%d' % (m.group('out'), os.getpid())) break if endpoint_name is None: return # Adds the appropriate environment variable. This will be read by link.exe # to know which instance of mspdbsrv.exe it should connect to (if it's # not set then the default endpoint is used). env['_MSPDBSRV_ENDPOINT_'] = endpoint_name def Dispatch(self, args): """Dispatches a string command to a method.""" if len(args) < 1: raise Exception("Not enough arguments") method = "Exec%s" % self._CommandifyName(args[0]) return getattr(self, method)(*args[1:]) def _CommandifyName(self, name_string): """Transforms a tool name like recursive-mirror to RecursiveMirror.""" return name_string.title().replace('-', '') def _GetEnv(self, arch): """Gets the saved environment from a file for a given architecture.""" # The environment is saved as an "environment block" (see CreateProcess # and msvs_emulation for details). We convert to a dict here. # Drop last 2 NULs, one for list terminator, one for trailing vs. separator. pairs = open(arch).read()[:-2].split('\0') kvs = [item.split('=', 1) for item in pairs] return dict(kvs) def ExecStamp(self, path): """Simple stamp command.""" open(path, 'w').close() def ExecRecursiveMirror(self, source, dest): """Emulation of rm -rf out && cp -af in out.""" if os.path.exists(dest): if os.path.isdir(dest): def _on_error(fn, path, excinfo): # The operation failed, possibly because the file is set to # read-only. If that's why, make it writable and try the op again. if not os.access(path, os.W_OK): os.chmod(path, stat.S_IWRITE) fn(path) shutil.rmtree(dest, onerror=_on_error) else: if not os.access(dest, os.W_OK): # Attempt to make the file writable before deleting it. os.chmod(dest, stat.S_IWRITE) os.unlink(dest) if os.path.isdir(source): shutil.copytree(source, dest) else: shutil.copy2(source, dest) def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args): """Filter diagnostic output from link that looks like: ' Creating library ui.dll.lib and object ui.dll.exp' This happens when there are exports from the dll or exe. 
""" env = self._GetEnv(arch) if use_separate_mspdbsrv == 'True': self._UseSeparateMspdbsrv(env, args) link = subprocess.Popen([args[0].replace('/', '\\')] + list(args[1:]), shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = link.communicate() for line in out.splitlines(): if not line.startswith(' Creating library '): print line return link.returncode def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname, mt, rc, intermediate_manifest, *manifests): """A wrapper for handling creating a manifest resource and then executing a link command.""" # The 'normal' way to do manifests is to have link generate a manifest # based on gathering dependencies from the object files, then merge that # manifest with other manifests supplied as sources, convert the merged # manifest to a resource, and then *relink*, including the compiled # version of the manifest resource. This breaks incremental linking, and # is generally overly complicated. Instead, we merge all the manifests # provided (along with one that includes what would normally be in the # linker-generated one, see msvs_emulation.py), and include that into the # first and only link. We still tell link to generate a manifest, but we # only use that to assert that our simpler process did not miss anything. variables = { 'python': sys.executable, 'arch': arch, 'out': out, 'ldcmd': ldcmd, 'resname': resname, 'mt': mt, 'rc': rc, 'intermediate_manifest': intermediate_manifest, 'manifests': ' '.join(manifests), } add_to_ld = '' if manifests: subprocess.check_call( '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo ' '-manifest %(manifests)s -out:%(out)s.manifest' % variables) if embed_manifest == 'True': subprocess.check_call( '%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest' ' %(out)s.manifest.rc %(resname)s' % variables) subprocess.check_call( '%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s ' '%(out)s.manifest.rc' % variables) add_to_ld = ' %(out)s.manifest.res' % variables subprocess.check_call(ldcmd + add_to_ld) # Run mt.exe on the theoretically complete manifest we generated, merging # it with the one the linker generated to confirm that the linker # generated one does not add anything. This is strictly unnecessary for # correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not # used in a #pragma comment. if manifests: # Merge the intermediate one with ours to .assert.manifest, then check # that .assert.manifest is identical to ours. subprocess.check_call( '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo ' '-manifest %(out)s.manifest %(intermediate_manifest)s ' '-out:%(out)s.assert.manifest' % variables) assert_manifest = '%(out)s.assert.manifest' % variables our_manifest = '%(out)s.manifest' % variables # Load and normalize the manifests. mt.exe sometimes removes whitespace, # and sometimes doesn't unfortunately. with open(our_manifest, 'rb') as our_f: with open(assert_manifest, 'rb') as assert_f: our_data = our_f.read().translate(None, string.whitespace) assert_data = assert_f.read().translate(None, string.whitespace) if our_data != assert_data: os.unlink(out) def dump(filename): sys.stderr.write('%s\n-----\n' % filename) with open(filename, 'rb') as f: sys.stderr.write(f.read() + '\n-----\n') dump(intermediate_manifest) dump(our_manifest) dump(assert_manifest) sys.stderr.write( 'Linker generated manifest "%s" added to final manifest "%s" ' '(result in "%s"). ' 'Were /MANIFEST switches used in #pragma statements? 
' % ( intermediate_manifest, our_manifest, assert_manifest)) return 1 def ExecManifestWrapper(self, arch, *args): """Run manifest tool with environment set. Strip out undesirable warning (some XML blocks are recognized by the OS loader, but not the manifest tool).""" env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if line and 'manifest authoring warning 81010002' not in line: print line return popen.returncode def ExecManifestToRc(self, arch, *args): """Creates a resource file pointing a SxS assembly manifest. |args| is tuple containing path to resource file, path to manifest file and resource name which can be "1" (for executables) or "2" (for DLLs).""" manifest_path, resource_path, resource_name = args with open(resource_path, 'wb') as output: output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % ( resource_name, os.path.abspath(manifest_path).replace('\\', '/'))) def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl, *flags): """Filter noisy filenames output from MIDL compile step that isn't quietable via command line flags. """ args = ['midl', '/nologo'] + list(flags) + [ '/out', outdir, '/tlb', tlb, '/h', h, '/dlldata', dlldata, '/iid', iid, '/proxy', proxy, idl] env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() # Filter junk out of stdout, and write filtered versions. Output we want # to filter is pairs of lines that look like this: # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl # objidl.idl lines = out.splitlines() prefixes = ('Processing ', '64 bit Processing ') processing = set(os.path.basename(x) for x in lines if x.startswith(prefixes)) for line in lines: if not line.startswith(prefixes) and line not in processing: print line return popen.returncode def ExecAsmWrapper(self, arch, *args): """Filter logo banner from invocations of asm.exe.""" env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if (not line.startswith('Copyright (C) Microsoft Corporation') and not line.startswith('Microsoft (R) Macro Assembler') and not line.startswith(' Assembling: ') and line): print line return popen.returncode def ExecRcWrapper(self, arch, *args): """Filter logo banner from invocations of rc.exe. Older versions of RC don't support the /nologo flag.""" env = self._GetEnv(arch) popen = subprocess.Popen(args, shell=True, env=env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = popen.communicate() for line in out.splitlines(): if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and not line.startswith('Copyright (C) Microsoft Corporation') and line): print line return popen.returncode def ExecActionWrapper(self, arch, rspfile, *dir): """Runs an action command line from a response file using the environment for |arch|. If |dir| is supplied, use that as the working directory.""" env = self._GetEnv(arch) # TODO(scottmg): This is a temporary hack to get some specific variables # through to actions that are set after gyp-time. http://crbug.com/333738. 
for k, v in os.environ.iteritems(): if k not in env: env[k] = v args = open(rspfile).read() dir = dir[0] if dir else None return subprocess.call(args, shell=True, env=env, cwd=dir) def ExecClCompile(self, project_dir, selected_files): """Executed by msvs-ninja projects when the 'ClCompile' target is used to build selected C/C++ files.""" project_dir = os.path.relpath(project_dir, BASE_DIR) selected_files = selected_files.split(';') ninja_targets = [os.path.join(project_dir, filename) + '^^' for filename in selected_files] cmd = ['ninja.exe'] cmd.extend(ninja_targets) return subprocess.call(cmd, shell=True, cwd=BASE_DIR) if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
gpl-2.0
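A short sketch of the dispatch convention used by WinTool above: Dispatch() title-cases the subcommand, strips dashes and prefixes 'Exec' to find the handler, so a command line such as gyp-win-tool recursive-mirror src dst ends up in ExecRecursiveMirror. The helper below only mirrors _CommandifyName for illustration.

def commandify(name_string):
    # mirrors WinTool._CommandifyName
    return name_string.title().replace('-', '')

for cmd in ('stamp', 'recursive-mirror', 'link-wrapper', 'rc-wrapper'):
    print('%s -> Exec%s' % (cmd, commandify(cmd)))
# stamp -> ExecStamp
# recursive-mirror -> ExecRecursiveMirror
# link-wrapper -> ExecLinkWrapper
# rc-wrapper -> ExecRcWrapper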
lumig242/Hue-Integration-with-CDAP
desktop/core/ext-py/tablib-0.10.0/tablib/packages/yaml3/composer.py
273
4881
__all__ = ['Composer', 'ComposerError'] from .error import MarkedYAMLError from .events import * from .nodes import * class ComposerError(MarkedYAMLError): pass class Composer: def __init__(self): self.anchors = {} def check_node(self): # Drop the STREAM-START event. if self.check_event(StreamStartEvent): self.get_event() # If there are more documents available? return not self.check_event(StreamEndEvent) def get_node(self): # Get the root node of the next document. if not self.check_event(StreamEndEvent): return self.compose_document() def get_single_node(self): # Drop the STREAM-START event. self.get_event() # Compose a document if the stream is not empty. document = None if not self.check_event(StreamEndEvent): document = self.compose_document() # Ensure that the stream contains no more documents. if not self.check_event(StreamEndEvent): event = self.get_event() raise ComposerError("expected a single document in the stream", document.start_mark, "but found another document", event.start_mark) # Drop the STREAM-END event. self.get_event() return document def compose_document(self): # Drop the DOCUMENT-START event. self.get_event() # Compose the root node. node = self.compose_node(None, None) # Drop the DOCUMENT-END event. self.get_event() self.anchors = {} return node def compose_node(self, parent, index): if self.check_event(AliasEvent): event = self.get_event() anchor = event.anchor if anchor not in self.anchors: raise ComposerError(None, None, "found undefined alias %r" % anchor, event.start_mark) return self.anchors[anchor] event = self.peek_event() anchor = event.anchor if anchor is not None: if anchor in self.anchors: raise ComposerError("found duplicate anchor %r; first occurence" % anchor, self.anchors[anchor].start_mark, "second occurence", event.start_mark) self.descend_resolver(parent, index) if self.check_event(ScalarEvent): node = self.compose_scalar_node(anchor) elif self.check_event(SequenceStartEvent): node = self.compose_sequence_node(anchor) elif self.check_event(MappingStartEvent): node = self.compose_mapping_node(anchor) self.ascend_resolver() return node def compose_scalar_node(self, anchor): event = self.get_event() tag = event.tag if tag is None or tag == '!': tag = self.resolve(ScalarNode, event.value, event.implicit) node = ScalarNode(tag, event.value, event.start_mark, event.end_mark, style=event.style) if anchor is not None: self.anchors[anchor] = node return node def compose_sequence_node(self, anchor): start_event = self.get_event() tag = start_event.tag if tag is None or tag == '!': tag = self.resolve(SequenceNode, None, start_event.implicit) node = SequenceNode(tag, [], start_event.start_mark, None, flow_style=start_event.flow_style) if anchor is not None: self.anchors[anchor] = node index = 0 while not self.check_event(SequenceEndEvent): node.value.append(self.compose_node(node, index)) index += 1 end_event = self.get_event() node.end_mark = end_event.end_mark return node def compose_mapping_node(self, anchor): start_event = self.get_event() tag = start_event.tag if tag is None or tag == '!': tag = self.resolve(MappingNode, None, start_event.implicit) node = MappingNode(tag, [], start_event.start_mark, None, flow_style=start_event.flow_style) if anchor is not None: self.anchors[anchor] = node while not self.check_event(MappingEndEvent): #key_event = self.peek_event() item_key = self.compose_node(node, None) #if item_key in node.value: # raise ComposerError("while composing a mapping", start_event.start_mark, # "found duplicate key", key_event.start_mark) 
item_value = self.compose_node(node, item_key) #node.value[item_key] = item_value node.value.append((item_key, item_value)) end_event = self.get_event() node.end_mark = end_event.end_mark return node
apache-2.0
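The Composer above is a vendored copy of the PyYAML composer, so its anchor/alias handling can be observed through the standalone yaml package instead of the vendored module; that equivalence, and the availability of PyYAML, are assumptions of this sketch.

import yaml

doc = """
defaults: &base {retries: 3, timeout: 10}
service_a: *base
service_b: *base
"""
root = yaml.compose(doc)            # root MappingNode of the first document
print(type(root).__name__)          # MappingNode

# compose_node() returns the very node stored in self.anchors for an alias,
# so both services share one node object rather than getting copies.
(_, a_node), (_, b_node) = root.value[1], root.value[2]
print(a_node is b_node)             # True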
centrofermi/e3pipe
tasks/e3dqm.py
1
1620
#!/usr/bin/env python # ********************************************************************* # * Copyright (C) 2014 Luca Baldini ([email protected]) * # * * # * For the license terms see the file LICENSE, distributed * # * along with this software. * # ********************************************************************* # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. from e3pipe.__logging__ import logger, startmsg, abort from e3pipe.dqm.E3DqmRunMonitor import E3DqmRunMonitor def e3dqm(dstFilePath, outputFolder = None): """ Read the DST and run the data quality monitoring. """ monitor = E3DqmRunMonitor(dstFilePath, outputFolder) monitor.run() if __name__ == '__main__': from optparse import OptionParser parser = OptionParser() (opts, args) = parser.parse_args() e3dqm(args[0])
gpl-3.0
openego/dingo
tests/core/test_core.py
1
1146
import pytest from egoio.tools import db from sqlalchemy.orm import sessionmaker import oedialect from ding0.core import NetworkDing0 class TestNetworkDing0(object): @pytest.fixture def emptyNetworkDing0(self): """ Returns an empty NetworkDing0 object for testing """ return NetworkDing0() @pytest.fixture def oedb_session(self): """ Returns an ego.io oedb session and closes it on finishing the test """ engine = db.connection(readonly=True) session = sessionmaker(bind=engine)() yield session print("closing session") session.close() def test_empty_mv_grid_districts(self, emptyNetworkDing0): mv_grid_districts = list(emptyNetworkDing0.mv_grid_districts()) empty_list = [] assert mv_grid_districts == empty_list def test_import_mv_grid_districts(self, oedb_session): with pytest.raises(TypeError): NetworkDing0.import_mv_grid_districts( oedb_session, mv_grid_districts_no=['5'] ) # def test_run_ding0(self): # pass
agpl-3.0
hxddh/youtube-dl
youtube_dl/extractor/vbox7.py
92
2298
# encoding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_urllib_parse, compat_urllib_request, compat_urlparse, ) from ..utils import ( ExtractorError, ) class Vbox7IE(InfoExtractor): _VALID_URL = r'http://(?:www\.)?vbox7\.com/play:(?P<id>[^/]+)' _TEST = { 'url': 'http://vbox7.com/play:249bb972c2', 'md5': '99f65c0c9ef9b682b97313e052734c3f', 'info_dict': { 'id': '249bb972c2', 'ext': 'mp4', 'title': 'Смях! Чудо - чист за секунди - Скрита камера', }, } def _real_extract(self, url): video_id = self._match_id(url) # need to get the page 3 times for the correct jsSecretToken cookie # which is necessary for the correct title def get_session_id(): redirect_page = self._download_webpage(url, video_id) session_id_url = self._search_regex( r'var\s*url\s*=\s*\'([^\']+)\';', redirect_page, 'session id url') self._download_webpage( compat_urlparse.urljoin(url, session_id_url), video_id, 'Getting session id') get_session_id() get_session_id() webpage = self._download_webpage(url, video_id, 'Downloading redirect page') title = self._html_search_regex(r'<title>(.*)</title>', webpage, 'title').split('/')[0].strip() info_url = "http://vbox7.com/play/magare.do" data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id}) info_request = compat_urllib_request.Request(info_url, data) info_request.add_header('Content-Type', 'application/x-www-form-urlencoded') info_response = self._download_webpage(info_request, video_id, 'Downloading info webpage') if info_response is None: raise ExtractorError('Unable to extract the media url') (final_url, thumbnail_url) = map(lambda x: x.split('=')[1], info_response.split('&')) return { 'id': video_id, 'url': final_url, 'title': title, 'thumbnail': thumbnail_url, }
unlicense
compulab/trimslice-android-kernel
tools/perf/scripts/python/sctop.py
11180
1924
# system call top # (c) 2010, Tom Zanussi <[email protected]> # Licensed under the terms of the GNU GPL License version 2 # # Periodically displays system-wide system call totals, broken down by # syscall. If a [comm] arg is specified, only syscalls called by # [comm] are displayed. If an [interval] arg is specified, the display # will be refreshed every [interval] seconds. The default interval is # 3 seconds. import os, sys, thread, time sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * usage = "perf script -s sctop.py [comm] [interval]\n"; for_comm = None default_interval = 3 interval = default_interval if len(sys.argv) > 3: sys.exit(usage) if len(sys.argv) > 2: for_comm = sys.argv[1] interval = int(sys.argv[2]) elif len(sys.argv) > 1: try: interval = int(sys.argv[1]) except ValueError: for_comm = sys.argv[1] interval = default_interval syscalls = autodict() def trace_begin(): thread.start_new_thread(print_syscall_totals, (interval,)) pass def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(interval): while 1: clear_term() if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): try: print "%-40s %10d\n" % (syscall_name(id), val), except TypeError: pass syscalls.clear() time.sleep(interval)
gpl-2.0
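The per-syscall tally kept by raw_syscalls__sys_enter() above can be imitated with a plain dict, which may make the try/except increment and the sorted display in print_syscall_totals() easier to follow outside the perf runtime; the ids below are fabricated.

syscalls = {}
for sys_id in (0, 1, 1, 3, 1, 0):                   # fake sys_enter ids
    syscalls[sys_id] = syscalls.get(sys_id, 0) + 1  # same tally, no autodict needed

for sys_id, count in sorted(syscalls.items(), key=lambda kv: kv[1], reverse=True):
    print('%-40d %10d' % (sys_id, count))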
MalloyPower/parsing-python
front-end/testsuite-python-lib/Python-3.2/Lib/idlelib/MultiCall.py
54
17510
""" MultiCall - a class which inherits its methods from a Tkinter widget (Text, for example), but enables multiple calls of functions per virtual event - all matching events will be called, not only the most specific one. This is done by wrapping the event functions - event_add, event_delete and event_info. MultiCall recognizes only a subset of legal event sequences. Sequences which are not recognized are treated by the original Tk handling mechanism. A more-specific event will be called before a less-specific event. The recognized sequences are complete one-event sequences (no emacs-style Ctrl-X Ctrl-C, no shortcuts like <3>), for all types of events. Key/Button Press/Release events can have modifiers. The recognized modifiers are Shift, Control, Option and Command for Mac, and Control, Alt, Shift, Meta/M for other platforms. For all events which were handled by MultiCall, a new member is added to the event instance passed to the binded functions - mc_type. This is one of the event type constants defined in this module (such as MC_KEYPRESS). For Key/Button events (which are handled by MultiCall and may receive modifiers), another member is added - mc_state. This member gives the state of the recognized modifiers, as a combination of the modifier constants also defined in this module (for example, MC_SHIFT). Using these members is absolutely portable. The order by which events are called is defined by these rules: 1. A more-specific event will be called before a less-specific event. 2. A recently-binded event will be called before a previously-binded event, unless this conflicts with the first rule. Each function will be called at most once for each event. """ import sys import re import tkinter from idlelib import macosxSupport # the event type constants, which define the meaning of mc_type MC_KEYPRESS=0; MC_KEYRELEASE=1; MC_BUTTONPRESS=2; MC_BUTTONRELEASE=3; MC_ACTIVATE=4; MC_CIRCULATE=5; MC_COLORMAP=6; MC_CONFIGURE=7; MC_DEACTIVATE=8; MC_DESTROY=9; MC_ENTER=10; MC_EXPOSE=11; MC_FOCUSIN=12; MC_FOCUSOUT=13; MC_GRAVITY=14; MC_LEAVE=15; MC_MAP=16; MC_MOTION=17; MC_MOUSEWHEEL=18; MC_PROPERTY=19; MC_REPARENT=20; MC_UNMAP=21; MC_VISIBILITY=22; # the modifier state constants, which define the meaning of mc_state MC_SHIFT = 1<<0; MC_CONTROL = 1<<2; MC_ALT = 1<<3; MC_META = 1<<5 MC_OPTION = 1<<6; MC_COMMAND = 1<<7 # define the list of modifiers, to be used in complex event types. if macosxSupport.runningAsOSXApp(): _modifiers = (("Shift",), ("Control",), ("Option",), ("Command",)) _modifier_masks = (MC_SHIFT, MC_CONTROL, MC_OPTION, MC_COMMAND) else: _modifiers = (("Control",), ("Alt",), ("Shift",), ("Meta", "M")) _modifier_masks = (MC_CONTROL, MC_ALT, MC_SHIFT, MC_META) # a dictionary to map a modifier name into its number _modifier_names = dict([(name, number) for number in range(len(_modifiers)) for name in _modifiers[number]]) # A binder is a class which binds functions to one type of event. It has two # methods: bind and unbind, which get a function and a parsed sequence, as # returned by _parse_sequence(). There are two types of binders: # _SimpleBinder handles event types with no modifiers and no detail. # No Python functions are called when no events are binded. # _ComplexBinder handles event types with modifiers and a detail. # A Python function is called each time an event is generated. 
class _SimpleBinder: def __init__(self, type, widget, widgetinst): self.type = type self.sequence = '<'+_types[type][0]+'>' self.widget = widget self.widgetinst = widgetinst self.bindedfuncs = [] self.handlerid = None def bind(self, triplet, func): if not self.handlerid: def handler(event, l = self.bindedfuncs, mc_type = self.type): event.mc_type = mc_type wascalled = {} for i in range(len(l)-1, -1, -1): func = l[i] if func not in wascalled: wascalled[func] = True r = func(event) if r: return r self.handlerid = self.widget.bind(self.widgetinst, self.sequence, handler) self.bindedfuncs.append(func) def unbind(self, triplet, func): self.bindedfuncs.remove(func) if not self.bindedfuncs: self.widget.unbind(self.widgetinst, self.sequence, self.handlerid) self.handlerid = None def __del__(self): if self.handlerid: self.widget.unbind(self.widgetinst, self.sequence, self.handlerid) # An int in range(1 << len(_modifiers)) represents a combination of modifiers # (if the least significent bit is on, _modifiers[0] is on, and so on). # _state_subsets gives for each combination of modifiers, or *state*, # a list of the states which are a subset of it. This list is ordered by the # number of modifiers is the state - the most specific state comes first. _states = range(1 << len(_modifiers)) _state_names = [''.join(m[0]+'-' for i, m in enumerate(_modifiers) if (1 << i) & s) for s in _states] def expand_substates(states): '''For each item of states return a list containing all combinations of that item with individual bits reset, sorted by the number of set bits. ''' def nbits(n): "number of bits set in n base 2" nb = 0 while n: n, rem = divmod(n, 2) nb += rem return nb statelist = [] for state in states: substates = list(set(state & x for x in states)) substates.sort(key=nbits, reverse=True) statelist.append(substates) return statelist _state_subsets = expand_substates(_states) # _state_codes gives for each state, the portable code to be passed as mc_state _state_codes = [] for s in _states: r = 0 for i in range(len(_modifiers)): if (1 << i) & s: r |= _modifier_masks[i] _state_codes.append(r) class _ComplexBinder: # This class binds many functions, and only unbinds them when it is deleted. # self.handlerids is the list of seqs and ids of binded handler functions. # The binded functions sit in a dictionary of lists of lists, which maps # a detail (or None) and a state into a list of functions. # When a new detail is discovered, handlers for all the possible states # are binded. def __create_handler(self, lists, mc_type, mc_state): def handler(event, lists = lists, mc_type = mc_type, mc_state = mc_state, ishandlerrunning = self.ishandlerrunning, doafterhandler = self.doafterhandler): ishandlerrunning[:] = [True] event.mc_type = mc_type event.mc_state = mc_state wascalled = {} r = None for l in lists: for i in range(len(l)-1, -1, -1): func = l[i] if func not in wascalled: wascalled[func] = True r = l[i](event) if r: break if r: break ishandlerrunning[:] = [] # Call all functions in doafterhandler and remove them from list while doafterhandler: doafterhandler.pop()() if r: return r return handler def __init__(self, type, widget, widgetinst): self.type = type self.typename = _types[type][0] self.widget = widget self.widgetinst = widgetinst self.bindedfuncs = {None: [[] for s in _states]} self.handlerids = [] # we don't want to change the lists of functions while a handler is # running - it will mess up the loop and anyway, we usually want the # change to happen from the next event. 
So we have a list of functions # for the handler to run after it finishes calling the binded functions. # It calls them only once. # ishandlerrunning is a list. An empty one means no, otherwise - yes. # this is done so that it would be mutable. self.ishandlerrunning = [] self.doafterhandler = [] for s in _states: lists = [self.bindedfuncs[None][i] for i in _state_subsets[s]] handler = self.__create_handler(lists, type, _state_codes[s]) seq = '<'+_state_names[s]+self.typename+'>' self.handlerids.append((seq, self.widget.bind(self.widgetinst, seq, handler))) def bind(self, triplet, func): if triplet[2] not in self.bindedfuncs: self.bindedfuncs[triplet[2]] = [[] for s in _states] for s in _states: lists = [ self.bindedfuncs[detail][i] for detail in (triplet[2], None) for i in _state_subsets[s] ] handler = self.__create_handler(lists, self.type, _state_codes[s]) seq = "<%s%s-%s>"% (_state_names[s], self.typename, triplet[2]) self.handlerids.append((seq, self.widget.bind(self.widgetinst, seq, handler))) doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].append(func) if not self.ishandlerrunning: doit() else: self.doafterhandler.append(doit) def unbind(self, triplet, func): doit = lambda: self.bindedfuncs[triplet[2]][triplet[0]].remove(func) if not self.ishandlerrunning: doit() else: self.doafterhandler.append(doit) def __del__(self): for seq, id in self.handlerids: self.widget.unbind(self.widgetinst, seq, id) # define the list of event types to be handled by MultiEvent. the order is # compatible with the definition of event type constants. _types = ( ("KeyPress", "Key"), ("KeyRelease",), ("ButtonPress", "Button"), ("ButtonRelease",), ("Activate",), ("Circulate",), ("Colormap",), ("Configure",), ("Deactivate",), ("Destroy",), ("Enter",), ("Expose",), ("FocusIn",), ("FocusOut",), ("Gravity",), ("Leave",), ("Map",), ("Motion",), ("MouseWheel",), ("Property",), ("Reparent",), ("Unmap",), ("Visibility",), ) # which binder should be used for every event type? _binder_classes = (_ComplexBinder,) * 4 + (_SimpleBinder,) * (len(_types)-4) # A dictionary to map a type name into its number _type_names = dict([(name, number) for number in range(len(_types)) for name in _types[number]]) _keysym_re = re.compile(r"^\w+$") _button_re = re.compile(r"^[1-5]$") def _parse_sequence(sequence): """Get a string which should describe an event sequence. If it is successfully parsed as one, return a tuple containing the state (as an int), the event type (as an index of _types), and the detail - None if none, or a string if there is one. If the parsing is unsuccessful, return None. 
""" if not sequence or sequence[0] != '<' or sequence[-1] != '>': return None words = sequence[1:-1].split('-') modifiers = 0 while words and words[0] in _modifier_names: modifiers |= 1 << _modifier_names[words[0]] del words[0] if words and words[0] in _type_names: type = _type_names[words[0]] del words[0] else: return None if _binder_classes[type] is _SimpleBinder: if modifiers or words: return None else: detail = None else: # _ComplexBinder if type in [_type_names[s] for s in ("KeyPress", "KeyRelease")]: type_re = _keysym_re else: type_re = _button_re if not words: detail = None elif len(words) == 1 and type_re.match(words[0]): detail = words[0] else: return None return modifiers, type, detail def _triplet_to_sequence(triplet): if triplet[2]: return '<'+_state_names[triplet[0]]+_types[triplet[1]][0]+'-'+ \ triplet[2]+'>' else: return '<'+_state_names[triplet[0]]+_types[triplet[1]][0]+'>' _multicall_dict = {} def MultiCallCreator(widget): """Return a MultiCall class which inherits its methods from the given widget class (for example, Tkinter.Text). This is used instead of a templating mechanism. """ if widget in _multicall_dict: return _multicall_dict[widget] class MultiCall (widget): assert issubclass(widget, tkinter.Misc) def __init__(self, *args, **kwargs): widget.__init__(self, *args, **kwargs) # a dictionary which maps a virtual event to a tuple with: # 0. the function binded # 1. a list of triplets - the sequences it is binded to self.__eventinfo = {} self.__binders = [_binder_classes[i](i, widget, self) for i in range(len(_types))] def bind(self, sequence=None, func=None, add=None): #print("bind(%s, %s, %s)" % (sequence, func, add), # file=sys.__stderr__) if type(sequence) is str and len(sequence) > 2 and \ sequence[:2] == "<<" and sequence[-2:] == ">>": if sequence in self.__eventinfo: ei = self.__eventinfo[sequence] if ei[0] is not None: for triplet in ei[1]: self.__binders[triplet[1]].unbind(triplet, ei[0]) ei[0] = func if ei[0] is not None: for triplet in ei[1]: self.__binders[triplet[1]].bind(triplet, func) else: self.__eventinfo[sequence] = [func, []] return widget.bind(self, sequence, func, add) def unbind(self, sequence, funcid=None): if type(sequence) is str and len(sequence) > 2 and \ sequence[:2] == "<<" and sequence[-2:] == ">>" and \ sequence in self.__eventinfo: func, triplets = self.__eventinfo[sequence] if func is not None: for triplet in triplets: self.__binders[triplet[1]].unbind(triplet, func) self.__eventinfo[sequence][0] = None return widget.unbind(self, sequence, funcid) def event_add(self, virtual, *sequences): #print("event_add(%s, %s)" % (repr(virtual), repr(sequences)), # file=sys.__stderr__) if virtual not in self.__eventinfo: self.__eventinfo[virtual] = [None, []] func, triplets = self.__eventinfo[virtual] for seq in sequences: triplet = _parse_sequence(seq) if triplet is None: #print("Tkinter event_add(%s)" % seq, file=sys.__stderr__) widget.event_add(self, virtual, seq) else: if func is not None: self.__binders[triplet[1]].bind(triplet, func) triplets.append(triplet) def event_delete(self, virtual, *sequences): if virtual not in self.__eventinfo: return func, triplets = self.__eventinfo[virtual] for seq in sequences: triplet = _parse_sequence(seq) if triplet is None: #print("Tkinter event_delete: %s" % seq, file=sys.__stderr__) widget.event_delete(self, virtual, seq) else: if func is not None: self.__binders[triplet[1]].unbind(triplet, func) triplets.remove(triplet) def event_info(self, virtual=None): if virtual is None or virtual not in 
self.__eventinfo: return widget.event_info(self, virtual) else: return tuple(map(_triplet_to_sequence, self.__eventinfo[virtual][1])) + \ widget.event_info(self, virtual) def __del__(self): for virtual in self.__eventinfo: func, triplets = self.__eventinfo[virtual] if func: for triplet in triplets: self.__binders[triplet[1]].unbind(triplet, func) _multicall_dict[widget] = MultiCall return MultiCall if __name__ == "__main__": # Test root = tkinter.Tk() text = MultiCallCreator(tkinter.Text)(root) text.pack() def bindseq(seq, n=[0]): def handler(event): print(seq) text.bind("<<handler%d>>"%n[0], handler) text.event_add("<<handler%d>>"%n[0], seq) n[0] += 1 bindseq("<Key>") bindseq("<Control-Key>") bindseq("<Alt-Key-a>") bindseq("<Control-Key-a>") bindseq("<Alt-Control-Key-a>") bindseq("<Key-b>") bindseq("<Control-Button-1>") bindseq("<Alt-Button-1>") bindseq("<FocusOut>") bindseq("<Enter>") bindseq("<Leave>") root.mainloop()
mit
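As the MultiCall docstring above explains, every event dispatched through it carries mc_type and mc_state, so a handler can test the recognized modifiers portably through the mask constants instead of decoding raw Tk state bits. A minimal sketch in the spirit of the module's own __main__ test; the virtual event name is made up for the example.

# Assumes Python 3.2-era stdlib layout, matching this file's path.
import tkinter
from idlelib.MultiCall import (MultiCallCreator,
                               MC_KEYPRESS, MC_CONTROL, MC_SHIFT)

def on_key(event):
    # mc_type / mc_state are the members MultiCall adds to dispatched events
    if event.mc_type == MC_KEYPRESS and event.mc_state & MC_CONTROL:
        print('Control + key')
    if event.mc_state & MC_SHIFT:
        print('Shift was also held')

root = tkinter.Tk()
text = MultiCallCreator(tkinter.Text)(root)
text.pack()
text.event_add('<<ctrl-key>>', '<Control-Key>')
text.bind('<<ctrl-key>>', on_key)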
alisidd/tensorflow
tensorflow/python/framework/dtypes.py
30
19314
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Library of dtypes (Tensor element types).""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.core.framework import types_pb2 class DType(object): """Represents the type of the elements in a `Tensor`. The following `DType` objects are defined: * `tf.float16`: 16-bit half-precision floating-point. * `tf.float32`: 32-bit single-precision floating-point. * `tf.float64`: 64-bit double-precision floating-point. * `tf.bfloat16`: 16-bit truncated floating-point. * `tf.complex64`: 64-bit single-precision complex. * `tf.complex128`: 128-bit double-precision complex. * `tf.int8`: 8-bit signed integer. * `tf.uint8`: 8-bit unsigned integer. * `tf.uint16`: 16-bit unsigned integer. * `tf.int16`: 16-bit signed integer. * `tf.int32`: 32-bit signed integer. * `tf.int64`: 64-bit signed integer. * `tf.bool`: Boolean. * `tf.string`: String. * `tf.qint8`: Quantized 8-bit signed integer. * `tf.quint8`: Quantized 8-bit unsigned integer. * `tf.qint16`: Quantized 16-bit signed integer. * `tf.quint16`: Quantized 16-bit unsigned integer. * `tf.qint32`: Quantized 32-bit signed integer. * `tf.resource`: Handle to a mutable resource. In addition, variants of these types with the `_ref` suffix are defined for reference-typed tensors. The `tf.as_dtype()` function converts numpy types and string type names to a `DType` object. """ def __init__(self, type_enum): """Creates a new `DataType`. NOTE(mrry): In normal circumstances, you should not need to construct a `DataType` object directly. Instead, use the `tf.as_dtype()` function. Args: type_enum: A `types_pb2.DataType` enum value. Raises: TypeError: If `type_enum` is not a value `types_pb2.DataType`. """ # TODO(mrry): Make the necessary changes (using __new__) to ensure # that calling this returns one of the interned values. 
type_enum = int(type_enum) if (type_enum not in types_pb2.DataType.values() or type_enum == types_pb2.DT_INVALID): raise TypeError( "type_enum is not a valid types_pb2.DataType: %s" % type_enum) self._type_enum = type_enum @property def _is_ref_dtype(self): """Returns `True` if this `DType` represents a reference type.""" return self._type_enum > 100 @property def _as_ref(self): """Returns a reference `DType` based on this `DType`.""" if self._is_ref_dtype: return self else: return _INTERN_TABLE[self._type_enum + 100] @property def base_dtype(self): """Returns a non-reference `DType` based on this `DType`.""" if self._is_ref_dtype: return _INTERN_TABLE[self._type_enum - 100] else: return self @property def real_dtype(self): """Returns the dtype correspond to this dtype's real part.""" base = self.base_dtype if base == complex64: return float32 elif base == complex128: return float64 else: return self @property def is_numpy_compatible(self): return (self._type_enum != types_pb2.DT_RESOURCE and self._type_enum != types_pb2.DT_RESOURCE_REF) @property def as_numpy_dtype(self): """Returns a `numpy.dtype` based on this `DType`.""" return _TF_TO_NP[self._type_enum] @property def as_datatype_enum(self): """Returns a `types_pb2.DataType` enum value based on this `DType`.""" return self._type_enum @property def is_bool(self): """Returns whether this is a boolean data type""" return self.base_dtype == bool @property def is_integer(self): """Returns whether this is a (non-quantized) integer type.""" return (self.is_numpy_compatible and not self.is_quantized and issubclass(self.as_numpy_dtype, np.integer)) @property def is_floating(self): """Returns whether this is a (non-quantized, real) floating point type.""" return self.is_numpy_compatible and issubclass(self.as_numpy_dtype, np.floating) @property def is_complex(self): """Returns whether this is a complex floating point type.""" return self.base_dtype in (complex64, complex128) @property def is_quantized(self): """Returns whether this is a quantized data type.""" return self.base_dtype in [qint8, quint8, qint16, quint16, qint32, bfloat16] @property def is_unsigned(self): """Returns whether this type is unsigned. Non-numeric, unordered, and quantized types are not considered unsigned, and this function returns `False`. Returns: Whether a `DType` is unsigned. """ try: return self.min == 0 except TypeError: return False @property def min(self): """Returns the minimum representable value in this data type. Raises: TypeError: if this is a non-numeric, unordered, or quantized type. """ if (self.is_quantized or self.base_dtype in (bool, string, complex64, complex128)): raise TypeError("Cannot find minimum value of %s." % self) # there is no simple way to get the min value of a dtype, we have to check # float and int types separately try: return np.finfo(self.as_numpy_dtype()).min except: # bare except as possible raises by finfo not documented try: return np.iinfo(self.as_numpy_dtype()).min except: raise TypeError("Cannot find minimum value of %s." % self) @property def max(self): """Returns the maximum representable value in this data type. Raises: TypeError: if this is a non-numeric, unordered, or quantized type. """ if (self.is_quantized or self.base_dtype in (bool, string, complex64, complex128)): raise TypeError("Cannot find maximum value of %s." 
% self) # there is no simple way to get the max value of a dtype, we have to check # float and int types separately try: return np.finfo(self.as_numpy_dtype()).max except: # bare except as possible raises by finfo not documented try: return np.iinfo(self.as_numpy_dtype()).max except: raise TypeError("Cannot find maximum value of %s." % self) @property def limits(self, clip_negative=True): """Return intensity limits, i.e. (min, max) tuple, of the dtype. Args: clip_negative : bool, optional If True, clip the negative range (i.e. return 0 for min intensity) even if the image dtype allows negative values. Returns min, max : tuple Lower and upper intensity limits. """ min, max = dtype_range[self.as_numpy_dtype] if clip_negative: min = 0 return min, max def is_compatible_with(self, other): """Returns True if the `other` DType will be converted to this DType. The conversion rules are as follows: ```python DType(T) .is_compatible_with(DType(T)) == True DType(T) .is_compatible_with(DType(T).as_ref) == True DType(T).as_ref.is_compatible_with(DType(T)) == False DType(T).as_ref.is_compatible_with(DType(T).as_ref) == True ``` Args: other: A `DType` (or object that may be converted to a `DType`). Returns: True if a Tensor of the `other` `DType` will be implicitly converted to this `DType`. """ other = as_dtype(other) return self._type_enum in ( other.as_datatype_enum, other.base_dtype.as_datatype_enum) def __eq__(self, other): """Returns True iff this DType refers to the same type as `other`.""" if other is None: return False try: dtype = as_dtype(other).as_datatype_enum return self._type_enum == dtype # pylint: disable=protected-access except TypeError: return False def __ne__(self, other): """Returns True iff self != other.""" return not self.__eq__(other) @property def name(self): """Returns the string name for this `DType`.""" return _TYPE_TO_STRING[self._type_enum] def __int__(self): return self._type_enum def __str__(self): return "<dtype: %r>" % self.name def __repr__(self): return "tf." + self.name def __hash__(self): return self._type_enum @property def size(self): if self._type_enum == types_pb2.DT_RESOURCE: return 1 return np.dtype(self.as_numpy_dtype).itemsize # Define data type range of numpy dtype dtype_range = {np.bool_: (False, True), np.bool8: (False, True), np.uint8: (0, 255), np.uint16: (0, 65535), np.int8: (-128, 127), np.int16: (-32768, 32767), np.int64: (-2**63, 2**63 - 1), np.uint64: (0, 2**64 - 1), np.int32: (-2**31, 2**31 - 1), np.uint32: (0, 2**32 - 1), np.float32: (-1, 1), np.float64: (-1, 1)} # Define standard wrappers for the types_pb2.DataType enum. 
resource = DType(types_pb2.DT_RESOURCE) float16 = DType(types_pb2.DT_HALF) half = float16 float32 = DType(types_pb2.DT_FLOAT) float64 = DType(types_pb2.DT_DOUBLE) double = float64 int32 = DType(types_pb2.DT_INT32) uint8 = DType(types_pb2.DT_UINT8) uint16 = DType(types_pb2.DT_UINT16) int16 = DType(types_pb2.DT_INT16) int8 = DType(types_pb2.DT_INT8) string = DType(types_pb2.DT_STRING) complex64 = DType(types_pb2.DT_COMPLEX64) complex128 = DType(types_pb2.DT_COMPLEX128) int64 = DType(types_pb2.DT_INT64) bool = DType(types_pb2.DT_BOOL) qint8 = DType(types_pb2.DT_QINT8) quint8 = DType(types_pb2.DT_QUINT8) qint16 = DType(types_pb2.DT_QINT16) quint16 = DType(types_pb2.DT_QUINT16) qint32 = DType(types_pb2.DT_QINT32) resource_ref = DType(types_pb2.DT_RESOURCE_REF) bfloat16 = DType(types_pb2.DT_BFLOAT16) float16_ref = DType(types_pb2.DT_HALF_REF) half_ref = float16_ref float32_ref = DType(types_pb2.DT_FLOAT_REF) float64_ref = DType(types_pb2.DT_DOUBLE_REF) double_ref = float64_ref int32_ref = DType(types_pb2.DT_INT32_REF) uint8_ref = DType(types_pb2.DT_UINT8_REF) uint16_ref = DType(types_pb2.DT_UINT16_REF) int16_ref = DType(types_pb2.DT_INT16_REF) int8_ref = DType(types_pb2.DT_INT8_REF) string_ref = DType(types_pb2.DT_STRING_REF) complex64_ref = DType(types_pb2.DT_COMPLEX64_REF) complex128_ref = DType(types_pb2.DT_COMPLEX128_REF) int64_ref = DType(types_pb2.DT_INT64_REF) bool_ref = DType(types_pb2.DT_BOOL_REF) qint8_ref = DType(types_pb2.DT_QINT8_REF) quint8_ref = DType(types_pb2.DT_QUINT8_REF) qint16_ref = DType(types_pb2.DT_QINT16_REF) quint16_ref = DType(types_pb2.DT_QUINT16_REF) qint32_ref = DType(types_pb2.DT_QINT32_REF) bfloat16_ref = DType(types_pb2.DT_BFLOAT16_REF) # Maintain an intern table so that we don't have to create a large # number of small objects. _INTERN_TABLE = { types_pb2.DT_HALF: float16, types_pb2.DT_FLOAT: float32, types_pb2.DT_DOUBLE: float64, types_pb2.DT_INT32: int32, types_pb2.DT_UINT8: uint8, types_pb2.DT_UINT16: uint16, types_pb2.DT_INT16: int16, types_pb2.DT_INT8: int8, types_pb2.DT_STRING: string, types_pb2.DT_COMPLEX64: complex64, types_pb2.DT_COMPLEX128: complex128, types_pb2.DT_INT64: int64, types_pb2.DT_BOOL: bool, types_pb2.DT_QINT8: qint8, types_pb2.DT_QUINT8: quint8, types_pb2.DT_QINT16: qint16, types_pb2.DT_QUINT16: quint16, types_pb2.DT_QINT32: qint32, types_pb2.DT_BFLOAT16: bfloat16, types_pb2.DT_RESOURCE: resource, types_pb2.DT_HALF_REF: float16_ref, types_pb2.DT_FLOAT_REF: float32_ref, types_pb2.DT_DOUBLE_REF: float64_ref, types_pb2.DT_INT32_REF: int32_ref, types_pb2.DT_UINT8_REF: uint8_ref, types_pb2.DT_UINT16_REF: uint16_ref, types_pb2.DT_INT16_REF: int16_ref, types_pb2.DT_INT8_REF: int8_ref, types_pb2.DT_STRING_REF: string_ref, types_pb2.DT_COMPLEX64_REF: complex64_ref, types_pb2.DT_COMPLEX128_REF: complex128_ref, types_pb2.DT_INT64_REF: int64_ref, types_pb2.DT_BOOL_REF: bool_ref, types_pb2.DT_QINT8_REF: qint8_ref, types_pb2.DT_QUINT8_REF: quint8_ref, types_pb2.DT_QINT16_REF: qint16_ref, types_pb2.DT_QUINT16_REF: quint16_ref, types_pb2.DT_QINT32_REF: qint32_ref, types_pb2.DT_BFLOAT16_REF: bfloat16_ref, types_pb2.DT_RESOURCE_REF: resource_ref, } # Standard mappings between types_pb2.DataType values and string names. 
_TYPE_TO_STRING = { types_pb2.DT_HALF: "float16", types_pb2.DT_FLOAT: "float32", types_pb2.DT_DOUBLE: "float64", types_pb2.DT_INT32: "int32", types_pb2.DT_UINT8: "uint8", types_pb2.DT_UINT16: "uint16", types_pb2.DT_INT16: "int16", types_pb2.DT_INT8: "int8", types_pb2.DT_STRING: "string", types_pb2.DT_COMPLEX64: "complex64", types_pb2.DT_COMPLEX128: "complex128", types_pb2.DT_INT64: "int64", types_pb2.DT_BOOL: "bool", types_pb2.DT_QINT8: "qint8", types_pb2.DT_QUINT8: "quint8", types_pb2.DT_QINT16: "qint16", types_pb2.DT_QUINT16: "quint16", types_pb2.DT_QINT32: "qint32", types_pb2.DT_BFLOAT16: "bfloat16", types_pb2.DT_RESOURCE: "resource", types_pb2.DT_HALF_REF: "float16_ref", types_pb2.DT_FLOAT_REF: "float32_ref", types_pb2.DT_DOUBLE_REF: "float64_ref", types_pb2.DT_INT32_REF: "int32_ref", types_pb2.DT_UINT8_REF: "uint8_ref", types_pb2.DT_UINT16_REF: "uint16_ref", types_pb2.DT_INT16_REF: "int16_ref", types_pb2.DT_INT8_REF: "int8_ref", types_pb2.DT_STRING_REF: "string_ref", types_pb2.DT_COMPLEX64_REF: "complex64_ref", types_pb2.DT_COMPLEX128_REF: "complex128_ref", types_pb2.DT_INT64_REF: "int64_ref", types_pb2.DT_BOOL_REF: "bool_ref", types_pb2.DT_QINT8_REF: "qint8_ref", types_pb2.DT_QUINT8_REF: "quint8_ref", types_pb2.DT_QINT16_REF: "qint16_ref", types_pb2.DT_QUINT16_REF: "quint16_ref", types_pb2.DT_QINT32_REF: "qint32_ref", types_pb2.DT_BFLOAT16_REF: "bfloat16_ref", types_pb2.DT_RESOURCE_REF: "resource_ref", } _STRING_TO_TF = {value: _INTERN_TABLE[key] for key, value in _TYPE_TO_STRING.items()} # Add non-canonical aliases. _STRING_TO_TF["half"] = float16 _STRING_TO_TF["half_ref"] = float16_ref _STRING_TO_TF["float"] = float32 _STRING_TO_TF["float_ref"] = float32_ref _STRING_TO_TF["double"] = float64 _STRING_TO_TF["double_ref"] = float64_ref # Numpy representation for quantized dtypes. # # These are magic strings that are used in the swig wrapper to identify # quantized types. # TODO(mrry,keveman): Investigate Numpy type registration to replace this # hard-coding of names. _np_qint8 = np.dtype([("qint8", np.int8, 1)]) _np_quint8 = np.dtype([("quint8", np.uint8, 1)]) _np_qint16 = np.dtype([("qint16", np.int16, 1)]) _np_quint16 = np.dtype([("quint16", np.uint16, 1)]) _np_qint32 = np.dtype([("qint32", np.int32, 1)]) # Custom struct dtype for directly-fed ResourceHandles of supported type(s). np_resource = np.dtype([("resource", np.ubyte, 1)]) # Standard mappings between types_pb2.DataType values and numpy.dtypes. _NP_TO_TF = frozenset([ (np.float16, float16), (np.float32, float32), (np.float64, float64), (np.int32, int32), (np.int64, int64), (np.uint8, uint8), (np.uint16, uint16), (np.int16, int16), (np.int8, int8), (np.complex64, complex64), (np.complex128, complex128), (np.object, string), (np.bool, bool), (_np_qint8, qint8), (_np_quint8, quint8), (_np_qint16, qint16), (_np_quint16, quint16), (_np_qint32, qint32), # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16. ]) _TF_TO_NP = { types_pb2.DT_HALF: np.float16, types_pb2.DT_FLOAT: np.float32, types_pb2.DT_DOUBLE: np.float64, types_pb2.DT_INT32: np.int32, types_pb2.DT_UINT8: np.uint8, types_pb2.DT_UINT16: np.uint16, types_pb2.DT_INT16: np.int16, types_pb2.DT_INT8: np.int8, # NOTE(touts): For strings we use np.object as it supports variable length # strings. 
types_pb2.DT_STRING: np.object, types_pb2.DT_COMPLEX64: np.complex64, types_pb2.DT_COMPLEX128: np.complex128, types_pb2.DT_INT64: np.int64, types_pb2.DT_BOOL: np.bool, types_pb2.DT_QINT8: _np_qint8, types_pb2.DT_QUINT8: _np_quint8, types_pb2.DT_QINT16: _np_qint16, types_pb2.DT_QUINT16: _np_quint16, types_pb2.DT_QINT32: _np_qint32, types_pb2.DT_BFLOAT16: np.uint16, # Ref types types_pb2.DT_HALF_REF: np.float16, types_pb2.DT_FLOAT_REF: np.float32, types_pb2.DT_DOUBLE_REF: np.float64, types_pb2.DT_INT32_REF: np.int32, types_pb2.DT_UINT8_REF: np.uint8, types_pb2.DT_UINT16_REF: np.uint16, types_pb2.DT_INT16_REF: np.int16, types_pb2.DT_INT8_REF: np.int8, types_pb2.DT_STRING_REF: np.object, types_pb2.DT_COMPLEX64_REF: np.complex64, types_pb2.DT_COMPLEX128_REF: np.complex128, types_pb2.DT_INT64_REF: np.int64, types_pb2.DT_BOOL_REF: np.bool, types_pb2.DT_QINT8_REF: _np_qint8, types_pb2.DT_QUINT8_REF: _np_quint8, types_pb2.DT_QINT16_REF: _np_qint16, types_pb2.DT_QUINT16_REF: _np_quint16, types_pb2.DT_QINT32_REF: _np_qint32, types_pb2.DT_BFLOAT16_REF: np.uint16, } QUANTIZED_DTYPES = frozenset( [qint8, quint8, qint16, quint16, qint32, qint8_ref, quint8_ref, qint16_ref, quint16_ref, qint32_ref]) def as_dtype(type_value): """Converts the given `type_value` to a `DType`. Args: type_value: A value that can be converted to a `tf.DType` object. This may currently be a `tf.DType` object, a [`DataType` enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto), a string type name, or a `numpy.dtype`. Returns: A `DType` corresponding to `type_value`. Raises: TypeError: If `type_value` cannot be converted to a `DType`. """ if isinstance(type_value, DType): return type_value try: return _INTERN_TABLE[type_value] except KeyError: pass try: return _STRING_TO_TF[type_value] except KeyError: pass if isinstance(type_value, np.dtype): # The numpy dtype for strings is variable length. We can not compare # dtype with a single constant (np.string does not exist) to decide # dtype is a "string" type. We need to compare the dtype.type to be # sure it's a string type. if type_value.type == np.string_ or type_value.type == np.unicode_: return string for key, val in _NP_TO_TF: try: if key == type_value: return val except TypeError as e: raise TypeError("Cannot convert {} to a dtype. {}".format(type_value, e)) raise TypeError( "Cannot convert value %r to a TensorFlow DType." % type_value)
apache-2.0
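The point of the module above is that every accepted spelling of a type funnels through as_dtype into one interned DType object per enum value. A short sketch of that behaviour using the module-level names defined in the file (TF 1.x-era module layout assumed):

import numpy as np
from tensorflow.python.framework import dtypes

assert dtypes.as_dtype('float32') is dtypes.float32      # string name via _STRING_TO_TF
assert dtypes.as_dtype(np.int64) is dtypes.int64         # numpy type via _NP_TO_TF
assert dtypes.as_dtype(dtypes.bool) is dtypes.bool       # DType instances pass through
assert dtypes.float32_ref.base_dtype is dtypes.float32   # _ref types map back to their base
assert dtypes.float64.is_floating and not dtypes.int32.is_floating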
vijayanandnandam/youtube-dl
youtube_dl/extractor/nobelprize.py
68
2123
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    js_to_json,
    mimetype2ext,
    determine_ext,
    update_url_query,
    get_element_by_attribute,
    int_or_none,
)


class NobelPrizeIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?nobelprize\.org/mediaplayer.*?\bid=(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.nobelprize.org/mediaplayer/?id=2636',
        'md5': '04c81e5714bb36cc4e2232fee1d8157f',
        'info_dict': {
            'id': '2636',
            'ext': 'mp4',
            'title': 'Announcement of the 2016 Nobel Prize in Physics',
            'description': 'md5:05beba57f4f5a4bbd4cf2ef28fcff739',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        media = self._parse_json(self._search_regex(
            r'(?s)var\s*config\s*=\s*({.+?});', webpage,
            'config'), video_id, js_to_json)['media']
        title = media['title']

        formats = []
        for source in media.get('source', []):
            source_src = source.get('src')
            if not source_src:
                continue
            ext = mimetype2ext(source.get('type')) or determine_ext(source_src)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    source_src, video_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    update_url_query(source_src, {'hdcore': '3.7.0'}),
                    video_id, f4m_id='hds', fatal=False))
            else:
                formats.append({
                    'url': source_src,
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': get_element_by_attribute('itemprop', 'description', webpage),
            'duration': int_or_none(media.get('duration')),
            'formats': formats,
        }
unlicense
takeshineshiro/neutron
neutron/tests/unit/notifiers/test_batch_notifier.py
56
2032
# Copyright (c) 2014 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from neutron.notifiers import batch_notifier
from neutron.tests import base


class TestBatchNotifier(base.BaseTestCase):
    def setUp(self):
        super(TestBatchNotifier, self).setUp()
        self.notifier = batch_notifier.BatchNotifier(0.1, lambda x: x)
        self.spawn_n = mock.patch('eventlet.spawn_n').start()

    def test_queue_event_no_event(self):
        self.notifier.queue_event(None)
        self.assertEqual(0, len(self.notifier.pending_events))
        self.assertEqual(0, self.spawn_n.call_count)

    def test_queue_event_first_event(self):
        self.notifier.queue_event(mock.Mock())
        self.assertEqual(1, len(self.notifier.pending_events))
        self.assertEqual(1, self.spawn_n.call_count)

    def test_queue_event_multiple_events(self):
        events = 6
        for i in range(0, events):
            self.notifier.queue_event(mock.Mock())
        self.assertEqual(events, len(self.notifier.pending_events))
        self.assertEqual(1, self.spawn_n.call_count)

    def test_queue_event_call_send_events(self):
        with mock.patch.object(self.notifier, 'callback') as send_events:
            self.spawn_n.side_effect = lambda func: func()
            self.notifier.queue_event(mock.Mock())
            self.assertFalse(self.notifier._waiting_to_send)
            self.assertTrue(send_events.called)
apache-2.0
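The unit under test batches notifications: BatchNotifier(batch_interval, callback) appends each queued event to pending_events and spawns (through eventlet.spawn_n, mocked out above) a single sender that later hands the accumulated list to the callback in one call. A hedged usage sketch of that same public surface outside the test's mocks; the event payloads are placeholders.

from neutron.notifiers import batch_notifier

received = []  # collects one list per flushed batch
notifier = batch_notifier.BatchNotifier(0.1, received.append)

notifier.queue_event({'name': 'port_create'})
notifier.queue_event({'name': 'port_update'})
# Both events accumulate in notifier.pending_events; only the first call
# spawned a sender, which eventually passes the whole batch to the callback.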
chouseknecht/ansible-modules-core
network/vyos/vyos_command.py
6
7539
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # DOCUMENTATION = """ --- module: vyos_command version_added: "2.2" author: "Peter Sprygada (@privateip)" short_description: Run one or more commands on VyOS devices description: - The command module allows running one or more commands on remote devices running VyOS. This module can also be introspected to validate key parameters before returning successfully. If the conditional statements are not met in the wait period, the task fails. extends_documentation_fragment: vyos options: commands: description: - The ordered set of commands to execute on the remote device running VyOS. The output from the command execution is returned to the playbook. If the I(wait_for) argument is provided, the module is not returned until the condition is satisfied or the number of retries has been exceeded. required: true wait_for: description: - Specifies what to evaluate from the output of the command and what conditionals to apply. This argument will cause the task to wait for a particular conditional to be true before moving forward. If the conditional is not true by the configured I(retries), the task fails. See examples. required: false default: null aliases: ['waitfor'] match: description: - The I(match) argument is used in conjunction with the I(wait_for) argument to specify the match policy. Valid values are C(all) or C(any). If the value is set to C(all) then all conditionals in the wait_for must be satisfied. If the value is set to C(any) then only one of the values must be satisfied. required: false default: all choices: ['any', 'all'] retries: description: - Specifies the number of retries a command should be tried before it is considered failed. The command is run on the target device every retry and evaluated against the I(wait_for) conditionals. required: false default: 10 interval: description: - Configures the interval in seconds to wait between I(retries) of the command. If the command does not pass the specified conditional, the interval indicates how to long to wait before trying the command again. required: false default: 1 """ EXAMPLES = """ # Note: examples below use the following provider dict to handle # transport and authentication to the node. 
vars: cli: host: "{{ inventory_hostname }}" username: vyos password: vyos transport: cli - vyos_command: commands: - show interfaces ethernet {{ item }} provider: "{{ cli }}" with_items: - eth0 - eth1 - vyos_command: commands: - show version - show hardware cpu wait_for: - "result[0] contains 'VyOS 1.1.7'" provider: "{{ cli }}" """ RETURN = """ stdout: description: The set of responses from the commands returned: always type: list sample: ['...', '...'] stdout_lines: description: The value of stdout split into a list returned: always type: list sample: [['...', '...'], ['...'], ['...']] failed_conditions: description: The conditionals that failed retured: failed type: list sample: ['...', '...'] warnings: description: The list of warnings (if any) generated by module based on arguments returned: always type: list sample: ['...', '...'] """ import ansible.module_utils.vyos from ansible.module_utils.basic import get_exception from ansible.module_utils.netcli import CommandRunner from ansible.module_utils.netcli import AddCommandError, FailedConditionsError from ansible.module_utils.network import NetworkModule, NetworkError from ansible.module_utils.six import string_types VALID_KEYS = ['command', 'output', 'prompt', 'response'] def to_lines(stdout): for item in stdout: if isinstance(item, string_types): item = str(item).split('\n') yield item def parse_commands(module): for cmd in module.params['commands']: if isinstance(cmd, string_types): cmd = dict(command=cmd, output=None) elif 'command' not in cmd: module.fail_json(msg='command keyword argument is required') elif cmd.get('output') not in [None, 'text']: module.fail_json(msg='invalid output specified for command') elif not set(cmd.keys()).issubset(VALID_KEYS): module.fail_json(msg='unknown keyword specified') yield cmd def main(): spec = dict( # { command: <str>, output: <str>, prompt: <str>, response: <str> } commands=dict(type='list', required=True), wait_for=dict(type='list', aliases=['waitfor']), match=dict(default='all', choices=['all', 'any']), retries=dict(default=10, type='int'), interval=dict(default=1, type='int') ) module = NetworkModule(argument_spec=spec, connect_on_load=False, supports_check_mode=True) commands = list(parse_commands(module)) conditionals = module.params['wait_for'] or list() warnings = list() runner = CommandRunner(module) for cmd in commands: if module.check_mode and not cmd['command'].startswith('show'): warnings.append('only show commands are supported when using ' 'check mode, not executing `%s`' % cmd['command']) else: if cmd['command'].startswith('conf'): module.fail_json(msg='vyos_command does not support running ' 'config mode commands. 
Please use ' 'vyos_config instead') try: runner.add_command(**cmd) except AddCommandError: exc = get_exception() warnings.append('duplicate command detected: %s' % cmd) for item in conditionals: runner.add_conditional(item) runner.retries = module.params['retries'] runner.interval = module.params['interval'] runner.match = module.params['match'] try: runner.run() except FailedConditionsError: exc = get_exception() module.fail_json(msg=str(exc), failed_conditions=exc.failed_conditions) except NetworkError: exc = get_exception() module.fail_json(msg=str(exc)) result = dict(changed=False, stdout=list()) for cmd in commands: try: output = runner.get_command(cmd['command']) except ValueError: output = 'command not executed due to check_mode, see warnings' result['stdout'].append(output) result['warnings'] = warnings result['stdout_lines'] = list(to_lines(result['stdout'])) module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
PredictiveScienceLab/GPy
GPy/inference/latent_function_inference/fitc.py
15
3137
# Copyright (c) 2012, James Hensman
# Licensed under the BSD 3-clause license (see LICENSE.txt)

from .posterior import Posterior
from ...util.linalg import jitchol, tdot, dtrtrs, dpotri, pdinv
from ...util import diag
import numpy as np
from . import LatentFunctionInference
log_2_pi = np.log(2*np.pi)


class FITC(LatentFunctionInference):
    """
    An object for inference when the likelihood is Gaussian, but we want to do
    sparse inference.

    The function self.inference returns a Posterior object, which summarizes
    the posterior.
    """
    const_jitter = 1e-6

    def inference(self, kern, X, Z, likelihood, Y, mean_function=None, Y_metadata=None):
        assert mean_function is None, "inference with a mean function not implemented"

        num_inducing, _ = Z.shape
        num_data, output_dim = Y.shape

        #make sure the noise is not hetero
        sigma_n = likelihood.gaussian_variance(Y_metadata)
        if sigma_n.size >1:
            raise NotImplementedError("no hetero noise with this implementation of FITC")

        Kmm = kern.K(Z)
        Knn = kern.Kdiag(X)
        Knm = kern.K(X, Z)
        U = Knm

        #factor Kmm
        diag.add(Kmm, self.const_jitter)
        Kmmi, L, Li, _ = pdinv(Kmm)

        #compute beta_star, the effective noise precision
        LiUT = np.dot(Li, U.T)
        sigma_star = Knn + sigma_n - np.sum(np.square(LiUT),0)
        beta_star = 1./sigma_star

        # Compute and factor A
        A = tdot(LiUT*np.sqrt(beta_star)) + np.eye(num_inducing)
        LA = jitchol(A)

        # back substitute to get b, P, v
        URiy = np.dot(U.T*beta_star,Y)
        tmp, _ = dtrtrs(L, URiy, lower=1)
        b, _ = dtrtrs(LA, tmp, lower=1)
        tmp, _ = dtrtrs(LA, b, lower=1, trans=1)
        v, _ = dtrtrs(L, tmp, lower=1, trans=1)
        tmp, _ = dtrtrs(LA, Li, lower=1, trans=0)
        P = tdot(tmp.T)

        #compute log marginal
        log_marginal = -0.5*num_data*output_dim*np.log(2*np.pi) + \
                       -np.sum(np.log(np.diag(LA)))*output_dim + \
                       0.5*output_dim*np.sum(np.log(beta_star)) + \
                       -0.5*np.sum(np.square(Y.T*np.sqrt(beta_star))) + \
                       0.5*np.sum(np.square(b))

        #compute dL_dR
        Uv = np.dot(U, v)
        dL_dR = 0.5*(np.sum(U*np.dot(U,P), 1) - 1./beta_star + np.sum(np.square(Y), 1) - 2.*np.sum(Uv*Y, 1) + np.sum(np.square(Uv), 1))*beta_star**2

        # Compute dL_dKmm
        vvT_P = tdot(v.reshape(-1,1)) + P
        dL_dK = 0.5*(Kmmi - vvT_P)
        KiU = np.dot(Kmmi, U.T)
        dL_dK += np.dot(KiU*dL_dR, KiU.T)

        # Compute dL_dU
        vY = np.dot(v.reshape(-1,1),Y.T)
        dL_dU = vY - np.dot(vvT_P, U.T)
        dL_dU *= beta_star
        dL_dU -= 2.*KiU*dL_dR

        dL_dthetaL = likelihood.exact_inference_gradients(dL_dR)

        grad_dict = {'dL_dKmm': dL_dK, 'dL_dKdiag':dL_dR, 'dL_dKnm':dL_dU.T, 'dL_dthetaL':dL_dthetaL}

        #construct a posterior object
        post = Posterior(woodbury_inv=Kmmi-P, woodbury_vector=v, K=Kmm, mean=None, cov=None, K_chol=L)

        return post, log_marginal, grad_dict
bsd-3-clause
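The quantities assembled in inference are the standard FITC ones (Snelson and Ghahramani's sparse pseudo-input GP): with Q_nn = K_nm K_mm^{-1} K_mn, sigma_star is the diagonal of the FITC noise term and beta_star its elementwise inverse, while log_marginal is the Gaussian log density of the targets under the approximate prior. In LaTeX, a sketch of the standard form the code follows (not text from the file):

\Lambda \;=\; \operatorname{diag}\!\left(K_{nn} - K_{nm} K_{mm}^{-1} K_{mn}\right) + \sigma_n^2 I,
\qquad
\beta^{\star} \;=\; \Lambda^{-1},

\log p(\mathbf{y} \mid X, Z) \;=\; \log \mathcal{N}\!\left(\mathbf{y} \,\middle|\, \mathbf{0},\; K_{nm} K_{mm}^{-1} K_{mn} + \Lambda\right).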
ProjectSWGCore/NGECore2
scripts/mobiles/endor/panshee_elder_warrior.py
2
3145
import sys

from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector

def addTemplate(core):
    mobileTemplate = MobileTemplate()

    mobileTemplate.setCreatureName('panshee_elder_worker')
    mobileTemplate.setLevel(71)
    mobileTemplate.setDifficulty(Difficulty.NORMAL)
    mobileTemplate.setMinSpawnDistance(3)
    mobileTemplate.setMaxSpawnDistance(5)
    mobileTemplate.setDeathblow(False)
    mobileTemplate.setScale(1)
    mobileTemplate.setSocialGroup('panshee tribe')
    mobileTemplate.setAssistRange(1)
    mobileTemplate.setOptionsBitmask(128)
    mobileTemplate.setStalker(True)

    templates = Vector()
    templates.add('object/mobile/shared_dressed_ewok_f_01.iff')
    templates.add('object/mobile/shared_dressed_ewok_f_02.iff')
    templates.add('object/mobile/shared_dressed_ewok_f_03.iff')
    templates.add('object/mobile/shared_dressed_ewok_f_04.iff')
    templates.add('object/mobile/shared_dressed_ewok_f_05.iff')
    templates.add('object/mobile/shared_dressed_ewok_f_06.iff')
    templates.add('object/mobile/shared_dressed_ewok_f_07.iff')
    templates.add('object/mobile/shared_dressed_ewok_f_08.iff')
    templates.add('object/mobile/shared_dressed_ewok_f_09.iff')
    templates.add('object/mobile/shared_dressed_ewok_f_10.iff')
    templates.add('object/mobile/shared_dressed_ewok_f_11.iff')
    templates.add('object/mobile/shared_dressed_ewok_f_12.iff')
    templates.add('object/mobile/shared_dressed_ewok_m_01.iff')
    templates.add('object/mobile/shared_dressed_ewok_m_02.iff')
    templates.add('object/mobile/shared_dressed_ewok_m_03.iff')
    templates.add('object/mobile/shared_dressed_ewok_m_04.iff')
    templates.add('object/mobile/shared_dressed_ewok_m_05.iff')
    templates.add('object/mobile/shared_dressed_ewok_m_06.iff')
    templates.add('object/mobile/shared_dressed_ewok_m_07.iff')
    templates.add('object/mobile/shared_dressed_ewok_m_08.iff')
    templates.add('object/mobile/shared_dressed_ewok_m_09.iff')
    templates.add('object/mobile/shared_dressed_ewok_m_10.iff')
    templates.add('object/mobile/shared_dressed_ewok_m_11.iff')
    templates.add('object/mobile/shared_dressed_ewok_m_12.iff')
    mobileTemplate.setTemplates(templates)

    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)

    attacks = Vector()
    mobileTemplate.setDefaultAttack('meleeHit')
    mobileTemplate.setAttacks(attacks)

    lootPoolNames_1 = ['Junk']
    lootPoolChances_1 = [100]
    lootGroupChance_1 = 65
    mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)

    lootPoolNames_2 = ['random_loot_primitives']
    lootPoolChances_2 = [100]
    lootGroupChance_2 = 35
    mobileTemplate.addToLootGroups(lootPoolNames_2,lootPoolChances_2,lootGroupChance_2)

    core.spawnService.addMobileTemplate('panshee_elder_worker', mobileTemplate)
    return
lgpl-3.0
ksrajkumar/openerp-6.1
openerp/workflow/instance.py
14
3542
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import wkf_logs
import workitem

import openerp.netsvc as netsvc
import openerp.pooler as pooler

def create(cr, ident, wkf_id):
    (uid,res_type,res_id) = ident
    cr.execute('insert into wkf_instance (res_type,res_id,uid,wkf_id) values (%s,%s,%s,%s) RETURNING id', (res_type,res_id,uid,wkf_id))
    id_new = cr.fetchone()[0]
    cr.execute('select * from wkf_activity where flow_start=True and wkf_id=%s', (wkf_id,))
    res = cr.dictfetchall()
    stack = []
    workitem.create(cr, res, id_new, ident, stack=stack)
    update(cr, id_new, ident)
    return id_new

def delete(cr, ident):
    (uid,res_type,res_id) = ident
    cr.execute('delete from wkf_instance where res_id=%s and res_type=%s', (res_id,res_type))

def validate(cr, inst_id, ident, signal, force_running=False):
    cr.execute("select * from wkf_workitem where inst_id=%s", (inst_id,))
    stack = []
    for witem in cr.dictfetchall():
        stack = []
        workitem.process(cr, witem, ident, signal, force_running, stack=stack)
        # An action is returned
    _update_end(cr, inst_id, ident)
    return stack and stack[0] or False

def update(cr, inst_id, ident):
    cr.execute("select * from wkf_workitem where inst_id=%s", (inst_id,))
    for witem in cr.dictfetchall():
        stack = []
        workitem.process(cr, witem, ident, stack=stack)
    return _update_end(cr, inst_id, ident)

def _update_end(cr, inst_id, ident):
    cr.execute('select wkf_id from wkf_instance where id=%s', (inst_id,))
    wkf_id = cr.fetchone()[0]
    cr.execute('select state,flow_stop from wkf_workitem w left join wkf_activity a on (a.id=w.act_id) where w.inst_id=%s', (inst_id,))
    ok=True
    for r in cr.fetchall():
        if (r[0]<>'complete') or not r[1]:
            ok=False
            break
    if ok:
        cr.execute('select distinct a.name from wkf_activity a left join wkf_workitem w on (a.id=w.act_id) where w.inst_id=%s', (inst_id,))
        act_names = cr.fetchall()
        cr.execute("update wkf_instance set state='complete' where id=%s", (inst_id,))
        cr.execute("update wkf_workitem set state='complete' where subflow_id=%s", (inst_id,))
        cr.execute("select i.id,w.osv,i.res_id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where i.id IN (select inst_id from wkf_workitem where subflow_id=%s)", (inst_id,))
        for i in cr.fetchall():
            for act_name in act_names:
                validate(cr, i[0], (ident[0],i[1],i[2]), 'subflow.'+act_name[0])
    return ok

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
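Every function in the module above is keyed by the same ident = (uid, res_type, res_id) triple: create starts an instance at the workflow's flow_start activities, validate pushes a named signal through the pending workitems, and _update_end closes the instance and relays 'subflow.<activity>' signals to parent instances. A hedged sketch of how calling code drives it; the cursor, ids and signal name are placeholders, not values from this file.

from openerp.workflow import instance

ident = (uid, 'sale.order', order_id)                     # (uid, model name, record id) - assumed to exist
inst_id = instance.create(cr, ident, wkf_id)              # spawns workitems at flow_start, then update()
instance.validate(cr, inst_id, ident, 'order_confirm')    # push a signal through the workitems
instance.update(cr, inst_id, ident)                       # re-evaluate pending workitems without a signal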
garyjyao1/ansible
contrib/inventory/ec2.py
20
54413
#!/usr/bin/env python ''' EC2 external inventory script ================================= Generates inventory that Ansible can understand by making API request to AWS EC2 using the Boto library. NOTE: This script assumes Ansible is being executed where the environment variables needed for Boto have already been set: export AWS_ACCESS_KEY_ID='AK123' export AWS_SECRET_ACCESS_KEY='abc123' This script also assumes there is an ec2.ini file alongside it. To specify a different path to ec2.ini, define the EC2_INI_PATH environment variable: export EC2_INI_PATH=/path/to/my_ec2.ini If you're using eucalyptus you need to set the above variables and you need to define: export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus If you're using boto profiles (requires boto>=2.24.0) you can choose a profile using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using the AWS_PROFILE variable: AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html When run against a specific host, this script returns the following variables: - ec2_ami_launch_index - ec2_architecture - ec2_association - ec2_attachTime - ec2_attachment - ec2_attachmentId - ec2_client_token - ec2_deleteOnTermination - ec2_description - ec2_deviceIndex - ec2_dns_name - ec2_eventsSet - ec2_group_name - ec2_hypervisor - ec2_id - ec2_image_id - ec2_instanceState - ec2_instance_type - ec2_ipOwnerId - ec2_ip_address - ec2_item - ec2_kernel - ec2_key_name - ec2_launch_time - ec2_monitored - ec2_monitoring - ec2_networkInterfaceId - ec2_ownerId - ec2_persistent - ec2_placement - ec2_platform - ec2_previous_state - ec2_private_dns_name - ec2_private_ip_address - ec2_publicIp - ec2_public_dns_name - ec2_ramdisk - ec2_reason - ec2_region - ec2_requester_id - ec2_root_device_name - ec2_root_device_type - ec2_security_group_ids - ec2_security_group_names - ec2_shutdown_state - ec2_sourceDestCheck - ec2_spot_instance_request_id - ec2_state - ec2_state_code - ec2_state_reason - ec2_status - ec2_subnet_id - ec2_tenancy - ec2_virtualization_type - ec2_vpc_id These variables are pulled out of a boto.ec2.instance object. There is a lack of consistency with variable spellings (camelCase and underscores) since this just loops through all variables the object exposes. It is preferred to use the ones with underscores when multiple exist. In addition, if an instance has AWS Tags associated with it, each tag is a new variable named: - ec2_tag_[Key] = [Value] Security groups are comma-separated in 'ec2_security_group_ids' and 'ec2_security_group_names'. ''' # (c) 2012, Peter Sankauskas # # This file is part of Ansible, # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
###################################################################### import sys import os import argparse import re from time import time import boto from boto import ec2 from boto import rds from boto import elasticache from boto import route53 import six from six.moves import configparser from collections import defaultdict try: import json except ImportError: import simplejson as json class Ec2Inventory(object): def _empty_inventory(self): return {"_meta" : {"hostvars" : {}}} def __init__(self): ''' Main execution path ''' # Inventory grouped by instance IDs, tags, security groups, regions, # and availability zones self.inventory = self._empty_inventory() # Index of hostname (address) to instance ID self.index = {} # Boto profile to use (if any) self.boto_profile = None # Read settings and parse CLI arguments self.parse_cli_args() self.read_settings() # Make sure that profile_name is not passed at all if not set # as pre 2.24 boto will fall over otherwise if self.boto_profile: if not hasattr(boto.ec2.EC2Connection, 'profile_name'): self.fail_with_error("boto version must be >= 2.24 to use profile") # Cache if self.args.refresh_cache: self.do_api_calls_update_cache() elif not self.is_cache_valid(): self.do_api_calls_update_cache() # Data to print if self.args.host: data_to_print = self.get_host_info() elif self.args.list: # Display list of instances for inventory if self.inventory == self._empty_inventory(): data_to_print = self.get_inventory_from_cache() else: data_to_print = self.json_format_dict(self.inventory, True) print(data_to_print) def is_cache_valid(self): ''' Determines if the cache files have expired, or if it is still valid ''' if os.path.isfile(self.cache_path_cache): mod_time = os.path.getmtime(self.cache_path_cache) current_time = time() if (mod_time + self.cache_max_age) > current_time: if os.path.isfile(self.cache_path_index): return True return False def read_settings(self): ''' Reads the settings from the ec2.ini file ''' if six.PY3: config = configparser.ConfigParser() else: config = configparser.SafeConfigParser() ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path))) config.read(ec2_ini_path) # is eucalyptus? self.eucalyptus_host = None self.eucalyptus = False if config.has_option('ec2', 'eucalyptus'): self.eucalyptus = config.getboolean('ec2', 'eucalyptus') if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') # Regions self.regions = [] configRegions = config.get('ec2', 'regions') configRegions_exclude = config.get('ec2', 'regions_exclude') if (configRegions == 'all'): if self.eucalyptus_host: self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) else: for regionInfo in ec2.regions(): if regionInfo.name not in configRegions_exclude: self.regions.append(regionInfo.name) else: self.regions = configRegions.split(",") # Destination addresses self.destination_variable = config.get('ec2', 'destination_variable') self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') # Route53 self.route53_enabled = config.getboolean('ec2', 'route53') self.route53_excluded_zones = [] if config.has_option('ec2', 'route53_excluded_zones'): self.route53_excluded_zones.extend( config.get('ec2', 'route53_excluded_zones', '').split(',')) # Include RDS instances? 
self.rds_enabled = True if config.has_option('ec2', 'rds'): self.rds_enabled = config.getboolean('ec2', 'rds') # Include ElastiCache instances? self.elasticache_enabled = True if config.has_option('ec2', 'elasticache'): self.elasticache_enabled = config.getboolean('ec2', 'elasticache') # Return all EC2 instances? if config.has_option('ec2', 'all_instances'): self.all_instances = config.getboolean('ec2', 'all_instances') else: self.all_instances = False # Instance states to be gathered in inventory. Default is 'running'. # Setting 'all_instances' to 'yes' overrides this option. ec2_valid_instance_states = [ 'pending', 'running', 'shutting-down', 'terminated', 'stopping', 'stopped' ] self.ec2_instance_states = [] if self.all_instances: self.ec2_instance_states = ec2_valid_instance_states elif config.has_option('ec2', 'instance_states'): for instance_state in config.get('ec2', 'instance_states').split(','): instance_state = instance_state.strip() if instance_state not in ec2_valid_instance_states: continue self.ec2_instance_states.append(instance_state) else: self.ec2_instance_states = ['running'] # Return all RDS instances? (if RDS is enabled) if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') else: self.all_rds_instances = False # Return all ElastiCache replication groups? (if ElastiCache is enabled) if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled: self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups') else: self.all_elasticache_replication_groups = False # Return all ElastiCache clusters? (if ElastiCache is enabled) if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled: self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters') else: self.all_elasticache_clusters = False # Return all ElastiCache nodes? (if ElastiCache is enabled) if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') else: self.all_elasticache_nodes = False # boto configuration profile (prefer CLI argument) self.boto_profile = self.args.boto_profile if config.has_option('ec2', 'boto_profile') and not self.boto_profile: self.boto_profile = config.get('ec2', 'boto_profile') # Cache related cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) if self.boto_profile: cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) if not os.path.exists(cache_dir): os.makedirs(cache_dir) self.cache_path_cache = cache_dir + "/ansible-ec2.cache" self.cache_path_index = cache_dir + "/ansible-ec2.index" self.cache_max_age = config.getint('ec2', 'cache_max_age') # Configure nested groups instead of flat namespace. if config.has_option('ec2', 'nested_groups'): self.nested_groups = config.getboolean('ec2', 'nested_groups') else: self.nested_groups = False # Configure which groups should be created. 
group_by_options = [ 'group_by_instance_id', 'group_by_region', 'group_by_availability_zone', 'group_by_ami_id', 'group_by_instance_type', 'group_by_key_pair', 'group_by_vpc_id', 'group_by_security_group', 'group_by_tag_keys', 'group_by_tag_none', 'group_by_route53_names', 'group_by_rds_engine', 'group_by_rds_parameter_group', 'group_by_elasticache_engine', 'group_by_elasticache_cluster', 'group_by_elasticache_parameter_group', 'group_by_elasticache_replication_group', ] for option in group_by_options: if config.has_option('ec2', option): setattr(self, option, config.getboolean('ec2', option)) else: setattr(self, option, True) # Do we need to just include hosts that match a pattern? try: pattern_include = config.get('ec2', 'pattern_include') if pattern_include and len(pattern_include) > 0: self.pattern_include = re.compile(pattern_include) else: self.pattern_include = None except configparser.NoOptionError as e: self.pattern_include = None # Do we need to exclude hosts that match a pattern? try: pattern_exclude = config.get('ec2', 'pattern_exclude'); if pattern_exclude and len(pattern_exclude) > 0: self.pattern_exclude = re.compile(pattern_exclude) else: self.pattern_exclude = None except configparser.NoOptionError as e: self.pattern_exclude = None # Instance filters (see boto and EC2 API docs). Ignore invalid filters. self.ec2_instance_filters = defaultdict(list) if config.has_option('ec2', 'instance_filters'): for instance_filter in config.get('ec2', 'instance_filters', '').split(','): instance_filter = instance_filter.strip() if not instance_filter or '=' not in instance_filter: continue filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] if not filter_key: continue self.ec2_instance_filters[filter_key].append(filter_value) def parse_cli_args(self): ''' Command line argument processing ''' parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)') parser.add_argument('--host', action='store', help='Get all the variables about a specific instance') parser.add_argument('--refresh-cache', action='store_true', default=False, help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') parser.add_argument('--boto-profile', action='store', help='Use boto profile for connections to EC2') self.args = parser.parse_args() def do_api_calls_update_cache(self): ''' Do API calls to each region, and save data in cache files ''' if self.route53_enabled: self.get_route53_records() for region in self.regions: self.get_instances_by_region(region) if self.rds_enabled: self.get_rds_instances_by_region(region) if self.elasticache_enabled: self.get_elasticache_clusters_by_region(region) self.get_elasticache_replication_groups_by_region(region) self.write_to_cache(self.inventory, self.cache_path_cache) self.write_to_cache(self.index, self.cache_path_index) def connect(self, region): ''' create connection to api server''' if self.eucalyptus: conn = boto.connect_euca(host=self.eucalyptus_host) conn.APIVersion = '2010-08-31' else: conn = self.connect_to_aws(ec2, region) return conn def boto_fix_security_token_in_profile(self, connect_args): ''' monkey patch for boto issue boto/boto#2100 ''' profile = 'profile ' + self.boto_profile if boto.config.has_option(profile, 'aws_security_token'): connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') return connect_args def connect_to_aws(self, 
module, region): connect_args = {} # only pass the profile name if it's set (as it is not supported by older boto versions) if self.boto_profile: connect_args['profile_name'] = self.boto_profile self.boto_fix_security_token_in_profile(connect_args) conn = module.connect_to_region(region, **connect_args) # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported if conn is None: self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region) return conn def get_instances_by_region(self, region): ''' Makes an AWS EC2 API call to the list of instances in a particular region ''' try: conn = self.connect(region) reservations = [] if self.ec2_instance_filters: for filter_key, filter_values in self.ec2_instance_filters.items(): reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) else: reservations = conn.get_all_instances() for reservation in reservations: for instance in reservation.instances: self.add_instance(instance, region) except boto.exception.BotoServerError as e: if e.error_code == 'AuthFailure': error = self.get_auth_error_message() else: backend = 'Eucalyptus' if self.eucalyptus else 'AWS' error = "Error connecting to %s backend.\n%s" % (backend, e.message) self.fail_with_error(error, 'getting EC2 instances') def get_rds_instances_by_region(self, region): ''' Makes an AWS API call to the list of RDS instances in a particular region ''' try: conn = self.connect_to_aws(rds, region) if conn: instances = conn.get_all_dbinstances() for instance in instances: self.add_rds_instance(instance, region) except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS RDS is down:\n%s" % e.message self.fail_with_error(error, 'getting RDS instances') def get_elasticache_clusters_by_region(self, region): ''' Makes an AWS API call to the list of ElastiCache clusters (with nodes' info) in a particular region.''' # ElastiCache boto module doesn't provide a get_all_intances method, # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) try: conn = elasticache.connect_to_region(region) if conn: # show_cache_node_info = True # because we also want nodes' information response = conn.describe_cache_clusters(None, None, None, True) except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS ElastiCache is down:\n%s" % e.message self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes to CacheClusters or # CacheNodes. Because of that wo can't make use of the get_list # method in the AWSQueryConnection. Let's do the work manually clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] except KeyError as e: error = "ElastiCache query to AWS failed (unexpected format)." 
self.fail_with_error(error, 'getting ElastiCache clusters') for cluster in clusters: self.add_elasticache_cluster(cluster, region) def get_elasticache_replication_groups_by_region(self, region): ''' Makes an AWS API call to the list of ElastiCache replication groups in a particular region.''' # ElastiCache boto module doesn't provide a get_all_intances method, # that's why we need to call describe directly (it would be called by # the shorthand method anyway...) try: conn = elasticache.connect_to_region(region) if conn: response = conn.describe_replication_groups() except boto.exception.BotoServerError as e: error = e.reason if e.error_code == 'AuthFailure': error = self.get_auth_error_message() if not e.reason == "Forbidden": error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message self.fail_with_error(error, 'getting ElastiCache clusters') try: # Boto also doesn't provide wrapper classes to ReplicationGroups # Because of that wo can't make use of the get_list method in the # AWSQueryConnection. Let's do the work manually replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups'] except KeyError as e: error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)." self.fail_with_error(error, 'getting ElastiCache clusters') for replication_group in replication_groups: self.add_elasticache_replication_group(replication_group, region) def get_auth_error_message(self): ''' create an informative error message if there is an issue authenticating''' errors = ["Authentication error retrieving ec2 inventory."] if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]: errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found') else: errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct') boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials'] boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p))) if len(boto_config_found) > 0: errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found)) else: errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths)) return '\n'.join(errors) def fail_with_error(self, err_msg, err_operation=None): '''log an error to std err for ansible-playbook to consume and exit''' if err_operation: err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format( err_msg=err_msg, err_operation=err_operation) sys.stderr.write(err_msg) sys.exit(1) def get_instance(self, region, instance_id): conn = self.connect(region) reservations = conn.get_all_instances([instance_id]) for reservation in reservations: for instance in reservation.instances: return instance def add_instance(self, instance, region): ''' Adds an instance to the inventory and index, as long as it is addressable ''' # Only return instances with desired instance states if instance.state not in self.ec2_instance_states: return # Select the best destination address if instance.subnet_id: dest = getattr(instance, self.vpc_destination_variable, None) if dest is None: dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) else: dest = getattr(instance, self.destination_variable, None) if dest is None: dest = getattr(instance, 'tags').get(self.destination_variable, None) if not dest: # Skip instances we cannot address (e.g. 
private VPC subnet) return # if we only want to include hosts that match a pattern, skip those that don't if self.pattern_include and not self.pattern_include.match(dest): return # if we need to exclude hosts that match a pattern, skip those if self.pattern_exclude and self.pattern_exclude.match(dest): return # Add to index self.index[dest] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[instance.id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, instance.placement, dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.placement) self.push_group(self.inventory, 'zones', instance.placement) # Inventory: Group by Amazon Machine Image (AMI) ID if self.group_by_ami_id: ami_id = self.to_safe(instance.image_id) self.push(self.inventory, ami_id, dest) if self.nested_groups: self.push_group(self.inventory, 'images', ami_id) # Inventory: Group by instance type if self.group_by_instance_type: type_name = self.to_safe('type_' + instance.instance_type) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by key pair if self.group_by_key_pair and instance.key_name: key_name = self.to_safe('key_' + instance.key_name) self.push(self.inventory, key_name, dest) if self.nested_groups: self.push_group(self.inventory, 'keys', key_name) # Inventory: Group by VPC if self.group_by_vpc_id and instance.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) self.push(self.inventory, vpc_id_name, dest) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) # Inventory: Group by security group if self.group_by_security_group: try: for group in instance.groups: key = self.to_safe("security_group_" + group.name) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: self.fail_with_error('\n'.join(['Package boto seems a bit older.', 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by tag keys if self.group_by_tag_keys: for k, v in instance.tags.items(): if v: key = self.to_safe("tag_" + k + "=" + v) else: key = self.to_safe("tag_" + k) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) self.push_group(self.inventory, self.to_safe("tag_" + k), key) # Inventory: Group by Route53 domain names if enabled if self.route53_enabled and self.group_by_route53_names: route53_names = self.get_instance_route53_names(instance) for name in route53_names: self.push(self.inventory, name, dest) if self.nested_groups: self.push_group(self.inventory, 'route53', name) # Global Tag: instances without tags if self.group_by_tag_none and len(instance.tags) == 0: self.push(self.inventory, 'tag_none', dest) if self.nested_groups: self.push_group(self.inventory, 'tags', 'tag_none') # Global Tag: tag all EC2 instances self.push(self.inventory, 'ec2', dest) self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) def add_rds_instance(self, instance, region): ''' Adds an RDS instance to the inventory and index, 
as long as it is addressable ''' # Only want available instances unless all_rds_instances is True if not self.all_rds_instances and instance.status != 'available': return # Select the best destination address dest = instance.endpoint[0] if not dest: # Skip instances we cannot address (e.g. private VPC subnet) return # Add to index self.index[dest] = [region, instance.id] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[instance.id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', instance.id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, instance.availability_zone, dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, instance.availability_zone) self.push_group(self.inventory, 'zones', instance.availability_zone) # Inventory: Group by instance type if self.group_by_instance_type: type_name = self.to_safe('type_' + instance.instance_class) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) self.push(self.inventory, vpc_id_name, dest) if self.nested_groups: self.push_group(self.inventory, 'vpcs', vpc_id_name) # Inventory: Group by security group if self.group_by_security_group: try: if instance.security_group: key = self.to_safe("security_group_" + instance.security_group.name) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) except AttributeError: self.fail_with_error('\n'.join(['Package boto seems a bit older.', 'Please upgrade boto >= 2.3.0.'])) # Inventory: Group by engine if self.group_by_rds_engine: self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) if self.nested_groups: self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) # Inventory: Group by parameter group if self.group_by_rds_parameter_group: self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) if self.nested_groups: self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) # Global Tag: all RDS instances self.push(self.inventory, 'rds', dest) self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) def add_elasticache_cluster(self, cluster, region): ''' Adds an ElastiCache cluster to the inventory and index, as long as it's nodes are addressable ''' # Only want available clusters unless all_elasticache_clusters is True if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available': return # Select the best destination address if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']: # Memcached cluster dest = cluster['ConfigurationEndpoint']['Address'] is_redis = False else: # Redis sigle node cluster # Because all Redis clusters are single nodes, we'll merge the # info from the cluster with info about the node dest = cluster['CacheNodes'][0]['Endpoint']['Address'] is_redis = True if not dest: # Skip clusters we cannot address (e.g. 
private VPC subnet) return # Add to index self.index[dest] = [region, cluster['CacheClusterId']] # Inventory: Group by instance ID (always a group of 1) if self.group_by_instance_id: self.inventory[cluster['CacheClusterId']] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) # Inventory: Group by region if self.group_by_region and not is_redis: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone and not is_redis: self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) # Inventory: Group by node type if self.group_by_instance_type and not is_redis: type_name = self.to_safe('type_' + cluster['CacheNodeType']) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC (information not available in the current # AWS API version for ElastiCache) # Inventory: Group by security group if self.group_by_security_group and not is_redis: # Check for the existence of the 'SecurityGroups' key and also if # this key has some value. When the cluster is not placed in a SG # the query can return None here and cause an error. if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) # Inventory: Group by engine if self.group_by_elasticache_engine and not is_redis: self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) # Inventory: Group by parameter group if self.group_by_elasticache_parameter_group: self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) # Inventory: Group by replication group if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) # Global Tag: all ElastiCache clusters self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) host_info = self.get_host_info_dict_from_describe_dict(cluster) self.inventory["_meta"]["hostvars"][dest] = host_info # Add the nodes for node in cluster['CacheNodes']: self.add_elasticache_node(node, cluster, region) def add_elasticache_node(self, node, cluster, region): ''' Adds an ElastiCache node to the inventory and index, as long as it is addressable ''' # Only want available nodes unless all_elasticache_nodes is True if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': return # Select the best destination 
address dest = node['Endpoint']['Address'] if not dest: # Skip nodes we cannot address (e.g. private VPC subnet) return node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) # Add to index self.index[dest] = [region, node_id] # Inventory: Group by node ID (always a group of 1) if self.group_by_instance_id: self.inventory[node_id] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', node_id) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone if self.group_by_availability_zone: self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) if self.nested_groups: if self.group_by_region: self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) # Inventory: Group by node type if self.group_by_instance_type: type_name = self.to_safe('type_' + cluster['CacheNodeType']) self.push(self.inventory, type_name, dest) if self.nested_groups: self.push_group(self.inventory, 'types', type_name) # Inventory: Group by VPC (information not available in the current # AWS API version for ElastiCache) # Inventory: Group by security group if self.group_by_security_group: # Check for the existence of the 'SecurityGroups' key and also if # this key has some value. When the cluster is not placed in a SG # the query can return None here and cause an error. if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: for security_group in cluster['SecurityGroups']: key = self.to_safe("security_group_" + security_group['SecurityGroupId']) self.push(self.inventory, key, dest) if self.nested_groups: self.push_group(self.inventory, 'security_groups', key) # Inventory: Group by engine if self.group_by_elasticache_engine: self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) # Inventory: Group by parameter group (done at cluster level) # Inventory: Group by replication group (done at cluster level) # Inventory: Group by ElastiCache Cluster if self.group_by_elasticache_cluster: self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) # Global Tag: all ElastiCache nodes self.push(self.inventory, 'elasticache_nodes', dest) host_info = self.get_host_info_dict_from_describe_dict(node) if dest in self.inventory["_meta"]["hostvars"]: self.inventory["_meta"]["hostvars"][dest].update(host_info) else: self.inventory["_meta"]["hostvars"][dest] = host_info def add_elasticache_replication_group(self, replication_group, region): ''' Adds an ElastiCache replication group to the inventory and index ''' # Only want available clusters unless all_elasticache_replication_groups is True if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': return # Select the best destination address (PrimaryEndpoint) dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] if not dest: # Skip clusters we cannot address (e.g. 
private VPC subnet) return # Add to index self.index[dest] = [region, replication_group['ReplicationGroupId']] # Inventory: Group by ID (always a group of 1) if self.group_by_instance_id: self.inventory[replication_group['ReplicationGroupId']] = [dest] if self.nested_groups: self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) # Inventory: Group by region if self.group_by_region: self.push(self.inventory, region, dest) if self.nested_groups: self.push_group(self.inventory, 'regions', region) # Inventory: Group by availability zone (doesn't apply to replication groups) # Inventory: Group by node type (doesn't apply to replication groups) # Inventory: Group by VPC (information not available in the current # AWS API version for replication groups # Inventory: Group by security group (doesn't apply to replication groups) # Check this value in cluster level # Inventory: Group by engine (replication groups are always Redis) if self.group_by_elasticache_engine: self.push(self.inventory, 'elasticache_redis', dest) if self.nested_groups: self.push_group(self.inventory, 'elasticache_engines', 'redis') # Global Tag: all ElastiCache clusters self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) host_info = self.get_host_info_dict_from_describe_dict(replication_group) self.inventory["_meta"]["hostvars"][dest] = host_info def get_route53_records(self): ''' Get and store the map of resource records to domain names that point to them. ''' r53_conn = route53.Route53Connection() all_zones = r53_conn.get_zones() route53_zones = [ zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones ] self.route53_records = {} for zone in route53_zones: rrsets = r53_conn.get_all_rrsets(zone.id) for record_set in rrsets: record_name = record_set.name if record_name.endswith('.'): record_name = record_name[:-1] for resource in record_set.resource_records: self.route53_records.setdefault(resource, set()) self.route53_records[resource].add(record_name) def get_instance_route53_names(self, instance): ''' Check if an instance is referenced in the records we have from Route53. If it is, return the list of domain names pointing to said instance. If nothing points to it, return an empty list. 
''' instance_attributes = [ 'public_dns_name', 'private_dns_name', 'ip_address', 'private_ip_address' ] name_list = set() for attrib in instance_attributes: try: value = getattr(instance, attrib) except AttributeError: continue if value in self.route53_records: name_list.update(self.route53_records[value]) return list(name_list) def get_host_info_dict_from_instance(self, instance): instance_vars = {} for key in vars(instance): value = getattr(instance, key) key = self.to_safe('ec2_' + key) # Handle complex types # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 if key == 'ec2__state': instance_vars['ec2_state'] = instance.state or '' instance_vars['ec2_state_code'] = instance.state_code elif key == 'ec2__previous_state': instance_vars['ec2_previous_state'] = instance.previous_state or '' instance_vars['ec2_previous_state_code'] = instance.previous_state_code elif type(value) in [int, bool]: instance_vars[key] = value elif isinstance(value, six.string_types): instance_vars[key] = value.strip() elif type(value) == type(None): instance_vars[key] = '' elif key == 'ec2_region': instance_vars[key] = value.name elif key == 'ec2__placement': instance_vars['ec2_placement'] = value.zone elif key == 'ec2_tags': for k, v in value.items(): key = self.to_safe('ec2_tag_' + k) instance_vars[key] = v elif key == 'ec2_groups': group_ids = [] group_names = [] for group in value: group_ids.append(group.id) group_names.append(group.name) instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) else: pass # TODO Product codes if someone finds them useful #print key #print type(value) #print value return instance_vars def get_host_info_dict_from_describe_dict(self, describe_dict): ''' Parses the dictionary returned by the API call into a flat list of parameters. This method should be used only when 'describe' is used directly because Boto doesn't provide specific classes. ''' # I really don't agree with prefixing everything with 'ec2' # because EC2, RDS and ElastiCache are different services. # I'm just following the pattern used until now to not break any # compatibility. 
host_info = {} for key in describe_dict: value = describe_dict[key] key = self.to_safe('ec2_' + self.uncammelize(key)) # Handle complex types # Target: Memcached Cache Clusters if key == 'ec2_configuration_endpoint' and value: host_info['ec2_configuration_endpoint_address'] = value['Address'] host_info['ec2_configuration_endpoint_port'] = value['Port'] # Target: Cache Nodes and Redis Cache Clusters (single node) if key == 'ec2_endpoint' and value: host_info['ec2_endpoint_address'] = value['Address'] host_info['ec2_endpoint_port'] = value['Port'] # Target: Redis Replication Groups if key == 'ec2_node_groups' and value: host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] replica_count = 0 for node in value[0]['NodeGroupMembers']: if node['CurrentRole'] == 'primary': host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] elif node['CurrentRole'] == 'replica': host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address'] host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port'] host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId'] replica_count += 1 # Target: Redis Replication Groups if key == 'ec2_member_clusters' and value: host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) # Target: All Cache Clusters elif key == 'ec2_cache_parameter_group': host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']]) host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] # Target: Almost everything elif key == 'ec2_security_groups': # Skip if SecurityGroups is None # (it is possible to have the key defined but no value in it). if value is not None: sg_ids = [] for sg in value: sg_ids.append(sg['SecurityGroupId']) host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) # Target: Everything # Preserve booleans and integers elif type(value) in [int, bool]: host_info[key] = value # Target: Everything # Sanitize string values elif isinstance(value, six.string_types): host_info[key] = value.strip() # Target: Everything # Replace None by an empty string elif type(value) == type(None): host_info[key] = '' else: # Remove non-processed complex types pass return host_info def get_host_info(self): ''' Get variables about a specific host ''' if len(self.index) == 0: # Need to load index from cache self.load_index_from_cache() if not self.args.host in self.index: # try updating the cache self.do_api_calls_update_cache() if not self.args.host in self.index: # host might not exist anymore return self.json_format_dict({}, True) (region, instance_id) = self.index[self.args.host] instance = self.get_instance(region, instance_id) return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) def push(self, my_dict, key, element): ''' Push an element onto an array that may not have been defined in the dict ''' group_info = my_dict.setdefault(key, []) if isinstance(group_info, dict): host_list = group_info.setdefault('hosts', []) host_list.append(element) else: group_info.append(element) def push_group(self, my_dict, key, element): ''' Push a group as a child of another group. 
''' parent_group = my_dict.setdefault(key, {}) if not isinstance(parent_group, dict): parent_group = my_dict[key] = {'hosts': parent_group} child_groups = parent_group.setdefault('children', []) if element not in child_groups: child_groups.append(element) def get_inventory_from_cache(self): ''' Reads the inventory from the cache file and returns it as a JSON object ''' cache = open(self.cache_path_cache, 'r') json_inventory = cache.read() return json_inventory def load_index_from_cache(self): ''' Reads the index from the cache file sets self.index ''' cache = open(self.cache_path_index, 'r') json_index = cache.read() self.index = json.loads(json_index) def write_to_cache(self, data, filename): ''' Writes data in JSON format to a file ''' json_data = self.json_format_dict(data, True) cache = open(filename, 'w') cache.write(json_data) cache.close() def uncammelize(self, key): temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() def to_safe(self, word): ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' return re.sub("[^A-Za-z0-9\_]", "_", word) def json_format_dict(self, data, pretty=False): ''' Converts a dict to a JSON object and dumps it as a formatted string ''' if pretty: return json.dumps(data, sort_keys=True, indent=2) else: return json.dumps(data) # Run the script Ec2Inventory()
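######################################################################
# Illustrative usage sketch (not part of the original script).  Ansible
# normally invokes this dynamic inventory itself, but its JSON output can
# also be consumed directly.  The script path and group name below are
# assumptions made for the example only.
import json
import subprocess

def hosts_in_group(inventory_script, group):
    '''Run the dynamic inventory with --list and return the hosts of one group.'''
    raw = subprocess.check_output([inventory_script, '--list'])
    inventory = json.loads(raw.decode('utf-8'))
    entry = inventory.get(group, [])
    # A group is either a plain list of hosts, or a dict with a 'hosts' key
    # when nested_groups is enabled in ec2.ini.
    if isinstance(entry, dict):
        return entry.get('hosts', [])
    return entry

# Example call (assumed path and tag group):
#   hosts_in_group('./ec2.py', 'tag_Name_webserver')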
gpl-3.0
maxamillion/anitya
tests/test_backend_pagure.py
4
3605
# -*- coding: utf-8 -*-
#
# Copyright © 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#

'''
anitya tests for the pagure backend.
'''

__requires__ = ['SQLAlchemy >= 0.7']
import pkg_resources

import json
import unittest
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(
    os.path.abspath(__file__)), '..'))

import anitya.lib.backends.pagure as backend
import anitya.lib.model as model
from anitya.lib.exceptions import AnityaPluginException
from tests import Modeltests, create_distro, skip_jenkins


BACKEND = 'pagure'


class PagureBackendtests(Modeltests):
    """ pagure backend tests. """

    @skip_jenkins
    def setUp(self):
        """ Set up the environment, run before every test. """
        super(PagureBackendtests, self).setUp()

        create_distro(self.session)
        self.create_project()

    def create_project(self):
        """ Create some basic projects to work with. """
        project = model.Project(
            name='pagure',
            homepage='https://pagure.io/pagure',
            backend=BACKEND,
        )
        self.session.add(project)
        self.session.commit()

        project = model.Project(
            name='fake',
            homepage='https://pagure.io/fake',
            backend=BACKEND,
        )
        self.session.add(project)
        self.session.commit()

    def test_pagure_get_version(self):
        """ Test the get_version function of the pagure backend. """
        pid = 1
        project = model.Project.get(self.session, pid)
        exp = '0.1.16'
        obs = backend.PagureBackend.get_version(project)
        self.assertEqual(obs, exp)

        pid = 2
        project = model.Project.get(self.session, pid)
        self.assertRaises(
            AnityaPluginException,
            backend.PagureBackend.get_version,
            project
        )

    def test_pagure_get_versions(self):
        """ Test the get_versions function of the pagure backend. """
        pid = 1
        project = model.Project.get(self.session, pid)
        exp = [
            '0.1', '0.1.1', '0.1.10', '0.1.11', '0.1.12', '0.1.13',
            '0.1.14', '0.1.15', '0.1.16', '0.1.2', '0.1.3', '0.1.4',
            '0.1.5', '0.1.6', '0.1.7', '0.1.8', '0.1.9']
        obs = backend.PagureBackend.get_versions(project)
        self.assertEqual(obs, exp)

        pid = 2
        project = model.Project.get(self.session, pid)
        self.assertRaises(
            AnityaPluginException,
            backend.PagureBackend.get_versions,
            project
        )


if __name__ == '__main__':
    SUITE = unittest.TestLoader().loadTestsFromTestCase(PagureBackendtests)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
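# ---------------------------------------------------------------------------
# Illustrative note (not part of the test file): the expected list in
# test_pagure_get_versions is in plain string sort order, which is why
# '0.1.10' through '0.1.16' come before '0.1.2'.  A minimal sketch
# reproducing that ordering:
versions = ['0.1.%d' % i for i in range(1, 17)] + ['0.1']
assert sorted(versions) == [
    '0.1', '0.1.1', '0.1.10', '0.1.11', '0.1.12', '0.1.13', '0.1.14',
    '0.1.15', '0.1.16', '0.1.2', '0.1.3', '0.1.4', '0.1.5', '0.1.6',
    '0.1.7', '0.1.8', '0.1.9']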
gpl-2.0
qqzwc/XX-Net
code/default/python27/1.0/lib/noarch/hyper/packages/rfc3986/parseresult.py
45
11142
# -*- coding: utf-8 -*- # Copyright (c) 2015 Ian Cordasco # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import namedtuple from . import compat from . import exceptions from . import normalizers from . import uri __all__ = ('ParseResult', 'ParseResultBytes') PARSED_COMPONENTS = ('scheme', 'userinfo', 'host', 'port', 'path', 'query', 'fragment') class ParseResultMixin(object): def _generate_authority(self, attributes): # I swear I did not align the comparisons below. That's just how they # happened to align based on pep8 and attribute lengths. userinfo, host, port = (attributes[p] for p in ('userinfo', 'host', 'port')) if (self.userinfo != userinfo or self.host != host or self.port != port): if port: port = '{0}'.format(port) return normalizers.normalize_authority( (compat.to_str(userinfo, self.encoding), compat.to_str(host, self.encoding), port) ) return self.authority def geturl(self): """Standard library shim to the unsplit method.""" return self.unsplit() @property def hostname(self): """Standard library shim for the host portion of the URI.""" return self.host @property def netloc(self): """Standard library shim for the authority portion of the URI.""" return self.authority @property def params(self): """Standard library shim for the query portion of the URI.""" return self.query class ParseResult(namedtuple('ParseResult', PARSED_COMPONENTS), ParseResultMixin): slots = () def __new__(cls, scheme, userinfo, host, port, path, query, fragment, uri_ref, encoding='utf-8'): parse_result = super(ParseResult, cls).__new__( cls, scheme or None, userinfo or None, host, port or None, path or None, query or None, fragment or None) parse_result.encoding = encoding parse_result.reference = uri_ref return parse_result @classmethod def from_string(cls, uri_string, encoding='utf-8', strict=True): """Parse a URI from the given unicode URI string. :param str uri_string: Unicode URI to be parsed into a reference. :param str encoding: The encoding of the string provided :param bool strict: Parse strictly according to :rfc:`3986` if True. If False, parse similarly to the standard library's urlparse function. 
:returns: :class:`ParseResult` or subclass thereof """ reference = uri.URIReference.from_string(uri_string, encoding) try: subauthority = reference.authority_info() except exceptions.InvalidAuthority: if strict: raise userinfo, host, port = split_authority(reference.authority) else: # Thanks to Richard Barrell for this idea: # https://twitter.com/0x2ba22e11/status/617338811975139328 userinfo, host, port = (subauthority.get(p) for p in ('userinfo', 'host', 'port')) if port: try: port = int(port) except ValueError: raise exceptions.InvalidPort(port) return cls(scheme=reference.scheme, userinfo=userinfo, host=host, port=port, path=reference.path, query=reference.query, fragment=reference.fragment, uri_ref=reference, encoding=encoding) @property def authority(self): """Normalized authority generated from the subauthority parts.""" return self.reference.authority def copy_with(self, scheme=None, userinfo=None, host=None, port=None, path=None, query=None, fragment=None): attributes = zip(PARSED_COMPONENTS, (scheme, userinfo, host, port, path, query, fragment)) attrs_dict = {} for name, value in attributes: if value is None: value = getattr(self, name) attrs_dict[name] = value authority = self._generate_authority(attrs_dict) ref = self.reference.copy_with(scheme=attrs_dict['scheme'], authority=authority, path=attrs_dict['path'], query=attrs_dict['query'], fragment=attrs_dict['fragment']) return ParseResult(uri_ref=ref, encoding=self.encoding, **attrs_dict) def encode(self, encoding=None): encoding = encoding or self.encoding attrs = dict( zip(PARSED_COMPONENTS, (attr.encode(encoding) if hasattr(attr, 'encode') else attr for attr in self))) return ParseResultBytes( uri_ref=self.reference, encoding=encoding, **attrs ) def unsplit(self, use_idna=False): """Create a URI string from the components. :returns: The parsed URI reconstituted as a string. :rtype: str """ parse_result = self if use_idna and self.host: hostbytes = self.host.encode('idna') host = hostbytes.decode(self.encoding) parse_result = self.copy_with(host=host) return parse_result.reference.unsplit() class ParseResultBytes(namedtuple('ParseResultBytes', PARSED_COMPONENTS), ParseResultMixin): def __new__(cls, scheme, userinfo, host, port, path, query, fragment, uri_ref, encoding='utf-8'): parse_result = super(ParseResultBytes, cls).__new__( cls, scheme or None, userinfo or None, host, port or None, path or None, query or None, fragment or None) parse_result.encoding = encoding parse_result.reference = uri_ref return parse_result @classmethod def from_string(cls, uri_string, encoding='utf-8', strict=True): """Parse a URI from the given unicode URI string. :param str uri_string: Unicode URI to be parsed into a reference. :param str encoding: The encoding of the string provided :param bool strict: Parse strictly according to :rfc:`3986` if True. If False, parse similarly to the standard library's urlparse function. 
:returns: :class:`ParseResultBytes` or subclass thereof """ reference = uri.URIReference.from_string(uri_string, encoding) try: subauthority = reference.authority_info() except exceptions.InvalidAuthority: if strict: raise userinfo, host, port = split_authority(reference.authority) else: # Thanks to Richard Barrell for this idea: # https://twitter.com/0x2ba22e11/status/617338811975139328 userinfo, host, port = (subauthority.get(p) for p in ('userinfo', 'host', 'port')) if port: try: port = int(port) except ValueError: raise exceptions.InvalidPort(port) to_bytes = compat.to_bytes return cls(scheme=to_bytes(reference.scheme, encoding), userinfo=to_bytes(userinfo, encoding), host=to_bytes(host, encoding), port=port, path=to_bytes(reference.path, encoding), query=to_bytes(reference.query, encoding), fragment=to_bytes(reference.fragment, encoding), uri_ref=reference, encoding=encoding) @property def authority(self): """Normalized authority generated from the subauthority parts.""" return self.reference.authority.encode(self.encoding) def copy_with(self, scheme=None, userinfo=None, host=None, port=None, path=None, query=None, fragment=None): attributes = zip(PARSED_COMPONENTS, (scheme, userinfo, host, port, path, query, fragment)) attrs_dict = {} for name, value in attributes: if value is None: value = getattr(self, name) if not isinstance(value, bytes) and hasattr(value, 'encode'): value = value.encode(self.encoding) attrs_dict[name] = value authority = self._generate_authority(attrs_dict) to_str = compat.to_str ref = self.reference.copy_with( scheme=to_str(attrs_dict['scheme'], self.encoding), authority=authority, path=to_str(attrs_dict['path'], self.encoding), query=to_str(attrs_dict['query'], self.encoding), fragment=to_str(attrs_dict['fragment'], self.encoding) ) return ParseResultBytes( uri_ref=ref, encoding=self.encoding, **attrs_dict ) def unsplit(self, use_idna=False): """Create a URI bytes object from the components. :returns: The parsed URI reconstituted as a string. :rtype: bytes """ parse_result = self if use_idna and self.host: # self.host is bytes, to encode to idna, we need to decode it # first host = self.host.decode(self.encoding) hostbytes = host.encode('idna') parse_result = self.copy_with(host=hostbytes) uri = parse_result.reference.unsplit() return uri.encode(self.encoding) def split_authority(authority): # Initialize our expected return values userinfo = host = port = None # Initialize an extra var we may need to use extra_host = None # Set-up rest in case there is no userinfo portion rest = authority if '@' in authority: userinfo, rest = authority.rsplit('@', 1) # Handle IPv6 host addresses if rest.startswith('['): host, rest = rest.split(']', 1) host += ']' if ':' in rest: extra_host, port = rest.split(':', 1) elif not host and rest: host = rest if extra_host and not host: host = extra_host return userinfo, host, port
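# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module).  It shows how a caller
# might use ParseResult; the import path is an assumption based on where this
# vendored copy lives, and the URI is invented for the example.
from hyper.packages.rfc3986.parseresult import ParseResult

result = ParseResult.from_string('https://user@example.com:8443/path?q=1#frag')
print(result.scheme)    # 'https'
print(result.host)      # 'example.com'
print(result.port)      # 8443, converted to int by from_string
print(result.geturl())  # standard-library-style shim around unsplit()

# copy_with() returns a new ParseResult with the given components replaced;
# components left as None keep their current value.
moved = result.copy_with(host='example.org')
print(moved.unsplit())  # URI reconstituted with the new host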
bsd-2-clause
plediii/coord_util
test_sql_table.py
1
16745
import unittest from sql_table import * class SQLTableTestCase(unittest.TestCase): def test_no_empty_table(self): class EmptyTable(SQLTable): pass self.assertRaises(SQLDeclarationError, EmptyTable) def test_no_primary_keys_a(self): class TestTable(SQLTable): table_name = 'test_table' a = Integer() self.assertIs(TestTable.a.abstract_table, TestTable) self.assertTrue(not TestTable.a.primary_key) table = TestTable() self.assertTrue(not TestTable.a.primary_key) self.assertTrue(not table.a.is_primary_key()) self.assertEqual(table.primary_key.columns, tuple()) def test_no_primary_keys_b(self): class TestTable(SQLTable): table_name = 'test_table' a = Integer() b = Real() self.assertIs(TestTable.a.abstract_table, TestTable) self.assertIs(TestTable.b.abstract_table, TestTable) self.assertTrue(not TestTable.a.primary_key) self.assertTrue(not TestTable.b.primary_key) table = TestTable() self.assertTrue(not TestTable.a.primary_key) self.assertTrue(not TestTable.b.primary_key) self.assertTrue(not table.a.is_primary_key()) self.assertTrue(not table.b.is_primary_key()) self.assertEqual(table.primary_key.columns, tuple()) def test_first_primary_keys(self): class TestTable(SQLTable): table_name = 'test_table' a = Integer() b = Real() primary_key(a) self.assertIs(TestTable.a.abstract_table, TestTable) self.assertIs(TestTable.b.abstract_table, TestTable) self.assertTrue(TestTable.a.primary_key) self.assertTrue(not TestTable.b.primary_key) table = TestTable() self.assertTrue(TestTable.a.primary_key) self.assertTrue(not TestTable.b.primary_key) self.assertTrue(table.a.is_primary_key()) self.assertTrue(not table.b.is_primary_key()) self.assertEqual(table.primary_key.columns, (table.a,)) class ForeignTableTestCase(unittest.TestCase): def test_foreign_table_declaration(self): class TestTable(SQLTable): table_name = 'test_table' Keys = ForeignTable() key = Keys.Integer('key') def test_requires_foreign_table(self): class TestTable(SQLTable): table_name = 'test_table' Keys = ForeignTable() key = Keys.Integer('key') self.assertRaises(ForeignTableError, TestTable) def test_requires_foreign_column(self): class TestTable(SQLTable): table_name = 'test_table' Keys = ForeignTable() key = Keys.Integer('key') class TestKeys(SQLTable): table_name = 'test_keys' notkey = Integer() test_keys = TestKeys() self.assertRaises(ForeignTableError, TestTable, Keys=test_keys) def test_requires_foreign_column_type(self): class TestTable(SQLTable): table_name = 'test_table' Keys = ForeignTable() key = Keys.Integer('key') class TestKeys(SQLTable): table_name = 'test_keys' notkey = Real() test_keys = TestKeys() self.assertRaises(ForeignTableError, TestTable, Keys=test_keys) def test_ok_with_column(self): class TestTable(SQLTable): table_name = 'test_table' Keys = ForeignTable() key = Keys.Integer('key') class TestKeys(SQLTable): table_name = 'test_keys' key = Integer() test_keys = TestKeys() def test_ok_with_rename(self): class TestTable(SQLTable): table_name = 'test_table' Keys = ForeignTable() key = Keys.Integer('key') class TestKeys(SQLTable): table_name = 'test_keys' key = Integer() test_keys = TestKeys(notkey_name='key') def test_ok_with_additional_columns(self): class TestTable(SQLTable): table_name = 'test_table' Keys = ForeignTable() key = Keys.Integer('key') class TestKeys(SQLTable): table_name = 'test_keys' key = Integer() otherkey = Integer() test_keys = TestKeys(notkey_name='key') test_table = TestTable(Keys=test_keys) class TestTableEqualTestCase(unittest.TestCase): def assertEqual(self, a, b): 
unittest.TestCase.assertEqual(self, a, b) unittest.TestCase.assertEqual(self, b, a) def assertNotEqual(self, a, b): unittest.TestCase.assertNotEqual(self, a, b) unittest.TestCase.assertNotEqual(self, b, a) def test_single_int_equal_single_int(self): class FirstTable(SQLTable): table_name = 'test_table' a = Integer() class SecondTable(SQLTable): table_name = 'test_table' a = Integer() first = FirstTable() second = SecondTable() self.assertEqual(first, second) def test_single_text_equal_single_text(self): class FirstTable(SQLTable): table_name = 'test_table' a = Text() class SecondTable(SQLTable): table_name = 'test_table' a = Text() first = FirstTable() second = SecondTable() self.assertEqual(first, second) def test_single_real_equal_single_real(self): class FirstTable(SQLTable): table_name = 'test_table' a = Real() class SecondTable(SQLTable): table_name = 'test_table' a = Real() first = FirstTable() second = SecondTable() self.assertEqual(first, second) def test_single_int_not_equal_single_real(self): class FirstTable(SQLTable): table_name = 'test_table' a = Integer() class SecondTable(SQLTable): table_name = 'test_table' a = Real() first = FirstTable() second = SecondTable() self.assertNotEqual(first, second) def test_single_int_not_equal_single_int_different_name_a(self): class FirstTable(SQLTable): table_name = 'test_table' a = Integer() class SecondTable(SQLTable): table_name = 'test_table' b = Integer() first = FirstTable() second = SecondTable() self.assertNotEqual(first, second) def test_single_int_not_equal_single_int_different_name_b(self): class FirstTable(SQLTable): table_name = 'test_table' a = Integer() class SecondTable(SQLTable): table_name = 'test_table' a = Integer() first = FirstTable() second = SecondTable(a_name='b') self.assertNotEqual(first, second) def test_single_primary_int_equal_single_primary_int(self): class FirstTable(SQLTable): table_name = 'test_table' a = Integer() primary_key(a) class SecondTable(SQLTable): table_name = 'test_table' a = Integer() primary_key(a) first = FirstTable() second = SecondTable() self.assertEqual(first, second) def test_single_primary_int_not_equal_single_int(self): class FirstTable(SQLTable): table_name = 'test_table' a = Integer() primary_key(a) class SecondTable(SQLTable): table_name = 'test_table' a = Integer() first = FirstTable() second = SecondTable() self.assertNotEqual(first, second) def test_int_real_equal_int_real(self): class FirstTable(SQLTable): table_name = 'test_table' a = Integer() b = Real() class SecondTable(SQLTable): table_name = 'test_table' a = Integer() b = Real() first = FirstTable() second = SecondTable() self.assertEqual(first, second) def test_real_int_not_equal_int_real(self): class FirstTable(SQLTable): table_name = 'test_table' a = Real() b = Integer() class SecondTable(SQLTable): table_name = 'test_table' a = Integer() b = Real() first = FirstTable() second = SecondTable() self.assertNotEqual(first, second) def test_int_real_not_equal_int_real_different_name(self): class FirstTable(SQLTable): table_name = 'test_table' a = Integer() b = Real() class SecondTable(SQLTable): table_name = 'test_table' a = Integer() b = Real() first = FirstTable() second = SecondTable(a_name='b', b_name='a') self.assertNotEqual(first, second) def test_int_int_equal_int_int(self): class FirstTable(SQLTable): table_name = 'test_table' a = Integer() b = Integer() class SecondTable(SQLTable): table_name = 'test_table' a = Integer() b = Integer() first = FirstTable() second = SecondTable() self.assertEqual(first, second) def 
test_int_int_not_equal_int_int_different_order(self): class FirstTable(SQLTable): table_name = 'test_table' a = Integer() b = Integer() class SecondTable(SQLTable): table_name = 'test_table' b = Integer() a = Integer() first = FirstTable() second = SecondTable() self.assertNotEqual(first, second) def test_int_int_equal_int_int_different_order_renamed(self): class FirstTable(SQLTable): table_name = 'test_table' a = Integer() b = Integer() class SecondTable(SQLTable): table_name = 'test_table' b = Integer() a = Integer() first = FirstTable() second = SecondTable(b_name='a', a_name='b') self.assertEqual(first, second) def test_int_int_equal_int_int_same_primary_key(self): class FirstTable(SQLTable): table_name = 'test_table' a = Integer() b = Integer() primary_key(a) class SecondTable(SQLTable): table_name = 'test_table' a = Integer() b = Integer() primary_key(a) first = FirstTable() second = SecondTable() self.assertEqual(first, second) def test_int_int_not_equal_int_int_different_primary_key(self): class FirstTable(SQLTable): table_name = 'test_table' a = Integer() b = Integer() primary_key(a) class SecondTable(SQLTable): table_name = 'test_table' a = Integer() b = Integer() primary_key(b) first = FirstTable() second = SecondTable() self.assertNotEqual(first, second) def test_int_int_not_equal_int_int_same_primary_key_renamed(self): class FirstTable(SQLTable): table_name = 'test_table' b = Integer() a = Integer() primary_key(b) class SecondTable(SQLTable): table_name = 'test_table' a = Integer() b = Integer() primary_key(a) first = FirstTable() second = SecondTable() self.assertNotEqual(first, second) def test_int_int_equal_int_int_same_primary_key_re_renamed(self): class FirstTable(SQLTable): table_name = 'test_table' b = Integer() a = Integer() primary_key(b) class SecondTable(SQLTable): table_name = 'test_table' a = Integer() b = Integer() primary_key(a) first = FirstTable() second = SecondTable(a_name='b', b_name='a') self.assertEqual(first, second) def test_int_int_equal_int_int_same_foreignkey(self): class ReferenceTable(SQLTable): key = Integer() primary_key(key) ref_table = ReferenceTable() class FirstTable(SQLTable): table_name = 'test_table' ref_table = ForeignTable() a = ref_table.Integer('key') b = Integer() primary_key(a) class SecondTable(SQLTable): table_name = 'test_table' ref_table = ForeignTable() a = ref_table.Integer('key') b = Integer() primary_key(a) first = FirstTable(ref_table=ref_table) second = SecondTable(ref_table=ref_table) self.assertEqual(first, second) def test_int_int_not_equal_int_int_lacking_foreignkey(self): class ReferenceTable(SQLTable): key = Integer() primary_key(key) ref_table = ReferenceTable() class FirstTable(SQLTable): table_name = 'test_table' ref_table = ForeignTable() a = ref_table.Integer('key') b = Integer() primary_key(a) class SecondTable(SQLTable): table_name = 'test_table' a = Integer() b = Integer() primary_key(a) first = FirstTable(ref_table=ref_table) second = SecondTable() self.assertNotEqual(first, second) class ParseTableTestCase(unittest.TestCase): def assertOuroboros(self, table, ref_tables=[]): parsed_table = parse_table(table.sql_table(), ref_tables=ref_tables) self.assertEqual(table, parsed_table) # def test_oroubus_empty(self): # class ExampleTable(SQLTable): # pass # self.assertOuroboros(ExampleTable()) def test_oroubus_int(self): class ExampleTable(SQLTable): a = Integer() self.assertOuroboros(ExampleTable()) def test_oroubus_int_real(self): # TODO: somehow this is being contaminated with primary keys class 
ExampleTable(SQLTable): a = Integer() b = Real() self.assertOuroboros(ExampleTable()) def test_oroubus_int_real_primarykey(self): class ExampleTable(SQLTable): a = Integer() b = Real() primary_key(a) self.assertOuroboros(ExampleTable()) def test_oroubus_double_int_primarykey(self): class ExampleTable(SQLTable): a = Integer() b = Integer() primary_key(a, b) self.assertOuroboros(ExampleTable()) def test_oroubus_int_real_primarykey_foreignkey(self): class ReferenceTable(SQLTable): key = Integer() primary_key(key) class ExampleTable(SQLTable): ref_table = ForeignTable() a = ref_table.Integer('key') b = Real() primary_key(a) ref_table = ReferenceTable() self.assertOuroboros(ExampleTable(ref_table=ref_table), ref_tables=[ref_table]) def test_oroubus_int_real_primarykey_foreignkey_parsed(self): class ReferenceTable(SQLTable): key = Integer() primary_key(key) class ExampleTable(SQLTable): ref_table = ForeignTable() a = ref_table.Integer('key') b = Real() primary_key(a) ref_table = parse_table(ReferenceTable().sql_table()) self.assertOuroboros(ExampleTable(ref_table=ref_table), ref_tables=[ref_table]) def test_oroubus_int_real_primarykey_foreignkey_needs_reftable(self): class ReferenceTable(SQLTable): key = Integer() primary_key(key) class ExampleTable(SQLTable): ref_table = ForeignTable() a = ref_table.Integer('key') b = Real() primary_key(a) ref_table = ReferenceTable() table = ExampleTable(ref_table=ref_table) self.assertRaises(SQLSyntaxError, parse_table, table.sql_table()) if __name__ == "__main__": unittest.main()
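# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test file) of the declaration pattern
# the tests above exercise.  The table and column names are invented for the
# example; only the constructs (Integer/Real/Text columns, primary_key,
# ForeignTable, sql_table/parse_table) come from the tests themselves.
from sql_table import (SQLTable, Integer, Real, Text, ForeignTable,
                       primary_key, parse_table)

class Samples(SQLTable):
    table_name = 'samples'
    sample_id = Integer()
    energy = Real()
    label = Text()
    primary_key(sample_id)

class Coordinates(SQLTable):
    table_name = 'coordinates'
    Samples = ForeignTable()
    sample_id = Samples.Integer('sample_id')  # must exist in Samples with the same type
    x = Real()
    primary_key(sample_id)

samples = Samples()
coords = Coordinates(Samples=samples)         # foreign table passed by keyword, as in the tests
round_tripped = parse_table(coords.sql_table(), ref_tables=[samples])
assert round_tripped == coords                # mirrors assertOuroboros above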
bsd-3-clause