Dataset columns:
repo_name: string (lengths 5 to 100)
path: string (lengths 4 to 375)
copies: string (991 distinct values)
size: string (lengths 4 to 7)
content: string (lengths 666 to 1M)
license: string (15 distinct values)
bxshi/gem5
src/arch/x86/isa/insts/general_purpose/compare_and_test/compare.py
91
3017
# Copyright (c) 2007 The Hewlett-Packard Development Company # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Gabe Black microcode = ''' def macroop CMP_R_M { ld t1, seg, sib, disp sub t0, reg, t1, flags=(OF, SF, ZF, AF, PF, CF) }; def macroop CMP_R_P { rdip t7 ld t1, seg, riprel, disp sub t0, reg, t1, flags=(OF, SF, ZF, AF, PF, CF) }; def macroop CMP_M_I { limm t2, imm ld t1, seg, sib, disp sub t0, t1, t2, flags=(OF, SF, ZF, AF, PF, CF) }; def macroop CMP_P_I { limm t2, imm rdip t7 ld t1, seg, riprel, disp sub t0, t1, t2, flags=(OF, SF, ZF, AF, PF, CF) }; def macroop CMP_M_R { ld t1, seg, sib, disp sub t0, t1, reg, flags=(OF, SF, ZF, AF, PF, CF) }; def macroop CMP_P_R { rdip t7 ld t1, seg, riprel, disp sub t0, t1, reg, flags=(OF, SF, ZF, AF, PF, CF) }; def macroop CMP_R_R { sub t0, reg, regm, flags=(OF, SF, ZF, AF, PF, CF) }; def macroop CMP_R_I { limm t1, imm sub t0, reg, t1, flags=(OF, SF, ZF, AF, PF, CF) }; '''
bsd-3-clause
Lukc/ospace-lukc
server/lib/medusa/mime_type_table.py
4
3941
# -*- Python -*- # Converted by ./convert_mime_type_table.py from: # /usr/src2/apache_1.2b6/conf/mime.types # content_type_map = \ { 'ai': 'application/postscript', 'aif': 'audio/x-aiff', 'aifc': 'audio/x-aiff', 'aiff': 'audio/x-aiff', 'au': 'audio/basic', 'avi': 'video/x-msvideo', 'bcpio': 'application/x-bcpio', 'bin': 'application/octet-stream', 'cdf': 'application/x-netcdf', 'class': 'application/octet-stream', 'cpio': 'application/x-cpio', 'cpt': 'application/mac-compactpro', 'csh': 'application/x-csh', 'dcr': 'application/x-director', 'dir': 'application/x-director', 'dms': 'application/octet-stream', 'doc': 'application/msword', 'dvi': 'application/x-dvi', 'dxr': 'application/x-director', 'eps': 'application/postscript', 'etx': 'text/x-setext', 'exe': 'application/octet-stream', 'gif': 'image/gif', 'gtar': 'application/x-gtar', 'gz': 'application/x-gzip', 'hdf': 'application/x-hdf', 'hqx': 'application/mac-binhex40', 'htm': 'text/html', 'html': 'text/html', 'ice': 'x-conference/x-cooltalk', 'ief': 'image/ief', 'jpe': 'image/jpeg', 'jpeg': 'image/jpeg', 'jpg': 'image/jpeg', 'kar': 'audio/midi', 'latex': 'application/x-latex', 'lha': 'application/octet-stream', 'lzh': 'application/octet-stream', 'man': 'application/x-troff-man', 'me': 'application/x-troff-me', 'mid': 'audio/midi', 'midi': 'audio/midi', 'mif': 'application/x-mif', 'mov': 'video/quicktime', 'movie': 'video/x-sgi-movie', 'mp2': 'audio/mpeg', 'mpe': 'video/mpeg', 'mpeg': 'video/mpeg', 'mpg': 'video/mpeg', 'mpga': 'audio/mpeg', 'mp3': 'audio/mpeg', 'ms': 'application/x-troff-ms', 'nc': 'application/x-netcdf', 'oda': 'application/oda', 'pbm': 'image/x-portable-bitmap', 'pdb': 'chemical/x-pdb', 'pdf': 'application/pdf', 'pgm': 'image/x-portable-graymap', 'png': 'image/png', 'pnm': 'image/x-portable-anymap', 'ppm': 'image/x-portable-pixmap', 'ppt': 'application/powerpoint', 'ps': 'application/postscript', 'qt': 'video/quicktime', 'ra': 'audio/x-realaudio', 'ram': 'audio/x-pn-realaudio', 'ras': 'image/x-cmu-raster', 'rgb': 'image/x-rgb', 'roff': 'application/x-troff', 'rpm': 'audio/x-pn-realaudio-plugin', 'rtf': 'application/rtf', 'rtx': 'text/richtext', 'sgm': 'text/x-sgml', 'sgml': 'text/x-sgml', 'sh': 'application/x-sh', 'shar': 'application/x-shar', 'sit': 'application/x-stuffit', 'skd': 'application/x-koan', 'skm': 'application/x-koan', 'skp': 'application/x-koan', 'skt': 'application/x-koan', 'snd': 'audio/basic', 'src': 'application/x-wais-source', 'sv4cpio': 'application/x-sv4cpio', 'sv4crc': 'application/x-sv4crc', 't': 'application/x-troff', 'tar': 'application/x-tar', 'tcl': 'application/x-tcl', 'tex': 'application/x-tex', 'texi': 'application/x-texinfo', 'texinfo': 'application/x-texinfo', 'tif': 'image/tiff', 'tiff': 'image/tiff', 'tr': 'application/x-troff', 'tsv': 'text/tab-separated-values', 'txt': 'text/plain', 'ustar': 'application/x-ustar', 'vcd': 'application/x-cdlink', 'vrml': 'x-world/x-vrml', 'wav': 'audio/x-wav', 'wrl': 'x-world/x-vrml', 'xbm': 'image/x-xbitmap', 'xpm': 'image/x-xpixmap', 'xwd': 'image/x-xwindowdump', 'xyz': 'chemical/x-pdb', 'zip': 'application/zip', }
gpl-2.0
KousikaGanesh/purchaseandInventory
openerp/tools/lru.py
204
2946
# -*- coding: utf-8 -*-
# taken from http://code.activestate.com/recipes/252524-length-limited-o1-lru-cache-implementation/

import threading

from func import synchronized

__all__ = ['LRU']


class LRUNode(object):
    __slots__ = ['prev', 'next', 'me']

    def __init__(self, prev, me):
        self.prev = prev
        self.me = me
        self.next = None


class LRU(object):
    """
    Implementation of a length-limited O(1) LRU queue.
    Built for and used by PyPE:
    http://pype.sourceforge.net
    Copyright 2003 Josiah Carlson.
    """

    def __init__(self, count, pairs=[]):
        self._lock = threading.RLock()
        self.count = max(count, 1)
        self.d = {}
        self.first = None
        self.last = None
        for key, value in pairs:
            self[key] = value

    @synchronized()
    def __contains__(self, obj):
        return obj in self.d

    @synchronized()
    def __getitem__(self, obj):
        a = self.d[obj].me
        self[a[0]] = a[1]
        return a[1]

    @synchronized()
    def __setitem__(self, obj, val):
        if obj in self.d:
            del self[obj]
        nobj = LRUNode(self.last, (obj, val))
        if self.first is None:
            self.first = nobj
        if self.last:
            self.last.next = nobj
        self.last = nobj
        self.d[obj] = nobj
        if len(self.d) > self.count:
            if self.first == self.last:
                self.first = None
                self.last = None
                return
            a = self.first
            a.next.prev = None
            self.first = a.next
            a.next = None
            del self.d[a.me[0]]
            del a

    @synchronized()
    def __delitem__(self, obj):
        nobj = self.d[obj]
        if nobj.prev:
            nobj.prev.next = nobj.next
        else:
            self.first = nobj.next
        if nobj.next:
            nobj.next.prev = nobj.prev
        else:
            self.last = nobj.prev
        del self.d[obj]

    @synchronized()
    def __iter__(self):
        cur = self.first
        while cur is not None:
            cur2 = cur.next
            yield cur.me[1]
            cur = cur2

    @synchronized()
    def __len__(self):
        return len(self.d)

    @synchronized()
    def iteritems(self):
        cur = self.first
        while cur is not None:
            cur2 = cur.next
            yield cur.me
            cur = cur2

    @synchronized()
    def iterkeys(self):
        return iter(self.d)

    @synchronized()
    def itervalues(self):
        for i, j in self.iteritems():
            yield j

    @synchronized()
    def keys(self):
        return self.d.keys()

    @synchronized()
    def pop(self, key):
        v = self[key]
        del self[key]
        return v

    @synchronized()
    def clear(self):
        self.d = {}
        self.first = None
        self.last = None

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
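The record above defines a length-limited O(1) LRU cache. As an illustrative sketch only (not part of the dataset record), here is how that LRU class could be exercised; the import path is an assumption based on the record's file path.

from openerp.tools.lru import LRU  # assumed import path for the file above

cache = LRU(2)        # keep at most two entries
cache['a'] = 1
cache['b'] = 2
cache['a']            # touching 'a' makes it the most recently used entry
cache['c'] = 3        # evicts 'b', the least recently used key
assert 'a' in cache and 'c' in cache and 'b' not in cache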
gloaec/trifle
src/trifle/anyconfig/backend/tests/backends.py
1
1243
#
# Copyright (C) 2012 Satoru SATOH <ssato @ redhat.com>
# License: MIT
#
import anyconfig.backend.backends as T
import unittest


class Test_00_pure_functions(unittest.TestCase):

    def test_10_find_by_file(self):
        ini_cf = "/a/b/c.ini"
        unknown_cf = "/a/b/c.xyz"
        jsn_cfs = ["/a/b/c.jsn", "/a/b/c.json", "/a/b/c.js"]
        yml_cfs = ["/a/b/c.yml", "/a/b/c.yaml"]

        self.assertTrue(ini_cf, T.BINI.IniConfigParser)
        self.assertTrue(T.find_by_file(unknown_cf) is None)

        for f in jsn_cfs:
            self.assertTrue(f, T.BJSON.JsonConfigParser)

        for f in yml_cfs:
            self.assertTrue(f, T.BYAML.YamlConfigParser)

    def test_20_find_by_type(self):
        ini_t = "ini"
        jsn_t = "json"
        yml_t = "yaml"
        unknown_t = "unknown_type"

        self.assertTrue(ini_t, T.BINI.IniConfigParser)
        self.assertTrue(jsn_t, T.BJSON.JsonConfigParser)
        self.assertTrue(yml_t, T.BYAML.YamlConfigParser)
        self.assertTrue(T.find_by_type(unknown_t) is None)

    def test_30_list_types(self):
        types = T.list_types()

        self.assertTrue(isinstance(types, list))
        self.assertTrue(bool(list))  # ensure it's not empty.

# vim:sw=4:ts=4:et:
gpl-3.0
JioCloud/nova
nova/objects/numa.py
24
8397
# Copyright 2014 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from nova import exception from nova.objects import base from nova.objects import fields from nova.virt import hardware def all_things_equal(obj_a, obj_b): for name in obj_a.fields: set_a = obj_a.obj_attr_is_set(name) set_b = obj_b.obj_attr_is_set(name) if set_a != set_b: return False elif not set_a: continue if getattr(obj_a, name) != getattr(obj_b, name): return False return True # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class NUMACell(base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added pinned_cpus and siblings fields # Version 1.2: Added mempages field VERSION = '1.2' fields = { 'id': fields.IntegerField(read_only=True), 'cpuset': fields.SetOfIntegersField(), 'memory': fields.IntegerField(), 'cpu_usage': fields.IntegerField(default=0), 'memory_usage': fields.IntegerField(default=0), 'pinned_cpus': fields.SetOfIntegersField(), 'siblings': fields.ListOfSetsOfIntegersField(), 'mempages': fields.ListOfObjectsField('NUMAPagesTopology'), } obj_relationships = { 'mempages': [('1.2', '1.0')] } def __eq__(self, other): return all_things_equal(self, other) def __ne__(self, other): return not (self == other) @property def free_cpus(self): return self.cpuset - self.pinned_cpus or set() @property def free_siblings(self): return [sibling_set & self.free_cpus for sibling_set in self.siblings] @property def avail_cpus(self): return len(self.free_cpus) @property def avail_memory(self): return self.memory - self.memory_usage def pin_cpus(self, cpus): if self.pinned_cpus & cpus: raise exception.CPUPinningInvalid(requested=list(cpus), pinned=list(self.pinned_cpus)) self.pinned_cpus |= cpus def unpin_cpus(self, cpus): if (self.pinned_cpus & cpus) != cpus: raise exception.CPUPinningInvalid(requested=list(cpus), pinned=list(self.pinned_cpus)) self.pinned_cpus -= cpus def _to_dict(self): return { 'id': self.id, 'cpus': hardware.format_cpu_spec( self.cpuset, allow_ranges=False), 'mem': { 'total': self.memory, 'used': self.memory_usage}, 'cpu_usage': self.cpu_usage} @classmethod def _from_dict(cls, data_dict): cpuset = hardware.parse_cpu_spec( data_dict.get('cpus', '')) cpu_usage = data_dict.get('cpu_usage', 0) memory = data_dict.get('mem', {}).get('total', 0) memory_usage = data_dict.get('mem', {}).get('used', 0) cell_id = data_dict.get('id') return cls(id=cell_id, cpuset=cpuset, memory=memory, cpu_usage=cpu_usage, memory_usage=memory_usage, mempages=[], pinned_cpus=set([]), siblings=[]) def can_fit_hugepages(self, pagesize, memory): """Returns whether memory can fit into hugepages size :param pagesize: a page size in KibB :param memory: a memory size asked to fit in KiB :returns: whether memory can fit in hugepages :raises: MemoryPageSizeNotSupported if page size not supported """ for pages in self.mempages: if pages.size_kb == pagesize: return (memory <= pages.free_kb and (memory % pages.size_kb) == 0) raise 
exception.MemoryPageSizeNotSupported(pagesize=pagesize) # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class NUMAPagesTopology(base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' fields = { 'size_kb': fields.IntegerField(), 'total': fields.IntegerField(), 'used': fields.IntegerField(default=0), } def __eq__(self, other): return all_things_equal(self, other) def __ne__(self, other): return not (self == other) @property def free(self): """Returns the number of avail pages.""" return self.total - self.used @property def free_kb(self): """Returns the avail memory size in KiB.""" return self.free * self.size_kb # TODO(berrange): Remove NovaObjectDictCompat @base.NovaObjectRegistry.register class NUMATopology(base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Update NUMACell to 1.1 # Version 1.2: Update NUMACell to 1.2 VERSION = '1.2' fields = { 'cells': fields.ListOfObjectsField('NUMACell'), } obj_relationships = { 'cells': [('1.0', '1.0'), ('1.1', '1.1'), ('1.2', '1.2')] } @classmethod def obj_from_primitive(cls, primitive): if 'nova_object.name' in primitive: obj_topology = super(NUMATopology, cls).obj_from_primitive( primitive) else: # NOTE(sahid): This compatibility code needs to stay until we can # guarantee that there are no cases of the old format stored in # the database (or forever, if we can never guarantee that). obj_topology = NUMATopology._from_dict(primitive) return obj_topology def _to_json(self): return jsonutils.dumps(self.obj_to_primitive()) @classmethod def obj_from_db_obj(cls, db_obj): return cls.obj_from_primitive( jsonutils.loads(db_obj)) def __len__(self): """Defined so that boolean testing works the same as for lists.""" return len(self.cells) def _to_dict(self): # TODO(sahid): needs to be removed. return {'cells': [cell._to_dict() for cell in self.cells]} @classmethod def _from_dict(cls, data_dict): return cls(cells=[ NUMACell._from_dict(cell_dict) for cell_dict in data_dict.get('cells', [])]) @base.NovaObjectRegistry.register class NUMATopologyLimits(base.NovaObject): # Version 1.0: Initial version VERSION = '1.0' fields = { 'cpu_allocation_ratio': fields.FloatField(), 'ram_allocation_ratio': fields.FloatField(), } def to_dict_legacy(self, host_topology): cells = [] for cell in host_topology.cells: cells.append( {'cpus': hardware.format_cpu_spec( cell.cpuset, allow_ranges=False), 'mem': {'total': cell.memory, 'limit': cell.memory * self.ram_allocation_ratio}, 'cpu_limit': len(cell.cpuset) * self.cpu_allocation_ratio, 'id': cell.id}) return {'cells': cells} @classmethod def obj_from_db_obj(cls, db_obj): if 'nova_object.name' in db_obj: obj_topology = cls.obj_from_primitive(db_obj) else: # NOTE(sahid): This compatibility code needs to stay until we can # guarantee that all compute nodes are using RPC API => 3.40. cell = db_obj['cells'][0] ram_ratio = cell['mem']['limit'] / float(cell['mem']['total']) cpu_ratio = cell['cpu_limit'] / float(len(hardware.parse_cpu_spec( cell['cpus']))) obj_topology = NUMATopologyLimits( cpu_allocation_ratio=cpu_ratio, ram_allocation_ratio=ram_ratio) return obj_topology
apache-2.0
AlgoHunt/nerual_style_transfer
nets/nets_factory.py
31
5146
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains a factory for building various models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import tensorflow as tf from nets import alexnet from nets import cifarnet from nets import inception from nets import lenet from nets import mobilenet_v1 from nets import overfeat from nets import resnet_v1 from nets import resnet_v2 from nets import vgg slim = tf.contrib.slim networks_map = {'alexnet_v2': alexnet.alexnet_v2, 'cifarnet': cifarnet.cifarnet, 'overfeat': overfeat.overfeat, 'vgg_a': vgg.vgg_a, 'vgg_16': vgg.vgg_16, 'vgg_19': vgg.vgg_19, 'inception_v1': inception.inception_v1, 'inception_v2': inception.inception_v2, 'inception_v3': inception.inception_v3, 'inception_v4': inception.inception_v4, 'inception_resnet_v2': inception.inception_resnet_v2, 'lenet': lenet.lenet, 'resnet_v1_50': resnet_v1.resnet_v1_50, 'resnet_v1_101': resnet_v1.resnet_v1_101, 'resnet_v1_152': resnet_v1.resnet_v1_152, 'resnet_v1_200': resnet_v1.resnet_v1_200, 'resnet_v2_50': resnet_v2.resnet_v2_50, 'resnet_v2_101': resnet_v2.resnet_v2_101, 'resnet_v2_152': resnet_v2.resnet_v2_152, 'resnet_v2_200': resnet_v2.resnet_v2_200, 'mobilenet_v1': mobilenet_v1.mobilenet_v1, 'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_075, 'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_050, 'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_025, } arg_scopes_map = {'alexnet_v2': alexnet.alexnet_v2_arg_scope, 'cifarnet': cifarnet.cifarnet_arg_scope, 'overfeat': overfeat.overfeat_arg_scope, 'vgg_a': vgg.vgg_arg_scope, 'vgg_16': vgg.vgg_arg_scope, 'vgg_19': vgg.vgg_arg_scope, 'inception_v1': inception.inception_v3_arg_scope, 'inception_v2': inception.inception_v3_arg_scope, 'inception_v3': inception.inception_v3_arg_scope, 'inception_v4': inception.inception_v4_arg_scope, 'inception_resnet_v2': inception.inception_resnet_v2_arg_scope, 'lenet': lenet.lenet_arg_scope, 'resnet_v1_50': resnet_v1.resnet_arg_scope, 'resnet_v1_101': resnet_v1.resnet_arg_scope, 'resnet_v1_152': resnet_v1.resnet_arg_scope, 'resnet_v1_200': resnet_v1.resnet_arg_scope, 'resnet_v2_50': resnet_v2.resnet_arg_scope, 'resnet_v2_101': resnet_v2.resnet_arg_scope, 'resnet_v2_152': resnet_v2.resnet_arg_scope, 'resnet_v2_200': resnet_v2.resnet_arg_scope, 'mobilenet_v1': mobilenet_v1.mobilenet_v1_arg_scope, 'mobilenet_v1_075': mobilenet_v1.mobilenet_v1_arg_scope, 'mobilenet_v1_050': mobilenet_v1.mobilenet_v1_arg_scope, 'mobilenet_v1_025': mobilenet_v1.mobilenet_v1_arg_scope, } def get_network_fn(name, num_classes, weight_decay=0.0, is_training=False): """Returns a network_fn such as `logits, end_points = network_fn(images)`. Args: name: The name of the network. num_classes: The number of classes to use for classification. weight_decay: The l2 coefficient for the model weights. 
is_training: `True` if the model is being used for training and `False` otherwise. Returns: network_fn: A function that applies the model to a batch of images. It has the following signature: logits, end_points = network_fn(images) Raises: ValueError: If network `name` is not recognized. """ if name not in networks_map: raise ValueError('Name of network unknown %s' % name) func = networks_map[name] @functools.wraps(func) def network_fn(images): arg_scope = arg_scopes_map[name](weight_decay=weight_decay) with slim.arg_scope(arg_scope): return func(images, num_classes, is_training=is_training) if hasattr(func, 'default_image_size'): network_fn.default_image_size = func.default_image_size return network_fn
apache-2.0
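As a hedged illustration (not part of the record above), nets_factory.get_network_fn could be driven roughly as follows; the model name, class count, and placeholder shape are assumptions for demonstration.

import tensorflow as tf
from nets import nets_factory

network_fn = nets_factory.get_network_fn(
    'vgg_16', num_classes=10, weight_decay=0.0005, is_training=True)
size = network_fn.default_image_size          # attached when the net defines it
images = tf.placeholder(tf.float32, [None, size, size, 3])
logits, end_points = network_fn(images)       # applies the arg_scope internally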
Pikecillo/genna
external/4Suite-XML-1.0.2/test/Xml/Core/__init__.py
1
1056
__revision__ = '$Id: __init__.py,v 1.12 2005/11/15 02:22:41 jkloth Exp $'

def PreprocessFiles(dirs, files):
    """
    PreprocessFiles(dirs, files) -> (dirs, files)

    This function is responsible for sorting and trimming the
    file and directory lists as needed for proper testing.
    """
    from Ft.Lib.TestSuite import RemoveTests, SortTests

    ignored_files = ['test_domlette_readers',
                     'test_domlette_interfaces',
                     'test_domlette_memory',
                     'test_domlette_writers',
                     'test_catalog',
                     'test_ranges',  # only supported by 4DOM
                     'test_get_all_ns',
                     'test_string_strip',
                     'test_split_qname',
                     ]
    RemoveTests(files, ignored_files)

    ordered_files = ['test_domlette', 'test_saxlette']
    SortTests(files, ordered_files)

    ignored_dirs = []
    RemoveTests(dirs, ignored_dirs)

    ordered_dirs = []
    SortTests(dirs, ordered_dirs)

    return (dirs, files)
gpl-2.0
groutr/conda-tools
src/conda_tools/environment/history.py
1
7101
""" Adapted from conda/history.py Licensed under BSD 3-clause license. """ from __future__ import print_function import re import time from json import loads from os.path import isfile, join from functools import lru_cache from ..common import lazyproperty class CondaHistoryException(Exception): pass class CondaHistoryWarning(Warning): pass def dist2pair(dist): dist = str(dist) if dist.endswith(']'): dist = dist.split('[', 1)[0] if dist.endswith('.tar.bz2'): dist = dist[:-8] parts = dist.split('::', 1) return 'defaults' if len(parts) < 2 else parts[0], parts[-1] def dist2quad(dist): channel, dist = dist2pair(dist) parts = dist.rsplit('-', 2) + ['', ''] return (parts[0], parts[1], parts[2], channel) def is_diff(content): return any(s.startswith(('-', '+')) for s in content) def pretty_diff(diff): added = {} removed = {} for s in diff: fn = s[1:] name, version, _, channel = dist2quad(fn) if channel != 'defaults': version += ' (%s)' % channel if s.startswith('-'): removed[name.lower()] = version elif s.startswith('+'): added[name.lower()] = version changed = set(added) & set(removed) for name in sorted(changed): yield ' %s {%s -> %s}' % (name, removed[name], added[name]) for name in sorted(set(removed) - changed): yield '-%s-%s' % (name, removed[name]) for name in sorted(set(added) - changed): yield '+%s-%s' % (name, added[name]) def pretty_content(content): if is_diff(content): return pretty_diff(content) else: return iter(sorted(content)) class History(object): def __init__(self, prefix): meta_dir = join(prefix, 'conda-meta') self.path = join(meta_dir, 'history') @lazyproperty def _parse(self): """ parse the history file and return a list of tuples(datetime strings, set of distributions/diffs, comments) """ res = [] if not isfile(self.path): return res sep_pat = re.compile(r'==>\s*(.+?)\s*<==') with open(self.path, 'r') as f: lines = f.read().splitlines() for line in lines: line = line.strip() if not line: continue m = sep_pat.match(line) if m: res.append((m.group(1), set(), [])) elif line.startswith('#'): res[-1][2].append(line) else: res[-1][1].add(line) return res @lazyproperty def get_user_requests(self): """ return a list of user requested items. Each item is a dict with the following keys: 'date': the date and time running the command 'cmd': a list of argv of the actual command which was run 'action': install/remove/update 'specs': the specs being used """ res = [] com_pat = re.compile(r'#\s*cmd:\s*(.+)') spec_pat = re.compile(r'#\s*(\w+)\s*specs:\s*(.+)') for dt, unused_cont, comments in self._parse: item = {'date': dt} for line in comments: m = com_pat.match(line) if m: argv = m.group(1).split() if argv[0].endswith('conda'): argv[0] = 'conda' item['cmd'] = argv m = spec_pat.match(line) if m: action, specs = m.groups() item['action'] = action item['specs'] = loads(specs.replace("'", "\"")) if 'cmd' in item: res.append(item) return res @lazyproperty def construct_states(self): """ return a list of tuples(datetime strings, set of distributions) """ res = [] cur = set([]) for dt, cont, unused_com in self._parse: if not is_diff(cont): cur = cont else: for s in cont: if s.startswith('-'): cur.discard(s[1:]) elif s.startswith('+'): cur.add(s[1:]) else: raise CondaHistoryException('Did not expect: %s' % s) res.append((dt, cur.copy())) return res def get_state(self, rev=-1): """ return the state, i.e. 
the set of distributions, for a given revision, defaults to latest (which is the same as the current state when the log file is up-to-date) """ states = self.construct_states if not states: return set([]) times, pkgs = zip(*states) return pkgs[rev] def print_log(self): for i, (date, content, unused_com) in enumerate(self._parse): print('%s (rev %d)' % (date, i)) for line in pretty_content(content): print(' %s' % line) print() @lazyproperty def object_log(self): result = [] for i, (date, content, unused_com) in enumerate(self._parse): # Based on Mateusz's code; provides more details about the # history event event = { 'date': date, 'rev': i, 'install': [], 'remove': [], 'upgrade': [], 'downgrade': [] } added = {} removed = {} if is_diff(content): for pkg in content: name, version, build, channel = dist2quad(pkg[1:]) if pkg.startswith('+'): added[name.lower()] = (version, build, channel) elif pkg.startswith('-'): removed[name.lower()] = (version, build, channel) changed = set(added) & set(removed) for name in sorted(changed): old = removed[name] new = added[name] details = { 'old': '-'.join((name,) + old), 'new': '-'.join((name,) + new) } if new > old: event['upgrade'].append(details) else: event['downgrade'].append(details) for name in sorted(set(removed) - changed): event['remove'].append('-'.join((name,) + removed[name])) for name in sorted(set(added) - changed): event['install'].append('-'.join((name,) + added[name])) else: for pkg in sorted(content): event['install'].append(pkg) result.append(event) return result def __repr__(self): return 'History({}) @ {}'.format(self.path, hex(id(self))) def __str__(self): return 'History({})'.format(self.path)
bsd-3-clause
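A minimal usage sketch for the History class in the record above; the environment prefix and the import path are placeholders inferred from the record's file path, not values taken from the repository.

from conda_tools.environment.history import History  # assumed import path

hist = History('/opt/conda/envs/myenv')   # reads <prefix>/conda-meta/history
for event in hist.object_log:             # lazily parsed once, then cached
    print(event['rev'], event['date'], event['install'], event['remove'])
print(hist.get_state())                   # set of dists at the latest revision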
jordanemedlock/psychtruths
temboo/Library/Basecamp/UpdateEntry.py
5
5739
# -*- coding: utf-8 -*- ############################################################################### # # UpdateEntry # Updates a calendar event or milestone in a project you specify. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class UpdateEntry(Choreography): def __init__(self, temboo_session): """ Create a new instance of the UpdateEntry Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(UpdateEntry, self).__init__(temboo_session, '/Library/Basecamp/UpdateEntry') def new_input_set(self): return UpdateEntryInputSet() def _make_result_set(self, result, path): return UpdateEntryResultSet(result, path) def _make_execution(self, session, exec_id, path): return UpdateEntryChoreographyExecution(session, exec_id, path) class UpdateEntryInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the UpdateEntry Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_AccountName(self, value): """ Set the value of the AccountName input for this Choreo. ((required, string) A valid Basecamp account name. This is the first part of the account's URL.) """ super(UpdateEntryInputSet, self)._set_input('AccountName', value) def set_EndDate(self, value): """ Set the value of the EndDate input for this Choreo. ((required, date) The new end date for the updated entry, in the format YYYY-MM-DD.) """ super(UpdateEntryInputSet, self)._set_input('EndDate', value) def set_EntryID(self, value): """ Set the value of the EntryID input for this Choreo. ((required, integer) The ID for the calendar entry to update.) """ super(UpdateEntryInputSet, self)._set_input('EntryID', value) def set_Password(self, value): """ Set the value of the Password input for this Choreo. ((required, password) The Basecamp account password. Use the value 'X' when specifying an API Key for the Username input.) """ super(UpdateEntryInputSet, self)._set_input('Password', value) def set_ProjectID(self, value): """ Set the value of the ProjectID input for this Choreo. ((required, integer) The ID of the project with the calendar entry to update.) """ super(UpdateEntryInputSet, self)._set_input('ProjectID', value) def set_ResponsibleParty(self, value): """ Set the value of the ResponsibleParty input for this Choreo. ((optional, any) The user ID or company ID (preceded by a “c”, as in "c1234") to reassign the entry to. Applies only to "Milestone" entry types.) """ super(UpdateEntryInputSet, self)._set_input('ResponsibleParty', value) def set_StartDate(self, value): """ Set the value of the StartDate input for this Choreo. 
((optional, date) The new start date for the updated entry, in the format YYYY-MM-DD.) """ super(UpdateEntryInputSet, self)._set_input('StartDate', value) def set_Title(self, value): """ Set the value of the Title input for this Choreo. ((optional, string) The new title for the updated entry.) """ super(UpdateEntryInputSet, self)._set_input('Title', value) def set_Type(self, value): """ Set the value of the Type input for this Choreo. ((optional, string) The new type for the updated entry, either "CalendarEvent" (the default) or "Milestone".) """ super(UpdateEntryInputSet, self)._set_input('Type', value) def set_Username(self, value): """ Set the value of the Username input for this Choreo. ((required, string) A Basecamp account username or API Key.) """ super(UpdateEntryInputSet, self)._set_input('Username', value) class UpdateEntryResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the UpdateEntry Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. ((xml) The response returned from Basecamp.) """ return self._output.get('Response', None) def get_TemplateOutput(self): """ Retrieve the value for the "TemplateOutput" output from this Choreo execution. (The request created from the input template.) """ return self._output.get('TemplateOutput', None) class UpdateEntryChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return UpdateEntryResultSet(response, path)
apache-2.0
youprofit/zato
code/zato-web-admin/src/zato/admin/web/forms/load_balancer.py
7
3670
# -*- coding: utf-8 -*- """ Copyright (C) 2010 Dariusz Suchojad <dsuch at zato.io> Licensed under LGPLv3, see LICENSE.txt for terms and conditions. """ from __future__ import absolute_import, division, print_function, unicode_literals # stdlib from operator import itemgetter # Django from django import forms # Zato from zato.common.haproxy import timeouts, http_log from zato.common.util import make_repr def populate_choices(form, fields_choices): """ A convenience function used in several places for populating a given form's SELECT choices. """ for field_name, choices in fields_choices: form.fields[field_name].choices = [] choices = sorted(choices.items(), key=itemgetter(0)) for choice_id, choice_info in choices: choice_name = choice_info[1] form.fields[field_name].choices.append([choice_id, choice_name]) class ManageLoadBalancerForm(forms.Form): """ Form for the graphical management of HAProxy. """ global_log_host = forms.CharField(widget=forms.TextInput(attrs={"class":"required", "style":"width:70%"})) global_log_port = forms.CharField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:70%"})) global_log_facility = forms.CharField(widget=forms.TextInput(attrs={"class":"required", "style":"width:70%"})) global_log_level = forms.CharField(widget=forms.TextInput(attrs={"class":"required", "style":"width:70%"})) timeout_connect = forms.CharField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:30%"})) timeout_client = forms.CharField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:30%"})) timeout_server = forms.CharField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:30%"})) http_plain_bind_address = forms.CharField(widget=forms.TextInput(attrs={"class":"required", "style":"width:70%"})) http_plain_bind_port = forms.CharField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:30%"})) http_plain_log_http_requests = forms.ChoiceField() http_plain_maxconn = forms.CharField(widget=forms.TextInput(attrs={"class":"required validate-digits", "style":"width:30%"})) http_plain_monitor_uri = forms.CharField(widget=forms.TextInput(attrs={"class":"required", "style":"width:70%"})) def __init__(self, initial={}): super(ManageLoadBalancerForm, self).__init__(initial=initial) fields_choices = ( ("http_plain_log_http_requests", http_log), ) populate_choices(self, fields_choices) def __repr__(self): return make_repr(self) class ManageLoadBalancerSourceCodeForm(forms.Form): """ Form for the source code-level management of HAProxy. """ source_code = forms.CharField(widget=forms.Textarea(attrs={"style":"overflow:auto; width:100%; white-space: pre-wrap;height:400px"})) class RemoteCommandForm(forms.Form): """ Form for the direct interface to HAProxy's commands. """ command = forms.ChoiceField() timeout = forms.ChoiceField() extra = forms.CharField(widget=forms.TextInput(attrs={"style":"width:40%"})) result = forms.CharField(widget=forms.Textarea(attrs={"style":"overflow:auto; width:100%; white-space: pre-wrap;height:400px"})) def __init__(self, commands, initial={}): super(RemoteCommandForm, self).__init__(initial=initial) fields_choices = ( ("command", commands), ("timeout", timeouts), ) populate_choices(self, fields_choices) def __repr__(self): return make_repr(self)
gpl-3.0
gVallverdu/pymatgen
pymatgen/alchemy/materials.py
4
14429
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ This module provides various representations of transformed structures. A TransformedStructure is a structure that has been modified by undergoing a series of transformations. """ import os import re import json import datetime from monty.json import MontyDecoder, jsanitize from pymatgen.core.structure import Structure from pymatgen.io.cif import CifParser from pymatgen.io.vasp.inputs import Poscar from monty.json import MSONable from pymatgen.io.vasp.sets import MPRelaxSet from warnings import warn __author__ = "Shyue Ping Ong, Will Richards" __copyright__ = "Copyright 2012, The Materials Project" __version__ = "1.0" __maintainer__ = "Shyue Ping Ong" __email__ = "[email protected]" __date__ = "Mar 2, 2012" dec = MontyDecoder() class TransformedStructure(MSONable): """ Container object for new structures that include history of transformations. Each transformed structure is made up of a sequence of structures with associated transformation history. """ def __init__(self, structure, transformations=None, history=None, other_parameters=None): """ Initializes a transformed structure from a structure. Args: structure (Structure): Input structure transformations ([Transformations]): List of transformations to apply. history (list): Previous history. other_parameters (dict): Additional parameters to be added. """ self.final_structure = structure self.history = history or [] self.other_parameters = other_parameters or {} self._undone = [] transformations = transformations or [] for t in transformations: self.append_transformation(t) def undo_last_change(self): """ Undo the last change in the TransformedStructure. Raises: IndexError: If already at the oldest change. """ if len(self.history) == 0: raise IndexError("Can't undo. Already at oldest change.") if 'input_structure' not in self.history[-1]: raise IndexError("Can't undo. Latest history has no " "input_structure") h = self.history.pop() self._undone.append((h, self.final_structure)) s = h["input_structure"] if isinstance(s, dict): s = Structure.from_dict(s) self.final_structure = s def redo_next_change(self): """ Redo the last undone change in the TransformedStructure. Raises: IndexError: If already at the latest change. """ if len(self._undone) == 0: raise IndexError("Can't redo. Already at latest change.") h, s = self._undone.pop() self.history.append(h) self.final_structure = s def __getattr__(self, name): s = object.__getattribute__(self, 'final_structure') return getattr(s, name) def __len__(self): return len(self.history) def append_transformation(self, transformation, return_alternatives=False, clear_redo=True): """ Appends a transformation to the TransformedStructure. Args: transformation: Transformation to append return_alternatives: Whether to return alternative TransformedStructures for one-to-many transformations. return_alternatives can be a number, which stipulates the total number of structures to return. clear_redo: Boolean indicating whether to clear the redo list. By default, this is True, meaning any appends clears the history of undoing. However, when using append_transformation to do a redo, the redo list should not be cleared to allow multiple redos. 
""" if clear_redo: self._undone = [] if return_alternatives and transformation.is_one_to_many: ranked_list = transformation.apply_transformation( self.final_structure, return_ranked_list=return_alternatives) input_structure = self.final_structure.as_dict() alts = [] for x in ranked_list[1:]: s = x.pop("structure") actual_transformation = x.pop("transformation", transformation) hdict = actual_transformation.as_dict() hdict["input_structure"] = input_structure hdict["output_parameters"] = x self.final_structure = s d = self.as_dict() d['history'].append(hdict) d['final_structure'] = s.as_dict() alts.append(TransformedStructure.from_dict(d)) x = ranked_list[0] s = x.pop("structure") actual_transformation = x.pop("transformation", transformation) hdict = actual_transformation.as_dict() hdict["input_structure"] = self.final_structure.as_dict() hdict["output_parameters"] = x self.history.append(hdict) self.final_structure = s return alts else: s = transformation.apply_transformation(self.final_structure) hdict = transformation.as_dict() hdict["input_structure"] = self.final_structure.as_dict() hdict["output_parameters"] = {} self.history.append(hdict) self.final_structure = s def append_filter(self, structure_filter): """ Adds a filter. Args: structure_filter (StructureFilter): A filter implementating the AbstractStructureFilter API. Tells transmuter waht structures to retain. """ hdict = structure_filter.as_dict() hdict["input_structure"] = self.final_structure.as_dict() self.history.append(hdict) def extend_transformations(self, transformations, return_alternatives=False): """ Extends a sequence of transformations to the TransformedStructure. Args: transformations: Sequence of Transformations return_alternatives: Whether to return alternative TransformedStructures for one-to-many transformations. return_alternatives can be a number, which stipulates the total number of structures to return. """ for t in transformations: self.append_transformation(t, return_alternatives=return_alternatives) def get_vasp_input(self, vasp_input_set=MPRelaxSet, **kwargs): """ Returns VASP input as a dict of vasp objects. Args: vasp_input_set (pymatgen.io.vaspio_set.VaspInputSet): input set to create vasp input files from structures """ d = vasp_input_set(self.final_structure, **kwargs).get_vasp_input() d["transformations.json"] = json.dumps(self.as_dict()) return d def write_vasp_input(self, vasp_input_set=MPRelaxSet, output_dir=".", create_directory=True, **kwargs): r""" Writes VASP input to an output_dir. Args: vasp_input_set: pymatgen.io.vaspio_set.VaspInputSet like object that creates vasp input files from structures output_dir: Directory to output files create_directory: Create the directory if not present. Defaults to True. **kwargs: All keyword args supported by the VASP input set. """ vasp_input_set(self.final_structure, **kwargs).write_input( output_dir, make_dir_if_not_present=create_directory) with open(os.path.join(output_dir, "transformations.json"), "w") as fp: json.dump(self.as_dict(), fp) def __str__(self): output = ["Current structure", "------------", str(self.final_structure), "\nHistory", "------------"] for h in self.history: h.pop('input_structure', None) output.append(str(h)) output.append("\nOther parameters") output.append("------------") output.append(str(self.other_parameters)) return "\n".join(output) def set_parameter(self, key, value): """ Set a parameter :param key: The string key :param value: The value. 
""" self.other_parameters[key] = value @property def was_modified(self): """ Boolean describing whether the last transformation on the structure made any alterations to it one example of when this would return false is in the case of performing a substitution transformation on the structure when the specie to replace isn't in the structure. """ return not self.final_structure == self.structures[-2] @property def structures(self): """ Copy of all structures in the TransformedStructure. A structure is stored after every single transformation. """ hstructs = [Structure.from_dict(s['input_structure']) for s in self.history if 'input_structure' in s] return hstructs + [self.final_structure] @staticmethod def from_cif_string(cif_string, transformations=None, primitive=True, occupancy_tolerance=1.): """ Generates TransformedStructure from a cif string. Args: cif_string (str): Input cif string. Should contain only one structure. For cifs containing multiple structures, please use CifTransmuter. transformations ([Transformations]): Sequence of transformations to be applied to the input structure. primitive (bool): Option to set if the primitive cell should be extracted. Defaults to True. However, there are certain instances where you might want to use a non-primitive cell, e.g., if you are trying to generate all possible orderings of partial removals or order a disordered structure. occupancy_tolerance (float): If total occupancy of a site is between 1 and occupancy_tolerance, the occupancies will be scaled down to 1. Returns: TransformedStructure """ parser = CifParser.from_string(cif_string, occupancy_tolerance) raw_string = re.sub(r"'", "\"", cif_string) cif_dict = parser.as_dict() cif_keys = list(cif_dict.keys()) s = parser.get_structures(primitive)[0] partial_cif = cif_dict[cif_keys[0]] if "_database_code_ICSD" in partial_cif: source = partial_cif["_database_code_ICSD"] + "-ICSD" else: source = "uploaded cif" source_info = {"source": source, "datetime": str(datetime.datetime.now()), "original_file": raw_string, "cif_data": cif_dict[cif_keys[0]]} return TransformedStructure(s, transformations, history=[source_info]) @staticmethod def from_poscar_string(poscar_string, transformations=None): """ Generates TransformedStructure from a poscar string. Args: poscar_string (str): Input POSCAR string. transformations ([Transformations]): Sequence of transformations to be applied to the input structure. """ p = Poscar.from_string(poscar_string) if not p.true_names: raise ValueError("Transformation can be craeted only from POSCAR " "strings with proper VASP5 element symbols.") raw_string = re.sub(r"'", "\"", poscar_string) s = p.structure source_info = {"source": "POSCAR", "datetime": str(datetime.datetime.now()), "original_file": raw_string} return TransformedStructure(s, transformations, history=[source_info]) def as_dict(self): """ Dict representation of the TransformedStructure. """ d = self.final_structure.as_dict() d["@module"] = self.__class__.__module__ d["@class"] = self.__class__.__name__ d["history"] = jsanitize(self.history) d["version"] = __version__ d["last_modified"] = str(datetime.datetime.utcnow()) d["other_parameters"] = jsanitize(self.other_parameters) return d @classmethod def from_dict(cls, d): """ Creates a TransformedStructure from a dict. """ s = Structure.from_dict(d) return cls(s, history=d["history"], other_parameters=d.get("other_parameters", None)) def to_snl(self, authors, **kwargs): """ Generate SNL from TransformedStructure. 
:param authors: List of authors :param **kwargs: All kwargs supported by StructureNL. :return: StructureNL """ if self.other_parameters: warn('Data in TransformedStructure.other_parameters discarded ' 'during type conversion to SNL') hist = [] for h in self.history: snl_metadata = h.pop('_snl', {}) hist.append({'name': snl_metadata.pop('name', 'pymatgen'), 'url': snl_metadata.pop( 'url', 'http://pypi.python.org/pypi/pymatgen'), 'description': h}) from pymatgen.util.provenance import StructureNL return StructureNL(self.final_structure, authors, history=hist, **kwargs) @classmethod def from_snl(cls, snl): """ Create TransformedStructure from SNL. Args: snl (StructureNL): Starting snl Returns: TransformedStructure """ hist = [] for h in snl.history: d = h.description d['_snl'] = {'url': h.url, 'name': h.name} hist.append(d) return cls(snl.structure, history=hist)
mit
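Purely as an illustration (not part of the record above), the TransformedStructure class might be exercised like this; the POSCAR file and the choice of transformation are assumptions.

from pymatgen.alchemy.materials import TransformedStructure
from pymatgen.transformations.standard_transformations import SupercellTransformation

with open("POSCAR") as f:                       # hypothetical VASP5 POSCAR file
    ts = TransformedStructure.from_poscar_string(f.read())
ts.append_transformation(
    SupercellTransformation(((2, 0, 0), (0, 2, 0), (0, 0, 2))))
print(ts.final_structure)                       # structure after all transformations
print(len(ts), "entries recorded in history")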
misterhat/youtube-dl
youtube_dl/extractor/hellporno.py
153
2279
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    js_to_json,
    remove_end,
)


class HellPornoIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?hellporno\.com/videos/(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://hellporno.com/videos/dixie-is-posing-with-naked-ass-very-erotic/',
        'md5': '1fee339c610d2049699ef2aa699439f1',
        'info_dict': {
            'id': '149116',
            'display_id': 'dixie-is-posing-with-naked-ass-very-erotic',
            'ext': 'mp4',
            'title': 'Dixie is posing with naked ass very erotic',
            'thumbnail': 're:https?://.*\.jpg$',
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)

        webpage = self._download_webpage(url, display_id)

        title = remove_end(self._html_search_regex(
            r'<title>([^<]+)</title>', webpage, 'title'), ' - Hell Porno')

        flashvars = self._parse_json(self._search_regex(
            r'var\s+flashvars\s*=\s*({.+?});', webpage, 'flashvars'),
            display_id, transform_source=js_to_json)

        video_id = flashvars.get('video_id')
        thumbnail = flashvars.get('preview_url')
        ext = flashvars.get('postfix', '.mp4')[1:]

        formats = []
        for video_url_key in ['video_url', 'video_alt_url']:
            video_url = flashvars.get(video_url_key)
            if not video_url:
                continue
            video_text = flashvars.get('%s_text' % video_url_key)
            fmt = {
                'url': video_url,
                'ext': ext,
                'format_id': video_text,
            }
            m = re.search(r'^(?P<height>\d+)[pP]', video_text)
            if m:
                fmt['height'] = int(m.group('height'))
            formats.append(fmt)
        self._sort_formats(formats)

        categories = self._html_search_meta(
            'keywords', webpage, 'categories', default='').split(',')

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'thumbnail': thumbnail,
            'categories': categories,
            'age_limit': 18,
            'formats': formats,
        }
unlicense
farmisen/electron
script/create-dist.py
65
5723
#!/usr/bin/env python import os import re import shutil import subprocess import sys import stat from lib.config import LIBCHROMIUMCONTENT_COMMIT, BASE_URL, PLATFORM, \ get_target_arch, get_chromedriver_version from lib.util import scoped_cwd, rm_rf, get_atom_shell_version, make_zip, \ execute, atom_gyp ATOM_SHELL_VERSION = get_atom_shell_version() SOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) DIST_DIR = os.path.join(SOURCE_ROOT, 'dist') OUT_DIR = os.path.join(SOURCE_ROOT, 'out', 'R') CHROMIUM_DIR = os.path.join(SOURCE_ROOT, 'vendor', 'brightray', 'vendor', 'download', 'libchromiumcontent', 'static_library') PROJECT_NAME = atom_gyp()['project_name%'] PRODUCT_NAME = atom_gyp()['product_name%'] TARGET_BINARIES = { 'darwin': [ ], 'win32': [ '{0}.exe'.format(PROJECT_NAME), # 'electron.exe' 'content_shell.pak', 'd3dcompiler_47.dll', 'icudtl.dat', 'libEGL.dll', 'libGLESv2.dll', 'msvcp120.dll', 'msvcr120.dll', 'node.dll', 'pdf.dll', 'content_resources_200_percent.pak', 'ui_resources_200_percent.pak', 'xinput1_3.dll', 'natives_blob.bin', 'snapshot_blob.bin', 'vccorlib120.dll', ], 'linux': [ PROJECT_NAME, # 'electron' 'content_shell.pak', 'icudtl.dat', 'libnode.so', 'natives_blob.bin', 'snapshot_blob.bin', ], } TARGET_DIRECTORIES = { 'darwin': [ '{0}.app'.format(PRODUCT_NAME), ], 'win32': [ 'resources', 'locales', ], 'linux': [ 'resources', 'locales', ], } SYSTEM_LIBRARIES = [ 'libgcrypt.so', 'libnotify.so', ] def main(): rm_rf(DIST_DIR) os.makedirs(DIST_DIR) target_arch = get_target_arch() force_build() create_symbols() copy_binaries() copy_chrome_binary('chromedriver') copy_chrome_binary('mksnapshot') copy_license() if PLATFORM == 'linux': strip_binaries() if target_arch != 'arm': copy_system_libraries() create_version() create_dist_zip() create_chrome_binary_zip('chromedriver', get_chromedriver_version()) create_chrome_binary_zip('mksnapshot', ATOM_SHELL_VERSION) create_symbols_zip() def force_build(): build = os.path.join(SOURCE_ROOT, 'script', 'build.py') execute([sys.executable, build, '-c', 'Release']) def copy_binaries(): for binary in TARGET_BINARIES[PLATFORM]: shutil.copy2(os.path.join(OUT_DIR, binary), DIST_DIR) for directory in TARGET_DIRECTORIES[PLATFORM]: shutil.copytree(os.path.join(OUT_DIR, directory), os.path.join(DIST_DIR, directory), symlinks=True) def copy_chrome_binary(binary): if PLATFORM == 'win32': binary += '.exe' src = os.path.join(CHROMIUM_DIR, binary) dest = os.path.join(DIST_DIR, binary) # Copy file and keep the executable bit. shutil.copyfile(src, dest) os.chmod(dest, os.stat(dest).st_mode | stat.S_IEXEC) def copy_license(): shutil.copy2(os.path.join(SOURCE_ROOT, 'LICENSE'), DIST_DIR) def strip_binaries(): if get_target_arch() == 'arm': strip = 'arm-linux-gnueabihf-strip' else: strip = 'strip' for binary in TARGET_BINARIES[PLATFORM]: if binary.endswith('.so') or '.' 
not in binary: execute([strip, os.path.join(DIST_DIR, binary)]) def copy_system_libraries(): executable_path = os.path.join(OUT_DIR, PROJECT_NAME) # our/R/electron ldd = execute(['ldd', executable_path]) lib_re = re.compile('\t(.*) => (.+) \(.*\)$') for line in ldd.splitlines(): m = lib_re.match(line) if not m: continue for i, library in enumerate(SYSTEM_LIBRARIES): real_library = m.group(1) if real_library.startswith(library): shutil.copyfile(m.group(2), os.path.join(DIST_DIR, real_library)) SYSTEM_LIBRARIES[i] = real_library def create_version(): version_path = os.path.join(SOURCE_ROOT, 'dist', 'version') with open(version_path, 'w') as version_file: version_file.write(ATOM_SHELL_VERSION) def create_symbols(): destination = os.path.join(DIST_DIR, '{0}.breakpad.syms'.format(PROJECT_NAME)) dump_symbols = os.path.join(SOURCE_ROOT, 'script', 'dump-symbols.py') execute([sys.executable, dump_symbols, destination]) def create_dist_zip(): dist_name = '{0}-{1}-{2}-{3}.zip'.format(PROJECT_NAME, ATOM_SHELL_VERSION, PLATFORM, get_target_arch()) zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name) with scoped_cwd(DIST_DIR): files = TARGET_BINARIES[PLATFORM] + ['LICENSE', 'version'] if PLATFORM == 'linux': files += [lib for lib in SYSTEM_LIBRARIES if os.path.exists(lib)] dirs = TARGET_DIRECTORIES[PLATFORM] make_zip(zip_file, files, dirs) def create_chrome_binary_zip(binary, version): dist_name = '{0}-{1}-{2}-{3}.zip'.format(binary, version, PLATFORM, get_target_arch()) zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name) with scoped_cwd(DIST_DIR): files = ['LICENSE'] if PLATFORM == 'win32': files += [binary + '.exe'] else: files += [binary] make_zip(zip_file, files, []) def create_symbols_zip(): dist_name = '{0}-{1}-{2}-{3}-symbols.zip'.format(PROJECT_NAME, ATOM_SHELL_VERSION, PLATFORM, get_target_arch()) zip_file = os.path.join(SOURCE_ROOT, 'dist', dist_name) with scoped_cwd(DIST_DIR): files = ['LICENSE', 'version'] dirs = ['{0}.breakpad.syms'.format(PROJECT_NAME)] make_zip(zip_file, files, dirs) if __name__ == '__main__': sys.exit(main())
mit
jorik041/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/scm/scm_mock.py
122
4889
# Copyright (C) 2011 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from webkitpy.common.checkout.scm import CommitMessage from webkitpy.common.system.filesystem_mock import MockFileSystem from webkitpy.common.system.executive_mock import MockExecutive class MockSCM(object): def __init__(self, filesystem=None, executive=None): self.checkout_root = "/mock-checkout" self.added_paths = set() self._filesystem = filesystem or MockFileSystem() self._executive = executive or MockExecutive() def add(self, destination_path): self.add_list([destination_path]) def add_list(self, destination_paths): self.added_paths.update(set(destination_paths)) def has_working_directory_changes(self): return False def discard_working_directory_changes(self): pass def supports_local_commits(self): return True def has_local_commits(self): return False def discard_local_commits(self): pass def discard_local_changes(self): pass def exists(self, path): # TestRealMain.test_real_main (and several other rebaseline tests) are sensitive to this return value. # We should make those tests more robust, but for now we just return True always (since no test needs otherwise). 
return True def absolute_path(self, *comps): return self._filesystem.join(self.checkout_root, *comps) def changed_files(self, git_commit=None): return ["MockFile1"] def changed_files_for_revision(self, revision): return ["MockFile1"] def head_svn_revision(self): return '1234' def svn_revision(self, path): return '5678' def timestamp_of_revision(self, path, revision): return '2013-02-01 08:48:05 +0000' def create_patch(self, git_commit, changed_files=None): return "Patch1" def commit_ids_from_commitish_arguments(self, args): return ["Commitish1", "Commitish2"] def committer_email_for_revision(self, revision): return "[email protected]" def commit_locally_with_message(self, message): pass def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None): pass def merge_base(self, git_commit): return None def commit_message_for_local_commit(self, commit_id): if commit_id == "Commitish1": return CommitMessage("CommitMessage1\n" \ "https://bugs.example.org/show_bug.cgi?id=50000\n") if commit_id == "Commitish2": return CommitMessage("CommitMessage2\n" \ "https://bugs.example.org/show_bug.cgi?id=50001\n") raise Exception("Bogus commit_id in commit_message_for_local_commit.") def diff_for_file(self, path, log=None): return path + '-diff' def diff_for_revision(self, revision): return "DiffForRevision%s\nhttp://bugs.webkit.org/show_bug.cgi?id=12345" % revision def show_head(self, path): return path def svn_revision_from_commit_text(self, commit_text): return "49824" def delete(self, path): return self.delete_list([path]) def delete_list(self, paths): if not self._filesystem: return for path in paths: if self._filesystem.exists(path): self._filesystem.remove(path)
bsd-3-clause
TeamExodus/kernel_google_msm
tools/perf/scripts/python/syscall-counts.py
11181
1522
# system call counts # (c) 2010, Tom Zanussi <[email protected]> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import syscall_name usage = "perf script -s syscall-counts.py [comm]\n"; for_comm = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(): if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "-----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): print "%-40s %10d\n" % (syscall_name(id), val),
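# Illustrative sketch, not part of the upstream script: the handler above only
# keeps one counter per syscall id. The same aggregation, runnable outside of
# perf (the sample ids below are made up), looks roughly like this:
from collections import defaultdict

def tally(event_ids):
    counts = defaultdict(int)
    for syscall_id in event_ids:
        counts[syscall_id] += 1
    return counts

def print_totals(counts):
    for syscall_id, n in sorted(counts.items(), key=lambda kv: kv[1], reverse=True):
        print("%-40s %10d" % (syscall_id, n))

if __name__ == "__main__":
    print_totals(tally([0, 1, 1, 3, 1, 0]))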
gpl-2.0
nazoking/DNC-tensorflow
dnc/controller.py
1
9060
import tensorflow as tf import numpy as np class BaseController: def __init__(self, input_size, output_size, memory_read_heads, memory_word_size, batch_size=1): """ constructs a controller as described in the DNC paper: http://www.nature.com/nature/journal/vaop/ncurrent/full/nature20101.html Parameters: ---------- input_size: int the size of the data input vector output_size: int the size of the data output vector memory_read_heads: int the number of read haeds in the associated external memory memory_word_size: int the size of the word in the associated external memory batch_size: int the size of the input data batch [optional, usually set by the DNC object] """ self.input_size = input_size self.output_size = output_size self.read_heads = memory_read_heads self.word_size = memory_word_size self.batch_size = batch_size # indicates if the internal neural network is recurrent # by the existence of recurrent_update and get_state methods has_recurrent_update = callable(getattr(self, 'update_state', None)) has_get_state = callable(getattr(self, 'get_state', None)) self.has_recurrent_nn = has_recurrent_update and has_get_state # the actual size of the neural network input after flatenning and # concatenating the input vector with the previously read vctors from memory self.nn_input_size = self.word_size * self.read_heads + self.input_size self.interface_vector_size = self.word_size * self.read_heads + 3 * self.word_size + 5 * self.read_heads + 3 # define network vars with tf.name_scope("controller"): self.network_vars() nn_output_size = None with tf.variable_scope("shape_inference"): nn_output_size = self.get_nn_output_size() initial_std = lambda in_nodes: np.min(1e-2, np.sqrt(2.0 / in_nodes)) # defining internal weights of the controller self.interface_weights = tf.Variable( tf.truncated_normal([nn_output_size, self.interface_vector_size], stddev=initial_std(nn_output_size)), name='interface_weights' ) self.nn_output_weights = tf.Variable( tf.truncated_normal([nn_output_size, self.output_size], stddev=initial_std(nn_output_size)), name='nn_output_weights' ) self.mem_output_weights = tf.Variable( tf.truncated_normal([self.word_size * self.read_heads, self.output_size], stddev=initial_std(self.word_size * self.read_heads)), name='mem_output_weights' ) def network_vars(self): """ defines the variables needed by the internal neural network [the variables should be attributes of the class, i.e. 
self.*] """ raise NotImplementedError("network_vars is not implemented") def network_op(self, X): """ defines the controller's internal neural network operation Parameters: ---------- X: Tensor (batch_size, word_size * read_haeds + input_size) the input data concatenated with the previously read vectors from memory Returns: Tensor (batch_size, nn_output_size) """ raise NotImplementedError("network_op method is not implemented") def get_nn_output_size(self): """ retrives the output size of the defined neural network Returns: int the output's size Raises: ValueError """ input_vector = np.zeros([self.batch_size, self.nn_input_size], dtype=np.float32) output_vector = None if self.has_recurrent_nn: output_vector,_ = self.network_op(input_vector, self.get_state()) else: output_vector = self.network_op(input_vector) shape = output_vector.get_shape().as_list() if len(shape) > 2: raise ValueError("Expected the neural network to output a 1D vector, but got %dD" % (len(shape) - 1)) else: return shape[1] def parse_interface_vector(self, interface_vector): """ pasres the flat interface_vector into its various components with their correct shapes Parameters: ---------- interface_vector: Tensor (batch_size, interface_vector_size) the flattened inetrface vector to be parsed Returns: dict a dictionary with the components of the interface_vector parsed """ parsed = {} r_keys_end = self.word_size * self.read_heads r_strengths_end = r_keys_end + self.read_heads w_key_end = r_strengths_end + self.word_size erase_end = w_key_end + 1 + self.word_size write_end = erase_end + self.word_size free_end = write_end + self.read_heads r_keys_shape = (-1, self.word_size, self.read_heads) r_strengths_shape = (-1, self.read_heads) w_key_shape = (-1, self.word_size, 1) write_shape = erase_shape = (-1, self.word_size) free_shape = (-1, self.read_heads) modes_shape = (-1, 3, self.read_heads) # parsing the vector into its individual components parsed['read_keys'] = tf.reshape(interface_vector[:, :r_keys_end], r_keys_shape) parsed['read_strengths'] = tf.reshape(interface_vector[:, r_keys_end:r_strengths_end], r_strengths_shape) parsed['write_key'] = tf.reshape(interface_vector[:, r_strengths_end:w_key_end], w_key_shape) parsed['write_strength'] = tf.reshape(interface_vector[:, w_key_end], (-1, 1)) parsed['erase_vector'] = tf.reshape(interface_vector[:, w_key_end + 1:erase_end], erase_shape) parsed['write_vector'] = tf.reshape(interface_vector[:, erase_end:write_end], write_shape) parsed['free_gates'] = tf.reshape(interface_vector[:, write_end:free_end], free_shape) parsed['allocation_gate'] = tf.expand_dims(interface_vector[:, free_end], 1) parsed['write_gate'] = tf.expand_dims(interface_vector[:, free_end + 1], 1) parsed['read_modes'] = tf.reshape(interface_vector[:, free_end + 2:], modes_shape) # transforming the components to ensure they're in the right ranges parsed['read_strengths'] = 1 + tf.nn.softplus(parsed['read_strengths']) parsed['write_strength'] = 1 + tf.nn.softplus(parsed['write_strength']) parsed['erase_vector'] = tf.nn.sigmoid(parsed['erase_vector']) parsed['free_gates'] = tf.nn.sigmoid(parsed['free_gates']) parsed['allocation_gate'] = tf.nn.sigmoid(parsed['allocation_gate']) parsed['write_gate'] = tf.nn.sigmoid(parsed['write_gate']) parsed['read_modes'] = tf.nn.softmax(parsed['read_modes'], 1) return parsed def process_input(self, X, last_read_vectors, state=None): """ processes input data through the controller network and returns the pre-output and interface_vector Parameters: ---------- X: Tensor 
(batch_size, input_size) the input data batch last_read_vectors: (batch_size, word_size, read_heads) the last batch of read vectors from memory state: Tuple state vectors if the network is recurrent Returns: Tuple pre-output: Tensor (batch_size, output_size) parsed_interface_vector: dict """ flat_read_vectors = tf.reshape(last_read_vectors, (-1, self.word_size * self.read_heads)) complete_input = tf.concat(1, [X, flat_read_vectors]) nn_output, nn_state = None, None if self.has_recurrent_nn: nn_output, nn_state = self.network_op(complete_input, state) else: nn_output = self.network_op(complete_input) pre_output = tf.matmul(nn_output, self.nn_output_weights) interface = tf.matmul(nn_output, self.interface_weights) parsed_interface = self.parse_interface_vector(interface) if self.has_recurrent_nn: return pre_output, parsed_interface, nn_state else: return pre_output, parsed_interface def final_output(self, pre_output, new_read_vectors): """ returns the final output by taking rececnt memory changes into account Parameters: ---------- pre_output: Tensor (batch_size, output_size) the ouput vector from the input processing step new_read_vectors: Tensor (batch_size, words_size, read_heads) the newly read vectors from the updated memory Returns: Tensor (batch_size, output_size) """ flat_read_vectors = tf.reshape(new_read_vectors, (-1, self.word_size * self.read_heads)) final_output = pre_output + tf.matmul(flat_read_vectors, self.mem_output_weights) return final_output
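# Illustrative sketch, not part of the upstream file: BaseController leaves
# network_vars() and network_op() to subclasses. A minimal feed-forward
# controller, written against the same TF1-era API used above, might look like
# this; the hidden size of 128 is an arbitrary choice for the example.
# Side note: the weight initializer in __init__ above calls
# np.min(1e-2, np.sqrt(...)); the built-in min() appears to be the intent,
# since np.min would interpret its second positional argument as an axis.
class FeedforwardController(BaseController):

    def network_vars(self):
        # a single hidden layer mapping nn_input_size -> 128 units
        self.ff_weights = tf.Variable(
            tf.truncated_normal([self.nn_input_size, 128], stddev=0.1),
            name='ff_weights'
        )
        self.ff_bias = tf.Variable(tf.zeros([128]), name='ff_bias')

    def network_op(self, X):
        # returns a (batch_size, 128) tensor; BaseController infers its
        # output size from this shape in get_nn_output_size()
        return tf.nn.relu(tf.matmul(X, self.ff_weights) + self.ff_bias)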
mit
brezerk/taverna
userauth/tests.py
2
1309
# -*- coding: utf-8 -*- # Copyright (C) 2010 by Alexey S. Malakhov <[email protected]> # Opium <[email protected]> # # This file is part of Taverna # # Taverna is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Taverna is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Taverna. If not, see <http://www.gnu.org/licenses/>. """ This file demonstrates two different styles of tests (one doctest and one unittest). These will both pass when you run "manage.py test". Replace these with more appropriate tests for your application. """ from django.test import TestCase class SimpleTest(TestCase): def test_basic_addition(self): """ Tests that 1 + 1 always equals 2. """ self.failUnlessEqual(1 + 1, 2) __test__ = {"doctest": """ Another way to test that 1 + 1 is equal to 2. >>> 1 + 1 == 2 True """}
gpl-3.0
kvar/ansible
test/units/modules/network/nso/test_nso_verify.py
40
5452
# # Copyright (c) 2017 Cisco and/or its affiliates. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) import json from units.compat.mock import patch from ansible.modules.network.nso import nso_verify from . import nso_module from .nso_module import MockResponse from units.modules.utils import set_module_args class TestNsoVerify(nso_module.TestNsoModule): module = nso_verify @patch('ansible.module_utils.network.nso.nso.open_url') def test_nso_verify_empty_data(self, open_url_mock): calls = [ MockResponse('login', {}, 200, '{}', {'set-cookie': 'id'}), MockResponse('get_system_setting', {'operation': 'version'}, 200, '{"result": "4.4.3"}'), MockResponse('logout', {}, 200, '{"result": {}}'), ] open_url_mock.side_effect = lambda *args, **kwargs: nso_module.mock_call(calls, *args, **kwargs) data = {} set_module_args({ 'username': 'user', 'password': 'password', 'url': 'http://localhost:8080/jsonrpc', 'data': data }) self.execute_module(changed=False) self.assertEqual(0, len(calls)) @patch('ansible.module_utils.network.nso.nso.open_url') def test_nso_verify_violation(self, open_url_mock): devices_schema = nso_module.load_fixture('devices_schema.json') device_schema = nso_module.load_fixture('device_schema.json') description_schema = nso_module.load_fixture('description_schema.json') calls = [ MockResponse('login', {}, 200, '{}', {'set-cookie': 'id'}), MockResponse('get_system_setting', {'operation': 'version'}, 200, '{"result": "4.5.0"}'), MockResponse('get_module_prefix_map', {}, 200, '{"result": {"tailf-ncs": "ncs"}}'), MockResponse('new_trans', {'mode': 'read'}, 200, '{"result": {"th": 1}}'), MockResponse('get_schema', {'path': '/ncs:devices'}, 200, '{"result": %s}' % (json.dumps(devices_schema, ))), MockResponse('get_schema', {'path': '/ncs:devices/device'}, 200, '{"result": %s}' % (json.dumps(device_schema, ))), MockResponse('exists', {'path': '/ncs:devices/device{ce0}'}, 200, '{"result": {"exists": true}}'), MockResponse('get_value', {'path': '/ncs:devices/device{ce0}/description'}, 200, '{"result": {"value": "In Violation"}}'), MockResponse('get_schema', {'path': '/ncs:devices/device/description'}, 200, '{"result": %s}' % (json.dumps(description_schema, ))), MockResponse('logout', {}, 200, '{"result": {}}'), ] open_url_mock.side_effect = lambda *args, **kwargs: nso_module.mock_call(calls, *args, **kwargs) data = nso_module.load_fixture('verify_violation_data.json') set_module_args({ 'username': 'user', 'password': 'password', 'url': 'http://localhost:8080/jsonrpc', 'data': data }) self.execute_module(failed=True, violations=[ {'path': '/ncs:devices/device{ce0}/description', 'expected-value': 'Example Device', 'value': 'In Violation'}, ]) self.assertEqual(0, len(calls)) @patch('ansible.module_utils.network.nso.nso.open_url') def test_nso_verify_ok(self, open_url_mock): devices_schema = nso_module.load_fixture('devices_schema.json') 
device_schema = nso_module.load_fixture('device_schema.json') calls = [ MockResponse('login', {}, 200, '{}', {'set-cookie': 'id'}), MockResponse('get_system_setting', {'operation': 'version'}, 200, '{"result": "4.5.0"}'), MockResponse('get_module_prefix_map', {}, 200, '{"result": {"tailf-ncs": "ncs"}}'), MockResponse('new_trans', {'mode': 'read'}, 200, '{"result": {"th": 1}}'), MockResponse('get_schema', {'path': '/ncs:devices'}, 200, '{"result": %s}' % (json.dumps(devices_schema, ))), MockResponse('get_schema', {'path': '/ncs:devices/device'}, 200, '{"result": %s}' % (json.dumps(device_schema, ))), MockResponse('exists', {'path': '/ncs:devices/device{ce0}'}, 200, '{"result": {"exists": true}}'), MockResponse('get_value', {'path': '/ncs:devices/device{ce0}/description'}, 200, '{"result": {"value": "Example Device"}}'), MockResponse('logout', {}, 200, '{"result": {}}'), ] open_url_mock.side_effect = lambda *args, **kwargs: nso_module.mock_call(calls, *args, **kwargs) data = nso_module.load_fixture('verify_violation_data.json') set_module_args({ 'username': 'user', 'password': 'password', 'url': 'http://localhost:8080/jsonrpc', 'data': data, 'validate_certs': False }) self.execute_module(changed=False) self.assertEqual(0, len(calls))
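# Illustrative sketch, not part of the upstream test file: the tests above hand
# an ordered list of MockResponse objects to nso_module.mock_call and then
# assert that the list has been drained. The real helper lives in the test
# package's fixtures and may differ; one plausible shape for such a dispatcher
# is simply:
def example_mock_call(calls, *args, **kwargs):
    # pop the next expected response in order; a fuller implementation would
    # also verify that the outgoing JSON-RPC request matches the method and
    # params the MockResponse was constructed with before returning it
    return calls.pop(0)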
gpl-3.0
vlinhd11/vlinhd11-android-scripting
python/src/Lib/_LWPCookieJar.py
267
6553
"""Load / save to libwww-perl (LWP) format files. Actually, the format is slightly extended from that used by LWP's (libwww-perl's) HTTP::Cookies, to avoid losing some RFC 2965 information not recorded by LWP. It uses the version string "2.0", though really there isn't an LWP Cookies 2.0 format. This indicates that there is extra information in here (domain_dot and # port_spec) while still being compatible with libwww-perl, I hope. """ import time, re from cookielib import (_warn_unhandled_exception, FileCookieJar, LoadError, Cookie, MISSING_FILENAME_TEXT, join_header_words, split_header_words, iso2time, time2isoz) def lwp_cookie_str(cookie): """Return string representation of Cookie in an the LWP cookie file format. Actually, the format is extended a bit -- see module docstring. """ h = [(cookie.name, cookie.value), ("path", cookie.path), ("domain", cookie.domain)] if cookie.port is not None: h.append(("port", cookie.port)) if cookie.path_specified: h.append(("path_spec", None)) if cookie.port_specified: h.append(("port_spec", None)) if cookie.domain_initial_dot: h.append(("domain_dot", None)) if cookie.secure: h.append(("secure", None)) if cookie.expires: h.append(("expires", time2isoz(float(cookie.expires)))) if cookie.discard: h.append(("discard", None)) if cookie.comment: h.append(("comment", cookie.comment)) if cookie.comment_url: h.append(("commenturl", cookie.comment_url)) keys = cookie._rest.keys() keys.sort() for k in keys: h.append((k, str(cookie._rest[k]))) h.append(("version", str(cookie.version))) return join_header_words([h]) class LWPCookieJar(FileCookieJar): """ The LWPCookieJar saves a sequence of"Set-Cookie3" lines. "Set-Cookie3" is the format used by the libwww-perl libary, not known to be compatible with any browser, but which is easy to read and doesn't lose information about RFC 2965 cookies. Additional methods as_lwp_str(ignore_discard=True, ignore_expired=True) """ def as_lwp_str(self, ignore_discard=True, ignore_expires=True): """Return cookies as a string of "\n"-separated "Set-Cookie3" headers. ignore_discard and ignore_expires: see docstring for FileCookieJar.save """ now = time.time() r = [] for cookie in self: if not ignore_discard and cookie.discard: continue if not ignore_expires and cookie.is_expired(now): continue r.append("Set-Cookie3: %s" % lwp_cookie_str(cookie)) return "\n".join(r+[""]) def save(self, filename=None, ignore_discard=False, ignore_expires=False): if filename is None: if self.filename is not None: filename = self.filename else: raise ValueError(MISSING_FILENAME_TEXT) f = open(filename, "w") try: # There really isn't an LWP Cookies 2.0 format, but this indicates # that there is extra information in here (domain_dot and # port_spec) while still being compatible with libwww-perl, I hope. 
f.write("#LWP-Cookies-2.0\n") f.write(self.as_lwp_str(ignore_discard, ignore_expires)) finally: f.close() def _really_load(self, f, filename, ignore_discard, ignore_expires): magic = f.readline() if not re.search(self.magic_re, magic): msg = ("%r does not look like a Set-Cookie3 (LWP) format " "file" % filename) raise LoadError(msg) now = time.time() header = "Set-Cookie3:" boolean_attrs = ("port_spec", "path_spec", "domain_dot", "secure", "discard") value_attrs = ("version", "port", "path", "domain", "expires", "comment", "commenturl") try: while 1: line = f.readline() if line == "": break if not line.startswith(header): continue line = line[len(header):].strip() for data in split_header_words([line]): name, value = data[0] standard = {} rest = {} for k in boolean_attrs: standard[k] = False for k, v in data[1:]: if k is not None: lc = k.lower() else: lc = None # don't lose case distinction for unknown fields if (lc in value_attrs) or (lc in boolean_attrs): k = lc if k in boolean_attrs: if v is None: v = True standard[k] = v elif k in value_attrs: standard[k] = v else: rest[k] = v h = standard.get expires = h("expires") discard = h("discard") if expires is not None: expires = iso2time(expires) if expires is None: discard = True domain = h("domain") domain_specified = domain.startswith(".") c = Cookie(h("version"), name, value, h("port"), h("port_spec"), domain, domain_specified, h("domain_dot"), h("path"), h("path_spec"), h("secure"), expires, discard, h("comment"), h("commenturl"), rest) if not ignore_discard and c.discard: continue if not ignore_expires and c.is_expired(now): continue self.set_cookie(c) except IOError: raise except Exception: _warn_unhandled_exception() raise LoadError("invalid Set-Cookie3 format file %r: %r" % (filename, line))
apache-2.0
tedder/ansible
test/units/modules/network/f5/test_bigip_pool.py
25
17853
# -*- coding: utf-8 -*- # # Copyright (c) 2017 F5 Networks Inc. # GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json import pytest import sys if sys.version_info < (2, 7): pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7") from ansible.module_utils.basic import AnsibleModule try: from library.modules.bigip_pool import ApiParameters from library.modules.bigip_pool import ModuleParameters from library.modules.bigip_pool import ModuleManager from library.modules.bigip_pool import ArgumentSpec from library.module_utils.network.f5.common import F5ModuleError # In Ansible 2.8, Ansible changed import paths. from test.units.compat import unittest from test.units.compat.mock import Mock from test.units.compat.mock import patch from test.units.modules.utils import set_module_args except ImportError: from ansible.modules.network.f5.bigip_pool import ApiParameters from ansible.modules.network.f5.bigip_pool import ModuleParameters from ansible.modules.network.f5.bigip_pool import ModuleManager from ansible.modules.network.f5.bigip_pool import ArgumentSpec from ansible.module_utils.network.f5.common import F5ModuleError # Ansible 2.8 imports from units.compat import unittest from units.compat.mock import Mock from units.compat.mock import patch from units.modules.utils import set_module_args fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(name): path = os.path.join(fixture_path, name) if path in fixture_data: return fixture_data[path] with open(path) as f: data = f.read() try: data = json.loads(data) except Exception: pass fixture_data[path] = data return data class TestParameters(unittest.TestCase): def test_module_parameters(self): args = dict( monitor_type='m_of_n', monitors=['/Common/Fake', '/Common/Fake2'], quorum=1, slow_ramp_time=200, reselect_tries=5, service_down_action='drop' ) p = ModuleParameters(params=args) assert p.monitor_type == 'm_of_n' assert p.quorum == 1 assert p.monitors == 'min 1 of { /Common/Fake /Common/Fake2 }' assert p.slow_ramp_time == 200 assert p.reselect_tries == 5 assert p.service_down_action == 'drop' def test_api_parameters(self): args = dict( monitor="/Common/Fake and /Common/Fake2 ", slowRampTime=200, reselectTries=5, serviceDownAction='drop' ) p = ApiParameters(params=args) assert p.monitors == '/Common/Fake and /Common/Fake2' assert p.slow_ramp_time == 200 assert p.reselect_tries == 5 assert p.service_down_action == 'drop' def test_unknown_module_lb_method(self): args = dict( lb_method='obscure_hyphenated_fake_method', ) with pytest.raises(F5ModuleError): p = ModuleParameters(params=args) assert p.lb_method == 'foo' def test_unknown_api_lb_method(self): args = dict( loadBalancingMode='obscure_hypenated_fake_method' ) with pytest.raises(F5ModuleError): p = ApiParameters(params=args) assert p.lb_method == 'foo' class TestManager(unittest.TestCase): def setUp(self): self.spec = ArgumentSpec() def test_create_pool(self, *args): set_module_args(dict( pool='fake_pool', description='fakepool', service_down_action='drop', lb_method='round-robin', partition='Common', slow_ramp_time=10, reselect_tries=1, provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode, mutually_exclusive=self.spec.mutually_exclusive, 
required_one_of=self.spec.required_one_of ) mm = ModuleManager(module=module) mm.create_on_device = Mock(return_value=True) mm.exists = Mock(return_value=False) results = mm.exec_module() assert results['changed'] is True assert results['name'] == 'fake_pool' assert results['description'] == 'fakepool' assert results['service_down_action'] == 'drop' assert results['lb_method'] == 'round-robin' assert results['slow_ramp_time'] == 10 assert results['reselect_tries'] == 1 def test_create_pool_monitor_type_missing(self, *args): set_module_args(dict( pool='fake_pool', lb_method='round-robin', partition='Common', monitors=['/Common/tcp', '/Common/http'], provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode, mutually_exclusive=self.spec.mutually_exclusive, required_one_of=self.spec.required_one_of ) mm = ModuleManager(module=module) mm.create_on_device = Mock(return_value=True) mm.exists = Mock(return_value=False) results = mm.exec_module() assert results['changed'] is True assert results['name'] == 'fake_pool' assert results['monitors'] == ['/Common/http', '/Common/tcp'] assert results['monitor_type'] == 'and_list' def test_create_pool_monitors_missing(self, *args): set_module_args(dict( pool='fake_pool', lb_method='round-robin', partition='Common', monitor_type='and_list', provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode, mutually_exclusive=self.spec.mutually_exclusive, required_one_of=self.spec.required_one_of ) mm = ModuleManager(module=module) mm.create_on_device = Mock(return_value=True) mm.exists = Mock(return_value=False) msg = "The 'monitors' parameter cannot be empty when " \ "'monitor_type' parameter is specified" with pytest.raises(F5ModuleError) as err: mm.exec_module() assert str(err.value) == msg def test_create_pool_quorum_missing(self, *args): set_module_args(dict( pool='fake_pool', lb_method='round-robin', partition='Common', monitor_type='m_of_n', monitors=['/Common/tcp', '/Common/http'], provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode, mutually_exclusive=self.spec.mutually_exclusive, required_one_of=self.spec.required_one_of ) mm = ModuleManager(module=module) mm.create_on_device = Mock(return_value=True) mm.exists = Mock(return_value=False) msg = "Quorum value must be specified with monitor_type 'm_of_n'." 
with pytest.raises(F5ModuleError) as err: mm.exec_module() assert str(err.value) == msg def test_create_pool_monitor_and_list(self, *args): set_module_args(dict( pool='fake_pool', partition='Common', monitor_type='and_list', monitors=['/Common/tcp', '/Common/http'], provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode, mutually_exclusive=self.spec.mutually_exclusive, required_one_of=self.spec.required_one_of ) mm = ModuleManager(module=module) mm.create_on_device = Mock(return_value=True) mm.exists = Mock(return_value=False) results = mm.exec_module() assert results['changed'] is True assert results['name'] == 'fake_pool' assert results['monitors'] == ['/Common/http', '/Common/tcp'] assert results['monitor_type'] == 'and_list' def test_create_pool_monitor_m_of_n(self, *args): set_module_args(dict( pool='fake_pool', partition='Common', monitor_type='m_of_n', quorum=1, monitors=['/Common/tcp', '/Common/http'], provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode, mutually_exclusive=self.spec.mutually_exclusive, required_one_of=self.spec.required_one_of ) mm = ModuleManager(module=module) mm.create_on_device = Mock(return_value=True) mm.exists = Mock(return_value=False) results = mm.exec_module() assert results['changed'] is True assert results['name'] == 'fake_pool' assert results['monitors'] == ['/Common/http', '/Common/tcp'] assert results['monitor_type'] == 'm_of_n' def test_update_monitors(self, *args): set_module_args(dict( name='test_pool', partition='Common', monitor_type='and_list', monitors=['/Common/http', '/Common/tcp'], provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode, mutually_exclusive=self.spec.mutually_exclusive, required_one_of=self.spec.required_one_of ) mm = ModuleManager(module=module) current = ApiParameters(params=load_fixture('load_ltm_pool.json')) mm.update_on_device = Mock(return_value=True) mm.exists = Mock(return_value=True) mm.read_current_from_device = Mock(return_value=current) results = mm.exec_module() assert results['changed'] is True assert results['monitor_type'] == 'and_list' def test_create_pool_monitor_and_list_no_partition(self, *args): set_module_args(dict( pool='fake_pool', monitor_type='and_list', monitors=['tcp', 'http'], provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode, mutually_exclusive=self.spec.mutually_exclusive, required_one_of=self.spec.required_one_of ) mm = ModuleManager(module=module) mm.create_on_device = Mock(return_value=True) mm.exists = Mock(return_value=False) results = mm.exec_module() assert results['changed'] is True assert results['name'] == 'fake_pool' assert results['monitors'] == ['/Common/http', '/Common/tcp'] assert results['monitor_type'] == 'and_list' def test_create_pool_monitor_m_of_n_no_partition(self, *args): set_module_args(dict( pool='fake_pool', monitor_type='m_of_n', quorum=1, monitors=['tcp', 'http'], provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, 
supports_check_mode=self.spec.supports_check_mode, mutually_exclusive=self.spec.mutually_exclusive, required_one_of=self.spec.required_one_of ) mm = ModuleManager(module=module) mm.create_on_device = Mock(return_value=True) mm.exists = Mock(return_value=False) results = mm.exec_module() assert results['changed'] is True assert results['name'] == 'fake_pool' assert results['monitors'] == ['/Common/http', '/Common/tcp'] assert results['monitor_type'] == 'm_of_n' def test_create_pool_monitor_and_list_custom_partition(self, *args): set_module_args(dict( pool='fake_pool', partition='Testing', monitor_type='and_list', monitors=['tcp', 'http'], provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode, mutually_exclusive=self.spec.mutually_exclusive, required_one_of=self.spec.required_one_of ) mm = ModuleManager(module=module) mm.create_on_device = Mock(return_value=True) mm.exists = Mock(return_value=False) results = mm.exec_module() assert results['changed'] is True assert results['name'] == 'fake_pool' assert results['monitors'] == ['/Testing/http', '/Testing/tcp'] assert results['monitor_type'] == 'and_list' def test_create_pool_monitor_m_of_n_custom_partition(self, *args): set_module_args(dict( pool='fake_pool', partition='Testing', monitor_type='m_of_n', quorum=1, monitors=['tcp', 'http'], provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode, mutually_exclusive=self.spec.mutually_exclusive, required_one_of=self.spec.required_one_of ) mm = ModuleManager(module=module) mm.create_on_device = Mock(return_value=True) mm.exists = Mock(return_value=False) results = mm.exec_module() assert results['changed'] is True assert results['name'] == 'fake_pool' assert results['monitors'] == ['/Testing/http', '/Testing/tcp'] assert results['monitor_type'] == 'm_of_n' def test_create_pool_with_metadata(self, *args): set_module_args(dict( pool='fake_pool', metadata=dict(ansible='2.4'), provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode, mutually_exclusive=self.spec.mutually_exclusive, required_one_of=self.spec.required_one_of ) mm = ModuleManager(module=module) mm.create_on_device = Mock(return_value=True) mm.exists = Mock(return_value=False) results = mm.exec_module() assert results['changed'] is True assert results['name'] == 'fake_pool' assert 'metadata' in results assert 'ansible' in results['metadata'] assert results['metadata']['ansible'] == '2.4' def test_create_aggregate_pools(self, *args): set_module_args(dict( aggregate=[ dict( pool='fake_pool', description='fakepool', service_down_action='drop', lb_method='round-robin', partition='Common', slow_ramp_time=10, reselect_tries=1, ), dict( pool='fake_pool2', description='fakepool2', service_down_action='drop', lb_method='predictive-node', partition='Common', slow_ramp_time=110, reselect_tries=2, ) ], provider=dict( server='localhost', password='password', user='admin' ) )) module = AnsibleModule( argument_spec=self.spec.argument_spec, supports_check_mode=self.spec.supports_check_mode, mutually_exclusive=self.spec.mutually_exclusive, required_one_of=self.spec.required_one_of ) mm = ModuleManager(module=module) mm.create_on_device = Mock(return_value=True) 
mm.exists = Mock(return_value=False) results = mm.exec_module() assert results['changed'] is True
gpl-3.0
nfredrik/pyModelStuff
pymodel/StateCoverage.py
2
2493
""" StateCoverage: choose the (aname, args) whose next state has been used least """ import sys import random # Tester state is a bag of states: [ ( state , n of times used ), ... ] # Implement bag of states as list of pairs, not dictionary with state keys # because our states are themselves dictionaries, which are not hashable. coverage = list() # Functions for maintaining coverage def additems(coverage, enabled): # coverage(after) is empty if coverage(before) is empty and enabled is empty # 'if' prevents dup of keys in coverage(before) but might be dups in enabled coverage += [(next, 0) for (a,args,result,next,properties) in enabled if next not in [ s for (s,n) in coverage] ] def inbag(coverage, x): # False if coverage is empty, but doesn't crash return x in [ s for (s,n) in coverage ] def count(coverage, x): # always return first occurence, do dups matter? # index [0] fails if list is empty, is that possible? return [ n for (xitem,n) in coverage if xitem == x ][0] def incr(coverage, x): # get index and replace there, always update first index, do dups matter? xs = [ s for (s,n) in coverage ] i = xs.index(x) # raise ValueError if x not in xs, is that possible? coverage[i] = (x, count(coverage, x) + 1) def select_action(enabled): """ Choose the action + args whose next state has been used the least If more than one action has been used that many of times, choose randomly """ # print 'enabled %s, coverage: %s' % (enabled, coverage) if not enabled: # empty return (None, None) else: additems(coverage, enabled) # next line fails if enabled is empty - but it isn't, see above # count in next line fails if coverage is empty - possible? # no, because additems (above) will execute, and enabled is not empty least = min([ count(coverage,next) for (aname,args,result,next,properties) in enabled ]) aleast = [(aname,args,next) for (aname,args,result,next,properties) in enabled if count(coverage, next) == least] # next line fails if aleast is empty - is that possible? # could be possible if none in enabled results in next state in least # BUT that's not possible because least (above) is built using enabled (aname,args,next) = random.choice(aleast) incr(coverage,next) return (aname,args)
bsd-3-clause
duncanmmacleod/gwpy
gwpy/timeseries/io/hdf5.py
3
4214
# -*- coding: utf-8 -*- # Copyright (C) Duncan Macleod (2014-2020) # # This file is part of GWpy. # # GWpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # GWpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GWpy. If not, see <http://www.gnu.org/licenses/>. """This module attaches the HDF5 input output methods to the TimeSeries. """ from astropy import units from ...io import registry as io_registry from ...io.hdf5 import (identify_hdf5, with_read_hdf5, with_write_hdf5) from ...types.io.hdf5 import (read_hdf5_array, write_hdf5_series) from .. import (TimeSeries, TimeSeriesDict, StateVector, StateVectorDict) SEC_UNIT = units.second __author__ = 'Duncan Macleod <[email protected]>' # -- read --------------------------------------------------------------------- def read_hdf5_timeseries(h5f, path=None, start=None, end=None, **kwargs): """Read a `TimeSeries` from HDF5 """ # read data kwargs.setdefault('array_type', TimeSeries) series = read_hdf5_array(h5f, path=path, **kwargs) # crop if needed if start is not None: start = max(start, series.span[0]) if end is not None: end = min(end, series.span[1]) if start is not None or end is not None: return series.crop(start, end) return series def _is_timeseries_dataset(dataset): """Returns `True` if a dataset contains `TimeSeries` data """ return SEC_UNIT.is_equivalent(dataset.attrs.get('xunit', 'undef')) @with_read_hdf5 def read_hdf5_dict(h5f, names=None, group=None, **kwargs): """Read a `TimeSeriesDict` from HDF5 """ # find group from which to read if group: h5g = h5f[group] else: h5g = h5f # find list of names to read if names is None: names = [key for key in h5g if _is_timeseries_dataset(h5g[key])] # read names out = kwargs.pop('dict_type', TimeSeriesDict)() kwargs.setdefault('array_type', out.EntryClass) for name in names: out[name] = read_hdf5_timeseries(h5g[name], **kwargs) return out def read_hdf5_factory(data_class): if issubclass(data_class, dict): def read_(*args, **kwargs): kwargs.setdefault('dict_type', data_class) return read_hdf5_dict(*args, **kwargs) else: def read_(*args, **kwargs): kwargs.setdefault('array_type', data_class) return read_hdf5_timeseries(*args, **kwargs) return read_ # -- write -------------------------------------------------------------------- @with_write_hdf5 def write_hdf5_dict(tsdict, h5f, group=None, **kwargs): """Write a `TimeSeriesBaseDict` to HDF5 Each series in the dict is written as a dataset in the group """ # create group if needed if group and group not in h5f: h5g = h5f.create_group(group) elif group: h5g = h5f[group] else: h5g = h5f # write each timeseries kwargs.setdefault('format', 'hdf5') for key, series in tsdict.items(): series.write(h5g, path=str(key), **kwargs) # -- register ----------------------------------------------------------------- # series classes for series_class in (TimeSeries, StateVector): reader = read_hdf5_factory(series_class) io_registry.register_reader('hdf5', series_class, reader) io_registry.register_writer('hdf5', series_class, write_hdf5_series) io_registry.register_identifier('hdf5', series_class, identify_hdf5) # dict classes for 
dict_class in (TimeSeriesDict, StateVectorDict): reader = read_hdf5_factory(dict_class) io_registry.register_reader('hdf5', dict_class, reader) io_registry.register_writer('hdf5', dict_class, write_hdf5_dict) io_registry.register_identifier('hdf5', dict_class, identify_hdf5)
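# Illustrative sketch, not part of the upstream module: the registrations above
# expose these readers/writers through GWpy's unified I/O interface, so user
# code would round-trip a series roughly like this (the file name, dataset path
# and channel name are made up):
if __name__ == "__main__":
    import numpy
    from gwpy.timeseries import TimeSeries as _TimeSeries

    data = _TimeSeries(numpy.random.normal(size=1024),
                       sample_rate=256, name='X1:TEST')
    data.write('example.h5', path='X1:TEST', format='hdf5')
    copy = _TimeSeries.read('example.h5', path='X1:TEST', format='hdf5')
    print(copy.span)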
gpl-3.0
cchurch/ansible
test/units/modules/network/fortimanager/test_fmgr_fwpol_package.py
38
4044
# Copyright 2018 Fortinet, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <https://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import json from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler import pytest try: from ansible.modules.network.fortimanager import fmgr_fwpol_package except ImportError: pytest.skip("Could not load required modules for testing", allow_module_level=True) def load_fixtures(): fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') + "/{filename}.json".format( filename=os.path.splitext(os.path.basename(__file__))[0]) try: with open(fixture_path, "r") as fixture_file: fixture_data = json.load(fixture_file) except IOError: return [] return [fixture_data] @pytest.fixture(autouse=True) def module_mock(mocker): connection_class_mock = mocker.patch('ansible.module_utils.basic.AnsibleModule') return connection_class_mock @pytest.fixture(autouse=True) def connection_mock(mocker): connection_class_mock = mocker.patch('ansible.modules.network.fortimanager.fmgr_fwpol_package.Connection') return connection_class_mock @pytest.fixture(scope="function", params=load_fixtures()) def fixture_data(request): func_name = request.function.__name__.replace("test_", "") return request.param.get(func_name, None) fmg_instance = FortiManagerHandler(connection_mock, module_mock) def test_fmgr_fwpol_package(fixture_data, mocker): mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request", side_effect=fixture_data) # Test using fixture 1 # output = fmgr_fwpol_package.fmgr_fwpol_package(fmg_instance, fixture_data[0]['paramgram_used']) assert output['raw_response']['status']['code'] == 0 # Test using fixture 2 # output = fmgr_fwpol_package.fmgr_fwpol_package(fmg_instance, fixture_data[1]['paramgram_used']) assert output['raw_response']['status']['code'] == 0 # Test using fixture 3 # output = fmgr_fwpol_package.fmgr_fwpol_package(fmg_instance, fixture_data[2]['paramgram_used']) assert output['raw_response']['status']['code'] == 0 # Test using fixture 4 # output = fmgr_fwpol_package.fmgr_fwpol_package(fmg_instance, fixture_data[3]['paramgram_used']) assert output['raw_response']['status']['code'] == 0 def test_fmgr_fwpol_package_folder(fixture_data, mocker): mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request", side_effect=fixture_data) # Test using fixture 1 # output = fmgr_fwpol_package.fmgr_fwpol_package_folder(fmg_instance, fixture_data[0]['paramgram_used']) assert output['raw_response']['status']['code'] == 0 # Test using fixture 2 # output = fmgr_fwpol_package.fmgr_fwpol_package_folder(fmg_instance, fixture_data[1]['paramgram_used']) assert output['raw_response']['status']['code'] == 0 # Test using fixture 3 # output = fmgr_fwpol_package.fmgr_fwpol_package_folder(fmg_instance, 
fixture_data[2]['paramgram_used']) assert output['raw_response']['status']['code'] == 0 # Test using fixture 4 # output = fmgr_fwpol_package.fmgr_fwpol_package_folder(fmg_instance, fixture_data[3]['paramgram_used']) assert output['raw_response']['status']['code'] == 0
gpl-3.0
adngdb/socorro
webapp-django/crashstats/tokens/tests/test_middleware.py
3
4138
import datetime import json from nose.tools import eq_, ok_, assert_raises from django.contrib.auth.models import User, Permission from django.conf import settings from django.test.client import RequestFactory from django.contrib.auth.middleware import AuthenticationMiddleware from django.contrib.sessions.middleware import SessionMiddleware from django.core.exceptions import ImproperlyConfigured from django.contrib.contenttypes.models import ContentType from crashstats.base.tests.testbase import DjangoTestCase from crashstats.tokens import models from crashstats.tokens.middleware import APIAuthenticationMiddleware class TestMiddleware(DjangoTestCase): django_session_middleware = SessionMiddleware() django_auth_middleware = AuthenticationMiddleware() middleware = APIAuthenticationMiddleware() def test_impropertly_configured(self): request = RequestFactory().get('/') assert_raises( ImproperlyConfigured, self.middleware.process_request, request ) def _get_request(self, **headers): # boilerplate stuff request = RequestFactory(**headers).get('/') self.django_session_middleware.process_request(request) self.django_auth_middleware.process_request(request) assert request.user return request def test_no_token_key(self): request = self._get_request() eq_(self.middleware.process_request(request), None) def test_non_existant_token_key(self): request = self._get_request(HTTP_AUTH_TOKEN='xxx') response = self.middleware.process_request(request) eq_(response.status_code, 403) # the response content will be JSON result = json.loads(response.content) eq_(result['error'], 'API Token not matched') def test_expired_token(self): user = User.objects.create(username='peterbe') token = models.Token.objects.create( user=user, ) token.expires -= datetime.timedelta( days=settings.TOKENS_DEFAULT_EXPIRATION_DAYS ) token.save() request = self._get_request(HTTP_AUTH_TOKEN=token.key) response = self.middleware.process_request(request) eq_(response.status_code, 403) result = json.loads(response.content) eq_(result['error'], 'API Token found but expired') def test_token_valid(self): user = User.objects.create(username='peterbe') token = models.Token.objects.create( user=user, ) request = self._get_request(HTTP_AUTH_TOKEN=token.key) response = self.middleware.process_request(request) eq_(response, None) eq_(request.user, user) def test_token_permissions(self): user = User.objects.create(username='peterbe') token = models.Token.objects.create( user=user, ) ct, __ = ContentType.objects.get_or_create( model='', app_label='crashstats', ) permission = Permission.objects.create( codename='play', content_type=ct ) token.permissions.add(permission) Permission.objects.create( codename='fire', content_type=ct ) # deliberately not adding this second permission request = self._get_request(HTTP_AUTH_TOKEN=token.key) # do the magic to the request self.middleware.process_request(request) eq_(request.user, user) ok_(request.user.has_perm('crashstats.play')) ok_(not request.user.has_perm('crashstats.fire')) def test_token_on_inactive_user(self): user = User.objects.create(username='peterbe') user.is_active = False user.save() token = models.Token.objects.create( user=user, ) request = self._get_request(HTTP_AUTH_TOKEN=token.key) response = self.middleware.process_request(request) eq_(response.status_code, 403) result = json.loads(response.content) eq_(result['error'], 'User of API token not active')
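# Illustrative sketch, not part of the upstream test file: the middleware under
# test reads the token from the HTTP_AUTH_TOKEN META key, i.e. an "Auth-Token"
# request header. Client code would therefore look roughly like this (the URL,
# endpoint and token value are made up):
if __name__ == "__main__":
    import requests
    response = requests.get(
        "https://crash-stats.example.com/api/SomeEndpoint/",
        headers={"Auth-Token": "0123456789abcdef"},
    )
    print(response.status_code)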
mpl-2.0
svirusxxx/cjdns
node_build/dependencies/libuv/build/gyp/pylib/gyp/generator/dump_dependency_json.py
899
2768
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import collections import os import gyp import gyp.common import gyp.msvs_emulation import json import sys generator_supports_multiple_toolsets = True generator_wants_static_library_dependencies_adjusted = False generator_default_variables = { } for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']: # Some gyp steps fail if these are empty(!). generator_default_variables[dirname] = 'dir' for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME', 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX', 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX', 'CONFIGURATION_NAME']: generator_default_variables[unused] = '' def CalculateVariables(default_variables, params): generator_flags = params.get('generator_flags', {}) for key, val in generator_flags.items(): default_variables.setdefault(key, val) default_variables.setdefault('OS', gyp.common.GetFlavor(params)) flavor = gyp.common.GetFlavor(params) if flavor =='win': # Copy additional generator configuration data from VS, which is shared # by the Windows Ninja generator. import gyp.generator.msvs as msvs_generator generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', []) gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) def CalculateGeneratorInputInfo(params): """Calculate the generator specific info that gets fed to input (called by gyp).""" generator_flags = params.get('generator_flags', {}) if generator_flags.get('adjust_static_libraries', False): global generator_wants_static_library_dependencies_adjusted generator_wants_static_library_dependencies_adjusted = True def GenerateOutput(target_list, target_dicts, data, params): # Map of target -> list of targets it depends on. edges = {} # Queue of targets to visit. targets_to_visit = target_list[:] while len(targets_to_visit) > 0: target = targets_to_visit.pop() if target in edges: continue edges[target] = [] for dep in target_dicts[target].get('dependencies', []): edges[target].append(dep) targets_to_visit.append(dep) filename = 'dump.json' f = open(filename, 'w') json.dump(edges, f) f.close() print 'Wrote json to %s.' % filename
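# Illustrative sketch, not part of the upstream generator: GenerateOutput()
# above writes dump.json as a flat {target: [direct dependencies]} map.
# Consuming it to collect transitive dependencies is a small graph walk (the
# target name in the usage comment is made up):
def transitive_dependencies(edges, target):
    seen = set()
    stack = [target]
    while stack:
        for dep in edges.get(stack.pop(), []):
            if dep not in seen:
                seen.add(dep)
                stack.append(dep)
    return seen

# Usage:
#   with open('dump.json') as f:
#       edges = json.load(f)
#   deps = transitive_dependencies(edges, 'src/app.gyp:app#target')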
gpl-3.0
coala-analyzer/coala-quickstart
coala_quickstart/coala_quickstart.py
1
5542
import argparse import logging import os import sys from pyprint.ConsolePrinter import ConsolePrinter from coala_utils.FilePathCompleter import FilePathCompleter from coala_utils.Question import ask_question from coala_quickstart import __version__ from coala_quickstart.interaction.Logo import print_welcome_message from coala_quickstart.generation.InfoCollector import collect_info from coala_quickstart.generation.Project import ( ask_to_select_languages, get_used_languages, print_used_languages, valid_path, ) from coala_quickstart.generation.FileGlobs import get_project_files from coala_quickstart.Strings import PROJECT_DIR_HELP from coala_quickstart.generation.Bears import ( filter_relevant_bears, print_relevant_bears, get_non_optional_settings_bears, remove_unusable_bears, ) from coala_quickstart.generation.Settings import ( generate_settings, write_coafile) from coala_quickstart.generation.SettingsClass import ( collect_bear_settings) from coala_quickstart.green_mode.green_mode_core import green_mode MAX_ARGS_GREEN_MODE = 5 MAX_VALUES_GREEN_MODE = 5 def _get_arg_parser(): description = """ coala-quickstart automatically creates a .coafile for use by coala. """ arg_parser = argparse.ArgumentParser( prog='coala-quickstart', description=description, add_help=True ) arg_parser.add_argument( '-v', '--version', action='version', version=__version__) arg_parser.add_argument( '-C', '--non-interactive', const=True, action='store_const', help='run coala-quickstart in non interactive mode') arg_parser.add_argument( '--ci', action='store_const', dest='non_interactive', const=True, help='continuous integration run, alias for `--non-interactive`') arg_parser.add_argument( '--allow-incomplete-sections', action='store_const', dest='incomplete_sections', const=True, help='generate coafile with only `bears` and `files` field in sections') arg_parser.add_argument( '--no-filter-by-capabilities', action='store_const', dest='no_filter_by_capabilities', const=True, help='disable filtering of bears by their capabilties.') arg_parser.add_argument( '-g', '--green-mode', const=True, action='store_const', help='Produce "green" config files for you project. Green config files' ' don\'t generate any error in the project and match the coala' ' configuration as closely as possible to your project.') arg_parser.add_argument( '--max-args', nargs='?', type=int, help='Maximum number of optional settings allowed to be checked' ' by green_mode for each bear.') arg_parser.add_argument( '--max-values', nargs='?', type=int, help='Maximum number of values to optional settings allowed to be' ' checked by green_mode for each bear.') return arg_parser def main(): global MAX_ARGS_GREEN_MODE, MAX_VALUES_GREEN_MODE arg_parser = _get_arg_parser() args = arg_parser.parse_args() logging.basicConfig(stream=sys.stdout) printer = ConsolePrinter() logging.getLogger(__name__) fpc = None project_dir = os.getcwd() if args.green_mode: args.no_filter_by_capabilities = None args.incomplete_sections = None if args.max_args: MAX_ARGS_GREEN_MODE = args.max_args if args.max_values: MAX_VALUES_GREEN_MODE = args.max_values if not args.green_mode and (args.max_args or args.max_values): logging.warning(' --max-args and --max-values can be used ' 'only with --green-mode. 
The arguments will ' 'be ignored.') if not args.non_interactive and not args.green_mode: fpc = FilePathCompleter() fpc.activate() print_welcome_message(printer) printer.print(PROJECT_DIR_HELP) project_dir = ask_question( 'What is your project directory?', default=project_dir, typecast=valid_path) fpc.deactivate() project_files, ignore_globs = get_project_files( None, printer, project_dir, fpc, args.non_interactive) used_languages = list(get_used_languages(project_files)) used_languages = ask_to_select_languages(used_languages, printer, args.non_interactive) extracted_information = collect_info(project_dir) relevant_bears = filter_relevant_bears( used_languages, printer, arg_parser, extracted_information) if args.green_mode: bear_settings_obj = collect_bear_settings(relevant_bears) green_mode( project_dir, ignore_globs, relevant_bears, bear_settings_obj, MAX_ARGS_GREEN_MODE, MAX_VALUES_GREEN_MODE, project_files, printer, ) exit() print_relevant_bears(printer, relevant_bears) if args.non_interactive and not args.incomplete_sections: unusable_bears = get_non_optional_settings_bears(relevant_bears) remove_unusable_bears(relevant_bears, unusable_bears) print_relevant_bears(printer, relevant_bears, 'usable') settings = generate_settings( project_dir, project_files, ignore_globs, relevant_bears, extracted_information, args.incomplete_sections) write_coafile(printer, project_dir, settings)
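# Illustrative sketch, not part of the upstream module: the parser built by
# _get_arg_parser() can be exercised directly, which is a quick way to see how
# the green-mode limits are wired through (the flag values are arbitrary):
if __name__ == '__main__':
    example_args = _get_arg_parser().parse_args(
        ['--green-mode', '--max-args', '3', '--max-values', '2'])
    print(example_args.green_mode, example_args.max_args, example_args.max_values)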
agpl-3.0
mytliulei/DCNRobotInstallPackages
windows/win32/pygal-1.7.0/setup.py
1
2698
#!/usr/bin/env python # -*- coding: utf-8 -*- # This file is part of pygal # # A python svg graph plotting library # Copyright © 2012-2014 Kozea # # This library is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) any # later version. # # This library is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # details. # # You should have received a copy of the GNU Lesser General Public License # along with pygal. If not, see <http://www.gnu.org/licenses/>. import os import sys import re from setuptools import setup, find_packages from setuptools.command.test import test as TestCommand class PyTest(TestCommand): def finalize_options(self): TestCommand.finalize_options(self) self.test_args = [] self.test_suite = True def run_tests(self): # import here, cause outside the eggs aren't loaded import pytest errno = pytest.main(self.test_args) sys.exit(errno) ROOT = os.path.dirname(__file__) # Explicitly specify the encoding of pygal/__init__.py if we're on py3. kwargs = {} if sys.version_info[0] == 3: kwargs['encoding'] = 'utf-8' with open(os.path.join(ROOT, 'pygal', '__init__.py'), **kwargs) as fd: __version__ = re.search("__version__ = '([^']+)'", fd.read()).group(1) setup( name="pygal", version=__version__, description="A python svg graph plotting library", author="Kozea", url="http://pygal.org/", author_email="[email protected]", license="GNU LGPL v3+", platforms="Any", packages=find_packages(), provides=['pygal'], scripts=["pygal_gen.py"], keywords=[ "svg", "chart", "graph", "diagram", "plot", "histogram", "kiviat"], tests_require=["pytest", "pyquery", "flask", "cairosvg"], cmdclass={'test': PyTest}, package_data={'pygal': ['css/*', 'graph/*.svg']}, extras_require={ 'lxml': ['lxml'], 'png': ['cairosvg'] }, classifiers=[ "Development Status :: 4 - Beta", "Environment :: Console", "Intended Audience :: End Users/Desktop", "License :: OSI Approved :: " "GNU Lesser General Public License v3 or later (LGPLv3+)", "Operating System :: OS Independent", "Programming Language :: Python :: 2", "Programming Language :: Python :: 3", "Topic :: Multimedia :: Graphics :: Presentation"])
apache-2.0
srajag/nova
nova/tests/virt/baremetal/test_volume_driver.py
11
11573
# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for baremetal volume driver.""" from oslo.config import cfg from nova import exception from nova import test from nova.virt.baremetal import volume_driver from nova.virt import fake from nova.virt.libvirt import volume as libvirt_volume CONF = cfg.CONF SHOW_OUTPUT = """Target 1: iqn.2010-10.org.openstack:volume-00000001 System information: Driver: iscsi State: ready I_T nexus information: I_T nexus: 8 Initiator: iqn.1993-08.org.debian:01:7780c6a16b4 Connection: 0 IP Address: 172.17.12.10 LUN information: LUN: 0 Type: controller SCSI ID: IET 00010000 SCSI SN: beaf10 Size: 0 MB, Block size: 1 Online: Yes Removable media: No Readonly: No Backing store type: null Backing store path: None Backing store flags: LUN: 1 Type: disk SCSI ID: IET 00010001 SCSI SN: beaf11 Size: 1074 MB, Block size: 512 Online: Yes Removable media: No Readonly: No Backing store type: rdwr Backing store path: /dev/nova-volumes/volume-00000001 Backing store flags: Account information: ACL information: ALL Target 2: iqn.2010-10.org.openstack:volume-00000002 System information: Driver: iscsi State: ready I_T nexus information: LUN information: LUN: 0 Type: controller SCSI ID: IET 00020000 SCSI SN: beaf20 Size: 0 MB, Block size: 1 Online: Yes Removable media: No Readonly: No Backing store type: null Backing store path: None Backing store flags: LUN: 1 Type: disk SCSI ID: IET 00020001 SCSI SN: beaf21 Size: 2147 MB, Block size: 512 Online: Yes Removable media: No Readonly: No Backing store type: rdwr Backing store path: /dev/nova-volumes/volume-00000002 Backing store flags: Account information: ACL information: ALL Target 1000001: iqn.2010-10.org.openstack.baremetal:1000001-dev.vdc System information: Driver: iscsi State: ready I_T nexus information: LUN information: LUN: 0 Type: controller SCSI ID: IET f42410000 SCSI SN: beaf10000010 Size: 0 MB, Block size: 1 Online: Yes Removable media: No Readonly: No Backing store type: null Backing store path: None Backing store flags: LUN: 1 Type: disk SCSI ID: IET f42410001 SCSI SN: beaf10000011 Size: 1074 MB, Block size: 512 Online: Yes Removable media: No Readonly: No Backing store type: rdwr Backing store path: /dev/disk/by-path/ip-172.17.12.10:3260-iscsi-\ iqn.2010-10.org.openstack:volume-00000001-lun-1 Backing store flags: Account information: ACL information: ALL """ def fake_show_tgtadm(): return SHOW_OUTPUT class BareMetalVolumeTestCase(test.NoDBTestCase): def setUp(self): super(BareMetalVolumeTestCase, self).setUp() self.stubs.Set(volume_driver, '_show_tgtadm', fake_show_tgtadm) def test_list_backingstore_path(self): l = volume_driver._list_backingstore_path() self.assertEqual(len(l), 3) self.assertIn('/dev/nova-volumes/volume-00000001', l) self.assertIn('/dev/nova-volumes/volume-00000002', l) self.assertIn('/dev/disk/by-path/ip-172.17.12.10:3260-iscsi-' 'iqn.2010-10.org.openstack:volume-00000001-lun-1', l) def test_get_next_tid(self): tid = volume_driver._get_next_tid() 
self.assertEqual(1000002, tid) def test_find_tid_found(self): tid = volume_driver._find_tid( 'iqn.2010-10.org.openstack.baremetal:1000001-dev.vdc') self.assertEqual(1000001, tid) def test_find_tid_not_found(self): tid = volume_driver._find_tid( 'iqn.2010-10.org.openstack.baremetal:1000002-dev.vdc') self.assertIsNone(tid) def test_get_iqn(self): self.flags(iscsi_iqn_prefix='iqn.2012-12.a.b', group='baremetal') iqn = volume_driver._get_iqn('instname', '/dev/vdx') self.assertEqual('iqn.2012-12.a.b:instname-dev-vdx', iqn) class FakeConf(object): def __init__(self, source_path): self.source_path = source_path class BareMetalLibVirtVolumeDriverTestCase(test.TestCase): def setUp(self): super(BareMetalLibVirtVolumeDriverTestCase, self).setUp() self.flags(volume_drivers=[ 'fake=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver', 'fake2=nova.virt.libvirt.volume.LibvirtFakeVolumeDriver', ], group='libvirt') self.driver = volume_driver.LibvirtVolumeDriver(fake.FakeVirtAPI()) self.disk_info = { 'dev': 'vdc', 'bus': 'baremetal', 'type': 'baremetal', } self.connection_info = {'driver_volume_type': 'fake'} self.mount_point = '/dev/vdc' self.mount_device = 'vdc' self.source_path = '/dev/sdx' self.instance = {'uuid': '12345678-1234-1234-1234-123467890123456', 'name': 'instance-00000001'} self.fixed_ips = [{'address': '10.2.3.4'}, {'address': '172.16.17.18'}, ] self.iqn = 'iqn.fake:instance-00000001-dev-vdc' self.tid = 100 def test_init_loads_volume_drivers(self): self.assertIsInstance(self.driver.volume_drivers['fake'], libvirt_volume.LibvirtFakeVolumeDriver) self.assertIsInstance(self.driver.volume_drivers['fake2'], libvirt_volume.LibvirtFakeVolumeDriver) self.assertEqual(len(self.driver.volume_drivers), 2) def test_fake_connect_volume(self): """Check connect_volume returns without exceptions.""" self.driver._connect_volume(self.connection_info, self.disk_info) def test_volume_driver_method_ok(self): fake_driver = self.driver.volume_drivers['fake'] self.mox.StubOutWithMock(fake_driver, 'connect_volume') fake_driver.connect_volume(self.connection_info, self.disk_info) self.mox.ReplayAll() self.driver._connect_volume(self.connection_info, self.disk_info) def test_volume_driver_method_driver_type_not_found(self): self.connection_info['driver_volume_type'] = 'qwerty' self.assertRaises(exception.VolumeDriverNotFound, self.driver._connect_volume, self.connection_info, self.disk_info) def test_publish_iscsi(self): self.mox.StubOutWithMock(volume_driver, '_get_iqn') self.mox.StubOutWithMock(volume_driver, '_get_next_tid') self.mox.StubOutWithMock(volume_driver, '_create_iscsi_export_tgtadm') self.mox.StubOutWithMock(volume_driver, '_allow_iscsi_tgtadm') volume_driver._get_iqn(self.instance['name'], self.mount_point).\ AndReturn(self.iqn) volume_driver._get_next_tid().AndReturn(self.tid) volume_driver._create_iscsi_export_tgtadm(self.source_path, self.tid, self.iqn) volume_driver._allow_iscsi_tgtadm(self.tid, self.fixed_ips[0]['address']) volume_driver._allow_iscsi_tgtadm(self.tid, self.fixed_ips[1]['address']) self.mox.ReplayAll() self.driver._publish_iscsi(self.instance, self.mount_point, self.fixed_ips, self.source_path) def test_depublish_iscsi_ok(self): self.mox.StubOutWithMock(volume_driver, '_get_iqn') self.mox.StubOutWithMock(volume_driver, '_find_tid') self.mox.StubOutWithMock(volume_driver, '_delete_iscsi_export_tgtadm') volume_driver._get_iqn(self.instance['name'], self.mount_point).\ AndReturn(self.iqn) volume_driver._find_tid(self.iqn).AndReturn(self.tid) 
volume_driver._delete_iscsi_export_tgtadm(self.tid) self.mox.ReplayAll() self.driver._depublish_iscsi(self.instance, self.mount_point) def test_depublish_iscsi_do_nothing_if_tid_is_not_found(self): self.mox.StubOutWithMock(volume_driver, '_get_iqn') self.mox.StubOutWithMock(volume_driver, '_find_tid') volume_driver._get_iqn(self.instance['name'], self.mount_point).\ AndReturn(self.iqn) volume_driver._find_tid(self.iqn).AndReturn(None) self.mox.ReplayAll() self.driver._depublish_iscsi(self.instance, self.mount_point) def test_attach_volume(self): self.mox.StubOutWithMock(volume_driver, '_get_fixed_ips') self.mox.StubOutWithMock(self.driver, '_connect_volume') self.mox.StubOutWithMock(self.driver, '_publish_iscsi') volume_driver._get_fixed_ips(self.instance).AndReturn(self.fixed_ips) self.driver._connect_volume(self.connection_info, self.disk_info).\ AndReturn(FakeConf(self.source_path)) self.driver._publish_iscsi(self.instance, self.mount_point, self.fixed_ips, self.source_path) self.mox.ReplayAll() self.driver.attach_volume(self.connection_info, self.instance, self.mount_point) def test_detach_volume(self): self.mox.StubOutWithMock(volume_driver, '_get_iqn') self.mox.StubOutWithMock(volume_driver, '_find_tid') self.mox.StubOutWithMock(volume_driver, '_delete_iscsi_export_tgtadm') self.mox.StubOutWithMock(self.driver, '_disconnect_volume') volume_driver._get_iqn(self.instance['name'], self.mount_point).\ AndReturn(self.iqn) volume_driver._find_tid(self.iqn).AndReturn(self.tid) volume_driver._delete_iscsi_export_tgtadm(self.tid) self.driver._disconnect_volume(self.connection_info, self.mount_device) self.mox.ReplayAll() self.driver.detach_volume(self.connection_info, self.instance, self.mount_point)
apache-2.0
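A rough standalone sketch of the kind of tgtadm "show" parsing the helpers exercised above (_list_backingstore_path, _find_tid) perform; this is not the driver's actual code, and the sample text below is invented but follows the same "Target N: iqn..." / "Backing store path:" layout as SHOW_OUTPUT.

import re

def list_backingstore_paths(show_output):
    # Collect every real backing store path; "None" placeholder entries are skipped.
    paths = []
    for line in show_output.splitlines():
        line = line.strip()
        if line.startswith('Backing store path:'):
            path = line.split(':', 1)[1].strip()
            if path != 'None':
                paths.append(path)
    return paths

def find_tid(show_output, iqn):
    # Return the target id announced for the given IQN, or None if absent.
    for line in show_output.splitlines():
        m = re.match(r'^Target (\d+): (\S+)', line.strip())
        if m and m.group(2) == iqn:
            return int(m.group(1))
    return None

sample = """Target 42: iqn.2010-10.org.example:volume-0000002a
    Backing store path: None
    Backing store path: /dev/nova-volumes/volume-0000002a
"""
print(find_tid(sample, 'iqn.2010-10.org.example:volume-0000002a'))  # 42
print(list_backingstore_paths(sample))  # ['/dev/nova-volumes/volume-0000002a']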
simonwydooghe/ansible
lib/ansible/modules/cloud/podman/podman_image_info.py
21
9173
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = """ module: podman_image_info author: - Sam Doran (@samdoran) version_added: '2.8' short_description: Gather info about images using podman notes: - Podman may required elevated privileges in order to run properly. description: - Gather info about images using C(podman) options: executable: description: - Path to C(podman) executable if it is not in the C($PATH) on the machine running C(podman) default: 'podman' type: str name: description: - List of tags or UID to gather info about. If no name is given return info about all images. """ EXAMPLES = """ - name: Gather info for all images podman_image_info: - name: Gather info on a specific image podman_image_info: name: nginx - name: Gather info on several images podman_image_info: name: - redis - quay.io/bitnami/wildfly """ RETURN = """ images: description: info from all or specified images returned: always type: dict sample: [ { "Annotations": {}, "Architecture": "amd64", "Author": "", "Comment": "from Bitnami with love", "ContainerConfig": { "Cmd": [ "nami", "start", "--foreground", "wildfly" ], "Entrypoint": [ "/app-entrypoint.sh" ], "Env": [ "PATH=/opt/bitnami/java/bin:/opt/bitnami/wildfly/bin:/opt/bitnami/nami/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", "IMAGE_OS=debian-9", "NAMI_VERSION=0.0.9-0", "GPG_KEY_SERVERS_LIST=ha.pool.sks-keyservers.net \ hkp://p80.pool.sks-keyservers.net:80 keyserver.ubuntu.com hkp://keyserver.ubuntu.com:80 pgp.mit.edu", "TINI_VERSION=v0.13.2", "TINI_GPG_KEY=595E85A6B1B4779EA4DAAEC70B588DFF0527A9B7", "GOSU_VERSION=1.10", "GOSU_GPG_KEY=B42F6819007F00F88E364FD4036A9C25BF357DD4", "BITNAMI_IMAGE_VERSION=14.0.1-debian-9-r12", "BITNAMI_APP_NAME=wildfly", "WILDFLY_JAVA_HOME=", "WILDFLY_JAVA_OPTS=", "WILDFLY_MANAGEMENT_HTTP_PORT_NUMBER=9990", "WILDFLY_PASSWORD=bitnami", "WILDFLY_PUBLIC_CONSOLE=true", "WILDFLY_SERVER_AJP_PORT_NUMBER=8009", "WILDFLY_SERVER_HTTP_PORT_NUMBER=8080", "WILDFLY_SERVER_INTERFACE=0.0.0.0", "WILDFLY_USERNAME=user", "WILDFLY_WILDFLY_HOME=/home/wildfly", "WILDFLY_WILDFLY_OPTS=-Dwildfly.as.deployment.ondemand=false" ], "ExposedPorts": { "8080/tcp": {}, "9990/tcp": {} }, "Labels": { "maintainer": "Bitnami <[email protected]>" } }, "Created": "2018-09-25T04:07:45.934395523Z", "Digest": "sha256:5c7d8e2dd66dcf4a152a4032a1d3c5a33458c67e1c1335edd8d18d738892356b", "GraphDriver": { "Data": { "LowerDir": "/var/lib/containers/storage/overlay/a9dbf5616cc16919a8ac0dfc60aff87a72b5be52994c4649fcc91a089a12931\ f/diff:/var/lib/containers/storage/overlay/67129bd46022122a7d8b7acb490092af6c7ce244ce4fbd7d9e2d2b7f5979e090/diff:/var/lib/containers/storage/overlay/7c51242c\ 
4c5db5c74afda76d7fdbeab6965d8b21804bb3fc597dee09c770b0ca/diff:/var/lib/containers/storage/overlay/f97315dc58a9c002ba0cabccb9933d4b0d2113733d204188c88d72f75569b57b/diff:/var/lib/containers/storage/overlay/1dbde2dd497ddde2b467727125b900958a051a72561e58d29abe3d660dcaa9a7/diff:/var/lib/containers/storage/overlay/4aad9d80f30c3f0608f58173558b7554d84dee4dc4479672926eca29f75e6e33/diff:/var/lib/containers/storage/overlay/6751fc9b6868254870c062d75a511543fc8cfda2ce6262f4945f107449219632/diff:/var/lib/containers/storage/overlay/a27034d79081347421dd24d7e9e776c18271cd9a6e51053cb39af4d3d9c400e8/diff:/var/lib/containers/storage/overlay/537cf0045ed9cd7989f7944e7393019c81b16c1799a2198d8348cd182665397f/diff:/var/lib/containers/storage/overlay/27578615c5ae352af4e8449862d61aaf5c11b105a7d5905af55bd01b0c656d6e/diff:/var/lib/containers/storage/overlay/566542742840fe3034b3596f7cb9e62a6274c95a69f368f9e713746f8712c0b6/diff", "MergedDir": "/var/lib/containers/storage/overlay/72bb96d6\ c53ad57a0b1e44cab226a6251598accbead40b23fac89c19ad8c25ca/merged", "UpperDir": "/var/lib/containers/storage/overlay/72bb96d6c53ad57a0b1e44cab226a6251598accbead40b23fac89c19ad8c25ca/diff", "WorkDir": "/var/lib/containers/storage/overlay/72bb96d6c53ad57a0b1e44cab226a6251598accbead40b23fac89c19ad8c25ca/work" }, "Name": "overlay" }, "Id": "bcacbdf7a119c0fa934661ca8af839e625ce6540d9ceb6827cdd389f823d49e0", "Labels": { "maintainer": "Bitnami <[email protected]>" }, "ManifestType": "application/vnd.docker.distribution.manifest.v1+prettyjws", "Os": "linux", "Parent": "", "RepoDigests": [ "quay.io/bitnami/wildfly@sha256:5c7d8e2dd66dcf4a152a4032a1d3c5a33458c67e1c1335edd8d18d738892356b" ], "RepoTags": [ "quay.io/bitnami/wildfly:latest" ], "RootFS": { "Layers": [ "sha256:75391df2c87e076b0c2f72d20c95c57dc8be7ee684cc07273416cce622b43367", "sha256:7dd303f041039bfe8f0833092673ac35f93137d10e0fbc4302021ea65ad57731", "sha256:720d9edf0cd2a9bb56b88b80be9070dbfaad359514c70094c65066963fed485d", "sha256:6a567ecbf97725501a634fcb486271999aa4591b633b4ae9932a46b40f5aaf47", "sha256:59e9a6db8f178f3da868614564faabb2820cdfb69be32e63a4405d6f7772f68c", "sha256:310a82ccb092cd650215ab375da8943d235a263af9a029b8ac26a281446c04db", "sha256:36cb91cf4513543a8f0953fed785747ea18b675bc2677f3839889cfca0aac79e" ], "Type": "layers" }, "Size": 569919342, "User": "", "Version": "17.06.0-ce", "VirtualSize": 569919342 } ] """ import json from ansible.module_utils.basic import AnsibleModule def image_exists(module, executable, name): command = [executable, 'image', 'exists', name] rc, out, err = module.run_command(command) if rc == 1: return False elif 'Command "exists" not found' in err: # The 'exists' test is available in podman >= 0.12.1 command = [executable, 'image', 'ls', '-q', name] rc2, out2, err2 = module.run_command(command) if rc2 != 0: return False return True def filter_invalid_names(module, executable, name): valid_names = [] names = name if not isinstance(name, list): names = [name] for name in names: if image_exists(module, executable, name): valid_names.append(name) return valid_names def get_image_info(module, executable, name): names = name if not isinstance(name, list): names = [name] if len(names) > 0: command = [executable, 'image', 'inspect'] command.extend(names) rc, out, err = module.run_command(command) if rc != 0: module.fail_json(msg="Unable to gather info for '{0}': {1}".format(', '.join(names), err)) return out else: return json.dumps([]) def get_all_image_info(module, executable): command = [executable, 'image', 'ls', '-q'] rc, out, err = 
module.run_command(command) name = out.strip().split('\n') out = get_image_info(module, executable, name) return out def main(): module = AnsibleModule( argument_spec=dict( executable=dict(type='str', default='podman'), name=dict(type='list') ), supports_check_mode=True, ) executable = module.params['executable'] name = module.params.get('name') executable = module.get_bin_path(executable, required=True) if name: valid_names = filter_invalid_names(module, executable, name) results = json.loads(get_image_info(module, executable, valid_names)) else: results = json.loads(get_all_image_info(module, executable)) results = dict( changed=False, images=results ) module.exit_json(**results) if __name__ == '__main__': main()
gpl-3.0
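A minimal sketch of the same two-step flow the module above drives through Ansible ("podman image ls -q" to list IDs, then one "podman image inspect" call), written as a plain script; it assumes a podman binary on PATH and is an illustration, not part of the module.

import json
import subprocess

def inspect_all_images(podman='podman'):
    # List image IDs, then inspect them all in a single call, mirroring
    # get_all_image_info() / get_image_info() in the module above.
    ids = subprocess.check_output([podman, 'image', 'ls', '-q'], text=True).split()
    if not ids:
        return []
    raw = subprocess.check_output([podman, 'image', 'inspect'] + ids, text=True)
    return json.loads(raw)

if __name__ == '__main__':
    for image in inspect_all_images():
        print(image.get('Id'), image.get('RepoTags'))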
dhoffman34/django
django/utils/lorem_ipsum.py
81
4910
""" Utility functions for generating "lorem ipsum" Latin text. """ from __future__ import unicode_literals import random COMMON_P = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.' WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet', 'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi', 'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi', 'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos', 'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum', 'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus', 'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus', 'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum', 'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem', 'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus', 'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente', 'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet', 'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta', 'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima', 'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim', 'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores', 'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias', 'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea', 'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt', 'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate', 'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius', 'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos', 'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore', 'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo', 'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi', 'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam', 'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique', 'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere', 'maxime', 'corrupti') COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur', 'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt', 'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua') def sentence(): """ Returns a randomly generated sentence of lorem ipsum text. The first word is capitalized, and the sentence ends in either a period or question mark. Commas are added at random. """ # Determine the number of comma-separated sections and number of words in # each section for this sentence. sections = [' '.join(random.sample(WORDS, random.randint(3, 12))) for i in range(random.randint(1, 5))] s = ', '.join(sections) # Convert to sentence case and add end punctuation. return '%s%s%s' % (s[0].upper(), s[1:], random.choice('?.')) def paragraph(): """ Returns a randomly generated paragraph of lorem ipsum text. The paragraph consists of between 1 and 4 sentences, inclusive. """ return ' '.join(sentence() for i in range(random.randint(1, 4))) def paragraphs(count, common=True): """ Returns a list of paragraphs as returned by paragraph(). 
If `common` is True, then the first paragraph will be the standard 'lorem ipsum' paragraph. Otherwise, the first paragraph will be random Latin text. Either way, subsequent paragraphs will be random Latin text. """ paras = [] for i in range(count): if common and i == 0: paras.append(COMMON_P) else: paras.append(paragraph()) return paras def words(count, common=True): """ Returns a string of `count` lorem ipsum words separated by a single space. If `common` is True, then the first 19 words will be the standard 'lorem ipsum' words. Otherwise, all words will be selected randomly. """ if common: word_list = list(COMMON_WORDS) else: word_list = [] c = len(word_list) if count > c: count -= c while count > 0: c = min(count, len(WORDS)) count -= c word_list += random.sample(WORDS, c) else: word_list = word_list[:count] return ' '.join(word_list)
bsd-3-clause
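A small usage sketch of the helpers defined above, assuming Django is importable (the import path is taken from the record's path field).

from django.utils.lorem_ipsum import paragraphs, sentence, words

print(words(5))                   # 'lorem ipsum dolor sit amet' (first COMMON_WORDS)
print(sentence())                 # one random Latin sentence
for para in paragraphs(2, common=True):
    print(para[:60], '...')       # the first paragraph is the fixed COMMON_P text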
ESS-LLP/erpnext
erpnext/patches/v11_0/change_healthcare_desktop_icons.py
4
2450
import frappe
from frappe import _

change_icons_map = [
    {
        "module_name": "Patient",
        "color": "#6BE273",
        "icon": "fa fa-user",
        "doctype": "Patient",
        "type": "link",
        "link": "List/Patient",
        "label": _("Patient")
    },
    {
        "module_name": "Patient Encounter",
        "color": "#2ecc71",
        "icon": "fa fa-stethoscope",
        "doctype": "Patient Encounter",
        "type": "link",
        "link": "List/Patient Encounter",
        "label": _("Patient Encounter"),
    },
    {
        "module_name": "Healthcare Practitioner",
        "color": "#2ecc71",
        "icon": "fa fa-user-md",
        "doctype": "Healthcare Practitioner",
        "type": "link",
        "link": "List/Healthcare Practitioner",
        "label": _("Healthcare Practitioner")
    },
    {
        "module_name": "Patient Appointment",
        "color": "#934F92",
        "icon": "fa fa-calendar-plus-o",
        "doctype": "Patient Appointment",
        "type": "link",
        "link": "List/Patient Appointment",
        "label": _("Patient Appointment")
    },
    {
        "module_name": "Lab Test",
        "color": "#7578f6",
        "icon": "octicon octicon-beaker",
        "doctype": "Lab Test",
        "type": "link",
        "link": "List/Lab Test",
        "label": _("Lab Test")
    }
]


def execute():
    change_healthcare_desktop_icons()


def change_healthcare_desktop_icons():
    doctypes = ["patient", "patient_encounter", "healthcare_practitioner",
                "patient_appointment", "lab_test"]
    for doctype in doctypes:
        frappe.reload_doc("healthcare", "doctype", doctype)

    for spec in change_icons_map:
        frappe.db.sql("""
            delete from `tabDesktop Icon`
            where _doctype = '{0}'
        """.format(spec['doctype']))

        desktop_icon = frappe.new_doc("Desktop Icon")
        desktop_icon.hidden = 1
        desktop_icon.standard = 1
        desktop_icon.icon = spec['icon']
        desktop_icon.color = spec['color']
        desktop_icon.module_name = spec['module_name']
        desktop_icon.label = spec['label']
        desktop_icon.app = "erpnext"
        desktop_icon.type = spec['type']
        desktop_icon._doctype = spec['doctype']
        desktop_icon.link = spec['link']
        desktop_icon.save(ignore_permissions=True)

    frappe.db.sql("""
        delete from `tabDesktop Icon`
        where module_name = 'Healthcare' and type = 'module'
    """)

    desktop_icon = frappe.new_doc("Desktop Icon")
    desktop_icon.hidden = 1
    desktop_icon.standard = 1
    desktop_icon.icon = "fa fa-heartbeat"
    desktop_icon.color = "#FF888B"
    desktop_icon.module_name = "Healthcare"
    desktop_icon.label = _("Healthcare")
    desktop_icon.app = "erpnext"
    desktop_icon.type = 'module'
    desktop_icon.save(ignore_permissions=True)
gpl-3.0
HesselTjeerdsma/Cyber-Physical-Pacman-Game
Algor/flask/lib/python2.7/site-packages/urllib3/contrib/_securetransport/low_level.py
136
12062
""" Low-level helpers for the SecureTransport bindings. These are Python functions that are not directly related to the high-level APIs but are necessary to get them to work. They include a whole bunch of low-level CoreFoundation messing about and memory management. The concerns in this module are almost entirely about trying to avoid memory leaks and providing appropriate and useful assistance to the higher-level code. """ import base64 import ctypes import itertools import re import os import ssl import tempfile from .bindings import Security, CoreFoundation, CFConst # This regular expression is used to grab PEM data out of a PEM bundle. _PEM_CERTS_RE = re.compile( b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL ) def _cf_data_from_bytes(bytestring): """ Given a bytestring, create a CFData object from it. This CFData object must be CFReleased by the caller. """ return CoreFoundation.CFDataCreate( CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring) ) def _cf_dictionary_from_tuples(tuples): """ Given a list of Python tuples, create an associated CFDictionary. """ dictionary_size = len(tuples) # We need to get the dictionary keys and values out in the same order. keys = (t[0] for t in tuples) values = (t[1] for t in tuples) cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys) cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values) return CoreFoundation.CFDictionaryCreate( CoreFoundation.kCFAllocatorDefault, cf_keys, cf_values, dictionary_size, CoreFoundation.kCFTypeDictionaryKeyCallBacks, CoreFoundation.kCFTypeDictionaryValueCallBacks, ) def _cf_string_to_unicode(value): """ Creates a Unicode string from a CFString object. Used entirely for error reporting. Yes, it annoys me quite a lot that this function is this complex. """ value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p)) string = CoreFoundation.CFStringGetCStringPtr( value_as_void_p, CFConst.kCFStringEncodingUTF8 ) if string is None: buffer = ctypes.create_string_buffer(1024) result = CoreFoundation.CFStringGetCString( value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8 ) if not result: raise OSError('Error copying C string from CFStringRef') string = buffer.value if string is not None: string = string.decode('utf-8') return string def _assert_no_error(error, exception_class=None): """ Checks the return code and throws an exception if there is an error to report """ if error == 0: return cf_error_string = Security.SecCopyErrorMessageString(error, None) output = _cf_string_to_unicode(cf_error_string) CoreFoundation.CFRelease(cf_error_string) if output is None or output == u'': output = u'OSStatus %s' % error if exception_class is None: exception_class = ssl.SSLError raise exception_class(output) def _cert_array_from_pem(pem_bundle): """ Given a bundle of certs in PEM format, turns them into a CFArray of certs that can be used to validate a cert chain. 
""" der_certs = [ base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle) ] if not der_certs: raise ssl.SSLError("No root certificates specified") cert_array = CoreFoundation.CFArrayCreateMutable( CoreFoundation.kCFAllocatorDefault, 0, ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks) ) if not cert_array: raise ssl.SSLError("Unable to allocate memory!") try: for der_bytes in der_certs: certdata = _cf_data_from_bytes(der_bytes) if not certdata: raise ssl.SSLError("Unable to allocate memory!") cert = Security.SecCertificateCreateWithData( CoreFoundation.kCFAllocatorDefault, certdata ) CoreFoundation.CFRelease(certdata) if not cert: raise ssl.SSLError("Unable to build cert object!") CoreFoundation.CFArrayAppendValue(cert_array, cert) CoreFoundation.CFRelease(cert) except Exception: # We need to free the array before the exception bubbles further. # We only want to do that if an error occurs: otherwise, the caller # should free. CoreFoundation.CFRelease(cert_array) return cert_array def _is_cert(item): """ Returns True if a given CFTypeRef is a certificate. """ expected = Security.SecCertificateGetTypeID() return CoreFoundation.CFGetTypeID(item) == expected def _is_identity(item): """ Returns True if a given CFTypeRef is an identity. """ expected = Security.SecIdentityGetTypeID() return CoreFoundation.CFGetTypeID(item) == expected def _temporary_keychain(): """ This function creates a temporary Mac keychain that we can use to work with credentials. This keychain uses a one-time password and a temporary file to store the data. We expect to have one keychain per socket. The returned SecKeychainRef must be freed by the caller, including calling SecKeychainDelete. Returns a tuple of the SecKeychainRef and the path to the temporary directory that contains it. """ # Unfortunately, SecKeychainCreate requires a path to a keychain. This # means we cannot use mkstemp to use a generic temporary file. Instead, # we're going to create a temporary directory and a filename to use there. # This filename will be 8 random bytes expanded into base64. We also need # some random bytes to password-protect the keychain we're creating, so we # ask for 40 random bytes. random_bytes = os.urandom(40) filename = base64.b64encode(random_bytes[:8]).decode('utf-8') password = base64.b64encode(random_bytes[8:]) # Must be valid UTF-8 tempdirectory = tempfile.mkdtemp() keychain_path = os.path.join(tempdirectory, filename).encode('utf-8') # We now want to create the keychain itself. keychain = Security.SecKeychainRef() status = Security.SecKeychainCreate( keychain_path, len(password), password, False, None, ctypes.byref(keychain) ) _assert_no_error(status) # Having created the keychain, we want to pass it off to the caller. return keychain, tempdirectory def _load_items_from_file(keychain, path): """ Given a single file, loads all the trust objects from it into arrays and the keychain. Returns a tuple of lists: the first list is a list of identities, the second a list of certs. 
""" certificates = [] identities = [] result_array = None with open(path, 'rb') as f: raw_filedata = f.read() try: filedata = CoreFoundation.CFDataCreate( CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata) ) result_array = CoreFoundation.CFArrayRef() result = Security.SecItemImport( filedata, # cert data None, # Filename, leaving it out for now None, # What the type of the file is, we don't care None, # what's in the file, we don't care 0, # import flags None, # key params, can include passphrase in the future keychain, # The keychain to insert into ctypes.byref(result_array) # Results ) _assert_no_error(result) # A CFArray is not very useful to us as an intermediary # representation, so we are going to extract the objects we want # and then free the array. We don't need to keep hold of keys: the # keychain already has them! result_count = CoreFoundation.CFArrayGetCount(result_array) for index in range(result_count): item = CoreFoundation.CFArrayGetValueAtIndex( result_array, index ) item = ctypes.cast(item, CoreFoundation.CFTypeRef) if _is_cert(item): CoreFoundation.CFRetain(item) certificates.append(item) elif _is_identity(item): CoreFoundation.CFRetain(item) identities.append(item) finally: if result_array: CoreFoundation.CFRelease(result_array) CoreFoundation.CFRelease(filedata) return (identities, certificates) def _load_client_cert_chain(keychain, *paths): """ Load certificates and maybe keys from a number of files. Has the end goal of returning a CFArray containing one SecIdentityRef, and then zero or more SecCertificateRef objects, suitable for use as a client certificate trust chain. """ # Ok, the strategy. # # This relies on knowing that macOS will not give you a SecIdentityRef # unless you have imported a key into a keychain. This is a somewhat # artificial limitation of macOS (for example, it doesn't necessarily # affect iOS), but there is nothing inside Security.framework that lets you # get a SecIdentityRef without having a key in a keychain. # # So the policy here is we take all the files and iterate them in order. # Each one will use SecItemImport to have one or more objects loaded from # it. We will also point at a keychain that macOS can use to work with the # private key. # # Once we have all the objects, we'll check what we actually have. If we # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise, # we'll take the first certificate (which we assume to be our leaf) and # ask the keychain to give us a SecIdentityRef with that cert's associated # key. # # We'll then return a CFArray containing the trust chain: one # SecIdentityRef and then zero-or-more SecCertificateRef objects. The # responsibility for freeing this CFArray will be with the caller. This # CFArray must remain alive for the entire connection, so in practice it # will be stored with a single SSLSocket, along with the reference to the # keychain. certificates = [] identities = [] # Filter out bad paths. paths = (path for path in paths if path) try: for file_path in paths: new_identities, new_certs = _load_items_from_file( keychain, file_path ) identities.extend(new_identities) certificates.extend(new_certs) # Ok, we have everything. The question is: do we have an identity? If # not, we want to grab one from the first cert we have. 
if not identities: new_identity = Security.SecIdentityRef() status = Security.SecIdentityCreateWithCertificate( keychain, certificates[0], ctypes.byref(new_identity) ) _assert_no_error(status) identities.append(new_identity) # We now want to release the original certificate, as we no longer # need it. CoreFoundation.CFRelease(certificates.pop(0)) # We now need to build a new CFArray that holds the trust chain. trust_chain = CoreFoundation.CFArrayCreateMutable( CoreFoundation.kCFAllocatorDefault, 0, ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks), ) for item in itertools.chain(identities, certificates): # ArrayAppendValue does a CFRetain on the item. That's fine, # because the finally block will release our other refs to them. CoreFoundation.CFArrayAppendValue(trust_chain, item) return trust_chain finally: for obj in itertools.chain(identities, certificates): CoreFoundation.CFRelease(obj)
apache-2.0
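The only part of the record above that runs everywhere is the PEM parsing step of _cert_array_from_pem (the CoreFoundation half needs macOS). A self-contained sketch of that step, with the regex copied from the record and an invented two-certificate bundle:

import base64
import re

_PEM_CERTS_RE = re.compile(
    b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
)

def pem_bundle_to_der(pem_bundle):
    # Extract each base64 body from the bundle and decode it to DER bytes,
    # exactly as the first half of _cert_array_from_pem does.
    return [base64.b64decode(m.group(1)) for m in _PEM_CERTS_RE.finditer(pem_bundle)]

bundle = (b"-----BEGIN CERTIFICATE-----\nMIIB\n-----END CERTIFICATE-----\n"
          b"-----BEGIN CERTIFICATE-----\nMIIC\n-----END CERTIFICATE-----\n")
print(len(pem_bundle_to_der(bundle)))  # 2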
wolfv/AutobahnPython
examples/twisted/websocket/streaming/frame_based_server.py
18
2612
###############################################################################
##
##  Copyright (C) 2011-2013 Tavendo GmbH
##
##  Licensed under the Apache License, Version 2.0 (the "License");
##  you may not use this file except in compliance with the License.
##  You may obtain a copy of the License at
##
##      http://www.apache.org/licenses/LICENSE-2.0
##
##  Unless required by applicable law or agreed to in writing, software
##  distributed under the License is distributed on an "AS IS" BASIS,
##  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
##  See the License for the specific language governing permissions and
##  limitations under the License.
##
###############################################################################

import hashlib

from twisted.internet import reactor

from autobahn.twisted.websocket import WebSocketServerFactory, \
                                       WebSocketServerProtocol, \
                                       listenWS


class FrameBasedHashServerProtocol(WebSocketServerProtocol):
    """
    Frame-based WebSockets server that computes a running SHA-256 for
    message data received. It will respond after every frame received
    with the digest computed up to that point. It can receive messages
    of unlimited number of frames. Digest is reset upon new message.
    """

    def onMessageBegin(self, isBinary):
        WebSocketServerProtocol.onMessageBegin(self, isBinary)
        self.sha256 = hashlib.sha256()

    def onMessageFrame(self, payload):
        l = 0
        for data in payload:
            l += len(data)
            self.sha256.update(data)
        digest = self.sha256.hexdigest()
        print("Received frame with payload length {}, compute digest: {}".format(l, digest))
        self.sendMessage(digest.encode('utf8'))

    def onMessageEnd(self):
        self.sha256 = None


if __name__ == '__main__':

    factory = WebSocketServerFactory("ws://localhost:9000")
    factory.protocol = FrameBasedHashServerProtocol

    enableCompression = False
    if enableCompression:
        from autobahn.websocket.compress import PerMessageDeflateOffer, \
                                                PerMessageDeflateOfferAccept

        ## Function to accept offers from the client ..
        def accept(offers):
            for offer in offers:
                if isinstance(offer, PerMessageDeflateOffer):
                    return PerMessageDeflateOfferAccept(offer)

        factory.setProtocolOptions(perMessageCompressionAccept = accept)

    listenWS(factory)
    reactor.run()
apache-2.0
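The essential idea of the server above, stripped of the Twisted/WebSocket plumbing, is a single SHA-256 context updated frame by frame with a digest reported after each frame. A minimal standalone sketch:

import hashlib

def running_digests(frames):
    # Feed message frames into one SHA-256 context and record the digest
    # after each frame, the way the server above answers every frame.
    sha256 = hashlib.sha256()
    digests = []
    for frame in frames:
        sha256.update(frame)
        digests.append(sha256.hexdigest())
    return digests

print(running_digests([b'hello ', b'world']))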
klmitch/nova
nova/tests/functional/api_sample_tests/test_security_groups.py
4
6316
# Copyright 2012 Nebula, Inc. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.tests.functional.api_sample_tests import test_servers import nova.tests.functional.api_samples_test_base as astb def fake_get(*args, **kwargs): nova_group = {} nova_group['id'] = 1 nova_group['description'] = 'default' nova_group['name'] = 'default' nova_group['project_id'] = astb.PROJECT_ID nova_group['rules'] = [] return nova_group def fake_get_instances_security_groups_bindings(context, servers, detailed=False): result = {} for s in servers: result[s.get('id')] = [{'name': 'test'}] return result def fake_add_to_instance(context, instance, security_group_name): pass def fake_remove_from_instance(context, instance, security_group_name): pass def fake_list(context, names=None, ids=None, project=None, search_opts=None): return [fake_get()] def fake_get_instance_security_groups(context, instance_uuid, detailed=False): return [fake_get()] def fake_create_security_group(context, name, description): return fake_get() def fake_create_security_group_rule(context, security_group, new_rule): return { 'from_port': 22, 'to_port': 22, 'cidr': '10.0.0.0/24', 'id': '00000000-0000-0000-0000-000000000000', 'parent_group_id': '11111111-1111-1111-1111-111111111111', 'protocol': 'tcp', 'group_id': None } def fake_remove_rules(context, security_group, rule_ids): pass def fake_get_rule(context, id): return { 'id': id, 'parent_group_id': '11111111-1111-1111-1111-111111111111' } class SecurityGroupsJsonTest(test_servers.ServersSampleBase): sample_dir = 'os-security-groups' def setUp(self): super(SecurityGroupsJsonTest, self).setUp() path = 'nova.network.security_group_api.' self.stub_out(path + 'get', fake_get) self.stub_out(path + 'get_instances_security_groups_bindings', fake_get_instances_security_groups_bindings) self.stub_out(path + 'add_to_instance', fake_add_to_instance) self.stub_out(path + 'remove_from_instance', fake_remove_from_instance) self.stub_out(path + 'list', fake_list) self.stub_out(path + 'get_instance_security_groups', fake_get_instance_security_groups) self.stub_out(path + 'create_security_group', fake_create_security_group) self.stub_out(path + 'create_security_group_rule', fake_create_security_group_rule) self.stub_out(path + 'remove_rules', fake_remove_rules) self.stub_out(path + 'get_rule', fake_get_rule) def _get_create_subs(self): return { 'group_name': 'default', "description": "default", } def _create_security_group(self): subs = self._get_create_subs() return self._do_post('os-security-groups', 'security-group-post-req', subs) def _add_group(self, uuid): subs = { 'group_name': 'test' } return self._do_post('servers/%s/action' % uuid, 'security-group-add-post-req', subs) def test_security_group_create(self): response = self._create_security_group() subs = self._get_create_subs() self._verify_response('security-groups-create-resp', subs, response, 200) def test_security_groups_list(self): # Get api sample of security groups get list request. 
response = self._do_get('os-security-groups') self._verify_response('security-groups-list-get-resp', {}, response, 200) def test_security_groups_get(self): # Get api sample of security groups get request. security_group_id = '11111111-1111-1111-1111-111111111111' response = self._do_get('os-security-groups/%s' % security_group_id) self._verify_response('security-groups-get-resp', {}, response, 200) def test_security_groups_list_server(self): # Get api sample of security groups for a specific server. uuid = self._post_server() response = self._do_get('servers/%s/os-security-groups' % uuid) self._verify_response('server-security-groups-list-resp', {}, response, 200) def test_security_groups_add(self): self._create_security_group() uuid = self._post_server() response = self._add_group(uuid) self.assertEqual(202, response.status_code) self.assertEqual('', response.text) def test_security_groups_remove(self): self._create_security_group() uuid = self._post_server() self._add_group(uuid) subs = { 'group_name': 'test' } response = self._do_post('servers/%s/action' % uuid, 'security-group-remove-post-req', subs) self.assertEqual(202, response.status_code) self.assertEqual('', response.text) def test_security_group_rules_create(self): response = self._do_post('os-security-group-rules', 'security-group-rules-post-req', {}) self._verify_response('security-group-rules-post-resp', {}, response, 200) def test_security_group_rules_remove(self): response = self._do_delete( 'os-security-group-rules/00000000-0000-0000-0000-000000000000') self.assertEqual(202, response.status_code)
apache-2.0
OpringaoDoTurno/airflow
airflow/migrations/versions/8504051e801b_xcom_dag_task_indices.py
46
1080
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""xcom dag task indices

Revision ID: 8504051e801b
Revises: 4addfa1236f1
Create Date: 2016-11-29 08:13:03.253312

"""

# revision identifiers, used by Alembic.
revision = '8504051e801b'
down_revision = '4addfa1236f1'
branch_labels = None
depends_on = None

from alembic import op
import sqlalchemy as sa


def upgrade():
    op.create_index('idx_xcom_dag_task_date', 'xcom',
                    ['dag_id', 'task_id', 'execution_date'], unique=False)


def downgrade():
    op.drop_index('idx_xcom_dag_task_date', table_name='xcom')
apache-2.0
mathwuyue/py-wireless-sys-sim
d2d/rrm.py
1
8801
import operator import itertools import numpy as np import scipy.optimize from core import cal_thermal_noise, cal_umi_nlos, cal_umi_exp_los from functools import reduce def _sum(func, *args): return reduce(operator.add, map(func, *args), 0) def cal_D2D_basic_tp(d2d_ues, g_d2d_bs, kappa, bw, alpha, freq): """ This function calculates the transmit power for D2D UEs (Spectrum Sharing Scheme Between Cellular Users and Ad-hoc Device-to-Device Users) Args: d2d_ues (numpy array): d2d_ues positions g_d2d_cc (): channel gain between d2d and cc ues kappa (float): scale param for cc bw (float): bandwidth for d2d_ues alpha (float): pathloss parameter freq (float): frequency Returns: numpy array. The transmit power of D2D UEs. """ noise = cal_thermal_noise(bw, 273) pathloss = cal_umi_nlos(np.abs(d2d_ues), alpha, freq) return (kappa - 1) * pathloss * noise / g_d2d_bs def cal_D2D_opt_tp(d2d_ues, cc_ues, pmax_d, pmax_c, g_d2d_bs, g_cc, g_d2d, g_cc_d2d, sinr_d2d, sinr_cc, bw, alpha, freq): """ This function calculates the RRM for D2D UEs (Device-to-Device Communications Underlaying Cellular Networks) Args: d2d_ues (numpy array): d2d_ues positions g_d2d_cc (): channel gain between d2d and cc ues kappa (float): scale param for cc bw (float): bandwidth for d2d_ues alpha (float): pathloss parameter freq (float): frequency Returns: list of numpy array. The transmit power of D2D UEs and CC UEs. ::TODO: only consider one D2D """ noise = cal_thermal_noise(bw, 273) # set up reuse array idx_avail = [] p_c = (g_d2d*sinr_cc+g_d2d_bs*sinr_cc*sinr_d2d)*noise / \ (g_d2d*g_cc-sinr_d2d*sinr_cc*g_cc_d2d*g_d2d_bs) p_d2d = (g_cc_d2d*sinr_cc*sinr_d2d+g_cc*sinr_d2d)*noise / \ (g_d2d*g_cc-sinr_cc*sinr_d2d*g_cc_d2d*g_d2d_bs) for i in range(cc_ues.size): if (p_d2d > 0 and p_d2d <= pmax_c) and (p_c > 0 and p_c <= pmax_c): idx_avail.append(i) # calculate optimal transmit power # FIXME: one D2D def _argmax(tp_pairs): f = 0 idx = 0 for i, (pc, pd) in enumerate(tp_pairs): fc = np.log2(1+pc*g_cc/(pd*g_d2d_bs+noise))+np.log2(1+pd*g_d2d/(pc*g_cc_d2d+noise)) if fc > f: f = fc idx = i return tp_pairs[idx] p1 = (pmax_c*g_cc_d2d[idx_avail]+noise)*sinr_d2d/g_d2d p2 = (pmax_c*g_cc[idx_avail]-sinr_cc*noise)/(sinr_cc*g_d2d_bs) p3 = (pmax_d*g_d2d-sinr_d2d*noise)/(sinr_d2d*g_cc_d2d[idx_avail]) p4 = (pmax_d*g_d2d_bs+noise)*sinr_cc/g_cc[idx_avail] opt_tp_pairs = [] for i, j in enumerate(idx_avail): if (pmax_c*g_cc[i])/(noise+pmax_d*g_d2d_bs) <= sinr_cc: opt_tp_pairs.append(_argmax([(pmax_c, p1[j]), (pmax_c, p2[j])])) elif pmax_d*g_d2d/(noise+pmax_c*g_cc_d2d[i]) < sinr_d2d: opt_tp_pairs.append(_argmax([(p3[j], pmax_d), (p4[j], pmax_d)])) else: opt_tp_pairs.append(_argmax([(pmax_c, p1[j]), (pmax_c, pmax_d), (p4[j], pmax_d)])) # calculate channel allocation. 
return _argmax(opt_tp_pairs) def cal_D2D_ergodic_tp(d2d_tr, d2d_rc, cc_ue, rc, a_gain_c, a_gain_d, k_los, k_nlos, alpha_los, alpha_nlos, l): def _f(x): return x*np.log2(x)/(np.log(2)*(x-1)) a_c = a_gain_c/a_gain_d # antenna gain from CC to D2D a_d = 1 # antenna gain from D2D to BS d1_d = np.abs(d2d_tr - d2d_rc) d2_c = np.abs(d2d_tr) d1_c = np.abs(cc_ue) d2_d = np.abs(cc_ue - d2d_rc) # M, N def _m1(a, d1, d2): return a*(d1/d2)**(-alpha_los) def _m2(a, d1, d2): return a*k_los*d1**(-alpha_los) / (k_nlos*d2**(-alpha_nlos)) def _m3(a, d1, d2): return a*k_nlos*d1**(-alpha_nlos) / (k_los*d2**(-alpha_los)) def _m4(a, d1, d2): return a*(d1/d2)**(-alpha_nlos) def _n1(d1, d2): return np.exp(-(d1**2+d2**2)/l**2) def _n2(d1, d2): return np.exp(-d1**2/l**2) * (1-np.exp(-d2**2/l**2)) def _n3(d1, d2): return np.exp(-d2**2/l**2) * (1-np.exp(-d1**2/l**2)) def _n4(d1, d2): return (1-np.exp(-d1**2/l**2)) * (1-np.exp(-d2**2/l**2)) # equation def _f_beta_delta(beta): delta = (rc - _sum(lambda x, y: y*_f(x/beta), [_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c), _m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)], [_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)])) / \ _sum(lambda x, y: y*_f(beta*x)-y*_f(x/beta), [_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c), _m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)], [_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)]) # lambda1 = _sum(lambda x, y: y*_f(beta*x)-y*_f(x/beta), # [_m1(a_d, d1_d, d2_d), _m2(a_d, d1_d, d2_d), # _m3(a_d, d1_d, d2_d), _m4(a_d, d1_d, d2_d)], # [_n1(d1_d, d2_d), _n2(d1_d, d2_d), _n3(d1_d, d2_d), _n4(d1_d, d2_d)]) / \ # _sum(lambda x, y: y*_f(x/beta)+y*_f(beta*x), # [_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c), # _m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)], # [_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)]) lambda1 = 0 # h1, h2 def _h1(x, y): return x*y*(1-x/beta)/np.log(2)+np.log2(x/beta)/(np.log(2)*(x-beta)**2) def _h2(x, y): return y*_f(beta*x)/beta + beta*x*y*((1-1.0/(beta*x))/np.log(2)-x*np.log2(beta*x)) / \ (np.log(2)*(beta*x-1)**2) return _sum(lambda xc, yc, xd, yd: delta*_h1(xd, yd)+(1-delta)*_h2(xd, yd)-lambda1*((1-delta)*_h1(xc, yc)+delta*_h2(xc, yc)), [_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c), _m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)], [_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)], [_m1(a_d, d1_d, d2_d), _m2(a_d, d1_d, d2_d), _m3(a_d, d1_d, d2_d), _m4(a_d, d1_d, d2_d)], [_n1(d1_d, d2_d), _n2(d1_d, d2_d), _n3(d1_d, d2_d), _n4(d1_d, d2_d)]) opt_beta = scipy.optimize.brentq(_f_beta_delta, 1e-5, 1-1e-5) opt_delta = (rc - _sum(lambda x, y: y*_f(x/opt_beta), [_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c), _m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)], [_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)])) / \ _sum(lambda x, y: y*_f(opt_beta*x)-y*_f(x/opt_beta), [_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c), _m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)], [_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)]) return opt_beta, opt_delta def cal_ergodic_subopt_tp(d2d_tr, d2d_rc, cc_ue, rc_ratio, a_gain_c, a_gain_d, beta, k_los, k_nlos, alpha_los, alpha_nlos, l): def _f(x): return x*np.log2(x)/(np.log(2)*(x-1)) a_c = a_gain_c/a_gain_d # antenna gain from CC to D2D d2_c = np.abs(d2d_tr) d1_c = np.abs(cc_ue) # M, N def _m1(a, d1, d2): return a*(d1/d2)**(-alpha_los) def _m2(a, d1, d2): return a*k_los*d1**(-alpha_los) / (k_nlos*d2**(-alpha_nlos)) def _m3(a, d1, d2): return a*k_nlos*d1**(-alpha_nlos) / (k_los*d2**(-alpha_los)) def _m4(a, d1, d2): return 
a*(d1/d2)**(-alpha_nlos) def _n1(d1, d2): return np.exp(-(d1**2+d2**2)/l**2) def _n2(d1, d2): return np.exp(-d1**2/l**2) * (1-np.exp(-d2**2/l**2)) def _n3(d1, d2): return np.exp(-d2**2/l**2) * (1-np.exp(-d1**2/l**2)) def _n4(d1, d2): return (1-np.exp(-d1**2/l**2)) * (1-np.exp(-d2**2/l**2)) # rc max_rc = _sum(lambda x, y: y*_f(x/beta), [_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c), _m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)], [_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)]) min_rc = _sum(lambda x, y: y*_f(x*beta), [_m1(a_c, d1_c, d2_c), _m2(a_c, d1_c, d2_c), _m3(a_c, d1_c, d2_c), _m4(a_c, d1_c, d2_c)], [_n1(d1_c, d2_c), _n2(d1_c, d2_c), _n3(d1_c, d2_c), _n4(d1_c, d2_c)]) rc = rc_ratio * (max_rc - min_rc) + min_rc print(rc) delta = (rc - max_rc) / (min_rc - max_rc) return delta
mit
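A hypothetical call of cal_D2D_basic_tp from the record above. The import path is inferred from the record's path field (d2d/rrm.py), the numeric positions and channel gains are invented, and the call only works if the package's unshown core helpers (cal_thermal_noise, cal_umi_nlos) behave as the docstring implies.

import numpy as np
from d2d.rrm import cal_D2D_basic_tp

# Invented scenario: two D2D transmitters given as complex coordinates (metres)
# relative to the base station, with assumed D2D-to-BS channel gains.
d2d_ues = np.array([30 + 40j, -25 + 60j])
g_d2d_bs = np.array([1e-9, 5e-10])

tp = cal_D2D_basic_tp(d2d_ues, g_d2d_bs, kappa=10.0, bw=180e3,
                      alpha=3.5, freq=2e9)
print(tp)  # per-transmitter D2D transmit power, per the docstring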
artefactual/archivematica-history
src/MCPClient/lib/clientScripts/checkForSubmissionDocumenation.py
1
1358
#!/usr/bin/python -OO
# This file is part of Archivematica.
#
# Copyright 2010-2012 Artefactual Systems Inc. <http://artefactual.com>
#
# Archivematica is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Archivematica is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Archivematica. If not, see <http://www.gnu.org/licenses/>.

# @package Archivematica
# @subpackage archivematicaClientScript
# @author Joseph Perry <[email protected]>
# @version svn: $Id$

import os
import sys

target = sys.argv[1]

if not os.path.isdir(target):
    print >>sys.stderr, "Directory doesn't exist: ", target
    os.mkdir(target)

if os.listdir(target) == []:
    print >>sys.stderr, "Directory is empty: ", target
    fileName = os.path.join(target, "submissionDocumentation.log")
    f = open(fileName, 'a')
    f.write("No submission documentation added")
    f.close()
    os.chmod(fileName, 488)
else:
    exit(0)
agpl-3.0
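The script above targets Python 2 (print >>sys.stderr). As an illustration only, not Archivematica's code, the same decision logic reads like this in Python 3:

import os
import sys

def check_submission_documentation(target):
    # Same logic as the client script above, written for Python 3.
    if not os.path.isdir(target):
        print("Directory doesn't exist:", target, file=sys.stderr)
        os.mkdir(target)
    if not os.listdir(target):
        print("Directory is empty:", target, file=sys.stderr)
        log_path = os.path.join(target, "submissionDocumentation.log")
        with open(log_path, 'a') as f:
            f.write("No submission documentation added")
        os.chmod(log_path, 0o750)  # 488 decimal in the original
    return 0

if __name__ == '__main__':
    sys.exit(check_submission_documentation(sys.argv[1]))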
repotvsupertuga/tvsupertuga.repository
script.module.resolveurl/lib/resolveurl/plugins/speedwatch.py
2
1350
'''
SpeedWatch.io resolveurl plugin
Copyright (C) 2019 gujal

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''

import re
from lib import helpers
from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError


class SpeedWatchResolver(ResolveUrl):
    name = "speedwatch"
    domains = ["speedwatch.io"]
    pattern = r'(?://|\.)(speedwatch\.io)/(?:plyr|e)/([0-9a-zA-Z]+)'

    def __init__(self):
        self.net = common.Net()

    def get_media_url(self, host, media_id):
        return helpers.get_media_url(self.get_url(host, media_id),
                                     patterns=[r'''sources\s*:\s*\["(?P<url>[^"]+)'''],
                                     generic_patterns=False)

    def get_url(self, host, media_id):
        return self._default_get_url(host, media_id,
                                     template='https://www.{host}/e/{media_id}.html')
gpl-2.0
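ResolveUrl dispatches to a resolver by matching its host pattern against the page URL. This sketch only exercises the regex copied from the record above on an invented URL; it does not touch the resolver classes.

import re

pattern = r'(?://|\.)(speedwatch\.io)/(?:plyr|e)/([0-9a-zA-Z]+)'
match = re.search(pattern, 'https://www.speedwatch.io/e/abc123XYZ.html')
if match:
    host, media_id = match.groups()
    print(host, media_id)  # speedwatch.io abc123XYZ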
vivekanand1101/anitya
tests/test_plugins.py
2
2944
# -*- coding: utf-8 -*-
#
# Copyright © 2014  Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version.  This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE.  See the GNU General Public License for more details.  You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#

'''
anitya tests of the plugins.
'''

__requires__ = ['SQLAlchemy >= 0.7']
import pkg_resources

import datetime
import unittest
import sys
import os

sys.path.insert(0, os.path.join(os.path.dirname(
    os.path.abspath(__file__)), '..'))

import anitya.lib.plugins as plugins
from tests import Modeltests


class Pluginstests(Modeltests):
    """ Plugins tests. """

    def test_load_plugins(self):
        """ Test the plugins.load_plugins function. """
        plgns = [plg.name for plg in plugins.load_plugins(self.session)]
        self.assertEqual(len(plgns), 23)
        exp = [
            'CPAN (perl)', 'Debian project', 'Drupal6', 'Drupal7',
            'Freshmeat', 'GNOME', 'GNU project', 'GitHub', 'Google code',
            'Hackage', 'Launchpad', 'Maven Central', 'PEAR', 'PECL',
            'Packagist', 'PyPI', 'Rubygems', 'Sourceforge', 'Stackage',
            'custom', 'folder', 'npmjs', 'pagure',
        ]
        self.assertEqual(sorted(plgns), exp)

    def test_plugins_get_plugin_names(self):
        """ Test the plugins.get_plugin_names function. """
        plgns = plugins.get_plugin_names()
        self.assertEqual(len(plgns), 23)
        exp = [
            'CPAN (perl)', 'Debian project', 'Drupal6', 'Drupal7',
            'Freshmeat', 'GNOME', 'GNU project', 'GitHub', 'Google code',
            'Hackage', 'Launchpad', 'Maven Central', 'PEAR', 'PECL',
            'Packagist', 'PyPI', 'Rubygems', 'Sourceforge', 'Stackage',
            'custom', 'folder', 'npmjs', 'pagure',
        ]
        self.assertEqual(sorted(plgns), exp)

    def test_plugins_get_plugin(self):
        """ Test the plugins.get_plugin function. """
        plgns = plugins.get_plugin('PyPI')
        self.assertEqual(
            str(plgns),
            "<class 'anitya.lib.backends.pypi.PypiBackend'>")


if __name__ == '__main__':
    SUITE = unittest.TestLoader().loadTestsFromTestCase(Pluginstests)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
gpl-2.0
Br1an6/ACS_Netplumber_Implementation
hsa-python/net_plumbing/examples/load_stanford_backbone.py
5
12031
''' <Loads Stanford backbone network into appropriate objects (e.g. emulated_tf)> Copyright 2012, Stanford University. This file is licensed under GPL v2 plus a special exception, as described in included LICENSE_EXCEPTION.txt. Created on Aug 13, 2011 @author: Peyman Kazemian ''' from headerspace.tf import * from headerspace.hs import * from headerspace.nu_smv_generator import * from examples.emulated_tf import * from utils.helper import dotted_ip_to_int from config_parser.cisco_router_parser import cisco_router from utils.helper import compose_standard_rules rtr_names = ["bbra_rtr", "bbrb_rtr", "boza_rtr", "bozb_rtr", "coza_rtr", "cozb_rtr", "goza_rtr", "gozb_rtr", "poza_rtr", "pozb_rtr", "roza_rtr", "rozb_rtr", "soza_rtr", "sozb_rtr", "yoza_rtr", "yozb_rtr", ] def load_stanford_backbone_ntf(): ''' Loads Stanford backbone network transfer functions into an emulated_tf object with 3 layers. ''' emul_tf = emulated_tf(3) emul_tf.set_fwd_engine_stage(1) for rtr_name in rtr_names: f = TF(1) f.load_object_from_file("tf_stanford_backbone/%s.tf"%rtr_name) f.activate_hash_table([15,14]) emul_tf.append_tf(f) emul_tf.length = f.length return emul_tf def load_stanford_backbone_ttf(): ''' Loads Stanford backbone topology transfer functions into a transfer function object ''' f = TF(1) f.load_object_from_file("tf_stanford_backbone/backbone_topology.tf") return f def load_stanford_ip_fwd_ntf(): ''' Loads IP forwarding part of Stanford backbone network transfer functions into an emulated_tf object with 1 layer. ''' emul_tf = emulated_tf(1) emul_tf.set_fwd_engine_stage(0) emul_tf.output_port_const = 0 for rtr_name in rtr_names: f = TF(1) f.load_object_from_file("tf_simple_stanford_backbone/%s.tf"%rtr_name) #f.activate_hash_table([3,2]) emul_tf.append_tf(f) emul_tf.length = f.length return emul_tf def load_stanford_ip_fwd_ttf(): ''' Loads Stanford backbone topology transfer functions into a transfer function object. this should be used together with load_stanford_ip_fwd_ntf. ''' f = TF(1) f.load_object_from_file("tf_simple_stanford_backbone/backbone_topology.tf") return f def load_port_to_id_map(path): ''' load the map from port ID to name of box-port name. 
''' f = open("%s/port_map.txt"%path,'r') id_to_name = {} map = {} rtr = "" cs = cisco_router(1) for line in f: if line.startswith("$"): rtr = line[1:].strip() map[rtr] = {} elif line != "": tokens = line.strip().split(":") map[rtr][tokens[0]] = int(tokens[1]) id_to_name[tokens[1]] = "%s-%s"%(rtr,tokens[0]) out_port = int(tokens[1]) + cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST id_to_name["%s"%out_port] = "%s-%s"%(rtr,tokens[0]) return (map,id_to_name) def load_stanford_backbone_port_to_id_map(): return load_port_to_id_map("tf_stanford_backbone") def load_replicated_stanford_network(replicate,path): ''' Load the transfer functions created by generate_augmented_stanford_backbone_tf.py ''' ttf = TF(1) ttf.load_object_from_file("%s/backbone_topology.tf"%path) (name_to_id,id_to_name) = load_port_to_id_map(path) emul_tf = emulated_tf(3) for i in range(replicate): for rtr_name in rtr_names: f = TF(1) f.load_object_from_file("%s/%s%d.tf"%(path,rtr_name,i+1)) #f.activate_hash_table([15,14]) emul_tf.append_tf(f) f = TF(1) f.load_object_from_file("%s/root.tf"%(path)) f.activate_hash_table([15,14]) emul_tf.append_tf(f) return (emul_tf,ttf,name_to_id,id_to_name) def add_internet(ntf,ttf,port_map,cs,campus_ip_list): ''' Campus IP list is a list of (ip address,subnet mask) for each IP subnet on campus ''' s = TF(cs.HS_FORMAT()["length"]*2) s.set_prefix_id("internet") for entry in campus_ip_list: match = byte_array_get_all_x(cs.HS_FORMAT()["length"]*2) cs.set_field(match,'ip_dst',dotted_ip_to_int(entry[0]),32-entry[1]) rule = TF.create_standard_rule([0], match, [0], None, None, "", []) s.add_fwd_rule(rule) ntf.append_tf(s) bbra_internet_port1 = port_map["bbra_rtr"]["te1/1"] bbra_internet_port2 = port_map["bbra_rtr"]["te7/4"] bbrb_internet_port1 = port_map["bbrb_rtr"]["te1/4"] bbrb_internet_port2 = port_map["bbrb_rtr"]["te7/3"] rule = TF.create_standard_rule([bbra_internet_port1], None,[0], None, None, "", []) ttf.add_link_rule(rule) rule = TF.create_standard_rule([bbra_internet_port2], None,[0], None, None, "", []) ttf.add_link_rule(rule) rule = TF.create_standard_rule([bbrb_internet_port1], None,[0], None, None, "", []) ttf.add_link_rule(rule) rule = TF.create_standard_rule([bbrb_internet_port2], None,[0], None, None, "", []) ttf.add_link_rule(rule) rule = TF.create_standard_rule([0], None,[bbra_internet_port1], None, None, "", []) ttf.add_link_rule(rule) rule = TF.create_standard_rule([0], None,[bbra_internet_port2], None, None, "", []) ttf.add_link_rule(rule) rule = TF.create_standard_rule([0], None,[bbrb_internet_port1], None, None, "", []) ttf.add_link_rule(rule) rule = TF.create_standard_rule([0], None,[bbrb_internet_port2], None, None, "", []) ttf.add_link_rule(rule) def get_end_ports(name_to_id,index): linked_ports = [("bbra_rtr","te7/3"), ("bbra_rtr","te7/2"), ("bbra_rtr","te7/1"), ("bbra_rtr","te1/3"), ("bbra_rtr","te1/4"), ("bbra_rtr","te6/1"), ("bbra_rtr","te6/3"), ("bbrb_rtr","te7/1"), ("bbrb_rtr","te7/2"), ("bbrb_rtr","te7/4"), ("bbrb_rtr","te6/3"), ("bbrb_rtr","te6/1"), ("bbrb_rtr","te1/1"), ("bbrb_rtr","te1/3"), ("boza_rtr","te2/1"), ("boza_rtr","te3/1"), ("boza_rtr","te2/3"), ("bozb_rtr","te2/3"), ("bozb_rtr","te2/1"), ("bozb_rtr","te3/1"), ("coza_rtr","te3/1"), ("coza_rtr","te2/1"), ("coza_rtr","te2/3"), ("cozb_rtr","te2/3"), ("cozb_rtr","te2/1"), ("cozb_rtr","te3/1"), ("goza_rtr","te2/1"), ("goza_rtr","te3/1"), ("goza_rtr","te2/3"), ("gozb_rtr","te2/3"), ("gozb_rtr","te2/1"), ("gozb_rtr","te3/1"), ("poza_rtr","te2/1"), ("poza_rtr","te3/1"), ("poza_rtr","te2/3"), 
("pozb_rtr","te2/3"), ("pozb_rtr","te2/1"), ("pozb_rtr","te3/1"), ("roza_rtr","te3/1"), ("roza_rtr","te2/1"), ("roza_rtr","te2/3"), ("rozb_rtr","te2/3"), ("rozb_rtr","te2/1"), ("rozb_rtr","te3/1"), ("soza_rtr","te2/1"), ("soza_rtr","te3/1"), ("soza_rtr","te2/3"), ("sozb_rtr","te2/3"), ("sozb_rtr","te3/1"), ("sozb_rtr","te2/1"), ("yoza_rtr","te7/1"), ("yoza_rtr","te1/3"), ("yoza_rtr","te1/1"), ("yoza_rtr","te1/2"), ("yozb_rtr","te1/2"), ("yozb_rtr","te1/3"), ("yozb_rtr","te2/1"), ("yozb_rtr","te1/1"), ] end_ports = [] cs = cisco_router(1) for rtr_name in rtr_names: mod_rtr_name = "%s%s"%(rtr_name,index) for rtr_port in name_to_id[mod_rtr_name]: if (rtr_name,rtr_port) not in linked_ports: end_ports.append(name_to_id[mod_rtr_name][rtr_port] + cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST) return end_ports def load_tf_to_nusmv(): ''' For Model Checking Project. Creates NuSMV file from transfer function objects of Stanford network. ''' nusmv = NuSMV() cs = cisco_router(1) nusmv.set_output_port_offset(cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST) for rtr_name in rtr_names: f = TF(1) f.load_object_from_file("tf_stanford_backbone/%s.tf"%rtr_name) nusmv.generate_nusmv_trans(f, []) (port_map,port_reverse_map) = load_stanford_backbone_port_to_id_map() end_ports = get_end_ports(port_map,"") f = TF(1) f.load_object_from_file("tf_stanford_backbone/backbone_topology.tf") nusmv.generate_nusmv_trans(f,end_ports) nusmv.generate_nusmv_input() return nusmv def load_augmented_tf_to_nusmv(replication_factor,dir_path): ''' For Model Checking Project. Creates NuSMV file from transfer function objects of replicated Stanford network. ''' nusmv = NuSMV() cs = cisco_router(1) nusmv.set_output_port_offset(cs.PORT_TYPE_MULTIPLIER * cs.OUTPUT_PORT_TYPE_CONST) (port_map,port_reverse_map) = load_port_to_id_map(dir_path) end_ports = [] for replicate in range(1,replication_factor+1): for rtr_name in rtr_names: f = TF(1) f.load_object_from_file("%s/%s%d.tf"%(dir_path,rtr_name,replicate)) nusmv.generate_nusmv_trans(f, []) end_ports_subset = get_end_ports(port_map,"%d"%replicate) end_ports.extend(end_ports_subset) f = TF(1) f.load_object_from_file("%s/root.tf"%(dir_path)) nusmv.generate_nusmv_trans(f, []) f = TF(1) f.load_object_from_file("%s/backbone_topology.tf"%dir_path) nusmv.generate_nusmv_trans(f,end_ports) nusmv.generate_nusmv_input() return nusmv def generate_stanford_backbne_one_layer_tf(): ''' Tries to merge the 3 layers of transfer function into one layer. WARNING: the result will be huge. ''' for rtr_name in rtr_names: f = TF(1) f.load_object_from_file("tf_stanford_backbone/%s.tf"%rtr_name) stage1_rules = [] stage2_rules = [] stage3_rules = [] stage1_2_rules = [] for rule in f.rules: if (rule["in_ports"][0] % 100000) == 0: stage2_rules.append(rule) elif ((rule["in_ports"][0] % 100000)/ 10000) == 0: stage1_rules.append(rule) elif ((rule["in_ports"][0] % 100000)/ 10000) == 1: stage3_rules.append(rule) for stage2_rule in stage2_rules: for stage1_rule in stage1_rules: r = compose_standard_rules(stage1_rule,stage2_rule) if r != None: stage1_2_rules.append(r) for stage3_rule in stage3_rules: for stage1_2_rule in stage1_2_rules: r = compose_standard_rules(stage3_rule,stage1_2_rule) if r != None: if r["mask"] == None: f.add_fwd_rule(r) else: f.add_rewrite_rule(r) f.save_object_to_file("tf_stanford_backbone/%s_one_layer.tf"%rtr_name)
gpl-2.0
PaloAltoNetworks-BD/ansible-pan
library/panos_email_server.py
1
4152
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function __metaclass__ = type # Copyright 2019 Palo Alto Networks, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: panos_email_server short_description: Manage email servers in an email profile. description: - Manages email servers in an email server profile. author: "Garfield Lee Freeman (@shinmog)" version_added: "2.8" requirements: - pan-python - pandevice >= 0.11.1 notes: - Panorama is supported. - Check mode is supported. extends_documentation_fragment: - panos.transitional_provider - panos.vsys_shared - panos.device_group options: email_profile: description: - Name of the email server profile. required: True name: description: - Server name. required: True display_name: description: - Display name from_email: description: - From email address to_email: description: - Destination email address. also_to_email: description: - Additional destination email address email_gateway: description: - IP address or FQDN of email gateway to use. ''' EXAMPLES = ''' # Create a profile - name: Create email server in an email profile panos_email_server: provider: '{{ provider }}' email_profile: 'my-profile' name: 'my-email-server' from_email: '[email protected]' to_email: '[email protected]' email_gateway: 'smtp.example.com' ''' RETURN = ''' # Default return values ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.panos.panos import get_connection try: from pandevice.device import EmailServerProfile from pandevice.device import EmailServer from pandevice.errors import PanDeviceError except ImportError: pass def main(): helper = get_connection( vsys_shared=True, device_group=True, with_state=True, with_classic_provider_spec=True, min_pandevice_version=(0, 11, 1), min_panos_version=(7, 1, 0), argument_spec=dict( email_profile=dict(required=True), name=dict(required=True), display_name=dict(), from_email=dict(), to_email=dict(), also_to_email=dict(), email_gateway=dict(), ), ) module = AnsibleModule( argument_spec=helper.argument_spec, supports_check_mode=True, required_one_of=helper.required_one_of, ) # Verify imports, build pandevice object tree. parent = helper.get_pandevice_parent(module) sp = EmailServerProfile(module.params['email_profile']) parent.add(sp) try: sp.refresh() except PanDeviceError as e: module.fail_json(msg='Failed refresh: {0}'.format(e)) listing = sp.findall(EmailServer) spec = { 'name': module.params['name'], 'display_name': module.params['display_name'], 'from': module.params['from_email'], 'to': module.params['to_email'], 'also_to': module.params['also_to_email'], 'email_gateway': module.params['email_gateway'], } obj = EmailServer(**spec) sp.add(obj) changed = helper.apply_state(obj, listing, module) module.exit_json(changed=changed, msg='Done') if __name__ == '__main__': main()
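# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the module): roughly what main() above does
# through pandevice when talking to a firewall directly. The hostname and
# credentials are placeholders, and the single create() call is a
# simplification of the state handling performed by helper.apply_state().
# ---------------------------------------------------------------------------
def _example_direct_pandevice_use():
    from pandevice.firewall import Firewall

    fw = Firewall('192.0.2.1', 'admin', 'changeme')   # placeholder device/credentials
    profile = EmailServerProfile('my-profile')
    fw.add(profile)

    # 'from' is a Python keyword, so the server entry is built from a dict and
    # unpacked, exactly as main() does with its spec dictionary.
    spec = {
        'name': 'my-email-server',
        'from': '[email protected]',
        'to': '[email protected]',
        'email_gateway': 'smtp.example.com',
    }
    server = EmailServer(**spec)
    profile.add(server)

    # Push the profile (with its child server entry) to the device.
    profile.create()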
isc
xHeliotrope/injustice_dropper
env/lib/python3.4/site-packages/django/db/models/query.py
10
71207
""" The main QuerySet implementation. This provides the public API for the ORM. """ import copy import sys import warnings from collections import OrderedDict, deque from django.conf import settings from django.core import exceptions from django.db import ( DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router, transaction, ) from django.db.models import sql from django.db.models.constants import LOOKUP_SEP from django.db.models.deletion import Collector from django.db.models.expressions import F, Date, DateTime from django.db.models.fields import AutoField, Empty from django.db.models.query_utils import ( Q, InvalidQuery, deferred_class_factory, ) from django.db.models.sql.constants import CURSOR from django.utils import six, timezone from django.utils.functional import partition from django.utils.version import get_version # The maximum number of items to display in a QuerySet.__repr__ REPR_OUTPUT_SIZE = 20 # Pull into this namespace for backwards compatibility. EmptyResultSet = sql.EmptyResultSet def _pickle_queryset(class_bases, class_dict): """ Used by `__reduce__` to create the initial version of the `QuerySet` class onto which the output of `__getstate__` will be applied. See `__reduce__` for more details. """ new = Empty() new.__class__ = type(class_bases[0].__name__, class_bases, class_dict) return new class QuerySet(object): """ Represents a lazy database lookup for a set of objects. """ def __init__(self, model=None, query=None, using=None, hints=None): self.model = model self._db = using self._hints = hints or {} self.query = query or sql.Query(self.model) self._result_cache = None self._sticky_filter = False self._for_write = False self._prefetch_related_lookups = [] self._prefetch_done = False self._known_related_objects = {} # {rel_field, {pk: rel_obj}} def as_manager(cls): # Address the circular dependency between `Queryset` and `Manager`. from django.db.models.manager import Manager manager = Manager.from_queryset(cls)() manager._built_with_as_manager = True return manager as_manager.queryset_only = True as_manager = classmethod(as_manager) ######################## # PYTHON MAGIC METHODS # ######################## def __deepcopy__(self, memo): """ Deep copy of a QuerySet doesn't populate the cache """ obj = self.__class__() for k, v in self.__dict__.items(): if k == '_result_cache': obj.__dict__[k] = None else: obj.__dict__[k] = copy.deepcopy(v, memo) return obj def __getstate__(self): """ Allows the QuerySet to be pickled. """ # Force the cache to be fully populated. self._fetch_all() obj_dict = self.__dict__.copy() obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version() return obj_dict def __setstate__(self, state): msg = None pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY) if pickled_version: current_version = get_version() if current_version != pickled_version: msg = ("Pickled queryset instance's Django version %s does" " not match the current version %s." % (pickled_version, current_version)) else: msg = "Pickled queryset instance's Django version is not specified." if msg: warnings.warn(msg, RuntimeWarning, stacklevel=2) self.__dict__.update(state) def __reduce__(self): """ Used by pickle to deal with the types that we create dynamically when specialized queryset such as `ValuesQuerySet` are used in conjunction with querysets that are *subclasses* of `QuerySet`. See `_clone` implementation for more details. 
""" if hasattr(self, '_specialized_queryset_class'): class_bases = ( self._specialized_queryset_class, self._base_queryset_class, ) class_dict = { '_specialized_queryset_class': self._specialized_queryset_class, '_base_queryset_class': self._base_queryset_class, } return _pickle_queryset, (class_bases, class_dict), self.__getstate__() return super(QuerySet, self).__reduce__() def __repr__(self): data = list(self[:REPR_OUTPUT_SIZE + 1]) if len(data) > REPR_OUTPUT_SIZE: data[-1] = "...(remaining elements truncated)..." return repr(data) def __len__(self): self._fetch_all() return len(self._result_cache) def __iter__(self): """ The queryset iterator protocol uses three nested iterators in the default case: 1. sql.compiler:execute_sql() - Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE) using cursor.fetchmany(). This part is responsible for doing some column masking, and returning the rows in chunks. 2. sql/compiler.results_iter() - Returns one row at time. At this point the rows are still just tuples. In some cases the return values are converted to Python values at this location. 3. self.iterator() - Responsible for turning the rows into model objects. """ self._fetch_all() return iter(self._result_cache) def __bool__(self): self._fetch_all() return bool(self._result_cache) def __nonzero__(self): # Python 2 compatibility return type(self).__bool__(self) def __getitem__(self, k): """ Retrieves an item or slice from the set of results. """ if not isinstance(k, (slice,) + six.integer_types): raise TypeError assert ((not isinstance(k, slice) and (k >= 0)) or (isinstance(k, slice) and (k.start is None or k.start >= 0) and (k.stop is None or k.stop >= 0))), \ "Negative indexing is not supported." if self._result_cache is not None: return self._result_cache[k] if isinstance(k, slice): qs = self._clone() if k.start is not None: start = int(k.start) else: start = None if k.stop is not None: stop = int(k.stop) else: stop = None qs.query.set_limits(start, stop) return list(qs)[::k.step] if k.step else qs qs = self._clone() qs.query.set_limits(k, k + 1) return list(qs)[0] def __and__(self, other): self._merge_sanity_check(other) if isinstance(other, EmptyQuerySet): return other if isinstance(self, EmptyQuerySet): return self combined = self._clone() combined._merge_known_related_objects(other) combined.query.combine(other.query, sql.AND) return combined def __or__(self, other): self._merge_sanity_check(other) if isinstance(self, EmptyQuerySet): return other if isinstance(other, EmptyQuerySet): return self combined = self._clone() combined._merge_known_related_objects(other) combined.query.combine(other.query, sql.OR) return combined #################################### # METHODS THAT DO DATABASE QUERIES # #################################### def iterator(self): """ An iterator over the results from applying this QuerySet to the database. """ db = self.db compiler = self.query.get_compiler(using=db) # Execute the query. This will also fill compiler.select, klass_info, # and annotations. 
results = compiler.execute_sql() select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info, compiler.annotation_col_map) if klass_info is None: return model_cls = klass_info['model'] select_fields = klass_info['select_fields'] model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1 init_list = [f[0].target.attname for f in select[model_fields_start:model_fields_end]] if len(init_list) != len(model_cls._meta.concrete_fields): init_set = set(init_list) skip = [f.attname for f in model_cls._meta.concrete_fields if f.attname not in init_set] model_cls = deferred_class_factory(model_cls, skip) related_populators = get_related_populators(klass_info, select, db) for row in compiler.results_iter(results): obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end]) if related_populators: for rel_populator in related_populators: rel_populator.populate(row, obj) if annotation_col_map: for attr_name, col_pos in annotation_col_map.items(): setattr(obj, attr_name, row[col_pos]) # Add the known related objects to the model, if there are any if self._known_related_objects: for field, rel_objs in self._known_related_objects.items(): # Avoid overwriting objects loaded e.g. by select_related if hasattr(obj, field.get_cache_name()): continue pk = getattr(obj, field.get_attname()) try: rel_obj = rel_objs[pk] except KeyError: pass # may happen in qs1 | qs2 scenarios else: setattr(obj, field.name, rel_obj) yield obj def aggregate(self, *args, **kwargs): """ Returns a dictionary containing the calculations (aggregation) over the current queryset If args is present the expression is passed as a kwarg using the Aggregate object's default alias. """ if self.query.distinct_fields: raise NotImplementedError("aggregate() + distinct(fields) not implemented.") for arg in args: # The default_alias property may raise a TypeError, so we use # a try/except construct rather than hasattr in order to remain # consistent between PY2 and PY3 (hasattr would swallow # the TypeError on PY2). try: arg.default_alias except (AttributeError, TypeError): raise TypeError("Complex aggregates require an alias") kwargs[arg.default_alias] = arg query = self.query.clone() for (alias, aggregate_expr) in kwargs.items(): query.add_annotation(aggregate_expr, alias, is_summary=True) if not query.annotations[alias].contains_aggregate: raise TypeError("%s is not an aggregate expression" % alias) return query.get_aggregation(self.db, kwargs.keys()) def count(self): """ Performs a SELECT COUNT() and returns the number of records as an integer. If the QuerySet is already fully cached this simply returns the length of the cached results set to avoid multiple SELECT COUNT(*) calls. """ if self._result_cache is not None: return len(self._result_cache) return self.query.get_count(using=self.db) def get(self, *args, **kwargs): """ Performs the query and returns a single object matching the given keyword arguments. """ clone = self.filter(*args, **kwargs) if self.query.can_filter(): clone = clone.order_by() num = len(clone) if num == 1: return clone._result_cache[0] if not num: raise self.model.DoesNotExist( "%s matching query does not exist." % self.model._meta.object_name ) raise self.model.MultipleObjectsReturned( "get() returned more than one %s -- it returned %s!" % (self.model._meta.object_name, num) ) def create(self, **kwargs): """ Creates a new object with the given kwargs, saving it to the database and returning the created object. 
""" obj = self.model(**kwargs) self._for_write = True obj.save(force_insert=True, using=self.db) return obj def _populate_pk_values(self, objs): for obj in objs: if obj.pk is None: obj.pk = obj._meta.pk.get_pk_value_on_save(obj) def bulk_create(self, objs, batch_size=None): """ Inserts each of the instances into the database. This does *not* call save() on each of the instances, does not send any pre/post save signals, and does not set the primary key attribute if it is an autoincrement field. """ # So this case is fun. When you bulk insert you don't get the primary # keys back (if it's an autoincrement), so you can't insert into the # child tables which references this. There are two workarounds, 1) # this could be implemented if you didn't have an autoincrement pk, # and 2) you could do it by doing O(n) normal inserts into the parent # tables to get the primary keys back, and then doing a single bulk # insert into the childmost table. Some databases might allow doing # this by using RETURNING clause for the insert query. We're punting # on these for now because they are relatively rare cases. assert batch_size is None or batch_size > 0 if self.model._meta.parents: raise ValueError("Can't bulk create an inherited model") if not objs: return objs self._for_write = True connection = connections[self.db] fields = self.model._meta.local_concrete_fields objs = list(objs) self._populate_pk_values(objs) with transaction.atomic(using=self.db, savepoint=False): if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk and self.model._meta.has_auto_field): self._batched_insert(objs, fields, batch_size) else: objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs) if objs_with_pk: self._batched_insert(objs_with_pk, fields, batch_size) if objs_without_pk: fields = [f for f in fields if not isinstance(f, AutoField)] self._batched_insert(objs_without_pk, fields, batch_size) return objs def get_or_create(self, defaults=None, **kwargs): """ Looks up an object with the given kwargs, creating one if necessary. Returns a tuple of (object, created), where created is a boolean specifying whether an object was created. """ lookup, params = self._extract_model_params(defaults, **kwargs) self._for_write = True try: return self.get(**lookup), False except self.model.DoesNotExist: return self._create_object_from_params(lookup, params) def update_or_create(self, defaults=None, **kwargs): """ Looks up an object with the given kwargs, updating one with defaults if it exists, otherwise creates a new one. Returns a tuple (object, created), where created is a boolean specifying whether an object was created. """ defaults = defaults or {} lookup, params = self._extract_model_params(defaults, **kwargs) self._for_write = True try: obj = self.get(**lookup) except self.model.DoesNotExist: obj, created = self._create_object_from_params(lookup, params) if created: return obj, created for k, v in six.iteritems(defaults): setattr(obj, k, v) with transaction.atomic(using=self.db, savepoint=False): obj.save(using=self.db) return obj, False def _create_object_from_params(self, lookup, params): """ Tries to create an object using passed params. 
Used by get_or_create and update_or_create """ try: with transaction.atomic(using=self.db): obj = self.create(**params) return obj, True except IntegrityError: exc_info = sys.exc_info() try: return self.get(**lookup), False except self.model.DoesNotExist: pass six.reraise(*exc_info) def _extract_model_params(self, defaults, **kwargs): """ Prepares `lookup` (kwargs that are valid model attributes), `params` (for creating a model instance) based on given kwargs; for use by get_or_create and update_or_create. """ defaults = defaults or {} lookup = kwargs.copy() for f in self.model._meta.fields: if f.attname in lookup: lookup[f.name] = lookup.pop(f.attname) params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k} params.update(defaults) return lookup, params def _earliest_or_latest(self, field_name=None, direction="-"): """ Returns the latest object, according to the model's 'get_latest_by' option or optional given field_name. """ order_by = field_name or getattr(self.model._meta, 'get_latest_by') assert bool(order_by), "earliest() and latest() require either a "\ "field_name parameter or 'get_latest_by' in the model" assert self.query.can_filter(), \ "Cannot change a query once a slice has been taken." obj = self._clone() obj.query.set_limits(high=1) obj.query.clear_ordering(force_empty=True) obj.query.add_ordering('%s%s' % (direction, order_by)) return obj.get() def earliest(self, field_name=None): return self._earliest_or_latest(field_name=field_name, direction="") def latest(self, field_name=None): return self._earliest_or_latest(field_name=field_name, direction="-") def first(self): """ Returns the first object of a query, returns None if no match is found. """ objects = list((self if self.ordered else self.order_by('pk'))[:1]) if objects: return objects[0] return None def last(self): """ Returns the last object of a query, returns None if no match is found. """ objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1]) if objects: return objects[0] return None def in_bulk(self, id_list): """ Returns a dictionary mapping each of the given IDs to the object with that ID. """ assert self.query.can_filter(), \ "Cannot use 'limit' or 'offset' with in_bulk" if not id_list: return {} qs = self.filter(pk__in=id_list).order_by() return {obj._get_pk_val(): obj for obj in qs} def delete(self): """ Deletes the records in the current QuerySet. """ assert self.query.can_filter(), \ "Cannot use 'limit' or 'offset' with delete." del_query = self._clone() # The delete is actually 2 queries - one to find related objects, # and one to delete. Make sure that the discovery of related # objects is performed on the same database as the deletion. del_query._for_write = True # Disable non-supported fields. del_query.query.select_for_update = False del_query.query.select_related = False del_query.query.clear_ordering(force_empty=True) collector = Collector(using=del_query.db) collector.collect(del_query) collector.delete() # Clear the result cache, in case this QuerySet gets reused. self._result_cache = None delete.alters_data = True delete.queryset_only = True def _raw_delete(self, using): """ Deletes objects found from the given queryset in single direct SQL query. No signals are sent, and there is no protection for cascades. """ sql.DeleteQuery(self.model).delete_qs(self, using) _raw_delete.alters_data = True def update(self, **kwargs): """ Updates all elements in the current QuerySet, setting all the given fields to the appropriate values. 
""" assert self.query.can_filter(), \ "Cannot update a query once a slice has been taken." self._for_write = True query = self.query.clone(sql.UpdateQuery) query.add_update_values(kwargs) with transaction.atomic(using=self.db, savepoint=False): rows = query.get_compiler(self.db).execute_sql(CURSOR) self._result_cache = None return rows update.alters_data = True def _update(self, values): """ A version of update that accepts field objects instead of field names. Used primarily for model saving and not intended for use by general code (it requires too much poking around at model internals to be useful at that level). """ assert self.query.can_filter(), \ "Cannot update a query once a slice has been taken." query = self.query.clone(sql.UpdateQuery) query.add_update_fields(values) self._result_cache = None return query.get_compiler(self.db).execute_sql(CURSOR) _update.alters_data = True _update.queryset_only = False def exists(self): if self._result_cache is None: return self.query.has_results(using=self.db) return bool(self._result_cache) def _prefetch_related_objects(self): # This method can only be called once the result cache has been filled. prefetch_related_objects(self._result_cache, self._prefetch_related_lookups) self._prefetch_done = True ################################################## # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS # ################################################## def raw(self, raw_query, params=None, translations=None, using=None): if using is None: using = self.db return RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using) def values(self, *fields): return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields) def values_list(self, *fields, **kwargs): flat = kwargs.pop('flat', False) if kwargs: raise TypeError('Unexpected keyword arguments to values_list: %s' % (list(kwargs),)) if flat and len(fields) > 1: raise TypeError("'flat' is not valid when values_list is called with more than one field.") return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat, _fields=fields) def dates(self, field_name, kind, order='ASC'): """ Returns a list of date objects representing all available dates for the given field_name, scoped to 'kind'. """ assert kind in ("year", "month", "day"), \ "'kind' must be one of 'year', 'month' or 'day'." assert order in ('ASC', 'DESC'), \ "'order' must be either 'ASC' or 'DESC'." return self.annotate( datefield=Date(field_name, kind), plain_field=F(field_name) ).values_list( 'datefield', flat=True ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield') def datetimes(self, field_name, kind, order='ASC', tzinfo=None): """ Returns a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'. """ assert kind in ("year", "month", "day", "hour", "minute", "second"), \ "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'." assert order in ('ASC', 'DESC'), \ "'order' must be either 'ASC' or 'DESC'." if settings.USE_TZ: if tzinfo is None: tzinfo = timezone.get_current_timezone() else: tzinfo = None return self.annotate( datetimefield=DateTime(field_name, kind, tzinfo), plain_field=F(field_name) ).values_list( 'datetimefield', flat=True ).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield') def none(self): """ Returns an empty QuerySet. 
""" clone = self._clone() clone.query.set_empty() return clone ################################################################## # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET # ################################################################## def all(self): """ Returns a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases. """ return self._clone() def filter(self, *args, **kwargs): """ Returns a new QuerySet instance with the args ANDed to the existing set. """ return self._filter_or_exclude(False, *args, **kwargs) def exclude(self, *args, **kwargs): """ Returns a new QuerySet instance with NOT (args) ANDed to the existing set. """ return self._filter_or_exclude(True, *args, **kwargs) def _filter_or_exclude(self, negate, *args, **kwargs): if args or kwargs: assert self.query.can_filter(), \ "Cannot filter a query once a slice has been taken." clone = self._clone() if negate: clone.query.add_q(~Q(*args, **kwargs)) else: clone.query.add_q(Q(*args, **kwargs)) return clone def complex_filter(self, filter_obj): """ Returns a new QuerySet instance with filter_obj added to the filters. filter_obj can be a Q object (or anything with an add_to_query() method) or a dictionary of keyword lookup arguments. This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods. """ if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'): clone = self._clone() clone.query.add_q(filter_obj) return clone else: return self._filter_or_exclude(None, **filter_obj) def select_for_update(self, nowait=False): """ Returns a new QuerySet instance that will select objects with a FOR UPDATE lock. """ obj = self._clone() obj._for_write = True obj.query.select_for_update = True obj.query.select_for_update_nowait = nowait return obj def select_related(self, *fields): """ Returns a new QuerySet instance that will select related objects. If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection. If select_related(None) is called, the list is cleared. """ obj = self._clone() if fields == (None,): obj.query.select_related = False elif fields: obj.query.add_select_related(fields) else: obj.query.select_related = True return obj def prefetch_related(self, *lookups): """ Returns a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated. When prefetch_related() is called more than once, the list of lookups to prefetch is appended to. If prefetch_related(None) is called, the list is cleared. """ clone = self._clone() if lookups == (None,): clone._prefetch_related_lookups = [] else: clone._prefetch_related_lookups.extend(lookups) return clone def annotate(self, *args, **kwargs): """ Return a query set in which the returned objects have been annotated with extra data or aggregations. """ annotations = OrderedDict() # To preserve ordering of args for arg in args: # The default_alias property may raise a TypeError, so we use # a try/except construct rather than hasattr in order to remain # consistent between PY2 and PY3 (hasattr would swallow # the TypeError on PY2). try: if arg.default_alias in kwargs: raise ValueError("The named annotation '%s' conflicts with the " "default name for another annotation." 
% arg.default_alias) except (AttributeError, TypeError): raise TypeError("Complex annotations require an alias") annotations[arg.default_alias] = arg annotations.update(kwargs) obj = self._clone() names = getattr(self, '_fields', None) if names is None: names = {f.name for f in self.model._meta.get_fields()} # Add the annotations to the query for alias, annotation in annotations.items(): if alias in names: raise ValueError("The annotation '%s' conflicts with a field on " "the model." % alias) obj.query.add_annotation(annotation, alias, is_summary=False) # expressions need to be added to the query before we know if they contain aggregates added_aggregates = [] for alias, annotation in obj.query.annotations.items(): if alias in annotations and annotation.contains_aggregate: added_aggregates.append(alias) if added_aggregates: obj._setup_aggregate_query(list(added_aggregates)) return obj def order_by(self, *field_names): """ Returns a new QuerySet instance with the ordering changed. """ assert self.query.can_filter(), \ "Cannot reorder a query once a slice has been taken." obj = self._clone() obj.query.clear_ordering(force_empty=False) obj.query.add_ordering(*field_names) return obj def distinct(self, *field_names): """ Returns a new QuerySet instance that will select only distinct results. """ assert self.query.can_filter(), \ "Cannot create distinct fields once a slice has been taken." obj = self._clone() obj.query.add_distinct_fields(*field_names) return obj def extra(self, select=None, where=None, params=None, tables=None, order_by=None, select_params=None): """ Adds extra SQL fragments to the query. """ assert self.query.can_filter(), \ "Cannot change a query once a slice has been taken" clone = self._clone() clone.query.add_extra(select, select_params, where, params, tables, order_by) return clone def reverse(self): """ Reverses the ordering of the QuerySet. """ clone = self._clone() clone.query.standard_ordering = not clone.query.standard_ordering return clone def defer(self, *fields): """ Defers the loading of data for certain fields until they are accessed. The set of fields to defer is added to any existing set of deferred fields. The only exception to this is if None is passed in as the only parameter, in which case all deferrals are removed (None acts as a reset option). """ clone = self._clone() if fields == (None,): clone.query.clear_deferred_loading() else: clone.query.add_deferred_loading(fields) return clone def only(self, *fields): """ Essentially, the opposite of defer. Only the fields passed into this method and that are not already specified as deferred are loaded immediately when the queryset is evaluated. """ if fields == (None,): # Can only pass None to defer(), not only(), as the rest option. # That won't stop people trying to do this, so let's be explicit. raise TypeError("Cannot pass None as an argument to only().") clone = self._clone() clone.query.add_immediate_loading(fields) return clone def using(self, alias): """ Selects which database this QuerySet should execute its query against. """ clone = self._clone() clone._db = alias return clone ################################### # PUBLIC INTROSPECTION ATTRIBUTES # ################################### def ordered(self): """ Returns True if the QuerySet is ordered -- i.e. has an order_by() clause or a default ordering on the model. 
""" if self.query.extra_order_by or self.query.order_by: return True elif self.query.default_ordering and self.query.get_meta().ordering: return True else: return False ordered = property(ordered) @property def db(self): "Return the database that will be used if this query is executed now" if self._for_write: return self._db or router.db_for_write(self.model, **self._hints) return self._db or router.db_for_read(self.model, **self._hints) ################### # PRIVATE METHODS # ################### def _insert(self, objs, fields, return_id=False, raw=False, using=None): """ Inserts a new record for the given model. This provides an interface to the InsertQuery class and is how Model.save() is implemented. """ self._for_write = True if using is None: using = self.db query = sql.InsertQuery(self.model) query.insert_values(fields, objs, raw=raw) return query.get_compiler(using=using).execute_sql(return_id) _insert.alters_data = True _insert.queryset_only = False def _batched_insert(self, objs, fields, batch_size): """ A little helper method for bulk_insert to insert the bulk one batch at a time. Inserts recursively a batch from the front of the bulk and then _batched_insert() the remaining objects again. """ if not objs: return ops = connections[self.db].ops batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1)) for batch in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]: self.model._base_manager._insert(batch, fields=fields, using=self.db) def _clone(self, klass=None, setup=False, **kwargs): if klass is None: klass = self.__class__ elif not issubclass(self.__class__, klass): base_queryset_class = getattr(self, '_base_queryset_class', self.__class__) class_bases = (klass, base_queryset_class) class_dict = { '_base_queryset_class': base_queryset_class, '_specialized_queryset_class': klass, } klass = type(klass.__name__, class_bases, class_dict) query = self.query.clone() if self._sticky_filter: query.filter_is_sticky = True c = klass(model=self.model, query=query, using=self._db, hints=self._hints) c._for_write = self._for_write c._prefetch_related_lookups = self._prefetch_related_lookups[:] c._known_related_objects = self._known_related_objects c.__dict__.update(kwargs) if setup and hasattr(c, '_setup_query'): c._setup_query() return c def _fetch_all(self): if self._result_cache is None: self._result_cache = list(self.iterator()) if self._prefetch_related_lookups and not self._prefetch_done: self._prefetch_related_objects() def _next_is_sticky(self): """ Indicates that the next filter call and the one following that should be treated as a single filter. This is only important when it comes to determining when to reuse tables for many-to-many filters. Required so that we can filter naturally on the results of related managers. This doesn't return a clone of the current QuerySet (it returns "self"). The method is only used internally and should be immediately followed by a filter() that does create a clone. """ self._sticky_filter = True return self def _merge_sanity_check(self, other): """ Checks that we are merging two comparable QuerySet classes. By default this does nothing, but see the ValuesQuerySet for an example of where it's useful. """ pass def _merge_known_related_objects(self, other): """ Keep track of all known related objects from either QuerySet instance. 
""" for field, objects in other._known_related_objects.items(): self._known_related_objects.setdefault(field, {}).update(objects) def _setup_aggregate_query(self, aggregates): """ Prepare the query for computing a result that contains aggregate annotations. """ if self.query.group_by is None: self.query.group_by = True def _prepare(self): return self def _as_sql(self, connection): """ Returns the internal query's SQL and parameters (as a tuple). """ obj = self.values("pk") if obj._db is None or connection == connections[obj._db]: return obj.query.get_compiler(connection=connection).as_nested_sql() raise ValueError("Can't do subqueries with queries on different DBs.") # When used as part of a nested query, a queryset will never be an "always # empty" result. value_annotation = True def _add_hints(self, **hints): """ Update hinting information for later use by Routers """ # If there is any hinting information, add it to what we already know. # If we have a new hint for an existing key, overwrite with the new value. self._hints.update(hints) def _has_filters(self): """ Checks if this QuerySet has any filtering going on. Note that this isn't equivalent for checking if all objects are present in results, for example qs[1:]._has_filters() -> False. """ return self.query.has_filters() def is_compatible_query_object_type(self, opts): model = self.model return ( model == opts.concrete_model or opts.concrete_model in model._meta.get_parent_list() or model in opts.get_parent_list() ) is_compatible_query_object_type.queryset_only = True class InstanceCheckMeta(type): def __instancecheck__(self, instance): return instance.query.is_empty() class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)): """ Marker class usable for checking if a queryset is empty by .none(): isinstance(qs.none(), EmptyQuerySet) -> True """ def __init__(self, *args, **kwargs): raise TypeError("EmptyQuerySet can't be instantiated") class ValuesQuerySet(QuerySet): def __init__(self, *args, **kwargs): super(ValuesQuerySet, self).__init__(*args, **kwargs) # select_related isn't supported in values(). (FIXME -#3358) self.query.select_related = False # QuerySet.clone() will also set up the _fields attribute with the # names of the model fields to select. def only(self, *fields): raise NotImplementedError("ValuesQuerySet does not implement only()") def defer(self, *fields): raise NotImplementedError("ValuesQuerySet does not implement defer()") def iterator(self): # Purge any extra columns that haven't been explicitly asked for extra_names = list(self.query.extra_select) field_names = self.field_names annotation_names = list(self.query.annotation_select) names = extra_names + field_names + annotation_names for row in self.query.get_compiler(self.db).results_iter(): yield dict(zip(names, row)) def delete(self): # values().delete() doesn't work currently - make sure it raises an # user friendly error. raise TypeError("Queries with .values() or .values_list() applied " "can't be deleted") def _setup_query(self): """ Constructs the field_names list that the values query will be retrieving. Called by the _clone() method after initializing the rest of the instance. 
""" if self.query.group_by is True: self.query.add_fields([f.attname for f in self.model._meta.concrete_fields], False) self.query.set_group_by() self.query.clear_deferred_loading() self.query.clear_select_fields() if self._fields: self.extra_names = [] self.annotation_names = [] if not self.query._extra and not self.query._annotations: # Short cut - if there are no extra or annotations, then # the values() clause must be just field names. self.field_names = list(self._fields) else: self.query.default_cols = False self.field_names = [] for f in self._fields: # we inspect the full extra_select list since we might # be adding back an extra select item that we hadn't # had selected previously. if self.query._extra and f in self.query._extra: self.extra_names.append(f) elif f in self.query.annotation_select: self.annotation_names.append(f) else: self.field_names.append(f) else: # Default to all fields. self.extra_names = None self.field_names = [f.attname for f in self.model._meta.concrete_fields] self.annotation_names = None self.query.select = [] if self.extra_names is not None: self.query.set_extra_mask(self.extra_names) self.query.add_fields(self.field_names, True) if self.annotation_names is not None: self.query.set_annotation_mask(self.annotation_names) def _clone(self, klass=None, setup=False, **kwargs): """ Cloning a ValuesQuerySet preserves the current fields. """ c = super(ValuesQuerySet, self)._clone(klass, **kwargs) if not hasattr(c, '_fields'): # Only clone self._fields if _fields wasn't passed into the cloning # call directly. c._fields = self._fields[:] c.field_names = self.field_names c.extra_names = self.extra_names c.annotation_names = self.annotation_names if setup and hasattr(c, '_setup_query'): c._setup_query() return c def _merge_sanity_check(self, other): super(ValuesQuerySet, self)._merge_sanity_check(other) if (set(self.extra_names) != set(other.extra_names) or set(self.field_names) != set(other.field_names) or self.annotation_names != other.annotation_names): raise TypeError("Merging '%s' classes must involve the same values in each case." % self.__class__.__name__) def _setup_aggregate_query(self, aggregates): """ Prepare the query for computing a result that contains aggregate annotations. """ self.query.set_group_by() if self.annotation_names is not None: self.annotation_names.extend(aggregates) self.query.set_annotation_mask(self.annotation_names) super(ValuesQuerySet, self)._setup_aggregate_query(aggregates) def _as_sql(self, connection): """ For ValuesQuerySet (and subclasses like ValuesListQuerySet), they can only be used as nested queries if they're already set up to select only a single field (in which case, that is the field column that is returned). This differs from QuerySet.as_sql(), where the column to select is set up by Django. """ if ((self._fields and len(self._fields) > 1) or (not self._fields and len(self.model._meta.fields) > 1)): raise TypeError('Cannot use a multi-field %s as a filter value.' % self.__class__.__name__) obj = self._clone() if obj._db is None or connection == connections[obj._db]: return obj.query.get_compiler(connection=connection).as_nested_sql() raise ValueError("Can't do subqueries with queries on different DBs.") def _prepare(self): """ Validates that we aren't trying to do a query like value__in=qs.values('value1', 'value2'), which isn't valid. """ if ((self._fields and len(self._fields) > 1) or (not self._fields and len(self.model._meta.fields) > 1)): raise TypeError('Cannot use a multi-field %s as a filter value.' 
% self.__class__.__name__) return self def is_compatible_query_object_type(self, opts): """ ValueQuerySets do not need to be checked for compatibility. We trust that users of ValueQuerySets know what they are doing. """ return True class ValuesListQuerySet(ValuesQuerySet): def iterator(self): compiler = self.query.get_compiler(self.db) if self.flat and len(self._fields) == 1: for row in compiler.results_iter(): yield row[0] elif not self.query.extra_select and not self.query.annotation_select: for row in compiler.results_iter(): yield tuple(row) else: # When extra(select=...) or an annotation is involved, the extra # cols are always at the start of the row, and we need to reorder # the fields to match the order in self._fields. extra_names = list(self.query.extra_select) field_names = self.field_names annotation_names = list(self.query.annotation_select) names = extra_names + field_names + annotation_names # If a field list has been specified, use it. Otherwise, use the # full list of fields, including extras and annotations. if self._fields: fields = list(self._fields) + [f for f in annotation_names if f not in self._fields] else: fields = names for row in compiler.results_iter(): data = dict(zip(names, row)) yield tuple(data[f] for f in fields) def _clone(self, *args, **kwargs): clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs) if not hasattr(clone, "flat"): # Only assign flat if the clone didn't already get it from kwargs clone.flat = self.flat return clone class RawQuerySet(object): """ Provides an iterator which converts the results of raw SQL queries into annotated model instances. """ def __init__(self, raw_query, model=None, query=None, params=None, translations=None, using=None, hints=None): self.raw_query = raw_query self.model = model self._db = using self._hints = hints or {} self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params) self.params = params or () self.translations = translations or {} def resolve_model_init_order(self): """ Resolve the init field names and value positions """ model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns] annotation_fields = [(column, pos) for pos, column in enumerate(self.columns) if column not in self.model_fields] model_init_order = [self.columns.index(f.column) for f in model_init_fields] model_init_names = [f.attname for f in model_init_fields] return model_init_names, model_init_order, annotation_fields def __iter__(self): # Cache some things for performance reasons outside the loop. db = self.db compiler = connections[db].ops.compiler('SQLCompiler')( self.query, connections[db], db ) query = iter(self.query) try: model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order() # Find out which model's fields are not present in the query. 
skip = set() for field in self.model._meta.fields: if field.attname not in model_init_names: skip.add(field.attname) if skip: if self.model._meta.pk.attname in skip: raise InvalidQuery('Raw query must include the primary key') model_cls = deferred_class_factory(self.model, skip) else: model_cls = self.model fields = [self.model_fields.get(c, None) for c in self.columns] converters = compiler.get_converters([ f.get_col(f.model._meta.db_table) if f else None for f in fields ]) for values in query: if converters: values = compiler.apply_converters(values, converters) # Associate fields to values model_init_values = [values[pos] for pos in model_init_pos] instance = model_cls.from_db(db, model_init_names, model_init_values) if annotation_fields: for column, pos in annotation_fields: setattr(instance, column, values[pos]) yield instance finally: # Done iterating the Query. If it has its own cursor, close it. if hasattr(self.query, 'cursor') and self.query.cursor: self.query.cursor.close() def __repr__(self): return "<RawQuerySet: %s>" % self.query def __getitem__(self, k): return list(self)[k] @property def db(self): "Return the database that will be used if this query is executed now" return self._db or router.db_for_read(self.model, **self._hints) def using(self, alias): """ Selects which database this Raw QuerySet should execute its query against. """ return RawQuerySet(self.raw_query, model=self.model, query=self.query.clone(using=alias), params=self.params, translations=self.translations, using=alias) @property def columns(self): """ A list of model field names in the order they'll appear in the query results. """ if not hasattr(self, '_columns'): self._columns = self.query.get_columns() # Adjust any column names which don't match field names for (query_name, model_name) in self.translations.items(): try: index = self._columns.index(query_name) self._columns[index] = model_name except ValueError: # Ignore translations for non-existent column names pass return self._columns @property def model_fields(self): """ A dict mapping column names to model field names. """ if not hasattr(self, '_model_fields'): converter = connections[self.db].introspection.table_name_converter self._model_fields = {} for field in self.model._meta.fields: name, column = field.get_attname_column() self._model_fields[converter(column)] = field return self._model_fields class Prefetch(object): def __init__(self, lookup, queryset=None, to_attr=None): # `prefetch_through` is the path we traverse to perform the prefetch. self.prefetch_through = lookup # `prefetch_to` is the path to the attribute that stores the result. 
self.prefetch_to = lookup if to_attr: self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr]) self.queryset = queryset self.to_attr = to_attr def add_prefix(self, prefix): self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through]) self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to]) def get_current_prefetch_through(self, level): return LOOKUP_SEP.join(self.prefetch_through.split(LOOKUP_SEP)[:level + 1]) def get_current_prefetch_to(self, level): return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1]) def get_current_to_attr(self, level): parts = self.prefetch_to.split(LOOKUP_SEP) to_attr = parts[level] as_attr = self.to_attr and level == len(parts) - 1 return to_attr, as_attr def get_current_queryset(self, level): if self.get_current_prefetch_to(level) == self.prefetch_to: return self.queryset return None def __eq__(self, other): if isinstance(other, Prefetch): return self.prefetch_to == other.prefetch_to return False def __hash__(self): return hash(self.__class__) ^ hash(self.prefetch_to) def normalize_prefetch_lookups(lookups, prefix=None): """ Helper function that normalize lookups into Prefetch objects. """ ret = [] for lookup in lookups: if not isinstance(lookup, Prefetch): lookup = Prefetch(lookup) if prefix: lookup.add_prefix(prefix) ret.append(lookup) return ret def prefetch_related_objects(result_cache, related_lookups): """ Helper function for prefetch_related functionality Populates prefetched objects caches for a list of results from a QuerySet """ if len(result_cache) == 0: return # nothing to do related_lookups = normalize_prefetch_lookups(related_lookups) # We need to be able to dynamically add to the list of prefetch_related # lookups that we look up (see below). So we need some book keeping to # ensure we don't do duplicate work. done_queries = {} # dictionary of things like 'foo__bar': [results] auto_lookups = set() # we add to this as we go through. followed_descriptors = set() # recursion protection all_lookups = deque(related_lookups) while all_lookups: lookup = all_lookups.popleft() if lookup.prefetch_to in done_queries: if lookup.queryset: raise ValueError("'%s' lookup was already seen with a different queryset. " "You may need to adjust the ordering of your lookups." % lookup.prefetch_to) continue # Top level, the list of objects to decorate is the result cache # from the primary QuerySet. It won't be for deeper levels. obj_list = result_cache through_attrs = lookup.prefetch_through.split(LOOKUP_SEP) for level, through_attr in enumerate(through_attrs): # Prepare main instances if len(obj_list) == 0: break prefetch_to = lookup.get_current_prefetch_to(level) if prefetch_to in done_queries: # Skip any prefetching, and any object preparation obj_list = done_queries[prefetch_to] continue # Prepare objects: good_objects = True for obj in obj_list: # Since prefetching can re-use instances, it is possible to have # the same instance multiple times in obj_list, so obj might # already be prepared. if not hasattr(obj, '_prefetched_objects_cache'): try: obj._prefetched_objects_cache = {} except AttributeError: # Must be in a QuerySet subclass that is not returning # Model instances, either in Django or 3rd # party. prefetch_related() doesn't make sense, so quit # now. good_objects = False break if not good_objects: break # Descend down tree # We assume that objects retrieved are homogeneous (which is the premise # of prefetch_related), so what applies to first object applies to all. 
first_obj = obj_list[0] prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr) if not attr_found: raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid " "parameter to prefetch_related()" % (through_attr, first_obj.__class__.__name__, lookup.prefetch_through)) if level == len(through_attrs) - 1 and prefetcher is None: # Last one, this *must* resolve to something that supports # prefetching, otherwise there is no point adding it and the # developer asking for it has made a mistake. raise ValueError("'%s' does not resolve to an item that supports " "prefetching - this is an invalid parameter to " "prefetch_related()." % lookup.prefetch_through) if prefetcher is not None and not is_fetched: obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level) # We need to ensure we don't keep adding lookups from the # same relationships to stop infinite recursion. So, if we # are already on an automatically added lookup, don't add # the new lookups from relationships we've seen already. if not (lookup in auto_lookups and descriptor in followed_descriptors): done_queries[prefetch_to] = obj_list new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to) auto_lookups.update(new_lookups) all_lookups.extendleft(new_lookups) followed_descriptors.add(descriptor) else: # Either a singly related object that has already been fetched # (e.g. via select_related), or hopefully some other property # that doesn't support prefetching but needs to be traversed. # We replace the current list of parent objects with the list # of related objects, filtering out empty or missing values so # that we can continue with nullable or reverse relations. new_obj_list = [] for obj in obj_list: try: new_obj = getattr(obj, through_attr) except exceptions.ObjectDoesNotExist: continue if new_obj is None: continue # We special-case `list` rather than something more generic # like `Iterable` because we don't want to accidentally match # user models that define __iter__. if isinstance(new_obj, list): new_obj_list.extend(new_obj) else: new_obj_list.append(new_obj) obj_list = new_obj_list def get_prefetcher(instance, attr): """ For the attribute 'attr' on the given instance, finds an object that has a get_prefetch_queryset(). Returns a 4 tuple containing: (the object with get_prefetch_queryset (or None), the descriptor object representing this relationship (or None), a boolean that is False if the attribute was not found at all, a boolean that is True if the attribute has already been fetched) """ prefetcher = None is_fetched = False # For singly related objects, we have to avoid getting the attribute # from the object, as this will trigger the query. So we first try # on the class, in order to get the descriptor object. rel_obj_descriptor = getattr(instance.__class__, attr, None) if rel_obj_descriptor is None: attr_found = hasattr(instance, attr) else: attr_found = True if rel_obj_descriptor: # singly related object, descriptor object has the # get_prefetch_queryset() method. 
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'): prefetcher = rel_obj_descriptor if rel_obj_descriptor.is_cached(instance): is_fetched = True else: # descriptor doesn't support prefetching, so we go ahead and get # the attribute on the instance rather than the class to # support many related managers rel_obj = getattr(instance, attr) if hasattr(rel_obj, 'get_prefetch_queryset'): prefetcher = rel_obj return prefetcher, rel_obj_descriptor, attr_found, is_fetched def prefetch_one_level(instances, prefetcher, lookup, level): """ Helper function for prefetch_related_objects Runs prefetches on all instances using the prefetcher object, assigning results to relevant caches in instance. The prefetched objects are returned, along with any additional prefetches that must be done due to prefetch_related lookups found from default managers. """ # prefetcher must have a method get_prefetch_queryset() which takes a list # of instances, and returns a tuple: # (queryset of instances of self.model that are related to passed in instances, # callable that gets value to be matched for returned instances, # callable that gets value to be matched for passed in instances, # boolean that is True for singly related objects, # cache name to assign to). # The 'values to be matched' must be hashable as they will be used # in a dictionary. rel_qs, rel_obj_attr, instance_attr, single, cache_name = ( prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level))) # We have to handle the possibility that the QuerySet we just got back # contains some prefetch_related lookups. We don't want to trigger the # prefetch_related functionality by evaluating the query. Rather, we need # to merge in the prefetch_related lookups. additional_lookups = getattr(rel_qs, '_prefetch_related_lookups', []) if additional_lookups: # Don't need to clone because the manager should have given us a fresh # instance, so we access an internal instead of using public interface # for performance reasons. rel_qs._prefetch_related_lookups = [] all_related_objects = list(rel_qs) rel_obj_cache = {} for rel_obj in all_related_objects: rel_attr_val = rel_obj_attr(rel_obj) rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj) for obj in instances: instance_attr_val = instance_attr(obj) vals = rel_obj_cache.get(instance_attr_val, []) to_attr, as_attr = lookup.get_current_to_attr(level) if single: val = vals[0] if vals else None to_attr = to_attr if as_attr else cache_name setattr(obj, to_attr, val) else: if as_attr: setattr(obj, to_attr, vals) else: # Cache in the QuerySet.all(). qs = getattr(obj, to_attr).all() qs._result_cache = vals # We don't want the individual qs doing prefetch_related now, # since we have merged this into the current work. qs._prefetch_done = True obj._prefetched_objects_cache[cache_name] = qs return all_related_objects, additional_lookups class RelatedPopulator(object): """ RelatedPopulator is used for select_related() object instantiation. The idea is that each select_related() model will be populated by a different RelatedPopulator instance. The RelatedPopulator instances get klass_info and select (computed in SQLCompiler) plus the used db as input for initialization. That data is used to compute which columns to use, how to instantiate the model, and how to populate the links between the objects. The actual creation of the objects is done in populate() method. This method gets row and from_obj as input and populates the select_related() model instance. 
""" def __init__(self, klass_info, select, db): self.db = db # Pre-compute needed attributes. The attributes are: # - model_cls: the possibly deferred model class to instantiate # - either: # - cols_start, cols_end: usually the columns in the row are # in the same order model_cls.__init__ expects them, so we # can instantiate by model_cls(*row[cols_start:cols_end]) # - reorder_for_init: When select_related descends to a child # class, then we want to reuse the already selected parent # data. However, in this case the parent data isn't necessarily # in the same order that Model.__init__ expects it to be, so # we have to reorder the parent data. The reorder_for_init # attribute contains a function used to reorder the field data # in the order __init__ expects it. # - pk_idx: the index of the primary key field in the reordered # model data. Used to check if a related object exists at all. # - init_list: the field attnames fetched from the database. For # deferred models this isn't the same as all attnames of the # model's fields. # - related_populators: a list of RelatedPopulator instances if # select_related() descends to related models from this model. # - cache_name, reverse_cache_name: the names to use for setattr # when assigning the fetched object to the from_obj. If the # reverse_cache_name is set, then we also set the reverse link. select_fields = klass_info['select_fields'] from_parent = klass_info['from_parent'] if not from_parent: self.cols_start = select_fields[0] self.cols_end = select_fields[-1] + 1 self.init_list = [ f[0].target.attname for f in select[self.cols_start:self.cols_end] ] self.reorder_for_init = None else: model_init_attnames = [ f.attname for f in klass_info['model']._meta.concrete_fields ] reorder_map = [] for idx in select_fields: field = select[idx][0].target init_pos = model_init_attnames.index(field.attname) reorder_map.append((init_pos, field.attname, idx)) reorder_map.sort() self.init_list = [v[1] for v in reorder_map] pos_list = [row_pos for _, _, row_pos in reorder_map] def reorder_for_init(row): return [row[row_pos] for row_pos in pos_list] self.reorder_for_init = reorder_for_init self.model_cls = self.get_deferred_cls(klass_info, self.init_list) self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname) self.related_populators = get_related_populators(klass_info, select, self.db) field = klass_info['field'] reverse = klass_info['reverse'] self.reverse_cache_name = None if reverse: self.cache_name = field.rel.get_cache_name() self.reverse_cache_name = field.get_cache_name() else: self.cache_name = field.get_cache_name() if field.unique: self.reverse_cache_name = field.rel.get_cache_name() def get_deferred_cls(self, klass_info, init_list): model_cls = klass_info['model'] if len(init_list) != len(model_cls._meta.concrete_fields): init_set = set(init_list) skip = [ f.attname for f in model_cls._meta.concrete_fields if f.attname not in init_set ] model_cls = deferred_class_factory(model_cls, skip) return model_cls def populate(self, row, from_obj): if self.reorder_for_init: obj_data = self.reorder_for_init(row) else: obj_data = row[self.cols_start:self.cols_end] if obj_data[self.pk_idx] is None: obj = None else: obj = self.model_cls.from_db(self.db, self.init_list, obj_data) if obj and self.related_populators: for rel_iter in self.related_populators: rel_iter.populate(row, obj) setattr(from_obj, self.cache_name, obj) if obj and self.reverse_cache_name: setattr(obj, self.reverse_cache_name, from_obj) def get_related_populators(klass_info, select, 
db): iterators = [] related_klass_infos = klass_info.get('related_klass_infos', []) for rel_klass_info in related_klass_infos: rel_cls = RelatedPopulator(rel_klass_info, select, db) iterators.append(rel_cls) return iterators
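# Illustrative sketch (not part of Django): the comments in prefetch_one_level()
# above describe the contract a prefetcher must satisfy -- get_prefetch_queryset()
# returns (related objects, rel_obj_attr, instance_attr, single, cache_name), and
# the related objects are grouped by rel_obj_attr and matched back to each parent
# instance via instance_attr.  The toy _ToyAuthor/_ToyBook classes and the helper
# below are hypothetical and only mimic that grouping/matching step.

class _ToyAuthor(object):
    def __init__(self, pk, name):
        self.pk, self.name = pk, name

class _ToyBook(object):
    def __init__(self, author_id, title):
        self.author_id, self.title = author_id, title

def _toy_prefetch_one_level(instances, related, rel_obj_attr, instance_attr, cache_name):
    # Group related objects by their matching value, like rel_obj_cache above.
    cache = {}
    for rel_obj in related:
        cache.setdefault(rel_obj_attr(rel_obj), []).append(rel_obj)
    # Attach each group to its parent instance under cache_name.
    for obj in instances:
        setattr(obj, cache_name, cache.get(instance_attr(obj), []))

_authors = [_ToyAuthor(1, 'a'), _ToyAuthor(2, 'b')]
_books = [_ToyBook(1, 'x'), _ToyBook(1, 'y'), _ToyBook(2, 'z')]
_toy_prefetch_one_level(_authors, _books,
                        rel_obj_attr=lambda b: b.author_id,
                        instance_attr=lambda a: a.pk,
                        cache_name='prefetched_books')
assert [b.title for b in _authors[0].prefetched_books] == ['x', 'y']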
mit
FireWRT/OpenWrt-Firefly-Libraries
staging_dir/host/lib/python3.4/test/test_heapq.py
111
14475
"""Unittests for heapq.""" import sys import random import unittest from test import support from unittest import TestCase, skipUnless py_heapq = support.import_fresh_module('heapq', blocked=['_heapq']) c_heapq = support.import_fresh_module('heapq', fresh=['_heapq']) # _heapq.nlargest/nsmallest are saved in heapq._nlargest/_smallest when # _heapq is imported, so check them there func_names = ['heapify', 'heappop', 'heappush', 'heappushpop', 'heapreplace', '_nlargest', '_nsmallest'] class TestModules(TestCase): def test_py_functions(self): for fname in func_names: self.assertEqual(getattr(py_heapq, fname).__module__, 'heapq') @skipUnless(c_heapq, 'requires _heapq') def test_c_functions(self): for fname in func_names: self.assertEqual(getattr(c_heapq, fname).__module__, '_heapq') class TestHeap: def test_push_pop(self): # 1) Push 256 random numbers and pop them off, verifying all's OK. heap = [] data = [] self.check_invariant(heap) for i in range(256): item = random.random() data.append(item) self.module.heappush(heap, item) self.check_invariant(heap) results = [] while heap: item = self.module.heappop(heap) self.check_invariant(heap) results.append(item) data_sorted = data[:] data_sorted.sort() self.assertEqual(data_sorted, results) # 2) Check that the invariant holds for a sorted array self.check_invariant(results) self.assertRaises(TypeError, self.module.heappush, []) try: self.assertRaises(TypeError, self.module.heappush, None, None) self.assertRaises(TypeError, self.module.heappop, None) except AttributeError: pass def check_invariant(self, heap): # Check the heap invariant. for pos, item in enumerate(heap): if pos: # pos 0 has no parent parentpos = (pos-1) >> 1 self.assertTrue(heap[parentpos] <= item) def test_heapify(self): for size in range(30): heap = [random.random() for dummy in range(size)] self.module.heapify(heap) self.check_invariant(heap) self.assertRaises(TypeError, self.module.heapify, None) def test_naive_nbest(self): data = [random.randrange(2000) for i in range(1000)] heap = [] for item in data: self.module.heappush(heap, item) if len(heap) > 10: self.module.heappop(heap) heap.sort() self.assertEqual(heap, sorted(data)[-10:]) def heapiter(self, heap): # An iterator returning a heap's elements, smallest-first. try: while 1: yield self.module.heappop(heap) except IndexError: pass def test_nbest(self): # Less-naive "N-best" algorithm, much faster (if len(data) is big # enough <wink>) than sorting all of data. However, if we had a max # heap instead of a min heap, it could go faster still via # heapify'ing all of data (linear time), then doing 10 heappops # (10 log-time steps). 
data = [random.randrange(2000) for i in range(1000)] heap = data[:10] self.module.heapify(heap) for item in data[10:]: if item > heap[0]: # this gets rarer the longer we run self.module.heapreplace(heap, item) self.assertEqual(list(self.heapiter(heap)), sorted(data)[-10:]) self.assertRaises(TypeError, self.module.heapreplace, None) self.assertRaises(TypeError, self.module.heapreplace, None, None) self.assertRaises(IndexError, self.module.heapreplace, [], None) def test_nbest_with_pushpop(self): data = [random.randrange(2000) for i in range(1000)] heap = data[:10] self.module.heapify(heap) for item in data[10:]: self.module.heappushpop(heap, item) self.assertEqual(list(self.heapiter(heap)), sorted(data)[-10:]) self.assertEqual(self.module.heappushpop([], 'x'), 'x') def test_heappushpop(self): h = [] x = self.module.heappushpop(h, 10) self.assertEqual((h, x), ([], 10)) h = [10] x = self.module.heappushpop(h, 10.0) self.assertEqual((h, x), ([10], 10.0)) self.assertEqual(type(h[0]), int) self.assertEqual(type(x), float) h = [10]; x = self.module.heappushpop(h, 9) self.assertEqual((h, x), ([10], 9)) h = [10]; x = self.module.heappushpop(h, 11) self.assertEqual((h, x), ([11], 10)) def test_heapsort(self): # Exercise everything with repeated heapsort checks for trial in range(100): size = random.randrange(50) data = [random.randrange(25) for i in range(size)] if trial & 1: # Half of the time, use heapify heap = data[:] self.module.heapify(heap) else: # The rest of the time, use heappush heap = [] for item in data: self.module.heappush(heap, item) heap_sorted = [self.module.heappop(heap) for i in range(size)] self.assertEqual(heap_sorted, sorted(data)) def test_merge(self): inputs = [] for i in range(random.randrange(5)): row = sorted(random.randrange(1000) for j in range(random.randrange(10))) inputs.append(row) self.assertEqual(sorted(chain(*inputs)), list(self.module.merge(*inputs))) self.assertEqual(list(self.module.merge()), []) def test_merge_does_not_suppress_index_error(self): # Issue 19018: Heapq.merge suppresses IndexError from user generator def iterable(): s = list(range(10)) for i in range(20): yield s[i] # IndexError when i > 10 with self.assertRaises(IndexError): list(self.module.merge(iterable(), iterable())) def test_merge_stability(self): class Int(int): pass inputs = [[], [], [], []] for i in range(20000): stream = random.randrange(4) x = random.randrange(500) obj = Int(x) obj.pair = (x, stream) inputs[stream].append(obj) for stream in inputs: stream.sort() result = [i.pair for i in self.module.merge(*inputs)] self.assertEqual(result, sorted(result)) def test_nsmallest(self): data = [(random.randrange(2000), i) for i in range(1000)] for f in (None, lambda x: x[0] * 547 % 2000): for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100): self.assertEqual(list(self.module.nsmallest(n, data)), sorted(data)[:n]) self.assertEqual(list(self.module.nsmallest(n, data, key=f)), sorted(data, key=f)[:n]) def test_nlargest(self): data = [(random.randrange(2000), i) for i in range(1000)] for f in (None, lambda x: x[0] * 547 % 2000): for n in (0, 1, 2, 10, 100, 400, 999, 1000, 1100): self.assertEqual(list(self.module.nlargest(n, data)), sorted(data, reverse=True)[:n]) self.assertEqual(list(self.module.nlargest(n, data, key=f)), sorted(data, key=f, reverse=True)[:n]) def test_comparison_operator(self): # Issue 3051: Make sure heapq works with both __lt__ # For python 3.0, __le__ alone is not enough def hsort(data, comp): data = [comp(x) for x in data] self.module.heapify(data) return 
[self.module.heappop(data).x for i in range(len(data))] class LT: def __init__(self, x): self.x = x def __lt__(self, other): return self.x > other.x class LE: def __init__(self, x): self.x = x def __le__(self, other): return self.x >= other.x data = [random.random() for i in range(100)] target = sorted(data, reverse=True) self.assertEqual(hsort(data, LT), target) self.assertRaises(TypeError, data, LE) class TestHeapPython(TestHeap, TestCase): module = py_heapq @skipUnless(c_heapq, 'requires _heapq') class TestHeapC(TestHeap, TestCase): module = c_heapq #============================================================================== class LenOnly: "Dummy sequence class defining __len__ but not __getitem__." def __len__(self): return 10 class GetOnly: "Dummy sequence class defining __getitem__ but not __len__." def __getitem__(self, ndx): return 10 class CmpErr: "Dummy element that always raises an error during comparison" def __eq__(self, other): raise ZeroDivisionError __ne__ = __lt__ = __le__ = __gt__ = __ge__ = __eq__ def R(seqn): 'Regular generator' for i in seqn: yield i class G: 'Sequence using __getitem__' def __init__(self, seqn): self.seqn = seqn def __getitem__(self, i): return self.seqn[i] class I: 'Sequence using iterator protocol' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): return self def __next__(self): if self.i >= len(self.seqn): raise StopIteration v = self.seqn[self.i] self.i += 1 return v class Ig: 'Sequence using iterator protocol defined with a generator' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): for val in self.seqn: yield val class X: 'Missing __getitem__ and __iter__' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __next__(self): if self.i >= len(self.seqn): raise StopIteration v = self.seqn[self.i] self.i += 1 return v class N: 'Iterator missing __next__()' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): return self class E: 'Test propagation of exceptions' def __init__(self, seqn): self.seqn = seqn self.i = 0 def __iter__(self): return self def __next__(self): 3 // 0 class S: 'Test immediate stop' def __init__(self, seqn): pass def __iter__(self): return self def __next__(self): raise StopIteration from itertools import chain def L(seqn): 'Test multiple tiers of iterators' return chain(map(lambda x:x, R(Ig(G(seqn))))) class SideEffectLT: def __init__(self, value, heap): self.value = value self.heap = heap def __lt__(self, other): self.heap[:] = [] return self.value < other.value class TestErrorHandling: def test_non_sequence(self): for f in (self.module.heapify, self.module.heappop): self.assertRaises((TypeError, AttributeError), f, 10) for f in (self.module.heappush, self.module.heapreplace, self.module.nlargest, self.module.nsmallest): self.assertRaises((TypeError, AttributeError), f, 10, 10) def test_len_only(self): for f in (self.module.heapify, self.module.heappop): self.assertRaises((TypeError, AttributeError), f, LenOnly()) for f in (self.module.heappush, self.module.heapreplace): self.assertRaises((TypeError, AttributeError), f, LenOnly(), 10) for f in (self.module.nlargest, self.module.nsmallest): self.assertRaises(TypeError, f, 2, LenOnly()) def test_get_only(self): for f in (self.module.heapify, self.module.heappop): self.assertRaises(TypeError, f, GetOnly()) for f in (self.module.heappush, self.module.heapreplace): self.assertRaises(TypeError, f, GetOnly(), 10) for f in (self.module.nlargest, self.module.nsmallest): self.assertRaises(TypeError, f, 2, 
GetOnly()) def test_get_only(self): seq = [CmpErr(), CmpErr(), CmpErr()] for f in (self.module.heapify, self.module.heappop): self.assertRaises(ZeroDivisionError, f, seq) for f in (self.module.heappush, self.module.heapreplace): self.assertRaises(ZeroDivisionError, f, seq, 10) for f in (self.module.nlargest, self.module.nsmallest): self.assertRaises(ZeroDivisionError, f, 2, seq) def test_arg_parsing(self): for f in (self.module.heapify, self.module.heappop, self.module.heappush, self.module.heapreplace, self.module.nlargest, self.module.nsmallest): self.assertRaises((TypeError, AttributeError), f, 10) def test_iterable_args(self): for f in (self.module.nlargest, self.module.nsmallest): for s in ("123", "", range(1000), (1, 1.2), range(2000,2200,5)): for g in (G, I, Ig, L, R): self.assertEqual(list(f(2, g(s))), list(f(2,s))) self.assertEqual(list(f(2, S(s))), []) self.assertRaises(TypeError, f, 2, X(s)) self.assertRaises(TypeError, f, 2, N(s)) self.assertRaises(ZeroDivisionError, f, 2, E(s)) # Issue #17278: the heap may change size while it's being walked. def test_heappush_mutating_heap(self): heap = [] heap.extend(SideEffectLT(i, heap) for i in range(200)) # Python version raises IndexError, C version RuntimeError with self.assertRaises((IndexError, RuntimeError)): self.module.heappush(heap, SideEffectLT(5, heap)) def test_heappop_mutating_heap(self): heap = [] heap.extend(SideEffectLT(i, heap) for i in range(200)) # Python version raises IndexError, C version RuntimeError with self.assertRaises((IndexError, RuntimeError)): self.module.heappop(heap) class TestErrorHandlingPython(TestErrorHandling, TestCase): module = py_heapq @skipUnless(c_heapq, 'requires _heapq') class TestErrorHandlingC(TestErrorHandling, TestCase): module = c_heapq if __name__ == "__main__": unittest.main()
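# Illustrative sketch (not part of the CPython test suite): the comment in
# test_nbest() above describes the "N-best" idiom -- keep a small min-heap of the
# N largest items seen so far and heapreplace() its root whenever a bigger item
# arrives.  A standalone version of that idiom, assuming only the stdlib heapq:

import heapq
import random

def n_largest(n, iterable):
    it = iter(iterable)
    heap = [next(it) for _ in range(n)]  # seed the heap with the first n items
    heapq.heapify(heap)                  # min-heap: heap[0] is the weakest survivor
    for item in it:
        if item > heap[0]:               # beats the weakest of the current N-best
            heapq.heapreplace(heap, item)
    return sorted(heap, reverse=True)

_data = [random.randrange(2000) for _ in range(1000)]
assert n_largest(10, _data) == sorted(_data, reverse=True)[:10]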
gpl-2.0
spvkgn/youtube-dl
youtube_dl/extractor/lecture2go.py
87
2402
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    determine_protocol,
    parse_duration,
    int_or_none,
)


class Lecture2GoIE(InfoExtractor):
    _VALID_URL = r'https?://lecture2go\.uni-hamburg\.de/veranstaltungen/-/v/(?P<id>\d+)'
    _TEST = {
        'url': 'https://lecture2go.uni-hamburg.de/veranstaltungen/-/v/17473',
        'md5': 'ac02b570883020d208d405d5a3fd2f7f',
        'info_dict': {
            'id': '17473',
            'ext': 'mp4',
            'title': '2 - Endliche Automaten und reguläre Sprachen',
            'creator': 'Frank Heitmann',
            'duration': 5220,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        title = self._html_search_regex(r'<em[^>]+class="title">(.+)</em>', webpage, 'title')

        formats = []
        for url in set(re.findall(r'var\s+playerUri\d+\s*=\s*"([^"]+)"', webpage)):
            ext = determine_ext(url)
            protocol = determine_protocol({'url': url})
            if ext == 'f4m':
                formats.extend(self._extract_f4m_formats(url, video_id, f4m_id='hds'))
            elif ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(url, video_id, ext='mp4', m3u8_id='hls'))
            else:
                if protocol == 'rtmp':
                    continue  # XXX: currently broken
                formats.append({
                    'format_id': protocol,
                    'url': url,
                })
        self._sort_formats(formats)

        creator = self._html_search_regex(
            r'<div[^>]+id="description">([^<]+)</div>', webpage, 'creator', fatal=False)
        duration = parse_duration(self._html_search_regex(
            r'Duration:\s*</em>\s*<em[^>]*>([^<]+)</em>', webpage, 'duration', fatal=False))
        view_count = int_or_none(self._html_search_regex(
            r'Views:\s*</em>\s*<em[^>]+>(\d+)</em>', webpage, 'view count', fatal=False))

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'creator': creator,
            'duration': duration,
            'view_count': view_count,
        }
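# Illustrative sketch (not part of youtube-dl): self._match_id(url) used above
# essentially applies _VALID_URL and returns the named "id" group.  The same
# match with only the stdlib re module and the regex from the class:

import re

_VALID_URL = r'https?://lecture2go\.uni-hamburg\.de/veranstaltungen/-/v/(?P<id>\d+)'
_m = re.match(_VALID_URL, 'https://lecture2go.uni-hamburg.de/veranstaltungen/-/v/17473')
assert _m and _m.group('id') == '17473'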
unlicense
sjerdo/letsencrypt
acme/acme/fields.py
53
1742
"""ACME JSON fields.""" import logging import pyrfc3339 from acme import jose logger = logging.getLogger(__name__) class Fixed(jose.Field): """Fixed field.""" def __init__(self, json_name, value): self.value = value super(Fixed, self).__init__( json_name=json_name, default=value, omitempty=False) def decode(self, value): if value != self.value: raise jose.DeserializationError('Expected {0!r}'.format(self.value)) return self.value def encode(self, value): if value != self.value: logger.warn( 'Overriding fixed field (%s) with %r', self.json_name, value) return value class RFC3339Field(jose.Field): """RFC3339 field encoder/decoder. Handles decoding/encoding between RFC3339 strings and aware (not naive) `datetime.datetime` objects (e.g. ``datetime.datetime.now(pytz.utc)``). """ @classmethod def default_encoder(cls, value): return pyrfc3339.generate(value) @classmethod def default_decoder(cls, value): try: return pyrfc3339.parse(value) except ValueError as error: raise jose.DeserializationError(error) class Resource(jose.Field): """Resource MITM field.""" def __init__(self, resource_type, *args, **kwargs): self.resource_type = resource_type super(Resource, self).__init__( 'resource', default=resource_type, *args, **kwargs) def decode(self, value): if value != self.resource_type: raise jose.DeserializationError( 'Wrong resource type: {0} instead of {1}'.format( value, self.resource_type)) return value
apache-2.0
karan1276/servo
tests/wpt/web-platform-tests/tools/py/bench/localpath.py
215
1883
import py
import timeit


class Listdir:
    numiter = 100000
    numentries = 100

    def setup(self):
        tmpdir = py.path.local.make_numbered_dir(self.__class__.__name__)
        for i in range(self.numentries):
            tmpdir.join(str(i))
        self.tmpdir = tmpdir

    def run(self):
        return self.tmpdir.listdir()


class Listdir_arg(Listdir):
    numiter = 100000
    numentries = 100

    def run(self):
        return self.tmpdir.listdir("47")


class Join_onearg(Listdir):
    def run(self):
        self.tmpdir.join("17")
        self.tmpdir.join("18")
        self.tmpdir.join("19")


class Join_multi(Listdir):
    def run(self):
        self.tmpdir.join("a", "b")
        self.tmpdir.join("a", "b", "c")
        self.tmpdir.join("a", "b", "c", "d")


class Check(Listdir):
    def run(self):
        self.tmpdir.check()
        self.tmpdir.check()
        self.tmpdir.check()


class CheckDir(Listdir):
    def run(self):
        self.tmpdir.check(dir=1)
        self.tmpdir.check(dir=1)
        assert not self.tmpdir.check(dir=0)


class CheckDir2(Listdir):
    def run(self):
        self.tmpdir.stat().isdir()
        self.tmpdir.stat().isdir()
        assert self.tmpdir.stat().isdir()


class CheckFile(Listdir):
    def run(self):
        self.tmpdir.check(file=1)
        assert not self.tmpdir.check(file=1)
        assert self.tmpdir.check(file=0)


if __name__ == "__main__":
    import time
    for cls in [Listdir, Listdir_arg, Join_onearg, Join_multi,
                Check, CheckDir, CheckDir2, CheckFile,]:
        inst = cls()
        inst.setup()
        now = time.time()
        for i in xrange(cls.numiter):
            inst.run()
        elapsed = time.time() - now
        print "%s: %d loops took %.2f seconds, per call %.6f" % (
            cls.__name__, cls.numiter, elapsed, elapsed / cls.numiter)
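# Illustrative sketch (not part of the py benchmark): the driver above hand-rolls
# its timing loop and reports elapsed / numiter per call, even though it imports
# timeit.  The same per-call measurement with the stdlib timeit module would look
# roughly like this (Listdir is the benchmark class defined above):

import timeit

def time_per_call(bench_cls):
    inst = bench_cls()
    inst.setup()
    elapsed = timeit.timeit(inst.run, number=bench_cls.numiter)
    return elapsed / bench_cls.numiter

# Example (Python 2, matching the script above):
#   print "Listdir per call: %.6f" % time_per_call(Listdir)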
mpl-2.0
trondhindenes/ansible
lib/ansible/modules/network/avi/avi_controllerproperties.py
20
16486
#!/usr/bin/python # # @author: Gaurav Rastogi ([email protected]) # Eric Anderson ([email protected]) # module_check: supported # Avi Version: 17.1.2 # # Copyright: (c) 2017 Gaurav Rastogi, <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: avi_controllerproperties author: Gaurav Rastogi ([email protected]) short_description: Module for setup of ControllerProperties Avi RESTful Object description: - This module is used to configure ControllerProperties object - more examples at U(https://github.com/avinetworks/devops) requirements: [ avisdk ] version_added: "2.4" options: state: description: - The state that should be applied on the entity. default: present choices: ["absent", "present"] avi_api_update_method: description: - Default method for object update is HTTP PUT. - Setting to patch will override that behavior to use HTTP PATCH. version_added: "2.5" default: put choices: ["put", "patch"] avi_api_patch_op: description: - Patch operation to use when using avi_api_update_method as patch. version_added: "2.5" choices: ["add", "replace", "delete"] allow_ip_forwarding: description: - Field introduced in 17.1.1. - Default value when not specified in API or module is interpreted by Avi Controller as False. type: bool allow_unauthenticated_apis: description: - Allow unauthenticated access for special apis. - Default value when not specified in API or module is interpreted by Avi Controller as False. type: bool allow_unauthenticated_nodes: description: - Boolean flag to set allow_unauthenticated_nodes. - Default value when not specified in API or module is interpreted by Avi Controller as False. type: bool api_idle_timeout: description: - Allowed values are 0-1440. - Default value when not specified in API or module is interpreted by Avi Controller as 15. - Units(MIN). appviewx_compat_mode: description: - Export configuration in appviewx compatibility mode. - Field introduced in 17.1.1. - Default value when not specified in API or module is interpreted by Avi Controller as False. type: bool attach_ip_retry_interval: description: - Number of attach_ip_retry_interval. - Default value when not specified in API or module is interpreted by Avi Controller as 360. - Units(SEC). attach_ip_retry_limit: description: - Number of attach_ip_retry_limit. - Default value when not specified in API or module is interpreted by Avi Controller as 4. bm_use_ansible: description: - Use ansible for se creation in baremetal. - Field introduced in 17.2.2. - Default value when not specified in API or module is interpreted by Avi Controller as True. version_added: "2.5" type: bool cluster_ip_gratuitous_arp_period: description: - Number of cluster_ip_gratuitous_arp_period. - Default value when not specified in API or module is interpreted by Avi Controller as 60. - Units(MIN). crashed_se_reboot: description: - Number of crashed_se_reboot. - Default value when not specified in API or module is interpreted by Avi Controller as 900. - Units(SEC). dead_se_detection_timer: description: - Number of dead_se_detection_timer. - Default value when not specified in API or module is interpreted by Avi Controller as 360. - Units(SEC). dns_refresh_period: description: - Number of dns_refresh_period. - Default value when not specified in API or module is interpreted by Avi Controller as 60. - Units(MIN). dummy: description: - Number of dummy. 
enable_memory_balancer: description: - Enable/disable memory balancer. - Field introduced in 17.2.8. - Default value when not specified in API or module is interpreted by Avi Controller as True. version_added: "2.6" type: bool fatal_error_lease_time: description: - Number of fatal_error_lease_time. - Default value when not specified in API or module is interpreted by Avi Controller as 120. - Units(SEC). max_dead_se_in_grp: description: - Number of max_dead_se_in_grp. - Default value when not specified in API or module is interpreted by Avi Controller as 1. max_pcap_per_tenant: description: - Maximum number of pcap files stored per tenant. - Default value when not specified in API or module is interpreted by Avi Controller as 4. max_seq_attach_ip_failures: description: - Maximum number of consecutive attach ip failures that halts vs placement. - Field introduced in 17.2.2. - Default value when not specified in API or module is interpreted by Avi Controller as 3. version_added: "2.5" max_seq_vnic_failures: description: - Number of max_seq_vnic_failures. - Default value when not specified in API or module is interpreted by Avi Controller as 3. persistence_key_rotate_period: description: - Allowed values are 1-1051200. - Special values are 0 - 'disabled'. - Default value when not specified in API or module is interpreted by Avi Controller as 60. - Units(MIN). portal_token: description: - Token used for uploading tech-support to portal. - Field introduced in 16.4.6,17.1.2. version_added: "2.4" query_host_fail: description: - Number of query_host_fail. - Default value when not specified in API or module is interpreted by Avi Controller as 180. - Units(SEC). safenet_hsm_version: description: - Version of the safenet package installed on the controller. - Field introduced in 16.5.2,17.2.3. version_added: "2.5" se_create_timeout: description: - Number of se_create_timeout. - Default value when not specified in API or module is interpreted by Avi Controller as 900. - Units(SEC). se_failover_attempt_interval: description: - Interval between attempting failovers to an se. - Default value when not specified in API or module is interpreted by Avi Controller as 300. - Units(SEC). se_offline_del: description: - Number of se_offline_del. - Default value when not specified in API or module is interpreted by Avi Controller as 172000. - Units(SEC). se_vnic_cooldown: description: - Number of se_vnic_cooldown. - Default value when not specified in API or module is interpreted by Avi Controller as 120. - Units(SEC). secure_channel_cleanup_timeout: description: - Number of secure_channel_cleanup_timeout. - Default value when not specified in API or module is interpreted by Avi Controller as 60. - Units(MIN). secure_channel_controller_token_timeout: description: - Number of secure_channel_controller_token_timeout. - Default value when not specified in API or module is interpreted by Avi Controller as 60. - Units(MIN). secure_channel_se_token_timeout: description: - Number of secure_channel_se_token_timeout. - Default value when not specified in API or module is interpreted by Avi Controller as 60. - Units(MIN). seupgrade_fabric_pool_size: description: - Pool size used for all fabric commands during se upgrade. - Default value when not specified in API or module is interpreted by Avi Controller as 20. seupgrade_segroup_min_dead_timeout: description: - Time to wait before marking segroup upgrade as stuck. - Default value when not specified in API or module is interpreted by Avi Controller as 360. - Units(SEC). 
ssl_certificate_expiry_warning_days: description: - Number of days for ssl certificate expiry warning. - Units(DAYS). unresponsive_se_reboot: description: - Number of unresponsive_se_reboot. - Default value when not specified in API or module is interpreted by Avi Controller as 300. - Units(SEC). upgrade_dns_ttl: description: - Time to account for dns ttl during upgrade. - This is in addition to vs_scalein_timeout_for_upgrade in se_group. - Field introduced in 17.1.1. - Default value when not specified in API or module is interpreted by Avi Controller as 5. - Units(SEC). upgrade_lease_time: description: - Number of upgrade_lease_time. - Default value when not specified in API or module is interpreted by Avi Controller as 360. - Units(SEC). url: description: - Avi controller URL of the object. uuid: description: - Unique object identifier of the object. vnic_op_fail_time: description: - Number of vnic_op_fail_time. - Default value when not specified in API or module is interpreted by Avi Controller as 180. - Units(SEC). vs_apic_scaleout_timeout: description: - Time to wait for the scaled out se to become ready before marking the scaleout done, applies to apic configuration only. - Default value when not specified in API or module is interpreted by Avi Controller as 360. - Units(SEC). vs_awaiting_se_timeout: description: - Number of vs_awaiting_se_timeout. - Default value when not specified in API or module is interpreted by Avi Controller as 60. - Units(SEC). vs_key_rotate_period: description: - Allowed values are 1-1051200. - Special values are 0 - 'disabled'. - Default value when not specified in API or module is interpreted by Avi Controller as 60. - Units(MIN). vs_se_attach_ip_fail: description: - Time to wait before marking attach ip operation on an se as failed. - Field introduced in 17.2.2. - Default value when not specified in API or module is interpreted by Avi Controller as 3600. - Units(SEC). version_added: "2.5" vs_se_bootup_fail: description: - Number of vs_se_bootup_fail. - Default value when not specified in API or module is interpreted by Avi Controller as 480. - Units(SEC). vs_se_create_fail: description: - Number of vs_se_create_fail. - Default value when not specified in API or module is interpreted by Avi Controller as 1500. - Units(SEC). vs_se_ping_fail: description: - Number of vs_se_ping_fail. - Default value when not specified in API or module is interpreted by Avi Controller as 60. - Units(SEC). vs_se_vnic_fail: description: - Number of vs_se_vnic_fail. - Default value when not specified in API or module is interpreted by Avi Controller as 300. - Units(SEC). vs_se_vnic_ip_fail: description: - Number of vs_se_vnic_ip_fail. - Default value when not specified in API or module is interpreted by Avi Controller as 120. - Units(SEC). warmstart_se_reconnect_wait_time: description: - Number of warmstart_se_reconnect_wait_time. - Default value when not specified in API or module is interpreted by Avi Controller as 300. - Units(SEC). 
extends_documentation_fragment: - avi ''' EXAMPLES = """ - name: Example to create ControllerProperties object avi_controllerproperties: controller: 10.10.25.42 username: admin password: something state: present name: sample_controllerproperties """ RETURN = ''' obj: description: ControllerProperties (api/controllerproperties) object returned: success, changed type: dict ''' from ansible.module_utils.basic import AnsibleModule try: from ansible.module_utils.network.avi.avi import ( avi_common_argument_spec, HAS_AVI, avi_ansible_api) except ImportError: HAS_AVI = False def main(): argument_specs = dict( state=dict(default='present', choices=['absent', 'present']), avi_api_update_method=dict(default='put', choices=['put', 'patch']), avi_api_patch_op=dict(choices=['add', 'replace', 'delete']), allow_ip_forwarding=dict(type='bool',), allow_unauthenticated_apis=dict(type='bool',), allow_unauthenticated_nodes=dict(type='bool',), api_idle_timeout=dict(type='int',), appviewx_compat_mode=dict(type='bool',), attach_ip_retry_interval=dict(type='int',), attach_ip_retry_limit=dict(type='int',), bm_use_ansible=dict(type='bool',), cluster_ip_gratuitous_arp_period=dict(type='int',), crashed_se_reboot=dict(type='int',), dead_se_detection_timer=dict(type='int',), dns_refresh_period=dict(type='int',), dummy=dict(type='int',), enable_memory_balancer=dict(type='bool',), fatal_error_lease_time=dict(type='int',), max_dead_se_in_grp=dict(type='int',), max_pcap_per_tenant=dict(type='int',), max_seq_attach_ip_failures=dict(type='int',), max_seq_vnic_failures=dict(type='int',), persistence_key_rotate_period=dict(type='int',), portal_token=dict(type='str', no_log=True,), query_host_fail=dict(type='int',), safenet_hsm_version=dict(type='str',), se_create_timeout=dict(type='int',), se_failover_attempt_interval=dict(type='int',), se_offline_del=dict(type='int',), se_vnic_cooldown=dict(type='int',), secure_channel_cleanup_timeout=dict(type='int',), secure_channel_controller_token_timeout=dict(type='int',), secure_channel_se_token_timeout=dict(type='int',), seupgrade_fabric_pool_size=dict(type='int',), seupgrade_segroup_min_dead_timeout=dict(type='int',), ssl_certificate_expiry_warning_days=dict(type='list',), unresponsive_se_reboot=dict(type='int',), upgrade_dns_ttl=dict(type='int',), upgrade_lease_time=dict(type='int',), url=dict(type='str',), uuid=dict(type='str',), vnic_op_fail_time=dict(type='int',), vs_apic_scaleout_timeout=dict(type='int',), vs_awaiting_se_timeout=dict(type='int',), vs_key_rotate_period=dict(type='int',), vs_se_attach_ip_fail=dict(type='int',), vs_se_bootup_fail=dict(type='int',), vs_se_create_fail=dict(type='int',), vs_se_ping_fail=dict(type='int',), vs_se_vnic_fail=dict(type='int',), vs_se_vnic_ip_fail=dict(type='int',), warmstart_se_reconnect_wait_time=dict(type='int',), ) argument_specs.update(avi_common_argument_spec()) module = AnsibleModule( argument_spec=argument_specs, supports_check_mode=True) if not HAS_AVI: return module.fail_json(msg=( 'Avi python API SDK (avisdk>=17.1) is not installed. ' 'For more details visit https://github.com/avinetworks/sdk.')) return avi_ansible_api(module, 'controllerproperties', set(['portal_token'])) if __name__ == '__main__': main()
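# Conceptual sketch (not the avisdk or avi_ansible_api code): the documented
# avi_api_update_method / avi_api_patch_op options above only choose between a
# full-object HTTP PUT and a partial HTTP PATCH against the controllerproperties
# endpoint (api/controllerproperties).  The hypothetical helper below, written
# against the generic requests library with a made-up auth header, only
# illustrates that difference; the real module delegates the call to
# avi_ansible_api() as shown in main().

import requests

def update_controllerproperties(base_url, token, data, method='put', patch_op='replace'):
    url = base_url + '/api/controllerproperties'
    headers = {'Authorization': 'Bearer ' + token}  # hypothetical auth scheme
    if method == 'put':
        # PUT replaces the whole object with `data`.
        return requests.put(url, json=data, headers=headers)
    # PATCH sends only the changed fields, wrapped in the chosen patch operation
    # (add / replace / delete) -- shown here purely for illustration.
    return requests.patch(url, json={patch_op: data}, headers=headers)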
gpl-3.0
costadorione/purestream
core/tmdb.py
1
65766
# -*- coding: utf-8 -*- # ------------------------------------------------------------ # streamondemand 5 # Copyright 2015 [email protected] # http://www.mimediacenter.info/foro/viewforum.php?f=36 # # Distributed under the terms of GNU General Public License v3 (GPLv3) # http://www.gnu.org/licenses/gpl-3.0.html # ------------------------------------------------------------ # This file is part of streamondemand 5. # # streamondemand 5 is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # streamondemand 5 is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with streamondemand 5. If not, see <http://www.gnu.org/licenses/>. # -------------------------------------------------------------------------------- import copy import re import time from core import jsontools from core import logger from core import scrapertools from platformcode import platformtools # ----------------------------------------------------------------------------------------------------------- # Conjunto de funciones relacionadas con las infoLabels. # version 1.0: # Version inicial # # Incluyen: # set_infoLabels(source, seekTmdb, idioma_busqueda): Obtiene y fija (item.infoLabels) los datos extras de una o # varias series, capitulos o peliculas. # set_infoLabels_item(item, seekTmdb, idioma_busqueda): Obtiene y fija (item.infoLabels) los datos extras de una # serie, capitulo o pelicula. # set_infoLabels_itemlist(item_list, seekTmdb, idioma_busqueda): Obtiene y fija (item.infoLabels) los datos # extras de una lista de series, capitulos o peliculas. # infoLabels_tostring(item): Retorna un str con la lista ordenada con los infoLabels del item # # Uso: # tmdb.set_infoLabels(item, seekTmdb = True) # # Obtener datos basicos de una pelicula: # Antes de llamar al metodo set_infoLabels el titulo a buscar debe estar en item.fulltitle # o en item.contentTitle y el año en item.infoLabels['year']. # # Obtener datos basicos de una serie: # Antes de llamar al metodo set_infoLabels el titulo a buscar debe estar en item.show o en # item.contentSerieName. # # Obtener mas datos de una pelicula o serie: # Despues de obtener los datos basicos en item.infoLabels['tmdb'] tendremos el codigo de la serie o pelicula. # Tambien podriamos directamente fijar este codigo, si se conoce, o utilizar los codigo correspondientes de: # IMDB (en item.infoLabels['IMDBNumber'] o item.infoLabels['code'] o item.infoLabels['imdb_id']), TVDB # (solo series, en item.infoLabels['tvdb_id']), # Freebase (solo series, en item.infoLabels['freebase_mid']),TVRage (solo series, en # item.infoLabels['tvrage_id']) # # Obtener datos de una temporada: # Antes de llamar al metodo set_infoLabels el titulo de la serie debe estar en item.show o en # item.contentSerieName, # el codigo TMDB de la serie debe estar en item.infoLabels['tmdb'] (puede fijarse automaticamente mediante # la consulta de datos basica) # y el numero de temporada debe estar en item.infoLabels['season']. 
# # Obtener datos de un episodio: # Antes de llamar al metodo set_infoLabels el titulo de la serie debe estar en item.show o en # item.contentSerieName, # el codigo TMDB de la serie debe estar en item.infoLabels['tmdb'] (puede fijarse automaticamente mediante la # consulta de datos basica), # el numero de temporada debe estar en item.infoLabels['season'] y el numero de episodio debe estar en # item.infoLabels['episode']. # # # -------------------------------------------------------------------------------------------------------------- otmdb_global = None def cb_select_from_tmdb(item, tmdb_result): if tmdb_result is None: logger.debug("he pulsado 'cancelar' en la ventana de info de la serie/pelicula") return None else: return tmdb_result def find_and_set_infoLabels_tmdb(item, ask_video=True): global otmdb_global contentType = item.contentType if item.contentType else ("movie" if not item.contentSerieName else "tvshow") title = item.contentSerieName if contentType == "tvshow" else item.contentTitle season = int(item.contentSeason) if item.contentSeason else "" episode = int(item.contentEpisodeNumber) if item.contentEpisodeNumber else "" contentType = "episode" if contentType == "tvshow" and item.contentSeason and item.contentEpisodeNumber else \ contentType year = item.infoLabels.get('year', '') video_type = "tv" if contentType in ["tvshow", "episode"] else "movie" tmdb_result = None while not tmdb_result: if not item.infoLabels.get("tmdb_id"): otmdb_global = Tmdb(texto_buscado=title, tipo=video_type, year=year) elif not otmdb_global or otmdb_global.result.get("id") != item.infoLabels['tmdb_id']: otmdb_global = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=video_type, idioma_busqueda="es") results = otmdb_global.get_list_resultados() if len(results) > 1 and ask_video: tmdb_result = platformtools.show_video_info(results, caption="[{0}]: Selecciona la {1} correcta" .format(title, "serie" if video_type == "tv" else "pelicula"), callback='cb_select_from_tmdb', item=item) elif len(results) > 0: tmdb_result = results[0] if tmdb_result is None: if platformtools.dialog_yesno("{0} no encontrada". format("Serie" if video_type == "tv" else "Pelicula") , "No se ha encontrado la {0}:". format("serie" if video_type == "tv" else "pelicula"), title, '¿Desea introducir otro nombre?'): # Pregunta el titulo it = platformtools.dialog_input(title, "Introduzca el nombre de la {0} a buscar". 
format("serie" if video_type == "tv" else "pelicula")) if it is not None: title = it else: logger.debug("he pulsado 'cancelar' en la ventana 'introduzca el nombre correcto'") break else: break infoLabels = item.infoLabels if type(item.infoLabels) == dict else {} if not tmdb_result: item.infoLabels = infoLabels return False infoLabels = otmdb_global.get_infoLabels(infoLabels, tmdb_result) infoLabels["mediatype"] = contentType if infoLabels["mediatype"] == "episode": try: episodio = otmdb_global.get_episodio(season, episode) except: pass # No se ha podido buscar else: if episodio: # Actualizar datos infoLabels['title'] = episodio['episodio_titulo'] infoLabels['season'] = season infoLabels['episode'] = episode if episodio['episodio_sinopsis']: infoLabels['plot'] = episodio['episodio_sinopsis'] if episodio['episodio_imagen']: infoLabels['thumbnail'] = episodio['episodio_imagen'] item.infoLabels = infoLabels return True def set_infoLabels_item(item, seekTmdb=True, idioma_busqueda='it', lock=None): # ----------------------------------------------------------------------------------------------------------- # Obtiene y fija (item.infoLabels) los datos extras de una serie, capitulo o pelicula. # # Parametros: # item: (Item) Objeto Item que representa un pelicula, serie o capitulo. El diccionario item.infoLabels sera # modificado incluyendo los datos extras localizados. # (opcional) seekTmdb: (bool) Si es True hace una busqueda en www.themoviedb.org para obtener los datos, # en caso contrario obtiene los datos del propio Item si existen. # (opcional) idioma_busqueda: (str) Codigo del idioma segun ISO 639-1, en caso de busqueda en # www.themoviedb.org. # Retorna: # Un numero cuyo valor absoluto representa la cantidad de elementos incluidos en el diccionario # item.infoLabels. # Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y negativo en caso contrario. 
# --------------------------------------------------------------------------------------------------------- global otmdb_global def __inicializar(): # Inicializar con valores por defecto if 'year' not in item.infoLabels: item.infoLabels['year'] = '' if 'IMDBNumber' not in item.infoLabels: item.infoLabels['IMDBNumber'] = '' if 'code' not in item.infoLabels: item.infoLabels['code'] = '' if 'imdb_id' not in item.infoLabels: item.infoLabels['imdb_id'] = '' if 'plot' not in item.infoLabels: item.infoLabels['plot'] = item.plot if item.plot != '' else item.contentPlot if 'genre' not in item.infoLabels: item.infoLabels['genre'] = item.category item.infoLabels['duration'] = item.duration item.infoLabels['AudioLanguage'] = item.language titulo = item.fulltitle if item.fulltitle != '' else \ (item.contentTitle if item.contentTitle != '' else item.title) if 'title' not in item.infoLabels: item.infoLabels['title'] = titulo item.infoLabels['tvshowtitle'] = item.show if item.show != '' else item.contentSerieName if 'mediatype' not in item.infoLabels: item.infoLabels['mediatype'] = 'movie' if item.infoLabels['tvshowtitle'] == '' else 'tvshow' def obtener_datos_item(): if item.contentSeason != '': item.infoLabels['mediatype'] = 'season' if item.contentEpisodeNumber != '' or item.contentEpisodeTitle != '': item.infoLabels['mediatype'] = 'episode' if item.contentTitle == '': item.contentTitle = item.title return -1 * len(item.infoLabels) def __leer_datos(otmdb_aux): item.infoLabels = otmdb_aux.get_infoLabels(item.infoLabels) if 'thumbnail' in item.infoLabels: item.thumbnail = item.infoLabels['thumbnail'] if 'fanart' in item.infoLabels: item.fanart = item.infoLabels['fanart'] if seekTmdb: # Comprobamos q tipo de contenido es... if 'mediatype' not in item.infoLabels: item.infoLabels['tvshowtitle'] = item.show if item.show != '' else item.contentSerieName item.infoLabels['mediatype'] = 'movie' if item.infoLabels['tvshowtitle'] == '' else 'tvshow' tipo = 'movie' if item.infoLabels['mediatype'] == 'movie' else 'tv' if 'season' in item.infoLabels and 'tmdb_id' in item.infoLabels: try: numtemporada = int(item.infoLabels['season']) except ValueError: logger.debug("El numero de temporada no es valido") return obtener_datos_item() if lock: lock.acquire() if not otmdb_global: otmdb_global = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo, idioma_busqueda=idioma_busqueda) __leer_datos(otmdb_global) temporada = otmdb_global.get_temporada(numtemporada) if lock: lock.release() if 'episode' in item.infoLabels: try: episode = int(item.infoLabels['episode']) except ValueError: logger.debug("El número de episodio (%s) no es valido" % repr(item.infoLabels['episode'])) return obtener_datos_item() # Tenemos numero de temporada y numero de episodio validos... # ... 
buscar datos episodio item.infoLabels['mediatype'] = 'episode' episodio = otmdb_global.get_episodio(numtemporada, episode) if episodio: # Actualizar datos __leer_datos(otmdb_global) item.infoLabels['title'] = episodio['episodio_titulo'] if episodio['episodio_sinopsis']: item.infoLabels['plot'] = episodio['episodio_sinopsis'] if episodio['episodio_imagen']: item.infoLabels['poster_path'] = episodio['episodio_imagen'] item.thumbnail = item.infoLabels['poster_path'] if episodio['episodio_air_date']: item.infoLabels['aired'] = episodio['episodio_air_date'] if episodio['episodio_vote_average']: item.infoLabels['rating'] = episodio['episodio_vote_average'] item.infoLabels['votes'] = episodio['episodio_vote_count'] return len(item.infoLabels) else: # Tenemos numero de temporada valido pero no numero de episodio... # ... buscar datos temporada item.infoLabels['mediatype'] = 'season' temporada = otmdb_global.get_temporada(numtemporada) if temporada: # Actualizar datos __leer_datos(otmdb_global) logger.debug(str(item.infoLabels)) logger.debug(str(temporada)) item.infoLabels['title'] = temporada['name'] if temporada['overview']: item.infoLabels['plot'] = temporada['overview'] if temporada['air_date']: item.infoLabels['aired'] = temporada['air_date'] if temporada['poster_path']: item.infoLabels['poster_path'] = 'http://image.tmdb.org/t/p/original' + temporada['poster_path'] item.thumbnail = item.infoLabels['poster_path'] return len(item.infoLabels) # Buscar... else: __inicializar() otmdb = copy.copy(otmdb_global) # Busquedas por ID... if 'tmdb_id' in item.infoLabels and item.infoLabels['tmdb_id']: # ...Busqueda por tmdb_id otmdb = Tmdb(id_Tmdb=item.infoLabels['tmdb_id'], tipo=tipo, idioma_busqueda=idioma_busqueda) elif item.infoLabels['IMDBNumber'] or item.infoLabels['code'] or item.infoLabels['imdb_id']: if item.infoLabels['IMDBNumber']: item.infoLabels['code'] = item.infoLabels['IMDBNumber'] item.infoLabels['imdb_id'] = item.infoLabels['IMDBNumber'] elif item.infoLabels['code']: item.infoLabels['IMDBNumber'] = item.infoLabels['code'] item.infoLabels['imdb_id'] = item.infoLabels['code'] else: item.infoLabels['code'] = item.infoLabels['imdb_id'] item.infoLabels['IMDBNumber'] = item.infoLabels['imdb_id'] # ...Busqueda por imdb code otmdb = Tmdb(external_id=item.infoLabels['imdb_id'], external_source="imdb_id", tipo=tipo, idioma_busqueda=idioma_busqueda) elif tipo == 'tv': # buscar con otros codigos if 'tvdb_id' in item.infoLabels and item.infoLabels['tvdb_id']: # ...Busqueda por tvdb_id otmdb = Tmdb(external_id=item.infoLabels['tvdb_id'], external_source="tvdb_id", tipo=tipo, idioma_busqueda=idioma_busqueda) elif 'freebase_mid' in item.infoLabels and item.infoLabels['freebase_mid']: # ...Busqueda por freebase_mid otmdb = Tmdb(external_id=item.infoLabels['freebase_mid'], external_source="freebase_mid", tipo=tipo, idioma_busqueda=idioma_busqueda) elif 'freebase_id' in item.infoLabels and item.infoLabels['freebase_id']: # ...Busqueda por freebase_id otmdb = Tmdb(external_id=item.infoLabels['freebase_id'], external_source="freebase_id", tipo=tipo, idioma_busqueda=idioma_busqueda) elif 'tvrage_id' in item.infoLabels and item.infoLabels['tvrage_id']: # ...Busqueda por tvrage_id otmdb = Tmdb(external_id=item.infoLabels['tvrage_id'], external_source="tvrage_id", tipo=tipo, idioma_busqueda=idioma_busqueda) if otmdb is None: # No se ha podido buscar por ID... 
# hacerlo por titulo if item.infoLabels['title'] != '': if tipo == 'tv': # Busqueda de serie por titulo y filtrando sus resultados si es necesario otmdb = Tmdb(texto_buscado=item.infoLabels['tvshowtitle'], tipo=tipo, idioma_busqueda=idioma_busqueda, filtro=item.infoLabels.get('filtro', {}), year=str(item.infoLabels.get('year', ''))) else: # Busqueda de pelicula por titulo... if item.infoLabels['year'] or 'filtro' in item.infoLabels: # ...y año o filtro titulo_buscado = item.fulltitle if item.fulltitle != '' else item.contentTitle otmdb = Tmdb(texto_buscado=titulo_buscado, tipo=tipo, idioma_busqueda=idioma_busqueda, filtro=item.infoLabels.get('filtro', {}), year=str(item.infoLabels.get('year', ''))) if otmdb is None or not otmdb.get_id(): # La busqueda no ha dado resultado return obtener_datos_item() else: # La busqueda ha encontrado un resultado valido __leer_datos(otmdb) return len(item.infoLabels) else: __inicializar() return obtener_datos_item() def set_infoLabels_itemlist(item_list, seekTmdb=False, idioma_busqueda='it'): """ De manera concurrente, obtiene los datos de los items incluidos en la lista item_list. La API tiene un limite de 40 peticiones por IP cada 10'' y por eso la lista no deberia tener mas de 30 items para asegurar un buen funcionamiento de esta funcion. :param item_list: listado de objetos Item que representan peliculas, series o capitulos. El diccionario item.infoLabels de cada objeto Item sera modificado incluyendo los datos extras localizados. :type item_list: list :param seekTmdb: Si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario obtiene los datos del propio Item si existen. :type seekTmdb: bool :param idioma_busqueda: Codigo del idioma segun ISO 639-1, en caso de busqueda en www.themoviedb.org. :type idioma_busqueda: str :return: Una lista de numeros cuyo valor absoluto representa la cantidad de elementos incluidos en el diccionario item.infoLabels de cada Item. Este numero sera positivo si los datos se han obtenido de www.themoviedb.org y negativo en caso contrario. :rtype: list """ import threading semaforo = threading.Semaphore(20) lock = threading.Lock() r_list = list() i = 0 l_hilo = list() def sub_get(item, _i, _seekTmdb): semaforo.acquire() ret = set_infoLabels_item(item, _seekTmdb, idioma_busqueda, lock) # logger.debug(str(ret) + "item: " + item.tostring()) semaforo.release() r_list.append((_i, item, ret)) for item in item_list: t = threading.Thread(target=sub_get, args=(item, i, seekTmdb)) t.start() i += 1 l_hilo.append(t) # esperar q todos los hilos terminen for x in l_hilo: x.join() # Ordenar lista de resultados por orden de llamada para mantener el mismo orden q item_list r_list.sort(key=lambda i: i[0]) # Reconstruir y devolver la lista solo con los resultados de las llamadas individuales return [ii[2] for ii in r_list] def set_infoLabels(source, seekTmdb=False, idioma_busqueda='it'): """ Dependiendo del tipo de dato de source obtiene y fija (item.infoLabels) los datos extras de una o varias series, capitulos o peliculas. @param source: variable que contiene la información para establecer infoLabels @type source: list, item @param seekTmdb: si es True hace una busqueda en www.themoviedb.org para obtener los datos, en caso contrario obtiene los datos del propio Item. 
@type seekTmdb: bool @param idioma_busqueda: fija el valor de idioma en caso de busqueda en www.themoviedb.org @type idioma_busqueda: str @return: un numero o lista de numeros con el resultado de las llamadas a set_infoLabels_item @rtype: int, list """ start_time = time.time() if type(source) == list: ret = set_infoLabels_itemlist(source, seekTmdb, idioma_busqueda) logger.debug("Se han obtenido los datos de %i enlaces en %f segundos" % (len(source), time.time() - start_time)) else: ret = set_infoLabels_item(source, seekTmdb, idioma_busqueda) logger.debug("Se han obtenido los datos del enlace en %f segundos" % (time.time() - start_time)) return ret def infoLabels_tostring(item, separador="\n"): """ Retorna un str con la lista ordenada con los infoLabels del item @param item: item @type item: item @param separador: tipo de separador de los campos @type separador: str @return: la lista ordenada con los infoLabels del item @rtype: str """ return separador.join([var + "= " + str(item.infoLabels[var]) for var in sorted(item.infoLabels)]) # --------------------------------------------------------------------------------------------------------------- # class Tmdb: # Scraper para streamondemand basado en el Api de https://www.themoviedb.org/ # version 1.4: # - Documentada limitacion de uso de la API (ver mas abajo). # - Añadido metodo get_temporada() # version 1.3: # - Corregido error al devolver None el path_poster y el backdrop_path # - Corregido error que hacia que en el listado de generos se fueran acumulando de una llamada a otra # - Añadido metodo get_generos() # - Añadido parametros opcional idioma_alternativo al metodo get_sinopsis() # # # Uso: # Metodos constructores: # Tmdb(texto_buscado, tipo) # Parametros: # texto_buscado:(str) Texto o parte del texto a buscar # tipo: ("movie" o "tv") Tipo de resultado buscado peliculas o series. Por defecto "movie" # (opcional) idioma_busqueda: (str) codigo del idioma segun ISO 639-1 # (opcional) include_adult: (bool) Se incluyen contenidos para adultos en la busqueda o no. Por defecto # 'False' # (opcional) year: (str) Año de lanzamiento. # (opcional) page: (int) Cuando hay muchos resultados para una busqueda estos se organizan por paginas. # Podemos cargar la pagina que deseemos aunque por defecto siempre es la primera. # Return: # Esta llamada devuelve un objeto Tmdb que contiene la primera pagina del resultado de buscar 'texto_buscado' # en la web themoviedb.org. Cuantos mas parametros opcionales se incluyan mas precisa sera la busqueda. # Ademas el objeto esta inicializado con el primer resultado de la primera pagina de resultados. # Tmdb(id_Tmdb,tipo) # Parametros: # id_Tmdb: (str) Codigo identificador de una determinada pelicula o serie en themoviedb.org # tipo: ("movie" o "tv") Tipo de resultado buscado peliculas o series. Por defecto "movie" # (opcional) idioma_busqueda: (str) codigo del idioma segun ISO 639-1 # Return: # Esta llamada devuelve un objeto Tmdb que contiene el resultado de buscar una pelicula o serie con el # identificador id_Tmd # en la web themoviedb.org. # Tmdb(external_id, external_source, tipo) # Parametros: # external_id: (str) Codigo identificador de una determinada pelicula o serie en la web referenciada por # 'external_source'. # external_source: (Para series:"imdb_id","freebase_mid","freebase_id","tvdb_id","tvrage_id"; Para # peliculas:"imdb_id") # tipo: ("movie" o "tv") Tipo de resultado buscado peliculas o series. 
Por defecto "movie" # (opcional) idioma_busqueda: (str) codigo del idioma segun ISO 639-1 # Return: # Esta llamada devuelve un objeto Tmdb que contiene el resultado de buscar una pelicula o serie con el # identificador 'external_id' de # la web referenciada por 'external_source' en la web themoviedb.org. # # Metodos principales: # get_id(): Retorna un str con el identificador Tmdb de la pelicula o serie cargada o una cadena vacia si no hubiese # nada cargado. # get_sinopsis(idioma_alternativo): Retorna un str con la sinopsis de la serie o pelicula cargada. # get_poster (tipo_respuesta,size): Obtiene el poster o un listado de posters. # get_backdrop (tipo_respuesta,size): Obtiene una imagen de fondo o un listado de imagenes de fondo. # get_fanart (tipo,idioma,temporada): Obtiene un listado de imagenes del tipo especificado de la web Fanart.tv # get_temporada(temporada): Obtiene un diccionario con datos especificos de la temporada. # get_episodio (temporada, capitulo): Obtiene un diccionario con datos especificos del episodio. # get_generos(): Retorna un str con la lista de generos a los que pertenece la pelicula o serie. # # # Otros metodos: # load_resultado(resultado, page): Cuando la busqueda devuelve varios resultados podemos seleccionar que resultado # concreto y de que pagina cargar los datos. # # Limitaciones: # El uso de la API impone un limite de 20 conexiones simultaneas (concurrencia) o 30 peticiones en 10 segundos por IP # Informacion sobre la api : http://docs.themoviedb.apiary.io # ------------------------------------------------------------------------------------------------------------------- class Tmdb(object): # Atributo de clase dic_generos = {} ''' dic_generos={"id_idioma1": {"tv": {"id1": "name1", "id2": "name2" }, "movie": {"id1": "name1", "id2": "name2" } } } ''' def __search(self, index_resultado=0, page=1): # http://api.themoviedb.org/3/search/movie?api_key=f7f51775877e0bb6703520952b3c7840&query=superman&language=es # &include_adult=false&page=1 url = ('http://api.themoviedb.org/3/search/%s?api_key=f7f51775877e0bb6703520952b3c7840&query=%s&language=%s' '&include_adult=%s&page=%s' % (self.busqueda["tipo"], self.busqueda["texto"].replace(' ', '%20'), self.busqueda["idioma"], self.busqueda["include_adult"], str(page))) if self.busqueda["year"] != '': url += '&year=' + str(self.busqueda["year"]) buscando = self.busqueda["texto"].capitalize() logger.info("[Tmdb.py] Buscando %s en pagina %s:\n%s" % (buscando, page, url)) response_dic = {} try: response_dic = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url)) self.total_results = response_dic["total_results"] self.total_pages = response_dic["total_pages"] except: self.total_results = 0 if self.total_results > 0: self.results = response_dic["results"] if len(self.results) > 0: if self.busqueda['filtro']: # TODO documentar esta parte for key, value in dict(self.busqueda['filtro']).items(): for r in self.results[:]: ''' # Opcion mas permisiva if r.has_key(k) and r[k] != v: self.results.remove(r) self.total_results -= 1 ''' # Opcion mas precisa if key not in r or r[key] != value: self.results.remove(r) self.total_results -= 1 if index_resultado < len(self.results): self.__leer_resultado(self.results[index_resultado]) else: logger.error("La busqueda de '{0}' no dio {1} resultados para la pagina {2}" .format(buscando, index_resultado + 1, page)) else: # No hay resultados de la busqueda logger.error("La busqueda de '%s' no dio resultados para la pagina %s" % (buscando, page)) def __by_id(self, source="tmdb"): if 
source == "tmdb": # http://api.themoviedb.org/3/movie/1924?api_key=f7f51775877e0bb6703520952b3c7840&language=es # &append_to_response=images,videos,external_ids,credits&include_image_language=es,null # http://api.themoviedb.org/3/tv/1407?api_key=f7f51775877e0bb6703520952b3c7840&language=es # &append_to_response=images,videos,external_ids,credits&include_image_language=es,null url = ('http://api.themoviedb.org/3/%s/%s?api_key=f7f51775877e0bb6703520952b3c7840&language=%s' '&append_to_response=images,videos,external_ids,credits&include_image_language=%s,null' % (self.busqueda["tipo"], self.busqueda["id"], self.busqueda["idioma"], self.busqueda["idioma"])) buscando = "id_Tmdb: " + self.busqueda["id"] else: # http://api.themoviedb.org/3/find/%s?external_source=imdb_id&api_key=f7f51775877e0bb6703520952b3c7840 url = ('http://api.themoviedb.org/3/find/%s?external_source=%s&api_key=f7f51775877e0bb6703520952b3c7840' '&language=%s' % (self.busqueda["id"], source, self.busqueda["idioma"])) buscando = source.capitalize() + ": " + self.busqueda["id"] logger.info("[Tmdb.py] Buscando %s:\n%s" % (buscando, url)) try: resultado = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url)) if source != "tmdb": if self.busqueda["tipo"] == "movie": resultado = resultado["movie_results"] else: resultado = resultado["tv_results"] if len(resultado) > 0: resultado = resultado[0] except: resultado = {} if len(resultado) > 0: self.result = resultado if self.total_results == 0: self.results.append(resultado) self.total_results = 1 self.total_pages = 1 self.__leer_resultado(resultado) else: # No hay resultados de la busqueda logger.debug("La busqueda de %s no dio resultados." % buscando) def __inicializar(self): # Inicializamos las colecciones de resultados, fanart y temporada for i in (self.result, self.fanart, self.temporada): for k in i.keys(): if type(i[k]) == str: i[k] = "" elif type(i[k]) == list: i[k] = [] elif type(i[k]) == dict: i[k] = {} def __init__(self, **kwargs): self.page = kwargs.get('page', 1) self.results = [] self.total_pages = 0 self.total_results = 0 self.fanart = {} self.temporada = {} self.busqueda = {'id': "", 'texto': "", 'tipo': kwargs.get('tipo', 'movie'), 'idioma': kwargs.get('idioma_busqueda', 'es'), 'include_adult': str(kwargs.get('include_adult', 'false')), 'year': kwargs.get('year', ''), 'filtro': kwargs.get('filtro', {}) } self.result = {'adult': "", 'backdrop_path': "", # ruta imagen de fondo mas valorada # belongs_to_collection 'budget': "", # Presupuesto 'genres': [], # lista de generos 'homepage': "", 'id': "", 'imdb_id': "", 'freebase_mid': "", 'freebase_id': "", 'tvdb_id': "", 'tvrage_id': "", # IDs equivalentes 'original_language': "", 'original_title': "", 'overview': "", # sinopsis # popularity 'poster_path': "", 'production_companies': [], 'production_countries': [], 'origin_country': [], 'release_date': "", 'first_air_date': "", 'revenue': "", # recaudacion 'runtime': "", # runtime duracion # spoken_languages 'status': "", 'tagline': "", 'title': "", 'video': "", # ("true" o "false") indica si la busqueda movies/id/videos devolvera algo o no 'vote_average': "", 'vote_count': "", 'name': "", # nombre en caso de personas o series (tv) 'profile_path': "", # ruta imagenes en caso de personas 'known_for': {}, # Diccionario de peliculas en caso de personas (id_pelicula:titulo) 'images_backdrops': [], 'images_posters': [], 'images_profiles': [], 'videos': [] } def rellenar_dic_generos(): # Rellenar diccionario de generos del tipo e idioma seleccionados if 
self.busqueda["idioma"] not in Tmdb.dic_generos: Tmdb.dic_generos[self.busqueda["idioma"]] = {} if self.busqueda["tipo"] not in Tmdb.dic_generos[self.busqueda["idioma"]]: Tmdb.dic_generos[self.busqueda["idioma"]][self.busqueda["tipo"]] = {} url = ('http://api.themoviedb.org/3/genre/%s/list?api_key=f7f51775877e0bb6703520952b3c7840&language=%s' % (self.busqueda["tipo"], self.busqueda["idioma"])) try: lista_generos = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url))["genres"] except: pass for i in lista_generos: Tmdb.dic_generos[self.busqueda["idioma"]][self.busqueda["tipo"]][str(i["id"])] = i["name"] if self.busqueda["tipo"] == 'movie' or self.busqueda["tipo"] == "tv": if self.busqueda["idioma"] not in Tmdb.dic_generos: rellenar_dic_generos() elif self.busqueda["tipo"] not in Tmdb.dic_generos[self.busqueda["idioma"]]: rellenar_dic_generos() else: # La busqueda de personas no esta soportada en esta version. raise Exception("Parametros no validos al crear el objeto Tmdb.\nConsulte los modos de uso.") if 'id_Tmdb' in kwargs: self.busqueda["id"] = kwargs.get('id_Tmdb') self.__by_id() elif 'texto_buscado' in kwargs: self.busqueda["texto"] = kwargs.get('texto_buscado') self.__search(page=self.page) elif 'external_source' in kwargs and 'external_id' in kwargs: # TV Series: imdb_id, freebase_mid, freebase_id, tvdb_id, tvrage_id # Movies: imdb_id if (self.busqueda["tipo"] == 'movie' and kwargs.get('external_source') == "imdb_id") or \ (self.busqueda["tipo"] == 'tv' and kwargs.get('external_source') in ( "imdb_id", "freebase_mid", "freebase_id", "tvdb_id", "tvrage_id")): self.busqueda["id"] = kwargs.get('external_id') self.__by_id(source=kwargs.get('external_source')) else: raise Exception("Parametros no validos al crear el objeto Tmdb.\nConsulte los modos de uso.") def __leer_resultado(self, data): for k, v in data.items(): if k == "genre_ids": # Lista de generos (lista con los id de los generos) self.result["genres"] = [] for i in v: try: self.result["genres"].append( self.dic_generos[self.busqueda["idioma"]][self.busqueda["tipo"]][str(i)]) except: pass elif k == "genre" or k == "genres": # Lista de generos (lista de objetos {id,nombre}) self.result["genres"] = [] for i in v: self.result["genres"].append(i['name']) elif k == "known_for": # Lista de peliculas de un actor for i in v: self.result["known_for"][i['id']] = i['title'] elif k == "images": # Se incluyen los datos de las imagenes if "backdrops" in v: self.result["images_backdrops"] = v["backdrops"] if "posters" in v: self.result["images_posters"] = v["posters"] if "profiles" in v: self.result["images_profiles"] = v["profiles"] elif k == "credits": # Se incluyen los creditos if "cast" in v: self.result["credits_cast"] = v["cast"] if "crew" in v: self.result["credits_crew"] = v["crew"] elif k == "videos": # Se incluyen los datos de los videos self.result["videos"] = v["results"] elif k == "external_ids": # Listado de IDs externos for kj, _id in v.items(): # print kj + ":" + str(id) if kj in self.result: self.result[kj] = str(_id) elif k in self.result: # el resto if type(v) == list or type(v) == dict: self.result[k] = v elif v is None: self.result[k] = "" else: self.result[k] = str(v) def load_resultado(self, index_resultado=0, page=1): # Si no hay mas de un resultado no podemos cambiar if self.total_results <= 1: return None if page < 1 or page > self.total_pages: page = 1 if index_resultado < 0: index_resultado = 0 self.__inicializar() if page != self.page: self.__search(index_resultado=index_resultado, page=page) self.page = 
page else: # print self.result["genres"] self.__leer_resultado(self.results[index_resultado]) def get_list_resultados(self, numResult=20): # TODO documentar res = [] numResult = numResult if numResult > 0 else self.total_results numResult = min([numResult, self.total_results]) cr = 0 for p in range(1, self.total_pages + 1): for r in range(0, len(self.results)): try: self.load_resultado(r, p) self.result['type'] = self.busqueda.get("tipo", "movie") self.result['thumbnail'] = self.get_poster(size="w300") self.result['fanart'] = self.get_backdrop() res.append(self.result.copy()) cr += 1 if cr >= numResult: return res except: continue return res def get_generos(self): # -------------------------------------------------------------------------------------------------------------------------------------------- # Parametros: # none # Return: (str) # Devuelve la lista de generos a los que pertenece la pelicula o serie. # -------------------------------------------------------------------------------------------------------------------------------------------- return ', '.join(self.result["genres"]) def get_id(self): """ :return: Devuelve el identificador Tmdb de la pelicula o serie cargada o una cadena vacia en caso de que no hubiese nada cargado. Se puede utilizar este metodo para saber si una busqueda ha dado resultado o no. :rtype: str """ return str(self.result['id']) def get_sinopsis(self, idioma_alternativo=""): """ :param idioma_alternativo: codigo del idioma, segun ISO 639-1, en el caso de que en el idioma fijado para la busqueda no exista sinopsis. Por defecto, se utiliza el idioma original. Si se utiliza None como idioma_alternativo, solo se buscara en el idioma fijado. :type idioma_alternativo: str :return: Devuelve la sinopsis de una pelicula o serie :rtype: str """ ret = "" if self.result['id']: ret = self.result['overview'] if self.result['overview'] == "" and str(idioma_alternativo).lower() != 'none': # Vamos a lanzar una busqueda por id y releer de nuevo la sinopsis self.busqueda["id"] = str(self.result["id"]) if idioma_alternativo: self.busqueda["idioma"] = idioma_alternativo else: self.busqueda["idioma"] = self.result['original_language'] url = ('http://api.themoviedb.org/3/%s/%s?api_key=f7f51775877e0bb6703520952b3c7840&language=%s' % (self.busqueda["tipo"], self.busqueda["id"], self.busqueda["idioma"])) try: resultado = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url)) except: pass if resultado: if 'overview' in resultado: self.result['overview'] = resultado['overview'] ret = self.result['overview'] return ret def get_poster(self, tipo_respuesta="str", size="original"): """ @param tipo_respuesta: Tipo de dato devuelto por este metodo. Por defecto "str" @type tipo_respuesta: list, str @param size: ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original") Indica la anchura(w) o altura(h) de la imagen a descargar. Por defecto "original" @return: Si el tipo_respuesta es "list" devuelve un listado con todas las urls de las imagenes tipo poster del tamaño especificado. Si el tipo_respuesta es "str" devuelve la url de la imagen tipo poster, mas valorada, del tamaño especificado. Si el tamaño especificado no existe se retornan las imagenes al tamaño original. 
@rtype: list, str """ ret = [] if size not in ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original"): size = "original" if self.result["poster_path"] is None or self.result["poster_path"] == "": poster_path = "" else: poster_path = 'http://image.tmdb.org/t/p/' + size + self.result["poster_path"] if tipo_respuesta == 'str': return poster_path elif self.result["id"] == "": return [] if len(self.result['images_posters']) == 0: # Vamos a lanzar una busqueda por id y releer de nuevo todo self.busqueda["id"] = str(self.result["id"]) self.__by_id() if len(self.result['images_posters']) > 0: for i in self.result['images_posters']: imagen_path = i['file_path'] if size != "original": # No podemos pedir tamaños mayores que el original if size[1] == 'w' and int(imagen_path['width']) < int(size[1:]): size = "original" elif size[1] == 'h' and int(imagen_path['height']) < int(size[1:]): size = "original" ret.append('http://image.tmdb.org/t/p/' + size + imagen_path) else: ret.append(poster_path) return ret def get_backdrop(self, tipo_respuesta="str", size="original"): """ Devuelve las imagenes de tipo backdrop @param tipo_respuesta: Tipo de dato devuelto por este metodo. Por defecto "str" @type tipo_respuesta: list, str @param size: ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original") Indica la anchura(w) o altura(h) de la imagen a descargar. Por defecto "original" @type size: str @return: Si el tipo_respuesta es "list" devuelve un listado con todas las urls de las imagenes tipo backdrop del tamaño especificado. Si el tipo_respuesta es "str" devuelve la url de la imagen tipo backdrop, mas valorada, del tamaño especificado. Si el tamaño especificado no existe se retornan las imagenes al tamaño original. @rtype: list, str """ ret = [] if size not in ("w45", "w92", "w154", "w185", "w300", "w342", "w500", "w600", "h632", "w780", "w1280", "original"): size = "original" if self.result["backdrop_path"] is None or self.result["backdrop_path"] == "": backdrop_path = "" else: backdrop_path = 'http://image.tmdb.org/t/p/' + size + self.result["backdrop_path"] if tipo_respuesta == 'str': return backdrop_path elif self.result["id"] == "": return [] if len(self.result['images_backdrops']) == 0: # Vamos a lanzar una busqueda por id y releer de nuevo todo self.busqueda["id"] = str(self.result["id"]) self.__by_id() if len(self.result['images_backdrops']) > 0: for i in self.result['images_backdrops']: imagen_path = i['file_path'] if size != "original": # No podemos pedir tamaños mayores que el original if size[1] == 'w' and int(imagen_path['width']) < int(size[1:]): size = "original" elif size[1] == 'h' and int(imagen_path['height']) < int(size[1:]): size = "original" ret.append('http://image.tmdb.org/t/p/' + size + imagen_path) else: ret.append(backdrop_path) return ret def get_fanart(self, tipo="hdclearart", idioma=None, temporada="all"): """ @param tipo: ("hdclearlogo", "poster", "banner", "thumbs", "hdclearart", "clearart", "background", "clearlogo", "characterart", "seasonthumb", "seasonposter", "seasonbanner", "moviedisc") Indica el tipo de Art que se desea obtener, segun la web Fanart.tv. Alguno de estos tipos pueden estar solo disponibles para peliculas o series segun el caso. Por defecto "hdclearart" @type tipo: str @param idioma: (opcional) Codigos del idioma segun ISO 639-1, "all" (por defecto) para todos los idiomas o "00" para ninguno. 
Por ejemplo: idioma=["es","00","en"] Incluiria los resultados en español, sin idioma definido y en ingles, en este orden. @type idioma: list @param temporada: (opcional solo para series) Un numero entero que representa el numero de temporada, el numero cero para especiales o "all" para imagenes validas para cualquier temporada. Por defecto "all". @type: temporada: str @return: Retorna una lista con las url de las imagenes segun los parametros de entrada y ordenadas segun las votaciones de Fanart.tv @rtype: list """ if idioma is None: idioma = ["all"] if self.result["id"] == "": return [] if len(self.fanart) == 0: # Si esta vacio acceder a Fanart.tv y cargar el resultado if self.busqueda['tipo'] == 'movie': # http://assets.fanart.tv/v3/movies/1924?api_key=dffe90fba4d02c199ae7a9e71330c987 url = "http://assets.fanart.tv/v3/movies/" + str( self.result["id"]) + "?api_key=dffe90fba4d02c199ae7a9e71330c987" temporada = "" elif self.busqueda['tipo'] == 'tv': # En este caso necesitamos el tvdb_id if self.result["tvdb_id"] == '': # Vamos lanzar una busqueda por id y releer de nuevo todo self.busqueda["id"] = str(self.result["id"]) self.__by_id() # http://assets.fanart.tv/v3/tv/153021?api_key=dffe90fba4d02c199ae7a9e71330c987 url = "http://assets.fanart.tv/v3/tv/" + str( self.result["tvdb_id"]) + "?api_key=dffe90fba4d02c199ae7a9e71330c987" else: # 'person' No soportado return None fanarttv = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url)) if fanarttv is None: # Si el item buscado no esta en Fanart.tv devolvemos una lista vacia return [] for k, v in fanarttv.items(): if k in ("hdtvlogo", "hdmovielogo"): self.fanart["hdclearlogo"] = v elif k in ("tvposter", "movieposter"): self.fanart["poster"] = v elif k in ("tvbanner", "moviebanner"): self.fanart["banner"] = v elif k in ("tvthumb", "moviethumb"): self.fanart["thumbs"] = v elif k in ("hdclearart", "hdmovieclearart"): self.fanart["hdclearart"] = v elif k in ("clearart", "movieart"): self.fanart["clearart"] = v elif k in ("showbackground", "moviebackground"): self.fanart["background"] = v elif k in ("clearlogo", "movielogo"): self.fanart["clearlogo"] = v elif k in ("characterart", "seasonthumb", "seasonposter", "seasonbanner", "moviedisc"): self.fanart[k] = v # inicializamos el diccionario con los idiomas ret_dic = {} for i in idioma: ret_dic[i] = [] for i in self.fanart[tipo]: if i["lang"] in idioma: if "season" not in i: ret_dic[i["lang"]].append(i["url"]) elif temporada == "" or (temporada == 'all' and i["season"] == 'all'): ret_dic[i["lang"]].append(i["url"]) else: if i["season"] == "": i["season"] = 0 else: i["season"] = int(i["season"]) if i["season"] == int(temporada): ret_dic[i["lang"]].append(i["url"]) elif "all" in idioma: ret_dic["all"].append(i["url"]) ret_list = [] for i in idioma: ret_list.extend(ret_dic[i]) # print ret_list return ret_list def get_episodio(self, numtemporada=1, capitulo=1): # -------------------------------------------------------------------------------------------------------------------------------------------- # Parametros: # numtemporada(opcional): (int) Numero de temporada. Por defecto 1. # capitulo: (int) Numero de capitulo. Por defecto 1. 
# Return: (dic) # Devuelve un dicionario con los siguientes elementos: # "temporada_nombre", "temporada_sinopsis", "temporada_poster", "temporada_num_episodios"(int), # "episodio_titulo", "episodio_sinopsis", "episodio_imagen", "episodio_air_date", "episodio_air_date", # "episodio_crew", "episodio_guest_stars", "episodio_vote_count" y "episodio_vote_average" # -------------------------------------------------------------------------------------------------------------------------------------------- if self.result["id"] == "" or self.busqueda["tipo"] != "tv": return {} capitulo = int(capitulo) if capitulo < 1: capitulo = 1 temporada = self.get_temporada(numtemporada) if not temporada: # Se ha producido un error return {} if len(temporada["episodes"]) < capitulo: # Se ha producido un error logger.error("Episodio %d de la temporada %d no encontrado." % (capitulo, numtemporada)) return {} ret_dic = dict() ret_dic["temporada_nombre"] = temporada["name"] ret_dic["temporada_sinopsis"] = temporada["overview"] ret_dic["temporada_poster"] = ('http://image.tmdb.org/t/p/original' + temporada["poster_path"]) if temporada[ "poster_path"] else "" ret_dic["temporada_num_episodios"] = len(temporada["episodes"]) episodio = temporada["episodes"][capitulo - 1] ret_dic["episodio_titulo"] = episodio["name"] ret_dic["episodio_sinopsis"] = episodio["overview"] ret_dic["episodio_imagen"] = ('http://image.tmdb.org/t/p/original' + episodio["still_path"]) if episodio[ "still_path"] else "" ret_dic["episodio_air_date"] = episodio["air_date"] ret_dic["episodio_crew"] = episodio["crew"] ret_dic["episodio_guest_stars"] = episodio["guest_stars"] ret_dic["episodio_vote_count"] = episodio["vote_count"] ret_dic["episodio_vote_average"] = episodio["vote_average"] return ret_dic def get_temporada(self, numtemporada=1): # -------------------------------------------------------------------------------------------------------------------------------------------- # Parametros: # numtemporada: (int) Numero de temporada. Por defecto 1. # Return: (dic) # Devuelve un dicionario con datos sobre la temporada. 
# Puede obtener mas informacion sobre los datos devueltos en: # http://docs.themoviedb.apiary.io/#reference/tv-seasons/tvidseasonseasonnumber/get # http://docs.themoviedb.apiary.io/#reference/tv-seasons/tvidseasonseasonnumbercredits/get # -------------------------------------------------------------------------------------------------------------------------------------------- if self.result["id"] == "" or self.busqueda["tipo"] != "tv": return {} numtemporada = int(numtemporada) if numtemporada < 0: numtemporada = 1 # if not self.temporada.has_key("season_number") or self.temporada["season_number"] != numtemporada: # if numtemporada > len(self.temporada) or self.temporada[numtemporada] is None: if not self.temporada.has_key(numtemporada) or not self.temporada[numtemporada]: # Si no hay datos sobre la temporada solicitada, consultar en la web # http://api.themoviedb.org/3/tv/1407/season/1?api_key=f7f51775877e0bb6703520952b3c7840&language=es& # append_to_response=credits url = "http://api.themoviedb.org/3/tv/%s/season/%s?api_key=f7f51775877e0bb6703520952b3c7840&language=%s" \ "&append_to_response=credits" % (self.result["id"], numtemporada, self.busqueda["idioma"]) buscando = "id_Tmdb: " + str(self.result["id"]) + " temporada: " + str(numtemporada) + "\nURL: " + url logger.info("[Tmdb.py] Buscando " + buscando) try: self.temporada[numtemporada] = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url)) except: self.temporada[numtemporada] = ["status_code"] if "status_code" in self.temporada[numtemporada]: # Se ha producido un error self.temporada[numtemporada] = {} logger.error("La busqueda de " + buscando + " no dio resultados.") return {} return self.temporada[numtemporada] def get_videos(self): """ :return: Devuelve una lista ordenada (idioma/resolucion/tipo) de objetos Dict en la que cada uno de sus elementos corresponde con un trailer, teaser o clip de youtube. :rtype: list of Dict """ ret = [] if self.result['id']: if not self.result['videos']: # Primera búsqueda de videos en el idioma de busqueda url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=f7f51775877e0bb6703520952b3c7840&language=%s" \ % (self.busqueda['tipo'], self.result['id'], self.busqueda["idioma"]) try: dict_videos = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url)) except: pass if dict_videos['results']: dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size'])) self.result["videos"] = dict_videos['results'] # Si el idioma de busqueda no es ingles, hacer una segunda búsqueda de videos en inglés if self.busqueda["idioma"] != 'en': url = "http://api.themoviedb.org/3/%s/%s/videos?api_key=f7f51775877e0bb6703520952b3c7840" \ % (self.busqueda['tipo'], self.result['id']) try: dict_videos = jsontools.load_json(scrapertools.downloadpageWithoutCookies(url)) except: pass if dict_videos['results']: dict_videos['results'] = sorted(dict_videos['results'], key=lambda x: (x['type'], x['size'])) self.result["videos"].extend(dict_videos['results']) # Si las busqueda han obtenido resultados devolver un listado de objetos for i in self.result['videos']: if i['site'] == "YouTube": ret.append({'name': i['name'], 'url': "https://www.youtube.com/watch?v=%s" % i['key'], 'size': str(i['size']), 'type': i['type'], 'language': i['iso_639_1']}) return ret def get_infoLabels(self, infoLabels=None, origen=None): """ :param infoLabels: Informacion extra de la pelicula, serie, temporada o capitulo. :type infoLabels: Dict :return: Devuelve la informacion extra obtenida del objeto actual. 
Si se paso el parametro infoLables, el valor devuelto sera el leido como parametro debidamente actualizado. :rtype: Dict """ ret_infoLabels = copy.copy(infoLabels) if infoLabels else {} items = self.result.items() if not origen else origen.items() for k, v in items: if v == '': continue elif type(v) == str: v = re.sub(r"\n|\r|\t", "", v) if k == 'overview': ret_infoLabels['plot'] = self.get_sinopsis() elif k == 'runtime': ret_infoLabels['duration'] = v elif k == 'release_date': ret_infoLabels['year'] = int(v[:4]) elif k == 'first_air_date': ret_infoLabels['year'] = int(v[:4]) ret_infoLabels['aired'] = v ret_infoLabels['premiered'] = v elif k == 'original_title': ret_infoLabels['originaltitle'] = v elif k == 'vote_average': ret_infoLabels['rating'] = float(v) elif k == 'vote_count': ret_infoLabels['votes'] = v elif k == 'poster_path': ret_infoLabels['thumbnail'] = 'http://image.tmdb.org/t/p/original' + v elif k == 'backdrop_path': ret_infoLabels['fanart'] = 'http://image.tmdb.org/t/p/original' + v elif k == 'id': ret_infoLabels['tmdb_id'] = v elif k == 'imdb_id': ret_infoLabels['imdb_id'] = v ret_infoLabels['IMDBNumber'] = v ret_infoLabels['code'] = v elif k == 'genres': ret_infoLabels['genre'] = self.get_generos() elif k == 'name': ret_infoLabels['title'] = v elif k == 'production_companies': ret_infoLabels['studio'] = ", ".join(i['name'] for i in v) elif k == 'production_countries' or k == 'origin_country': if 'country' not in ret_infoLabels: ret_infoLabels['country'] = ", ".join(i if type(i) == str else i['name'] for i in v) else: ret_infoLabels['country'] = ", " + ", ".join(i if type(i) == str else i['name'] for i in v) elif k == 'credits_cast': ret_infoLabels['castandrole'] = [] for c in sorted(v, key=lambda c: c.get("order")): ret_infoLabels['castandrole'].append((c['name'], c['character'])) elif k == 'credits_crew': l_director = [] l_writer = [] for crew in v: if crew['job'].lower() == 'director': l_director.append(crew['name']) elif crew['job'].lower() in ('screenplay', 'writer'): l_writer.append(crew['name']) if l_director: ret_infoLabels['director'] = ", ".join(l_director) if l_writer: if 'writer' not in ret_infoLabels: ret_infoLabels['writer'] = ", ".join(l_writer) else: ret_infoLabels['writer'] += "," + (",".join(l_writer)) elif k == 'created_by': l_writer = [] for cr in v: l_writer.append(cr['name']) if 'writer' not in ret_infoLabels: ret_infoLabels['writer'] = ",".join(l_writer) else: ret_infoLabels['writer'] += "," + (",".join(l_writer)) elif k == 'videos' and len(v) > 0: if v[0]["site"] == "YouTube": ret_infoLabels['trailer'] = "https://www.youtube.com/watch?v=" + v[0]["key"] elif type(v) == str: ret_infoLabels[k] = v # logger.debug(k +'= '+ v) return ret_infoLabels #################################################################################################### # for StreamOnDemand by costaplus # ==================================================================================================== def infoSod(item, tipo="movie"): ''' :param item: item :return: ritorna un'item completo esente da errori di codice ''' logger.info("streamondemand.core.tmdb infoSod") logger.info("channel=[" + item.channel + "], action=[" + item.action + "], title[" + item.title + "], url=[" + item.url + "], thumbnail=[" + item.thumbnail + "], tipo=[" + tipo + "]") try: tmdbtitle = item.fulltitle.split("|")[0].split("{")[0].split("[")[0].split("(")[0].split("Sub-ITA")[0].split("Sub ITA")[0].split("20")[0].split("19")[0].split("S0")[0].split("Serie")[0].split("HD ")[0] year = 
scrapertools.find_single_match(item.fulltitle, '\((\d{4})\)') otmdb = Tmdb(texto_buscado=tmdbtitle, tipo=tipo, idioma_busqueda='it', year=year) item.infoLabels = otmdb.get_infoLabels() if 'thumbnail' in item.infoLabels: item.thumbnail = item.infoLabels['thumbnail'] if 'fanart' in item.infoLabels: item.fanart = item.infoLabels['fanart'] except: pass return item # ===================================================================================================
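# ---------------------------------------------------------------------------------------------------------------
# Editor's note: minimal usage sketch, not part of the original module. It assumes network access to
# themoviedb.org and the helpers already imported above (jsontools, scrapertools, logger); the title
# "Fight Club" and the Italian language code are illustrative values only.
if __name__ == "__main__":
    otmdb = Tmdb(texto_buscado="Fight Club", tipo="movie", idioma_busqueda="it")
    if otmdb.get_id():                              # empty string means the search found nothing
        print(otmdb.result["title"])                # localized title
        print(otmdb.get_generos())                  # comma-separated genre list
        print(otmdb.get_poster(size="w300"))        # URL of the best-rated w300 poster
        print(otmdb.get_infoLabels())               # dict ready to merge into item.infoLabels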
gpl-3.0
ygol/odoo
addons/project_issue_sheet/project_issue_sheet.py
381
2875
#-*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields,osv,orm from openerp.tools.translate import _ class project_issue(osv.osv): _inherit = 'project.issue' _description = 'project issue' _columns = { 'timesheet_ids': fields.one2many('hr.analytic.timesheet', 'issue_id', 'Timesheets'), 'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'), } def on_change_project(self, cr, uid, ids, project_id, context=None): if not project_id: return {} result = super(project_issue, self).on_change_project(cr, uid, ids, project_id, context=context) project = self.pool.get('project.project').browse(cr, uid, project_id, context=context) if 'value' not in result: result['value'] = {} account = project.analytic_account_id if account: result['value']['analytic_account_id'] = account.id return result def on_change_account_id(self, cr, uid, ids, account_id, context=None): if not account_id: return {} account = self.pool.get('account.analytic.account').browse(cr, uid, account_id, context=context) result = {} if account and account.state == 'pending': result = {'warning' : {'title' : _('Analytic Account'), 'message' : _('The Analytic Account is pending !')}} return result class account_analytic_line(osv.osv): _inherit = 'account.analytic.line' _description = 'account analytic line' _columns = { 'create_date' : fields.datetime('Create Date', readonly=True), } class hr_analytic_issue(osv.osv): _inherit = 'hr.analytic.timesheet' _description = 'hr analytic timesheet' _columns = { 'issue_id' : fields.many2one('project.issue', 'Issue'), } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
mattpap/sympy-polys
sympy/physics/paulialgebra.py
10
1595
""" This module implements Pauli algebra by subclassing Symbol. Only algebraic properties of Pauli matrices are used (we don't use the Matrix class). See the documentation to the class Pauli for examples. See also: http://en.wikipedia.org/wiki/Pauli_matrices """ from sympy import Symbol, I def delta(i,j): if i==j: return 1 else: return 0 def epsilon(i,j,k): if (i,j,k) in [(1,2,3), (2,3,1), (3,1,2)]: return 1 elif (i,j,k) in [(1,3,2), (3,2,1), (2,1,3)]: return -1 else: return 0 class Pauli(Symbol): """ >>> from sympy.physics.paulialgebra import Pauli >>> Pauli(1) sigma1 >>> Pauli(1)*Pauli(2) I*sigma3 >>> Pauli(1)*Pauli(1) 1 >>> Pauli(3)**4 1 >>> Pauli(1)*Pauli(2)*Pauli(3) I """ __slots__ = ["i"] def __new__(cls, i): if not i in [1,2,3]: raise IndexError("Invalid Pauli index") obj = Symbol.__new__(cls, "sigma%d"%i, commutative=False) obj.i=i return obj def __getnewargs__(self): return (self.i,) # FIXME don't work for -I*Pauli(2)*Pauli(3) def __mul__(self, other): if isinstance(other, Pauli): j=self.i k=other.i return delta(j,k) \ +I*epsilon(j,k,1)*Pauli(1) \ +I*epsilon(j,k,2)*Pauli(2) \ +I*epsilon(j,k,3)*Pauli(3) return super(Pauli, self).__mul__(other) def _eval_power(b, e): if e.is_Integer and e.is_positive: return super(Pauli, b).__pow__(int(e) % 2)
bsd-3-clause
openstack/nova
nova/api/validation/extra_specs/pci_passthrough.py
3
1271
# Copyright 2020 Red Hat, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Validators for ``pci_passthrough`` namespaced extra specs.""" from nova.api.validation.extra_specs import base EXTRA_SPEC_VALIDATORS = [ base.ExtraSpecValidator( name='pci_passthrough:alias', description=( 'Specify the number of ``$alias`` PCI device(s) to attach to the ' 'instance. Must be of format ``$alias:$number``. Use commas to ' 'specify multiple values.' ), value={ 'type': str, # one or more comma-separated '$alias:$num' values 'pattern': r'[^:]+:\d+(?:\s*,\s*[^:]+:\d+)*', }, ), ] def register(): return EXTRA_SPEC_VALIDATORS
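# Editor's note: a self-contained illustration of the value pattern declared above; the alias names
# ("a1", "gpu") are made up, and fullmatch() is used here only to show which strings the pattern
# accepts -- how nova itself anchors the pattern is not shown in this file.
if __name__ == "__main__":
    import re

    pattern = re.compile(r'[^:]+:\d+(?:\s*,\s*[^:]+:\d+)*')
    for value in ('a1:2', 'a1:2, gpu:1', 'a1', 'a1:two'):
        print('%-12s -> %s' % (value, bool(pattern.fullmatch(value))))
    # 'a1:2' and 'a1:2, gpu:1' are accepted; 'a1' and 'a1:two' are not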
apache-2.0
bright-sparks/chromium-spacewalk
chrome/common/extensions/docs/server2/caching_file_system.py
4
5389
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import posixpath import sys from file_system import FileSystem, StatInfo, FileNotFoundError from future import Future from path_util import IsDirectory, ToDirectory from third_party.json_schema_compiler.memoize import memoize class CachingFileSystem(FileSystem): '''FileSystem which implements a caching layer on top of |file_system|. It's smart, using Stat() to decided whether to skip Read()ing from |file_system|, and only Stat()ing directories never files. ''' def __init__(self, file_system, object_store_creator): self._file_system = file_system def create_object_store(category, **optargs): return object_store_creator.Create( CachingFileSystem, category='%s/%s' % (file_system.GetIdentity(), category), **optargs) self._stat_object_store = create_object_store('stat') # The read caches can start populated (start_empty=False) because file # updates are picked up by the stat, so it doesn't need the force-refresh # which starting empty is designed for. Without this optimisation, cron # runs are extra slow. self._read_object_store = create_object_store('read', start_empty=False) def Refresh(self): return self._file_system.Refresh() def Stat(self, path): return self.StatAsync(path).Get() def StatAsync(self, path): '''Stats the directory given, or if a file is given, stats the file's parent directory to get info about the file. ''' # Always stat the parent directory, since it will have the stat of the child # anyway, and this gives us an entire directory's stat info at once. dir_path, file_path = posixpath.split(path) dir_path = ToDirectory(dir_path) def make_stat_info(dir_stat): '''Converts a dir stat into the correct resulting StatInfo; if the Stat was for a file, the StatInfo should just contain that file. ''' if path == dir_path: return dir_stat # Was a file stat. Extract that file. file_version = dir_stat.child_versions.get(file_path) if file_version is None: raise FileNotFoundError('No stat found for %s in %s (found %s)' % (path, dir_path, dir_stat.child_versions)) return StatInfo(file_version) dir_stat = self._stat_object_store.Get(dir_path).Get() if dir_stat is not None: return Future(value=make_stat_info(dir_stat)) def next(dir_stat): assert dir_stat is not None # should have raised a FileNotFoundError # We only ever need to cache the dir stat. self._stat_object_store.Set(dir_path, dir_stat) return make_stat_info(dir_stat) return self._MemoizedStatAsyncFromFileSystem(dir_path).Then(next) @memoize def _MemoizedStatAsyncFromFileSystem(self, dir_path): '''This is a simple wrapper to memoize Futures to directory stats, since StatAsync makes heavy use of it. Only cache directories so that the memoized cache doesn't blow up. ''' assert IsDirectory(dir_path) return self._file_system.StatAsync(dir_path) def Read(self, paths, skip_not_found=False): '''Reads a list of files. If a file is in memcache and it is not out of date, it is returned. Otherwise, the file is retrieved from the file system. ''' cached_read_values = self._read_object_store.GetMulti(paths).Get() cached_stat_values = self._stat_object_store.GetMulti(paths).Get() # Populate a map of paths to Futures to their stat. They may have already # been cached in which case their Future will already have been constructed # with a value. 
stat_futures = {} def handle(error): if isinstance(error, FileNotFoundError): return None raise error for path in paths: stat_value = cached_stat_values.get(path) if stat_value is None: stat_future = self.StatAsync(path) if skip_not_found: stat_future = stat_future.Then(lambda x: x, handle) else: stat_future = Future(value=stat_value) stat_futures[path] = stat_future # Filter only the cached data which is fresh by comparing to the latest # stat. The cached read data includes the cached version. Remove it for # the result returned to callers. fresh_data = dict( (path, data) for path, (data, version) in cached_read_values.iteritems() if stat_futures[path].Get().version == version) if len(fresh_data) == len(paths): # Everything was cached and up-to-date. return Future(value=fresh_data) def next(new_results): # Update the cache. This is a path -> (data, version) mapping. self._read_object_store.SetMulti( dict((path, (new_result, stat_futures[path].Get().version)) for path, new_result in new_results.iteritems())) new_results.update(fresh_data) return new_results # Read in the values that were uncached or old. return self._file_system.Read(set(paths) - set(fresh_data.iterkeys()), skip_not_found=skip_not_found).Then(next) def GetIdentity(self): return self._file_system.GetIdentity() def __repr__(self): return '%s of <%s>' % (type(self).__name__, repr(self._file_system))
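# Editor's note: a stripped-down, dependency-free sketch (not used by the class above) of the caching
# rule Read() implements -- cached entries are (data, version) pairs and are trusted only while the
# version still matches the latest stat. The names below are illustrative, not part of this module.
def _sketch_cached_read(path, read_cache, stat_version, fetch):
  data_and_version = read_cache.get(path)
  if data_and_version is not None and data_and_version[1] == stat_version(path):
    return data_and_version[0]               # fresh: serve from the cache
  data = fetch(path)                         # stale or missing: go to the real file system
  read_cache[path] = (data, stat_version(path))
  return data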
bsd-3-clause
ktosiek/spacewalk
client/solaris/smartpm/smart/commands/update.py
5
3102
# # Copyright (c) 2004 Conectiva, Inc. # # Written by Gustavo Niemeyer <[email protected]> # # This file is part of Smart Package Manager. # # Smart Package Manager is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as published # by the Free Software Foundation; either version 2 of the License, or (at # your option) any later version. # # Smart Package Manager is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Smart Package Manager; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # from smart.option import OptionParser from smart.const import NEVER from smart import * import string import time import re USAGE=_("smart update [options] [channelalias] ...") DESCRIPTION=_(""" This command will update the known information about the given channels. If no channels are given, all channels which are not disabled or setup for manual updates will be updated. """) EXAMPLES=_(""" smart update smart update mychannel smart update mychannel1 mychannel2 """) def parse_options(argv): parser = OptionParser(usage=USAGE, description=DESCRIPTION, examples=EXAMPLES) parser.add_option("--after", metavar="MIN", type="int", help=_("only update if the last successful update " "happened before the given delay")) opts, args = parser.parse_args(argv) opts.args = args return opts def main(ctrl, opts): sysconf.assertWritable() if opts.after is not None: lastupdate = sysconf.get("last-update", 0) if lastupdate >= time.time()-(opts.after*60): return 1 ctrl.rebuildSysConfChannels() if opts.args: channels = [] for arg in opts.args: for channel in ctrl.getChannels(): if channel.getAlias() == arg: channels.append(channel) break else: raise Error, _("Argument '%s' is not a channel alias.") % arg else: channels = None # First, load current cache to keep track of new packages. ctrl.reloadChannels() failed = not ctrl.reloadChannels(channels, caching=NEVER) cache = ctrl.getCache() newpackages = pkgconf.filterByFlag("new", cache.getPackages()) if not newpackages: iface.showStatus(_("Channels have no new packages.")) else: if len(newpackages) <= 10: newpackages.sort() info = ":\n" for pkg in newpackages: info += " %s\n" % pkg else: info = "." iface.showStatus(_("Channels have %d new packages%s") % (len(newpackages), info)) return int(failed) # vim:ts=4:sw=4:et
gpl-2.0
MobinRanjbar/hue
desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/PublicKey/__init__.py
124
1876
# -*- coding: utf-8 -*- # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== """Public-key encryption and signature algorithms. Public-key encryption uses two different keys, one for encryption and one for decryption. The encryption key can be made public, and the decryption key is kept private. Many public-key algorithms can also be used to sign messages, and some can *only* be used for signatures. ======================== ============================================= Module Description ======================== ============================================= Crypto.PublicKey.DSA Digital Signature Algorithm (Signature only) Crypto.PublicKey.ElGamal (Signing and encryption) Crypto.PublicKey.RSA (Signing, encryption, and blinding) ======================== ============================================= :undocumented: _DSA, _RSA, _fastmath, _slowmath, pubkey """ __all__ = ['RSA', 'DSA', 'ElGamal'] __revision__ = "$Id$"
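# Editor's note: a minimal, illustrative sketch of the PyCrypto 2.x RSA API referred to in the table
# above; the 2048-bit key size is an example choice, not a recommendation.
if __name__ == "__main__":
    from Crypto.PublicKey import RSA
    from Crypto import Random

    rng = Random.new().read
    key = RSA.generate(2048, rng)      # new private key
    pub = key.publickey()              # matching public key
    print(pub.exportKey())             # PEM-encoded public key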
apache-2.0
evensonbryan/yocto-autobuilder
lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/test/unit/test_changes_pb.py
4
9572
# This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members """ Test the PB change source. """ import mock from twisted.trial import unittest from twisted.internet import defer from buildbot.changes import pb from buildbot.test.util import changesource, pbmanager from buildbot.util import epoch2datetime class TestPBChangeSource( changesource.ChangeSourceMixin, pbmanager.PBManagerMixin, unittest.TestCase): def setUp(self): self.setUpPBChangeSource() d = self.setUpChangeSource() @d.addCallback def setup(_): self.master.pbmanager = self.pbmanager return d def test_registration_no_slaveport(self): return self._test_registration(None, user='alice', passwd='sekrit') def test_registration_global_slaveport(self): return self._test_registration(('9999', 'alice', 'sekrit'), slavePort='9999', user='alice', passwd='sekrit') def test_registration_custom_port(self): return self._test_registration(('8888', 'alice', 'sekrit'), user='alice', passwd='sekrit', port='8888') def test_registration_no_userpass(self): return self._test_registration(('9939', 'change', 'changepw'), slavePort='9939') def test_registration_no_userpass_no_global(self): return self._test_registration(None) @defer.inlineCallbacks def _test_registration(self, exp_registration, slavePort=None, **constr_kwargs): config = mock.Mock() config.slavePortnum = slavePort self.attachChangeSource(pb.PBChangeSource(**constr_kwargs)) self.startChangeSource() yield self.changesource.reconfigService(config) if exp_registration: self.assertRegistered(*exp_registration) else: self.assertNotRegistered() yield self.stopChangeSource() if exp_registration: self.assertUnregistered(*exp_registration) self.assertEqual(self.changesource.registration, None) def test_perspective(self): self.attachChangeSource(pb.PBChangeSource('alice', 'sekrit', port='8888')) persp = self.changesource.getPerspective(mock.Mock(), 'alice') self.assertIsInstance(persp, pb.ChangePerspective) def test_describe(self): cs = pb.PBChangeSource() self.assertSubstring("PBChangeSource", cs.describe()) def test_describe_prefix(self): cs = pb.PBChangeSource(prefix="xyz") self.assertSubstring("PBChangeSource", cs.describe()) self.assertSubstring("xyz", cs.describe()) def test_describe_int(self): cs = pb.PBChangeSource(port=9989) self.assertSubstring("PBChangeSource", cs.describe()) @defer.inlineCallbacks def test_reconfigService_no_change(self): config = mock.Mock() self.attachChangeSource(pb.PBChangeSource(port='9876')) self.startChangeSource() yield self.changesource.reconfigService(config) self.assertRegistered('9876', 'change', 'changepw') yield self.stopChangeSource() self.assertUnregistered('9876', 'change', 'changepw') @defer.inlineCallbacks def test_reconfigService_default_changed(self): config = mock.Mock() config.slavePortnum = '9876' self.attachChangeSource(pb.PBChangeSource()) self.startChangeSource() yield self.changesource.reconfigService(config) 
self.assertRegistered('9876', 'change', 'changepw') config.slavePortnum = '1234' yield self.changesource.reconfigService(config) self.assertUnregistered('9876', 'change', 'changepw') self.assertRegistered('1234', 'change', 'changepw') yield self.stopChangeSource() self.assertUnregistered('1234', 'change', 'changepw') class TestChangePerspective(unittest.TestCase): def setUp(self): self.added_changes = [] self.master = mock.Mock() def addChange(**chdict): self.added_changes.append(chdict) return defer.succeed(mock.Mock()) self.master.addChange = addChange def test_addChange_noprefix(self): cp = pb.ChangePerspective(self.master, None) d = cp.perspective_addChange(dict(who="bar", files=['a'])) def check(_): self.assertEqual(self.added_changes, [ dict(author="bar", files=['a']) ]) d.addCallback(check) return d def test_addChange_codebase(self): cp = pb.ChangePerspective(self.master, None) d = cp.perspective_addChange(dict(who="bar", files=[], codebase='cb')) def check(_): self.assertEqual(self.added_changes, [ dict(author="bar", files=[], codebase='cb') ]) d.addCallback(check) return d def test_addChange_prefix(self): cp = pb.ChangePerspective(self.master, 'xx/') d = cp.perspective_addChange( dict(who="bar", files=['xx/a', 'yy/b'])) def check(_): self.assertEqual(self.added_changes, [ dict(author="bar", files=['a']) ]) d.addCallback(check) return d def test_addChange_sanitize_None(self): cp = pb.ChangePerspective(self.master, None) d = cp.perspective_addChange( dict(project=None, revlink=None, repository=None) ) def check(_): self.assertEqual(self.added_changes, [ dict(project="", revlink="", repository="", files=[]) ]) d.addCallback(check) return d def test_addChange_when_None(self): cp = pb.ChangePerspective(self.master, None) d = cp.perspective_addChange( dict(when=None) ) def check(_): self.assertEqual(self.added_changes, [ dict(when_timestamp=None, files=[]) ]) d.addCallback(check) return d def test_addChange_files_tuple(self): cp = pb.ChangePerspective(self.master, None) d = cp.perspective_addChange( dict(files=('a', 'b')) ) def check(_): self.assertEqual(self.added_changes, [ dict(files=['a', 'b']) ]) d.addCallback(check) return d def test_addChange_unicode(self): cp = pb.ChangePerspective(self.master, None) d = cp.perspective_addChange(dict(author=u"\N{SNOWMAN}", comments=u"\N{SNOWMAN}", files=[u'\N{VERY MUCH GREATER-THAN}'])) def check(_): self.assertEqual(self.added_changes, [ dict(author=u"\N{SNOWMAN}", comments=u"\N{SNOWMAN}", files=[u'\N{VERY MUCH GREATER-THAN}']) ]) d.addCallback(check) return d def test_addChange_unicode_as_bytestring(self): cp = pb.ChangePerspective(self.master, None) d = cp.perspective_addChange(dict(author=u"\N{SNOWMAN}".encode('utf8'), comments=u"\N{SNOWMAN}".encode('utf8'), files=[u'\N{VERY MUCH GREATER-THAN}'.encode('utf8')])) def check(_): self.assertEqual(self.added_changes, [ dict(author=u"\N{SNOWMAN}", comments=u"\N{SNOWMAN}", files=[u'\N{VERY MUCH GREATER-THAN}']) ]) d.addCallback(check) return d def test_addChange_non_utf8_bytestring(self): cp = pb.ChangePerspective(self.master, None) bogus_utf8 = '\xff\xff\xff\xff' replacement = bogus_utf8.decode('utf8', 'replace') d = cp.perspective_addChange(dict(author=bogus_utf8, files=['a'])) def check(_): self.assertEqual(self.added_changes, [ dict(author=replacement, files=['a']) ]) d.addCallback(check) return d def test_addChange_old_param_names(self): cp = pb.ChangePerspective(self.master, None) d = cp.perspective_addChange(dict(isdir=1, who='me', when=1234, files=[])) def check(_): 
self.assertEqual(self.added_changes, [ dict(is_dir=1, author='me', files=[], when_timestamp=epoch2datetime(1234)) ]) d.addCallback(check) return d def test_createUserObject_git_src(self): cp = pb.ChangePerspective(self.master, None) d = cp.perspective_addChange(dict(who="c <h@c>", src='git')) def check_change(_): self.assertEqual(self.added_changes, [ dict(author="c <h@c>", files=[], src='git') ]) d.addCallback(check_change) return d
gpl-2.0
strk/mapnik
scons/scons-local-2.2.0/SCons/Variables/PathVariable.py
14
5703
"""SCons.Variables.PathVariable This file defines an option type for SCons implementing path settings. To be used whenever a a user-specified path override should be allowed. Arguments to PathVariable are: option-name = name of this option on the command line (e.g. "prefix") option-help = help string for option option-dflt = default value for this option validator = [optional] validator for option value. Predefined validators are: PathAccept -- accepts any path setting; no validation PathIsDir -- path must be an existing directory PathIsDirCreate -- path must be a dir; will create PathIsFile -- path must be a file PathExists -- path must exist (any type) [default] The validator is a function that is called and which should return True or False to indicate if the path is valid. The arguments to the validator function are: (key, val, env). The key is the name of the option, the val is the path specified for the option, and the env is the env to which the Otions have been added. Usage example: Examples: prefix=/usr/local opts = Variables() opts = Variables() opts.Add(PathVariable('qtdir', 'where the root of Qt is installed', qtdir, PathIsDir)) opts.Add(PathVariable('qt_includes', 'where the Qt includes are installed', '$qtdir/includes', PathIsDirCreate)) opts.Add(PathVariable('qt_libraries', 'where the Qt library is installed', '$qtdir/lib')) """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
# __revision__ = "src/engine/SCons/Variables/PathVariable.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo" __all__ = ['PathVariable',] import os import os.path import SCons.Errors class _PathVariableClass(object): def PathAccept(self, key, val, env): """Accepts any path, no checking done.""" pass def PathIsDir(self, key, val, env): """Validator to check if Path is a directory.""" if not os.path.isdir(val): if os.path.isfile(val): m = 'Directory path for option %s is a file: %s' else: m = 'Directory path for option %s does not exist: %s' raise SCons.Errors.UserError(m % (key, val)) def PathIsDirCreate(self, key, val, env): """Validator to check if Path is a directory, creating it if it does not exist.""" if os.path.isfile(val): m = 'Path for option %s is a file, not a directory: %s' raise SCons.Errors.UserError(m % (key, val)) if not os.path.isdir(val): os.makedirs(val) def PathIsFile(self, key, val, env): """validator to check if Path is a file""" if not os.path.isfile(val): if os.path.isdir(val): m = 'File path for option %s is a directory: %s' else: m = 'File path for option %s does not exist: %s' raise SCons.Errors.UserError(m % (key, val)) def PathExists(self, key, val, env): """validator to check if Path exists""" if not os.path.exists(val): m = 'Path for option %s does not exist: %s' raise SCons.Errors.UserError(m % (key, val)) def __call__(self, key, help, default, validator=None): # NB: searchfunc is currenty undocumented and unsupported """ The input parameters describe a 'path list' option, thus they are returned with the correct converter and validator appended. The result is usable for input to opts.Add() . The 'default' option specifies the default path to use if the user does not specify an override with this option. validator is a validator, see this file for examples """ if validator is None: validator = self.PathExists if SCons.Util.is_List(key) or SCons.Util.is_Tuple(key): return (key, '%s ( /path/to/%s )' % (help, key[0]), default, validator, None) else: return (key, '%s ( /path/to/%s )' % (help, key), default, validator, None) PathVariable = _PathVariableClass() # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
lgpl-2.1
rlouf/patterns-of-segregation
bin/plot_scaling_classes.py
1
3443
"""plot_income_scaling.py Plot the number of households from a given class as a function of the total number of households per city """ import csv import math from matplotlib import pylab as plt from scipy.stats import linregress colours = {'Lower':'#4F8F6B', 'Higher':'#C1A62E', 'Middle':'#4B453C'} # Puerto-rican cities are excluded from the analysis PR_cities = ['7442','0060','6360','4840'] # # Read data # ## List of MSA msa = {} with open('data/names/msa.csv', 'r') as source: reader = csv.reader(source, delimiter='\t') reader.next() for rows in reader: if rows[0] not in PR_cities: msa[rows[0]] = rows[1] ## Classes classes = {} with open('extr/classes/msa_average/classes.csv', 'r') as source: reader = csv.reader(source, delimiter='\t') reader.next() for rows in reader: classes[rows[0]] =[int(r) for r in rows[1:]] ## Number of households per class, and total households_class = {cl:[] for cl in classes} households = [] for i, city in enumerate(msa): print "Compute number of households for %s (%s/%s)"%(msa[city], i+1, len(msa)) ## Import households data incomes = {} with open('data/income/msa/%s/income.csv'%city, 'r') as source: reader = csv.reader(source, delimiter='\t') reader.next() for rows in reader: num_cat = len(rows[1:]) incomes[rows[0]] = {cl: sum([int(rows[1+c]) for c in classes[cl]]) for cl in classes} incomes_cl = {cl: sum([incomes[au][cl] for au in incomes]) for cl in classes} for cl in classes: households_class[cl].append(incomes_cl[cl]) households.append(sum(incomes_cl.values())) # # Fit # slopes = {} r_values = {} intercepts = {} for cl in classes: print "Power-law fit for %s income class"%cl slope, intercept, r_value, p_value, std_err = linregress([math.log(p) for p in households],[math.log(d) for d in households_class[cl]]) slopes[cl] = slope r_values[cl] = r_value intercepts[cl] = intercept print "alpha = %s (R^2=%s)"%(slope, r_value) # # Plot # fig = plt.figure(figsize=(24,8)) for i,cl in enumerate(classes): ax = fig.add_subplot(1, len(classes), i+1) ax.plot(households, households_class[cl], 'o', color=colours[cl], mec=colours[cl], label=r'$%s$'%cl) ax.plot(sorted(households), [math.exp(intercepts[cl])*h**slopes[cl] for h in sorted(households)], label=r'$H_{%s} \sim H^{\,%.2f}$'%(cl, slopes[cl]), linestyle='--', color='black') ax.set_xlabel(r'$H$', fontsize=20) ax.set_ylabel(r'$H_{%s}$'%cl, fontsize=20) ax.set_xscale('log') ax.set_yscale('log') ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) ax.spines['left'].set_position(('outward', 10)) # outward by 10 points ax.spines['bottom'].set_position(('outward', 10)) # outward by 10 points ax.spines['left'].set_smart_bounds(True) ax.spines['bottom'].set_smart_bounds(True) ax.yaxis.set_ticks_position('left') ax.xaxis.set_ticks_position('bottom') ax.legend(loc='upper left', numpoints=1, frameon=False) plt.savefig('figures/paper/si/scaling_class.pdf', bbox_inches='tight') plt.show()
bsd-3-clause
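The script above estimates each class's scaling exponent with an ordinary least-squares fit in log-log space. A self-contained sketch of that fit (illustrative; the city counts below are made up, not taken from the study's data):

import math
from scipy.stats import linregress

households = [1.0e4, 5.0e4, 2.0e5, 1.0e6]        # hypothetical total households per city
households_class = [2.0e3, 1.2e4, 5.5e4, 3.2e5]  # hypothetical counts for one income class

# Fit log(H_class) = alpha * log(H) + c; alpha is the scaling exponent.
slope, intercept, r_value, p_value, std_err = linregress(
    [math.log(h) for h in households],
    [math.log(c) for c in households_class])
print("alpha = %.2f (R^2 = %.2f)" % (slope, r_value ** 2))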
rsteca/python-social-auth
social/backends/persona.py
70
1845
""" Mozilla Persona authentication backend, docs at: http://psa.matiasaguirre.net/docs/backends/persona.html """ from social.utils import handle_http_errors from social.backends.base import BaseAuth from social.exceptions import AuthFailed, AuthMissingParameter class PersonaAuth(BaseAuth): """BrowserID authentication backend""" name = 'persona' def get_user_id(self, details, response): """Use BrowserID email as ID""" return details['email'] def get_user_details(self, response): """Return user details, BrowserID only provides Email.""" # {'status': 'okay', # 'audience': 'localhost:8000', # 'expires': 1328983575529, # 'email': '[email protected]', # 'issuer': 'browserid.org'} email = response['email'] return {'username': email.split('@', 1)[0], 'email': email, 'fullname': '', 'first_name': '', 'last_name': ''} def extra_data(self, user, uid, response, details=None, *args, **kwargs): """Return users extra data""" return {'audience': response['audience'], 'issuer': response['issuer']} @handle_http_errors def auth_complete(self, *args, **kwargs): """Completes loging process, must return user instance""" if 'assertion' not in self.data: raise AuthMissingParameter(self, 'assertion') response = self.get_json('https://browserid.org/verify', data={ 'assertion': self.data['assertion'], 'audience': self.strategy.request_host() }, method='POST') if response.get('status') == 'failure': raise AuthFailed(self) kwargs.update({'response': response, 'backend': self}) return self.strategy.authenticate(*args, **kwargs)
bsd-3-clause
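The `auth_complete` method above reduces to POSTing the browser-supplied assertion together with the expected audience to the BrowserID verifier, then trusting the reply only when its status is 'okay'. A standalone sketch of that verification step outside the python-social-auth machinery (illustrative; it assumes the `requests` package and a reachable verifier endpoint):

import requests

def verify_assertion(assertion, audience):
    # The verifier answers with JSON such as
    # {'status': 'okay', 'email': ..., 'audience': ..., 'issuer': ...}
    resp = requests.post('https://browserid.org/verify',
                         data={'assertion': assertion, 'audience': audience})
    data = resp.json()
    return data if data.get('status') == 'okay' else None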
MarcosCommunity/odoo
comunity_modules/hr_payroll_cancel/__openerp__.py
3
1902
# -*- encoding: utf-8 -*-
###########################################################################
#    Module written for OpenERP, Open Source Management Solution
#
#    Copyright (c) 2014 Vauxoo - http://www.vauxoo.com/
#    All Rights Reserved.
#    info Vauxoo ([email protected])
############################################################################
#    Coded by: Luis Torres ([email protected])
############################################################################
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    "name": "Hr Payroll Cancel",
    "version": "1.0",
    "author": "Vauxoo",
    "category": "Localization/Mexico",
    "description": """
        This module changes the hr.payslip workflow so that a payslip can be
        cancelled after it has been confirmed.
    """,
    "website": "http://www.vauxoo.com/",
    "license": "AGPL-3",
    "depends": [
        "hr_payroll"
    ],
    "demo": [],
    "data": [
        "hr_payslip_view.xml",
        "hr_payslip_workflow.xml",
        "test/update_payroll_workflow.yml"
    ],
    "test": [],
    "js": [],
    "css": [],
    "qweb": [],
    "installable": True,
    "auto_install": False,
    "active": False
}
agpl-3.0
tensorflow/tensorflow
tensorflow/python/ops/image_ops_impl.py
6
226930
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Implementation of image ops.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import numpy as np from tensorflow.python.eager import def_function from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import gen_image_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn from tensorflow.python.ops import nn_ops from tensorflow.python.ops import random_ops from tensorflow.python.ops import sort_ops from tensorflow.python.ops import stateless_random_ops from tensorflow.python.ops import string_ops from tensorflow.python.ops import variables from tensorflow.python.util import deprecation from tensorflow.python.util import dispatch from tensorflow.python.util.tf_export import tf_export ops.NotDifferentiable('RandomCrop') # TODO(b/31222613): This op may be differentiable, and there may be # latent bugs here. ops.NotDifferentiable('HSVToRGB') ops.NotDifferentiable('DrawBoundingBoxes') ops.NotDifferentiable('SampleDistortedBoundingBox') ops.NotDifferentiable('SampleDistortedBoundingBoxV2') # TODO(bsteiner): Implement the gradient function for extract_glimpse # TODO(b/31222613): This op may be differentiable, and there may be # latent bugs here. ops.NotDifferentiable('ExtractGlimpse') ops.NotDifferentiable('NonMaxSuppression') ops.NotDifferentiable('NonMaxSuppressionV2') ops.NotDifferentiable('NonMaxSuppressionWithOverlaps') ops.NotDifferentiable('GenerateBoundingBoxProposals') # pylint: disable=invalid-name def _assert(cond, ex_type, msg): """A polymorphic assert, works with tensors and boolean expressions. If `cond` is not a tensor, behave like an ordinary assert statement, except that a empty list is returned. If `cond` is a tensor, return a list containing a single TensorFlow assert op. Args: cond: Something evaluates to a boolean value. May be a tensor. ex_type: The exception class to use. msg: The error message. Returns: A list, containing at most one assert op. """ if _is_tensor(cond): return [control_flow_ops.Assert(cond, [msg])] else: if not cond: raise ex_type(msg) else: return [] def _is_tensor(x): """Returns `True` if `x` is a symbolic tensor-like object. Args: x: A python object to check. Returns: `True` if `x` is a `tf.Tensor` or `tf.Variable`, otherwise `False`. 
""" return isinstance(x, (ops.Tensor, variables.Variable)) def _ImageDimensions(image, rank): """Returns the dimensions of an image tensor. Args: image: A rank-D Tensor. For 3-D of shape: `[height, width, channels]`. rank: The expected rank of the image Returns: A list of corresponding to the dimensions of the input image. Dimensions that are statically known are python integers, otherwise, they are integer scalar tensors. """ if image.get_shape().is_fully_defined(): return image.get_shape().as_list() else: static_shape = image.get_shape().with_rank(rank).as_list() dynamic_shape = array_ops.unstack(array_ops.shape(image), rank) return [ s if s is not None else d for s, d in zip(static_shape, dynamic_shape) ] def _Check3DImage(image, require_static=True): """Assert that we are working with a properly shaped image. Args: image: 3-D Tensor of shape [height, width, channels] require_static: If `True`, requires that all dimensions of `image` are known and non-zero. Raises: ValueError: if `image.shape` is not a 3-vector. Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned. """ try: image_shape = image.get_shape().with_rank(3) except ValueError: raise ValueError("'image' (shape %s) must be three-dimensional." % image.shape) if require_static and not image_shape.is_fully_defined(): raise ValueError("'image' (shape %s) must be fully defined." % image_shape) if any(x == 0 for x in image_shape): raise ValueError("all dims of 'image.shape' must be > 0: %s" % image_shape) if not image_shape.is_fully_defined(): return [ check_ops.assert_positive( array_ops.shape(image), ["all dims of 'image.shape' " 'must be > 0.']) ] else: return [] def _Assert3DImage(image): """Assert that we are working with a properly shaped image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: 3-D Tensor of shape [height, width, channels] Raises: ValueError: if `image.shape` is not a 3-vector. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape. """ return control_flow_ops.with_dependencies( _Check3DImage(image, require_static=False), image) def _AssertAtLeast3DImage(image): """Assert that we are working with a properly shaped image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: >= 3-D Tensor of size [*, height, width, depth] Raises: ValueError: if image.shape is not a [>= 3] vector. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape. """ return control_flow_ops.with_dependencies( _CheckAtLeast3DImage(image, require_static=False), image) def _CheckAtLeast3DImage(image, require_static=True): """Assert that we are working with a properly shaped image. Args: image: >= 3-D Tensor of size [*, height, width, depth] require_static: If `True`, requires that all dimensions of `image` are known and non-zero. Raises: ValueError: if image.shape is not a [>= 3] vector. Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned. 
""" try: if image.get_shape().ndims is None: image_shape = image.get_shape().with_rank(3) else: image_shape = image.get_shape().with_rank_at_least(3) except ValueError: raise ValueError("'image' (shape %s) must be at least three-dimensional." % image.shape) if require_static and not image_shape.is_fully_defined(): raise ValueError('\'image\' must be fully defined.') if any(x == 0 for x in image_shape[-3:]): raise ValueError('inner 3 dims of \'image.shape\' must be > 0: %s' % image_shape) if not image_shape[-3:].is_fully_defined(): return [ check_ops.assert_positive( array_ops.shape(image)[-3:], ["inner 3 dims of 'image.shape' " 'must be > 0.']), check_ops.assert_greater_equal( array_ops.rank(image), 3, message="'image' must be at least three-dimensional.") ] else: return [] def _AssertGrayscaleImage(image): """Assert that we are working with a properly shaped grayscale image. Performs the check statically if possible (i.e. if the shape is statically known). Otherwise adds a control dependency to an assert op that checks the dynamic shape. Args: image: >= 2-D Tensor of size [*, 1] Raises: ValueError: if image.shape is not a [>= 2] vector or if last dimension is not size 1. Returns: If the shape of `image` could be verified statically, `image` is returned unchanged, otherwise there will be a control dependency added that asserts the correct dynamic shape. """ return control_flow_ops.with_dependencies( _CheckGrayscaleImage(image, require_static=False), image) def _CheckGrayscaleImage(image, require_static=True): """Assert that we are working with properly shaped grayscale image. Args: image: >= 2-D Tensor of size [*, 1] require_static: Boolean, whether static shape is required. Raises: ValueError: if image.shape is not a [>= 2] vector or if last dimension is not size 1. Returns: An empty list, if `image` has fully defined dimensions. Otherwise, a list containing an assert op is returned. """ try: if image.get_shape().ndims is None: image_shape = image.get_shape().with_rank(2) else: image_shape = image.get_shape().with_rank_at_least(2) except ValueError: raise ValueError('A grayscale image (shape %s) must be at least ' 'two-dimensional.' % image.shape) if require_static and not image_shape.is_fully_defined(): raise ValueError('\'image\' must be fully defined.') if image_shape.is_fully_defined(): if image_shape[-1] != 1: raise ValueError('Last dimension of a grayscale image should be size 1.') if not image_shape.is_fully_defined(): return [ check_ops.assert_equal( array_ops.shape(image)[-1], 1, message='Last dimension of a grayscale image should be size 1.'), check_ops.assert_greater_equal( array_ops.rank(image), 3, message='A grayscale image must be at least two-dimensional.') ] else: return [] def fix_image_flip_shape(image, result): """Set the shape to 3 dimensional if we don't know anything else. Args: image: original image size result: flipped or transformed image Returns: An image whose shape is at least (None, None, None). """ image_shape = image.get_shape() if image_shape == tensor_shape.unknown_shape(): result.set_shape([None, None, None]) else: result.set_shape(image_shape) return result @tf_export('image.random_flip_up_down') @dispatch.add_dispatch_support def random_flip_up_down(image, seed=None): """Randomly flips an image vertically (upside down). With a 1 in 2 chance, outputs the contents of `image` flipped along the first dimension, which is `height`. Otherwise, output the image as-is. 
When passing a batch of images, each image will be randomly flipped independent of other images. Example usage: >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> tf.image.random_flip_up_down(image, 3).numpy().tolist() [[[3], [4]], [[1], [2]]] Randomly flip multiple images. >>> images = np.array( ... [ ... [[[1], [2]], [[3], [4]]], ... [[[5], [6]], [[7], [8]]] ... ]) >>> tf.image.random_flip_up_down(images, 4).numpy().tolist() [[[[3], [4]], [[1], [2]]], [[[5], [6]], [[7], [8]]]] For producing deterministic results given a `seed` value, use `tf.image.stateless_random_flip_up_down`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ random_func = functools.partial(random_ops.random_uniform, seed=seed) return _random_flip(image, 0, random_func, 'random_flip_up_down') @tf_export('image.random_flip_left_right') @dispatch.add_dispatch_support def random_flip_left_right(image, seed=None): """Randomly flip an image horizontally (left to right). With a 1 in 2 chance, outputs the contents of `image` flipped along the second dimension, which is `width`. Otherwise output the image as-is. When passing a batch of images, each image will be randomly flipped independent of other images. Example usage: >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> tf.image.random_flip_left_right(image, 5).numpy().tolist() [[[2], [1]], [[4], [3]]] Randomly flip multiple images. >>> images = np.array( ... [ ... [[[1], [2]], [[3], [4]]], ... [[[5], [6]], [[7], [8]]] ... ]) >>> tf.image.random_flip_left_right(images, 6).numpy().tolist() [[[[2], [1]], [[4], [3]]], [[[5], [6]], [[7], [8]]]] For producing deterministic results given a `seed` value, use `tf.image.stateless_random_flip_left_right`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ random_func = functools.partial(random_ops.random_uniform, seed=seed) return _random_flip(image, 1, random_func, 'random_flip_left_right') @tf_export('image.stateless_random_flip_left_right', v1=[]) @dispatch.add_dispatch_support def stateless_random_flip_left_right(image, seed): """Randomly flip an image horizontally (left to right) deterministically. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). 
Example usage: >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> seed = (2, 3) >>> tf.image.stateless_random_flip_left_right(image, seed).numpy().tolist() [[[2], [1]], [[4], [3]]] Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Returns: A tensor of the same type and shape as `image`. """ random_func = functools.partial( stateless_random_ops.stateless_random_uniform, seed=seed) return _random_flip( image, 1, random_func, 'stateless_random_flip_left_right') @tf_export('image.stateless_random_flip_up_down', v1=[]) @dispatch.add_dispatch_support def stateless_random_flip_up_down(image, seed): """Randomly flip an image vertically (upside down) deterministically. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). Example usage: >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> seed = (2, 3) >>> tf.image.stateless_random_flip_up_down(image, seed).numpy().tolist() [[[3], [4]], [[1], [2]]] Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Returns: A tensor of the same type and shape as `image`. """ random_func = functools.partial( stateless_random_ops.stateless_random_uniform, seed=seed) return _random_flip( image, 0, random_func, 'stateless_random_flip_up_down') def _random_flip(image, flip_index, random_func, scope_name): """Randomly (50% chance) flip an image along axis `flip_index`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. flip_index: Dimension along which to flip the image. Vertical is 0, Horizontal is 1. random_func: partial function for calling either stateful or stateless random ops with `seed` parameter specified. scope_name: Name of the scope in which the ops are added. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ with ops.name_scope(None, scope_name, [image]) as scope: image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) shape = image.get_shape() def f_rank3(): uniform_random = random_func(shape=[], minval=0, maxval=1.0) mirror_cond = math_ops.less(uniform_random, .5) result = control_flow_ops.cond( mirror_cond, lambda: array_ops.reverse(image, [flip_index]), lambda: image, name=scope) return fix_image_flip_shape(image, result) def f_rank4(): batch_size = array_ops.shape(image)[0] uniform_random = random_func(shape=[batch_size], minval=0, maxval=1.0) flips = math_ops.round( array_ops.reshape(uniform_random, [batch_size, 1, 1, 1])) flips = math_ops.cast(flips, image.dtype) flipped_input = array_ops.reverse(image, [flip_index + 1]) return flips * flipped_input + (1 - flips) * image if shape.ndims is None: rank = array_ops.rank(image) return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4) if shape.ndims == 3: return f_rank3() elif shape.ndims == 4: return f_rank4() else: raise ValueError( '\'image\' (shape %s) must have either 3 or 4 dimensions.' 
% shape) @tf_export('image.flip_left_right') @dispatch.add_dispatch_support def flip_left_right(image): """Flip an image horizontally (left to right). Outputs the contents of `image` flipped along the width dimension. See also `tf.reverse`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.flip_left_right(x) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 4., 5., 6.], [ 1., 2., 3.]], [[10., 11., 12.], [ 7., 8., 9.]]], dtype=float32)> Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. Returns: A tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ return _flip(image, 1, 'flip_left_right') @tf_export('image.flip_up_down') @dispatch.add_dispatch_support def flip_up_down(image): """Flip an image vertically (upside down). Outputs the contents of `image` flipped along the height dimension. See also `reverse()`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.flip_up_down(x) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 7., 8., 9.], [10., 11., 12.]], [[ 1., 2., 3.], [ 4., 5., 6.]]], dtype=float32)> Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. Returns: A `Tensor` of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ return _flip(image, 0, 'flip_up_down') def _flip(image, flip_index, scope_name): """Flip an image either horizontally or vertically. Outputs the contents of `image` flipped along the dimension `flip_index`. See also `reverse()`. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. flip_index: 0 For vertical, 1 for horizontal. scope_name: string, scope name. Returns: A `Tensor` of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. """ with ops.name_scope(None, scope_name, [image]): image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) shape = image.get_shape() def f_rank3(): return fix_image_flip_shape(image, array_ops.reverse(image, [flip_index])) def f_rank4(): return array_ops.reverse(image, [flip_index + 1]) if shape.ndims is None: rank = array_ops.rank(image) return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4) elif shape.ndims == 3: return f_rank3() elif shape.ndims == 4: return f_rank4() else: raise ValueError( '\'image\' (shape %s)must have either 3 or 4 dimensions.' % shape) @tf_export('image.rot90') @dispatch.add_dispatch_support def rot90(image, k=1, name=None): """Rotate image(s) counter-clockwise by 90 degrees. For example: >>> a=tf.constant([[[1],[2]], ... [[3],[4]]]) >>> # rotating `a` counter clockwise by 90 degrees >>> a_rot=tf.image.rot90(a) >>> print(a_rot[...,0].numpy()) [[2 4] [1 3]] >>> # rotating `a` counter clockwise by 270 degrees >>> a_rot=tf.image.rot90(a, k=3) >>> print(a_rot[...,0].numpy()) [[3 1] [4 2]] Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the image is rotated by 90 degrees. name: A name for this operation (optional). Returns: A rotated tensor of the same type and shape as `image`. Raises: ValueError: if the shape of `image` not supported. 
""" with ops.name_scope(name, 'rot90', [image, k]) as scope: image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) k = ops.convert_to_tensor(k, dtype=dtypes.int32, name='k') k.get_shape().assert_has_rank(0) k = math_ops.mod(k, 4) shape = image.get_shape() if shape.ndims is None: rank = array_ops.rank(image) def f_rank3(): return _rot90_3D(image, k, scope) def f_rank4(): return _rot90_4D(image, k, scope) return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4) elif shape.ndims == 3: return _rot90_3D(image, k, scope) elif shape.ndims == 4: return _rot90_4D(image, k, scope) else: raise ValueError( '\'image\' (shape %s) must have either 3 or 4 dimensions.' % shape) def _rot90_3D(image, k, name_scope): """Rotate image counter-clockwise by 90 degrees `k` times. Args: image: 3-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the image is rotated by 90 degrees. name_scope: A valid TensorFlow name scope. Returns: A 3-D tensor of the same type and shape as `image`. """ def _rot90(): return array_ops.transpose(array_ops.reverse_v2(image, [1]), [1, 0, 2]) def _rot180(): return array_ops.reverse_v2(image, [0, 1]) def _rot270(): return array_ops.reverse_v2(array_ops.transpose(image, [1, 0, 2]), [1]) cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180), (math_ops.equal(k, 3), _rot270)] result = control_flow_ops.case( cases, default=lambda: image, exclusive=True, name=name_scope) result.set_shape([None, None, image.get_shape()[2]]) return result def _rot90_4D(images, k, name_scope): """Rotate batch of images counter-clockwise by 90 degrees `k` times. Args: images: 4-D Tensor of shape `[height, width, channels]`. k: A scalar integer. The number of times the images are rotated by 90 degrees. name_scope: A valid TensorFlow name scope. Returns: A 4-D `Tensor` of the same type and shape as `images`. """ def _rot90(): return array_ops.transpose(array_ops.reverse_v2(images, [2]), [0, 2, 1, 3]) def _rot180(): return array_ops.reverse_v2(images, [1, 2]) def _rot270(): return array_ops.reverse_v2(array_ops.transpose(images, [0, 2, 1, 3]), [2]) cases = [(math_ops.equal(k, 1), _rot90), (math_ops.equal(k, 2), _rot180), (math_ops.equal(k, 3), _rot270)] result = control_flow_ops.case( cases, default=lambda: images, exclusive=True, name=name_scope) shape = result.get_shape() result.set_shape([shape[0], None, None, shape[3]]) return result @tf_export('image.transpose', v1=['image.transpose', 'image.transpose_image']) @dispatch.add_dispatch_support def transpose(image, name=None): """Transpose image(s) by swapping the height and width dimension. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.transpose(x) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 1., 2., 3.], [ 7., 8., 9.]], [[ 4., 5., 6.], [10., 11., 12.]]], dtype=float32)> Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. name: A name for this operation (optional). Returns: If `image` was 4-D, a 4-D float Tensor of shape `[batch, width, height, channels]` If `image` was 3-D, a 3-D float Tensor of shape `[width, height, channels]` Raises: ValueError: if the shape of `image` not supported. Usage Example: >>> image = [[[1, 2], [3, 4]], ... [[5, 6], [7, 8]], ... 
[[9, 10], [11, 12]]] >>> image = tf.constant(image) >>> tf.image.transpose(image) <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy= array([[[ 1, 2], [ 5, 6], [ 9, 10]], [[ 3, 4], [ 7, 8], [11, 12]]], dtype=int32)> """ with ops.name_scope(name, 'transpose', [image]): image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) shape = image.get_shape() if shape.ndims is None: rank = array_ops.rank(image) def f_rank3(): return array_ops.transpose(image, [1, 0, 2], name=name) def f_rank4(): return array_ops.transpose(image, [0, 2, 1, 3], name=name) return control_flow_ops.cond(math_ops.equal(rank, 3), f_rank3, f_rank4) elif shape.ndims == 3: return array_ops.transpose(image, [1, 0, 2], name=name) elif shape.ndims == 4: return array_ops.transpose(image, [0, 2, 1, 3], name=name) else: raise ValueError( '\'image\' (shape %s) must have either 3 or 4 dimensions.' % shape) @tf_export('image.central_crop') @dispatch.add_dispatch_support def central_crop(image, central_fraction): """Crop the central region of the image(s). Remove the outer parts of an image but retain the central region of the image along each dimension. If we specify central_fraction = 0.5, this function returns the region marked with "X" in the below diagram. -------- | | | XXXX | | XXXX | | | where "X" is the central 50% of the image. -------- This function works on either a single image (`image` is a 3-D Tensor), or a batch of images (`image` is a 4-D Tensor). Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0], ... [7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]], ... [[13.0, 14.0, 15.0], ... [16.0, 17.0, 18.0], ... [19.0, 20.0, 21.0], ... [22.0, 23.0, 24.0]], ... [[25.0, 26.0, 27.0], ... [28.0, 29.0, 30.0], ... [31.0, 32.0, 33.0], ... [34.0, 35.0, 36.0]], ... [[37.0, 38.0, 39.0], ... [40.0, 41.0, 42.0], ... [43.0, 44.0, 45.0], ... [46.0, 47.0, 48.0]]] >>> tf.image.central_crop(x, 0.5) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[16., 17., 18.], [19., 20., 21.]], [[28., 29., 30.], [31., 32., 33.]]], dtype=float32)> Args: image: Either a 3-D float Tensor of shape [height, width, depth], or a 4-D Tensor of shape [batch_size, height, width, depth]. central_fraction: float (0, 1], fraction of size to crop Raises: ValueError: if central_crop_fraction is not within (0, 1]. Returns: 3-D / 4-D float Tensor, as per the input. """ with ops.name_scope(None, 'central_crop', [image]): image = ops.convert_to_tensor(image, name='image') central_fraction_static = tensor_util.constant_value(central_fraction) if central_fraction_static is not None: if central_fraction_static <= 0.0 or central_fraction_static > 1.0: raise ValueError('central_fraction must be within (0, 1]') if central_fraction_static == 1.0: return image else: assert_ops = _assert( math_ops.logical_or(central_fraction > 0.0, central_fraction <= 1.0), ValueError, 'central_fraction must be within (0, 1]') image = control_flow_ops.with_dependencies(assert_ops, image) _AssertAtLeast3DImage(image) rank = image.get_shape().ndims if rank != 3 and rank != 4: raise ValueError('`image` should either be a Tensor with rank = 3 or ' 'rank = 4. Had rank = {}.'.format(rank)) # Helper method to return the `idx`-th dimension of `tensor`, along with # a boolean signifying if the dimension is dynamic. def _get_dim(tensor, idx): static_shape = tensor.get_shape().dims[idx].value if static_shape is not None: return static_shape, False return array_ops.shape(tensor)[idx], True # Get the height, width, depth (and batch size, if the image is a 4-D # tensor). 
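    # Worked example of the bounding-box arithmetic below (illustrative, not
    # part of the original source): for a 4 x 4 image with
    # central_fraction = 0.5,
    #   bbox_h_start = int((4 - 4 * 0.5) / 2) = 1
    #   bbox_h_size  = 4 - 2 * 1 = 2
    # so only the central 2 rows (and, by symmetry, the central 2 columns)
    # survive the slice, matching the 4 x 4 -> 2 x 2 docstring example.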
if rank == 3: img_h, dynamic_h = _get_dim(image, 0) img_w, dynamic_w = _get_dim(image, 1) img_d = image.get_shape()[2] else: img_bs = image.get_shape()[0] img_h, dynamic_h = _get_dim(image, 1) img_w, dynamic_w = _get_dim(image, 2) img_d = image.get_shape()[3] dynamic_h = dynamic_h or (central_fraction_static is None) dynamic_w = dynamic_w or (central_fraction_static is None) # Compute the bounding boxes for the crop. The type and value of the # bounding boxes depend on the `image` tensor's rank and whether / not the # dimensions are statically defined. if dynamic_h: img_hd = math_ops.cast(img_h, dtypes.float64) bbox_h_start = math_ops.cast( (img_hd - img_hd * math_ops.cast(central_fraction, dtypes.float64)) / 2, dtypes.int32) else: img_hd = float(img_h) bbox_h_start = int((img_hd - img_hd * central_fraction_static) / 2) if dynamic_w: img_wd = math_ops.cast(img_w, dtypes.float64) bbox_w_start = math_ops.cast( (img_wd - img_wd * math_ops.cast(central_fraction, dtypes.float64)) / 2, dtypes.int32) else: img_wd = float(img_w) bbox_w_start = int((img_wd - img_wd * central_fraction_static) / 2) bbox_h_size = img_h - bbox_h_start * 2 bbox_w_size = img_w - bbox_w_start * 2 if rank == 3: bbox_begin = array_ops.stack([bbox_h_start, bbox_w_start, 0]) bbox_size = array_ops.stack([bbox_h_size, bbox_w_size, -1]) else: bbox_begin = array_ops.stack([0, bbox_h_start, bbox_w_start, 0]) bbox_size = array_ops.stack([-1, bbox_h_size, bbox_w_size, -1]) image = array_ops.slice(image, bbox_begin, bbox_size) # Reshape the `image` tensor to the desired size. if rank == 3: image.set_shape([ None if dynamic_h else bbox_h_size, None if dynamic_w else bbox_w_size, img_d ]) else: image.set_shape([ img_bs, None if dynamic_h else bbox_h_size, None if dynamic_w else bbox_w_size, img_d ]) return image @tf_export('image.pad_to_bounding_box') @dispatch.add_dispatch_support def pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width): """Pad `image` with zeros to the specified `height` and `width`. Adds `offset_height` rows of zeros on top, `offset_width` columns of zeros on the left, and then pads the image on the bottom and right with zeros until it has dimensions `target_height`, `target_width`. This op does nothing if `offset_*` is zero and the image already has size `target_height` by `target_width`. Usage Example: >>> x = [[[1., 2., 3.], ... [4., 5., 6.]], ... [[7., 8., 9.], ... [10., 11., 12.]]] >>> padded_image = tf.image.pad_to_bounding_box(x, 1, 1, 4, 4) >>> padded_image <tf.Tensor: shape=(4, 4, 3), dtype=float32, numpy= array([[[ 0., 0., 0.], [ 0., 0., 0.], [ 0., 0., 0.], [ 0., 0., 0.]], [[ 0., 0., 0.], [ 1., 2., 3.], [ 4., 5., 6.], [ 0., 0., 0.]], [[ 0., 0., 0.], [ 7., 8., 9.], [10., 11., 12.], [ 0., 0., 0.]], [[ 0., 0., 0.], [ 0., 0., 0.], [ 0., 0., 0.], [ 0., 0., 0.]]], dtype=float32)> Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. offset_height: Number of rows of zeros to add on top. offset_width: Number of columns of zeros to add on the left. target_height: Height of output image. target_width: Width of output image. Returns: If `image` was 4-D, a 4-D float Tensor of shape `[batch, target_height, target_width, channels]` If `image` was 3-D, a 3-D float Tensor of shape `[target_height, target_width, channels]` Raises: ValueError: If the shape of `image` is incompatible with the `offset_*` or `target_*` arguments, or either `offset_height` or `offset_width` is negative. 
""" with ops.name_scope(None, 'pad_to_bounding_box', [image]): image = ops.convert_to_tensor(image, name='image') is_batch = True image_shape = image.get_shape() if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError( '\'image\' (shape %s) must have either 3 or 4 dimensions.' % image_shape) assert_ops = _CheckAtLeast3DImage(image, require_static=False) batch, height, width, depth = _ImageDimensions(image, rank=4) after_padding_width = target_width - offset_width - width after_padding_height = target_height - offset_height - height assert_ops += _assert(offset_height >= 0, ValueError, 'offset_height must be >= 0') assert_ops += _assert(offset_width >= 0, ValueError, 'offset_width must be >= 0') assert_ops += _assert(after_padding_width >= 0, ValueError, 'width must be <= target - offset') assert_ops += _assert(after_padding_height >= 0, ValueError, 'height must be <= target - offset') image = control_flow_ops.with_dependencies(assert_ops, image) # Do not pad on the depth dimensions. paddings = array_ops.reshape( array_ops.stack([ 0, 0, offset_height, after_padding_height, offset_width, after_padding_width, 0, 0 ]), [4, 2]) padded = array_ops.pad(image, paddings) padded_shape = [ None if _is_tensor(i) else i for i in [batch, target_height, target_width, depth] ] padded.set_shape(padded_shape) if not is_batch: padded = array_ops.squeeze(padded, axis=[0]) return padded @tf_export('image.crop_to_bounding_box') @dispatch.add_dispatch_support def crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width): """Crops an `image` to a specified bounding box. This op cuts a rectangular bounding box out of `image`. The top-left corner of the bounding box is at `offset_height, offset_width` in `image`, and the lower-right corner is at `offset_height + target_height, offset_width + target_width`. Example Usage: >>> image = tf.constant(np.arange(1, 28, dtype=np.float32), shape=[3, 3, 3]) >>> image[:,:,0] # print the first channel of the 3-D tensor <tf.Tensor: shape=(3, 3), dtype=float32, numpy= array([[ 1., 4., 7.], [10., 13., 16.], [19., 22., 25.]], dtype=float32)> >>> cropped_image = tf.image.crop_to_bounding_box(image, 0, 0, 2, 2) >>> cropped_image[:,:,0] # print the first channel of the cropped 3-D tensor <tf.Tensor: shape=(2, 2), dtype=float32, numpy= array([[ 1., 4.], [10., 13.]], dtype=float32)> Args: image: 4-D `Tensor` of shape `[batch, height, width, channels]` or 3-D `Tensor` of shape `[height, width, channels]`. offset_height: Vertical coordinate of the top-left corner of the bounding box in `image`. offset_width: Horizontal coordinate of the top-left corner of the bounding box in `image`. target_height: Height of the bounding box. target_width: Width of the bounding box. Returns: If `image` was 4-D, a 4-D `Tensor` of shape `[batch, target_height, target_width, channels]`. If `image` was 3-D, a 3-D `Tensor` of shape `[target_height, target_width, channels]`. It has the same dtype with `image`. Raises: ValueError: `image` is not a 3-D or 4-D `Tensor`. ValueError: `offset_width < 0` or `offset_height < 0`. ValueError: `target_width <= 0` or `target_width <= 0`. ValueError: `width < offset_width + target_width` or `height < offset_height + target_height`. 
""" with ops.name_scope(None, 'crop_to_bounding_box', [image]): image = ops.convert_to_tensor(image, name='image') is_batch = True image_shape = image.get_shape() if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError( '\'image\' (shape %s) must have either 3 or 4 dimensions.' % image_shape) assert_ops = _CheckAtLeast3DImage(image, require_static=False) batch, height, width, depth = _ImageDimensions(image, rank=4) assert_ops += _assert(offset_width >= 0, ValueError, 'offset_width must be >= 0.') assert_ops += _assert(offset_height >= 0, ValueError, 'offset_height must be >= 0.') assert_ops += _assert(target_width > 0, ValueError, 'target_width must be > 0.') assert_ops += _assert(target_height > 0, ValueError, 'target_height must be > 0.') assert_ops += _assert(width >= (target_width + offset_width), ValueError, 'width must be >= target + offset.') assert_ops += _assert(height >= (target_height + offset_height), ValueError, 'height must be >= target + offset.') image = control_flow_ops.with_dependencies(assert_ops, image) cropped = array_ops.slice( image, array_ops.stack([0, offset_height, offset_width, 0]), array_ops.stack([array_ops.shape(image)[0], target_height, target_width, array_ops.shape(image)[3]])) cropped_shape = [ None if _is_tensor(i) else i for i in [batch, target_height, target_width, depth] ] cropped.set_shape(cropped_shape) if not is_batch: cropped = array_ops.squeeze(cropped, axis=[0]) return cropped @tf_export( 'image.resize_with_crop_or_pad', v1=['image.resize_with_crop_or_pad', 'image.resize_image_with_crop_or_pad']) @dispatch.add_dispatch_support def resize_image_with_crop_or_pad(image, target_height, target_width): """Crops and/or pads an image to a target width and height. Resizes an image to a target width and height by either centrally cropping the image or padding it evenly with zeros. If `width` or `height` is greater than the specified `target_width` or `target_height` respectively, this op centrally crops along that dimension. For example: >>> image = np.arange(75).reshape(5, 5, 3) # create 3-D image input >>> image[:,:,0] # print first channel just for demo purposes array([[ 0, 3, 6, 9, 12], [15, 18, 21, 24, 27], [30, 33, 36, 39, 42], [45, 48, 51, 54, 57], [60, 63, 66, 69, 72]]) >>> image = tf.image.resize_with_crop_or_pad(image, 3, 3) # crop >>> # print first channel for demo purposes; centrally cropped output >>> image[:,:,0] <tf.Tensor: shape=(3, 3), dtype=int64, numpy= array([[18, 21, 24], [33, 36, 39], [48, 51, 54]])> If `width` or `height` is smaller than the specified `target_width` or `target_height` respectively, this op centrally pads with 0 along that dimension. For example: >>> image = np.arange(1, 28).reshape(3, 3, 3) # create 3-D image input >>> image[:,:,0] # print first channel just for demo purposes array([[ 1, 4, 7], [10, 13, 16], [19, 22, 25]]) >>> image = tf.image.resize_with_crop_or_pad(image, 5, 5) # pad >>> # print first channel for demo purposes; we should see 0 paddings >>> image[:,:,0] <tf.Tensor: shape=(5, 5), dtype=int64, numpy= array([[ 0, 0, 0, 0, 0], [ 0, 1, 4, 7, 0], [ 0, 10, 13, 16, 0], [ 0, 19, 22, 25, 0], [ 0, 0, 0, 0, 0]])> Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. 
Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Cropped and/or padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ with ops.name_scope(None, 'resize_image_with_crop_or_pad', [image]): image = ops.convert_to_tensor(image, name='image') image_shape = image.get_shape() is_batch = True if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError( '\'image\' (shape %s) must have either 3 or 4 dimensions.' % image_shape) assert_ops = _CheckAtLeast3DImage(image, require_static=False) assert_ops += _assert(target_width > 0, ValueError, 'target_width must be > 0.') assert_ops += _assert(target_height > 0, ValueError, 'target_height must be > 0.') image = control_flow_ops.with_dependencies(assert_ops, image) # `crop_to_bounding_box` and `pad_to_bounding_box` have their own checks. # Make sure our checks come first, so that error messages are clearer. if _is_tensor(target_height): target_height = control_flow_ops.with_dependencies( assert_ops, target_height) if _is_tensor(target_width): target_width = control_flow_ops.with_dependencies(assert_ops, target_width) def max_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.maximum(x, y) else: return max(x, y) def min_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.minimum(x, y) else: return min(x, y) def equal_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.equal(x, y) else: return x == y _, height, width, _ = _ImageDimensions(image, rank=4) width_diff = target_width - width offset_crop_width = max_(-width_diff // 2, 0) offset_pad_width = max_(width_diff // 2, 0) height_diff = target_height - height offset_crop_height = max_(-height_diff // 2, 0) offset_pad_height = max_(height_diff // 2, 0) # Maybe crop if needed. cropped = crop_to_bounding_box(image, offset_crop_height, offset_crop_width, min_(target_height, height), min_(target_width, width)) # Maybe pad if needed. resized = pad_to_bounding_box(cropped, offset_pad_height, offset_pad_width, target_height, target_width) # In theory all the checks below are redundant. 
if resized.get_shape().ndims is None: raise ValueError('resized contains no shape.') _, resized_height, resized_width, _ = _ImageDimensions(resized, rank=4) assert_ops = [] assert_ops += _assert( equal_(resized_height, target_height), ValueError, 'resized height is not correct.') assert_ops += _assert( equal_(resized_width, target_width), ValueError, 'resized width is not correct.') resized = control_flow_ops.with_dependencies(assert_ops, resized) if not is_batch: resized = array_ops.squeeze(resized, axis=[0]) return resized @tf_export(v1=['image.ResizeMethod']) class ResizeMethodV1(object): """See `v1.image.resize` for details.""" BILINEAR = 0 NEAREST_NEIGHBOR = 1 BICUBIC = 2 AREA = 3 @tf_export('image.ResizeMethod', v1=[]) class ResizeMethod(object): """See `tf.image.resize` for details.""" BILINEAR = 'bilinear' NEAREST_NEIGHBOR = 'nearest' BICUBIC = 'bicubic' AREA = 'area' LANCZOS3 = 'lanczos3' LANCZOS5 = 'lanczos5' GAUSSIAN = 'gaussian' MITCHELLCUBIC = 'mitchellcubic' def _resize_images_common(images, resizer_fn, size, preserve_aspect_ratio, name, skip_resize_if_same): """Core functionality for v1 and v2 resize functions.""" with ops.name_scope(name, 'resize', [images, size]): images = ops.convert_to_tensor(images, name='images') if images.get_shape().ndims is None: raise ValueError('\'images\' contains no shape.') # TODO(shlens): Migrate this functionality to the underlying Op's. is_batch = True if images.get_shape().ndims == 3: is_batch = False images = array_ops.expand_dims(images, 0) elif images.get_shape().ndims != 4: raise ValueError('\'images\' must have either 3 or 4 dimensions.') _, height, width, _ = images.get_shape().as_list() try: size = ops.convert_to_tensor(size, dtypes.int32, name='size') except (TypeError, ValueError): raise ValueError('\'size\' must be a 1-D int32 Tensor') if not size.get_shape().is_compatible_with([2]): raise ValueError('\'size\' must be a 1-D Tensor of 2 elements: ' 'new_height, new_width') if preserve_aspect_ratio: # Get the current shapes of the image, even if dynamic. _, current_height, current_width, _ = _ImageDimensions(images, rank=4) # do the computation to find the right scale and height/width. scale_factor_height = ( math_ops.cast(size[0], dtypes.float32) / math_ops.cast(current_height, dtypes.float32)) scale_factor_width = ( math_ops.cast(size[1], dtypes.float32) / math_ops.cast(current_width, dtypes.float32)) scale_factor = math_ops.minimum(scale_factor_height, scale_factor_width) scaled_height_const = math_ops.cast( math_ops.round(scale_factor * math_ops.cast(current_height, dtypes.float32)), dtypes.int32) scaled_width_const = math_ops.cast( math_ops.round(scale_factor * math_ops.cast(current_width, dtypes.float32)), dtypes.int32) # NOTE: Reset the size and other constants used later. size = ops.convert_to_tensor([scaled_height_const, scaled_width_const], dtypes.int32, name='size') size_const_as_shape = tensor_util.constant_value_as_shape(size) new_height_const = tensor_shape.dimension_at_index(size_const_as_shape, 0).value new_width_const = tensor_shape.dimension_at_index(size_const_as_shape, 1).value # If we can determine that the height and width will be unmodified by this # transformation, we avoid performing the resize. 
if skip_resize_if_same and all( x is not None for x in [new_width_const, width, new_height_const, height]) and ( width == new_width_const and height == new_height_const): if not is_batch: images = array_ops.squeeze(images, axis=[0]) return images images = resizer_fn(images, size) # NOTE(mrry): The shape functions for the resize ops cannot unpack # the packed values in `new_size`, so set the shape here. images.set_shape([None, new_height_const, new_width_const, None]) if not is_batch: images = array_ops.squeeze(images, axis=[0]) return images @tf_export(v1=['image.resize_images', 'image.resize']) @dispatch.add_dispatch_support def resize_images(images, size, method=ResizeMethodV1.BILINEAR, align_corners=False, preserve_aspect_ratio=False, name=None): """Resize `images` to `size` using the specified `method`. Resized images will be distorted if their original aspect ratio is not the same as `size`. To avoid distortions see `tf.image.resize_with_pad` or `tf.image.resize_with_crop_or_pad`. The `method` can be one of: * <b>`tf.image.ResizeMethod.BILINEAR`</b>: [Bilinear interpolation.]( https://en.wikipedia.org/wiki/Bilinear_interpolation) * <b>`tf.image.ResizeMethod.NEAREST_NEIGHBOR`</b>: [ Nearest neighbor interpolation.]( https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation) * <b>`tf.image.ResizeMethod.BICUBIC`</b>: [Bicubic interpolation.]( https://en.wikipedia.org/wiki/Bicubic_interpolation) * <b>`tf.image.ResizeMethod.AREA`</b>: Area interpolation. The return value has the same type as `images` if `method` is `tf.image.ResizeMethod.NEAREST_NEIGHBOR`. It will also have the same type as `images` if the size of `images` can be statically determined to be the same as `size`, because `images` is returned in this case. Otherwise, the return value has type `float32`. Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. method: ResizeMethod. Defaults to `tf.image.ResizeMethod.BILINEAR`. align_corners: bool. If True, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to `False`. preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set, then `images` will be resized to a size that fits in `size` while preserving the aspect ratio of the original image. Scales up the image if `size` is bigger than the current size of the `image`. Defaults to False. name: A name for this operation (optional). Raises: ValueError: if the shape of `images` is incompatible with the shape arguments to this function ValueError: if `size` has invalid shape or type. ValueError: if an unsupported resize method is specified. Returns: If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. 
""" def resize_fn(images_t, new_size): """Legacy resize core function, passed to _resize_images_common.""" if method == ResizeMethodV1.BILINEAR or method == ResizeMethod.BILINEAR: return gen_image_ops.resize_bilinear( images_t, new_size, align_corners=align_corners) elif (method == ResizeMethodV1.NEAREST_NEIGHBOR or method == ResizeMethod.NEAREST_NEIGHBOR): return gen_image_ops.resize_nearest_neighbor( images_t, new_size, align_corners=align_corners) elif method == ResizeMethodV1.BICUBIC or method == ResizeMethod.BICUBIC: return gen_image_ops.resize_bicubic( images_t, new_size, align_corners=align_corners) elif method == ResizeMethodV1.AREA or method == ResizeMethod.AREA: return gen_image_ops.resize_area( images_t, new_size, align_corners=align_corners) else: raise ValueError('Resize method is not implemented: {}'.format(method)) return _resize_images_common( images, resize_fn, size, preserve_aspect_ratio=preserve_aspect_ratio, name=name, skip_resize_if_same=True) @tf_export('image.resize', v1=[]) @dispatch.add_dispatch_support def resize_images_v2(images, size, method=ResizeMethod.BILINEAR, preserve_aspect_ratio=False, antialias=False, name=None): """Resize `images` to `size` using the specified `method`. Resized images will be distorted if their original aspect ratio is not the same as `size`. To avoid distortions see `tf.image.resize_with_pad`. >>> image = tf.constant([ ... [1,0,0,0,0], ... [0,1,0,0,0], ... [0,0,1,0,0], ... [0,0,0,1,0], ... [0,0,0,0,1], ... ]) >>> # Add "batch" and "channels" dimensions >>> image = image[tf.newaxis, ..., tf.newaxis] >>> image.shape.as_list() # [batch, height, width, channels] [1, 5, 5, 1] >>> tf.image.resize(image, [3,5])[0,...,0].numpy() array([[0.6666667, 0.3333333, 0. , 0. , 0. ], [0. , 0. , 1. , 0. , 0. ], [0. , 0. , 0. , 0.3333335, 0.6666665]], dtype=float32) It works equally well with a single image instead of a batch of images: >>> tf.image.resize(image[0], [3,5]).shape.as_list() [3, 5, 1] When `antialias` is true, the sampling filter will anti-alias the input image as well as interpolate. When downsampling an image with [anti-aliasing]( https://en.wikipedia.org/wiki/Spatial_anti-aliasing) the sampling filter kernel is scaled in order to properly anti-alias the input image signal. `antialias` has no effect when upsampling an image: >>> a = tf.image.resize(image, [5,10]) >>> b = tf.image.resize(image, [5,10], antialias=True) >>> tf.reduce_max(abs(a - b)).numpy() 0.0 The `method` argument expects an item from the `image.ResizeMethod` enum, or the string equivalent. The options are: * <b>`bilinear`</b>: [Bilinear interpolation.]( https://en.wikipedia.org/wiki/Bilinear_interpolation) If `antialias` is true, becomes a hat/tent filter function with radius 1 when downsampling. * <b>`lanczos3`</b>: [Lanczos kernel]( https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 3. High-quality practical filter but may have some ringing, especially on synthetic images. * <b>`lanczos5`</b>: [Lanczos kernel] ( https://en.wikipedia.org/wiki/Lanczos_resampling) with radius 5. Very-high-quality filter but may have stronger ringing. * <b>`bicubic`</b>: [Cubic interpolant]( https://en.wikipedia.org/wiki/Bicubic_interpolation) of Keys. Equivalent to Catmull-Rom kernel. Reasonably good quality and faster than Lanczos3Kernel, particularly when upsampling. * <b>`gaussian`</b>: [Gaussian kernel]( https://en.wikipedia.org/wiki/Gaussian_filter) with radius 3, sigma = 1.5 / 3.0. 
* <b>`nearest`</b>: [Nearest neighbor interpolation.]( https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation) `antialias` has no effect when used with nearest neighbor interpolation. * <b>`area`</b>: Anti-aliased resampling with area interpolation. `antialias` has no effect when used with area interpolation; it always anti-aliases. * <b>`mitchellcubic`</b>: Mitchell-Netravali Cubic non-interpolating filter. For synthetic images (especially those lacking proper prefiltering), less ringing than Keys cubic kernel but less sharp. Note: Near image edges the filtering kernel may be partially outside the image boundaries. For these pixels, only input pixels inside the image will be included in the filter sum, and the output value will be appropriately normalized. The return value has type `float32`, unless the `method` is `ResizeMethod.NEAREST_NEIGHBOR`, then the return dtype is the dtype of `images`: >>> nn = tf.image.resize(image, [5,7], method='nearest') >>> nn[0,...,0].numpy() array([[1, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0, 0], [0, 0, 0, 0, 1, 1, 0], [0, 0, 0, 0, 0, 0, 1]], dtype=int32) With `preserve_aspect_ratio=True`, the aspect ratio is preserved, so `size` is the maximum for each dimension: >>> max_10_20 = tf.image.resize(image, [10,20], preserve_aspect_ratio=True) >>> max_10_20.shape.as_list() [1, 10, 10, 1] Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. size: A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The new size for the images. method: An `image.ResizeMethod`, or string equivalent. Defaults to `bilinear`. preserve_aspect_ratio: Whether to preserve the aspect ratio. If this is set, then `images` will be resized to a size that fits in `size` while preserving the aspect ratio of the original image. Scales up the image if `size` is bigger than the current size of the `image`. Defaults to False. antialias: Whether to use an anti-aliasing filter when downsampling an image. name: A name for this operation (optional). Raises: ValueError: if the shape of `images` is incompatible with the shape arguments to this function ValueError: if `size` has an invalid shape or type. ValueError: if an unsupported resize method is specified. Returns: If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. 
""" def resize_fn(images_t, new_size): """Resize core function, passed to _resize_images_common.""" scale_and_translate_methods = [ ResizeMethod.LANCZOS3, ResizeMethod.LANCZOS5, ResizeMethod.GAUSSIAN, ResizeMethod.MITCHELLCUBIC ] def resize_with_scale_and_translate(method): scale = ( math_ops.cast(new_size, dtype=dtypes.float32) / math_ops.cast(array_ops.shape(images_t)[1:3], dtype=dtypes.float32)) return gen_image_ops.scale_and_translate( images_t, new_size, scale, array_ops.zeros([2]), kernel_type=method, antialias=antialias) if method == ResizeMethod.BILINEAR: if antialias: return resize_with_scale_and_translate('triangle') else: return gen_image_ops.resize_bilinear( images_t, new_size, half_pixel_centers=True) elif method == ResizeMethod.NEAREST_NEIGHBOR: return gen_image_ops.resize_nearest_neighbor( images_t, new_size, half_pixel_centers=True) elif method == ResizeMethod.BICUBIC: if antialias: return resize_with_scale_and_translate('keyscubic') else: return gen_image_ops.resize_bicubic( images_t, new_size, half_pixel_centers=True) elif method == ResizeMethod.AREA: return gen_image_ops.resize_area(images_t, new_size) elif method in scale_and_translate_methods: return resize_with_scale_and_translate(method) else: raise ValueError('Resize method is not implemented: {}'.format(method)) return _resize_images_common( images, resize_fn, size, preserve_aspect_ratio=preserve_aspect_ratio, name=name, skip_resize_if_same=False) def _resize_image_with_pad_common(image, target_height, target_width, resize_fn): """Core functionality for v1 and v2 resize_image_with_pad functions.""" with ops.name_scope(None, 'resize_image_with_pad', [image]): image = ops.convert_to_tensor(image, name='image') image_shape = image.get_shape() is_batch = True if image_shape.ndims == 3: is_batch = False image = array_ops.expand_dims(image, 0) elif image_shape.ndims is None: is_batch = False image = array_ops.expand_dims(image, 0) image.set_shape([None] * 4) elif image_shape.ndims != 4: raise ValueError( '\'image\' (shape %s) must have either 3 or 4 dimensions.' 
% image_shape) assert_ops = _CheckAtLeast3DImage(image, require_static=False) assert_ops += _assert(target_width > 0, ValueError, 'target_width must be > 0.') assert_ops += _assert(target_height > 0, ValueError, 'target_height must be > 0.') image = control_flow_ops.with_dependencies(assert_ops, image) def max_(x, y): if _is_tensor(x) or _is_tensor(y): return math_ops.maximum(x, y) else: return max(x, y) _, height, width, _ = _ImageDimensions(image, rank=4) # convert values to float, to ease divisions f_height = math_ops.cast(height, dtype=dtypes.float32) f_width = math_ops.cast(width, dtype=dtypes.float32) f_target_height = math_ops.cast(target_height, dtype=dtypes.float32) f_target_width = math_ops.cast(target_width, dtype=dtypes.float32) # Find the ratio by which the image must be adjusted # to fit within the target ratio = max_(f_width / f_target_width, f_height / f_target_height) resized_height_float = f_height / ratio resized_width_float = f_width / ratio resized_height = math_ops.cast( math_ops.floor(resized_height_float), dtype=dtypes.int32) resized_width = math_ops.cast( math_ops.floor(resized_width_float), dtype=dtypes.int32) padding_height = (f_target_height - resized_height_float) / 2 padding_width = (f_target_width - resized_width_float) / 2 f_padding_height = math_ops.floor(padding_height) f_padding_width = math_ops.floor(padding_width) p_height = max_(0, math_ops.cast(f_padding_height, dtype=dtypes.int32)) p_width = max_(0, math_ops.cast(f_padding_width, dtype=dtypes.int32)) # Resize first, then pad to meet requested dimensions resized = resize_fn(image, [resized_height, resized_width]) padded = pad_to_bounding_box(resized, p_height, p_width, target_height, target_width) if padded.get_shape().ndims is None: raise ValueError('padded contains no shape.') _ImageDimensions(padded, rank=4) if not is_batch: padded = array_ops.squeeze(padded, axis=[0]) return padded @tf_export(v1=['image.resize_image_with_pad']) @dispatch.add_dispatch_support def resize_image_with_pad_v1(image, target_height, target_width, method=ResizeMethodV1.BILINEAR, align_corners=False): """Resizes and pads an image to a target width and height. Resizes an image to a target width and height by keeping the aspect ratio the same without distortion. If the target dimensions don't match the image dimensions, the image is resized and then padded with zeroes to match requested dimensions. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. method: Method to use for resizing image. See `resize_images()` align_corners: bool. If True, the centers of the 4 corner pixels of the input and output tensors are aligned, preserving the values at the corner pixels. Defaults to `False`. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Resized and padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ def _resize_fn(im, new_size): return resize_images(im, new_size, method, align_corners=align_corners) return _resize_image_with_pad_common(image, target_height, target_width, _resize_fn) @tf_export('image.resize_with_pad', v1=[]) @dispatch.add_dispatch_support def resize_image_with_pad_v2(image, target_height, target_width, method=ResizeMethod.BILINEAR, antialias=False): """Resizes and pads an image to a target width and height. 
Resizes an image to a target width and height by keeping the aspect ratio the same without distortion. If the target dimensions don't match the image dimensions, the image is resized and then padded with zeroes to match requested dimensions. Args: image: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. target_height: Target height. target_width: Target width. method: Method to use for resizing image. See `image.resize()` antialias: Whether to use anti-aliasing when resizing. See 'image.resize()'. Raises: ValueError: if `target_height` or `target_width` are zero or negative. Returns: Resized and padded image. If `images` was 4-D, a 4-D float Tensor of shape `[batch, new_height, new_width, channels]`. If `images` was 3-D, a 3-D float Tensor of shape `[new_height, new_width, channels]`. """ def _resize_fn(im, new_size): return resize_images_v2(im, new_size, method, antialias=antialias) return _resize_image_with_pad_common(image, target_height, target_width, _resize_fn) @tf_export('image.per_image_standardization') @dispatch.add_dispatch_support def per_image_standardization(image): """Linearly scales each image in `image` to have mean 0 and variance 1. For each 3-D image `x` in `image`, computes `(x - mean) / adjusted_stddev`, where - `mean` is the average of all values in `x` - `adjusted_stddev = max(stddev, 1.0/sqrt(N))` is capped away from 0 to protect against division by 0 when handling uniform images - `N` is the number of elements in `x` - `stddev` is the standard deviation of all values in `x` Example Usage: >>> image = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3]) >>> image # 3-D tensor <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy= array([[[ 1, 2, 3], [ 4, 5, 6]], [[ 7, 8, 9], [10, 11, 12]]], dtype=int32)> >>> new_image = tf.image.per_image_standardization(image) >>> new_image # 3-D tensor with mean ~= 0 and variance ~= 1 <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[-1.593255 , -1.3035723 , -1.0138896 ], [-0.7242068 , -0.4345241 , -0.14484136]], [[ 0.14484136, 0.4345241 , 0.7242068 ], [ 1.0138896 , 1.3035723 , 1.593255 ]]], dtype=float32)> Args: image: An n-D `Tensor` with at least 3 dimensions, the last 3 of which are the dimensions of each image. Returns: A `Tensor` with the same shape as `image` and its dtype is `float32`. Raises: ValueError: The shape of `image` has fewer than 3 dimensions. """ with ops.name_scope(None, 'per_image_standardization', [image]) as scope: image = ops.convert_to_tensor(image, name='image') image = _AssertAtLeast3DImage(image) image = math_ops.cast(image, dtype=dtypes.float32) num_pixels = math_ops.reduce_prod(array_ops.shape(image)[-3:]) image_mean = math_ops.reduce_mean(image, axis=[-1, -2, -3], keepdims=True) # Apply a minimum normalization that protects us against uniform images. stddev = math_ops.reduce_std(image, axis=[-1, -2, -3], keepdims=True) min_stddev = math_ops.rsqrt(math_ops.cast(num_pixels, dtypes.float32)) adjusted_stddev = math_ops.maximum(stddev, min_stddev) image -= image_mean image = math_ops.divide(image, adjusted_stddev, name=scope) return image @tf_export('image.random_brightness') @dispatch.add_dispatch_support def random_brightness(image, max_delta, seed=None): """Adjust the brightness of images by a random factor. Equivalent to `adjust_brightness()` using a `delta` randomly picked in the interval `[-max_delta, max_delta)`. For producing deterministic results given a `seed` value, use `tf.image.stateless_random_brightness`. 
Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: An image or images to adjust. max_delta: float, must be non-negative. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_brightness(x, 0.2) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...> Returns: The brightness-adjusted image(s). Raises: ValueError: if `max_delta` is negative. """ if max_delta < 0: raise ValueError('max_delta must be non-negative.') delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed) return adjust_brightness(image, delta) @tf_export('image.stateless_random_brightness', v1=[]) @dispatch.add_dispatch_support def stateless_random_brightness(image, max_delta, seed): """Adjust the brightness of images by a random factor deterministically. Equivalent to `adjust_brightness()` using a `delta` randomly picked in the interval `[-max_delta, max_delta)`. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> seed = (1, 2) >>> tf.image.stateless_random_brightness(x, 0.2, seed) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 1.1376241, 2.1376243, 3.1376243], [ 4.1376243, 5.1376243, 6.1376243]], [[ 7.1376243, 8.137624 , 9.137624 ], [10.137624 , 11.137624 , 12.137624 ]]], dtype=float32)> Args: image: An image or images to adjust. max_delta: float, must be non-negative. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Returns: The brightness-adjusted image(s). Raises: ValueError: if `max_delta` is negative. """ if max_delta < 0: raise ValueError('max_delta must be non-negative.') delta = stateless_random_ops.stateless_random_uniform( shape=[], minval=-max_delta, maxval=max_delta, seed=seed) return adjust_brightness(image, delta) @tf_export('image.random_contrast') @dispatch.add_dispatch_support def random_contrast(image, lower, upper, seed=None): """Adjust the contrast of an image or images by a random factor. Equivalent to `adjust_contrast()` but uses a `contrast_factor` randomly picked in the interval `[lower, upper)`. For producing deterministic results given a `seed` value, use `tf.image.stateless_random_contrast`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: An image tensor with 3 or more dimensions. lower: float. Lower bound for the random contrast factor. upper: float. Upper bound for the random contrast factor. seed: A Python integer. Used to create a random seed. See `tf.compat.v1.set_random_seed` for behavior. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... 
[10.0, 11.0, 12.0]]] >>> tf.image.random_contrast(x, 0.2, 0.5) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...> Returns: The contrast-adjusted image(s). Raises: ValueError: if `upper <= lower` or if `lower < 0`. """ if upper <= lower: raise ValueError('upper must be > lower.') if lower < 0: raise ValueError('lower must be non-negative.') contrast_factor = random_ops.random_uniform([], lower, upper, seed=seed) return adjust_contrast(image, contrast_factor) @tf_export('image.stateless_random_contrast', v1=[]) @dispatch.add_dispatch_support def stateless_random_contrast(image, lower, upper, seed): """Adjust the contrast of images by a random factor deterministically. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). Args: image: An image tensor with 3 or more dimensions. lower: float. Lower bound for the random contrast factor. upper: float. Upper bound for the random contrast factor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> seed = (1, 2) >>> tf.image.stateless_random_contrast(x, 0.2, 0.5, seed) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[3.4605184, 4.4605184, 5.4605184], [4.820173 , 5.820173 , 6.820173 ]], [[6.179827 , 7.179827 , 8.179828 ], [7.5394816, 8.539482 , 9.539482 ]]], dtype=float32)> Returns: The contrast-adjusted image(s). Raises: ValueError: if `upper <= lower` or if `lower < 0`. """ if upper <= lower: raise ValueError('upper must be > lower.') if lower < 0: raise ValueError('lower must be non-negative.') contrast_factor = stateless_random_ops.stateless_random_uniform( shape=[], minval=lower, maxval=upper, seed=seed) return adjust_contrast(image, contrast_factor) @tf_export('image.adjust_brightness') @dispatch.add_dispatch_support def adjust_brightness(image, delta): """Adjust the brightness of RGB or Grayscale images. This is a convenience method that converts RGB images to float representation, adjusts their brightness, and then converts them back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. The value `delta` is added to all components of the tensor `image`. `image` is converted to `float` and scaled appropriately if it is in fixed-point representation, and `delta` is converted to the same data type. For regular images, `delta` should be in the range `(-1,1)`, as it is added to the image in floating point representation, where pixel values are in the `[0,1)` range. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_brightness(x, delta=0.1) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 1.1, 2.1, 3.1], [ 4.1, 5.1, 6.1]], [[ 7.1, 8.1, 9.1], [10.1, 11.1, 12.1]]], dtype=float32)> Args: image: RGB image or images to adjust. delta: A scalar. Amount to add to the pixel values. Returns: A brightness-adjusted tensor of the same shape and type as `image`. 
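  `delta` may also be negative, which darkens the image. A minimal sketch
  (illustrative; exact float output omitted):

  ```python
  darker = tf.image.adjust_brightness(x, delta=-0.1)  # subtracts 0.1 from every channel
  ```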
""" with ops.name_scope(None, 'adjust_brightness', [image, delta]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype in [dtypes.float16, dtypes.float32]: flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) adjusted = math_ops.add( flt_image, math_ops.cast(delta, flt_image.dtype), name=name) return convert_image_dtype(adjusted, orig_dtype, saturate=True) @tf_export('image.adjust_contrast') @dispatch.add_dispatch_support def adjust_contrast(images, contrast_factor): """Adjust contrast of RGB or grayscale images. This is a convenience method that converts RGB images to float representation, adjusts their contrast, and then converts them back to the original data type. If several adjustments are chained, it is advisable to minimize the number of redundant conversions. `images` is a tensor of at least 3 dimensions. The last 3 dimensions are interpreted as `[height, width, channels]`. The other dimensions only represent a collection of images, such as `[batch, height, width, channels].` Contrast is adjusted independently for each channel of each image. For each channel, this Op computes the mean of the image pixels in the channel and then adjusts each component `x` of each pixel to `(x - mean) * contrast_factor + mean`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_contrast(x, 2) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[-3.5, -2.5, -1.5], [ 2.5, 3.5, 4.5]], [[ 8.5, 9.5, 10.5], [14.5, 15.5, 16.5]]], dtype=float32)> Args: images: Images to adjust. At least 3-D. contrast_factor: A float multiplier for adjusting contrast. Returns: The contrast-adjusted image or images. """ with ops.name_scope(None, 'adjust_contrast', [images, contrast_factor]) as name: images = ops.convert_to_tensor(images, name='images') # Remember original dtype to so we can convert back if needed orig_dtype = images.dtype if orig_dtype in (dtypes.float16, dtypes.float32): flt_images = images else: flt_images = convert_image_dtype(images, dtypes.float32) adjusted = gen_image_ops.adjust_contrastv2( flt_images, contrast_factor=contrast_factor, name=name) return convert_image_dtype(adjusted, orig_dtype, saturate=True) @tf_export('image.adjust_gamma') @dispatch.add_dispatch_support def adjust_gamma(image, gamma=1, gain=1): """Performs [Gamma Correction](http://en.wikipedia.org/wiki/Gamma_correction). on the input image. Also known as Power Law Transform. This function converts the input images at first to float representation, then transforms them pixelwise according to the equation `Out = gain * In**gamma`, and then converts the back to the original data type. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_gamma(x, 0.2) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[1. , 1.1486983, 1.2457309], [1.319508 , 1.3797297, 1.4309691]], [[1.4757731, 1.5157166, 1.5518456], [1.5848932, 1.6153942, 1.6437519]]], dtype=float32)> Args: image : RGB image or images to adjust. gamma : A scalar or tensor. Non-negative real number. gain : A scalar or tensor. The constant multiplier. Returns: A Tensor. A Gamma-adjusted tensor of the same shape and type as `image`. Raises: ValueError: If gamma is negative. 
Notes: For gamma greater than 1, the histogram will shift towards left and the output image will be darker than the input image. For gamma less than 1, the histogram will shift towards right and the output image will be brighter than the input image. References: [Wikipedia](http://en.wikipedia.org/wiki/Gamma_correction) """ with ops.name_scope(None, 'adjust_gamma', [image, gamma, gain]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype in [dtypes.float16, dtypes.float32]: flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) assert_op = _assert(gamma >= 0, ValueError, 'Gamma should be a non-negative real number.') if assert_op: gamma = control_flow_ops.with_dependencies(assert_op, gamma) # According to the definition of gamma correction. adjusted_img = gain * flt_image**gamma return convert_image_dtype(adjusted_img, orig_dtype, saturate=True) @tf_export('image.convert_image_dtype') @dispatch.add_dispatch_support def convert_image_dtype(image, dtype, saturate=False, name=None): """Convert `image` to `dtype`, scaling its values if needed. The operation supports data types (for `image` and `dtype`) of `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`, `float16`, `float32`, `float64`, `bfloat16`. Images that are represented using floating point values are expected to have values in the range [0,1). Image data stored in integer data types are expected to have values in the range `[0,MAX]`, where `MAX` is the largest positive representable number for the data type. This op converts between data types, scaling the values appropriately before casting. Usage Example: >>> x = [[[1, 2, 3], [4, 5, 6]], ... [[7, 8, 9], [10, 11, 12]]] >>> x_int8 = tf.convert_to_tensor(x, dtype=tf.int8) >>> tf.image.convert_image_dtype(x_int8, dtype=tf.float16, saturate=False) <tf.Tensor: shape=(2, 2, 3), dtype=float16, numpy= array([[[0.00787, 0.01575, 0.02362], [0.0315 , 0.03937, 0.04724]], [[0.0551 , 0.063 , 0.07086], [0.07874, 0.0866 , 0.0945 ]]], dtype=float16)> Converting integer types to floating point types returns normalized floating point values in the range [0, 1); the values are normalized by the `MAX` value of the input dtype. Consider the following two examples: >>> a = [[[1], [2]], [[3], [4]]] >>> a_int8 = tf.convert_to_tensor(a, dtype=tf.int8) >>> tf.image.convert_image_dtype(a_int8, dtype=tf.float32) <tf.Tensor: shape=(2, 2, 1), dtype=float32, numpy= array([[[0.00787402], [0.01574803]], [[0.02362205], [0.03149606]]], dtype=float32)> >>> a_int32 = tf.convert_to_tensor(a, dtype=tf.int32) >>> tf.image.convert_image_dtype(a_int32, dtype=tf.float32) <tf.Tensor: shape=(2, 2, 1), dtype=float32, numpy= array([[[4.6566129e-10], [9.3132257e-10]], [[1.3969839e-09], [1.8626451e-09]]], dtype=float32)> Despite having identical values of `a` and output dtype of `float32`, the outputs differ due to the different input dtypes (`int8` vs. `int32`). This is, again, because the values are normalized by the `MAX` value of the input dtype. Note that converting floating point values to integer type may lose precision. In the example below, an image tensor `b` of dtype `float32` is converted to `int8` and back to `float32`. The final output, however, is different from the original input `b` due to precision loss. 
>>> b = [[[0.12], [0.34]], [[0.56], [0.78]]] >>> b_float32 = tf.convert_to_tensor(b, dtype=tf.float32) >>> b_int8 = tf.image.convert_image_dtype(b_float32, dtype=tf.int8) >>> tf.image.convert_image_dtype(b_int8, dtype=tf.float32) <tf.Tensor: shape=(2, 2, 1), dtype=float32, numpy= array([[[0.11811024], [0.33858266]], [[0.5590551 ], [0.77952754]]], dtype=float32)> Scaling up from an integer type (input dtype) to another integer type (output dtype) will not map input dtype's `MAX` to output dtype's `MAX` but converting back and forth should result in no change. For example, as shown below, the `MAX` value of int8 (=127) is not mapped to the `MAX` value of int16 (=32,767) but, when scaled back, we get the same, original values of `c`. >>> c = [[[1], [2]], [[127], [127]]] >>> c_int8 = tf.convert_to_tensor(c, dtype=tf.int8) >>> c_int16 = tf.image.convert_image_dtype(c_int8, dtype=tf.int16) >>> print(c_int16) tf.Tensor( [[[ 256] [ 512]] [[32512] [32512]]], shape=(2, 2, 1), dtype=int16) >>> c_int8_back = tf.image.convert_image_dtype(c_int16, dtype=tf.int8) >>> print(c_int8_back) tf.Tensor( [[[ 1] [ 2]] [[127] [127]]], shape=(2, 2, 1), dtype=int8) Scaling down from an integer type to another integer type can be a lossy conversion. Notice in the example below that converting `int16` to `uint8` and back to `int16` has lost precision. >>> d = [[[1000], [2000]], [[3000], [4000]]] >>> d_int16 = tf.convert_to_tensor(d, dtype=tf.int16) >>> d_uint8 = tf.image.convert_image_dtype(d_int16, dtype=tf.uint8) >>> d_int16_back = tf.image.convert_image_dtype(d_uint8, dtype=tf.int16) >>> print(d_int16_back) tf.Tensor( [[[ 896] [1920]] [[2944] [3968]]], shape=(2, 2, 1), dtype=int16) Note that converting from floating point inputs to integer types may lead to over/underflow problems. Set saturate to `True` to avoid such problem in problematic conversions. If enabled, saturation will clip the output into the allowed range before performing a potentially dangerous cast (and only before performing such a cast, i.e., when casting from a floating point to an integer type, and when casting from a signed to an unsigned type; `saturate` has no effect on casts between floats, or on casts that increase the type's range). Args: image: An image. dtype: A `DType` to convert `image` to. saturate: If `True`, clip the input before casting (if necessary). name: A name for this operation (optional). Returns: `image`, converted to `dtype`. Raises: AttributeError: Raises an attribute error when dtype is neither float nor integer """ image = ops.convert_to_tensor(image, name='image') dtype = dtypes.as_dtype(dtype) if not dtype.is_floating and not dtype.is_integer: raise AttributeError('dtype must be either floating point or integer') if dtype == image.dtype: return array_ops.identity(image, name=name) with ops.name_scope(name, 'convert_image', [image]) as name: # Both integer: use integer multiplication in the larger range if image.dtype.is_integer and dtype.is_integer: scale_in = image.dtype.max scale_out = dtype.max if scale_in > scale_out: # Scaling down, scale first, then cast. The scaling factor will # cause in.max to be mapped to above out.max but below out.max+1, # so that the output is safely in the supported range. scale = (scale_in + 1) // (scale_out + 1) scaled = math_ops.floordiv(image, scale) if saturate: return math_ops.saturate_cast(scaled, dtype, name=name) else: return math_ops.cast(scaled, dtype, name=name) else: # Scaling up, cast first, then scale. 
The scale will not map in.max to # out.max, but converting back and forth should result in no change. if saturate: cast = math_ops.saturate_cast(image, dtype) else: cast = math_ops.cast(image, dtype) scale = (scale_out + 1) // (scale_in + 1) return math_ops.multiply(cast, scale, name=name) elif image.dtype.is_floating and dtype.is_floating: # Both float: Just cast, no possible overflows in the allowed ranges. # Note: We're ignoring float overflows. If your image dynamic range # exceeds float range, you're on your own. return math_ops.cast(image, dtype, name=name) else: if image.dtype.is_integer: # Converting to float: first cast, then scale. No saturation possible. cast = math_ops.cast(image, dtype) scale = 1. / image.dtype.max return math_ops.multiply(cast, scale, name=name) else: # Converting from float: first scale, then cast scale = dtype.max + 0.5 # avoid rounding problems in the cast scaled = math_ops.multiply(image, scale) if saturate: return math_ops.saturate_cast(scaled, dtype, name=name) else: return math_ops.cast(scaled, dtype, name=name) @tf_export('image.rgb_to_grayscale') @dispatch.add_dispatch_support def rgb_to_grayscale(images, name=None): """Converts one or more images from RGB to Grayscale. Outputs a tensor of the same `DType` and rank as `images`. The size of the last dimension of the output is 1, containing the Grayscale value of the pixels. >>> original = tf.constant([[[1.0, 2.0, 3.0]]]) >>> converted = tf.image.rgb_to_grayscale(original) >>> print(converted.numpy()) [[[1.81...]]] Args: images: The RGB tensor to convert. The last dimension must have size 3 and should contain RGB values. name: A name for the operation (optional). Returns: The converted grayscale image(s). """ with ops.name_scope(name, 'rgb_to_grayscale', [images]) as name: images = ops.convert_to_tensor(images, name='images') # Remember original dtype to so we can convert back if needed orig_dtype = images.dtype flt_image = convert_image_dtype(images, dtypes.float32) # Reference for converting between RGB and grayscale. # https://en.wikipedia.org/wiki/Luma_%28video%29 rgb_weights = [0.2989, 0.5870, 0.1140] gray_float = math_ops.tensordot(flt_image, rgb_weights, [-1, -1]) gray_float = array_ops.expand_dims(gray_float, -1) return convert_image_dtype(gray_float, orig_dtype, name=name) @tf_export('image.grayscale_to_rgb') @dispatch.add_dispatch_support def grayscale_to_rgb(images, name=None): """Converts one or more images from Grayscale to RGB. Outputs a tensor of the same `DType` and rank as `images`. The size of the last dimension of the output is 3, containing the RGB value of the pixels. The input images' last dimension must be size 1. >>> original = tf.constant([[[1.0], [2.0], [3.0]]]) >>> converted = tf.image.grayscale_to_rgb(original) >>> print(converted.numpy()) [[[1. 1. 1.] [2. 2. 2.] [3. 3. 3.]]] Args: images: The Grayscale tensor to convert. The last dimension must be size 1. name: A name for the operation (optional). Returns: The converted grayscale image(s). 
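  The conversion simply replicates the single grayscale channel across the
  three output channels (see the `tile` call in the implementation below), so
  converting RGB to grayscale and back does not recover the original colors.
  A round-trip sketch (illustrative; `rgb_image` is a placeholder name):

  ```python
  gray = tf.image.rgb_to_grayscale(rgb_image)   # shape [..., 1]
  rgb_again = tf.image.grayscale_to_rgb(gray)   # shape [..., 3], all channels equal
  ```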
""" with ops.name_scope(name, 'grayscale_to_rgb', [images]) as name: images = _AssertGrayscaleImage(images) images = ops.convert_to_tensor(images, name='images') rank_1 = array_ops.expand_dims(array_ops.rank(images) - 1, 0) shape_list = ([array_ops.ones(rank_1, dtype=dtypes.int32)] + [array_ops.expand_dims(3, 0)]) multiples = array_ops.concat(shape_list, 0) rgb = array_ops.tile(images, multiples, name=name) rgb.set_shape(images.get_shape()[:-1].concatenate([3])) return rgb # pylint: disable=invalid-name @tf_export('image.random_hue') @dispatch.add_dispatch_support def random_hue(image, max_delta, seed=None): """Adjust the hue of RGB images by a random factor. Equivalent to `adjust_hue()` but uses a `delta` randomly picked in the interval `[-max_delta, max_delta)`. `max_delta` must be in the interval `[0, 0.5]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_hue(x, 0.2) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...> For producing deterministic results given a `seed` value, use `tf.image.stateless_random_hue`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: RGB image or images. The size of the last dimension must be 3. max_delta: float. The maximum value for the random delta. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `max_delta` is invalid. """ if max_delta > 0.5: raise ValueError('max_delta must be <= 0.5.') if max_delta < 0: raise ValueError('max_delta must be non-negative.') delta = random_ops.random_uniform([], -max_delta, max_delta, seed=seed) return adjust_hue(image, delta) @tf_export('image.stateless_random_hue', v1=[]) @dispatch.add_dispatch_support def stateless_random_hue(image, max_delta, seed): """Adjust the hue of RGB images by a random factor deterministically. Equivalent to `adjust_hue()` but uses a `delta` randomly picked in the interval `[-max_delta, max_delta)`. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). `max_delta` must be in the interval `[0, 0.5]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> seed = (1, 2) >>> tf.image.stateless_random_hue(x, 0.2, seed) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 1.6514902, 1. , 3. ], [ 4.65149 , 4. , 6. ]], [[ 7.65149 , 7. , 9. ], [10.65149 , 10. , 12. ]]], dtype=float32)> Args: image: RGB image or images. The size of the last dimension must be 3. max_delta: float. The maximum value for the random delta. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `max_delta` is invalid. 
""" if max_delta > 0.5: raise ValueError('max_delta must be <= 0.5.') if max_delta < 0: raise ValueError('max_delta must be non-negative.') delta = stateless_random_ops.stateless_random_uniform( shape=[], minval=-max_delta, maxval=max_delta, seed=seed) return adjust_hue(image, delta) @tf_export('image.adjust_hue') @dispatch.add_dispatch_support def adjust_hue(image, delta, name=None): """Adjust hue of RGB images. This is a convenience method that converts an RGB image to float representation, converts it to HSV, adds an offset to the hue channel, converts back to RGB and then back to the original data type. If several adjustments are chained it is advisable to minimize the number of redundant conversions. `image` is an RGB image. The image hue is adjusted by converting the image(s) to HSV and rotating the hue channel (H) by `delta`. The image is then converted back to RGB. `delta` must be in the interval `[-1, 1]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_hue(x, 0.2) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 2.3999996, 1. , 3. ], [ 5.3999996, 4. , 6. ]], [[ 8.4 , 7. , 9. ], [11.4 , 10. , 12. ]]], dtype=float32)> Args: image: RGB image or images. The size of the last dimension must be 3. delta: float. How much to add to the hue channel. name: A name for this operation (optional). Returns: Adjusted image(s), same shape and DType as `image`. Usage Example: >>> image = [[[1, 2, 3], [4, 5, 6]], ... [[7, 8, 9], [10, 11, 12]], ... [[13, 14, 15], [16, 17, 18]]] >>> image = tf.constant(image) >>> tf.image.adjust_hue(image, 0.2) <tf.Tensor: shape=(3, 2, 3), dtype=int32, numpy= array([[[ 2, 1, 3], [ 5, 4, 6]], [[ 8, 7, 9], [11, 10, 12]], [[14, 13, 15], [17, 16, 18]]], dtype=int32)> """ with ops.name_scope(name, 'adjust_hue', [image]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype in (dtypes.float16, dtypes.float32): flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) rgb_altered = gen_image_ops.adjust_hue(flt_image, delta) return convert_image_dtype(rgb_altered, orig_dtype) # pylint: disable=invalid-name @tf_export('image.random_jpeg_quality') @dispatch.add_dispatch_support def random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed=None): """Randomly changes jpeg encoding quality for inducing jpeg noise. `min_jpeg_quality` must be in the interval `[0, 100]` and less than `max_jpeg_quality`. `max_jpeg_quality` must be in the interval `[0, 100]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_jpeg_quality(x, 75, 95) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...> For producing deterministic results given a `seed` value, use `tf.image.stateless_random_jpeg_quality`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: 3D image. Size of the last dimension must be 1 or 3. min_jpeg_quality: Minimum jpeg encoding quality to use. max_jpeg_quality: Maximum jpeg encoding quality to use. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. 
Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid. """ if (min_jpeg_quality < 0 or max_jpeg_quality < 0 or min_jpeg_quality > 100 or max_jpeg_quality > 100): raise ValueError('jpeg encoding range must be between 0 and 100.') if min_jpeg_quality >= max_jpeg_quality: raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.') jpeg_quality = random_ops.random_uniform([], min_jpeg_quality, max_jpeg_quality, seed=seed, dtype=dtypes.int32) return adjust_jpeg_quality(image, jpeg_quality) @tf_export('image.stateless_random_jpeg_quality', v1=[]) @dispatch.add_dispatch_support def stateless_random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed): """Deterministically radomize jpeg encoding quality for inducing jpeg noise. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). `min_jpeg_quality` must be in the interval `[0, 100]` and less than `max_jpeg_quality`. `max_jpeg_quality` must be in the interval `[0, 100]`. Usage Example: >>> x = [[[1, 2, 3], ... [4, 5, 6]], ... [[7, 8, 9], ... [10, 11, 12]]] >>> x_uint8 = tf.cast(x, tf.uint8) >>> seed = (1, 2) >>> tf.image.stateless_random_jpeg_quality(x_uint8, 75, 95, seed) <tf.Tensor: shape=(2, 2, 3), dtype=uint8, numpy= array([[[ 0, 4, 5], [ 1, 5, 6]], [[ 5, 9, 10], [ 5, 9, 10]]], dtype=uint8)> Args: image: 3D image. Size of the last dimension must be 1 or 3. min_jpeg_quality: Minimum jpeg encoding quality to use. max_jpeg_quality: Maximum jpeg encoding quality to use. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `min_jpeg_quality` or `max_jpeg_quality` is invalid. """ if (min_jpeg_quality < 0 or max_jpeg_quality < 0 or min_jpeg_quality > 100 or max_jpeg_quality > 100): raise ValueError('jpeg encoding range must be between 0 and 100.') if min_jpeg_quality >= max_jpeg_quality: raise ValueError('`min_jpeg_quality` must be less than `max_jpeg_quality`.') jpeg_quality = stateless_random_ops.stateless_random_uniform( shape=[], minval=min_jpeg_quality, maxval=max_jpeg_quality, seed=seed, dtype=dtypes.int32) return adjust_jpeg_quality(image, jpeg_quality) @tf_export('image.adjust_jpeg_quality') @dispatch.add_dispatch_support def adjust_jpeg_quality(image, jpeg_quality, name=None): """Adjust jpeg encoding quality of an image. This is a convenience method that converts an image to uint8 representation, encodes it to jpeg with `jpeg_quality`, decodes it, and then converts back to the original data type. `jpeg_quality` must be in the interval `[0, 100]`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_jpeg_quality(x, 75) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[1., 1., 1.], [1., 1., 1.]], [[1., 1., 1.], [1., 1., 1.]]], dtype=float32)> Args: image: 3D image. The size of the last dimension must be None, 1 or 3. jpeg_quality: Python int or Tensor of type int32. jpeg encoding quality. name: A name for this operation (optional). Returns: Adjusted image, same shape and DType as `image`. 
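  Note: the adjustment is performed via a full JPEG encode/decode round trip,
  so the result is generally not bit-identical to the input; lower values,
  e.g. `tf.image.adjust_jpeg_quality(image, 25)` (illustrative), introduce
  stronger compression artifacts.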
Raises: InvalidArgumentError: quality must be in [0,100] InvalidArgumentError: image must have 1 or 3 channels """ with ops.name_scope(name, 'adjust_jpeg_quality', [image]): image = ops.convert_to_tensor(image, name='image') channels = image.shape.as_list()[-1] # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype image = convert_image_dtype(image, dtypes.uint8, saturate=True) if not _is_tensor(jpeg_quality): # If jpeg_quality is a int (not tensor). jpeg_quality = ops.convert_to_tensor(jpeg_quality, dtype=dtypes.int32) image = gen_image_ops.encode_jpeg_variable_quality(image, jpeg_quality) image = gen_image_ops.decode_jpeg(image, channels=channels) return convert_image_dtype(image, orig_dtype, saturate=True) @tf_export('image.random_saturation') @dispatch.add_dispatch_support def random_saturation(image, lower, upper, seed=None): """Adjust the saturation of RGB images by a random factor. Equivalent to `adjust_saturation()` but uses a `saturation_factor` randomly picked in the interval `[lower, upper)`. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.random_saturation(x, 5, 10) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 0. , 1.5, 3. ], [ 0. , 3. , 6. ]], [[ 0. , 4.5, 9. ], [ 0. , 6. , 12. ]]], dtype=float32)> For producing deterministic results given a `seed` value, use `tf.image.stateless_random_saturation`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: RGB image or images. The size of the last dimension must be 3. lower: float. Lower bound for the random saturation factor. upper: float. Upper bound for the random saturation factor. seed: An operation-specific seed. It will be used in conjunction with the graph-level seed to determine the real seeds that will be used in this operation. Please see the documentation of set_random_seed for its interaction with the graph-level random seed. Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `upper <= lower` or if `lower < 0`. """ if upper <= lower: raise ValueError('upper must be > lower.') if lower < 0: raise ValueError('lower must be non-negative.') saturation_factor = random_ops.random_uniform([], lower, upper, seed=seed) return adjust_saturation(image, saturation_factor) @tf_export('image.stateless_random_saturation', v1=[]) @dispatch.add_dispatch_support def stateless_random_saturation(image, lower, upper, seed=None): """Adjust the saturation of RGB images by a random factor deterministically. Equivalent to `adjust_saturation()` but uses a `saturation_factor` randomly picked in the interval `[lower, upper)`. Guarantees the same results given the same `seed` independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> seed = (1, 2) >>> tf.image.stateless_random_saturation(x, 0.5, 1.0, seed) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 1.1559395, 2.0779698, 3. ], [ 4.1559396, 5.07797 , 6. ]], [[ 7.1559396, 8.07797 , 9. ], [10.155939 , 11.07797 , 12. ]]], dtype=float32)> Args: image: RGB image or images. The size of the last dimension must be 3. lower: float. 
Lower bound for the random saturation factor. upper: float. Upper bound for the random saturation factor. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) Returns: Adjusted image(s), same shape and DType as `image`. Raises: ValueError: if `upper <= lower` or if `lower < 0`. """ if upper <= lower: raise ValueError('upper must be > lower.') if lower < 0: raise ValueError('lower must be non-negative.') saturation_factor = stateless_random_ops.stateless_random_uniform( shape=[], minval=lower, maxval=upper, seed=seed) return adjust_saturation(image, saturation_factor) @tf_export('image.adjust_saturation') @dispatch.add_dispatch_support def adjust_saturation(image, saturation_factor, name=None): """Adjust saturation of RGB images. This is a convenience method that converts RGB images to float representation, converts them to HSV, adds an offset to the saturation channel, converts back to RGB and then back to the original data type. If several adjustments are chained it is advisable to minimize the number of redundant conversions. `image` is an RGB image or images. The image saturation is adjusted by converting the images to HSV and multiplying the saturation (S) channel by `saturation_factor` and clipping. The images are then converted back to RGB. Usage Example: >>> x = [[[1.0, 2.0, 3.0], ... [4.0, 5.0, 6.0]], ... [[7.0, 8.0, 9.0], ... [10.0, 11.0, 12.0]]] >>> tf.image.adjust_saturation(x, 0.5) <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy= array([[[ 2. , 2.5, 3. ], [ 5. , 5.5, 6. ]], [[ 8. , 8.5, 9. ], [11. , 11.5, 12. ]]], dtype=float32)> Args: image: RGB image or images. The size of the last dimension must be 3. saturation_factor: float. Factor to multiply the saturation by. name: A name for this operation (optional). Returns: Adjusted image(s), same shape and DType as `image`. Raises: InvalidArgumentError: input must have 3 channels """ with ops.name_scope(name, 'adjust_saturation', [image]) as name: image = ops.convert_to_tensor(image, name='image') # Remember original dtype to so we can convert back if needed orig_dtype = image.dtype if orig_dtype in (dtypes.float16, dtypes.float32): flt_image = image else: flt_image = convert_image_dtype(image, dtypes.float32) adjusted = gen_image_ops.adjust_saturation(flt_image, saturation_factor) return convert_image_dtype(adjusted, orig_dtype) @tf_export('io.is_jpeg', 'image.is_jpeg', v1=['io.is_jpeg', 'image.is_jpeg']) def is_jpeg(contents, name=None): r"""Convenience function to check if the 'contents' encodes a JPEG image. Args: contents: 0-D `string`. The encoded image bytes. name: A name for the operation (optional) Returns: A scalar boolean tensor indicating if 'contents' may be a JPEG image. is_jpeg is susceptible to false positives. """ # Normal JPEGs start with \xff\xd8\xff\xe0 # JPEG with EXIF starts with \xff\xd8\xff\xe1 # Use \xff\xd8\xff to cover both. with ops.name_scope(name, 'is_jpeg'): substr = string_ops.substr(contents, 0, 3) return math_ops.equal(substr, b'\xff\xd8\xff', name=name) def _is_png(contents, name=None): r"""Convenience function to check if the 'contents' encodes a PNG image. Args: contents: 0-D `string`. The encoded image bytes. name: A name for the operation (optional) Returns: A scalar boolean tensor indicating if 'contents' may be a PNG image. is_png is susceptible to false positives. 
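  The check relies on the PNG signature: every PNG file begins with the
  8-byte sequence `\211PNG\r\n\032\n`, and only the first three bytes
  (`\211PN`) are compared here, which is why false positives are possible.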
""" with ops.name_scope(name, 'is_png'): substr = string_ops.substr(contents, 0, 3) return math_ops.equal(substr, b'\211PN', name=name) tf_export( 'io.decode_and_crop_jpeg', 'image.decode_and_crop_jpeg', v1=['io.decode_and_crop_jpeg', 'image.decode_and_crop_jpeg'])( dispatch.add_dispatch_support(gen_image_ops.decode_and_crop_jpeg)) tf_export( 'io.decode_bmp', 'image.decode_bmp', v1=['io.decode_bmp', 'image.decode_bmp'])( dispatch.add_dispatch_support(gen_image_ops.decode_bmp)) tf_export( 'io.decode_gif', 'image.decode_gif', v1=['io.decode_gif', 'image.decode_gif'])( dispatch.add_dispatch_support(gen_image_ops.decode_gif)) tf_export( 'io.decode_jpeg', 'image.decode_jpeg', v1=['io.decode_jpeg', 'image.decode_jpeg'])( dispatch.add_dispatch_support(gen_image_ops.decode_jpeg)) tf_export( 'io.decode_png', 'image.decode_png', v1=['io.decode_png', 'image.decode_png'])( dispatch.add_dispatch_support(gen_image_ops.decode_png)) tf_export( 'io.encode_jpeg', 'image.encode_jpeg', v1=['io.encode_jpeg', 'image.encode_jpeg'])( dispatch.add_dispatch_support(gen_image_ops.encode_jpeg)) tf_export( 'io.extract_jpeg_shape', 'image.extract_jpeg_shape', v1=['io.extract_jpeg_shape', 'image.extract_jpeg_shape'])( dispatch.add_dispatch_support(gen_image_ops.extract_jpeg_shape)) @tf_export('io.encode_png', 'image.encode_png') @dispatch.add_dispatch_support def encode_png(image, compression=-1, name=None): r"""PNG-encode an image. `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` where `channels` is: * 1: for grayscale. * 2: for grayscale + alpha. * 3: for RGB. * 4: for RGBA. The ZLIB compression level, `compression`, can be -1 for the PNG-encoder default or a value from 0 to 9. 9 is the highest compression level, generating the smallest output, but is slower. Args: image: A `Tensor`. Must be one of the following types: `uint8`, `uint16`. 3-D with shape `[height, width, channels]`. compression: An optional `int`. Defaults to `-1`. Compression level. name: A name for the operation (optional). Returns: A `Tensor` of type `string`. """ return gen_image_ops.encode_png( ops.convert_to_tensor(image), compression, name) @tf_export( 'io.decode_image', 'image.decode_image', v1=['io.decode_image', 'image.decode_image']) @dispatch.add_dispatch_support def decode_image(contents, channels=None, dtype=dtypes.uint8, name=None, expand_animations=True): """Function for `decode_bmp`, `decode_gif`, `decode_jpeg`, and `decode_png`. Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the appropriate operation to convert the input bytes `string` into a `Tensor` of type `dtype`. Note: `decode_gif` returns a 4-D array `[num_frames, height, width, 3]`, as opposed to `decode_bmp`, `decode_jpeg` and `decode_png`, which return 3-D arrays `[height, width, num_channels]`. Make sure to take this into account when constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or PNG files. Alternately, set the `expand_animations` argument of this function to `False`, in which case the op will return 3-dimensional tensors and will truncate animated GIF files to the first frame. NOTE: If the first frame of an animated GIF does not occupy the entire canvas (maximum frame width x maximum frame height), then it fills the unoccupied areas (in the first frame) with zeros (black). For frames after the first frame that does not occupy the entire canvas, it uses the previous frame to fill the unoccupied areas. Args: contents: A `Tensor` of type `string`. 0-D. The encoded image bytes. 
channels: An optional `int`. Defaults to `0`. Number of color channels for the decoded image. dtype: The desired DType of the returned `Tensor`. name: A name for the operation (optional) expand_animations: An optional `bool`. Defaults to `True`. Controls the shape of the returned op's output. If `True`, the returned op will produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all GIFs, whether animated or not. If, `False`, the returned op will produce a 3-D tensor for all file types and will truncate animated GIFs to the first frame. Returns: `Tensor` with type `dtype` and a 3- or 4-dimensional shape, depending on the file type and the value of the `expand_animations` parameter. Raises: ValueError: On incorrect number of channels. """ with ops.name_scope(name, 'decode_image'): channels = 0 if channels is None else channels if dtype not in [dtypes.float32, dtypes.uint8, dtypes.uint16]: dest_dtype = dtype dtype = dtypes.uint16 return convert_image_dtype( gen_image_ops.decode_image( contents=contents, channels=channels, expand_animations=expand_animations, dtype=dtype), dest_dtype) else: return gen_image_ops.decode_image( contents=contents, channels=channels, expand_animations=expand_animations, dtype=dtype) @tf_export('image.total_variation') @dispatch.add_dispatch_support def total_variation(images, name=None): """Calculate and return the total variation for one or more images. The total variation is the sum of the absolute differences for neighboring pixel-values in the input images. This measures how much noise is in the images. This can be used as a loss-function during optimization so as to suppress noise in images. If you have a batch of images, then you should calculate the scalar loss-value as the sum: `loss = tf.reduce_sum(tf.image.total_variation(images))` This implements the anisotropic 2-D version of the formula described here: https://en.wikipedia.org/wiki/Total_variation_denoising Args: images: 4-D Tensor of shape `[batch, height, width, channels]` or 3-D Tensor of shape `[height, width, channels]`. name: A name for the operation (optional). Raises: ValueError: if images.shape is not a 3-D or 4-D vector. Returns: The total variation of `images`. If `images` was 4-D, return a 1-D float Tensor of shape `[batch]` with the total variation for each image in the batch. If `images` was 3-D, return a scalar float with the total variation for that image. """ with ops.name_scope(name, 'total_variation'): ndims = images.get_shape().ndims if ndims == 3: # The input is a single image with shape [height, width, channels]. # Calculate the difference of neighboring pixel-values. # The images are shifted one pixel along the height and width by slicing. pixel_dif1 = images[1:, :, :] - images[:-1, :, :] pixel_dif2 = images[:, 1:, :] - images[:, :-1, :] # Sum for all axis. (None is an alias for all axis.) sum_axis = None elif ndims == 4: # The input is a batch of images with shape: # [batch, height, width, channels]. # Calculate the difference of neighboring pixel-values. # The images are shifted one pixel along the height and width by slicing. pixel_dif1 = images[:, 1:, :, :] - images[:, :-1, :, :] pixel_dif2 = images[:, :, 1:, :] - images[:, :, :-1, :] # Only sum for the last 3 axis. # This results in a 1-D tensor with the total variation for each image. sum_axis = [1, 2, 3] else: raise ValueError('\'images\' must be either 3 or 4-dimensional.') # Calculate the total variation by taking the absolute value of the # pixel-differences and summing over the appropriate axis. 
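    # Worked example (a sketch, not part of the original comments): for a
    # single 2x2x1 image [[[0], [1]], [[3], [2]]],
    #   pixel_dif1 = [[[3], [1]]]     (differences between rows)
    #   pixel_dif2 = [[[1]], [[-1]]]  (differences between columns)
    # so the total variation is (|3| + |1|) + (|1| + |-1|) = 6.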
tot_var = ( math_ops.reduce_sum(math_ops.abs(pixel_dif1), axis=sum_axis) + math_ops.reduce_sum(math_ops.abs(pixel_dif2), axis=sum_axis)) return tot_var @tf_export('image.sample_distorted_bounding_box', v1=[]) @dispatch.add_dispatch_support def sample_distorted_bounding_box_v2(image_size, bounding_boxes, seed=0, min_object_covered=0.1, aspect_ratio_range=None, area_range=None, max_attempts=None, use_image_if_no_bounding_boxes=None, name=None): """Generate a single randomly distorted bounding box for an image. Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints. The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like. Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and the height of the underlying image. For example, ```python # Generate a single distorted bounding box. begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bounding_boxes, min_object_covered=0.1) # Draw the bounding box in an image summary. image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) tf.compat.v1.summary.image('images_with_box', image_with_box) # Employ the bounding box to distort the image. distorted_image = tf.slice(image, begin, size) ``` Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised. For producing deterministic results given a `seed` value, use `tf.image.stateless_sample_distorted_bounding_box`. Unlike using the `seed` param with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`. bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. seed: An optional `int`. Defaults to `0`. If `seed` is set to non-zero, the random number generator is seeded by the given `seed`. Otherwise, it is seeded by a random seed. min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied. aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75, 1.33]`. 
The cropped area of the image must have an aspect `ratio = width / height` within this range. area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The cropped area of the image must contain a fraction of the supplied image within this range. max_attempts: An optional `int`. Defaults to `100`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`. Controls behavior if no bounding boxes supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (begin, size, bboxes). begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`. size: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`. bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`. """ seed1, seed2 = random_seed.get_seed(seed) if seed else (0, 0) with ops.name_scope(name, 'sample_distorted_bounding_box'): return gen_image_ops.sample_distorted_bounding_box_v2( image_size, bounding_boxes, seed=seed1, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes, name=name) @tf_export('image.stateless_sample_distorted_bounding_box', v1=[]) @dispatch.add_dispatch_support def stateless_sample_distorted_bounding_box(image_size, bounding_boxes, seed, min_object_covered=0.1, aspect_ratio_range=None, area_range=None, max_attempts=None, use_image_if_no_bounding_boxes=None, name=None): """Generate a randomly distorted bounding box for an image deterministically. Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op, given the same `seed`, deterministically outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints. The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like. Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and the height of the underlying image. The output of this Op is guaranteed to be the same given the same `seed` and is independent of how many times the function is called, and independent of global seed settings (e.g. `tf.random.set_seed`). Example usage: >>> image = np.array([[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]]) >>> bbox = tf.constant( ... [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) >>> seed = (1, 2) >>> # Generate a single distorted bounding box. >>> bbox_begin, bbox_size, bbox_draw = ( ... 
tf.image.stateless_sample_distorted_bounding_box( ... tf.shape(image), bounding_boxes=bbox, seed=seed)) >>> # Employ the bounding box to distort the image. >>> tf.slice(image, bbox_begin, bbox_size) <tf.Tensor: shape=(2, 2, 1), dtype=int64, numpy= array([[[1], [2]], [[4], [5]]])> >>> # Draw the bounding box in an image summary. >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) >>> tf.image.draw_bounding_boxes( ... tf.expand_dims(tf.cast(image, tf.float32),0), bbox_draw, colors) <tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy= array([[[[1.], [1.], [3.]], [[1.], [1.], [6.]], [[7.], [8.], [9.]]]], dtype=float32)> Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = true` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised. Args: image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`. bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. seed: A shape [2] Tensor, the seed to the random number generator. Must have dtype `int32` or `int64`. (When using XLA, only `int32` is allowed.) min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied. aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75, 1.33]`. The cropped area of the image must have an aspect `ratio = width / height` within this range. area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The cropped area of the image must contain a fraction of the supplied image within this range. max_attempts: An optional `int`. Defaults to `100`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`. Controls behavior if no bounding boxes supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (begin, size, bboxes). begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`. size: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`. bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`. """ with ops.name_scope(name, 'stateless_sample_distorted_bounding_box'): return gen_image_ops.stateless_sample_distorted_bounding_box( image_size=image_size, bounding_boxes=bounding_boxes, seed=seed, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes, name=name) @tf_export(v1=['image.sample_distorted_bounding_box']) @dispatch.add_dispatch_support @deprecation.deprecated( date=None, instructions='`seed2` arg is deprecated.' 
'Use sample_distorted_bounding_box_v2 instead.') def sample_distorted_bounding_box(image_size, bounding_boxes, seed=None, seed2=None, min_object_covered=0.1, aspect_ratio_range=None, area_range=None, max_attempts=None, use_image_if_no_bounding_boxes=None, name=None): """Generate a single randomly distorted bounding box for an image. Bounding box annotations are often supplied in addition to ground-truth labels in image recognition or object localization tasks. A common technique for training such a system is to randomly distort an image while preserving its content, i.e. *data augmentation*. This Op outputs a randomly distorted localization of an object, i.e. bounding box, given an `image_size`, `bounding_boxes` and a series of constraints. The output of this Op is a single bounding box that may be used to crop the original image. The output is returned as 3 tensors: `begin`, `size` and `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize what the bounding box looks like. Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and height of the underlying image. For example, ```python # Generate a single distorted bounding box. begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( tf.shape(image), bounding_boxes=bounding_boxes, min_object_covered=0.1) # Draw the bounding box in an image summary. image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), bbox_for_draw) tf.compat.v1.summary.image('images_with_box', image_with_box) # Employ the bounding box to distort the image. distorted_image = tf.slice(image, begin, size) ``` Note that if no bounding box information is available, setting `use_image_if_no_bounding_boxes = True` will assume there is a single implicit bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is false and no bounding boxes are supplied, an error is raised. Args: image_size: A `Tensor`. Must be one of the following types: `uint8`, `int8`, `int16`, `int32`, `int64`. 1-D, containing `[height, width, channels]`. bounding_boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, N, 4]` describing the N bounding boxes associated with the image. seed: An optional `int`. Defaults to `0`. If either `seed` or `seed2` are set to non-zero, the random number generator is seeded by the given `seed`. Otherwise, it is seeded by a random seed. seed2: An optional `int`. Defaults to `0`. A second seed to avoid seed collision. min_object_covered: A Tensor of type `float32`. Defaults to `0.1`. The cropped area of the image must contain at least this fraction of any bounding box supplied. The value of this parameter should be non-negative. In the case of 0, the cropped area does not need to overlap any of the bounding boxes supplied. aspect_ratio_range: An optional list of `floats`. Defaults to `[0.75, 1.33]`. The cropped area of the image must have an aspect ratio = width / height within this range. area_range: An optional list of `floats`. Defaults to `[0.05, 1]`. The cropped area of the image must contain a fraction of the supplied image within this range. max_attempts: An optional `int`. Defaults to `100`. Number of attempts at generating a cropped region of the image of the specified constraints. After `max_attempts` failures, return the entire image. use_image_if_no_bounding_boxes: An optional `bool`. Defaults to `False`. 
Controls behavior if no bounding boxes supplied. If true, assume an implicit bounding box covering the whole input. If false, raise an error. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (begin, size, bboxes). begin: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[offset_height, offset_width, 0]`. Provide as input to `tf.slice`. size: A `Tensor`. Has the same type as `image_size`. 1-D, containing `[target_height, target_width, -1]`. Provide as input to `tf.slice`. bboxes: A `Tensor` of type `float32`. 3-D with shape `[1, 1, 4]` containing the distorted bounding box. Provide as input to `tf.image.draw_bounding_boxes`. """ with ops.name_scope(name, 'sample_distorted_bounding_box'): return gen_image_ops.sample_distorted_bounding_box_v2( image_size, bounding_boxes, seed=seed, seed2=seed2, min_object_covered=min_object_covered, aspect_ratio_range=aspect_ratio_range, area_range=area_range, max_attempts=max_attempts, use_image_if_no_bounding_boxes=use_image_if_no_bounding_boxes, name=name) @tf_export('image.non_max_suppression') @dispatch.add_dispatch_support def non_max_suppression(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), name=None): """Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval `[0, 1]`) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example: ```python selected_indices = tf.image.non_max_suppression( boxes, scores, max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression. iou_threshold: A 0-D float tensor representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A 0-D float tensor representing the threshold for deciding when to remove boxes based on score. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. 
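
  Conceptually, the selection matches the greedy sketch below (plain Python,
  for illustration only; `score_threshold` filtering is omitted, box corners
  are assumed ordered, and the real op runs as a single fused kernel):

  ```python
  def iou(a, b):
    # a, b are [y1, x1, y2, x2] with y1 <= y2 and x1 <= x2.
    inter_h = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    inter_w = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = inter_h * inter_w
    union = ((a[2] - a[0]) * (a[3] - a[1]) +
             (b[2] - b[0]) * (b[3] - b[1]) - inter)
    return inter / union if union > 0 else 0.0

  def greedy_nms(boxes, scores, max_output_size, iou_threshold):
    # Visit boxes in descending score order; keep a box only if it does not
    # overlap any previously kept box above the threshold.
    order = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)
    selected = []
    for i in order:
      if len(selected) == max_output_size:
        break
      if all(iou(boxes[i], boxes[j]) < iou_threshold for j in selected):
        selected.append(i)
    return selected
  ```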
""" with ops.name_scope(name, 'non_max_suppression'): iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, name='score_threshold') return gen_image_ops.non_max_suppression_v3(boxes, scores, max_output_size, iou_threshold, score_threshold) @tf_export('image.non_max_suppression_with_scores') @dispatch.add_dispatch_support def non_max_suppression_with_scores(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), soft_nms_sigma=0.0, name=None): """Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval `[0, 1]`) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example: ```python selected_indices, selected_scores = tf.image.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold=1.0, score_threshold=0.1, soft_nms_sigma=0.5) selected_boxes = tf.gather(boxes, selected_indices) ``` This function generalizes the `tf.image.non_max_suppression` op by also supporting a Soft-NMS (with Gaussian weighting) mode (c.f. Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score of other overlapping boxes instead of directly causing them to be pruned. Consequently, in contrast to `tf.image.non_max_suppression`, `tf.image.non_max_suppression_padded` returns the new scores of each input box in the second output, `selected_scores`. To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be larger than 0. When `soft_nms_sigma` equals 0, the behavior of `tf.image.non_max_suppression_padded` is identical to that of `tf.image.non_max_suppression` (except for the extra output) both in function and in running time. Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression. iou_threshold: A 0-D float tensor representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A 0-D float tensor representing the threshold for deciding when to remove boxes based on score. soft_nms_sigma: A 0-D float tensor representing the sigma parameter for Soft NMS; see Bodla et al (c.f. https://arxiv.org/abs/1704.04503). When `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard) NMS. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. 
selected_scores: A 1-D float tensor of shape `[M]` representing the corresponding scores for each selected box, where `M <= max_output_size`. Scores only differ from corresponding input scores when using Soft NMS (i.e. when `soft_nms_sigma>0`) """ with ops.name_scope(name, 'non_max_suppression_with_scores'): iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, name='score_threshold') soft_nms_sigma = ops.convert_to_tensor( soft_nms_sigma, name='soft_nms_sigma') (selected_indices, selected_scores, _) = gen_image_ops.non_max_suppression_v5( boxes, scores, max_output_size, iou_threshold, score_threshold, soft_nms_sigma, pad_to_max_output_size=False) return selected_indices, selected_scores @tf_export('image.non_max_suppression_overlaps') @dispatch.add_dispatch_support def non_max_suppression_with_overlaps(overlaps, scores, max_output_size, overlap_threshold=0.5, score_threshold=float('-inf'), name=None): """Greedily selects a subset of bounding boxes in descending order of score. Prunes away boxes that have high overlap with previously selected boxes. N-by-n overlap values are supplied as square matrix. The output of this operation is a set of integers indexing into the input collection of bounding boxes representing the selected boxes. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.gather` operation. For example: ```python selected_indices = tf.image.non_max_suppression_overlaps( overlaps, scores, max_output_size, iou_threshold) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: overlaps: A 2-D float `Tensor` of shape `[num_boxes, num_boxes]` representing the n-by-n box overlap values. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression. overlap_threshold: A 0-D float tensor representing the threshold for deciding whether boxes overlap too much with respect to the provided overlap values. score_threshold: A 0-D float tensor representing the threshold for deciding when to remove boxes based on score. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the overlaps tensor, where `M <= max_output_size`. """ with ops.name_scope(name, 'non_max_suppression_overlaps'): overlap_threshold = ops.convert_to_tensor( overlap_threshold, name='overlap_threshold') # pylint: disable=protected-access return gen_image_ops.non_max_suppression_with_overlaps( overlaps, scores, max_output_size, overlap_threshold, score_threshold) # pylint: enable=protected-access _rgb_to_yiq_kernel = [[0.299, 0.59590059, 0.2115], [0.587, -0.27455667, -0.52273617], [0.114, -0.32134392, 0.31119955]] @tf_export('image.rgb_to_yiq') @dispatch.add_dispatch_support def rgb_to_yiq(images): """Converts one or more images from RGB to YIQ. Outputs a tensor of the same shape as the `images` tensor, containing the YIQ value of the pixels. The output is only well defined if the value in images are in [0,1]. Usage Example: >>> x = tf.constant([[[1.0, 2.0, 3.0]]]) >>> tf.image.rgb_to_yiq(x) <tf.Tensor: shape=(1, 1, 3), dtype=float32, numpy=array([[[ 1.815 , -0.91724455, 0.09962624]]], dtype=float32)> Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. 
Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _rgb_to_yiq_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) _yiq_to_rgb_kernel = [[1, 1, 1], [0.95598634, -0.27201283, -1.10674021], [0.6208248, -0.64720424, 1.70423049]] @tf_export('image.yiq_to_rgb') @dispatch.add_dispatch_support def yiq_to_rgb(images): """Converts one or more images from YIQ to RGB. Outputs a tensor of the same shape as the `images` tensor, containing the RGB value of the pixels. The output is only well defined if the Y value in images are in [0,1], I value are in [-0.5957,0.5957] and Q value are in [-0.5226,0.5226]. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _yiq_to_rgb_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) _rgb_to_yuv_kernel = [[0.299, -0.14714119, 0.61497538], [0.587, -0.28886916, -0.51496512], [0.114, 0.43601035, -0.10001026]] @tf_export('image.rgb_to_yuv') @dispatch.add_dispatch_support def rgb_to_yuv(images): """Converts one or more images from RGB to YUV. Outputs a tensor of the same shape as the `images` tensor, containing the YUV value of the pixels. The output is only well defined if the value in images are in [0, 1]. There are two ways of representing an image: [0, 255] pixel values range or [0, 1] (as float) pixel values range. Users need to convert the input image into a float [0, 1] range. Args: images: 2-D or higher rank. Image data to convert. Last dimension must be size 3. Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _rgb_to_yuv_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) _yuv_to_rgb_kernel = [[1, 1, 1], [0, -0.394642334, 2.03206185], [1.13988303, -0.58062185, 0]] @tf_export('image.yuv_to_rgb') @dispatch.add_dispatch_support def yuv_to_rgb(images): """Converts one or more images from YUV to RGB. Outputs a tensor of the same shape as the `images` tensor, containing the RGB value of the pixels. The output is only well defined if the Y value in images are in [0,1], U and V value are in [-0.5,0.5]. As per the above description, you need to scale your YUV images if their pixel values are not in the required range. Below given example illustrates preprocessing of each channel of images before feeding them to `yuv_to_rgb`. ```python yuv_images = tf.random.uniform(shape=[100, 64, 64, 3], maxval=255) last_dimension_axis = len(yuv_images.shape) - 1 yuv_tensor_images = tf.truediv( tf.subtract( yuv_images, tf.reduce_min(yuv_images) ), tf.subtract( tf.reduce_max(yuv_images), tf.reduce_min(yuv_images) ) ) y, u, v = tf.split(yuv_tensor_images, 3, axis=last_dimension_axis) target_uv_min, target_uv_max = -0.5, 0.5 u = u * (target_uv_max - target_uv_min) + target_uv_min v = v * (target_uv_max - target_uv_min) + target_uv_min preprocessed_yuv_images = tf.concat([y, u, v], axis=last_dimension_axis) rgb_tensor_images = tf.image.yuv_to_rgb(preprocessed_yuv_images) ``` Args: images: 2-D or higher rank. Image data to convert. 
Last dimension must be size 3. Returns: images: tensor with the same shape as `images`. """ images = ops.convert_to_tensor(images, name='images') kernel = ops.convert_to_tensor( _yuv_to_rgb_kernel, dtype=images.dtype, name='kernel') ndims = images.get_shape().ndims return math_ops.tensordot(images, kernel, axes=[[ndims - 1], [0]]) def _verify_compatible_image_shapes(img1, img2): """Checks if two image tensors are compatible for applying SSIM or PSNR. This function checks if two sets of images have ranks at least 3, and if the last three dimensions match. Args: img1: Tensor containing the first image batch. img2: Tensor containing the second image batch. Returns: A tuple containing: the first tensor shape, the second tensor shape, and a list of control_flow_ops.Assert() ops implementing the checks. Raises: ValueError: When static shape check fails. """ shape1 = img1.get_shape().with_rank_at_least(3) shape2 = img2.get_shape().with_rank_at_least(3) shape1[-3:].assert_is_compatible_with(shape2[-3:]) if shape1.ndims is not None and shape2.ndims is not None: for dim1, dim2 in zip( reversed(shape1.dims[:-3]), reversed(shape2.dims[:-3])): if not (dim1 == 1 or dim2 == 1 or dim1.is_compatible_with(dim2)): raise ValueError('Two images are not compatible: %s and %s' % (shape1, shape2)) # Now assign shape tensors. shape1, shape2 = array_ops.shape_n([img1, img2]) # TODO(sjhwang): Check if shape1[:-3] and shape2[:-3] are broadcastable. checks = [] checks.append( control_flow_ops.Assert( math_ops.greater_equal(array_ops.size(shape1), 3), [shape1, shape2], summarize=10)) checks.append( control_flow_ops.Assert( math_ops.reduce_all(math_ops.equal(shape1[-3:], shape2[-3:])), [shape1, shape2], summarize=10)) return shape1, shape2, checks @tf_export('image.psnr') @dispatch.add_dispatch_support def psnr(a, b, max_val, name=None): """Returns the Peak Signal-to-Noise Ratio between a and b. This is intended to be used on signals (or images). Produces a PSNR value for each image in batch. The last three dimensions of input are expected to be [height, width, depth]. Example: ```python # Read images from file. im1 = tf.decode_png('path/to/im1.png') im2 = tf.decode_png('path/to/im2.png') # Compute PSNR over tf.uint8 Tensors. psnr1 = tf.image.psnr(im1, im2, max_val=255) # Compute PSNR over tf.float32 Tensors. im1 = tf.image.convert_image_dtype(im1, tf.float32) im2 = tf.image.convert_image_dtype(im2, tf.float32) psnr2 = tf.image.psnr(im1, im2, max_val=1.0) # psnr1 and psnr2 both have type tf.float32 and are almost equal. ``` Args: a: First set of images. b: Second set of images. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). name: Namespace to embed the computation in. Returns: The scalar PSNR between a and b. The returned tensor has type `tf.float32` and shape [batch_size, 1]. """ with ops.name_scope(name, 'PSNR', [a, b]): # Need to convert the images to float32. Scale max_val accordingly so that # PSNR is computed correctly. 
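    # PSNR is 10 * log10(max_val**2 / MSE), computed below as
    # 20 * log10(max_val) - 10 * log10(MSE) using natural logarithms scaled
    # by 1 / ln(10). The MSE is averaged over the last three dimensions
    # (height, width, depth), so one PSNR value is produced per image in the
    # batch.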
max_val = math_ops.cast(max_val, a.dtype) max_val = convert_image_dtype(max_val, dtypes.float32) a = convert_image_dtype(a, dtypes.float32) b = convert_image_dtype(b, dtypes.float32) mse = math_ops.reduce_mean(math_ops.squared_difference(a, b), [-3, -2, -1]) psnr_val = math_ops.subtract( 20 * math_ops.log(max_val) / math_ops.log(10.0), np.float32(10 / np.log(10)) * math_ops.log(mse), name='psnr') _, _, checks = _verify_compatible_image_shapes(a, b) with ops.control_dependencies(checks): return array_ops.identity(psnr_val) def _ssim_helper(x, y, reducer, max_val, compensation=1.0, k1=0.01, k2=0.03): r"""Helper function for computing SSIM. SSIM estimates covariances with weighted sums. The default parameters use a biased estimate of the covariance: Suppose `reducer` is a weighted sum, then the mean estimators are \mu_x = \sum_i w_i x_i, \mu_y = \sum_i w_i y_i, where w_i's are the weighted-sum weights, and covariance estimator is cov_{xy} = \sum_i w_i (x_i - \mu_x) (y_i - \mu_y) with assumption \sum_i w_i = 1. This covariance estimator is biased, since E[cov_{xy}] = (1 - \sum_i w_i ^ 2) Cov(X, Y). For SSIM measure with unbiased covariance estimators, pass as `compensation` argument (1 - \sum_i w_i ^ 2). Args: x: First set of images. y: Second set of images. reducer: Function that computes 'local' averages from the set of images. For non-convolutional version, this is usually tf.reduce_mean(x, [1, 2]), and for convolutional version, this is usually tf.nn.avg_pool2d or tf.nn.conv2d with weighted-sum kernel. max_val: The dynamic range (i.e., the difference between the maximum possible allowed value and the minimum allowed value). compensation: Compensation factor. See above. k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so it would be better if we took the values in the range of 0 < K2 < 0.4). Returns: A pair containing the luminance measure, and the contrast-structure measure. """ c1 = (k1 * max_val)**2 c2 = (k2 * max_val)**2 # SSIM luminance measure is # (2 * mu_x * mu_y + c1) / (mu_x ** 2 + mu_y ** 2 + c1). mean0 = reducer(x) mean1 = reducer(y) num0 = mean0 * mean1 * 2.0 den0 = math_ops.square(mean0) + math_ops.square(mean1) luminance = (num0 + c1) / (den0 + c1) # SSIM contrast-structure measure is # (2 * cov_{xy} + c2) / (cov_{xx} + cov_{yy} + c2). # Note that `reducer` is a weighted sum with weight w_k, \sum_i w_i = 1, then # cov_{xy} = \sum_i w_i (x_i - \mu_x) (y_i - \mu_y) # = \sum_i w_i x_i y_i - (\sum_i w_i x_i) (\sum_j w_j y_j). num1 = reducer(x * y) * 2.0 den1 = reducer(math_ops.square(x) + math_ops.square(y)) c2 *= compensation cs = (num1 - num0 + c2) / (den1 - den0 + c2) # SSIM score is the product of the luminance and contrast-structure measures. return luminance, cs def _fspecial_gauss(size, sigma): """Function to mimic the 'fspecial' gaussian MATLAB function.""" size = ops.convert_to_tensor(size, dtypes.int32) sigma = ops.convert_to_tensor(sigma) coords = math_ops.cast(math_ops.range(size), sigma.dtype) coords -= math_ops.cast(size - 1, sigma.dtype) / 2.0 g = math_ops.square(coords) g *= -0.5 / math_ops.square(sigma) g = array_ops.reshape(g, shape=[1, -1]) + array_ops.reshape(g, shape=[-1, 1]) g = array_ops.reshape(g, shape=[1, -1]) # For tf.nn.softmax(). g = nn_ops.softmax(g) return array_ops.reshape(g, shape=[size, size, 1, 1]) def _ssim_per_channel(img1, img2, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): """Computes SSIM index between img1 and img2 per color channel. 
This function matches the standard SSIM implementation from: Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing. Details: - 11x11 Gaussian filter of width 1.5 is used. - k1 = 0.01, k2 = 0.03 as in the original paper. Args: img1: First image batch. img2: Second image batch. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). filter_size: Default value 11 (size of gaussian filter). filter_sigma: Default value 1.5 (width of gaussian filter). k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so it would be better if we took the values in the range of 0 < K2 < 0.4). Returns: A pair of tensors containing and channel-wise SSIM and contrast-structure values. The shape is [..., channels]. """ filter_size = constant_op.constant(filter_size, dtype=dtypes.int32) filter_sigma = constant_op.constant(filter_sigma, dtype=img1.dtype) shape1, shape2 = array_ops.shape_n([img1, img2]) checks = [ control_flow_ops.Assert( math_ops.reduce_all( math_ops.greater_equal(shape1[-3:-1], filter_size)), [shape1, filter_size], summarize=8), control_flow_ops.Assert( math_ops.reduce_all( math_ops.greater_equal(shape2[-3:-1], filter_size)), [shape2, filter_size], summarize=8) ] # Enforce the check to run before computation. with ops.control_dependencies(checks): img1 = array_ops.identity(img1) # TODO(sjhwang): Try to cache kernels and compensation factor. kernel = _fspecial_gauss(filter_size, filter_sigma) kernel = array_ops.tile(kernel, multiples=[1, 1, shape1[-1], 1]) # The correct compensation factor is `1.0 - tf.reduce_sum(tf.square(kernel))`, # but to match MATLAB implementation of MS-SSIM, we use 1.0 instead. compensation = 1.0 # TODO(sjhwang): Try FFT. # TODO(sjhwang): Gaussian kernel is separable in space. Consider applying # 1-by-n and n-by-1 Gaussian filters instead of an n-by-n filter. def reducer(x): shape = array_ops.shape(x) x = array_ops.reshape(x, shape=array_ops.concat([[-1], shape[-3:]], 0)) y = nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID') return array_ops.reshape( y, array_ops.concat([shape[:-3], array_ops.shape(y)[1:]], 0)) luminance, cs = _ssim_helper(img1, img2, reducer, max_val, compensation, k1, k2) # Average over the second and the third from the last: height, width. axes = constant_op.constant([-3, -2], dtype=dtypes.int32) ssim_val = math_ops.reduce_mean(luminance * cs, axes) cs = math_ops.reduce_mean(cs, axes) return ssim_val, cs @tf_export('image.ssim') @dispatch.add_dispatch_support def ssim(img1, img2, max_val, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): """Computes SSIM index between img1 and img2. This function is based on the standard SSIM implementation from: Wang, Z., Bovik, A. C., Sheikh, H. R., & Simoncelli, E. P. (2004). Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing. Note: The true SSIM is only defined on grayscale. This function does not perform any colorspace transform. (If the input is already YUV, then it will compute YUV SSIM average.) Details: - 11x11 Gaussian filter of width 1.5 is used. - k1 = 0.01, k2 = 0.03 as in the original paper. The image sizes must be at least 11x11 because of the filter size. Example: ```python # Read images (of size 255 x 255) from file. 
im1 = tf.image.decode_image(tf.io.read_file('path/to/im1.png')) im2 = tf.image.decode_image(tf.io.read_file('path/to/im2.png')) tf.shape(im1) # `img1.png` has 3 channels; shape is `(255, 255, 3)` tf.shape(im2) # `img2.png` has 3 channels; shape is `(255, 255, 3)` # Add an outer batch for each image. im1 = tf.expand_dims(im1, axis=0) im2 = tf.expand_dims(im2, axis=0) # Compute SSIM over tf.uint8 Tensors. ssim1 = tf.image.ssim(im1, im2, max_val=255, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) # Compute SSIM over tf.float32 Tensors. im1 = tf.image.convert_image_dtype(im1, tf.float32) im2 = tf.image.convert_image_dtype(im2, tf.float32) ssim2 = tf.image.ssim(im1, im2, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03) # ssim1 and ssim2 both have type tf.float32 and are almost equal. ``` Args: img1: First image batch. 4-D Tensor of shape `[batch, height, width, channels]` with only Positive Pixel Values. img2: Second image batch. 4-D Tensor of shape `[batch, height, width, channels]` with only Positive Pixel Values. max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). filter_size: Default value 11 (size of gaussian filter). filter_sigma: Default value 1.5 (width of gaussian filter). k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so it would be better if we took the values in the range of 0 < K2 < 0.4). Returns: A tensor containing an SSIM value for each image in batch. Returned SSIM values are in range (-1, 1], when pixel values are non-negative. Returns a tensor with shape: broadcast(img1.shape[:-3], img2.shape[:-3]). """ with ops.name_scope(None, 'SSIM', [img1, img2]): # Convert to tensor if needed. img1 = ops.convert_to_tensor(img1, name='img1') img2 = ops.convert_to_tensor(img2, name='img2') # Shape checking. _, _, checks = _verify_compatible_image_shapes(img1, img2) with ops.control_dependencies(checks): img1 = array_ops.identity(img1) # Need to convert the images to float32. Scale max_val accordingly so that # SSIM is computed correctly. max_val = math_ops.cast(max_val, img1.dtype) max_val = convert_image_dtype(max_val, dtypes.float32) img1 = convert_image_dtype(img1, dtypes.float32) img2 = convert_image_dtype(img2, dtypes.float32) ssim_per_channel, _ = _ssim_per_channel(img1, img2, max_val, filter_size, filter_sigma, k1, k2) # Compute average over color channels. return math_ops.reduce_mean(ssim_per_channel, [-1]) # Default values obtained by Wang et al. _MSSSIM_WEIGHTS = (0.0448, 0.2856, 0.3001, 0.2363, 0.1333) @tf_export('image.ssim_multiscale') @dispatch.add_dispatch_support def ssim_multiscale(img1, img2, max_val, power_factors=_MSSSIM_WEIGHTS, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03): """Computes the MS-SSIM between img1 and img2. This function assumes that `img1` and `img2` are image batches, i.e. the last three dimensions are [height, width, channels]. Note: The true SSIM is only defined on grayscale. This function does not perform any colorspace transform. (If the input is already YUV, then it will compute YUV SSIM average.) Original paper: Wang, Zhou, Eero P. Simoncelli, and Alan C. Bovik. "Multiscale structural similarity for image quality assessment." Signals, Systems and Computers, 2004. Args: img1: First image batch with only Positive Pixel Values. img2: Second image batch with only Positive Pixel Values. Must have the same rank as img1. 
max_val: The dynamic range of the images (i.e., the difference between the maximum the and minimum allowed values). power_factors: Iterable of weights for each of the scales. The number of scales used is the length of the list. Index 0 is the unscaled resolution's weight and each increasing scale corresponds to the image being downsampled by 2. Defaults to (0.0448, 0.2856, 0.3001, 0.2363, 0.1333), which are the values obtained in the original paper. filter_size: Default value 11 (size of gaussian filter). filter_sigma: Default value 1.5 (width of gaussian filter). k1: Default value 0.01 k2: Default value 0.03 (SSIM is less sensitivity to K2 for lower values, so it would be better if we took the values in the range of 0 < K2 < 0.4). Returns: A tensor containing an MS-SSIM value for each image in batch. The values are in range [0, 1]. Returns a tensor with shape: broadcast(img1.shape[:-3], img2.shape[:-3]). """ with ops.name_scope(None, 'MS-SSIM', [img1, img2]): # Convert to tensor if needed. img1 = ops.convert_to_tensor(img1, name='img1') img2 = ops.convert_to_tensor(img2, name='img2') # Shape checking. shape1, shape2, checks = _verify_compatible_image_shapes(img1, img2) with ops.control_dependencies(checks): img1 = array_ops.identity(img1) # Need to convert the images to float32. Scale max_val accordingly so that # SSIM is computed correctly. max_val = math_ops.cast(max_val, img1.dtype) max_val = convert_image_dtype(max_val, dtypes.float32) img1 = convert_image_dtype(img1, dtypes.float32) img2 = convert_image_dtype(img2, dtypes.float32) imgs = [img1, img2] shapes = [shape1, shape2] # img1 and img2 are assumed to be a (multi-dimensional) batch of # 3-dimensional images (height, width, channels). `heads` contain the batch # dimensions, and `tails` contain the image dimensions. heads = [s[:-3] for s in shapes] tails = [s[-3:] for s in shapes] divisor = [1, 2, 2, 1] divisor_tensor = constant_op.constant(divisor[1:], dtype=dtypes.int32) def do_pad(images, remainder): padding = array_ops.expand_dims(remainder, -1) padding = array_ops.pad(padding, [[1, 0], [1, 0]]) return [array_ops.pad(x, padding, mode='SYMMETRIC') for x in images] mcs = [] for k in range(len(power_factors)): with ops.name_scope(None, 'Scale%d' % k, imgs): if k > 0: # Avg pool takes rank 4 tensors. Flatten leading dimensions. flat_imgs = [ array_ops.reshape(x, array_ops.concat([[-1], t], 0)) for x, t in zip(imgs, tails) ] remainder = tails[0] % divisor_tensor need_padding = math_ops.reduce_any(math_ops.not_equal(remainder, 0)) # pylint: disable=cell-var-from-loop padded = control_flow_ops.cond(need_padding, lambda: do_pad(flat_imgs, remainder), lambda: flat_imgs) # pylint: enable=cell-var-from-loop downscaled = [ nn_ops.avg_pool( x, ksize=divisor, strides=divisor, padding='VALID') for x in padded ] tails = [x[1:] for x in array_ops.shape_n(downscaled)] imgs = [ array_ops.reshape(x, array_ops.concat([h, t], 0)) for x, h, t in zip(downscaled, heads, tails) ] # Overwrite previous ssim value since we only need the last one. ssim_per_channel, cs = _ssim_per_channel( *imgs, max_val=max_val, filter_size=filter_size, filter_sigma=filter_sigma, k1=k1, k2=k2) mcs.append(nn_ops.relu(cs)) # Remove the cs score for the last scale. In the MS-SSIM calculation, # we use the l(p) at the highest scale. l(p) * cs(p) is ssim(p). mcs.pop() # Remove the cs score for the last scale. mcs_and_ssim = array_ops.stack( mcs + [nn_ops.relu(ssim_per_channel)], axis=-1) # Take weighted geometric mean across the scale axis. 
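    # Concretely, ms_ssim = prod_k value_k ** power_factors[k], where value_k
    # is the ReLU-clipped contrast-structure score at scale k for every scale
    # except the coarsest, and the full per-channel SSIM at the coarsest
    # scale. The product is then averaged over color channels below.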
ms_ssim = math_ops.reduce_prod( math_ops.pow(mcs_and_ssim, power_factors), [-1]) return math_ops.reduce_mean(ms_ssim, [-1]) # Avg over color channels. @tf_export('image.image_gradients') @dispatch.add_dispatch_support def image_gradients(image): """Returns image gradients (dy, dx) for each color channel. Both output tensors have the same shape as the input: [batch_size, h, w, d]. The gradient values are organized so that [I(x+1, y) - I(x, y)] is in location (x, y). That means that dy will always have zeros in the last row, and dx will always have zeros in the last column. Usage Example: ```python BATCH_SIZE = 1 IMAGE_HEIGHT = 5 IMAGE_WIDTH = 5 CHANNELS = 1 image = tf.reshape(tf.range(IMAGE_HEIGHT * IMAGE_WIDTH * CHANNELS, delta=1, dtype=tf.float32), shape=(BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS)) dy, dx = tf.image.image_gradients(image) print(image[0, :,:,0]) tf.Tensor( [[ 0. 1. 2. 3. 4.] [ 5. 6. 7. 8. 9.] [10. 11. 12. 13. 14.] [15. 16. 17. 18. 19.] [20. 21. 22. 23. 24.]], shape=(5, 5), dtype=float32) print(dy[0, :,:,0]) tf.Tensor( [[5. 5. 5. 5. 5.] [5. 5. 5. 5. 5.] [5. 5. 5. 5. 5.] [5. 5. 5. 5. 5.] [0. 0. 0. 0. 0.]], shape=(5, 5), dtype=float32) print(dx[0, :,:,0]) tf.Tensor( [[1. 1. 1. 1. 0.] [1. 1. 1. 1. 0.] [1. 1. 1. 1. 0.] [1. 1. 1. 1. 0.] [1. 1. 1. 1. 0.]], shape=(5, 5), dtype=float32) ``` Args: image: Tensor with shape [batch_size, h, w, d]. Returns: Pair of tensors (dy, dx) holding the vertical and horizontal image gradients (1-step finite difference). Raises: ValueError: If `image` is not a 4D tensor. """ if image.get_shape().ndims != 4: raise ValueError('image_gradients expects a 4D tensor ' '[batch_size, h, w, d], not {}.'.format(image.get_shape())) image_shape = array_ops.shape(image) batch_size, height, width, depth = array_ops.unstack(image_shape) dy = image[:, 1:, :, :] - image[:, :-1, :, :] dx = image[:, :, 1:, :] - image[:, :, :-1, :] # Return tensors with same size as original image by concatenating # zeros. Place the gradient [I(x+1,y) - I(x,y)] on the base pixel (x, y). shape = array_ops.stack([batch_size, 1, width, depth]) dy = array_ops.concat([dy, array_ops.zeros(shape, image.dtype)], 1) dy = array_ops.reshape(dy, image_shape) shape = array_ops.stack([batch_size, height, 1, depth]) dx = array_ops.concat([dx, array_ops.zeros(shape, image.dtype)], 2) dx = array_ops.reshape(dx, image_shape) return dy, dx @tf_export('image.sobel_edges') @dispatch.add_dispatch_support def sobel_edges(image): """Returns a tensor holding Sobel edge maps. Example usage: For general usage, `image` would be loaded from a file as below: ```python image_bytes = tf.io.read_file(path_to_image_file) image = tf.image.decode_image(image_bytes) image = tf.cast(image, tf.float32) image = tf.expand_dims(image, 0) ``` But for demo purposes, we are using randomly generated values for `image`: >>> image = tf.random.uniform( ... maxval=255, shape=[1, 28, 28, 3], dtype=tf.float32) >>> sobel = tf.image.sobel_edges(image) >>> sobel_y = np.asarray(sobel[0, :, :, :, 0]) # sobel in y-direction >>> sobel_x = np.asarray(sobel[0, :, :, :, 1]) # sobel in x-direction For displaying the sobel results, PIL's [Image Module]( https://pillow.readthedocs.io/en/stable/reference/Image.html) can be used: ```python # Display edge maps for the first channel (at index 0) Image.fromarray(sobel_y[..., 0] / 4 + 0.5).show() Image.fromarray(sobel_x[..., 0] / 4 + 0.5).show() ``` Args: image: Image tensor with shape [batch_size, h, w, d] and type float32 or float64. The image(s) must be 2x2 or larger. 
Returns: Tensor holding edge maps for each channel. Returns a tensor with shape [batch_size, h, w, d, 2] where the last two dimensions hold [[dy[0], dx[0]], [dy[1], dx[1]], ..., [dy[d-1], dx[d-1]]] calculated using the Sobel filter. """ # Define vertical and horizontal Sobel filters. static_image_shape = image.get_shape() image_shape = array_ops.shape(image) kernels = [[[-1, -2, -1], [0, 0, 0], [1, 2, 1]], [[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]] num_kernels = len(kernels) kernels = np.transpose(np.asarray(kernels), (1, 2, 0)) kernels = np.expand_dims(kernels, -2) kernels_tf = constant_op.constant(kernels, dtype=image.dtype) kernels_tf = array_ops.tile( kernels_tf, [1, 1, image_shape[-1], 1], name='sobel_filters') # Use depth-wise convolution to calculate edge maps per channel. pad_sizes = [[0, 0], [1, 1], [1, 1], [0, 0]] padded = array_ops.pad(image, pad_sizes, mode='REFLECT') # Output tensor has shape [batch_size, h, w, d * num_kernels]. strides = [1, 1, 1, 1] output = nn.depthwise_conv2d(padded, kernels_tf, strides, 'VALID') # Reshape to [batch_size, h, w, d, num_kernels]. shape = array_ops.concat([image_shape, [num_kernels]], 0) output = array_ops.reshape(output, shape=shape) output.set_shape(static_image_shape.concatenate([num_kernels])) return output def resize_bicubic(images, size, align_corners=False, name=None, half_pixel_centers=False): return gen_image_ops.resize_bicubic( images=images, size=size, align_corners=align_corners, half_pixel_centers=half_pixel_centers, name=name) def resize_bilinear(images, size, align_corners=False, name=None, half_pixel_centers=False): return gen_image_ops.resize_bilinear( images=images, size=size, align_corners=align_corners, half_pixel_centers=half_pixel_centers, name=name) def resize_nearest_neighbor(images, size, align_corners=False, name=None, half_pixel_centers=False): return gen_image_ops.resize_nearest_neighbor( images=images, size=size, align_corners=align_corners, half_pixel_centers=half_pixel_centers, name=name) resize_area_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.AREA...)` instead.')) tf_export(v1=['image.resize_area'])( resize_area_deprecation( dispatch.add_dispatch_support(gen_image_ops.resize_area))) resize_bicubic_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.BICUBIC...)` instead.')) tf_export(v1=['image.resize_bicubic'])( dispatch.add_dispatch_support(resize_bicubic_deprecation(resize_bicubic))) resize_bilinear_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.BILINEAR...)` instead.')) tf_export(v1=['image.resize_bilinear'])( dispatch.add_dispatch_support(resize_bilinear_deprecation(resize_bilinear))) resize_nearest_neighbor_deprecation = deprecation.deprecated( date=None, instructions=( 'Use `tf.image.resize(...method=ResizeMethod.NEAREST_NEIGHBOR...)` ' 'instead.')) tf_export(v1=['image.resize_nearest_neighbor'])( dispatch.add_dispatch_support( resize_nearest_neighbor_deprecation(resize_nearest_neighbor))) @tf_export('image.crop_and_resize', v1=[]) @dispatch.add_dispatch_support def crop_and_resize_v2(image, boxes, box_indices, crop_size, method='bilinear', extrapolation_value=0, name=None): """Extracts crops from the input image tensor and resizes them. 
Extracts crops from the input image tensor and resizes them using bilinear sampling or nearest neighbor sampling (possibly with aspect ratio change) to a common output size specified by `crop_size`. This is more general than the `crop_to_bounding_box` op which extracts a fixed size slice from the input image and does not allow resizing or aspect ratio change. Returns a tensor with `crops` from the input `image` at positions defined at the bounding box locations in `boxes`. The cropped boxes are all resized (with bilinear or nearest neighbor interpolation) to a fixed `size = [crop_height, crop_width]`. The result is a 4-D tensor `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned. In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical results to using `tf.compat.v1.image.resize_bilinear()` or `tf.compat.v1.image.resize_nearest_neighbor()`(depends on the `method` argument) with `align_corners=True`. Args: image: A 4-D tensor of shape `[batch, image_height, image_width, depth]`. Both `image_height` and `image_width` need to be positive. boxes: A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor specifies the coordinates of a box in the `box_ind[i]` image and is specified in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the `[0, 1]` interval of normalized image height is mapped to `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the `[0, 1]` range are allowed, in which case we use `extrapolation_value` to extrapolate the input image values. box_indices: A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. The value of `box_ind[i]` specifies the image that the `i`-th box refers to. crop_size: A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both `crop_height` and `crop_width` need to be positive. method: An optional string specifying the sampling method for resizing. It can be either `"bilinear"` or `"nearest"` and default to `"bilinear"`. Currently two sampling methods are supported: Bilinear and Nearest Neighbor. extrapolation_value: An optional `float`. Defaults to `0`. Value used for extrapolation, when applicable. name: A name for the operation (optional). Returns: A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. 
Example: ```python import tensorflow as tf BATCH_SIZE = 1 NUM_BOXES = 5 IMAGE_HEIGHT = 256 IMAGE_WIDTH = 256 CHANNELS = 3 CROP_SIZE = (24, 24) image = tf.random.normal(shape=(BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, CHANNELS) ) boxes = tf.random.uniform(shape=(NUM_BOXES, 4)) box_indices = tf.random.uniform(shape=(NUM_BOXES,), minval=0, maxval=BATCH_SIZE, dtype=tf.int32) output = tf.image.crop_and_resize(image, boxes, box_indices, CROP_SIZE) output.shape #=> (5, 24, 24, 3) ``` """ return gen_image_ops.crop_and_resize(image, boxes, box_indices, crop_size, method, extrapolation_value, name) @tf_export(v1=['image.crop_and_resize']) @dispatch.add_dispatch_support @deprecation.deprecated_args(None, 'box_ind is deprecated, use box_indices instead', 'box_ind') def crop_and_resize_v1( # pylint: disable=missing-docstring image, boxes, box_ind=None, crop_size=None, method='bilinear', extrapolation_value=0, name=None, box_indices=None): box_ind = deprecation.deprecated_argument_lookup('box_indices', box_indices, 'box_ind', box_ind) return gen_image_ops.crop_and_resize(image, boxes, box_ind, crop_size, method, extrapolation_value, name) crop_and_resize_v1.__doc__ = gen_image_ops.crop_and_resize.__doc__ @tf_export(v1=['image.extract_glimpse']) @dispatch.add_dispatch_support def extract_glimpse( input, # pylint: disable=redefined-builtin size, offsets, centered=True, normalized=True, uniform_noise=True, name=None): """Extracts a glimpse from the input tensor. Returns a set of windows called glimpses extracted at location `offsets` from the input tensor. If the windows only partially overlaps the inputs, the non-overlapping areas will be filled with random noise. The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the `size` parameter. The argument `normalized` and `centered` controls how the windows are built: * If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension. * If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0). * If the coordinates are not normalized they are interpreted as numbers of pixels. Usage Example: >>> x = [[[[0.0], ... [1.0], ... [2.0]], ... [[3.0], ... [4.0], ... [5.0]], ... [[6.0], ... [7.0], ... [8.0]]]] >>> tf.compat.v1.image.extract_glimpse(x, size=(2, 2), offsets=[[1, 1]], ... centered=False, normalized=False) <tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy= array([[[[0.], [1.]], [[3.], [4.]]]], dtype=float32)> Args: input: A `Tensor` of type `float32`. A 4-D float tensor of shape `[batch_size, height, width, channels]`. size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, following by the glimpse width. offsets: A `Tensor` of type `float32`. A 2-D integer tensor of shape `[batch_size, 2]` containing the y, x locations of the center of each window. centered: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are centered relative to the image, in which case the (0, 0) offset is relative to the center of the input images. If false, the (0,0) offset corresponds to the upper left corner of the input images. 
normalized: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are normalized. uniform_noise: An optional `bool`. Defaults to `True`. indicates if the noise should be generated using a uniform distribution or a Gaussian distribution. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ return gen_image_ops.extract_glimpse( input=input, size=size, offsets=offsets, centered=centered, normalized=normalized, uniform_noise=uniform_noise, name=name) @tf_export('image.extract_glimpse', v1=[]) @dispatch.add_dispatch_support def extract_glimpse_v2( input, # pylint: disable=redefined-builtin size, offsets, centered=True, normalized=True, noise='uniform', name=None): """Extracts a glimpse from the input tensor. Returns a set of windows called glimpses extracted at location `offsets` from the input tensor. If the windows only partially overlaps the inputs, the non-overlapping areas will be filled with random noise. The result is a 4-D tensor of shape `[batch_size, glimpse_height, glimpse_width, channels]`. The channels and batch dimensions are the same as that of the input tensor. The height and width of the output windows are specified in the `size` parameter. The argument `normalized` and `centered` controls how the windows are built: * If the coordinates are normalized but not centered, 0.0 and 1.0 correspond to the minimum and maximum of each height and width dimension. * If the coordinates are both normalized and centered, they range from -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper left corner, the lower right corner is located at (1.0, 1.0) and the center is at (0, 0). * If the coordinates are not normalized they are interpreted as numbers of pixels. Usage Example: >>> x = [[[[0.0], ... [1.0], ... [2.0]], ... [[3.0], ... [4.0], ... [5.0]], ... [[6.0], ... [7.0], ... [8.0]]]] >>> tf.image.extract_glimpse(x, size=(2, 2), offsets=[[1, 1]], ... centered=False, normalized=False) <tf.Tensor: shape=(1, 2, 2, 1), dtype=float32, numpy= array([[[[4.], [5.]], [[7.], [8.]]]], dtype=float32)> Args: input: A `Tensor` of type `float32`. A 4-D float tensor of shape `[batch_size, height, width, channels]`. size: A `Tensor` of type `int32`. A 1-D tensor of 2 elements containing the size of the glimpses to extract. The glimpse height must be specified first, following by the glimpse width. offsets: A `Tensor` of type `float32`. A 2-D integer tensor of shape `[batch_size, 2]` containing the y, x locations of the center of each window. centered: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are centered relative to the image, in which case the (0, 0) offset is relative to the center of the input images. If false, the (0,0) offset corresponds to the upper left corner of the input images. normalized: An optional `bool`. Defaults to `True`. indicates if the offset coordinates are normalized. noise: An optional `string`. Defaults to `uniform`. indicates if the noise should be `uniform` (uniform distribution), `gaussian` (gaussian distribution), or `zero` (zero padding). name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. 
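
  For instance, to pad regions that fall outside the input with zeros rather
  than noise (`images` below is an illustrative placeholder for any
  `[batch_size, height, width, channels]` float tensor):

  ```python
  glimpses = tf.image.extract_glimpse(
      images, size=(2, 2),
      offsets=tf.zeros([tf.shape(images)[0], 2]),
      centered=True, normalized=True, noise='zero')
  ```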
""" return gen_image_ops.extract_glimpse_v2( input=input, size=size, offsets=offsets, centered=centered, normalized=normalized, noise=noise, uniform_noise=False, name=name) @tf_export('image.combined_non_max_suppression') @dispatch.add_dispatch_support def combined_non_max_suppression(boxes, scores, max_output_size_per_class, max_total_size, iou_threshold=0.5, score_threshold=float('-inf'), pad_per_class=False, clip_boxes=True, name=None): """Greedily selects a subset of bounding boxes in descending order of score. This operation performs non_max_suppression on the inputs per batch, across all classes. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm is agnostic to where the origin is in the coordinate system. Also note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system result in the same boxes being selected by the algorithm. The output of this operation is the final boxes, scores and classes tensor returned after performing non_max_suppression. Args: boxes: A 4-D float `Tensor` of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then same boxes are used for all classes otherwise, if `q` is equal to number of classes, class-specific boxes are used. scores: A 3-D float `Tensor` of shape `[batch_size, num_boxes, num_classes]` representing a single score corresponding to each box (each row of boxes). max_output_size_per_class: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression per class max_total_size: A int32 scalar representing maximum number of boxes retained over all classes. Note that setting this value to a large number may result in OOM error depending on the system workload. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. pad_per_class: If false, the output nmsed boxes, scores and classes are padded/clipped to `max_total_size`. If true, the output nmsed boxes, scores and classes are padded to be of length `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in which case it is clipped to `max_total_size`. Defaults to false. clip_boxes: If true, the coordinates of output nmsed boxes will be clipped to [0, 1]. If false, output the box coordinates as it is. Defaults to true. name: A name for the operation (optional). Returns: 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor containing the non-max suppressed boxes. 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing the scores for the boxes. 'nmsed_classes': A [batch_size, max_detections] float32 tensor containing the class for boxes. 'valid_detections': A [batch_size] int32 tensor indicating the number of valid detections per batch item. Only the top valid_detections[i] entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the entries are zero paddings. 
""" with ops.name_scope(name, 'combined_non_max_suppression'): iou_threshold = ops.convert_to_tensor( iou_threshold, dtype=dtypes.float32, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, dtype=dtypes.float32, name='score_threshold') # Convert `max_total_size` to tensor *without* setting the `dtype` param. # This allows us to catch `int32` overflow case with `max_total_size` # whose expected dtype is `int32` by the op registration. Any number within # `int32` will get converted to `int32` tensor. Anything larger will get # converted to `int64`. Passing in `int64` for `max_total_size` to the op # will throw dtype mismatch exception. # TODO(b/173251596): Once there is a more general solution to warn against # int overflow conversions, revisit this check. max_total_size = ops.convert_to_tensor(max_total_size) return gen_image_ops.combined_non_max_suppression( boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold, pad_per_class, clip_boxes) def _bbox_overlap(boxes_a, boxes_b): """Calculates the overlap (iou - intersection over union) between boxes_a and boxes_b. Args: boxes_a: a tensor with a shape of [batch_size, N, 4]. N is the number of boxes per image. The last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form. boxes_b: a tensor with a shape of [batch_size, M, 4]. M is the number of boxes. The last dimension is the pixel coordinates in [ymin, xmin, ymax, xmax] form. Returns: intersection_over_union: a tensor with as a shape of [batch_size, N, M], representing the ratio of intersection area over union area (IoU) between two boxes """ with ops.name_scope('bbox_overlap'): a_y_min, a_x_min, a_y_max, a_x_max = array_ops.split( value=boxes_a, num_or_size_splits=4, axis=2) b_y_min, b_x_min, b_y_max, b_x_max = array_ops.split( value=boxes_b, num_or_size_splits=4, axis=2) # Calculates the intersection area. i_xmin = math_ops.maximum( a_x_min, array_ops.transpose(b_x_min, [0, 2, 1])) i_xmax = math_ops.minimum( a_x_max, array_ops.transpose(b_x_max, [0, 2, 1])) i_ymin = math_ops.maximum( a_y_min, array_ops.transpose(b_y_min, [0, 2, 1])) i_ymax = math_ops.minimum( a_y_max, array_ops.transpose(b_y_max, [0, 2, 1])) i_area = math_ops.maximum( (i_xmax - i_xmin), 0) * math_ops.maximum((i_ymax - i_ymin), 0) # Calculates the union area. a_area = (a_y_max - a_y_min) * (a_x_max - a_x_min) b_area = (b_y_max - b_y_min) * (b_x_max - b_x_min) EPSILON = 1e-8 # Adds a small epsilon to avoid divide-by-zero. u_area = a_area + array_ops.transpose(b_area, [0, 2, 1]) - i_area + EPSILON # Calculates IoU. intersection_over_union = i_area / u_area return intersection_over_union def _self_suppression(iou, _, iou_sum, iou_threshold): """Suppress boxes in the same tile. Compute boxes that cannot be suppressed by others (i.e., can_suppress_others), and then use them to suppress boxes in the same tile. Args: iou: a tensor of shape [batch_size, num_boxes_with_padding] representing intersection over union. iou_sum: a scalar tensor. iou_threshold: a scalar tensor. Returns: iou_suppressed: a tensor of shape [batch_size, num_boxes_with_padding]. iou_diff: a scalar tensor representing whether any box is supressed in this step. iou_sum_new: a scalar tensor of shape [batch_size] that represents the iou sum after suppression. iou_threshold: a scalar tensor. 
""" batch_size = array_ops.shape(iou)[0] can_suppress_others = math_ops.cast( array_ops.reshape( math_ops.reduce_max(iou, 1) < iou_threshold, [batch_size, -1, 1]), iou.dtype) iou_after_suppression = array_ops.reshape( math_ops.cast( math_ops.reduce_max(can_suppress_others * iou, 1) < iou_threshold, iou.dtype), [batch_size, -1, 1]) * iou iou_sum_new = math_ops.reduce_sum(iou_after_suppression, [1, 2]) return [ iou_after_suppression, math_ops.reduce_any(iou_sum - iou_sum_new > iou_threshold), iou_sum_new, iou_threshold ] def _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size): """Suppress boxes between different tiles. Args: boxes: a tensor of shape [batch_size, num_boxes_with_padding, 4] box_slice: a tensor of shape [batch_size, tile_size, 4] iou_threshold: a scalar tensor inner_idx: a scalar tensor representing the tile index of the tile that is used to supress box_slice tile_size: an integer representing the number of boxes in a tile Returns: boxes: unchanged boxes as input box_slice_after_suppression: box_slice after suppression iou_threshold: unchanged """ batch_size = array_ops.shape(boxes)[0] new_slice = array_ops.slice( boxes, [0, inner_idx * tile_size, 0], [batch_size, tile_size, 4]) iou = _bbox_overlap(new_slice, box_slice) box_slice_after_suppression = array_ops.expand_dims( math_ops.cast(math_ops.reduce_all(iou < iou_threshold, [1]), box_slice.dtype), 2) * box_slice return boxes, box_slice_after_suppression, iou_threshold, inner_idx + 1 def _suppression_loop_body(boxes, iou_threshold, output_size, idx, tile_size): """Process boxes in the range [idx*tile_size, (idx+1)*tile_size). Args: boxes: a tensor with a shape of [batch_size, anchors, 4]. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IOU. output_size: an int32 tensor of size [batch_size]. Representing the number of selected boxes for each batch. idx: an integer scalar representing induction variable. tile_size: an integer representing the number of boxes in a tile Returns: boxes: updated boxes. iou_threshold: pass down iou_threshold to the next iteration. output_size: the updated output_size. idx: the updated induction variable. """ with ops.name_scope('suppression_loop_body'): num_tiles = array_ops.shape(boxes)[1] // tile_size batch_size = array_ops.shape(boxes)[0] def cross_suppression_func(boxes, box_slice, iou_threshold, inner_idx): return _cross_suppression(boxes, box_slice, iou_threshold, inner_idx, tile_size) # Iterates over tiles that can possibly suppress the current tile. box_slice = array_ops.slice(boxes, [0, idx * tile_size, 0], [batch_size, tile_size, 4]) _, box_slice, _, _ = control_flow_ops.while_loop( lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx, cross_suppression_func, [boxes, box_slice, iou_threshold, constant_op.constant(0)]) # Iterates over the current tile to compute self-suppression. 
iou = _bbox_overlap(box_slice, box_slice) mask = array_ops.expand_dims( array_ops.reshape( math_ops.range(tile_size), [1, -1]) > array_ops.reshape( math_ops.range(tile_size), [-1, 1]), 0) iou *= math_ops.cast( math_ops.logical_and(mask, iou >= iou_threshold), iou.dtype) suppressed_iou, _, _, _ = control_flow_ops.while_loop( lambda _iou, loop_condition, _iou_sum, _: loop_condition, _self_suppression, [iou, constant_op.constant(True), math_ops.reduce_sum(iou, [1, 2]), iou_threshold]) suppressed_box = math_ops.reduce_sum(suppressed_iou, 1) > 0 box_slice *= array_ops.expand_dims( 1.0 - math_ops.cast(suppressed_box, box_slice.dtype), 2) # Uses box_slice to update the input boxes. mask = array_ops.reshape( math_ops.cast( math_ops.equal(math_ops.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1]) boxes = array_ops.tile(array_ops.expand_dims( box_slice, [1]), [1, num_tiles, 1, 1]) * mask + array_ops.reshape( boxes, [batch_size, num_tiles, tile_size, 4]) * (1 - mask) boxes = array_ops.reshape(boxes, [batch_size, -1, 4]) # Updates output_size. output_size += math_ops.reduce_sum( math_ops.cast( math_ops.reduce_any(box_slice > 0, [2]), dtypes.int32), [1]) return boxes, iou_threshold, output_size, idx + 1 @tf_export('image.non_max_suppression_padded') @dispatch.add_dispatch_support def non_max_suppression_padded(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), pad_to_max_output_size=False, name=None, sorted_input=False, canonicalized_coordinates=False, tile_size=512): """Greedily selects a subset of bounding boxes in descending order of score. Performs algorithmically equivalent operation to tf.image.non_max_suppression, with the addition of an optional parameter which zero-pads the output to be of size `max_output_size`. The output of this operation is a tuple containing the set of integers indexing into the input collection of bounding boxes representing the selected boxes and the number of valid indices in the index set. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.slice` and `tf.gather` operations. For example: ```python selected_indices_padded, num_valid = tf.image.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size=True) selected_indices = tf.slice( selected_indices_padded, tf.constant([0]), num_valid) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4]. Dimensions except the last two are batch dimensions. scores: a tensor of rank 1 or higher with a shape of [..., num_boxes]. max_output_size: a scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression. Note that setting this value to a large number may result in OOM error depending on the system workload. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IoU (intersection over union). score_threshold: a float representing the threshold for box scores. Boxes with a score that is not larger than this threshold will be suppressed. pad_to_max_output_size: whether to pad the output idx to max_output_size. Must be set to True when the input is a batch of images. name: name of operation. sorted_input: a boolean indicating whether the input boxes and scores are sorted in descending order by the score. 
canonicalized_coordinates: if box coordinates are given as `[y_min, x_min, y_max, x_max]`, setting to True eliminate redundant computation to canonicalize box coordinates. tile_size: an integer representing the number of boxes in a tile, i.e., the maximum number of boxes per image that can be used to suppress other boxes in parallel; larger tile_size means larger parallelism and potentially more redundant work. Returns: idx: a tensor with a shape of [..., num_boxes] representing the indices selected by non-max suppression. The leading dimensions are the batch dimensions of the input boxes. All numbers are within [0, num_boxes). For each image (i.e., idx[i]), only the first num_valid[i] indices (i.e., idx[i][:num_valid[i]]) are valid. num_valid: a tensor of rank 0 or higher with a shape of [...] representing the number of valid indices in idx. Its dimensions are the batch dimensions of the input boxes. Raises: ValueError: When set pad_to_max_output_size to False for batched input. """ with ops.name_scope(name, 'non_max_suppression_padded'): if not pad_to_max_output_size: # pad_to_max_output_size may be set to False only when the shape of # boxes is [num_boxes, 4], i.e., a single image. We make best effort to # detect violations at compile time. If `boxes` does not have a static # rank, the check allows computation to proceed. if boxes.get_shape().rank is not None and boxes.get_shape().rank > 2: raise ValueError("'pad_to_max_output_size' (value {}) must be True for " 'batched input'.format(pad_to_max_output_size)) if name is None: name = '' idx, num_valid = non_max_suppression_padded_v2( boxes, scores, max_output_size, iou_threshold, score_threshold, sorted_input, canonicalized_coordinates, tile_size) # def_function.function seems to lose shape information, so set it here. if not pad_to_max_output_size: idx = idx[0, :num_valid] else: batch_dims = array_ops.concat([ array_ops.shape(boxes)[:-2], array_ops.expand_dims(max_output_size, 0) ], 0) idx = array_ops.reshape(idx, batch_dims) return idx, num_valid # TODO(b/158709815): Improve performance regression due to # def_function.function. @def_function.function( experimental_implements='non_max_suppression_padded_v2') def non_max_suppression_padded_v2(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), sorted_input=False, canonicalized_coordinates=False, tile_size=512): """Non-maximum suppression. Prunes away boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. Bounding boxes are supplied as `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the coordinates of any diagonal pair of box corners and the coordinates can be provided as normalized (i.e., lying in the interval `[0, 1]`) or absolute. The bounding box coordinates are cannonicalized to `[y_min, x_min, y_max, x_max]`, where `(y_min, x_min)` and `(y_max, x_mas)` are the coordinates of the lower left and upper right corner. User may indiciate the input box coordinates are already canonicalized to eliminate redundant work by setting canonicalized_coordinates to `True`. Note that this algorithm is agnostic to where the origin is in the coordinate system. Note that this algorithm is invariant to orthogonal transformations and translations of the coordinate system; thus translating or reflections of the coordinate system result in the same boxes being selected by the algorithm. 
Similar to tf.image.non_max_suppression, non_max_suppression_padded implements hard NMS but can operate on a batch of images and improves performance by titling the bounding boxes. Non_max_suppression_padded should be preferred over tf.image_non_max_suppression when running on devices with abundant parallelsim for higher computation speed. For soft NMS, refer to tf.image.non_max_suppression_with_scores. While a serial NMS algorithm iteratively uses the highest-scored unprocessed box to suppress boxes, this algorithm uses many boxes to suppress other boxes in parallel. The key idea is to partition boxes into tiles based on their score and suppresses boxes tile by tile, thus achieving parallelism within a tile. The tile size determines the degree of parallelism. In cross suppression (using boxes of tile A to suppress boxes of tile B), all boxes in A can independently suppress boxes in B. Self suppression (suppressing boxes of the same tile) needs to be iteratively applied until there's no more suppression. In each iteration, boxes that cannot be suppressed are used to suppress boxes in the same tile. boxes = boxes.pad_to_multiply_of(tile_size) num_tiles = len(boxes) // tile_size output_boxes = [] for i in range(num_tiles): box_tile = boxes[i*tile_size : (i+1)*tile_size] for j in range(i - 1): # in parallel suppress boxes in box_tile using boxes from suppressing_tile suppressing_tile = boxes[j*tile_size : (j+1)*tile_size] iou = _bbox_overlap(box_tile, suppressing_tile) # if the box is suppressed in iou, clear it to a dot box_tile *= _update_boxes(iou) # Iteratively handle the diagnal tile. iou = _box_overlap(box_tile, box_tile) iou_changed = True while iou_changed: # boxes that are not suppressed by anything else suppressing_boxes = _get_suppressing_boxes(iou) # boxes that are suppressed by suppressing_boxes suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes) # clear iou to 0 for boxes that are suppressed, as they cannot be used # to suppress other boxes any more new_iou = _clear_iou(iou, suppressed_boxes) iou_changed = (new_iou != iou) iou = new_iou # remaining boxes that can still suppress others, are selected boxes. output_boxes.append(_get_suppressing_boxes(iou)) if len(output_boxes) >= max_output_size: break Args: boxes: a tensor of rank 2 or higher with a shape of [..., num_boxes, 4]. Dimensions except the last two are batch dimensions. The last dimension represents box coordinates, given as [y_1, x_1, y_2, x_2]. The coordinates on each dimension can be given in any order (see also `canonicalized_coordinates`) but must describe a box with a positive area. scores: a tensor of rank 1 or higher with a shape of [..., num_boxes]. max_output_size: a scalar integer `Tensor` representing the maximum number of boxes to be selected by non max suppression. iou_threshold: a float representing the threshold for deciding whether boxes overlap too much with respect to IoU (intersection over union). score_threshold: a float representing the threshold for box scores. Boxes with a score that is not larger than this threshold will be suppressed. sorted_input: a boolean indicating whether the input boxes and scores are sorted in descending order by the score. canonicalized_coordinates: if box coordinates are given as `[y_min, x_min, y_max, x_max]`, setting to True eliminate redundant computation to canonicalize box coordinates. 
tile_size: an integer representing the number of boxes in a tile, i.e., the maximum number of boxes per image that can be used to suppress other boxes in parallel; larger tile_size means larger parallelism and potentially more redundant work. Returns: idx: a tensor with a shape of [..., num_boxes] representing the indices selected by non-max suppression. The leading dimensions are the batch dimensions of the input boxes. All numbers are within [0, num_boxes). For each image (i.e., idx[i]), only the first num_valid[i] indices (i.e., idx[i][:num_valid[i]]) are valid. num_valid: a tensor of rank 0 or higher with a shape of [...] representing the number of valid indices in idx. Its dimensions are the batch dimensions of the input boxes. Raises: ValueError: When set pad_to_max_output_size to False for batched input. """ def _sort_scores_and_boxes(scores, boxes): """Sort boxes based their score from highest to lowest. Args: scores: a tensor with a shape of [batch_size, num_boxes] representing the scores of boxes. boxes: a tensor with a shape of [batch_size, num_boxes, 4] representing the boxes. Returns: sorted_scores: a tensor with a shape of [batch_size, num_boxes] representing the sorted scores. sorted_boxes: a tensor representing the sorted boxes. sorted_scores_indices: a tensor with a shape of [batch_size, num_boxes] representing the index of the scores in a sorted descending order. """ with ops.name_scope('sort_scores_and_boxes'): batch_size = array_ops.shape(boxes)[0] num_boxes = array_ops.shape(boxes)[1] sorted_scores_indices = sort_ops.argsort( scores, axis=1, direction='DESCENDING') index_offsets = math_ops.range(batch_size) * num_boxes indices = array_ops.reshape( sorted_scores_indices + array_ops.expand_dims(index_offsets, 1), [-1]) sorted_scores = array_ops.reshape( array_ops.gather(array_ops.reshape(scores, [-1]), indices), [batch_size, -1]) sorted_boxes = array_ops.reshape( array_ops.gather(array_ops.reshape(boxes, [-1, 4]), indices), [batch_size, -1, 4]) return sorted_scores, sorted_boxes, sorted_scores_indices batch_dims = array_ops.shape(boxes)[:-2] num_boxes = array_ops.shape(boxes)[-2] boxes = array_ops.reshape(boxes, [-1, num_boxes, 4]) scores = array_ops.reshape(scores, [-1, num_boxes]) batch_size = array_ops.shape(boxes)[0] if score_threshold != float('-inf'): with ops.name_scope('filter_by_score'): score_mask = math_ops.cast(scores > score_threshold, scores.dtype) scores *= score_mask box_mask = array_ops.expand_dims( math_ops.cast(score_mask, boxes.dtype), 2) boxes *= box_mask if not canonicalized_coordinates: with ops.name_scope('canonicalize_coordinates'): y_1, x_1, y_2, x_2 = array_ops.split( value=boxes, num_or_size_splits=4, axis=2) y_1_is_min = math_ops.reduce_all( math_ops.less_equal(y_1[0, 0, 0], y_2[0, 0, 0])) y_min, y_max = control_flow_ops.cond( y_1_is_min, lambda: (y_1, y_2), lambda: (y_2, y_1)) x_1_is_min = math_ops.reduce_all( math_ops.less_equal(x_1[0, 0, 0], x_2[0, 0, 0])) x_min, x_max = control_flow_ops.cond( x_1_is_min, lambda: (x_1, x_2), lambda: (x_2, x_1)) boxes = array_ops.concat([y_min, x_min, y_max, x_max], axis=2) if not sorted_input: scores, boxes, sorted_indices = _sort_scores_and_boxes(scores, boxes) else: # Default value required for Autograph. 
sorted_indices = array_ops.zeros_like(scores, dtype=dtypes.int32) pad = math_ops.cast( math_ops.ceil( math_ops.cast( math_ops.maximum(num_boxes, max_output_size), dtypes.float32) / math_ops.cast(tile_size, dtypes.float32)), dtypes.int32) * tile_size - num_boxes boxes = array_ops.pad( math_ops.cast(boxes, dtypes.float32), [[0, 0], [0, pad], [0, 0]]) scores = array_ops.pad( math_ops.cast(scores, dtypes.float32), [[0, 0], [0, pad]]) num_boxes_after_padding = num_boxes + pad num_iterations = num_boxes_after_padding // tile_size def _loop_cond(unused_boxes, unused_threshold, output_size, idx): return math_ops.logical_and( math_ops.reduce_min(output_size) < max_output_size, idx < num_iterations) def suppression_loop_body(boxes, iou_threshold, output_size, idx): return _suppression_loop_body( boxes, iou_threshold, output_size, idx, tile_size) selected_boxes, _, output_size, _ = control_flow_ops.while_loop( _loop_cond, suppression_loop_body, [ boxes, iou_threshold, array_ops.zeros([batch_size], dtypes.int32), constant_op.constant(0) ], shape_invariants=[ tensor_shape.TensorShape([None, None, 4]), tensor_shape.TensorShape([]), tensor_shape.TensorShape([None]), tensor_shape.TensorShape([]), ], ) num_valid = math_ops.minimum(output_size, max_output_size) idx = num_boxes_after_padding - math_ops.cast( nn_ops.top_k( math_ops.cast(math_ops.reduce_any( selected_boxes > 0, [2]), dtypes.int32) * array_ops.expand_dims( math_ops.range(num_boxes_after_padding, 0, -1), 0), max_output_size)[0], dtypes.int32) idx = math_ops.minimum(idx, num_boxes - 1) if not sorted_input: index_offsets = math_ops.range(batch_size) * num_boxes gather_idx = array_ops.reshape( idx + array_ops.expand_dims(index_offsets, 1), [-1]) idx = array_ops.reshape( array_ops.gather(array_ops.reshape(sorted_indices, [-1]), gather_idx), [batch_size, -1]) invalid_index = array_ops.zeros([batch_size, max_output_size], dtype=dtypes.int32) idx_index = array_ops.expand_dims(math_ops.range(max_output_size), 0) num_valid_expanded = array_ops.expand_dims(num_valid, 1) idx = array_ops.where(idx_index < num_valid_expanded, idx, invalid_index) num_valid = array_ops.reshape(num_valid, batch_dims) return idx, num_valid def non_max_suppression_padded_v1(boxes, scores, max_output_size, iou_threshold=0.5, score_threshold=float('-inf'), pad_to_max_output_size=False, name=None): """Greedily selects a subset of bounding boxes in descending order of score. Performs algorithmically equivalent operation to tf.image.non_max_suppression, with the addition of an optional parameter which zero-pads the output to be of size `max_output_size`. The output of this operation is a tuple containing the set of integers indexing into the input collection of bounding boxes representing the selected boxes and the number of valid indices in the index set. The bounding box coordinates corresponding to the selected indices can then be obtained using the `tf.slice` and `tf.gather` operations. For example: ```python selected_indices_padded, num_valid = tf.image.non_max_suppression_padded( boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size=True) selected_indices = tf.slice( selected_indices_padded, tf.constant([0]), num_valid) selected_boxes = tf.gather(boxes, selected_indices) ``` Args: boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`. scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single score corresponding to each box (each row of boxes). 
max_output_size: A scalar integer `Tensor` representing the maximum number of boxes to be selected by non-max suppression. iou_threshold: A float representing the threshold for deciding whether boxes overlap too much with respect to IOU. score_threshold: A float representing the threshold for deciding when to remove boxes based on score. pad_to_max_output_size: bool. If True, size of `selected_indices` output is padded to `max_output_size`. name: A name for the operation (optional). Returns: selected_indices: A 1-D integer `Tensor` of shape `[M]` representing the selected indices from the boxes tensor, where `M <= max_output_size`. valid_outputs: A scalar integer `Tensor` denoting how many elements in `selected_indices` are valid. Valid elements occur first, then padding. """ with ops.name_scope(name, 'non_max_suppression_padded'): iou_threshold = ops.convert_to_tensor(iou_threshold, name='iou_threshold') score_threshold = ops.convert_to_tensor( score_threshold, name='score_threshold') return gen_image_ops.non_max_suppression_v4(boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size) @tf_export('image.draw_bounding_boxes', v1=[]) @dispatch.add_dispatch_support def draw_bounding_boxes_v2(images, boxes, colors, name=None): """Draw bounding boxes on a batch of images. Outputs a copy of `images` but draws on top of the pixels zero or more bounding boxes specified by the locations in `boxes`. The coordinates of the each bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and the height of the underlying image. For example, if an image is 100 x 200 pixels (height x width) and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates). Parts of the bounding box may fall outside the image. Args: images: A `Tensor`. Must be one of the following types: `float32`, `half`. 4-D with shape `[batch, height, width, depth]`. A batch of images. boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding boxes. colors: A `Tensor` of type `float32`. 2-D. A list of RGBA colors to cycle through for the boxes. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `images`. Usage Example: >>> # create an empty image >>> img = tf.zeros([1, 3, 3, 3]) >>> # draw a box around the image >>> box = np.array([0, 0, 1, 1]) >>> boxes = box.reshape([1, 1, 4]) >>> # alternate between red and blue >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) >>> tf.image.draw_bounding_boxes(img, boxes, colors) <tf.Tensor: shape=(1, 3, 3, 3), dtype=float32, numpy= array([[[[1., 0., 0.], [1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [0., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.], [1., 0., 0.]]]], dtype=float32)> """ if colors is None: return gen_image_ops.draw_bounding_boxes(images, boxes, name) return gen_image_ops.draw_bounding_boxes_v2(images, boxes, colors, name) @tf_export(v1=['image.draw_bounding_boxes']) @dispatch.add_dispatch_support def draw_bounding_boxes(images, boxes, name=None, colors=None): """Draw bounding boxes on a batch of images. Outputs a copy of `images` but draws on top of the pixels zero or more bounding boxes specified by the locations in `boxes`. The coordinates of the each bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. 
The bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and the height of the underlying image. For example, if an image is 100 x 200 pixels (height x width) and the bounding box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates). Parts of the bounding box may fall outside the image. Args: images: A `Tensor`. Must be one of the following types: `float32`, `half`. 4-D with shape `[batch, height, width, depth]`. A batch of images. boxes: A `Tensor` of type `float32`. 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding boxes. name: A name for the operation (optional). colors: A `Tensor` of type `float32`. 2-D. A list of RGBA colors to cycle through for the boxes. Returns: A `Tensor`. Has the same type as `images`. Usage Example: >>> # create an empty image >>> img = tf.zeros([1, 3, 3, 3]) >>> # draw a box around the image >>> box = np.array([0, 0, 1, 1]) >>> boxes = box.reshape([1, 1, 4]) >>> # alternate between red and blue >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) >>> tf.image.draw_bounding_boxes(img, boxes, colors) <tf.Tensor: shape=(1, 3, 3, 3), dtype=float32, numpy= array([[[[1., 0., 0.], [1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [0., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.], [1., 0., 0.]]]], dtype=float32)> """ return draw_bounding_boxes_v2(images, boxes, colors, name) @tf_export('image.generate_bounding_box_proposals') @dispatch.add_dispatch_support def generate_bounding_box_proposals(scores, bbox_deltas, image_info, anchors, nms_threshold=0.7, pre_nms_topn=6000, min_size=16, post_nms_topn=300, name=None): """Generate bounding box proposals from encoded bounding boxes. Args: scores: A 4-D float `Tensor` of shape `[num_images, height, width, num_achors]` containing scores of the boxes for given anchors, can be unsorted. bbox_deltas: A 4-D float `Tensor` of shape `[num_images, height, width, 4 x num_anchors]` encoding boxes with respect to each anchor. Coordinates are given in the form `[dy, dx, dh, dw]`. image_info: A 2-D float `Tensor` of shape `[num_images, 5]` containing image information Height, Width, Scale. anchors: A 2-D float `Tensor` of shape `[num_anchors, 4]` describing the anchor boxes. Boxes are formatted in the form `[y1, x1, y2, x2]`. nms_threshold: A scalar float `Tensor` for non-maximal-suppression threshold. Defaults to 0.7. pre_nms_topn: A scalar int `Tensor` for the number of top scoring boxes to be used as input. Defaults to 6000. min_size: A scalar float `Tensor`. Any box that has a smaller size than min_size will be discarded. Defaults to 16. post_nms_topn: An integer. Maximum number of rois in the output. name: A name for this operation (optional). Returns: rois: Region of interest boxes sorted by their scores. roi_probabilities: scores of the ROI boxes in the ROIs' `Tensor`. """ return gen_image_ops.generate_bounding_box_proposals( scores=scores, bbox_deltas=bbox_deltas, image_info=image_info, anchors=anchors, nms_threshold=nms_threshold, pre_nms_topn=pre_nms_topn, min_size=min_size, post_nms_topn=post_nms_topn, name=name)
apache-2.0
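The padded non-max-suppression entry points documented in the record above can be exercised with a short sketch like the following. This is illustrative only and not part of the dataset record; it assumes TensorFlow 2.x with eager execution and follows the signatures shown in the docstrings.

```python
# Minimal sketch (not part of the dataset record): single-image padded NMS.
import tensorflow as tf

boxes = tf.constant([[0.0, 0.0, 1.0, 1.0],
                     [0.0, 0.0, 0.9, 0.9],
                     [0.5, 0.5, 1.0, 1.0]])
scores = tf.constant([0.9, 0.8, 0.3])

# With pad_to_max_output_size left at False and a single image, the returned
# idx already contains only the valid indices, per the code above.
idx, num_valid = tf.image.non_max_suppression_padded(
    boxes, scores, max_output_size=2, iou_threshold=0.5)
selected = tf.gather(boxes, idx)
print(num_valid.numpy(), selected.numpy())
```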
motion2015/edx-platform
cms/lib/xblock/authoring_mixin.py
163
1500
""" Mixin class that provides authoring capabilities for XBlocks. """ import logging from django.conf import settings from xblock.core import XBlock from xblock.fields import XBlockMixin from xblock.fragment import Fragment log = logging.getLogger(__name__) VISIBILITY_VIEW = 'visibility_view' @XBlock.needs("i18n") class AuthoringMixin(XBlockMixin): """ Mixin class that provides authoring capabilities for XBlocks. """ _services_requested = { 'i18n': 'need', } def _get_studio_resource_url(self, relative_url): """ Returns the Studio URL to a static resource. """ return settings.STATIC_URL + relative_url def visibility_view(self, _context=None): """ Render the view to manage an xblock's visibility settings in Studio. Args: _context: Not actively used for this view. Returns: (Fragment): An HTML fragment for editing the visibility of this XBlock. """ fragment = Fragment() from contentstore.utils import reverse_course_url fragment.add_content(self.system.render_template('visibility_editor.html', { 'xblock': self, 'manage_groups_url': reverse_course_url('group_configurations_list_handler', self.location.course_key), })) fragment.add_javascript_url(self._get_studio_resource_url('/js/xblock/authoring.js')) fragment.initialize_js('VisibilityEditorInit') return fragment
agpl-3.0
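A hypothetical sketch of how a block class might pick up the AuthoringMixin defined above. The class name and field are invented for illustration; in edx-platform the mixin is normally applied through the Studio runtime's mixin configuration, which also supplies `self.system` and `self.location` used by `visibility_view`.

```python
# Hypothetical sketch (not part of the dataset record): combining the mixin
# with an XBlock class. Assumes AuthoringMixin from the module above is in scope.
from xblock.core import XBlock
from xblock.fields import Scope, String


class ExampleBlock(AuthoringMixin, XBlock):
    display_name = String(default="Example", scope=Scope.settings)

# Studio would then call block.visibility_view() to obtain the HTML Fragment
# rendered by the mixin for the visibility editor.
```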
stonebig/bokeh
bokeh/models/axes.py
2
11338
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors. # All rights reserved. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- ''' Guide renderers for various kinds of axes that can be added to Bokeh plots ''' #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function, unicode_literals import logging log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports # External imports # Bokeh imports from ..core.enums import TickLabelOrientation from ..core.has_props import abstract from ..core.properties import Auto, Datetime, Dict, Either, Enum, Float, Include, Instance, Int, Override, Seq, String, Tuple from ..core.property_mixins import LineProps, TextProps from .formatters import BasicTickFormatter, CategoricalTickFormatter, DatetimeTickFormatter, LogTickFormatter, TickFormatter, MercatorTickFormatter from .renderers import GuideRenderer from .tickers import Ticker, BasicTicker, LogTicker, CategoricalTicker, DatetimeTicker, FixedTicker, MercatorTicker #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- __all__ = ( 'Axis', 'CategoricalAxis', 'ContinuousAxis', 'DatetimeAxis', 'LinearAxis', 'LogAxis', 'MercatorAxis', ) #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- @abstract class Axis(GuideRenderer): ''' A base class that defines common properties for all axis types. ''' bounds = Either(Auto, Tuple(Float, Float), Tuple(Datetime, Datetime), help=""" Bounds for the rendered axis. If unset, the axis will span the entire plot in the given dimension. """) x_range_name = String('default', help=""" A particular (named) x-range to use for computing screen locations when rendering an axis on the plot. If unset, use the default x-range. """) y_range_name = String('default', help=""" A particular (named) y-range to use for computing screen locations when rendering an axis on the plot. If unset, use the default y-range. """) ticker = Instance(Ticker, help=""" A Ticker to use for computing locations of axis components. The property may also be passed a sequence of floating point numbers as a shorthand for creating and configuring a ``FixedTicker``, e.g. the following code .. code-block:: python from bokeh.plotting import figure p = figure() p.xaxis.ticker = [10, 20, 37.4] is equivalent to: .. code-block:: python from bokeh.plotting import figure from bokeh.models.tickers import FixedTicker p = figure() p.xaxis.ticker = FixedTicker(ticks=[10, 20, 37.4]) """).accepts(Seq(Float), lambda ticks: FixedTicker(ticks=ticks)) formatter = Instance(TickFormatter, help=""" A ``TickFormatter`` to use for formatting the visual appearance of ticks. """) axis_label = String(default='', help=""" A text label for the axis, displayed parallel to the axis rule. .. 
note:: LaTeX notation is not currently supported; please see :bokeh-issue:`647` to track progress or contribute. """) axis_label_standoff = Int(default=5, help=""" The distance in pixels that the axis labels should be offset from the tick labels. """) axis_label_props = Include(TextProps, help=""" The %s of the axis label. """) axis_label_text_font_size = Override(default={'value': "10pt"}) axis_label_text_font_style = Override(default="italic") major_label_standoff = Int(default=5, help=""" The distance in pixels that the major tick labels should be offset from the associated ticks. """) major_label_orientation = Either(Enum("horizontal", "vertical"), Float, help=""" What direction the major label text should be oriented. If a number is supplied, the angle of the text is measured from horizontal. """) major_label_overrides = Dict(Either(Float, String), String, default={}, help=""" Provide explicit tick label values for specific tick locations that override normal formatting. """) major_label_props = Include(TextProps, help=""" The %s of the major tick labels. """) major_label_text_align = Override(default="center") major_label_text_baseline = Override(default="alphabetic") major_label_text_font_size = Override(default={'value': "8pt"}) axis_props = Include(LineProps, help=""" The %s of the axis line. """) major_tick_props = Include(LineProps, help=""" The %s of the major ticks. """) major_tick_in = Int(default=2, help=""" The distance in pixels that major ticks should extend into the main plot area. """) major_tick_out = Int(default=6, help=""" The distance in pixels that major ticks should extend out of the main plot area. """) minor_tick_props = Include(LineProps, help=""" The %s of the minor ticks. """) minor_tick_in = Int(default=0, help=""" The distance in pixels that minor ticks should extend into the main plot area. """) minor_tick_out = Int(default=4, help=""" The distance in pixels that major ticks should extend out of the main plot area. """) fixed_location = Either(Float, String, Tuple(String, String), Tuple(String, String, String), default=None, help=""" Set to specify a fixed coordinate location to draw the axis. The direction of ticks and major labels is determined by the side panel that the axis belongs to. .. note:: Axes labels are suppressed when axes are positioned at fixed locations inside the central plot area. """) @abstract class ContinuousAxis(Axis): ''' A base class for all numeric, non-categorical axes types. ''' pass class LinearAxis(ContinuousAxis): ''' An axis that picks nice numbers for tick locations on a linear scale. Configured with a ``BasicTickFormatter`` by default. ''' ticker = Override(default=lambda: BasicTicker()) formatter = Override(default=lambda: BasicTickFormatter()) class LogAxis(ContinuousAxis): ''' An axis that picks nice numbers for tick locations on a log scale. Configured with a ``LogTickFormatter`` by default. ''' ticker = Override(default=lambda: LogTicker()) formatter = Override(default=lambda: LogTickFormatter()) class CategoricalAxis(Axis): ''' An axis that displays ticks and labels for categorical ranges. The ``CategoricalAxis`` can handle factor ranges with up to two levels of nesting, including drawing a separator line between top-level groups of factors. ''' ticker = Override(default=lambda: CategoricalTicker()) formatter = Override(default=lambda: CategoricalTickFormatter()) separator_props = Include(LineProps, help=""" The %s of the separator line between top-level categorical groups. 
This property always applies to factors in the outermost level of nesting. """) separator_line_color = Override(default="lightgrey") separator_line_width = Override(default=2) group_props = Include(TextProps, help=""" The %s of the group categorical labels. This property always applies to factors in the outermost level of nesting. If the list of categorical factors is flat (i.e. no nesting) then this property has no effect. """) group_label_orientation = Either(Enum(TickLabelOrientation), Float, default="parallel", help=""" What direction the group label text should be oriented. If a number is supplied, the angle of the text is measured from horizontal. This property always applies to factors in the outermost level of nesting. If the list of categorical factors is flat (i.e. no nesting) then this property has no effect. """) group_text_font_size = Override(default={'value': "8pt"}) group_text_font_style = Override(default="bold") group_text_color = Override(default="grey") subgroup_props = Include(TextProps, help=""" The %s of the subgroup categorical labels. This property always applies to factors in the middle level of nesting. If the list of categorical factors is has only zero or one levels of nesting, then this property has no effect. """) subgroup_label_orientation = Either(Enum(TickLabelOrientation), Float, default="parallel", help=""" What direction the subgroup label text should be oriented. If a number is supplied, the angle of the text is measured from horizontal. This property always applies to factors in the middle level of nesting. If the list of categorical factors is has only zero or one levels of nesting, then this property has no effect. """) subgroup_text_font_size = Override(default={'value': "8pt"}) subgroup_text_font_style = Override(default="bold") class DatetimeAxis(LinearAxis): ''' A ``LinearAxis`` that picks nice numbers for tick locations on a datetime scale. Configured with a ``DatetimeTickFormatter`` by default. ''' ticker = Override(default=lambda: DatetimeTicker()) formatter = Override(default=lambda: DatetimeTickFormatter()) class MercatorAxis(LinearAxis): ''' An axis that picks nice numbers for tick locations on a Mercator scale. Configured with a ``MercatorTickFormatter`` by default. Args: dimension ('lat' or 'lon', optional) : Whether this axis will display latitude or longitude values. (default: 'lat') ''' def __init__(self, dimension='lat', **kw): super(MercatorAxis, self).__init__(**kw) # Just being careful. It would be defeat the purpose for anyone to actually # configure this axis with differnet kinds of tickers or formatters. if isinstance(self.ticker, MercatorTicker): self.ticker.dimension = dimension if isinstance(self.formatter, MercatorTickFormatter): self.formatter.dimension = dimension ticker = Override(default=lambda: MercatorTicker()) formatter = Override(default=lambda: MercatorTickFormatter()) #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
bsd-3-clause
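A minimal sketch of configuring the axis properties documented in the record above on a Bokeh figure; it is illustrative only and assumes a Bokeh 1.x installation matching this source.

```python
# Illustrative sketch (not part of the dataset record): axis configuration.
from bokeh.plotting import figure, show

p = figure(plot_width=400, plot_height=300)
p.line([1, 2, 3, 4], [3, 1, 4, 2])

# The Seq(Float) shorthand described in the Axis.ticker help text.
p.xaxis.ticker = [1, 2, 3, 4]
# Override the labels shown at specific tick locations.
p.xaxis.major_label_overrides = {1: "one", 2: "two"}
p.yaxis.axis_label = "value"

show(p)
```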
sopier/django
django/contrib/contenttypes/management.py
476
2521
from django.apps import apps from django.db import DEFAULT_DB_ALIAS, router from django.utils import six from django.utils.six.moves import input def update_contenttypes(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, **kwargs): """ Creates content types for models in the given app, removing any model entries that no longer have a matching model class. """ if not app_config.models_module: return try: ContentType = apps.get_model('contenttypes', 'ContentType') except LookupError: return if not router.allow_migrate_model(using, ContentType): return ContentType.objects.clear_cache() app_label = app_config.label app_models = { model._meta.model_name: model for model in app_config.get_models()} if not app_models: return # Get all the content types content_types = { ct.model: ct for ct in ContentType.objects.using(using).filter(app_label=app_label) } to_remove = [ ct for (model_name, ct) in six.iteritems(content_types) if model_name not in app_models ] cts = [ ContentType( app_label=app_label, model=model_name, ) for (model_name, model) in six.iteritems(app_models) if model_name not in content_types ] ContentType.objects.using(using).bulk_create(cts) if verbosity >= 2: for ct in cts: print("Adding content type '%s | %s'" % (ct.app_label, ct.model)) # Confirm that the content type is stale before deletion. if to_remove: if interactive: content_type_display = '\n'.join( ' %s | %s' % (ct.app_label, ct.model) for ct in to_remove ) ok_to_delete = input("""The following content types are stale and need to be deleted: %s Any objects related to these content types by a foreign key will also be deleted. Are you sure you want to delete these content types? If you're unsure, answer 'no'. Type 'yes' to continue, or 'no' to cancel: """ % content_type_display) else: ok_to_delete = False if ok_to_delete == 'yes': for ct in to_remove: if verbosity >= 2: print("Deleting stale content type '%s | %s'" % (ct.app_label, ct.model)) ct.delete() else: if verbosity >= 2: print("Stale content types remain.")
bsd-3-clause
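An illustrative sketch of invoking `update_contenttypes` manually for one installed app; not part of the dataset record. It assumes a configured Django project of the same era, where this function is normally wired to the `post_migrate` signal.

```python
# Illustrative sketch (not part of the dataset record): manual invocation.
from django.apps import apps
from django.contrib.contenttypes.management import update_contenttypes

app_config = apps.get_app_config('auth')
# Creates missing ContentType rows and reports (but does not delete) stale ones
# when run non-interactively.
update_contenttypes(app_config, verbosity=2, interactive=False)
```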
datalogics-robb/scons
test/Scanner/parallel-rescan.py
2
2125
#!/usr/bin/env python # # __COPYRIGHT__ # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__" """ Verify that when a source file is generated and the -j option is used, the source file correctly gets re-scanned for implicit dependencies after it's built. """ import TestSCons test = TestSCons.TestSCons() test.write('SConstruct', """\ env = Environment() env['BUILDERS']['COPY'] = Builder(action = Copy("$TARGET", "$SOURCE")) env.COPY('a.c', 'a.in') env.COPY('b.c', 'b.in') env.StaticLibrary('lib', ['a.c', 'b.c']) """) test.write("a.in", """\ #include "a.h" """) test.write("b.in", """\ #include "b.h" """) test.write("a.h", """\ char *A_FILE = "b.in"; """) test.write("b.h", """\ char *B_FILE = "b.in"; """) test.run(arguments = '-j4 .', stderr=TestSCons.noisy_ar, match=TestSCons.match_re_dotall) # If the dependencies weren't re-scanned properly, the .h files won't # show up in the previous run's dependency lists, and the .o files and # library will get rebuilt here. test.up_to_date(arguments = '.') test.pass_test()
mit
Bristol-Braille/canute-ui
ui/i18n.py
1
2127
import gettext import logging from collections import namedtuple, OrderedDict log = logging.getLogger(__name__) def install(locale_code): try: translations = gettext.translation( 'canute', localedir='ui/locale', languages=[locale_code], fallback=False ) except OSError as e: log.warning(e) translations = gettext.NullTranslations() translations.install() # Before having installed _() we need extractors to see language titles. # It's convenient to have it act as the identity function, too. def _(x): return x Builtin = namedtuple('BuiltinLang', ['code', 'title']) # Would prefer "British English, UEB grade N" for the following but # (1) it's too long to be included in the languages menu title, (2) it # might be irrelevant if there are no British-isms in this small # collection of text, (3) US users might object on principle. # TRANSLATORS: This is a language name menu item, so should always appear # in the language it denotes so that it remains readable to those who # speak only that language, just as "Deutsch" should always be left as # "Deutsch" in a language menu. Addition of a Braille grade marker seems # appropriate, if possible. ueb1 = Builtin(code='en_GB.UTF-8@ueb1', title=_('English, UEB grade 1')) # TRANSLATORS: This is a language name menu item, so should always appear # in the language it denotes so that it remains readable to those who # speak only that language, just as "Deutsch" should always be left as # "Deutsch" in a language menu. Addition of a Braille grade marker seems # appropriate, if possible. ueb2 = Builtin(code='en_GB.UTF-8@ueb2', title=_('English, UEB grade 2')) del _ DEFAULT_LOCALE = ueb2 install(DEFAULT_LOCALE.code) # Rely on dedup. BUILTIN_LANGUAGES = OrderedDict([ (DEFAULT_LOCALE.code, _(DEFAULT_LOCALE.title)), (ueb1.code, _(ueb1.title)), (ueb2.code, _(ueb2.title)), ]) # For detecting the default language of older installations, which # didn't really have switchable language but did add a default # sort-of-locale to the global state file. OLD_DEFAULT_LOCALE = 'en_GB:en'
gpl-3.0
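An illustrative sketch of switching the installed translation at runtime with the helpers defined above; not part of the dataset record, and it assumes the repository root is on `sys.path` so the module imports as `ui.i18n`.

```python
# Illustrative sketch (not part of the dataset record): language switching.
from ui import i18n

# Available built-in locale codes and their (translatable) titles.
print(list(i18n.BUILTIN_LANGUAGES.items()))

# Install the grade-1 UEB catalogue; gettext places _() into builtins.
i18n.install(i18n.ueb1.code)
print(_('English, UEB grade 1'))
```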
super3/PyDev
Old Workspace/EndlessScroll.py
1
1826
# EndlessScroll.py # Objective: Make an endless scrollable world. # Author: Super3boy (super3.org) # Imports import pygame # Start PyGame pygame.init() # Define Colors black = [0, 0 ,0] white = [255, 255, 255] blue = [ 0, 0 , 255] green = [ 0, 255, 0] red = [255, 0, 0] class Block(pygame.sprite.Sprite): def __init__(self, locX, locY, img): # Call the parent class (Sprite) constructor pygame.sprite.Sprite.__init__(self) # Create an image self.image = pygame.image.load(img).convert() self.image.set_colorkey(white) # Set bounds self.rect = self.image.get_rect() # Set draw location self.rect.x = locX self.rect.y = locY # Set and Display Screen sizeX = 800 sizeY = 400 scrollX = 0 scrollSpeed = 5 size = [sizeX, sizeY] screen = pygame.display.set_mode(size) # Set Background and Get Size background_image = pygame.image.load("scrollback4.png").convert() background_size = background_image.get_size() # Set Screen's Title pygame.display.set_caption("Enless Scroll Test") # This is a list of sprites. # The list is managed by a class called 'RenderPlain.' sprites = pygame.sprite.RenderPlain() # Sentinel for Game Loop done = False # Game Timer clock = pygame.time.Clock() # Main Game Loop while done == False: # Limit FPS of Game Loop clock.tick(30) # Check for Events for event in pygame.event.get(): if event.type == pygame.QUIT: done = True # Clear the Screen screen.fill(white) # Set Movement key=pygame.key.get_pressed() #checking pressed keys if key[pygame.K_LEFT]: scrollX += scrollSpeed elif key[pygame.K_RIGHT]: scrollX -= scrollSpeed # Show Background screen.blit( background_image , [scrollX ,0]) # Update and Draw all the sprites sprites.update() sprites.draw(screen) # Update Display pygame.display.flip() # Exit Program pygame.quit()
mit
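The `Block` sprite defined in the script above is never instantiated; an illustrative addition (not part of the dataset record) showing how it would be used, assuming a `block.png` image exists next to the script:

```python
# Illustrative sketch: create a Block and register it with the sprite group
# before the main loop; sprites.update()/sprites.draw(screen) then render it.
block = Block(100, 300, "block.png")
sprites.add(block)
```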
Tithen-Firion/youtube-dl
youtube_dl/extractor/fivemin.py
79
1917
from __future__ import unicode_literals from .common import InfoExtractor class FiveMinIE(InfoExtractor): IE_NAME = '5min' _VALID_URL = r'(?:5min:|https?://(?:[^/]*?5min\.com/|delivery\.vidible\.tv/aol)(?:(?:Scripts/PlayerSeed\.js|playerseed/?)?\?.*?playList=)?)(?P<id>\d+)' _TESTS = [ { # From http://www.engadget.com/2013/11/15/ipad-mini-retina-display-review/ 'url': 'http://pshared.5min.com/Scripts/PlayerSeed.js?sid=281&width=560&height=345&playList=518013791', 'md5': '4f7b0b79bf1a470e5004f7112385941d', 'info_dict': { 'id': '518013791', 'ext': 'mp4', 'title': 'iPad Mini with Retina Display Review', 'description': 'iPad mini with Retina Display review', 'duration': 177, 'uploader': 'engadget', 'upload_date': '20131115', 'timestamp': 1384515288, }, 'params': { # m3u8 download 'skip_download': True, } }, { # From http://on.aol.com/video/how-to-make-a-next-level-fruit-salad-518086247 'url': '5min:518086247', 'md5': 'e539a9dd682c288ef5a498898009f69e', 'info_dict': { 'id': '518086247', 'ext': 'mp4', 'title': 'How to Make a Next-Level Fruit Salad', 'duration': 184, }, 'skip': 'no longer available', }, { 'url': 'http://embed.5min.com/518726732/', 'only_matching': True, }, { 'url': 'http://delivery.vidible.tv/aol?playList=518013791', 'only_matching': True, } ] def _real_extract(self, url): video_id = self._match_id(url) return self.url_result('aol-video:%s' % video_id)
unlicense
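An illustrative sketch (not part of the dataset record) of exercising the extractor above through the public youtube_dl API rather than instantiating `FiveMinIE` directly; it uses one of the URLs listed in `_TESTS`.

```python
# Illustrative sketch: metadata extraction only, no download.
from __future__ import unicode_literals

import youtube_dl

ydl = youtube_dl.YoutubeDL({'quiet': True, 'skip_download': True})
info = ydl.extract_info('5min:518086247', download=False)
print(info.get('id'), info.get('title'))
```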
CloudI/cloudi_api_python
cloudi.py
2
34829
#!/usr/bin/env python #-*-Mode:python;coding:utf-8;tab-width:4;c-basic-offset:4;indent-tabs-mode:()-*- # ex: set ft=python fenc=utf-8 sts=4 ts=4 sw=4 et nomod: # # MIT License # # Copyright (c) 2011-2021 Michael Truog <mjtruog at protonmail dot com> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # """ Python CloudI API <https://cloudi.org/api.html#1_Intro>. Example usage is available in the integration tests <https://cloudi.org/tutorials.html#cloudi_examples>. """ import sys import os import struct import socket import select import collections import traceback import inspect from functools import partial from timeit import default_timer from erlang import (binary_to_term, term_to_binary, OtpErlangAtom, OtpErlangBinary) if sys.version_info[0] >= 3: TypeUnicode = str def _function_argc(function): args, _, _, _, _, _, _ = inspect.getfullargspec(function) return len(args) else: TypeUnicode = unicode def _function_argc(function): # pylint: disable=deprecated-method args, _, _, _ = inspect.getargspec(function) return len(args) __all__ = [ 'API', 'InvalidInputException', 'MessageDecodingException', 'TerminateException', ] _MESSAGE_INIT = 1 _MESSAGE_SEND_ASYNC = 2 _MESSAGE_SEND_SYNC = 3 _MESSAGE_RECV_ASYNC = 4 _MESSAGE_RETURN_ASYNC = 5 _MESSAGE_RETURN_SYNC = 6 _MESSAGE_RETURNS_ASYNC = 7 _MESSAGE_KEEPALIVE = 8 _MESSAGE_REINIT = 9 _MESSAGE_SUBSCRIBE_COUNT = 10 _MESSAGE_TERM = 11 # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-public-methods # pylint: disable=useless-object-inheritance class API(object): """ CloudI API object for use in a single thread of execution """ ASYNC = 1 SYNC = -1 def __init__(self, thread_index): protocol_str = os.getenv('CLOUDI_API_INIT_PROTOCOL') if protocol_str is None: sys.stderr.write('CloudI service execution must occur in CloudI\n') raise InvalidInputException() buffer_size_str = os.getenv('CLOUDI_API_INIT_BUFFER_SIZE') if buffer_size_str is None: raise InvalidInputException() if protocol_str == 'tcp': self.__s = socket.fromfd( thread_index + 3, socket.AF_INET, socket.SOCK_STREAM ) self.__use_header = True elif protocol_str == 'udp': self.__s = socket.fromfd( thread_index + 3, socket.AF_INET, socket.SOCK_DGRAM ) self.__use_header = False elif protocol_str == 'local': self.__s = socket.fromfd( thread_index + 3, socket.AF_UNIX, socket.SOCK_STREAM ) self.__use_header = True else: raise InvalidInputException() self.__initialization_complete = False self.__terminate = False self.__size = int(buffer_size_str) self.__callbacks = {} self.__timeout_terminate = 
10 # TIMEOUT_TERMINATE_MIN self.__send(term_to_binary(OtpErlangAtom(b'init'))) (self.__process_index, self.__process_count, self.__process_count_max, self.__process_count_min, self.__prefix, self.__timeout_initialize, self.__timeout_async, self.__timeout_sync, self.__timeout_terminate, self.__priority_default) = self.__poll_request(None, False) @staticmethod def thread_count(): """ returns the thread count from the service configuration """ thread_count = os.getenv('CLOUDI_API_INIT_THREAD_COUNT') if thread_count is None: raise InvalidInputException() return int(thread_count) def subscribe(self, pattern, function): """ subscribes to a service name pattern with a callback """ if _function_argc(function) != 10: # self + arguments for a member function # api + arguments for a static function raise InvalidInputException() if not inspect.ismethod(function): function = partial(function, self) key = self.__prefix + pattern value = self.__callbacks.get(key, None) if value is None: self.__callbacks[key] = collections.deque([function]) else: value.append(function) self.__send(term_to_binary((OtpErlangAtom(b'subscribe'), pattern))) def subscribe_count(self, pattern): """ returns the number of subscriptions for a single service name pattern """ self.__send(term_to_binary((OtpErlangAtom(b'subscribe_count'), pattern))) return self.__poll_request(None, False) def unsubscribe(self, pattern): """ unsubscribes from a service name pattern once """ key = self.__prefix + pattern value = self.__callbacks.get(key, None) assert value is not None value.popleft() if value == collections.deque([]): del self.__callbacks[key] self.__send(term_to_binary((OtpErlangAtom(b'unsubscribe'), pattern))) def send_async(self, name, request, timeout=None, request_info=None, priority=None): """ sends an asynchronous service request """ # pylint: disable=too-many-arguments if timeout is None: timeout = self.__timeout_async if request_info is None: request_info = b'' if priority is None: priority = self.__priority_default self.__send(term_to_binary((OtpErlangAtom(b'send_async'), name, OtpErlangBinary(request_info), OtpErlangBinary(request), timeout, priority))) return self.__poll_request(None, False) def send_sync(self, name, request, timeout=None, request_info=None, priority=None): """ sends a synchronous service request """ # pylint: disable=too-many-arguments if timeout is None: timeout = self.__timeout_sync if request_info is None: request_info = b'' if priority is None: priority = self.__priority_default self.__send(term_to_binary((OtpErlangAtom(b'send_sync'), name, OtpErlangBinary(request_info), OtpErlangBinary(request), timeout, priority))) return self.__poll_request(None, False) def mcast_async(self, name, request, timeout=None, request_info=None, priority=None): """ sends asynchronous service requests to all subscribers of the matching service name pattern """ # pylint: disable=too-many-arguments if timeout is None: timeout = self.__timeout_async if request_info is None: request_info = b'' if priority is None: priority = self.__priority_default self.__send(term_to_binary((OtpErlangAtom(b'mcast_async'), name, OtpErlangBinary(request_info), OtpErlangBinary(request), timeout, priority))) return self.__poll_request(None, False) def forward_(self, request_type, name, request_info, request, timeout, priority, trans_id, pid): """ forwards a service request to a different service name """ # pylint: disable=too-many-arguments if request_type == API.ASYNC: self.forward_async(name, request_info, request, timeout, priority, trans_id, 
pid) elif request_type == API.SYNC: self.forward_sync(name, request_info, request, timeout, priority, trans_id, pid) else: raise InvalidInputException() def forward_async(self, name, request_info, request, timeout, priority, trans_id, pid): """ forwards an asynchronous service request to a different service name """ # pylint: disable=too-many-arguments self.__send(term_to_binary((OtpErlangAtom(b'forward_async'), name, OtpErlangBinary(request_info), OtpErlangBinary(request), timeout, priority, OtpErlangBinary(trans_id), pid))) raise ForwardAsyncException() def forward_sync(self, name, request_info, request, timeout, priority, trans_id, pid): """ forwards a synchronous service request to a different service name """ # pylint: disable=too-many-arguments self.__send(term_to_binary((OtpErlangAtom(b'forward_sync'), name, OtpErlangBinary(request_info), OtpErlangBinary(request), timeout, priority, OtpErlangBinary(trans_id), pid))) raise ForwardSyncException() def return_(self, request_type, name, pattern, response_info, response, timeout, trans_id, pid): """ provides a response to a service request """ # pylint: disable=too-many-arguments if request_type == API.ASYNC: self.return_async(name, pattern, response_info, response, timeout, trans_id, pid) elif request_type == API.SYNC: self.return_sync(name, pattern, response_info, response, timeout, trans_id, pid) else: raise InvalidInputException() def return_async(self, name, pattern, response_info, response, timeout, trans_id, pid): """ provides a response to an asynchronous service request """ # pylint: disable=too-many-arguments self.__send(term_to_binary((OtpErlangAtom(b'return_async'), name, pattern, OtpErlangBinary(response_info), OtpErlangBinary(response), timeout, OtpErlangBinary(trans_id), pid))) raise ReturnAsyncException() def return_sync(self, name, pattern, response_info, response, timeout, trans_id, pid): """ provides a response to a synchronous service request """ # pylint: disable=too-many-arguments self.__send(term_to_binary((OtpErlangAtom(b'return_sync'), name, pattern, OtpErlangBinary(response_info), OtpErlangBinary(response), timeout, OtpErlangBinary(trans_id), pid))) raise ReturnSyncException() def recv_async(self, timeout=None, trans_id=None, consume=True): """ blocks to receive an asynchronous service request response """ if timeout is None: timeout = self.__timeout_sync if trans_id is None: trans_id = b'\0' * 16 self.__send(term_to_binary((OtpErlangAtom(b'recv_async'), timeout, OtpErlangBinary(trans_id), consume))) return self.__poll_request(None, False) def process_index(self): """ returns the 0-based index of this process in the service instance """ return self.__process_index def process_count(self): """ returns the current process count based on the service configuration """ return self.__process_count def process_count_max(self): """ returns the count_process_dynamic maximum count """ return self.__process_count_max def process_count_min(self): """ returns the count_process_dynamic minimum count """ return self.__process_count_min def prefix(self): """ returns the service name pattern prefix from the service configuration """ return self.__prefix def timeout_initialize(self): """ returns the service initialization timeout """ return self.__timeout_initialize def timeout_async(self): """ returns the default asynchronous service request send timeout """ return self.__timeout_async def timeout_sync(self): """ returns the default synchronous service request send timeout """ return self.__timeout_sync def 
timeout_terminate(self): """ returns the service termination timeout """ return self.__timeout_terminate def priority_default(self): """ returns the default service request send priority """ return self.__priority_default def __null_response(self, request_type, name, pattern, request_info, request, timeout, priority, trans_id, pid): # pylint: disable=no-self-use # pylint: disable=too-many-arguments # pylint: disable=unused-argument return b'' def __callback(self, command, name, pattern, request_info, request, timeout, priority, trans_id, pid): # pylint: disable=too-many-arguments # pylint: disable=bare-except # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements # pylint: disable=too-many-locals # pylint: disable=broad-except function_queue = self.__callbacks.get(pattern, None) if function_queue is None: function = self.__null_response else: function = function_queue.popleft() function_queue.append(function) return_null_response = False if command == _MESSAGE_SEND_ASYNC: try: response = function(API.ASYNC, name, pattern, request_info, request, timeout, priority, trans_id, pid) if isinstance(response, tuple): response_info, response = response if not isinstance(response_info, (bytes, TypeUnicode)): response_info = b'' else: response_info = b'' if not isinstance(response, (bytes, TypeUnicode)): response = b'' except MessageDecodingException: self.__terminate = True return_null_response = True except TerminateException: return_null_response = True except ReturnAsyncException: return except ReturnSyncException: self.__terminate = True traceback.print_exc(file=sys.stderr) return except ForwardAsyncException: return except ForwardSyncException: self.__terminate = True traceback.print_exc(file=sys.stderr) return except AssertionError: traceback.print_exc(file=sys.stderr) sys.exit(1) except SystemExit: traceback.print_exc(file=sys.stderr) raise except Exception: return_null_response = True traceback.print_exc(file=sys.stderr) except: traceback.print_exc(file=sys.stderr) sys.exit(1) if return_null_response: response_info = b'' response = b'' try: self.return_async(name, pattern, response_info, response, timeout, trans_id, pid) except ReturnAsyncException: pass return if command == _MESSAGE_SEND_SYNC: try: response = function(API.SYNC, name, pattern, request_info, request, timeout, priority, trans_id, pid) if isinstance(response, tuple): response_info, response = response if not isinstance(response_info, (bytes, TypeUnicode)): response_info = b'' else: response_info = b'' if not isinstance(response, (bytes, TypeUnicode)): response = b'' except MessageDecodingException: self.__terminate = True return_null_response = True except TerminateException: return_null_response = True except ReturnSyncException: return except ReturnAsyncException: self.__terminate = True traceback.print_exc(file=sys.stderr) return except ForwardSyncException: return except ForwardAsyncException: self.__terminate = True traceback.print_exc(file=sys.stderr) return except AssertionError: traceback.print_exc(file=sys.stderr) sys.exit(1) except SystemExit: traceback.print_exc(file=sys.stderr) raise except Exception: return_null_response = True traceback.print_exc(file=sys.stderr) except: traceback.print_exc(file=sys.stderr) sys.exit(1) if return_null_response: response_info = b'' response = b'' try: self.return_sync(name, pattern, response_info, response, timeout, trans_id, pid) except ReturnSyncException: pass return raise MessageDecodingException() def 
__handle_events(self, external, data, data_size, j, command=None): # pylint: disable=too-many-arguments if command is None: if j > data_size: raise MessageDecodingException() i, j = j, j + 4 command = struct.unpack(b'=I', data[i:j])[0] while True: if command == _MESSAGE_TERM: self.__terminate = True if external: return False raise TerminateException(self.__timeout_terminate) if command == _MESSAGE_REINIT: i, j = j, j + 4 + 4 + 4 + 1 (self.__process_count, self.__timeout_async, self.__timeout_sync, self.__priority_default) = struct.unpack( b'=IIIb', data[i:j] ) elif command == _MESSAGE_KEEPALIVE: self.__send(term_to_binary(OtpErlangAtom(b'keepalive'))) else: raise MessageDecodingException() if j > data_size: raise MessageDecodingException() if j == data_size: return True i, j = j, j + 4 command = struct.unpack(b'=I', data[i:j])[0] def __poll_request(self, timeout, external): # pylint: disable=too-many-locals # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements if self.__terminate: if external: return False raise TerminateException(self.__timeout_terminate) if external and not self.__initialization_complete: self.__send(term_to_binary(OtpErlangAtom(b'polling'))) self.__initialization_complete = True poll_timer = None if timeout is None or timeout < 0: timeout_value = None elif timeout == 0: timeout_value = 0.0 elif timeout > 0: poll_timer = default_timer() timeout_value = timeout * 0.001 fd_in, _, fd_except = select.select([self.__s], [], [self.__s], timeout_value) if fd_except != []: return False if fd_in == []: return True data = b'' data = self.__recv(data) data_size = len(data) if data_size == 0: return False # socket was closed i, j = 0, 4 while True: command = struct.unpack(b'=I', data[i:j])[0] if command == _MESSAGE_INIT: i, j = j, j + 4 + 4 + 4 + 4 + 4 (process_index, process_count, process_count_max, process_count_min, prefix_size) = struct.unpack(b'=IIIII', data[i:j]) i, j = j, j + prefix_size + 4 + 4 + 4 + 4 + 1 (prefix, _, timeout_initialize, timeout_async, timeout_sync, timeout_terminate, priority_default) = struct.unpack( '=%dscIIIIb' % (prefix_size - 1), data[i:j] ) if j != data_size: assert external is False self.__handle_events(external, data, data_size, j) return (process_index, process_count, process_count_max, process_count_min, prefix.decode('utf-8'), timeout_initialize, timeout_sync, timeout_async, timeout_terminate, priority_default) if command in (_MESSAGE_SEND_ASYNC, _MESSAGE_SEND_SYNC): i, j = j, j + 4 name_size = struct.unpack(b'=I', data[i:j])[0] i, j = j, j + name_size + 4 (name, _, pattern_size) = struct.unpack('=%dscI' % (name_size - 1), data[i:j]) i, j = j, j + pattern_size + 4 (pattern, _, request_info_size) = struct.unpack( '=%dscI' % (pattern_size - 1), data[i:j] ) i, j = j, j + request_info_size + 1 + 4 (request_info, _, request_size) = struct.unpack( '=%dscI' % request_info_size, data[i:j] ) i, j = j, j + request_size + 1 + 4 + 1 + 16 + 4 (request, _, request_timeout, priority, trans_id, pid_size) = struct.unpack( '=%dscIb16sI' % request_size, data[i:j] ) i, j = j, j + pid_size pid = data[i:j] if j != data_size: assert external is True if not self.__handle_events(external, data, data_size, j): return False data = b'' self.__callback(command, name.decode('utf-8'), pattern.decode('utf-8'), request_info, request, request_timeout, priority, trans_id, binary_to_term(pid)) if self.__terminate: return False elif command in (_MESSAGE_RECV_ASYNC, _MESSAGE_RETURN_SYNC): i, j = j, j + 4 
response_info_size = struct.unpack(b'=I', data[i:j])[0] i, j = j, j + response_info_size + 1 + 4 (response_info, _, response_size) = struct.unpack( '=%dscI' % response_info_size, data[i:j] ) i, j = j, j + response_size + 1 + 16 (response, _, trans_id) = struct.unpack( '=%dsc16s' % response_size, data[i:j] ) if j != data_size: assert external is False self.__handle_events(external, data, data_size, j) return (response_info, response, trans_id) elif command == _MESSAGE_RETURN_ASYNC: i, j = j, j + 16 trans_id = data[i:j] if j != data_size: assert external is False self.__handle_events(external, data, data_size, j) return trans_id elif command == _MESSAGE_RETURNS_ASYNC: i, j = j, j + 4 trans_id_count = struct.unpack(b'=I', data[i:j])[0] i, j = j, j + 16 * trans_id_count trans_ids = struct.unpack( b'=' + b'16s' * trans_id_count, data[i:j] ) if j != data_size: assert external is False self.__handle_events(external, data, data_size, j) return trans_ids elif command == _MESSAGE_SUBSCRIBE_COUNT: i, j = j, j + 4 count = struct.unpack(b'=I', data[i:j])[0] if j != data_size: assert external is False self.__handle_events(external, data, data_size, j) return count elif command == _MESSAGE_TERM: if not self.__handle_events(external, data, data_size, j, command=command): return False assert False elif command == _MESSAGE_REINIT: i, j = j, j + 4 + 4 + 4 + 1 (self.__process_count, self.__timeout_async, self.__timeout_sync, self.__priority_default) = struct.unpack( b'=IIIb', data[i:j] ) if j == data_size: data = b'' elif j < data_size: i, j = j, j + 4 continue else: raise MessageDecodingException() elif command == _MESSAGE_KEEPALIVE: self.__send(term_to_binary(OtpErlangAtom(b'keepalive'))) if j == data_size: data = b'' elif j < data_size: i, j = j, j + 4 continue else: raise MessageDecodingException() else: raise MessageDecodingException() if poll_timer is not None: poll_timer_new = default_timer() elapsed = max(0, int((poll_timer_new - poll_timer) * 1000.0)) poll_timer = poll_timer_new if elapsed >= timeout: timeout = 0 else: timeout -= elapsed if timeout_value is not None: if timeout == 0: return True if timeout > 0: timeout_value = timeout * 0.001 fd_in, _, fd_except = select.select([self.__s], [], [self.__s], timeout_value) if fd_except != []: return False if fd_in == []: return True data = self.__recv(data) data_size = len(data) if data_size == 0: return False # socket was closed i, j = 0, 4 def poll(self, timeout=-1): """ blocks to process incoming CloudI service requests """ return self.__poll_request(timeout, True) def shutdown(self, reason=None): """ shutdown the service successfully """ if reason is None: reason = b'' self.__send(term_to_binary((OtpErlangAtom(b'shutdown'), reason))) @staticmethod def __text_pairs_parse(text): pairs = {} data = text.split(b'\0') for i in range(0, len(data) - 1, 2): key = data[i] current = pairs.get(key, None) if current is None: pairs[key] = data[i + 1] elif isinstance(current, list): current.append(data[i + 1]) else: pairs[key] = [current, data[i + 1]] return pairs @staticmethod def __text_pairs_new(pairs, response): text_segments = [] for key, values in pairs.items(): if isinstance(values, bytes): text_segments.append(key) text_segments.append(values) else: assert not isinstance(values, str) for value in values: text_segments.append(key) text_segments.append(value) if response and text_segments == []: return b'\0' text_segments.append(b'') return b'\0'.join(text_segments) @staticmethod def info_key_value_parse(info): """ decode service request info key/value data 
""" return API.__text_pairs_parse(info) @staticmethod def info_key_value_new(pairs, response=True): """ encode service response info key/value data """ return API.__text_pairs_new(pairs, response) def __send(self, data): if self.__use_header: data = struct.pack(b'>I', len(data)) + data self.__s.sendall(data) def __recv(self, data_old): data = b'' if self.__use_header: i = 0 while i < 4: fragment = self.__s.recv(4 - i) data += fragment i += len(fragment) total = struct.unpack(b'>I', data)[0] data = data_old i = 0 while i < total: fragment = self.__s.recv(min(total - i, self.__size)) data += fragment i += len(fragment) else: data = data_old ready = True while ready is True: fragment = self.__s.recv(self.__size) data += fragment ready = (len(fragment) == self.__size) if ready: fd_in, _, _ = select.select([self.__s], [], [], 0) ready = (fd_in != []) return data class InvalidInputException(Exception): """ Invalid Input """ def __init__(self): Exception.__init__(self, 'Invalid Input') class ReturnSyncException(Exception): """ Synchronous Call Return Invalid """ def __init__(self): Exception.__init__(self, 'Synchronous Call Return Invalid') class ReturnAsyncException(Exception): """ Asynchronous Call Return Invalid """ def __init__(self): Exception.__init__(self, 'Asynchronous Call Return Invalid') class ForwardSyncException(Exception): """ Synchronous Call Forward Invalid """ def __init__(self): Exception.__init__(self, 'Synchronous Call Forward Invalid') class ForwardAsyncException(Exception): """ Asynchronous Call Forward Invalid """ def __init__(self): Exception.__init__(self, 'Asynchronous Call Forward Invalid') class MessageDecodingException(Exception): """ Message Decoding Error """ def __init__(self): Exception.__init__(self, 'Message Decoding Error') class TerminateException(Exception): """ Terminate """ def __init__(self, timeout): Exception.__init__(self, 'Terminate') self.__timeout = timeout def timeout(self): """ return the termination timeout """ return self.__timeout class FatalError(BaseException): """ Fatal Error """ def __init__(self, message): BaseException.__init__(self, message) # force unbuffered stdout/stderr handling without external configuration if sys.stderr.__class__.__name__ != '_unbuffered': class _unbuffered(object): # pylint: disable=too-few-public-methods def __init__(self, stream): # pylint: disable=import-outside-toplevel if sys.version_info[0] >= 3: import io self.__stream = io.TextIOWrapper( stream.buffer, encoding='UTF-8', errors=stream.errors, newline=stream.newlines, line_buffering=stream.line_buffering, write_through=False, ) else: import codecs self.encoding = 'UTF-8' self.__stream = codecs.getwriter(self.encoding)(stream) def write(self, data): """ unbuffered write function """ self.__stream.write(data) self.__stream.flush() def __getattr__(self, attr): return getattr(self.__stream, attr) sys.stdout = _unbuffered(sys.stdout) sys.stderr = _unbuffered(sys.stderr)
mit
bopo/tablib
tablib/packages/xlwt/antlr.py
57
84201
## This file is part of PyANTLR. See LICENSE.txt for license ## details..........Copyright (C) Wolfgang Haefelinger, 2004. ## This file was copied for use with xlwt from the 2.7.7 ANTLR distribution. Yes, it ## says 2.7.5 below. The 2.7.5 distribution version didn't have a ## version in it. ## Here is the contents of the ANTLR 2.7.7 LICENSE.txt referred to above. # SOFTWARE RIGHTS # # ANTLR 1989-2006 Developed by Terence Parr # Partially supported by University of San Francisco & jGuru.com # # We reserve no legal rights to the ANTLR--it is fully in the # public domain. An individual or company may do whatever # they wish with source code distributed with ANTLR or the # code generated by ANTLR, including the incorporation of # ANTLR, or its output, into commerical software. # # We encourage users to develop software with ANTLR. However, # we do ask that credit is given to us for developing # ANTLR. By "credit", we mean that if you use ANTLR or # incorporate any source code into one of your programs # (commercial product, research project, or otherwise) that # you acknowledge this fact somewhere in the documentation, # research report, etc... If you like ANTLR and have # developed a nice tool with the output, please mention that # you developed it using ANTLR. In addition, we ask that the # headers remain intact in our source code. As long as these # guidelines are kept, we expect to continue enhancing this # system and expect to make other tools available as they are # completed. # # The primary ANTLR guy: # # Terence Parr # [email protected] # [email protected] ## End of contents of the ANTLR 2.7.7 LICENSE.txt ######################## ## get sys module import sys version = sys.version.split()[0] if version < '2.2.1': False = 0 if version < '2.3': True = not False ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### global symbols ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ANTLR Standard Tokens SKIP = -1 INVALID_TYPE = 0 EOF_TYPE = 1 EOF = 1 NULL_TREE_LOOKAHEAD = 3 MIN_USER_TYPE = 4 ### ANTLR's EOF Symbol EOF_CHAR = '' ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### general functions ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ## Version should be automatically derived from configure.in. For now, ## we need to bump it ourselfs. Don't remove the <version> tags. 
## <version> def version(): r = { 'major' : '2', 'minor' : '7', 'micro' : '5', 'patch' : '' , 'version': '2.7.5' } return r ## </version> def error(fmt,*args): if fmt: print "error: ", fmt % tuple(args) def ifelse(cond,_then,_else): if cond : r = _then else: r = _else return r def is_string_type(x): # return (isinstance(x,str) or isinstance(x,unicode)) # Simplify; xlwt doesn't support Python < 2.3 return isinstance(basestring) def assert_string_type(x): assert is_string_type(x) pass ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ANTLR Exceptions ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class ANTLRException(Exception): def __init__(self, *args): Exception.__init__(self, *args) class RecognitionException(ANTLRException): def __init__(self, *args): ANTLRException.__init__(self, *args) self.fileName = None self.line = -1 self.column = -1 if len(args) >= 2: self.fileName = args[1] if len(args) >= 3: self.line = args[2] if len(args) >= 4: self.column = args[3] def __str__(self): buf = [''] if self.fileName: buf.append(self.fileName + ":") if self.line != -1: if not self.fileName: buf.append("line ") buf.append(str(self.line)) if self.column != -1: buf.append(":" + str(self.column)) buf.append(":") buf.append(" ") return str('').join(buf) __repr__ = __str__ class NoViableAltException(RecognitionException): def __init__(self, *args): RecognitionException.__init__(self, *args) self.token = None self.node = None if isinstance(args[0],AST): self.node = args[0] elif isinstance(args[0],Token): self.token = args[0] else: raise TypeError("NoViableAltException requires Token or AST argument") def __str__(self): if self.token: line = self.token.getLine() col = self.token.getColumn() text = self.token.getText() return "unexpected symbol at line %s (column %s): \"%s\"" % (line,col,text) if self.node == ASTNULL: return "unexpected end of subtree" assert self.node ### hackish, we assume that an AST contains method getText return "unexpected node: %s" % (self.node.getText()) __repr__ = __str__ class NoViableAltForCharException(RecognitionException): def __init__(self, *args): self.foundChar = None if len(args) == 2: self.foundChar = args[0] scanner = args[1] RecognitionException.__init__(self, "NoViableAlt", scanner.getFilename(), scanner.getLine(), scanner.getColumn()) elif len(args) == 4: self.foundChar = args[0] fileName = args[1] line = args[2] column = args[3] RecognitionException.__init__(self, "NoViableAlt", fileName, line, column) else: RecognitionException.__init__(self, "NoViableAlt", '', -1, -1) def __str__(self): mesg = "unexpected char: " if self.foundChar >= ' ' and self.foundChar <= '~': mesg += "'" + self.foundChar + "'" elif self.foundChar: mesg += "0x" + hex(ord(self.foundChar)).upper()[2:] else: mesg += "<None>" return mesg __repr__ = __str__ class SemanticException(RecognitionException): def __init__(self, *args): RecognitionException.__init__(self, *args) class MismatchedCharException(RecognitionException): NONE = 0 CHAR = 1 NOT_CHAR = 2 RANGE = 3 NOT_RANGE = 4 SET = 5 NOT_SET = 6 def __init__(self, *args): self.args = args if len(args) == 5: # Expected range / not range if args[3]: self.mismatchType = MismatchedCharException.NOT_RANGE else: self.mismatchType = MismatchedCharException.RANGE self.foundChar = args[0] self.expecting = args[1] self.upper = args[2] self.scanner = args[4] RecognitionException.__init__(self, "Mismatched char range", self.scanner.getFilename(), self.scanner.getLine(), self.scanner.getColumn()) elif 
len(args) == 4 and is_string_type(args[1]): # Expected char / not char if args[2]: self.mismatchType = MismatchedCharException.NOT_CHAR else: self.mismatchType = MismatchedCharException.CHAR self.foundChar = args[0] self.expecting = args[1] self.scanner = args[3] RecognitionException.__init__(self, "Mismatched char", self.scanner.getFilename(), self.scanner.getLine(), self.scanner.getColumn()) elif len(args) == 4 and isinstance(args[1], BitSet): # Expected BitSet / not BitSet if args[2]: self.mismatchType = MismatchedCharException.NOT_SET else: self.mismatchType = MismatchedCharException.SET self.foundChar = args[0] self.set = args[1] self.scanner = args[3] RecognitionException.__init__(self, "Mismatched char set", self.scanner.getFilename(), self.scanner.getLine(), self.scanner.getColumn()) else: self.mismatchType = MismatchedCharException.NONE RecognitionException.__init__(self, "Mismatched char") ## Append a char to the msg buffer. If special, # then show escaped version # def appendCharName(self, sb, c): if not c or c == 65535: # 65535 = (char) -1 = EOF sb.append("'<EOF>'") elif c == '\n': sb.append("'\\n'") elif c == '\r': sb.append("'\\r'"); elif c == '\t': sb.append("'\\t'") else: sb.append('\'' + c + '\'') ## # Returns an error message with line number/column information # def __str__(self): sb = [''] sb.append(RecognitionException.__str__(self)) if self.mismatchType == MismatchedCharException.CHAR: sb.append("expecting ") self.appendCharName(sb, self.expecting) sb.append(", found ") self.appendCharName(sb, self.foundChar) elif self.mismatchType == MismatchedCharException.NOT_CHAR: sb.append("expecting anything but '") self.appendCharName(sb, self.expecting) sb.append("'; got it anyway") elif self.mismatchType in [MismatchedCharException.RANGE, MismatchedCharException.NOT_RANGE]: sb.append("expecting char ") if self.mismatchType == MismatchedCharException.NOT_RANGE: sb.append("NOT ") sb.append("in range: ") appendCharName(sb, self.expecting) sb.append("..") appendCharName(sb, self.upper) sb.append(", found ") appendCharName(sb, self.foundChar) elif self.mismatchType in [MismatchedCharException.SET, MismatchedCharException.NOT_SET]: sb.append("expecting ") if self.mismatchType == MismatchedCharException.NOT_SET: sb.append("NOT ") sb.append("one of (") for i in range(len(self.set)): self.appendCharName(sb, self.set[i]) sb.append("), found ") self.appendCharName(sb, self.foundChar) return str().join(sb).strip() __repr__ = __str__ class MismatchedTokenException(RecognitionException): NONE = 0 TOKEN = 1 NOT_TOKEN = 2 RANGE = 3 NOT_RANGE = 4 SET = 5 NOT_SET = 6 def __init__(self, *args): self.args = args self.tokenNames = [] self.token = None self.tokenText = '' self.node = None if len(args) == 6: # Expected range / not range if args[3]: self.mismatchType = MismatchedTokenException.NOT_RANGE else: self.mismatchType = MismatchedTokenException.RANGE self.tokenNames = args[0] self.expecting = args[2] self.upper = args[3] self.fileName = args[5] elif len(args) == 4 and isinstance(args[2], int): # Expected token / not token if args[3]: self.mismatchType = MismatchedTokenException.NOT_TOKEN else: self.mismatchType = MismatchedTokenException.TOKEN self.tokenNames = args[0] self.expecting = args[2] elif len(args) == 4 and isinstance(args[2], BitSet): # Expected BitSet / not BitSet if args[3]: self.mismatchType = MismatchedTokenException.NOT_SET else: self.mismatchType = MismatchedTokenException.SET self.tokenNames = args[0] self.set = args[2] else: self.mismatchType = 
MismatchedTokenException.NONE RecognitionException.__init__(self, "Mismatched Token: expecting any AST node", "<AST>", -1, -1) if len(args) >= 2: if isinstance(args[1],Token): self.token = args[1] self.tokenText = self.token.getText() RecognitionException.__init__(self, "Mismatched Token", self.fileName, self.token.getLine(), self.token.getColumn()) elif isinstance(args[1],AST): self.node = args[1] self.tokenText = str(self.node) RecognitionException.__init__(self, "Mismatched Token", "<AST>", self.node.getLine(), self.node.getColumn()) else: self.tokenText = "<empty tree>" RecognitionException.__init__(self, "Mismatched Token", "<AST>", -1, -1) def appendTokenName(self, sb, tokenType): if tokenType == INVALID_TYPE: sb.append("<Set of tokens>") elif tokenType < 0 or tokenType >= len(self.tokenNames): sb.append("<" + str(tokenType) + ">") else: sb.append(self.tokenNames[tokenType]) ## # Returns an error message with line number/column information # def __str__(self): sb = [''] sb.append(RecognitionException.__str__(self)) if self.mismatchType == MismatchedTokenException.TOKEN: sb.append("expecting ") self.appendTokenName(sb, self.expecting) sb.append(", found " + self.tokenText) elif self.mismatchType == MismatchedTokenException.NOT_TOKEN: sb.append("expecting anything but '") self.appendTokenName(sb, self.expecting) sb.append("'; got it anyway") elif self.mismatchType in [MismatchedTokenException.RANGE, MismatchedTokenException.NOT_RANGE]: sb.append("expecting token ") if self.mismatchType == MismatchedTokenException.NOT_RANGE: sb.append("NOT ") sb.append("in range: ") appendTokenName(sb, self.expecting) sb.append("..") appendTokenName(sb, self.upper) sb.append(", found " + self.tokenText) elif self.mismatchType in [MismatchedTokenException.SET, MismatchedTokenException.NOT_SET]: sb.append("expecting ") if self.mismatchType == MismatchedTokenException.NOT_SET: sb.append("NOT ") sb.append("one of (") for i in range(len(self.set)): self.appendTokenName(sb, self.set[i]) sb.append("), found " + self.tokenText) return str().join(sb).strip() __repr__ = __str__ class TokenStreamException(ANTLRException): def __init__(self, *args): ANTLRException.__init__(self, *args) # Wraps an Exception in a TokenStreamException class TokenStreamIOException(TokenStreamException): def __init__(self, *args): if args and isinstance(args[0], Exception): io = args[0] TokenStreamException.__init__(self, str(io)) self.io = io else: TokenStreamException.__init__(self, *args) self.io = self # Wraps a RecognitionException in a TokenStreamException class TokenStreamRecognitionException(TokenStreamException): def __init__(self, *args): if args and isinstance(args[0], RecognitionException): recog = args[0] TokenStreamException.__init__(self, str(recog)) self.recog = recog else: raise TypeError("TokenStreamRecognitionException requires RecognitionException argument") def __str__(self): return str(self.recog) __repr__ = __str__ class TokenStreamRetryException(TokenStreamException): def __init__(self, *args): TokenStreamException.__init__(self, *args) class CharStreamException(ANTLRException): def __init__(self, *args): ANTLRException.__init__(self, *args) # Wraps an Exception in a CharStreamException class CharStreamIOException(CharStreamException): def __init__(self, *args): if args and isinstance(args[0], Exception): io = args[0] CharStreamException.__init__(self, str(io)) self.io = io else: CharStreamException.__init__(self, *args) self.io = self class TryAgain(Exception): pass 
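
# --- Editor's illustrative sketch, not part of the original ANTLR runtime ---
# The wrapper exceptions above dispatch on the type of their first
# constructor argument: a lower-level error is wrapped but stays reachable
# through the .io / .recog attributes.  The file name and positions used
# below are made up.
if __name__ == "__main__":
    try:
        raise IOError("stream closed")
    except IOError, _ioe:
        _wrapped = TokenStreamIOException(_ioe)
        print _wrapped       # message taken from the wrapped IOError
        print _wrapped.io    # the original IOError instance
    _recog = RecognitionException("bad token", "input.txt", 3, 7)
    print TokenStreamRecognitionException(_recog)   # delegates __str__ to .recog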
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### Token ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class Token(object): SKIP = -1 INVALID_TYPE = 0 EOF_TYPE = 1 EOF = 1 NULL_TREE_LOOKAHEAD = 3 MIN_USER_TYPE = 4 def __init__(self,**argv): try: self.type = argv['type'] except: self.type = INVALID_TYPE try: self.text = argv['text'] except: self.text = "<no text>" def isEOF(self): return (self.type == EOF_TYPE) def getColumn(self): return 0 def getLine(self): return 0 def getFilename(self): return None def setFilename(self,name): return self def getText(self): return "<no text>" def setText(self,text): if is_string_type(text): pass else: raise TypeError("Token.setText requires string argument") return self def setColumn(self,column): return self def setLine(self,line): return self def getType(self): return self.type def setType(self,type): if isinstance(type,int): self.type = type else: raise TypeError("Token.setType requires integer argument") return self def toString(self): ## not optimal type_ = self.type if type_ == 3: tval = 'NULL_TREE_LOOKAHEAD' elif type_ == 1: tval = 'EOF_TYPE' elif type_ == 0: tval = 'INVALID_TYPE' elif type_ == -1: tval = 'SKIP' else: tval = type_ return '["%s",<%s>]' % (self.getText(),tval) __str__ = toString __repr__ = toString ### static attribute .. Token.badToken = Token( type=INVALID_TYPE, text="<no text>") if __name__ == "__main__": print "testing .." T = Token.badToken print T ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CommonToken ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class CommonToken(Token): def __init__(self,**argv): Token.__init__(self,**argv) self.line = 0 self.col = 0 try: self.line = argv['line'] except: pass try: self.col = argv['col'] except: pass def getLine(self): return self.line def getText(self): return self.text def getColumn(self): return self.col def setLine(self,line): self.line = line return self def setText(self,text): self.text = text return self def setColumn(self,col): self.col = col return self def toString(self): ## not optimal type_ = self.type if type_ == 3: tval = 'NULL_TREE_LOOKAHEAD' elif type_ == 1: tval = 'EOF_TYPE' elif type_ == 0: tval = 'INVALID_TYPE' elif type_ == -1: tval = 'SKIP' else: tval = type_ d = { 'text' : self.text, 'type' : tval, 'line' : self.line, 'colm' : self.col } fmt = '["%(text)s",<%(type)s>,line=%(line)s,col=%(colm)s]' return fmt % d __str__ = toString __repr__ = toString if __name__ == '__main__' : T = CommonToken() print T T = CommonToken(col=15,line=1,text="some text", type=5) print T T = CommonToken() T.setLine(1).setColumn(15).setText("some text").setType(5) print T print T.getLine() print T.getColumn() print T.getText() print T.getType() ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CommonHiddenStreamToken ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class CommonHiddenStreamToken(CommonToken): def __init__(self,*args): CommonToken.__init__(self,*args) self.hiddenBefore = None self.hiddenAfter = None def getHiddenAfter(self): return self.hiddenAfter def getHiddenBefore(self): return self.hiddenBefore def setHiddenAfter(self,t): self.hiddenAfter = t def setHiddenBefore(self, t): self.hiddenBefore = t ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### Queue ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ## Shall be a circular buffer on tokens .. 
class Queue(object): def __init__(self): self.buffer = [] # empty list def append(self,item): self.buffer.append(item) def elementAt(self,index): return self.buffer[index] def reset(self): self.buffer = [] def removeFirst(self): self.buffer.pop(0) def length(self): return len(self.buffer) def __str__(self): return str(self.buffer) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### InputBuffer ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class InputBuffer(object): def __init__(self): self.nMarkers = 0 self.markerOffset = 0 self.numToConsume = 0 self.queue = Queue() def __str__(self): return "(%s,%s,%s,%s)" % ( self.nMarkers, self.markerOffset, self.numToConsume, self.queue) def __repr__(self): return str(self) def commit(self): self.nMarkers -= 1 def consume(self) : self.numToConsume += 1 ## probably better to return a list of items ## because of unicode. Or return a unicode ## string .. def getLAChars(self) : i = self.markerOffset n = self.queue.length() s = '' while i<n: s += self.queue.elementAt(i) return s ## probably better to return a list of items ## because of unicode chars def getMarkedChars(self) : s = '' i = 0 n = self.markerOffset while i<n: s += self.queue.elementAt(i) return s def isMarked(self) : return self.nMarkers != 0 def fill(self,k): ### abstract method raise NotImplementedError() def LA(self,k) : self.fill(k) return self.queue.elementAt(self.markerOffset + k - 1) def mark(self) : self.syncConsume() self.nMarkers += 1 return self.markerOffset def rewind(self,mark) : self.syncConsume() self.markerOffset = mark self.nMarkers -= 1 def reset(self) : self.nMarkers = 0 self.markerOffset = 0 self.numToConsume = 0 self.queue.reset() def syncConsume(self) : while self.numToConsume > 0: if self.nMarkers > 0: # guess mode -- leave leading characters and bump offset. self.markerOffset += 1 else: # normal mode -- remove first character self.queue.removeFirst() self.numToConsume -= 1 ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CharBuffer ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class CharBuffer(InputBuffer): def __init__(self,reader): ##assert isinstance(reader,file) super(CharBuffer,self).__init__() ## a reader is supposed to be anything that has ## a method 'read(int)'. self.input = reader def __str__(self): base = super(CharBuffer,self).__str__() return "CharBuffer{%s,%s" % (base,str(input)) def fill(self,amount): try: self.syncConsume() while self.queue.length() < (amount + self.markerOffset) : ## retrieve just one char - what happend at end ## of input? c = self.input.read(1) ### python's behaviour is to return the empty string on ### EOF, ie. no exception whatsoever is thrown. An empty ### python string has the nice feature that it is of ### type 'str' and "not ''" would return true. Contrary, ### one can't do this: '' in 'abc'. This should return ### false, but all we get is then a TypeError as an ### empty string is not a character. ### Let's assure then that we have either seen a ### character or an empty string (EOF). assert len(c) == 0 or len(c) == 1 ### And it shall be of type string (ASCII or UNICODE). assert is_string_type(c) ### Just append EOF char to buffer. Note that buffer may ### contain then just more than one EOF char .. ### use unicode chars instead of ASCII .. self.queue.append(c) except Exception,e: raise CharStreamIOException(e) ##except: # (mk) Cannot happen ... 
##error ("unexpected exception caught ..") ##assert 0 ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### LexerSharedInputState ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class LexerSharedInputState(object): def __init__(self,ibuf): assert isinstance(ibuf,InputBuffer) self.input = ibuf self.column = 1 self.line = 1 self.tokenStartColumn = 1 self.tokenStartLine = 1 self.guessing = 0 self.filename = None def reset(self): self.column = 1 self.line = 1 self.tokenStartColumn = 1 self.tokenStartLine = 1 self.guessing = 0 self.filename = None self.input.reset() def LA(self,k): return self.input.LA(k) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TokenStream ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TokenStream(object): def nextToken(self): pass def __iter__(self): return TokenStreamIterator(self) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TokenStreamIterator ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TokenStreamIterator(object): def __init__(self,inst): if isinstance(inst,TokenStream): self.inst = inst return raise TypeError("TokenStreamIterator requires TokenStream object") def next(self): assert self.inst item = self.inst.nextToken() if not item or item.isEOF(): raise StopIteration() return item ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TokenStreamSelector ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TokenStreamSelector(TokenStream): def __init__(self): self._input = None self._stmap = {} self._stack = [] def addInputStream(self,stream,key): self._stmap[key] = stream def getCurrentStream(self): return self._input def getStream(self,sname): try: stream = self._stmap[sname] except: raise ValueError("TokenStream " + sname + " not found"); return stream; def nextToken(self): while 1: try: return self._input.nextToken() except TokenStreamRetryException,r: ### just retry "forever" pass def pop(self): stream = self._stack.pop(); self.select(stream); return stream; def push(self,arg): self._stack.append(self._input); self.select(arg) def retry(self): raise TokenStreamRetryException() def select(self,arg): if isinstance(arg,TokenStream): self._input = arg return if is_string_type(arg): self._input = self.getStream(arg) return raise TypeError("TokenStreamSelector.select requires " + "TokenStream or string argument") ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TokenStreamBasicFilter ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TokenStreamBasicFilter(TokenStream): def __init__(self,input): self.input = input; self.discardMask = BitSet() def discard(self,arg): if isinstance(arg,int): self.discardMask.add(arg) return if isinstance(arg,BitSet): self.discardMark = arg return raise TypeError("TokenStreamBasicFilter.discard requires" + "integer or BitSet argument") def nextToken(self): tok = self.input.nextToken() while tok and self.discardMask.member(tok.getType()): tok = self.input.nextToken() return tok ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TokenStreamHiddenTokenFilter ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TokenStreamHiddenTokenFilter(TokenStreamBasicFilter): def __init__(self,input): TokenStreamBasicFilter.__init__(self,input) self.hideMask = BitSet() self.nextMonitoredToken = None self.lastHiddenToken = 
None self.firstHidden = None def consume(self): self.nextMonitoredToken = self.input.nextToken() def consumeFirst(self): self.consume() p = None; while self.hideMask.member(self.LA(1).getType()) or \ self.discardMask.member(self.LA(1).getType()): if self.hideMask.member(self.LA(1).getType()): if not p: p = self.LA(1) else: p.setHiddenAfter(self.LA(1)) self.LA(1).setHiddenBefore(p) p = self.LA(1) self.lastHiddenToken = p if not self.firstHidden: self.firstHidden = p self.consume() def getDiscardMask(self): return self.discardMask def getHiddenAfter(self,t): return t.getHiddenAfter() def getHiddenBefore(self,t): return t.getHiddenBefore() def getHideMask(self): return self.hideMask def getInitialHiddenToken(self): return self.firstHidden def hide(self,m): if isinstance(m,int): self.hideMask.add(m) return if isinstance(m.BitMask): self.hideMask = m return def LA(self,i): return self.nextMonitoredToken def nextToken(self): if not self.LA(1): self.consumeFirst() monitored = self.LA(1) monitored.setHiddenBefore(self.lastHiddenToken) self.lastHiddenToken = None self.consume() p = monitored while self.hideMask.member(self.LA(1).getType()) or \ self.discardMask.member(self.LA(1).getType()): if self.hideMask.member(self.LA(1).getType()): p.setHiddenAfter(self.LA(1)) if p != monitored: self.LA(1).setHiddenBefore(p) p = self.lastHiddenToken = self.LA(1) self.consume() return monitored ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### StringBuffer ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class StringBuffer: def __init__(self,string=None): if string: self.text = list(string) else: self.text = [] def setLength(self,sz): if not sz : self.text = [] return assert sz>0 if sz >= self.length(): return ### just reset to empty buffer self.text = self.text[0:sz] def length(self): return len(self.text) def append(self,c): self.text.append(c) ### return buffer as string. Arg 'a' is used as index ## into the buffer and 2nd argument shall be the length. ## If 2nd args is absent, we return chars till end of ## buffer starting with 'a'. def getString(self,a=None,length=None): if not a : a = 0 assert a>=0 if a>= len(self.text) : return "" if not length: ## no second argument L = self.text[a:] else: assert (a+length) <= len(self.text) b = a + length L = self.text[a:b] s = "" for x in L : s += x return s toString = getString ## alias def __str__(self): return str(self.text) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### Reader ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ## When reading Japanese chars, it happens that a stream returns a ## 'char' of length 2. This looks like a bug in the appropriate ## codecs - but I'm rather unsure about this. Anyway, if this is ## the case, I'm going to split this string into a list of chars ## and put them on hold, ie. on a buffer. Next time when called ## we read from buffer until buffer is empty. ## wh: nov, 25th -> problem does not appear in Python 2.4.0.c1. class Reader(object): def __init__(self,stream): self.cin = stream self.buf = [] def read(self,num): assert num==1 if len(self.buf): return self.buf.pop() ## Read a char - this may return a string. ## Is this a bug in codecs/Python? c = self.cin.read(1) if not c or len(c)==1: return c L = list(c) L.reverse() for x in L: self.buf.append(x) ## read one char .. 
return self.read(1) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CharScanner ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class CharScanner(TokenStream): ## class members NO_CHAR = 0 EOF_CHAR = '' ### EOF shall be the empty string. def __init__(self, *argv, **kwargs): super(CharScanner, self).__init__() self.saveConsumedInput = True self.tokenClass = None self.caseSensitive = True self.caseSensitiveLiterals = True self.literals = None self.tabsize = 8 self._returnToken = None self.commitToPath = False self.traceDepth = 0 self.text = StringBuffer() self.hashString = hash(self) self.setTokenObjectClass(CommonToken) self.setInput(*argv) def __iter__(self): return CharScannerIterator(self) def setInput(self,*argv): ## case 1: ## if there's no arg we default to read from ## standard input if not argv: import sys self.setInput(sys.stdin) return ## get 1st argument arg1 = argv[0] ## case 2: ## if arg1 is a string, we assume it's a file name ## and open a stream using 2nd argument as open ## mode. If there's no 2nd argument we fall back to ## mode '+rb'. if is_string_type(arg1): f = open(arg1,"rb") self.setInput(f) self.setFilename(arg1) return ## case 3: ## if arg1 is a file we wrap it by a char buffer ( ## some additional checks?? No, can't do this in ## general). if isinstance(arg1,file): self.setInput(CharBuffer(arg1)) return ## case 4: ## if arg1 is of type SharedLexerInputState we use ## argument as is. if isinstance(arg1,LexerSharedInputState): self.inputState = arg1 return ## case 5: ## check whether argument type is of type input ## buffer. If so create a SharedLexerInputState and ## go ahead. if isinstance(arg1,InputBuffer): self.setInput(LexerSharedInputState(arg1)) return ## case 6: ## check whether argument type has a method read(int) ## If so create CharBuffer ... 
try: if arg1.read: rd = Reader(arg1) cb = CharBuffer(rd) ss = LexerSharedInputState(cb) self.inputState = ss return except: pass ## case 7: ## raise wrong argument exception raise TypeError(argv) def setTabSize(self,size) : self.tabsize = size def getTabSize(self) : return self.tabsize def setCaseSensitive(self,t) : self.caseSensitive = t def setCommitToPath(self,commit) : self.commitToPath = commit def setFilename(self,f) : self.inputState.filename = f def setLine(self,line) : self.inputState.line = line def setText(self,s) : self.resetText() self.text.append(s) def getCaseSensitive(self) : return self.caseSensitive def getCaseSensitiveLiterals(self) : return self.caseSensitiveLiterals def getColumn(self) : return self.inputState.column def setColumn(self,c) : self.inputState.column = c def getCommitToPath(self) : return self.commitToPath def getFilename(self) : return self.inputState.filename def getInputBuffer(self) : return self.inputState.input def getInputState(self) : return self.inputState def setInputState(self,state) : assert isinstance(state,LexerSharedInputState) self.inputState = state def getLine(self) : return self.inputState.line def getText(self) : return str(self.text) def getTokenObject(self) : return self._returnToken def LA(self,i) : c = self.inputState.input.LA(i) if not self.caseSensitive: ### E0006 c = c.__class__.lower(c) return c def makeToken(self,type) : try: ## dynamically load a class assert self.tokenClass tok = self.tokenClass() tok.setType(type) tok.setColumn(self.inputState.tokenStartColumn) tok.setLine(self.inputState.tokenStartLine) return tok except: self.panic("unable to create new token") return Token.badToken def mark(self) : return self.inputState.input.mark() def _match_bitset(self,b) : if b.member(self.LA(1)): self.consume() else: raise MismatchedCharException(self.LA(1), b, False, self) def _match_string(self,s) : for c in s: if self.LA(1) == c: self.consume() else: raise MismatchedCharException(self.LA(1), c, False, self) def match(self,item): if is_string_type(item): return self._match_string(item) else: return self._match_bitset(item) def matchNot(self,c) : if self.LA(1) != c: self.consume() else: raise MismatchedCharException(self.LA(1), c, True, self) def matchRange(self,c1,c2) : if self.LA(1) < c1 or self.LA(1) > c2 : raise MismatchedCharException(self.LA(1), c1, c2, False, self) else: self.consume() def newline(self) : self.inputState.line += 1 self.inputState.column = 1 def tab(self) : c = self.getColumn() nc = ( ((c-1)/self.tabsize) + 1) * self.tabsize + 1 self.setColumn(nc) def panic(self,s='') : print "CharScanner: panic: " + s sys.exit(1) def reportError(self,ex) : print ex def reportError(self,s) : if not self.getFilename(): print "error: " + str(s) else: print self.getFilename() + ": error: " + str(s) def reportWarning(self,s) : if not self.getFilename(): print "warning: " + str(s) else: print self.getFilename() + ": warning: " + str(s) def resetText(self) : self.text.setLength(0) self.inputState.tokenStartColumn = self.inputState.column self.inputState.tokenStartLine = self.inputState.line def rewind(self,pos) : self.inputState.input.rewind(pos) def setTokenObjectClass(self,cl): self.tokenClass = cl def testForLiteral(self,token): if not token: return assert isinstance(token,Token) _type = token.getType() ## special tokens can't be literals if _type in [SKIP,INVALID_TYPE,EOF_TYPE,NULL_TREE_LOOKAHEAD] : return _text = token.getText() if not _text: return assert is_string_type(_text) _type = self.testLiteralsTable(_text,_type) 
token.setType(_type) return _type def testLiteralsTable(self,*args): if is_string_type(args[0]): s = args[0] i = args[1] else: s = self.text.getString() i = args[0] ## check whether integer has been given if not isinstance(i,int): assert isinstance(i,int) ## check whether we have a dict assert isinstance(self.literals,dict) try: ## E0010 if not self.caseSensitiveLiterals: s = s.__class__.lower(s) i = self.literals[s] except: pass return i def toLower(self,c): return c.__class__.lower() def traceIndent(self): print ' ' * self.traceDepth def traceIn(self,rname): self.traceDepth += 1 self.traceIndent() print "> lexer %s c== %s" % (rname,self.LA(1)) def traceOut(self,rname): self.traceIndent() print "< lexer %s c== %s" % (rname,self.LA(1)) self.traceDepth -= 1 def uponEOF(self): pass def append(self,c): if self.saveConsumedInput : self.text.append(c) def commit(self): self.inputState.input.commit() def consume(self): if not self.inputState.guessing: c = self.LA(1) if self.caseSensitive: self.append(c) else: # use input.LA(), not LA(), to get original case # CharScanner.LA() would toLower it. c = self.inputState.input.LA(1) self.append(c) if c and c in "\t": self.tab() else: self.inputState.column += 1 self.inputState.input.consume() ## Consume chars until one matches the given char def consumeUntil_char(self,c): while self.LA(1) != EOF_CHAR and self.LA(1) != c: self.consume() ## Consume chars until one matches the given set def consumeUntil_bitset(self,bitset): while self.LA(1) != EOF_CHAR and not self.set.member(self.LA(1)): self.consume() ### If symbol seen is EOF then generate and set token, otherwise ### throw exception. def default(self,la1): if not la1 : self.uponEOF() self._returnToken = self.makeToken(EOF_TYPE) else: self.raise_NoViableAlt(la1) def filterdefault(self,la1,*args): if not la1: self.uponEOF() self._returnToken = self.makeToken(EOF_TYPE) return if not args: self.consume() raise TryAgain() else: ### apply filter object self.commit(); try: func=args[0] args=args[1:] apply(func,args) except RecognitionException, e: ## catastrophic failure self.reportError(e); self.consume(); raise TryAgain() def raise_NoViableAlt(self,la1=None): if not la1: la1 = self.LA(1) fname = self.getFilename() line = self.getLine() col = self.getColumn() raise NoViableAltForCharException(la1,fname,line,col) def set_return_token(self,_create,_token,_ttype,_offset): if _create and not _token and (not _ttype == SKIP): string = self.text.getString(_offset) _token = self.makeToken(_ttype) _token.setText(string) self._returnToken = _token return _token ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CharScannerIterator ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class CharScannerIterator: def __init__(self,inst): if isinstance(inst,CharScanner): self.inst = inst return raise TypeError("CharScannerIterator requires CharScanner object") def next(self): assert self.inst item = self.inst.nextToken() if not item or item.isEOF(): raise StopIteration() return item ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### BitSet ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### I'm assuming here that a long is 64bits. It appears however, that ### a long is of any size. That means we can use a single long as the ### bitset (!), ie. Python would do almost all the work (TBD). 
class BitSet(object): BITS = 64 NIBBLE = 4 LOG_BITS = 6 MOD_MASK = BITS -1 def __init__(self,data=None): if not data: BitSet.__init__(self,[long(0)]) return if isinstance(data,int): BitSet.__init__(self,[long(data)]) return if isinstance(data,long): BitSet.__init__(self,[data]) return if not isinstance(data,list): raise TypeError("BitSet requires integer, long, or " + "list argument") for x in data: if not isinstance(x,long): raise TypeError(self,"List argument item is " + "not a long: %s" % (x)) self.data = data def __str__(self): bits = len(self.data) * BitSet.BITS s = "" for i in xrange(0,bits): if self.at(i): s += "1" else: s += "o" if not ((i+1) % 10): s += '|%s|' % (i+1) return s def __repr__(self): return str(self) def member(self,item): if not item: return False if isinstance(item,int): return self.at(item) if not is_string_type(item): raise TypeError(self,"char or unichar expected: %s" % (item)) ## char is a (unicode) string with at most lenght 1, ie. ## a char. if len(item) != 1: raise TypeError(self,"char expected: %s" % (item)) ### handle ASCII/UNICODE char num = ord(item) ### check whether position num is in bitset return self.at(num) def wordNumber(self,bit): return bit >> BitSet.LOG_BITS def bitMask(self,bit): pos = bit & BitSet.MOD_MASK ## bit mod BITS return (1L << pos) def set(self,bit,on=True): # grow bitset as required (use with care!) i = self.wordNumber(bit) mask = self.bitMask(bit) if i>=len(self.data): d = i - len(self.data) + 1 for x in xrange(0,d): self.data.append(0L) assert len(self.data) == i+1 if on: self.data[i] |= mask else: self.data[i] &= (~mask) ### make add an alias for set add = set def off(self,bit,off=True): self.set(bit,not off) def at(self,bit): i = self.wordNumber(bit) v = self.data[i] m = self.bitMask(bit) return v & m ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### some further funcs ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### def illegalarg_ex(func): raise ValueError( "%s is only valid if parser is built for debugging" % (func.func_name)) def runtime_ex(func): raise RuntimeException( "%s is only valid if parser is built for debugging" % (func.func_name)) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TokenBuffer ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TokenBuffer(object): def __init__(self,stream): self.input = stream self.nMarkers = 0 self.markerOffset = 0 self.numToConsume = 0 self.queue = Queue() def reset(self) : self.nMarkers = 0 self.markerOffset = 0 self.numToConsume = 0 self.queue.reset() def consume(self) : self.numToConsume += 1 def fill(self, amount): self.syncConsume() while self.queue.length() < (amount + self.markerOffset): self.queue.append(self.input.nextToken()) def getInput(self): return self.input def LA(self,k) : self.fill(k) return self.queue.elementAt(self.markerOffset + k - 1).type def LT(self,k) : self.fill(k) return self.queue.elementAt(self.markerOffset + k - 1) def mark(self) : self.syncConsume() self.nMarkers += 1 return self.markerOffset def rewind(self,mark) : self.syncConsume() self.markerOffset = mark self.nMarkers -= 1 def syncConsume(self) : while self.numToConsume > 0: if self.nMarkers > 0: # guess mode -- leave leading characters and bump offset. 
self.markerOffset += 1 else: # normal mode -- remove first character self.queue.removeFirst() self.numToConsume -= 1 def __str__(self): return "(%s,%s,%s,%s,%s)" % ( self.input, self.nMarkers, self.markerOffset, self.numToConsume, self.queue) def __repr__(self): return str(self) ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ParserSharedInputState ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class ParserSharedInputState(object): def __init__(self): self.input = None self.reset() def reset(self): self.guessing = 0 self.filename = None if self.input: self.input.reset() ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### Parser ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class Parser(object): def __init__(self, *args, **kwargs): self.tokenNames = None self.returnAST = None self.astFactory = None self.tokenTypeToASTClassMap = {} self.ignoreInvalidDebugCalls = False self.traceDepth = 0 if not args: self.inputState = ParserSharedInputState() return arg0 = args[0] assert isinstance(arg0,ParserSharedInputState) self.inputState = arg0 return def getTokenTypeToASTClassMap(self): return self.tokenTypeToASTClassMap def addMessageListener(self, l): if not self.ignoreInvalidDebugCalls: illegalarg_ex(addMessageListener) def addParserListener(self,l) : if (not self.ignoreInvalidDebugCalls) : illegalarg_ex(addParserListener) def addParserMatchListener(self, l) : if (not self.ignoreInvalidDebugCalls) : illegalarg_ex(addParserMatchListener) def addParserTokenListener(self, l) : if (not self.ignoreInvalidDebugCalls): illegalarg_ex(addParserTokenListener) def addSemanticPredicateListener(self, l) : if (not self.ignoreInvalidDebugCalls): illegalarg_ex(addSemanticPredicateListener) def addSyntacticPredicateListener(self, l) : if (not self.ignoreInvalidDebugCalls): illegalarg_ex(addSyntacticPredicateListener) def addTraceListener(self, l) : if (not self.ignoreInvalidDebugCalls): illegalarg_ex(addTraceListener) def consume(self): raise NotImplementedError() def _consumeUntil_type(self,tokenType): while self.LA(1) != EOF_TYPE and self.LA(1) != tokenType: self.consume() def _consumeUntil_bitset(self, set): while self.LA(1) != EOF_TYPE and not set.member(self.LA(1)): self.consume() def consumeUntil(self,arg): if isinstance(arg,int): self._consumeUntil_type(arg) else: self._consumeUntil_bitset(arg) def defaultDebuggingSetup(self): pass def getAST(self) : return self.returnAST def getASTFactory(self) : return self.astFactory def getFilename(self) : return self.inputState.filename def getInputState(self) : return self.inputState def setInputState(self, state) : self.inputState = state def getTokenName(self,num) : return self.tokenNames[num] def getTokenNames(self) : return self.tokenNames def isDebugMode(self) : return self.false def LA(self, i): raise NotImplementedError() def LT(self, i): raise NotImplementedError() def mark(self): return self.inputState.input.mark() def _match_int(self,t): if (self.LA(1) != t): raise MismatchedTokenException( self.tokenNames, self.LT(1), t, False, self.getFilename()) else: self.consume() def _match_set(self, b): if (not b.member(self.LA(1))): raise MismatchedTokenException( self.tokenNames,self.LT(1), b, False, self.getFilename()) else: self.consume() def match(self,set) : if isinstance(set,int): self._match_int(set) return if isinstance(set,BitSet): self._match_set(set) return raise TypeError("Parser.match requires integer ot BitSet argument") def matchNot(self,t): if 
self.LA(1) == t: raise MismatchedTokenException( tokenNames, self.LT(1), t, True, self.getFilename()) else: self.consume() def removeMessageListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeMessageListener) def removeParserListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeParserListener) def removeParserMatchListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeParserMatchListener) def removeParserTokenListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeParserTokenListener) def removeSemanticPredicateListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeSemanticPredicateListener) def removeSyntacticPredicateListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeSyntacticPredicateListener) def removeTraceListener(self, l) : if (not self.ignoreInvalidDebugCalls): runtime_ex(removeTraceListener) def reportError(self,x) : fmt = "syntax error:" f = self.getFilename() if f: fmt = ("%s:" % f) + fmt if isinstance(x,Token): line = x.getColumn() col = x.getLine() text = x.getText() fmt = fmt + 'unexpected symbol at line %s (column %s) : "%s"' print >>sys.stderr, fmt % (line,col,text) else: print >>sys.stderr, fmt,str(x) def reportWarning(self,s): f = self.getFilename() if f: print "%s:warning: %s" % (f,str(x)) else: print "warning: %s" % (str(x)) def rewind(self, pos) : self.inputState.input.rewind(pos) def setASTFactory(self, f) : self.astFactory = f def setASTNodeClass(self, cl) : self.astFactory.setASTNodeType(cl) def setASTNodeType(self, nodeType) : self.setASTNodeClass(nodeType) def setDebugMode(self, debugMode) : if (not self.ignoreInvalidDebugCalls): runtime_ex(setDebugMode) def setFilename(self, f) : self.inputState.filename = f def setIgnoreInvalidDebugCalls(self, value) : self.ignoreInvalidDebugCalls = value def setTokenBuffer(self, t) : self.inputState.input = t def traceIndent(self): print " " * self.traceDepth def traceIn(self,rname): self.traceDepth += 1 self.trace("> ", rname) def traceOut(self,rname): self.trace("< ", rname) self.traceDepth -= 1 ### wh: moved from ASTFactory to Parser def addASTChild(self,currentAST, child): if not child: return if not currentAST.root: currentAST.root = child elif not currentAST.child: currentAST.root.setFirstChild(child) else: currentAST.child.setNextSibling(child) currentAST.child = child currentAST.advanceChildToEnd() ### wh: moved from ASTFactory to Parser def makeASTRoot(self,currentAST,root) : if root: ### Add the current root as a child of new root root.addChild(currentAST.root) ### The new current child is the last sibling of the old root currentAST.child = currentAST.root currentAST.advanceChildToEnd() ### Set the new root currentAST.root = root ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### LLkParser ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class LLkParser(Parser): def __init__(self, *args, **kwargs): try: arg1 = args[0] except: arg1 = 1 if isinstance(arg1,int): super(LLkParser,self).__init__() self.k = arg1 return if isinstance(arg1,ParserSharedInputState): super(LLkParser,self).__init__(arg1) self.set_k(1,*args) return if isinstance(arg1,TokenBuffer): super(LLkParser,self).__init__() self.setTokenBuffer(arg1) self.set_k(1,*args) return if isinstance(arg1,TokenStream): super(LLkParser,self).__init__() tokenBuf = TokenBuffer(arg1) self.setTokenBuffer(tokenBuf) self.set_k(1,*args) return ### unknown argument raise TypeError("LLkParser 
requires integer, " + "ParserSharedInputStream or TokenStream argument") def consume(self): self.inputState.input.consume() def LA(self,i): return self.inputState.input.LA(i) def LT(self,i): return self.inputState.input.LT(i) def set_k(self,index,*args): try: self.k = args[index] except: self.k = 1 def trace(self,ee,rname): print type(self) self.traceIndent() guess = "" if self.inputState.guessing > 0: guess = " [guessing]" print(ee + rname + guess) for i in xrange(1,self.k+1): if i != 1: print(", ") if self.LT(i) : v = self.LT(i).getText() else: v = "null" print "LA(%s) == %s" % (i,v) print("\n") def traceIn(self,rname): self.traceDepth += 1; self.trace("> ", rname); def traceOut(self,rname): self.trace("< ", rname); self.traceDepth -= 1; ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TreeParserSharedInputState ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TreeParserSharedInputState(object): def __init__(self): self.guessing = 0 ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### TreeParser ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class TreeParser(object): def __init__(self, *args, **kwargs): self.inputState = TreeParserSharedInputState() self._retTree = None self.tokenNames = [] self.returnAST = None self.astFactory = ASTFactory() self.traceDepth = 0 def getAST(self): return self.returnAST def getASTFactory(self): return self.astFactory def getTokenName(self,num) : return self.tokenNames[num] def getTokenNames(self): return self.tokenNames def match(self,t,set) : assert isinstance(set,int) or isinstance(set,BitSet) if not t or t == ASTNULL: raise MismatchedTokenException(self.getTokenNames(), t,set, False) if isinstance(set,int) and t.getType() != set: raise MismatchedTokenException(self.getTokenNames(), t,set, False) if isinstance(set,BitSet) and not set.member(t.getType): raise MismatchedTokenException(self.getTokenNames(), t,set, False) def matchNot(self,t, ttype) : if not t or (t == ASTNULL) or (t.getType() == ttype): raise MismatchedTokenException(getTokenNames(), t, ttype, True) def reportError(self,ex): print >>sys.stderr,"error:",ex def reportWarning(self, s): print "warning:",s def setASTFactory(self,f): self.astFactory = f def setASTNodeType(self,nodeType): self.setASTNodeClass(nodeType) def setASTNodeClass(self,nodeType): self.astFactory.setASTNodeType(nodeType) def traceIndent(self): print " " * self.traceDepth def traceIn(self,rname,t): self.traceDepth += 1 self.traceIndent() print("> " + rname + "(" + ifelse(t,str(t),"null") + ")" + ifelse(self.inputState.guessing>0,"[guessing]","")) def traceOut(self,rname,t): self.traceIndent() print("< " + rname + "(" + ifelse(t,str(t),"null") + ")" + ifelse(self.inputState.guessing>0,"[guessing]","")) self.traceDepth -= 1 ### wh: moved from ASTFactory to TreeParser def addASTChild(self,currentAST, child): if not child: return if not currentAST.root: currentAST.root = child elif not currentAST.child: currentAST.root.setFirstChild(child) else: currentAST.child.setNextSibling(child) currentAST.child = child currentAST.advanceChildToEnd() ### wh: moved from ASTFactory to TreeParser def makeASTRoot(self,currentAST,root): if root: ### Add the current root as a child of new root root.addChild(currentAST.root) ### The new current child is the last sibling of the old root currentAST.child = currentAST.root currentAST.advanceChildToEnd() ### Set the new root currentAST.root = root 
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### funcs to work on trees ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### def rightmost(ast): if ast: while(ast.right): ast = ast.right return ast def cmptree(s,t,partial): while(s and t): ### as a quick optimization, check roots first. if not s.equals(t): return False ### if roots match, do full list match test on children. if not cmptree(s.getFirstChild(),t.getFirstChild(),partial): return False s = s.getNextSibling() t = t.getNextSibling() r = ifelse(partial,not t,not s and not t) return r ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### AST ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class AST(object): def __init__(self): pass def addChild(self, c): pass def equals(self, t): return False def equalsList(self, t): return False def equalsListPartial(self, t): return False def equalsTree(self, t): return False def equalsTreePartial(self, t): return False def findAll(self, tree): return None def findAllPartial(self, subtree): return None def getFirstChild(self): return self def getNextSibling(self): return self def getText(self): return "" def getType(self): return INVALID_TYPE def getLine(self): return 0 def getColumn(self): return 0 def getNumberOfChildren(self): return 0 def initialize(self, t, txt): pass def initialize(self, t): pass def setFirstChild(self, c): pass def setNextSibling(self, n): pass def setText(self, text): pass def setType(self, ttype): pass def toString(self): self.getText() __str__ = toString def toStringList(self): return self.getText() def toStringTree(self): return self.getText() ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ASTNULLType ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### There is only one instance of this class **/ class ASTNULLType(AST): def __init__(self): AST.__init__(self) pass def getText(self): return "<ASTNULL>" def getType(self): return NULL_TREE_LOOKAHEAD ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### BaseAST ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class BaseAST(AST): verboseStringConversion = False tokenNames = None def __init__(self): self.down = None ## kid self.right = None ## sibling def addChild(self,node): if node: t = rightmost(self.down) if t: t.right = node else: assert not self.down self.down = node def getNumberOfChildren(self): t = self.down n = 0 while t: n += 1 t = t.right return n def doWorkForFindAll(self,v,target,partialMatch): sibling = self while sibling: c1 = partialMatch and sibling.equalsTreePartial(target) if c1: v.append(sibling) else: c2 = not partialMatch and sibling.equalsTree(target) if c2: v.append(sibling) ### regardless of match or not, check any children for matches if sibling.getFirstChild(): sibling.getFirstChild().doWorkForFindAll(v,target,partialMatch) sibling = sibling.getNextSibling() ### Is node t equal to 'self' in terms of token type and text? def equals(self,t): if not t: return False return self.getText() == t.getText() and self.getType() == t.getType() ### Is t an exact structural and equals() match of this tree. The ### 'self' reference is considered the start of a sibling list. ### def equalsList(self, t): return cmptree(self, t, partial=False) ### Is 't' a subtree of this list? ### The siblings of the root are NOT ignored. 
### def equalsListPartial(self,t): return cmptree(self,t,partial=True) ### Is tree rooted at 'self' equal to 't'? The siblings ### of 'self' are ignored. ### def equalsTree(self, t): return self.equals(t) and \ cmptree(self.getFirstChild(), t.getFirstChild(), partial=False) ### Is 't' a subtree of the tree rooted at 'self'? The siblings ### of 'self' are ignored. ### def equalsTreePartial(self, t): if not t: return True return self.equals(t) and cmptree( self.getFirstChild(), t.getFirstChild(), partial=True) ### Walk the tree looking for all exact subtree matches. Return ### an ASTEnumerator that lets the caller walk the list ### of subtree roots found herein. def findAll(self,target): roots = [] ### the empty tree cannot result in an enumeration if not target: return None # find all matches recursively self.doWorkForFindAll(roots, target, False) return roots ### Walk the tree looking for all subtrees. Return ### an ASTEnumerator that lets the caller walk the list ### of subtree roots found herein. def findAllPartial(self,sub): roots = [] ### the empty tree cannot result in an enumeration if not sub: return None self.doWorkForFindAll(roots, sub, True) ### find all matches recursively return roots ### Get the first child of this node None if not children def getFirstChild(self): return self.down ### Get the next sibling in line after this one def getNextSibling(self): return self.right ### Get the token text for this node def getText(self): return "" ### Get the token type for this node def getType(self): return 0 def getLine(self): return 0 def getColumn(self): return 0 ### Remove all children */ def removeChildren(self): self.down = None def setFirstChild(self,c): self.down = c def setNextSibling(self, n): self.right = n ### Set the token text for this node def setText(self, text): pass ### Set the token type for this node def setType(self, ttype): pass ### static def setVerboseStringConversion(verbose,names): verboseStringConversion = verbose tokenNames = names setVerboseStringConversion = staticmethod(setVerboseStringConversion) ### Return an array of strings that maps token ID to it's text. 
## @since 2.7.3 def getTokenNames(): return tokenNames def toString(self): return self.getText() ### return tree as lisp string - sibling included def toStringList(self): ts = self.toStringTree() sib = self.getNextSibling() if sib: ts += sib.toStringList() return ts __str__ = toStringList ### return tree as string - siblings ignored def toStringTree(self): ts = "" kid = self.getFirstChild() if kid: ts += " (" ts += " " + self.toString() if kid: ts += kid.toStringList() ts += " )" return ts ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CommonAST ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### Common AST node implementation class CommonAST(BaseAST): def __init__(self,token=None): super(CommonAST,self).__init__() self.ttype = INVALID_TYPE self.text = "<no text>" self.line = 0 self.column= 0 self.initialize(token) #assert self.text ### Get the token text for this node def getText(self): return self.text ### Get the token type for this node def getType(self): return self.ttype ### Get the line for this node def getLine(self): return self.line ### Get the column for this node def getColumn(self): return self.column def initialize(self,*args): if not args: return arg0 = args[0] if isinstance(arg0,int): arg1 = args[1] self.setType(arg0) self.setText(arg1) return if isinstance(arg0,AST) or isinstance(arg0,Token): self.setText(arg0.getText()) self.setType(arg0.getType()) self.line = arg0.getLine() self.column = arg0.getColumn() return ### Set the token text for this node def setText(self,text_): assert is_string_type(text_) self.text = text_ ### Set the token type for this node def setType(self,ttype_): assert isinstance(ttype_,int) self.ttype = ttype_ ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### CommonASTWithHiddenTokens ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class CommonASTWithHiddenTokens(CommonAST): def __init__(self,*args): CommonAST.__init__(self,*args) self.hiddenBefore = None self.hiddenAfter = None def getHiddenAfter(self): return self.hiddenAfter def getHiddenBefore(self): return self.hiddenBefore def initialize(self,*args): CommonAST.initialize(self,*args) if args and isinstance(args[0],Token): assert isinstance(args[0],CommonHiddenStreamToken) self.hiddenBefore = args[0].getHiddenBefore() self.hiddenAfter = args[0].getHiddenAfter() ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ASTPair ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class ASTPair(object): def __init__(self): self.root = None ### current root of tree self.child = None ### current child to which siblings are added ### Make sure that child is the last sibling */ def advanceChildToEnd(self): if self.child: while self.child.getNextSibling(): self.child = self.child.getNextSibling() ### Copy an ASTPair. 
Don't call it clone() because we want type-safety */ def copy(self): tmp = ASTPair() tmp.root = self.root tmp.child = self.child return tmp def toString(self): r = ifelse(not root,"null",self.root.getText()) c = ifelse(not child,"null",self.child.getText()) return "[%s,%s]" % (r,c) __str__ = toString __repr__ = toString ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ASTFactory ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class ASTFactory(object): def __init__(self,table=None): self._class = None self._classmap = ifelse(table,table,None) def create(self,*args): if not args: return self.create(INVALID_TYPE) arg0 = args[0] arg1 = None arg2 = None try: arg1 = args[1] arg2 = args[2] except: pass # ctor(int) if isinstance(arg0,int) and not arg2: ### get class for 'self' type c = self.getASTNodeType(arg0) t = self.create(c) if t: t.initialize(arg0, ifelse(arg1,arg1,"")) return t # ctor(int,something) if isinstance(arg0,int) and arg2: t = self.create(arg2) if t: t.initialize(arg0,arg1) return t # ctor(AST) if isinstance(arg0,AST): t = self.create(arg0.getType()) if t: t.initialize(arg0) return t # ctor(token) if isinstance(arg0,Token) and not arg1: ttype = arg0.getType() assert isinstance(ttype,int) t = self.create(ttype) if t: t.initialize(arg0) return t # ctor(token,class) if isinstance(arg0,Token) and arg1: assert isinstance(arg1,type) assert issubclass(arg1,AST) # this creates instance of 'arg1' using 'arg0' as # argument. Wow, that's magic! t = arg1(arg0) assert t and isinstance(t,AST) return t # ctor(class) if isinstance(arg0,type): ### next statement creates instance of type (!) t = arg0() assert isinstance(t,AST) return t def setASTNodeClass(self,className=None): if not className: return assert isinstance(className,type) assert issubclass(className,AST) self._class = className ### kind of misnomer - use setASTNodeClass instead. setASTNodeType = setASTNodeClass def getASTNodeClass(self): return self._class def getTokenTypeToASTClassMap(self): return self._classmap def setTokenTypeToASTClassMap(self,amap): self._classmap = amap def error(self, e): import sys print >> sys.stderr, e def setTokenTypeASTNodeType(self, tokenType, className): """ Specify a mapping between a token type and a (AST) class. """ if not self._classmap: self._classmap = {} if not className: try: del self._classmap[tokenType] except: pass else: ### here we should also perform actions to ensure that ### a. class can be loaded ### b. class is a subclass of AST ### assert isinstance(className,type) assert issubclass(className,AST) ## a & b ### enter the class self._classmap[tokenType] = className def getASTNodeType(self,tokenType): """ For a given token type return the AST node type. First we lookup a mapping table, second we try _class and finally we resolve to "antlr.CommonAST". """ # first if self._classmap: try: c = self._classmap[tokenType] if c: return c except: pass # second if self._class: return self._class # default return CommonAST ### methods that have been moved to file scope - just listed ### here to be somewhat consistent with original API def dup(self,t): return antlr.dup(t,self) def dupList(self,t): return antlr.dupList(t,self) def dupTree(self,t): return antlr.dupTree(t,self) ### methods moved to other classes ### 1. makeASTRoot -> Parser ### 2. 
addASTChild -> Parser ### non-standard: create alias for longish method name maptype = setTokenTypeASTNodeType ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### ASTVisitor ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### class ASTVisitor(object): def __init__(self,*args): pass def visit(self,ast): pass ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ### static methods and variables ### ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx### ASTNULL = ASTNULLType() ### wh: moved from ASTFactory as there's nothing ASTFactory-specific ### in this method. def make(*nodes): if not nodes: return None for i in xrange(0,len(nodes)): node = nodes[i] if node: assert isinstance(node,AST) root = nodes[0] tail = None if root: root.setFirstChild(None) for i in xrange(1,len(nodes)): if not nodes[i]: continue if not root: root = tail = nodes[i] elif not tail: root.setFirstChild(nodes[i]) tail = root.getFirstChild() else: tail.setNextSibling(nodes[i]) tail = tail.getNextSibling() ### Chase tail to last sibling while tail.getNextSibling(): tail = tail.getNextSibling() return root def dup(t,factory): if not t: return None if factory: dup_t = factory.create(t.__class__) else: raise TypeError("dup function requires ASTFactory argument") dup_t.initialize(t) return dup_t def dupList(t,factory): result = dupTree(t,factory) nt = result while t: ## for each sibling of the root t = t.getNextSibling() nt.setNextSibling(dupTree(t,factory)) nt = nt.getNextSibling() return result def dupTree(t,factory): result = dup(t,factory) if t: result.setFirstChild(dupList(t.getFirstChild(),factory)) return result ###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx ### $Id: antlr.py 3750 2009-02-13 00:13:04Z sjmachin $ # Local Variables: *** # mode: python *** # py-indent-offset: 4 *** # End: ***
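
The BitSet above stores token types as bits packed into 64-bit words, using a right shift for the word index and a mask for the position inside the word. A minimal standalone sketch of that arithmetic (illustrative names, not part of the ANTLR runtime):

BITS = 64
LOG_BITS = 6                          # 2 ** 6 == 64
MOD_MASK = BITS - 1

def word_number(bit):
    return bit >> LOG_BITS            # which 64-bit word holds this bit

def bit_mask(bit):
    return 1 << (bit & MOD_MASK)      # the bit's position inside that word

data = [0]                            # the growable word array

def set_bit(bit):
    i = word_number(bit)
    while i >= len(data):             # grow on demand, as BitSet.set does
        data.append(0)
    data[i] |= bit_mask(bit)

def at(bit):
    i = word_number(bit)
    return i < len(data) and bool(data[i] & bit_mask(bit))

set_bit(3)
set_bit(70)                           # forces growth to a second word
assert at(3) and at(70) and not at(4)
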
mit
GitAngel/django
django/contrib/admin/templatetags/admin_urls.py
553
1812
from django import template
from django.contrib.admin.utils import quote
from django.core.urlresolvers import Resolver404, get_script_prefix, resolve
from django.utils.http import urlencode
from django.utils.six.moves.urllib.parse import parse_qsl, urlparse, urlunparse

register = template.Library()


@register.filter
def admin_urlname(value, arg):
    return 'admin:%s_%s_%s' % (value.app_label, value.model_name, arg)


@register.filter
def admin_urlquote(value):
    return quote(value)


@register.simple_tag(takes_context=True)
def add_preserved_filters(context, url, popup=False, to_field=None):
    opts = context.get('opts')
    preserved_filters = context.get('preserved_filters')

    parsed_url = list(urlparse(url))
    parsed_qs = dict(parse_qsl(parsed_url[4]))
    merged_qs = dict()

    if opts and preserved_filters:
        preserved_filters = dict(parse_qsl(preserved_filters))

        match_url = '/%s' % url.partition(get_script_prefix())[2]
        try:
            match = resolve(match_url)
        except Resolver404:
            pass
        else:
            current_url = '%s:%s' % (match.app_name, match.url_name)
            changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
            if changelist_url == current_url and '_changelist_filters' in preserved_filters:
                preserved_filters = dict(parse_qsl(preserved_filters['_changelist_filters']))

        merged_qs.update(preserved_filters)

    if popup:
        from django.contrib.admin.options import IS_POPUP_VAR
        merged_qs[IS_POPUP_VAR] = 1
    if to_field:
        from django.contrib.admin.options import TO_FIELD_VAR
        merged_qs[TO_FIELD_VAR] = to_field

    merged_qs.update(parsed_qs)

    parsed_url[4] = urlencode(merged_qs)
    return urlunparse(parsed_url)
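
The add_preserved_filters tag above works by parsing the outgoing URL's querystring, layering the preserved changelist filters underneath it, and re-encoding the result. A rough Python 2 sketch of just that merge step, with a hypothetical helper name and no Django dependencies:

# Python 2 sketch; parse_qsl/urlparse live in urlparse, urlencode in urllib.
from urllib import urlencode
from urlparse import parse_qsl, urlparse, urlunparse

def merge_query(url, extra):
    """Merge dict `extra` into url's querystring; params already on the URL win."""
    parts = list(urlparse(url))
    merged = dict(extra)
    merged.update(dict(parse_qsl(parts[4])))   # existing params take priority
    parts[4] = urlencode(merged)
    return urlunparse(parts)

url = merge_query('/admin/app/model/1/change/?_popup=1',
                  {'_changelist_filters': 'q=foo'})
assert '_popup=1' in url and '_changelist_filters=q%3Dfoo' in url
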
bsd-3-clause
tmm1/pygments.rb
vendor/pygments-main/pygments/lexers/rust.py
1
8235
# -*- coding: utf-8 -*- """ pygments.lexers.rust ~~~~~~~~~~~~~~~~~~~~ Lexers for the Rust language. :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from pygments.lexer import RegexLexer, include, bygroups, words, default from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ Number, Punctuation, Whitespace __all__ = ['RustLexer'] class RustLexer(RegexLexer): """ Lexer for the Rust programming language (version 1.47). .. versionadded:: 1.6 """ name = 'Rust' filenames = ['*.rs', '*.rs.in'] aliases = ['rust', 'rs'] mimetypes = ['text/rust', 'text/x-rust'] keyword_types = (words(( 'u8', 'u16', 'u32', 'u64', 'u128', 'i8', 'i16', 'i32', 'i64', 'i128', 'usize', 'isize', 'f32', 'f64', 'char', 'str', 'bool', ), suffix=r'\b'), Keyword.Type) builtin_funcs_types = (words(( 'Copy', 'Send', 'Sized', 'Sync', 'Unpin', 'Drop', 'Fn', 'FnMut', 'FnOnce', 'drop', 'Box', 'ToOwned', 'Clone', 'PartialEq', 'PartialOrd', 'Eq', 'Ord', 'AsRef', 'AsMut', 'Into', 'From', 'Default', 'Iterator', 'Extend', 'IntoIterator', 'DoubleEndedIterator', 'ExactSizeIterator', 'Option', 'Some', 'None', 'Result', 'Ok', 'Err', 'String', 'ToString', 'Vec', ), suffix=r'\b'), Name.Builtin) builtin_macros = (words(( 'asm', 'assert', 'assert_eq', 'assert_ne', 'cfg', 'column', 'compile_error', 'concat', 'concat_idents', 'dbg', 'debug_assert', 'debug_assert_eq', 'debug_assert_ne', 'env', 'eprint', 'eprintln', 'file', 'format', 'format_args', 'format_args_nl', 'global_asm', 'include', 'include_bytes', 'include_str', 'is_aarch64_feature_detected', 'is_arm_feature_detected', 'is_mips64_feature_detected', 'is_mips_feature_detected', 'is_powerpc64_feature_detected', 'is_powerpc_feature_detected', 'is_x86_feature_detected', 'line', 'llvm_asm', 'log_syntax', 'macro_rules', 'matches', 'module_path', 'option_env', 'panic', 'print', 'println', 'stringify', 'thread_local', 'todo', 'trace_macros', 'unimplemented', 'unreachable', 'vec', 'write', 'writeln', ), suffix=r'!'), Name.Function.Magic) tokens = { 'root': [ # rust allows a file to start with a shebang, but if the first line # starts with #![ then it's not a shebang but a crate attribute. (r'#![^[\r\n].*$', Comment.Preproc), default('base'), ], 'base': [ # Whitespace and Comments (r'\n', Whitespace), (r'\s+', Whitespace), (r'//!.*?\n', String.Doc), (r'///(\n|[^/].*?\n)', String.Doc), (r'//(.*?)\n', Comment.Single), (r'/\*\*(\n|[^/*])', String.Doc, 'doccomment'), (r'/\*!', String.Doc, 'doccomment'), (r'/\*', Comment.Multiline, 'comment'), # Macro parameters (r"""\$([a-zA-Z_]\w*|\(,?|\),?|,?)""", Comment.Preproc), # Keywords (words(('as', 'async', 'await', 'box', 'const', 'crate', 'dyn', 'else', 'extern', 'for', 'if', 'impl', 'in', 'loop', 'match', 'move', 'mut', 'pub', 'ref', 'return', 'static', 'super', 'trait', 'unsafe', 'use', 'where', 'while'), suffix=r'\b'), Keyword), (words(('abstract', 'become', 'do', 'final', 'macro', 'override', 'priv', 'typeof', 'try', 'unsized', 'virtual', 'yield'), suffix=r'\b'), Keyword.Reserved), (r'(true|false)\b', Keyword.Constant), (r'self\b', Name.Builtin.Pseudo), (r'mod\b', Keyword, 'modname'), (r'let\b', Keyword.Declaration), (r'fn\b', Keyword, 'funcname'), (r'(struct|enum|type|union)\b', Keyword, 'typename'), (r'(default)(\s+)(type|fn)\b', bygroups(Keyword, Text, Keyword)), keyword_types, (r'[sS]elf\b', Name.Builtin.Pseudo), # Prelude (taken from Rust's src/libstd/prelude.rs) builtin_funcs_types, builtin_macros, # Path seperators, so types don't catch them. 
(r'::\b', Text), # Types in positions. (r'(?::|->)', Text, 'typename'), # Labels (r'(break|continue)(\s*)(\'[A-Za-z_]\w*)?', bygroups(Keyword, Text.Whitespace, Name.Label)), # Character literals (r"""'(\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0""" r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""", String.Char), (r"""b'(\\['"\\nrt]|\\x[0-9a-fA-F]{2}|\\0""" r"""|\\u\{[0-9a-fA-F]{1,6}\}|.)'""", String.Char), # Binary literals (r'0b[01_]+', Number.Bin, 'number_lit'), # Octal literals (r'0o[0-7_]+', Number.Oct, 'number_lit'), # Hexadecimal literals (r'0[xX][0-9a-fA-F_]+', Number.Hex, 'number_lit'), # Decimal literals (r'[0-9][0-9_]*(\.[0-9_]+[eE][+\-]?[0-9_]+|' r'\.[0-9_]*(?!\.)|[eE][+\-]?[0-9_]+)', Number.Float, 'number_lit'), (r'[0-9][0-9_]*', Number.Integer, 'number_lit'), # String literals (r'b"', String, 'bytestring'), (r'"', String, 'string'), (r'b?r(#*)".*?"\1', String), # Lifetime names (r"'", Operator, 'lifetime'), # Operators and Punctuation (r'\.\.=?', Operator), (r'[{}()\[\],.;]', Punctuation), (r'[+\-*/%&|<>^!~@=:?]', Operator), # Identifiers (r'[a-zA-Z_]\w*', Name), # Raw identifiers (r'r#[a-zA-Z_]\w*', Name), # Attributes (r'#!?\[', Comment.Preproc, 'attribute['), ], 'comment': [ (r'[^*/]+', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline), ], 'doccomment': [ (r'[^*/]+', String.Doc), (r'/\*', String.Doc, '#push'), (r'\*/', String.Doc, '#pop'), (r'[*/]', String.Doc), ], 'modname': [ (r'\s+', Text), (r'[a-zA-Z_]\w*', Name.Namespace, '#pop'), default('#pop'), ], 'funcname': [ (r'\s+', Text), (r'[a-zA-Z_]\w*', Name.Function, '#pop'), default('#pop'), ], 'typename': [ (r'\s+', Text), (r'&', Keyword.Pseudo), (r"'", Operator, 'lifetime'), builtin_funcs_types, keyword_types, (r'[a-zA-Z_]\w*', Name.Class, '#pop'), default('#pop'), ], 'lifetime': [ (r"(static|_)", Name.Builtin), (r"[a-zA-Z_]+\w*", Name.Attribute), default('#pop'), ], 'number_lit': [ (r'[ui](8|16|32|64|size)', Keyword, '#pop'), (r'f(32|64)', Keyword, '#pop'), default('#pop'), ], 'string': [ (r'"', String, '#pop'), (r"""\\['"\\nrt]|\\x[0-7][0-9a-fA-F]|\\0""" r"""|\\u\{[0-9a-fA-F]{1,6}\}""", String.Escape), (r'[^\\"]+', String), (r'\\', String), ], 'bytestring': [ (r"""\\x[89a-fA-F][0-9a-fA-F]""", String.Escape), include('string'), ], 'attribute_common': [ (r'"', String, 'string'), (r'\[', Comment.Preproc, 'attribute['), (r'\(', Comment.Preproc, 'attribute('), ], 'attribute[': [ include('attribute_common'), (r'\];?', Comment.Preproc, '#pop'), (r'[^"\]]+', Comment.Preproc), ], 'attribute(': [ include('attribute_common'), (r'\);?', Comment.Preproc, '#pop'), (r'[^")]+', Comment.Preproc), ], }
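
A short usage sketch for the lexer above, assuming a working Pygments installation; highlight() and HtmlFormatter are standard Pygments entry points:

# Assumes Pygments is installed; RustLexer is the class defined above.
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers.rust import RustLexer

code = 'fn main() { println!("hello"); }'
html = highlight(code, RustLexer(), HtmlFormatter())
assert 'hello' in html
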
mit
UCL-RITS/django-shibboleth-remoteuser
shibboleth/views.py
10
2867
from django.conf import settings from django.contrib import auth from django.contrib.auth.decorators import login_required from django.core.exceptions import ImproperlyConfigured from django.http import HttpResponse from django.shortcuts import redirect from django.utils.decorators import method_decorator from django.views.generic import TemplateView from urllib import quote #Logout settings. from shibboleth.app_settings import LOGOUT_URL, LOGOUT_REDIRECT_URL, LOGOUT_SESSION_KEY class ShibbolethView(TemplateView): """ This is here to offer a Shib protected page that we can route users through to login. """ template_name = 'shibboleth/user_info.html' @method_decorator(login_required) def dispatch(self, request, *args, **kwargs): """ Django docs say to decorate the dispatch method for class based views. https://docs.djangoproject.com/en/dev/topics/auth/ """ return super(ShibbolethView, self).dispatch(request, *args, **kwargs) def get(self, request, **kwargs): """Process the request.""" next = self.request.GET.get('next', None) if next is not None: return redirect(next) return super(ShibbolethView, self).get(request) def get_context_data(self, **kwargs): context = super(ShibbolethView, self).get_context_data(**kwargs) context['user'] = self.request.user return context class ShibbolethLoginView(TemplateView): """ Pass the user to the Shibboleth login page. Some code borrowed from: https://github.com/stefanfoulis/django-class-based-auth-views. """ redirect_field_name = "target" def get(self, *args, **kwargs): #Remove session value that is forcing Shibboleth reauthentication. self.request.session.pop(LOGOUT_SESSION_KEY, None) login = settings.LOGIN_URL + '?target=%s' % quote(self.request.GET.get(self.redirect_field_name)) return redirect(login) class ShibbolethLogoutView(TemplateView): """ Pass the user to the Shibboleth logout page. Some code borrowed from: https://github.com/stefanfoulis/django-class-based-auth-views. """ redirect_field_name = "target" def get(self, *args, **kwargs): #Log the user out. auth.logout(self.request) #Set session key that middleware will use to force #Shibboleth reauthentication. self.request.session[LOGOUT_SESSION_KEY] = True #Get target url in order of preference. target = LOGOUT_REDIRECT_URL or\ quote(self.request.GET.get(self.redirect_field_name)) or\ quote(request.build_absolute_uri()) logout = LOGOUT_URL % target return redirect(logout)
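
ShibbolethLoginView above simply forwards the browser to the SP login endpoint with the post-login destination in the target parameter. A standalone sketch of that URL construction (the LOGIN_URL value here is illustrative, not taken from the app's settings):

from urllib import quote   # Python 2, matching the import in the module above

LOGIN_URL = '/Shibboleth.sso/Login'   # illustrative value, not from the app's settings

def shib_login_url(next_path):
    # Same shape as ShibbolethLoginView: send the user to the SP login
    # endpoint and tell it where to return to afterwards.
    return LOGIN_URL + '?target=%s' % quote(next_path)

assert shib_login_url('/profile/?tab=1') == '/Shibboleth.sso/Login?target=/profile/%3Ftab%3D1'
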
mit
v-iam/azure-sdk-for-python
azure-mgmt-web/azure/mgmt/web/models/recover_response.py
3
1933
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .resource import Resource


class RecoverResponse(Resource):
    """Response for an app recovery request.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource Id.
    :vartype id: str
    :param name: Resource Name.
    :type name: str
    :param kind: Kind of resource.
    :type kind: str
    :param location: Resource Location.
    :type location: str
    :param type: Resource type.
    :type type: str
    :param tags: Resource tags.
    :type tags: dict
    :ivar operation_id: ID of the recovery operation. Can be used to check
     the status of the corresponding operation.
    :vartype operation_id: str
    """

    _validation = {
        'id': {'readonly': True},
        'location': {'required': True},
        'operation_id': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'kind': {'key': 'kind', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'operation_id': {'key': 'properties.operationId', 'type': 'str'},
    }

    def __init__(self, location, name=None, kind=None, type=None, tags=None):
        super(RecoverResponse, self).__init__(name=name, kind=kind, location=location, type=type, tags=tags)
        self.operation_id = None
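
A possible usage sketch for the model above, assuming the azure-mgmt-web package is installed and re-exports the class from azure.mgmt.web.models; only location is required, and operation_id stays read-only until the service fills it in:

# Assumption: the package exposes the model as azure.mgmt.web.models.RecoverResponse.
from azure.mgmt.web.models import RecoverResponse

resp = RecoverResponse(location='westus', name='my-site', tags={'env': 'dev'})
assert resp.location == 'westus'
assert resp.operation_id is None   # read-only: populated by the service, not the caller
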
mit
silviolima/EstudoAppengine
tekton/tekton-master/src/tekton/gae/middleware/email_errors.py
4
2552
# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import json import logging import traceback import time from google.appengine.api import app_identity, mail, capabilities from google.appengine.runtime import DeadlineExceededError from tekton.router import PathNotFound def get_apis_statuses(e): if not isinstance(e, DeadlineExceededError): return {} t1 = time.time() statuses = { 'blobstore': capabilities.CapabilitySet('blobstore').is_enabled(), 'datastore_v3': capabilities.CapabilitySet('datastore_v3').is_enabled(), 'datastore_v3_write': capabilities.CapabilitySet('datastore_v3', ['write']).is_enabled(), 'images': capabilities.CapabilitySet('images').is_enabled(), 'mail': capabilities.CapabilitySet('mail').is_enabled(), 'memcache': capabilities.CapabilitySet('memcache').is_enabled(), 'taskqueue': capabilities.CapabilitySet('taskqueue').is_enabled(), 'urlfetch': capabilities.CapabilitySet('urlfetch').is_enabled(), } t2 = time.time() statuses['time'] = t2 - t1 return statuses def send_error_to_admins(exception, handler, write_tmpl): import settings # workaround. See https://github.com/renzon/zenwarch/issues/3 tb = traceback.format_exc() errmsg = exception.message logging.error(errmsg) logging.error(tb) write_tmpl("/templates/error.html") appid = app_identity.get_application_id() subject = 'ERROR in %s: [%s] %s' % (appid, handler.request.path, errmsg) body = """ ------------- request ------------ %s ---------------------------------- ------------- GET params --------- %s ---------------------------------- ----------- POST params ---------- %s ---------------------------------- ----------- traceback ------------ %s ---------------------------------- """ % (handler.request, handler.request.GET, handler.request.POST, tb) body += 'API statuses = ' + json.dumps(get_apis_statuses(exception), indent=4) mail.send_mail_to_admins(sender=settings.SENDER_EMAIL, subject=subject, body=body) def execute(next_process, handler, dependencies, **kwargs): try: next_process(dependencies, **kwargs) except PathNotFound, e: handler.response.set_status(404) send_error_to_admins(e, handler, dependencies['_write_tmpl']) except BaseException, e: handler.response.status_code = 400 send_error_to_admins(e, handler, dependencies['_write_tmpl'])
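
The execute() middleware above wraps the next processing step and turns exceptions into admin e-mails plus an HTTP error status. A stripped-down sketch of the same trap-and-report pattern, with a generic report callback instead of App Engine mail:

def execute(next_process, report_error, **kwargs):
    # Run the next step; on failure, hand the exception to a reporter.
    # The real middleware distinguishes PathNotFound (404) from everything
    # else (400) and e-mails the traceback plus API statuses to the admins.
    try:
        next_process(**kwargs)
    except Exception as e:
        report_error(e)

def failing_step(**kwargs):
    raise ValueError('boom')

errors = []
execute(failing_step, errors.append)
assert isinstance(errors[0], ValueError)
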
mit
zielmicha/freeciv-android
lib/freeciv/maptiles.py
4
6047
import ui import graphics import time import contextlib from ui import stream from ui import ctrl from client import freeciv SELECT_POPUP = 0 class MapWidget(ui.Widget): def __init__(self, client): self.client = client self.size = (0, 0) self.drawer = TileDrawer(client) self.tile_size = 512 self.tile_storage = {} self.tile_client_cache = {} # corresponds to client's one self.tile_map_pos = {} self.tile_draw_time = {} self.screen_pos = (0, 0) self.screen_tiles = (2500 // self.tile_size + 1, 1800 // self.tile_size + 1) self.redraw_queue = set() ctrl.bind_event('tile_posnotify', self.pos_notify) ctrl.bind_event('tile_init', self.client_init) ctrl.bind_event('tile_getconfig', self.send_config) freeciv.register(self.global_update_tile) freeciv.register(self.global_set_mapview_center) freeciv.register(self.global_update_everything) def send_config(self, m): stream.add_message({'type': 'tile_config', 'tile_size': self.tile_size}) def back(self): self.client.escape() def event(self, ev): if ev.type in (graphics.const.KEYDOWN, graphics.const.KEYUP): self.client.key_event(ev.type, ev.key) elif ev.type == graphics.const.MOUSEBUTTONDOWN: try: pos = ev.data['tile_pos'] except (AttributeError, KeyError): pass else: self.click(pos) def click(self, pos): x, y = pos self.drawer.click(x, y) def draw(self, surf, pos): surf.draw_rect((255, 255, 255, 0), pos + self.size, blend=graphics.MODE_NONE) stream.add_message({'type': 'tile', 'draw_at': pos + self.size}) self.tick() ui.layer_hooks.execute(id='map', surf=None, pos=pos, offset=(0, 0), size=self.size) def tick(self): need_redraw = self.redraw_queue & set(self.get_screen_tiles()) can_redraw = 5 if self.redraw_queue: print 'queue', len(self.redraw_queue), 'need', len(need_redraw) for tile in list(need_redraw)[:can_redraw]: self.update_tile(*tile) can_redraw -= len(need_redraw) for tile in list(self.redraw_queue)[:can_redraw]: self.update_tile(*tile) for i, j in self.get_screen_tiles(): self.push_tile(i, j) def get_screen_tiles(self): tile_pos = self.screen_pos[0] // self.tile_size, \ self.screen_pos[1] // self.tile_size return [ (i * self.tile_size, j * self.tile_size) for i in range_around(tile_pos[0], self.screen_tiles[0]) for j in range_around(tile_pos[1], self.screen_tiles[1]) ] def global_update_tile(self, x, y): # find nearest tiles by_dist = sorted(self.tile_map_pos.items(), key=lambda (k, v): abs(v[0] - x) + abs(v[1] - y) if v else 100000) by_dist = by_dist[:5] print 'update', by_dist # and queue update for k, v in by_dist: self.redraw_queue.add(k) def global_update_everything(self): print 'update everything' self.redraw_queue |= set(self.tile_storage.keys()) def global_set_mapview_center(self, x, y): stream.add_message({'type': 'tiles_center_at', 'pos': (x, y)}) def push_tile(self, x, y): self.init_tile(x, y) new_data = self.tile_storage[x, y] if new_data != self.tile_client_cache.get((x, y)): self.tile_client_cache[x, y] = new_data stream.add_message({'type': 'tile', 'id': '%d,%d' % (x, y), 'data': new_data}) def init_tile(self, x, y): if (x, y) not in self.tile_storage: self.update_tile(x, y) def update_tile(self, x, y): start = time.time() img, tile_pos = self.drawer.draw_fragment((x, y, self.tile_size, self.tile_size)) print 'updated %s in %d ms' % ((x, y), (time.time() - start) * 1000) new_data = stream.get_texture_data(img) self.tile_storage[x, y] = new_data self.tile_map_pos[x, y] = tile_pos self.tile_draw_time[x, y] = time.time() self.redraw_queue -= {(x, y)} def client_init(self, message): self.tile_client_cache = {} def pos_notify(self, 
message): x, y = message['pos'] self.screen_pos = -x, -y def range_around(x, phi): return range(x - phi/2, x - phi/2 + phi) def nround(a, r): return int(a // r) * r class TileDrawer(object): def __init__(self, client): self.map_size = (100, 100) self.client = client def draw_fragment(self, rect): with self.save_state(): self.set_map_size((rect[2], rect[3])) self.set_map_origin(rect[0], rect[1]) surf = graphics.create_surface(rect[2], rect[3]) surf.fill((255, 0, 255, 255), blend=graphics.MODE_NONE) self.client.draw_map(surf, (0, 0)) tile_pos = freeciv.func.py_canvas_to_map(rect[2] / 2, rect[3] / 2) return surf, tile_pos def set_map_size(self, size): self.map_size = size self.client.set_map_size(size) def set_map_origin(self, x, y): freeciv.func.base_set_mapview_origin(x, y) def click(self, x, y): with self.save_state(): self.set_map_origin(x, y) freeciv.func.action_button_pressed(0, 0, SELECT_POPUP) @contextlib.contextmanager def save_state(self): origin = freeciv.func.get_map_view_origin() size = self.map_size try: yield finally: self.map_size = size freeciv.func.base_set_mapview_origin(origin[0], origin[1])
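
MapWidget above keys its tile cache on screen coordinates snapped to a tile_size grid and refreshes a window of tiles around the current centre. A standalone sketch of that bookkeeping (function names are illustrative):

def range_around(x, phi):
    # phi consecutive values centred (roughly) on x, as in the widget above
    return range(x - phi // 2, x - phi // 2 + phi)

def visible_tiles(screen_pos, tile_size, tiles_w, tiles_h):
    # Snap the screen position to the tile grid and list the surrounding window.
    tx, ty = screen_pos[0] // tile_size, screen_pos[1] // tile_size
    return [(i * tile_size, j * tile_size)
            for i in range_around(tx, tiles_w)
            for j in range_around(ty, tiles_h)]

tiles = visible_tiles((100, 100), 512, 3, 3)
assert (0, 0) in tiles and len(tiles) == 9
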
gpl-2.0
Distrotech/intellij-community
python/lib/Lib/site-packages/django/contrib/gis/tests/test_spatialrefsys.py
94
6686
from django.db import connection from django.contrib.gis.tests.utils import mysql, no_mysql, oracle, postgis, spatialite from django.utils import unittest test_srs = ({'srid' : 4326, 'auth_name' : ('EPSG', True), 'auth_srid' : 4326, 'srtext' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]', 'srtext14' : 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]', 'proj4' : '+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ', 'spheroid' : 'WGS 84', 'name' : 'WGS 84', 'geographic' : True, 'projected' : False, 'spatialite' : True, 'ellipsoid' : (6378137.0, 6356752.3, 298.257223563), # From proj's "cs2cs -le" and Wikipedia (semi-minor only) 'eprec' : (1, 1, 9), }, {'srid' : 32140, 'auth_name' : ('EPSG', False), 'auth_srid' : 32140, 'srtext' : 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","32140"]]', 'srtext14': 'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",DATUM["North_American_Datum_1983",SPHEROID["GRS 1980",6378137,298.257222101,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6269"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4269"]],UNIT["metre",1,AUTHORITY["EPSG","9001"]],PROJECTION["Lambert_Conformal_Conic_2SP"],PARAMETER["standard_parallel_1",30.28333333333333],PARAMETER["standard_parallel_2",28.38333333333333],PARAMETER["latitude_of_origin",27.83333333333333],PARAMETER["central_meridian",-99],PARAMETER["false_easting",600000],PARAMETER["false_northing",4000000],AUTHORITY["EPSG","32140"],AXIS["X",EAST],AXIS["Y",NORTH]]', 'proj4' : '+proj=lcc +lat_1=30.28333333333333 +lat_2=28.38333333333333 +lat_0=27.83333333333333 +lon_0=-99 +x_0=600000 +y_0=4000000 +ellps=GRS80 +datum=NAD83 +units=m +no_defs ', 'spheroid' : 'GRS 1980', 'name' : 'NAD83 / Texas South Central', 'geographic' : False, 'projected' : True, 'spatialite' : False, 'ellipsoid' : (6378137.0, 6356752.31414, 298.257222101), # From proj's "cs2cs -le" and Wikipedia (semi-minor only) 'eprec' : (1, 5, 10), }, ) if oracle: from django.contrib.gis.db.backends.oracle.models import SpatialRefSys elif postgis: from django.contrib.gis.db.backends.postgis.models import SpatialRefSys elif spatialite: from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys class SpatialRefSysTest(unittest.TestCase): @no_mysql def test01_retrieve(self): "Testing retrieval of SpatialRefSys model objects." 
for sd in test_srs: srs = SpatialRefSys.objects.get(srid=sd['srid']) self.assertEqual(sd['srid'], srs.srid) # Some of the authority names are borked on Oracle, e.g., SRID=32140. # also, Oracle Spatial seems to add extraneous info to fields, hence the # the testing with the 'startswith' flag. auth_name, oracle_flag = sd['auth_name'] if postgis or (oracle and oracle_flag): self.assertEqual(True, srs.auth_name.startswith(auth_name)) self.assertEqual(sd['auth_srid'], srs.auth_srid) # No proj.4 and different srtext on oracle backends :( if postgis: if connection.ops.spatial_version >= (1, 4, 0): srtext = sd['srtext14'] else: srtext = sd['srtext'] self.assertEqual(srtext, srs.wkt) self.assertEqual(sd['proj4'], srs.proj4text) @no_mysql def test02_osr(self): "Testing getting OSR objects from SpatialRefSys model objects." for sd in test_srs: sr = SpatialRefSys.objects.get(srid=sd['srid']) self.assertEqual(True, sr.spheroid.startswith(sd['spheroid'])) self.assertEqual(sd['geographic'], sr.geographic) self.assertEqual(sd['projected'], sr.projected) if not (spatialite and not sd['spatialite']): # Can't get 'NAD83 / Texas South Central' from PROJ.4 string # on SpatiaLite self.assertEqual(True, sr.name.startswith(sd['name'])) # Testing the SpatialReference object directly. if postgis or spatialite: srs = sr.srs self.assertEqual(sd['proj4'], srs.proj4) # No `srtext` field in the `spatial_ref_sys` table in SpatiaLite if not spatialite: if connection.ops.spatial_version >= (1, 4, 0): srtext = sd['srtext14'] else: srtext = sd['srtext'] self.assertEqual(srtext, srs.wkt) @no_mysql def test03_ellipsoid(self): "Testing the ellipsoid property." for sd in test_srs: # Getting the ellipsoid and precision parameters. ellps1 = sd['ellipsoid'] prec = sd['eprec'] # Getting our spatial reference and its ellipsoid srs = SpatialRefSys.objects.get(srid=sd['srid']) ellps2 = srs.ellipsoid for i in range(3): param1 = ellps1[i] param2 = ellps2[i] self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i]) def suite(): s = unittest.TestSuite() s.addTest(unittest.makeSuite(SpatialRefSysTest)) return s def run(verbosity=2): unittest.TextTestRunner(verbosity=verbosity).run(suite())
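
The ellipsoid tuples in test_srs are (semi-major axis, semi-minor axis, inverse flattening), and the semi-minor axis follows from the other two, which is why assertAlmostEqual with a few digits of precision is enough. A quick cross-check for the WGS 84 entry:

# WGS 84 entry: (semi-major a, semi-minor b, inverse flattening rf); b = a * (1 - 1/rf).
a, b, rf = 6378137.0, 6356752.3, 298.257223563
assert abs(a * (1 - 1 / rf) - b) < 0.1
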
apache-2.0
MelanieBittl/dolfin
demo/undocumented/functional/python/demo_functional.py
3
1638
"""This demo program computes the value of the functional M(v) = int v^2 + (grad v)^2 dx on the unit square for v = sin(x) + cos(y). The exact value of the functional is M(v) = 2 + 2*sin(1)*(1 - cos(1)) The functional M corresponds to the energy norm for a simple reaction-diffusion equation.""" # Copyright (C) 2007 Kristian B. Oelgaard # # This file is part of DOLFIN. # # DOLFIN is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DOLFIN is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFIN. If not, see <http://www.gnu.org/licenses/>. # # Modified by Anders Logg, 2008. # # First added: 2007-11-14 # Last changed: 2012-11-12 from __future__ import print_function from dolfin import * # Create mesh and define function space mesh = UnitSquareMesh(16, 16) V = FunctionSpace(mesh, "CG", 2) # Define the function v v = Expression("sin(x[0]) + cos(x[1])", element=FiniteElement("CG", triangle, 2)) # Define functional M = (v*v + dot(grad(v), grad(v)))*dx(mesh) # Evaluate functional value = assemble(M) exact_value = 2.0 + 2.0*sin(1.0)*(1.0 - cos(1.0)) print("The energy norm of v is: %.15g" % value) print("It should be: %.15g" % exact_value)
gpl-3.0
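
For the DOLFIN functional demo above, the quoted exact value can be cross-checked without FEniCS by plain midpoint quadrature of v^2 + |grad v|^2 for v = sin(x) + cos(y); a small illustrative check:

from math import sin, cos

n = 400                      # midpoint rule on an n x n grid over the unit square
h = 1.0 / n
total = 0.0
for i in range(n):
    for j in range(n):
        x, y = (i + 0.5) * h, (j + 0.5) * h
        v = sin(x) + cos(y)
        grad2 = cos(x) ** 2 + sin(y) ** 2        # |grad v|^2 for this v
        total += (v * v + grad2) * h * h

exact = 2.0 + 2.0 * sin(1.0) * (1.0 - cos(1.0))
assert abs(total - exact) < 1e-3
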
CalSol/Impulse
Tracker/register.py
1
3076
# Copyright 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Request handlers for the OAuth authorization process.""" __author__ = 'Ka-Ping Yee <[email protected]>' from google.appengine.ext import db import datetime import latitude import model import oauth import oauth_webapp import utils class RegisterHandler(utils.Handler): """Registration and Latitude API authorization for new users.""" def get(self): self.require_user() nickname = self.request.get('nickname', '') next = self.request.get('next', '') duration = utils.describe_delta( datetime.timedelta(0, int(self.request.get('duration', '0')))) if not nickname: self.render('templates/register.html', next=next, duration=duration, nickname=self.user.nickname().split('@')[0]) else: # Then proceed to the OAuth authorization page. parameters = { 'scope': latitude.LatitudeOAuthClient.SCOPE, 'domain': model.Config.get('oauth_consumer_key'), 'granularity': 'best', 'location': 'current' } callback_url = self.request.host_url + '/_oauth_callback?' + \ utils.urlencode(nickname=nickname, next=next) oauth_webapp.redirect_to_authorization_page( self, latitude.LatitudeOAuthClient(utils.oauth_consumer), callback_url, parameters) class OAuthCallbackHandler(utils.Handler): """Handler for the OAuth callback after a user has granted permission.""" def get(self): self.require_user() next = self.request.get('next', '') access_token = oauth_webapp.handle_authorization_finished( self, latitude.LatitudeOAuthClient(utils.oauth_consumer)) # Store a new Member object, including the user's current location. member = model.Member.create(self.user) member.nickname = self.request.get('nickname') member.latitude_key = access_token.key member.latitude_secret = access_token.secret member.location = utils.get_location(member) member.location_time = datetime.datetime.utcnow() if not member.location: raise utils.ErrorMessage(400, ''' Sorry, Google Latitude has no current location for you. ''') member.put() raise utils.Redirect(next or '/') if __name__ == '__main__': utils.run([ ('/_register', RegisterHandler), ('/_oauth_callback', OAuthCallbackHandler) ])
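
RegisterHandler above builds the OAuth callback URL by appending an encoded nickname/next querystring to the host URL. A standalone sketch of that step (helper name is illustrative; utils.urlencode in the app is assumed to behave like urllib's):

from urllib import urlencode   # Python 2, as in the handlers above

def callback_url(host_url, nickname, next_url):
    # Mirrors RegisterHandler: the OAuth provider redirects back here with
    # the nickname and post-login destination preserved in the query.
    return host_url + '/_oauth_callback?' + urlencode(
        {'nickname': nickname, 'next': next_url})

url = callback_url('http://localhost:8080', 'ping', '/map')
assert url.startswith('http://localhost:8080/_oauth_callback?')
assert 'nickname=ping' in url and 'next=%2Fmap' in url
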
apache-2.0
livc/Paddle
python/paddle/utils/preprocess_util.py
18
13149
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import math import cPickle as pickle import random import collections def save_file(data, filename): """ Save data into pickle format. data: the data to save. filename: the output filename. """ pickle.dump(data, open(filename, 'wb'), protocol=pickle.HIGHEST_PROTOCOL) def save_list(l, outfile): """ Save a list of string into a text file. There is one line for each string. l: the list of string to save outfile: the output file """ open(outfile, "w").write("\n".join(l)) def exclude_pattern(f): """ Return whether f is in the exlucde pattern. Exclude the files that starts with . or ends with ~. """ return f.startswith(".") or f.endswith("~") def list_dirs(path): """ Return a list of directories in path. Exclude all the directories that start with '.'. path: the base directory to search over. """ return [ os.path.join(path, d) for d in next(os.walk(path))[1] if not exclude_pattern(d) ] def list_images(path, exts=set(["jpg", "png", "bmp", "jpeg"])): """ Return a list of images in path. path: the base directory to search over. exts: the extensions of the images to find. """ return [os.path.join(path, d) for d in os.listdir(path) \ if os.path.isfile(os.path.join(path, d)) and not exclude_pattern(d)\ and os.path.splitext(d)[-1][1:] in exts] def list_files(path): """ Return a list of files in path. path: the base directory to search over. exts: the extensions of the images to find. """ return [os.path.join(path, d) for d in os.listdir(path) \ if os.path.isfile(os.path.join(path, d)) and not exclude_pattern(d)] def get_label_set_from_dir(path): """ Return a dictionary of the labels and label ids from a path. Assume each direcotry in the path corresponds to a unique label. The keys of the dictionary is the label name. The values of the dictionary is the label id. """ dirs = list_dirs(path) return dict([(os.path.basename(d), i) for i, d in enumerate(sorted(dirs))]) class Label: """ A class of label data. """ def __init__(self, label, name): """ label: the id of the label. name: the name of the label. """ self.label = label self.name = name def convert_to_paddle_format(self): """ convert the image into the paddle batch format. """ return int(self.label) def __hash__(self): return hash((self.label)) class Dataset: """ A class to represent a dataset. A dataset contains a set of items. Each item contains multiple slots of data. For example: in image classification dataset, each item contains two slot, The first slot is an image, and the second slot is a label. """ def __init__(self, data, keys): """ data: a list of data. Each data is a tuple containing multiple slots of data. Each slot is an object with convert_to_paddle_format function. keys: contains a list of keys for all the slots. """ self.data = data self.keys = keys def check_valid(self): for d in self.data: assert (len(d) == len(self.keys)) def permute(self, key_id, num_per_batch): """ Permuate data for batching. It supports two types now: 1. 
if key_id == None, the batching process is completely random. 2. if key_id is not None. The batching process Permuate the data so that the key specified by key_id are uniformly distributed in batches. See the comments of permute_by_key for details. """ if key_id is None: self.uniform_permute() else: self.permute_by_key(key_id, num_per_batch) def uniform_permute(self): """ Permuate the data randomly. """ random.shuffle(self.data) def permute_by_key(self, key_id, num_per_batch): """ Permuate the data so that the key specified by key_id are uniformly distributed in batches. For example: if we have three labels, and the number of data for each label are 100, 200, and 300, respectively. The number of batches is 4. Then, the number of data for these labels is 25, 50, and 75. """ # Store the indices of the data that has the key value # specified by key_id. keyvalue_indices = collections.defaultdict(list) for idx in range(len(self.data)): keyvalue_indices[self.data[idx][key_id].label].append(idx) for k in keyvalue_indices: random.shuffle(keyvalue_indices[k]) num_data_per_key_batch = \ math.ceil(num_per_batch / float(len(keyvalue_indices.keys()))) if num_data_per_key_batch < 2: raise Exception("The number of data in a batch is too small") permuted_data = [] keyvalue_readpointer = collections.defaultdict(int) while len(permuted_data) < len(self.data): for k in keyvalue_indices: begin_idx = keyvalue_readpointer[k] end_idx = int( min(begin_idx + num_data_per_key_batch, len(keyvalue_indices[k]))) print "begin_idx, end_idx" print begin_idx, end_idx for idx in range(begin_idx, end_idx): permuted_data.append(self.data[keyvalue_indices[k][idx]]) keyvalue_readpointer[k] = end_idx self.data = permuted_data class DataBatcher: """ A class that is used to create batches for both training and testing datasets. """ def __init__(self, train_data, test_data, label_set): """ train_data, test_data: Each one is a dataset object repesenting training and testing data, respectively. label_set: a dictionary storing the mapping from label name to label id. """ self.train_data = train_data self.test_data = test_data self.label_set = label_set self.num_per_batch = 5000 assert (self.train_data.keys == self.test_data.keys) def create_batches_and_list(self, output_path, train_list_name, test_list_name, label_set_name): """ Create batches for both training and testing objects. It also create train.list and test.list to indicate the list of the batch files for training and testing data, respectively. """ train_list = self.create_batches(self.train_data, output_path, "train_", self.num_per_batch) test_list = self.create_batches(self.test_data, output_path, "test_", self.num_per_batch) save_list(train_list, os.path.join(output_path, train_list_name)) save_list(test_list, os.path.join(output_path, test_list_name)) save_file(self.label_set, os.path.join(output_path, label_set_name)) def create_batches(self, data, output_path, prefix="", num_data_per_batch=5000): """ Create batches for a Dataset object. data: the Dataset object to process. output_path: the output path of the batches. prefix: the prefix of each batch. num_data_per_batch: number of data in each batch. 
""" num_batches = int(math.ceil(len(data.data) / float(num_data_per_batch))) batch_names = [] data.check_valid() num_slots = len(data.keys) for i in range(num_batches): batch_name = os.path.join(output_path, prefix + "batch_%03d" % i) out_data = dict([(k, []) for k in data.keys]) begin_idx = i * num_data_per_batch end_idx = min((i + 1) * num_data_per_batch, len(data.data)) for j in range(begin_idx, end_idx): for slot_id in range(num_slots): out_data[data.keys[slot_id]].\ append(data.data[j][slot_id].convert_to_paddle_format()) save_file(out_data, batch_name) batch_names.append(batch_name) return batch_names class DatasetCreater(object): """ A virtual class for creating datasets. The derived clasas needs to implemnt the following methods: - create_dataset() - create_meta_file() """ def __init__(self, data_path): """ data_path: the path to store the training data and batches. train_dir_name: relative training data directory. test_dir_name: relative testing data directory. batch_dir_name: relative batch directory. num_per_batch: the number of data in a batch. meta_filename: the filename of the meta file. train_list_name: training batch list name. test_list_name: testing batch list name. label_set: label set name. overwrite: whether to overwrite the files if the batches are already in the given path. """ self.data_path = data_path self.train_dir_name = 'train' self.test_dir_name = 'test' self.batch_dir_name = 'batches' self.num_per_batch = 50000 self.meta_filename = "batches.meta" self.train_list_name = "train.list" self.test_list_name = "test.list" self.label_set_name = "labels.pkl" self.output_path = os.path.join(self.data_path, self.batch_dir_name) self.overwrite = False self.permutate_key = "labels" self.from_list = False def create_meta_file(self, data): """ Create a meta file from training data. data: training data given in a Dataset format. """ raise NotImplementedError def create_dataset(self, path): """ Create a data set object from a path. It will use directory structure or a file list to determine dataset if self.from_list is True. Otherwise, it will uses a file list to determine the datset. path: the path of the dataset. return a tuple of Dataset object, and a mapping from lable set to label id. """ if self.from_list: return self.create_dataset_from_list(path) else: return self.create_dataset_from_dir(path) def create_dataset_from_list(self, path): """ Create a data set object from a path. It will uses a file list to determine the datset. path: the path of the dataset. return a tuple of Dataset object, and a mapping from lable set to label id """ raise NotImplementedError def create_dataset_from_dir(self, path): """ Create a data set object from a path. It will use directory structure or a file list to determine dataset if self.from_list is True. path: the path of the dataset. return a tuple of Dataset object, and a mapping from lable set to label id """ raise NotImplementedError def create_batches(self): """ create batches and meta file. 
""" train_path = os.path.join(self.data_path, self.train_dir_name) test_path = os.path.join(self.data_path, self.test_dir_name) out_path = os.path.join(self.data_path, self.batch_dir_name) if not os.path.exists(out_path): os.makedirs(out_path) if (self.overwrite or not os.path.exists( os.path.join(out_path, self.train_list_name))): train_data, train_label_set = \ self.create_dataset(train_path) test_data, test_label_set = \ self.create_dataset(test_path) train_data.permute( self.keys.index(self.permutate_key), self.num_per_batch) assert (train_label_set == test_label_set) data_batcher = DataBatcher(train_data, test_data, train_label_set) data_batcher.num_per_batch = self.num_per_batch data_batcher.create_batches_and_list( self.output_path, self.train_list_name, self.test_list_name, self.label_set_name) self.num_classes = len(train_label_set.keys()) self.create_meta_file(train_data) return out_path
apache-2.0
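A minimal usage sketch for the batching utilities in the record above. It is hypothetical: the FloatFeature slot class and the "features"/"labels" keys are illustrative, and only the Dataset and Label classes defined in that file are assumed to be in scope.

class FloatFeature(object):
    """A toy data slot; like Label, it exposes convert_to_paddle_format."""
    def __init__(self, value):
        self.value = value

    def convert_to_paddle_format(self):
        return float(self.value)

# 30 items across 3 labels; slot 0 is the feature, slot 1 the label.
items = [(FloatFeature(i * 0.1), Label(i % 3, str(i % 3))) for i in range(30)]
dataset = Dataset(items, ["features", "labels"])
dataset.check_valid()
# Spread the 3 labels evenly over batches of 6 (2 items per label per batch).
dataset.permute(key_id=1, num_per_batch=6)

With key_id=1, permute_by_key groups item indices by the Label in slot 1 and interleaves them, so each written batch carries a roughly uniform label mix.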
erikr/django
django/db/migrations/operations/models.py
12
33007
from __future__ import unicode_literals from django.db import models from django.db.migrations.operations.base import Operation from django.db.migrations.state import ModelState from django.db.models.options import normalize_together from django.utils import six from django.utils.functional import cached_property from .fields import ( AddField, AlterField, FieldOperation, RemoveField, RenameField, ) def _check_for_duplicates(arg_name, objs): used_vals = set() for val in objs: if val in used_vals: raise ValueError( "Found duplicate value %s in CreateModel %s argument." % (val, arg_name) ) used_vals.add(val) class ModelOperation(Operation): def __init__(self, name): self.name = name @cached_property def name_lower(self): return self.name.lower() def references_model(self, name, app_label=None): return name.lower() == self.name_lower def reduce(self, operation, in_between, app_label=None): return ( super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or not operation.references_model(self.name, app_label) ) class CreateModel(ModelOperation): """ Create a model's table. """ serialization_expand_args = ['fields', 'options', 'managers'] def __init__(self, name, fields, options=None, bases=None, managers=None): self.fields = fields self.options = options or {} self.bases = bases or (models.Model,) self.managers = managers or [] super(CreateModel, self).__init__(name) # Sanity-check that there are no duplicated field names, bases, or # manager names _check_for_duplicates('fields', (name for name, _ in self.fields)) _check_for_duplicates('bases', ( base._meta.label_lower if hasattr(base, '_meta') else base.lower() if isinstance(base, six.string_types) else base for base in self.bases )) _check_for_duplicates('managers', (name for name, _ in self.managers)) def deconstruct(self): kwargs = { 'name': self.name, 'fields': self.fields, } if self.options: kwargs['options'] = self.options if self.bases and self.bases != (models.Model,): kwargs['bases'] = self.bases if self.managers and self.managers != [('objects', models.Manager())]: kwargs['managers'] = self.managers return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): state.add_model(ModelState( app_label, self.name, list(self.fields), dict(self.options), tuple(self.bases), list(self.managers), )) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.create_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.delete_model(model) def describe(self): return "Create %smodel %s" % ("proxy " if self.options.get("proxy", False) else "", self.name) def references_model(self, name, app_label=None): name_lower = name.lower() if name_lower == self.name_lower: return True # Check we didn't inherit from the model models_to_check = [ base for base in self.bases if base is not models.Model and isinstance(base, (models.base.ModelBase, six.string_types)) ] # Check we have no FKs/M2Ms with it for fname, field in self.fields: if field.remote_field: models_to_check.append(field.remote_field.model) # Now go over all the models and check against them for model in models_to_check: model_app_label, model_name = self.model_to_key(model) if model_name.lower() == name_lower: if 
app_label is None or not model_app_label or model_app_label == app_label: return True return False def model_to_key(self, model): """ Take either a model class or an "app_label.ModelName" string and return (app_label, object_name). """ if isinstance(model, six.string_types): return model.split(".", 1) else: return model._meta.app_label, model._meta.object_name def reduce(self, operation, in_between, app_label=None): if (isinstance(operation, DeleteModel) and self.name_lower == operation.name_lower and not self.options.get("proxy", False)): return [] elif isinstance(operation, RenameModel) and self.name_lower == operation.old_name_lower: return [ CreateModel( operation.new_name, fields=self.fields, options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower: if isinstance(operation, AddField): # Don't allow optimizations of FKs through models they reference if hasattr(operation.field, "remote_field") and operation.field.remote_field: for between in in_between: # Check that it doesn't point to the model app_label, object_name = self.model_to_key(operation.field.remote_field.model) if between.references_model(object_name, app_label): return False # Check that it's not through the model if getattr(operation.field.remote_field, "through", None): app_label, object_name = self.model_to_key(operation.field.remote_field.through) if between.references_model(object_name, app_label): return False return [ CreateModel( self.name, fields=self.fields + [(operation.name, operation.field)], options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, AlterField): return [ CreateModel( self.name, fields=[ (n, operation.field if n == operation.name else v) for n, v in self.fields ], options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, RemoveField): return [ CreateModel( self.name, fields=[ (n, v) for n, v in self.fields if n.lower() != operation.name_lower ], options=self.options, bases=self.bases, managers=self.managers, ), ] elif isinstance(operation, RenameField): return [ CreateModel( self.name, fields=[ (operation.new_name if n == operation.old_name else n, v) for n, v in self.fields ], options=self.options, bases=self.bases, managers=self.managers, ), ] return super(CreateModel, self).reduce(operation, in_between, app_label=app_label) class DeleteModel(ModelOperation): """ Drops a model's table. """ def deconstruct(self): kwargs = { 'name': self.name, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): state.remove_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.delete_model(model) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.create_model(model) def describe(self): return "Delete model %s" % (self.name, ) class RenameModel(ModelOperation): """ Renames a model. 
""" def __init__(self, old_name, new_name): self.old_name = old_name self.new_name = new_name super(RenameModel, self).__init__(old_name) @cached_property def old_name_lower(self): return self.old_name.lower() @cached_property def new_name_lower(self): return self.new_name.lower() def deconstruct(self): kwargs = { 'old_name': self.old_name, 'new_name': self.new_name, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): # In cases where state doesn't have rendered apps, prevent subsequent # reload_model() calls from rendering models for performance # reasons. This method should be refactored to avoid relying on # state.apps (#27310). reset_apps = 'apps' not in state.__dict__ apps = state.apps model = apps.get_model(app_label, self.old_name) model._meta.apps = apps # Get all of the related objects we need to repoint all_related_objects = ( f for f in model._meta.get_fields(include_hidden=True) if f.auto_created and not f.concrete and (not f.hidden or f.many_to_many) ) if reset_apps: del state.__dict__['apps'] # Rename the model state.models[app_label, self.new_name_lower] = state.models[app_label, self.old_name_lower] state.models[app_label, self.new_name_lower].name = self.new_name state.remove_model(app_label, self.old_name_lower) # Repoint the FKs and M2Ms pointing to us for related_object in all_related_objects: if related_object.model is not model: # The model being renamed does not participate in this relation # directly. Rather, a superclass does. continue # Use the new related key for self referential related objects. if related_object.related_model == model: related_key = (app_label, self.new_name_lower) else: related_key = ( related_object.related_model._meta.app_label, related_object.related_model._meta.model_name, ) new_fields = [] for name, field in state.models[related_key].fields: if name == related_object.field.name: field = field.clone() field.remote_field.model = "%s.%s" % (app_label, self.new_name) new_fields.append((name, field)) state.models[related_key].fields = new_fields state.reload_model(*related_key) # Repoint M2Ms with through pointing to us related_models = { f.remote_field.model for f in model._meta.fields if getattr(f.remote_field, 'model', None) } model_name = '%s.%s' % (app_label, self.old_name) for related_model in related_models: if related_model == model: related_key = (app_label, self.new_name_lower) else: related_key = (related_model._meta.app_label, related_model._meta.model_name) new_fields = [] changed = False for name, field in state.models[related_key].fields: if field.is_relation and field.many_to_many and field.remote_field.through == model_name: field = field.clone() field.remote_field.through = '%s.%s' % (app_label, self.new_name) changed = True new_fields.append((name, field)) if changed: state.models[related_key].fields = new_fields state.reload_model(*related_key) state.reload_model(app_label, self.new_name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.new_name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.old_name) # Move the main table schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Alter the fields pointing to us for related_object in old_model._meta.related_objects: if related_object.related_model == old_model: model = new_model related_key = (app_label, self.new_name_lower) else: 
model = related_object.related_model related_key = ( related_object.related_model._meta.app_label, related_object.related_model._meta.model_name, ) to_field = to_state.apps.get_model( *related_key )._meta.get_field(related_object.field.name) schema_editor.alter_field( model, related_object.field, to_field, ) # Rename M2M fields whose name is based on this model's name. fields = zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many) for (old_field, new_field) in fields: # Skip self-referential fields as these are renamed above. if new_field.model == new_field.related_model or not new_field.remote_field.through._meta.auto_created: continue # Rename the M2M table that's based on this model's name. old_m2m_model = old_field.remote_field.through new_m2m_model = new_field.remote_field.through schema_editor.alter_db_table( new_m2m_model, old_m2m_model._meta.db_table, new_m2m_model._meta.db_table, ) # Rename the column in the M2M table that's based on this # model's name. schema_editor.alter_field( new_m2m_model, old_m2m_model._meta.get_field(old_model._meta.model_name), new_m2m_model._meta.get_field(new_model._meta.model_name), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower self.new_name, self.old_name = self.old_name, self.new_name self.database_forwards(app_label, schema_editor, from_state, to_state) self.new_name_lower, self.old_name_lower = self.old_name_lower, self.new_name_lower self.new_name, self.old_name = self.old_name, self.new_name def references_model(self, name, app_label=None): return ( name.lower() == self.old_name_lower or name.lower() == self.new_name_lower ) def describe(self): return "Rename model %s to %s" % (self.old_name, self.new_name) def reduce(self, operation, in_between, app_label=None): if (isinstance(operation, RenameModel) and self.new_name_lower == operation.old_name_lower): return [ RenameModel( self.old_name, operation.new_name, ), ] # Skip `ModelOperation.reduce` as we want to run `references_model` # against self.new_name. 
return ( super(ModelOperation, self).reduce(operation, in_between, app_label=app_label) or not operation.references_model(self.new_name, app_label) ) class AlterModelTable(ModelOperation): """ Renames a model's table """ def __init__(self, name, table): self.table = table super(AlterModelTable, self).__init__(name) def deconstruct(self): kwargs = { 'name': self.name, 'table': self.table, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): state.models[app_label, self.name_lower].options["db_table"] = self.table state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) schema_editor.alter_db_table( new_model, old_model._meta.db_table, new_model._meta.db_table, ) # Rename M2M fields whose name is based on this model's db_table for (old_field, new_field) in zip(old_model._meta.local_many_to_many, new_model._meta.local_many_to_many): if new_field.remote_field.through._meta.auto_created: schema_editor.alter_db_table( new_field.remote_field.through, old_field.remote_field.through._meta.db_table, new_field.remote_field.through._meta.db_table, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def describe(self): return "Rename table for %s to %s" % ( self.name, self.table if self.table is not None else "(default)" ) def reduce(self, operation, in_between, app_label=None): if isinstance(operation, (AlterModelTable, DeleteModel)) and self.name_lower == operation.name_lower: return [operation] return super(AlterModelTable, self).reduce(operation, in_between, app_label=app_label) class ModelOptionOperation(ModelOperation): def reduce(self, operation, in_between, app_label=None): if isinstance(operation, (self.__class__, DeleteModel)) and self.name_lower == operation.name_lower: return [operation] return super(ModelOptionOperation, self).reduce(operation, in_between, app_label=app_label) class FieldRelatedOptionOperation(ModelOptionOperation): def reduce(self, operation, in_between, app_label=None): if (isinstance(operation, FieldOperation) and self.name_lower == operation.model_name_lower and not self.references_field(operation.model_name, operation.name)): return [operation, self] return super(FieldRelatedOptionOperation, self).reduce(operation, in_between, app_label=app_label) class AlterUniqueTogether(FieldRelatedOptionOperation): """ Changes the value of unique_together to the target one. Input value of unique_together must be a set of tuples. 
""" option_name = "unique_together" def __init__(self, name, unique_together): unique_together = normalize_together(unique_together) self.unique_together = set(tuple(cons) for cons in unique_together) super(AlterUniqueTogether, self).__init__(name) def deconstruct(self): kwargs = { 'name': self.name, 'unique_together': self.unique_together, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.options[self.option_name] = self.unique_together state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) schema_editor.alter_unique_together( new_model, getattr(old_model._meta, self.option_name, set()), getattr(new_model._meta, self.option_name, set()), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def references_field(self, model_name, name, app_label=None): return ( self.references_model(model_name, app_label) and ( not self.unique_together or any((name in together) for together in self.unique_together) ) ) def describe(self): return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.unique_together or '')) class AlterIndexTogether(FieldRelatedOptionOperation): """ Changes the value of index_together to the target one. Input value of index_together must be a set of tuples. """ option_name = "index_together" def __init__(self, name, index_together): index_together = normalize_together(index_together) self.index_together = set(tuple(cons) for cons in index_together) super(AlterIndexTogether, self).__init__(name) def deconstruct(self): kwargs = { 'name': self.name, 'index_together': self.index_together, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.options[self.option_name] = self.index_together state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): new_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, new_model): old_model = from_state.apps.get_model(app_label, self.name) schema_editor.alter_index_together( new_model, getattr(old_model._meta, self.option_name, set()), getattr(new_model._meta, self.option_name, set()), ) def database_backwards(self, app_label, schema_editor, from_state, to_state): return self.database_forwards(app_label, schema_editor, from_state, to_state) def references_field(self, model_name, name, app_label=None): return ( self.references_model(model_name, app_label) and ( not self.index_together or any((name in together) for together in self.index_together) ) ) def describe(self): return "Alter %s for %s (%s constraint(s))" % (self.option_name, self.name, len(self.index_together or '')) class AlterOrderWithRespectTo(FieldRelatedOptionOperation): """ Represents a change with the order_with_respect_to option. 
""" def __init__(self, name, order_with_respect_to): self.order_with_respect_to = order_with_respect_to super(AlterOrderWithRespectTo, self).__init__(name) def deconstruct(self): kwargs = { 'name': self.name, 'order_with_respect_to': self.order_with_respect_to, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.options['order_with_respect_to'] = self.order_with_respect_to state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): to_model = to_state.apps.get_model(app_label, self.name) if self.allow_migrate_model(schema_editor.connection.alias, to_model): from_model = from_state.apps.get_model(app_label, self.name) # Remove a field if we need to if from_model._meta.order_with_respect_to and not to_model._meta.order_with_respect_to: schema_editor.remove_field(from_model, from_model._meta.get_field("_order")) # Add a field if we need to (altering the column is untouched as # it's likely a rename) elif to_model._meta.order_with_respect_to and not from_model._meta.order_with_respect_to: field = to_model._meta.get_field("_order") if not field.has_default(): field.default = 0 schema_editor.add_field( from_model, field, ) def database_backwards(self, app_label, schema_editor, from_state, to_state): self.database_forwards(app_label, schema_editor, from_state, to_state) def references_field(self, model_name, name, app_label=None): return ( self.references_model(model_name, app_label) and ( self.order_with_respect_to is None or name == self.order_with_respect_to ) ) def describe(self): return "Set order_with_respect_to on %s to %s" % (self.name, self.order_with_respect_to) class AlterModelOptions(ModelOptionOperation): """ Sets new model options that don't directly affect the database schema (like verbose_name, permissions, ordering). Python code in migrations may still need them. 
""" # Model options we want to compare and preserve in an AlterModelOptions op ALTER_OPTION_KEYS = [ "base_manager_name", "default_manager_name", "get_latest_by", "managed", "ordering", "permissions", "default_permissions", "select_on_save", "verbose_name", "verbose_name_plural", ] def __init__(self, name, options): self.options = options super(AlterModelOptions, self).__init__(name) def deconstruct(self): kwargs = { 'name': self.name, 'options': self.options, } return ( self.__class__.__name__, [], kwargs ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.options = dict(model_state.options) model_state.options.update(self.options) for key in self.ALTER_OPTION_KEYS: if key not in self.options and key in model_state.options: del model_state.options[key] state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass def describe(self): return "Change Meta options on %s" % (self.name, ) class AlterModelManagers(ModelOptionOperation): """ Alters the model's managers """ serialization_expand_args = ['managers'] def __init__(self, name, managers): self.managers = managers super(AlterModelManagers, self).__init__(name) def deconstruct(self): return ( self.__class__.__name__, [self.name, self.managers], {} ) def state_forwards(self, app_label, state): model_state = state.models[app_label, self.name_lower] model_state.managers = list(self.managers) state.reload_model(app_label, self.name_lower) def database_forwards(self, app_label, schema_editor, from_state, to_state): pass def database_backwards(self, app_label, schema_editor, from_state, to_state): pass def describe(self): return "Change managers on %s" % (self.name, ) class IndexOperation(Operation): option_name = 'indexes' @cached_property def model_name_lower(self): return self.model_name.lower() class AddIndex(IndexOperation): """ Add an index on a model. """ def __init__(self, model_name, index): self.model_name = model_name if not index.name: raise ValueError( "Indexes passed to AddIndex operations require a name " "argument. %r doesn't have one." % index ) self.index = index def state_forwards(self, app_label, state): model_state = state.models[app_label, self.model_name_lower] model_state.options[self.option_name].append(self.index) def database_forwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.add_index(model, self.index) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): schema_editor.remove_index(model, self.index) def deconstruct(self): kwargs = { 'model_name': self.model_name, 'index': self.index, } return ( self.__class__.__name__, [], kwargs, ) def describe(self): return 'Create index %s on field(s) %s of model %s' % ( self.index.name, ', '.join(self.index.fields), self.model_name, ) class RemoveIndex(IndexOperation): """ Remove an index from a model. 
""" def __init__(self, model_name, name): self.model_name = model_name self.name = name def state_forwards(self, app_label, state): model_state = state.models[app_label, self.model_name_lower] indexes = model_state.options[self.option_name] model_state.options[self.option_name] = [idx for idx in indexes if idx.name != self.name] def database_forwards(self, app_label, schema_editor, from_state, to_state): model = from_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): from_model_state = from_state.models[app_label, self.model_name_lower] index = from_model_state.get_index_by_name(self.name) schema_editor.remove_index(model, index) def database_backwards(self, app_label, schema_editor, from_state, to_state): model = to_state.apps.get_model(app_label, self.model_name) if self.allow_migrate_model(schema_editor.connection.alias, model): to_model_state = to_state.models[app_label, self.model_name_lower] index = to_model_state.get_index_by_name(self.name) schema_editor.add_index(model, index) def deconstruct(self): kwargs = { 'model_name': self.model_name, 'name': self.name, } return ( self.__class__.__name__, [], kwargs, ) def describe(self): return 'Remove index %s from %s' % (self.name, self.model_name)
bsd-3-clause
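For context, a sketch of how the operations defined above are typically declared in a project's migration file; the app, model, and field names here are purely illustrative and not taken from Django's test suite.

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True
    dependencies = []

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(primary_key=True)),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.AlterModelTable(name='Author', table='library_writer'),
        migrations.AlterUniqueTogether(name='Author', unique_together={('name',)}),
    ]

When the migration optimizer runs, each operation's reduce() hook decides whether adjacent operations can be merged; for example, a CreateModel followed by a DeleteModel of the same non-proxy model collapses to nothing, and an AddField folds back into the originating CreateModel.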
deniszgonjanin/ckanext-geojsonview
ckanext/geojsonview/plugin.py
1
1204
import ckan.plugins as plugins
import ckan.plugins.toolkit as toolkit
import ckanext.resourceproxy.plugin as proxy
import ckan.lib.datapreview as datapreview
from ckan.common import json


class GeojsonviewPlugin(plugins.SingletonPlugin):
    plugins.implements(plugins.IConfigurer, inherit=True)
    plugins.implements(plugins.IResourceView, inherit=True)

    # IConfigurer

    def update_config(self, config_):
        toolkit.add_template_directory(config_, 'templates')
        toolkit.add_public_directory(config_, 'public')
        toolkit.add_resource('public', 'ckanext-geojsonview')

    # IResourceView

    def info(self):
        return {
            'name': 'geojson_view',
            'title': 'Map View',
            'icon': 'globe',
            'iframed': True
        }

    def setup_template_variables(self, context, data_dict):
        proxified_url = proxy.get_proxified_resource_url(data_dict)
        return {
            'proxied_url': json.dumps(proxified_url)
        }

    def can_view(self, data_dict):
        return data_dict['resource'].get('format', '').lower() == 'geojson'

    def view_template(self, context, data_dict):
        return 'dataviewer/geojsonview.html'
agpl-3.0
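A short sketch of what the plugin's can_view check means in practice. The resource dict below is made up, and the check shown is the same case-insensitive format comparison used by GeojsonviewPlugin.can_view; enabling the view in a live CKAN site additionally requires the extension's entry-point name (defined in its setup.py, not shown here) in ckan.plugins.

resource = {
    'format': 'GeoJSON',  # matched case-insensitively
    'url': 'https://example.com/boundaries.geojson',
}
data_dict = {'resource': resource}
# Equivalent to GeojsonviewPlugin.can_view(data_dict):
renders_map = data_dict['resource'].get('format', '').lower() == 'geojson'
assert renders_map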
cs591B1-Project/Social-Media-Impact-on-Stock-Market-and-Price
data/13 Honeywell/parseJSON.py
26
1412
def getSocialData(post):
    """Aggregate the social-engagement counts of a single post into one score."""
    # Get Thread Object
    threadObject = post["thread"]
    domain_rank = threadObject["domain_rank"]  # domain_rank
    #print 'domain_rank:' + str(domain_rank)

    socialObject = threadObject["social"]  # social data object

    facebookData = socialObject["facebook"]  # facebook data
    #print 'facebook data:' + str(facebookData["likes"]) + ', ' + str(facebookData["comments"]) + ', ' + str(facebookData["shares"])
    fb_likes = facebookData["likes"]
    fb_comments = facebookData["comments"]
    fb_shares = facebookData["shares"]

    gplusData = socialObject["gplus"]  # gplus data
    #print 'gplus data:' + str(gplusData["shares"])
    g_shares = gplusData["shares"]

    pinterestData = socialObject["pinterest"]  # pinterest data
    #print 'pinterest data:' + str(pinterestData["shares"])
    pin_shares = pinterestData["shares"]

    linkedinData = socialObject["linkedin"]  # linkedin data
    #print 'linkedin data:' + str(linkedinData["shares"])
    linkedin_shares = linkedinData["shares"]

    stumbleduponData = socialObject["stumbledupon"]
    #print 'stumbledupon data:' + str(stumbleduponData["shares"])
    su_shares = stumbleduponData["shares"]

    vkData = socialObject["vk"]
    #print 'vk data:' + str(vkData["shares"])
    vk_shares = vkData["shares"]

    social_impact = (fb_likes + fb_comments + fb_shares + g_shares
                     + pin_shares + linkedin_shares + su_shares + vk_shares)
    #print str(social_impact)
    return social_impact
mit
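A minimal sketch of the nested post structure getSocialData expects. The values are made up; only the keys mirror what the function reads.

sample_post = {
    "thread": {
        "domain_rank": 1234,
        "social": {
            "facebook": {"likes": 10, "comments": 2, "shares": 5},
            "gplus": {"shares": 1},
            "pinterest": {"shares": 0},
            "linkedin": {"shares": 3},
            "stumbledupon": {"shares": 0},
            "vk": {"shares": 0},
        },
    }
}
# 10 + 2 + 5 + 1 + 0 + 3 + 0 + 0 = 21
assert getSocialData(sample_post) == 21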
tmpgit/intellij-community
plugins/hg4idea/testData/bin/mercurial/pvec.py
94
5989
# pvec.py - probabilistic vector clocks for Mercurial # # Copyright 2012 Matt Mackall <[email protected]> # # This software may be used and distributed according to the terms of the # GNU General Public License version 2 or any later version. ''' A "pvec" is a changeset property based on the theory of vector clocks that can be compared to discover relatedness without consulting a graph. This can be useful for tasks like determining how a disconnected patch relates to a repository. Currently a pvec consist of 448 bits, of which 24 are 'depth' and the remainder are a bit vector. It is represented as a 70-character base85 string. Construction: - a root changeset has a depth of 0 and a bit vector based on its hash - a normal commit has a changeset where depth is increased by one and one bit vector bit is flipped based on its hash - a merge changeset pvec is constructed by copying changes from one pvec into the other to balance its depth Properties: - for linear changes, difference in depth is always <= hamming distance - otherwise, changes are probably divergent - when hamming distance is < 200, we can reliably detect when pvecs are near Issues: - hamming distance ceases to work over distances of ~ 200 - detecting divergence is less accurate when the common ancestor is very close to either revision or total distance is high - this could probably be improved by modeling the relation between delta and hdist Uses: - a patch pvec can be used to locate the nearest available common ancestor for resolving conflicts - ordering of patches can be established without a DAG - two head pvecs can be compared to determine whether push/pull/merge is needed and approximately how many changesets are involved - can be used to find a heuristic divergence measure between changesets on different branches ''' import base85, util from node import nullrev _size = 448 # 70 chars b85-encoded _bytes = _size / 8 _depthbits = 24 _depthbytes = _depthbits / 8 _vecbytes = _bytes - _depthbytes _vecbits = _vecbytes * 8 _radius = (_vecbits - 30) / 2 # high probability vectors are related def _bin(bs): '''convert a bytestring to a long''' v = 0 for b in bs: v = v * 256 + ord(b) return v def _str(v, l): bs = "" for p in xrange(l): bs = chr(v & 255) + bs v >>= 8 return bs def _split(b): '''depth and bitvec''' return _bin(b[:_depthbytes]), _bin(b[_depthbytes:]) def _join(depth, bitvec): return _str(depth, _depthbytes) + _str(bitvec, _vecbytes) def _hweight(x): c = 0 while x: if x & 1: c += 1 x >>= 1 return c _htab = [_hweight(x) for x in xrange(256)] def _hamming(a, b): '''find the hamming distance between two longs''' d = a ^ b c = 0 while d: c += _htab[d & 0xff] d >>= 8 return c def _mergevec(x, y, c): # Ideally, this function would be x ^ y ^ ancestor, but finding # ancestors is a nuisance. 
So instead we find the minimal number # of changes to balance the depth and hamming distance d1, v1 = x d2, v2 = y if d1 < d2: d1, d2, v1, v2 = d2, d1, v2, v1 hdist = _hamming(v1, v2) ddist = d1 - d2 v = v1 m = v1 ^ v2 # mask of different bits i = 1 if hdist > ddist: # if delta = 10 and hdist = 100, then we need to go up 55 steps # to the ancestor and down 45 changes = (hdist - ddist + 1) / 2 else: # must make at least one change changes = 1 depth = d1 + changes # copy changes from v2 if m: while changes: if m & i: v ^= i changes -= 1 i <<= 1 else: v = _flipbit(v, c) return depth, v def _flipbit(v, node): # converting bit strings to longs is slow bit = (hash(node) & 0xffffffff) % _vecbits return v ^ (1<<bit) def ctxpvec(ctx): '''construct a pvec for ctx while filling in the cache''' r = ctx._repo if not util.safehasattr(r, "_pveccache"): r._pveccache = {} pvc = r._pveccache if ctx.rev() not in pvc: cl = r.changelog for n in xrange(ctx.rev() + 1): if n not in pvc: node = cl.node(n) p1, p2 = cl.parentrevs(n) if p1 == nullrev: # start with a 'random' vector at root pvc[n] = (0, _bin((node * 3)[:_vecbytes])) elif p2 == nullrev: d, v = pvc[p1] pvc[n] = (d + 1, _flipbit(v, node)) else: pvc[n] = _mergevec(pvc[p1], pvc[p2], node) bs = _join(*pvc[ctx.rev()]) return pvec(base85.b85encode(bs)) class pvec(object): def __init__(self, hashorctx): if isinstance(hashorctx, str): self._bs = hashorctx self._depth, self._vec = _split(base85.b85decode(hashorctx)) else: self._vec = ctxpvec(hashorctx) def __str__(self): return self._bs def __eq__(self, b): return self._vec == b._vec and self._depth == b._depth def __lt__(self, b): delta = b._depth - self._depth if delta < 0: return False # always correct if _hamming(self._vec, b._vec) > delta: return False return True def __gt__(self, b): return b < self def __or__(self, b): delta = abs(b._depth - self._depth) if _hamming(self._vec, b._vec) <= delta: return False return True def __sub__(self, b): if self | b: raise ValueError("concurrent pvecs") return self._depth - b._depth def distance(self, b): d = abs(b._depth - self._depth) h = _hamming(self._vec, b._vec) return max(d, h) def near(self, b): dist = abs(b.depth - self._depth) if dist > _radius or _hamming(self._vec, b._vec) > _radius: return False
apache-2.0
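A hedged sketch of how the pvec API above might be used from a Mercurial extension or debug helper to compare two revisions without walking the DAG; the repository access pattern and the wording of the return strings are illustrative.

from mercurial import pvec

def describe_relation(repo, rev_a, rev_b):
    # Build probabilistic vector clocks for both changesets.
    a = pvec.ctxpvec(repo[rev_a])
    b = pvec.ctxpvec(repo[rev_b])
    if a == b:
        return 'probably the same point in history'
    if a < b:
        # Depth difference approximates how many changesets separate them.
        return 'likely ancestor, roughly %d changes behind' % (b - a)
    if a > b:
        return 'likely descendant, roughly %d changes ahead' % (a - b)
    # Hamming distance exceeds the depth delta: the revisions have diverged.
    return 'divergent, combined distance about %d' % a.distance(b)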
mbareta/edx-platform-ft
common/lib/calc/calc/tests/test_preview.py
257
8723
# -*- coding: utf-8 -*- """ Unit tests for preview.py """ import unittest from calc import preview import pyparsing class LatexRenderedTest(unittest.TestCase): """ Test the initializing code for LatexRendered. Specifically that it stores the correct data and handles parens well. """ def test_simple(self): """ Test that the data values are stored without changing. """ math = 'x^2' obj = preview.LatexRendered(math, tall=True) self.assertEquals(obj.latex, math) self.assertEquals(obj.sans_parens, math) self.assertEquals(obj.tall, True) def _each_parens(self, with_parens, math, parens, tall=False): """ Helper method to test the way parens are wrapped. """ obj = preview.LatexRendered(math, parens=parens, tall=tall) self.assertEquals(obj.latex, with_parens) self.assertEquals(obj.sans_parens, math) self.assertEquals(obj.tall, tall) def test_parens(self): """ Test curvy parens. """ self._each_parens('(x+y)', 'x+y', '(') def test_brackets(self): """ Test brackets. """ self._each_parens('[x+y]', 'x+y', '[') def test_squiggles(self): """ Test curly braces. """ self._each_parens(r'\{x+y\}', 'x+y', '{') def test_parens_tall(self): """ Test curvy parens with the tall parameter. """ self._each_parens(r'\left(x^y\right)', 'x^y', '(', tall=True) def test_brackets_tall(self): """ Test brackets, also tall. """ self._each_parens(r'\left[x^y\right]', 'x^y', '[', tall=True) def test_squiggles_tall(self): """ Test tall curly braces. """ self._each_parens(r'\left\{x^y\right\}', 'x^y', '{', tall=True) def test_bad_parens(self): """ Check that we get an error with invalid parens. """ with self.assertRaisesRegexp(Exception, 'Unknown parenthesis'): preview.LatexRendered('x^2', parens='not parens') class LatexPreviewTest(unittest.TestCase): """ Run integrative tests for `latex_preview`. All functionality was tested `RenderMethodsTest`, but see if it combines all together correctly. """ def test_no_input(self): """ With no input (including just whitespace), see that no error is thrown. """ self.assertEquals('', preview.latex_preview('')) self.assertEquals('', preview.latex_preview(' ')) self.assertEquals('', preview.latex_preview(' \t ')) def test_number_simple(self): """ Simple numbers should pass through. """ self.assertEquals(preview.latex_preview('3.1415'), '3.1415') def test_number_suffix(self): """ Suffixes should be escaped. """ self.assertEquals(preview.latex_preview('1.618k'), r'1.618\text{k}') def test_number_sci_notation(self): """ Numbers with scientific notation should display nicely """ self.assertEquals( preview.latex_preview('6.0221413E+23'), r'6.0221413\!\times\!10^{+23}' ) self.assertEquals( preview.latex_preview('-6.0221413E+23'), r'-6.0221413\!\times\!10^{+23}' ) def test_number_sci_notation_suffix(self): """ Test numbers with both of these. """ self.assertEquals( preview.latex_preview('6.0221413E+23k'), r'6.0221413\!\times\!10^{+23}\text{k}' ) self.assertEquals( preview.latex_preview('-6.0221413E+23k'), r'-6.0221413\!\times\!10^{+23}\text{k}' ) def test_variable_simple(self): """ Simple valid variables should pass through. """ self.assertEquals(preview.latex_preview('x', variables=['x']), 'x') def test_greek(self): """ Variable names that are greek should be formatted accordingly. 
""" self.assertEquals(preview.latex_preview('pi'), r'\pi') def test_variable_subscript(self): """ Things like 'epsilon_max' should display nicely """ self.assertEquals( preview.latex_preview('epsilon_max', variables=['epsilon_max']), r'\epsilon_{max}' ) def test_function_simple(self): """ Valid function names should be escaped. """ self.assertEquals( preview.latex_preview('f(3)', functions=['f']), r'\text{f}(3)' ) def test_function_tall(self): r""" Functions surrounding a tall element should have \left, \right """ self.assertEquals( preview.latex_preview('f(3^2)', functions=['f']), r'\text{f}\left(3^{2}\right)' ) def test_function_sqrt(self): """ Sqrt function should be handled specially. """ self.assertEquals(preview.latex_preview('sqrt(3)'), r'\sqrt{3}') def test_function_log10(self): """ log10 function should be handled specially. """ self.assertEquals(preview.latex_preview('log10(3)'), r'\log_{10}(3)') def test_function_log2(self): """ log2 function should be handled specially. """ self.assertEquals(preview.latex_preview('log2(3)'), r'\log_2(3)') def test_power_simple(self): """ Powers should wrap the elements with braces correctly. """ self.assertEquals(preview.latex_preview('2^3^4'), '2^{3^{4}}') def test_power_parens(self): """ Powers should ignore the parenthesis of the last math. """ self.assertEquals(preview.latex_preview('2^3^(4+5)'), '2^{3^{4+5}}') def test_parallel(self): r""" Parallel items should combine with '\|'. """ self.assertEquals(preview.latex_preview('2||3'), r'2\|3') def test_product_mult_only(self): r""" Simple products should combine with a '\cdot'. """ self.assertEquals(preview.latex_preview('2*3'), r'2\cdot 3') def test_product_big_frac(self): """ Division should combine with '\frac'. """ self.assertEquals( preview.latex_preview('2*3/4/5'), r'\frac{2\cdot 3}{4\cdot 5}' ) def test_product_single_frac(self): """ Division should ignore parens if they are extraneous. """ self.assertEquals( preview.latex_preview('(2+3)/(4+5)'), r'\frac{2+3}{4+5}' ) def test_product_keep_going(self): """ Complex products/quotients should split into many '\frac's when needed. """ self.assertEquals( preview.latex_preview('2/3*4/5*6'), r'\frac{2}{3}\cdot \frac{4}{5}\cdot 6' ) def test_sum(self): """ Sums should combine its elements. """ # Use 'x' as the first term (instead of, say, '1'), so it can't be # interpreted as a negative number. self.assertEquals( preview.latex_preview('-x+2-3+4', variables=['x']), '-x+2-3+4' ) def test_sum_tall(self): """ A complicated expression should not hide the tallness. """ self.assertEquals( preview.latex_preview('(2+3^2)'), r'\left(2+3^{2}\right)' ) def test_complicated(self): """ Given complicated input, ensure that exactly the correct string is made. """ self.assertEquals( preview.latex_preview('11*f(x)+x^2*(3||4)/sqrt(pi)'), r'11\cdot \text{f}(x)+\frac{x^{2}\cdot (3\|4)}{\sqrt{\pi}}' ) self.assertEquals( preview.latex_preview('log10(1+3/4/Cos(x^2)*(x+1))', case_sensitive=True), (r'\log_{10}\left(1+\frac{3}{4\cdot \text{Cos}\left(x^{2}\right)}' r'\cdot (x+1)\right)') ) def test_syntax_errors(self): """ Test a lot of math strings that give syntax errors Rather than have a lot of self.assertRaises, make a loop and keep track of those that do not throw a `ParseException`, and assert at the end. """ bad_math_list = [ '11+', '11*', 'f((x)', 'sqrt(x^)', '3f(x)', # Not 3*f(x) '3|4', '3|||4' ] bad_exceptions = {} for math in bad_math_list: try: preview.latex_preview(math) except pyparsing.ParseException: pass # This is what we were expecting. 
(not excepting :P) except Exception as error: # pragma: no cover bad_exceptions[math] = error else: # pragma: no cover # If there is no exception thrown, this is a problem bad_exceptions[math] = None self.assertEquals({}, bad_exceptions)
agpl-3.0
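The same API can also be exercised outside the unittest harness; the expected strings below are copied directly from the assertions above, and the only assumption is that the calc package is importable.

from calc import preview

assert preview.latex_preview('2^3^4') == '2^{3^{4}}'
assert preview.latex_preview('sqrt(3)') == r'\sqrt{3}'
assert preview.latex_preview('1.618k') == r'1.618\text{k}'
assert preview.latex_preview('x', variables=['x']) == 'x'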
nicanor-romero/OctoPrint
src/octoprint/printer/standard.py
7
31006
# coding=utf-8 """ This module holds the standard implementation of the :class:`PrinterInterface` and it helpers. """ from __future__ import absolute_import __author__ = "Gina Häußge <[email protected]>" __license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html' __copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License" import copy import logging import os import threading import time from octoprint import util as util from octoprint.events import eventManager, Events from octoprint.filemanager import FileDestinations from octoprint.plugin import plugin_manager, ProgressPlugin from octoprint.printer import PrinterInterface, PrinterCallback, UnknownScript from octoprint.printer.estimation import TimeEstimationHelper from octoprint.settings import settings from octoprint.util import comm as comm from octoprint.util import InvariantContainer class Printer(PrinterInterface, comm.MachineComPrintCallback): """ Default implementation of the :class:`PrinterInterface`. Manages the communication layer object and registers itself with it as a callback to react to changes on the communication layer. """ def __init__(self, fileManager, analysisQueue, printerProfileManager): from collections import deque self._logger = logging.getLogger(__name__) self._analysisQueue = analysisQueue self._fileManager = fileManager self._printerProfileManager = printerProfileManager # state # TODO do we really need to hold the temperature here? self._temp = None self._bedTemp = None self._targetTemp = None self._targetBedTemp = None self._temps = TemperatureHistory(cutoff=settings().getInt(["temperature", "cutoff"])*60) self._tempBacklog = [] self._latestMessage = None self._messages = deque([], 300) self._messageBacklog = [] self._latestLog = None self._log = deque([], 300) self._logBacklog = [] self._state = None self._currentZ = None self._progress = None self._printTime = None self._printTimeLeft = None self._printAfterSelect = False # sd handling self._sdPrinting = False self._sdStreaming = False self._sdFilelistAvailable = threading.Event() self._streamingFinishedCallback = None self._selectedFile = None self._timeEstimationData = None # comm self._comm = None # callbacks self._callbacks = [] # progress plugins self._lastProgressReport = None self._progressPlugins = plugin_manager().get_implementations(ProgressPlugin) self._stateMonitor = StateMonitor( interval=0.5, on_update=self._sendCurrentDataCallbacks, on_add_temperature=self._sendAddTemperatureCallbacks, on_add_log=self._sendAddLogCallbacks, on_add_message=self._sendAddMessageCallbacks ) self._stateMonitor.reset( state={"text": self.get_state_string(), "flags": self._getStateFlags()}, job_data={ "file": { "name": None, "size": None, "origin": None, "date": None }, "estimatedPrintTime": None, "lastPrintTime": None, "filament": { "length": None, "volume": None } }, progress={"completion": None, "filepos": None, "printTime": None, "printTimeLeft": None}, current_z=None ) eventManager().subscribe(Events.METADATA_ANALYSIS_FINISHED, self._on_event_MetadataAnalysisFinished) eventManager().subscribe(Events.METADATA_STATISTICS_UPDATED, self._on_event_MetadataStatisticsUpdated) #~~ handling of PrinterCallbacks def register_callback(self, callback): if not isinstance(callback, PrinterCallback): self._logger.warn("Registering an object as printer callback which doesn't implement the PrinterCallback interface") self._callbacks.append(callback) self._sendInitialStateUpdate(callback) def 
unregister_callback(self, callback): if callback in self._callbacks: self._callbacks.remove(callback) def _sendAddTemperatureCallbacks(self, data): for callback in self._callbacks: try: callback.on_printer_add_temperature(data) except: self._logger.exception("Exception while adding temperature data point") def _sendAddLogCallbacks(self, data): for callback in self._callbacks: try: callback.on_printer_add_log(data) except: self._logger.exception("Exception while adding communication log entry") def _sendAddMessageCallbacks(self, data): for callback in self._callbacks: try: callback.on_printer_add_message(data) except: self._logger.exception("Exception while adding printer message") def _sendCurrentDataCallbacks(self, data): for callback in self._callbacks: try: callback.on_printer_send_current_data(copy.deepcopy(data)) except: self._logger.exception("Exception while pushing current data") #~~ callback from metadata analysis event def _on_event_MetadataAnalysisFinished(self, event, data): if self._selectedFile: self._setJobData(self._selectedFile["filename"], self._selectedFile["filesize"], self._selectedFile["sd"]) def _on_event_MetadataStatisticsUpdated(self, event, data): self._setJobData(self._selectedFile["filename"], self._selectedFile["filesize"], self._selectedFile["sd"]) #~~ progress plugin reporting def _reportPrintProgressToPlugins(self, progress): if not progress or not self._selectedFile or not "sd" in self._selectedFile or not "filename" in self._selectedFile: return storage = "sdcard" if self._selectedFile["sd"] else "local" filename = self._selectedFile["filename"] def call_plugins(storage, filename, progress): for plugin in self._progressPlugins: try: plugin.on_print_progress(storage, filename, progress) except: self._logger.exception("Exception while sending print progress to plugin %s" % plugin._identifier) thread = threading.Thread(target=call_plugins, args=(storage, filename, progress)) thread.daemon = False thread.start() #~~ PrinterInterface implementation def connect(self, port=None, baudrate=None, profile=None): """ Connects to the printer. If port and/or baudrate is provided, uses these settings, otherwise autodetection will be attempted. """ if self._comm is not None: self._comm.close() self._printerProfileManager.select(profile) self._comm = comm.MachineCom(port, baudrate, callbackObject=self, printerProfileManager=self._printerProfileManager) def disconnect(self): """ Closes the connection to the printer. """ if self._comm is not None: self._comm.close() self._comm = None self._printerProfileManager.deselect() eventManager().fire(Events.DISCONNECTED) def get_transport(self): if self._comm is None: return None return self._comm.getTransport() getTransport = util.deprecated("getTransport has been renamed to get_transport", since="1.2.0-dev-590", includedoc="Replaced by :func:`get_transport`") def fake_ack(self): if self._comm is None: return self._comm.fakeOk() def commands(self, commands): """ Sends one or more gcode commands to the printer. 
""" if self._comm is None: return if not isinstance(commands, (list, tuple)): commands = [commands] for command in commands: self._comm.sendCommand(command) def script(self, name, context=None): if self._comm is None: return if name is None or not name: raise ValueError("name must be set") result = self._comm.sendGcodeScript(name, replacements=context) if not result: raise UnknownScript(name) def jog(self, axis, amount): if not isinstance(axis, (str, unicode)): raise ValueError("axis must be a string: {axis}".format(axis=axis)) axis = axis.lower() if not axis in PrinterInterface.valid_axes: raise ValueError("axis must be any of {axes}: {axis}".format(axes=", ".join(PrinterInterface.valid_axes), axis=axis)) if not isinstance(amount, (int, long, float)): raise ValueError("amount must be a valid number: {amount}".format(amount=amount)) printer_profile = self._printerProfileManager.get_current_or_default() movement_speed = printer_profile["axes"][axis]["speed"] self.commands(["G91", "G1 %s%.4f F%d" % (axis.upper(), amount, movement_speed), "G90"]) def home(self, axes): if not isinstance(axes, (list, tuple)): if isinstance(axes, (str, unicode)): axes = [axes] else: raise ValueError("axes is neither a list nor a string: {axes}".format(axes=axes)) validated_axes = filter(lambda x: x in PrinterInterface.valid_axes, map(lambda x: x.lower(), axes)) if len(axes) != len(validated_axes): raise ValueError("axes contains invalid axes: {axes}".format(axes=axes)) self.commands(["G91", "G28 %s" % " ".join(map(lambda x: "%s0" % x.upper(), validated_axes)), "G90"]) def extrude(self, amount): if not isinstance(amount, (int, long, float)): raise ValueError("amount must be a valid number: {amount}".format(amount=amount)) printer_profile = self._printerProfileManager.get_current_or_default() extrusion_speed = printer_profile["axes"]["e"]["speed"] self.commands(["G91", "G1 E%s F%d" % (amount, extrusion_speed), "G90"]) def change_tool(self, tool): if not PrinterInterface.valid_tool_regex.match(tool): raise ValueError("tool must match \"tool[0-9]+\": {tool}".format(tool=tool)) tool_num = int(tool[len("tool"):]) self.commands("T%d" % tool_num) def set_temperature(self, heater, value): if not PrinterInterface.valid_heater_regex.match(heater): raise ValueError("heater must match \"tool[0-9]+\" or \"bed\": {heater}".format(type=heater)) if not isinstance(value, (int, long, float)) or value < 0: raise ValueError("value must be a valid number >= 0: {value}".format(value=value)) if heater.startswith("tool"): printer_profile = self._printerProfileManager.get_current_or_default() extruder_count = printer_profile["extruder"]["count"] if extruder_count > 1: toolNum = int(heater[len("tool"):]) self.commands("M104 T%d S%f" % (toolNum, value)) else: self.commands("M104 S%f" % value) elif heater == "bed": self.commands("M140 S%f" % value) def set_temperature_offset(self, offsets=None): if offsets is None: offsets = dict() if not isinstance(offsets, dict): raise ValueError("offsets must be a dict") validated_keys = filter(lambda x: PrinterInterface.valid_heater_regex.match(x), offsets.keys()) validated_values = filter(lambda x: isinstance(x, (int, long, float)), offsets.values()) if len(validated_keys) != len(offsets): raise ValueError("offsets contains invalid keys: {offsets}".format(offsets=offsets)) if len(validated_values) != len(offsets): raise ValueError("offsets contains invalid values: {offsets}".format(offsets=offsets)) if self._comm is None: return self._comm.setTemperatureOffset(offsets) 
self._stateMonitor.set_temp_offsets(offsets) def _convert_rate_value(self, factor, min=0, max=200): if not isinstance(factor, (int, float, long)): raise ValueError("factor is not a number") if isinstance(factor, float): factor = int(factor * 100.0) if factor < min or factor > max: raise ValueError("factor must be a value between %f and %f" % (min, max)) return factor def feed_rate(self, factor): factor = self._convert_rate_value(factor, min=50, max=200) self.commands("M220 S%d" % factor) def flow_rate(self, factor): factor = self._convert_rate_value(factor, min=75, max=125) self.commands("M221 S%d" % factor) def select_file(self, path, sd, printAfterSelect=False): if self._comm is None or (self._comm.isBusy() or self._comm.isStreaming()): self._logger.info("Cannot load file: printer not connected or currently busy") return self._printAfterSelect = printAfterSelect self._comm.selectFile("/" + path if sd else path, sd) self._setProgressData(0, None, None, None) self._setCurrentZ(None) def unselect_file(self): if self._comm is not None and (self._comm.isBusy() or self._comm.isStreaming()): return self._comm.unselectFile() self._setProgressData(0, None, None, None) self._setCurrentZ(None) def start_print(self): """ Starts the currently loaded print job. Only starts if the printer is connected and operational, not currently printing and a printjob is loaded """ if self._comm is None or not self._comm.isOperational() or self._comm.isPrinting(): return if self._selectedFile is None: return rolling_window = None threshold = None countdown = None if self._selectedFile["sd"]: # we are interesting in a rolling window of roughly the last 15s, so the number of entries has to be derived # by that divided by the sd status polling interval rolling_window = 15 / settings().get(["serial", "timeout", "sdStatus"]) # we are happy if the average of the estimates stays within 60s of the prior one threshold = 60 # we are happy when one rolling window has been stable countdown = rolling_window self._timeEstimationData = TimeEstimationHelper(rolling_window=rolling_window, threshold=threshold, countdown=countdown) self._lastProgressReport = None self._setProgressData(0, None, None, None) self._setCurrentZ(None) self._comm.startPrint() def toggle_pause_print(self): """ Pause the current printjob. """ if self._comm is None: return self._comm.setPause(not self._comm.isPaused()) def cancel_print(self): """ Cancel the current printjob. """ if self._comm is None: return self._comm.cancelPrint() # reset progress, height, print time self._setCurrentZ(None) self._setProgressData(None, None, None, None) # mark print as failure if self._selectedFile is not None: self._fileManager.log_print(FileDestinations.SDCARD if self._selectedFile["sd"] else FileDestinations.LOCAL, self._selectedFile["filename"], time.time(), self._comm.getPrintTime(), False, self._printerProfileManager.get_current_or_default()["id"]) payload = { "file": self._selectedFile["filename"], "origin": FileDestinations.LOCAL } if self._selectedFile["sd"]: payload["origin"] = FileDestinations.SDCARD eventManager().fire(Events.PRINT_FAILED, payload) def get_state_string(self): """ Returns a human readable string corresponding to the current communication state. 
""" if self._comm is None: return "Offline" else: return self._comm.getStateString() def get_current_data(self): return self._stateMonitor.get_current_data() def get_current_job(self): currentData = self._stateMonitor.get_current_data() return currentData["job"] def get_current_temperatures(self): if self._comm is not None: offsets = self._comm.getOffsets() else: offsets = dict() result = {} if self._temp is not None: for tool in self._temp.keys(): result["tool%d" % tool] = { "actual": self._temp[tool][0], "target": self._temp[tool][1], "offset": offsets[tool] if tool in offsets and offsets[tool] is not None else 0 } if self._bedTemp is not None: result["bed"] = { "actual": self._bedTemp[0], "target": self._bedTemp[1], "offset": offsets["bed"] if "bed" in offsets and offsets["bed"] is not None else 0 } return result def get_temperature_history(self): return self._temps def get_current_connection(self): if self._comm is None: return "Closed", None, None, None port, baudrate = self._comm.getConnection() printer_profile = self._printerProfileManager.get_current_or_default() return self._comm.getStateString(), port, baudrate, printer_profile def is_closed_or_error(self): return self._comm is None or self._comm.isClosedOrError() def is_operational(self): return self._comm is not None and self._comm.isOperational() def is_printing(self): return self._comm is not None and self._comm.isPrinting() def is_paused(self): return self._comm is not None and self._comm.isPaused() def is_error(self): return self._comm is not None and self._comm.isError() def is_ready(self): return self.is_operational() and not self._comm.isStreaming() def is_sd_ready(self): if not settings().getBoolean(["feature", "sdSupport"]) or self._comm is None: return False else: return self._comm.isSdReady() #~~ sd file handling def get_sd_files(self): if self._comm is None or not self._comm.isSdReady(): return [] return map(lambda x: (x[0][1:], x[1]), self._comm.getSdFiles()) def add_sd_file(self, filename, absolutePath, streamingFinishedCallback): if not self._comm or self._comm.isBusy() or not self._comm.isSdReady(): self._logger.error("No connection to printer or printer is busy") return self._streamingFinishedCallback = streamingFinishedCallback self.refresh_sd_files(blocking=True) existingSdFiles = map(lambda x: x[0], self._comm.getSdFiles()) remoteName = util.get_dos_filename(filename, existing_filenames=existingSdFiles, extension="gco") self._timeEstimationData = TimeEstimationHelper() self._comm.startFileTransfer(absolutePath, filename, "/" + remoteName) return remoteName def delete_sd_file(self, filename): if not self._comm or not self._comm.isSdReady(): return self._comm.deleteSdFile("/" + filename) def init_sd_card(self): if not self._comm or self._comm.isSdReady(): return self._comm.initSdCard() def release_sd_card(self): if not self._comm or not self._comm.isSdReady(): return self._comm.releaseSdCard() def refresh_sd_files(self, blocking=False): """ Refreshs the list of file stored on the SD card attached to printer (if available and printer communication available). Optional blocking parameter allows making the method block (max 10s) until the file list has been received (and can be accessed via self._comm.getSdFiles()). Defaults to an asynchronous operation. 
""" if not self._comm or not self._comm.isSdReady(): return self._sdFilelistAvailable.clear() self._comm.refreshSdFiles() if blocking: self._sdFilelistAvailable.wait(10000) #~~ state monitoring def _setCurrentZ(self, currentZ): self._currentZ = currentZ self._stateMonitor.set_current_z(self._currentZ) def _setState(self, state): self._state = state self._stateMonitor.set_state({"text": self.get_state_string(), "flags": self._getStateFlags()}) def _addLog(self, log): self._log.append(log) self._stateMonitor.add_log(log) def _addMessage(self, message): self._messages.append(message) self._stateMonitor.add_message(message) def _estimateTotalPrintTime(self, progress, printTime): if not progress or not printTime or not self._timeEstimationData: return None else: newEstimate = printTime / progress self._timeEstimationData.update(newEstimate) result = None if self._timeEstimationData.is_stable(): result = self._timeEstimationData.average_total_rolling return result def _setProgressData(self, progress, filepos, printTime, cleanedPrintTime): estimatedTotalPrintTime = self._estimateTotalPrintTime(progress, cleanedPrintTime) totalPrintTime = estimatedTotalPrintTime if self._selectedFile and "estimatedPrintTime" in self._selectedFile and self._selectedFile["estimatedPrintTime"]: statisticalTotalPrintTime = self._selectedFile["estimatedPrintTime"] if progress and cleanedPrintTime: if estimatedTotalPrintTime is None: totalPrintTime = statisticalTotalPrintTime else: if progress < 0.5: sub_progress = progress * 2 else: sub_progress = 1.0 totalPrintTime = (1 - sub_progress) * statisticalTotalPrintTime + sub_progress * estimatedTotalPrintTime self._progress = progress self._printTime = printTime self._printTimeLeft = totalPrintTime - cleanedPrintTime if (totalPrintTime is not None and cleanedPrintTime is not None) else None self._stateMonitor.set_progress({ "completion": self._progress * 100 if self._progress is not None else None, "filepos": filepos, "printTime": int(self._printTime) if self._printTime is not None else None, "printTimeLeft": int(self._printTimeLeft) if self._printTimeLeft is not None else None }) if progress: progress_int = int(progress * 100) if self._lastProgressReport != progress_int: self._lastProgressReport = progress_int self._reportPrintProgressToPlugins(progress_int) def _addTemperatureData(self, temp, bedTemp): currentTimeUtc = int(time.time()) data = { "time": currentTimeUtc } for tool in temp.keys(): data["tool%d" % tool] = { "actual": temp[tool][0], "target": temp[tool][1] } if bedTemp is not None and isinstance(bedTemp, tuple): data["bed"] = { "actual": bedTemp[0], "target": bedTemp[1] } self._temps.append(data) self._temp = temp self._bedTemp = bedTemp self._stateMonitor.add_temperature(data) def _setJobData(self, filename, filesize, sd): if filename is not None: if sd: path_in_storage = filename path_on_disk = None else: path_in_storage = self._fileManager.path_in_storage(FileDestinations.LOCAL, filename) path_on_disk = self._fileManager.path_on_disk(FileDestinations.LOCAL, filename) self._selectedFile = { "filename": path_in_storage, "filesize": filesize, "sd": sd, "estimatedPrintTime": None } else: self._selectedFile = None self._stateMonitor.set_job_data({ "file": { "name": None, "origin": None, "size": None, "date": None }, "estimatedPrintTime": None, "averagePrintTime": None, "lastPrintTime": None, "filament": None, }) return estimatedPrintTime = None lastPrintTime = None averagePrintTime = None date = None filament = None if path_on_disk: # Use a string for mtime 
because it could be float and the # javascript needs to exact match if not sd: date = int(os.stat(path_on_disk).st_ctime) try: fileData = self._fileManager.get_metadata(FileDestinations.SDCARD if sd else FileDestinations.LOCAL, path_on_disk) except: fileData = None if fileData is not None: if "analysis" in fileData: if estimatedPrintTime is None and "estimatedPrintTime" in fileData["analysis"]: estimatedPrintTime = fileData["analysis"]["estimatedPrintTime"] if "filament" in fileData["analysis"].keys(): filament = fileData["analysis"]["filament"] if "statistics" in fileData: printer_profile = self._printerProfileManager.get_current_or_default()["id"] if "averagePrintTime" in fileData["statistics"] and printer_profile in fileData["statistics"]["averagePrintTime"]: averagePrintTime = fileData["statistics"]["averagePrintTime"][printer_profile] if "lastPrintTime" in fileData["statistics"] and printer_profile in fileData["statistics"]["lastPrintTime"]: lastPrintTime = fileData["statistics"]["lastPrintTime"][printer_profile] if averagePrintTime is not None: self._selectedFile["estimatedPrintTime"] = averagePrintTime elif estimatedPrintTime is not None: # TODO apply factor which first needs to be tracked! self._selectedFile["estimatedPrintTime"] = estimatedPrintTime self._stateMonitor.set_job_data({ "file": { "name": path_in_storage, "origin": FileDestinations.SDCARD if sd else FileDestinations.LOCAL, "size": filesize, "date": date }, "estimatedPrintTime": estimatedPrintTime, "averagePrintTime": averagePrintTime, "lastPrintTime": lastPrintTime, "filament": filament, }) def _sendInitialStateUpdate(self, callback): try: data = self._stateMonitor.get_current_data() data.update({ "temps": list(self._temps), "logs": list(self._log), "messages": list(self._messages) }) callback.on_printer_send_initial_data(data) except Exception, err: import sys sys.stderr.write("ERROR: %s\n" % str(err)) pass def _getStateFlags(self): return { "operational": self.is_operational(), "printing": self.is_printing(), "closedOrError": self.is_closed_or_error(), "error": self.is_error(), "paused": self.is_paused(), "ready": self.is_ready(), "sdReady": self.is_sd_ready() } #~~ comm.MachineComPrintCallback implementation def on_comm_log(self, message): """ Callback method for the comm object, called upon log output. """ self._addLog(message) def on_comm_temperature_update(self, temp, bedTemp): self._addTemperatureData(temp, bedTemp) def on_comm_state_change(self, state): """ Callback method for the comm object, called if the connection state changes. 
""" oldState = self._state # forward relevant state changes to gcode manager if oldState == comm.MachineCom.STATE_PRINTING: if self._selectedFile is not None: if state == comm.MachineCom.STATE_CLOSED or state == comm.MachineCom.STATE_ERROR or state == comm.MachineCom.STATE_CLOSED_WITH_ERROR: self._fileManager.log_print(FileDestinations.SDCARD if self._selectedFile["sd"] else FileDestinations.LOCAL, self._selectedFile["filename"], time.time(), self._comm.getPrintTime(), False, self._printerProfileManager.get_current_or_default()["id"]) self._analysisQueue.resume() # printing done, put those cpu cycles to good use elif state == comm.MachineCom.STATE_PRINTING: self._analysisQueue.pause() # do not analyse files while printing elif state == comm.MachineCom.STATE_CLOSED or state == comm.MachineCom.STATE_CLOSED_WITH_ERROR: if self._comm is not None: self._comm = None self._setProgressData(0, None, None, None) self._setCurrentZ(None) self._setJobData(None, None, None) self._setState(state) def on_comm_message(self, message): """ Callback method for the comm object, called upon message exchanges via serial. Stores the message in the message buffer, truncates buffer to the last 300 lines. """ self._addMessage(message) def on_comm_progress(self): """ Callback method for the comm object, called upon any change in progress of the printjob. Triggers storage of new values for printTime, printTimeLeft and the current progress. """ self._setProgressData(self._comm.getPrintProgress(), self._comm.getPrintFilepos(), self._comm.getPrintTime(), self._comm.getCleanedPrintTime()) def on_comm_z_change(self, newZ): """ Callback method for the comm object, called upon change of the z-layer. """ oldZ = self._currentZ if newZ != oldZ: # we have to react to all z-changes, even those that might "go backward" due to a slicer's retraction or # anti-backlash-routines. 
Event subscribes should individually take care to filter out "wrong" z-changes eventManager().fire(Events.Z_CHANGE, {"new": newZ, "old": oldZ}) self._setCurrentZ(newZ) def on_comm_sd_state_change(self, sdReady): self._stateMonitor.set_state({"text": self.get_state_string(), "flags": self._getStateFlags()}) def on_comm_sd_files(self, files): eventManager().fire(Events.UPDATED_FILES, {"type": "gcode"}) self._sdFilelistAvailable.set() def on_comm_file_selected(self, filename, filesize, sd): self._setJobData(filename, filesize, sd) self._stateMonitor.set_state({"text": self.get_state_string(), "flags": self._getStateFlags()}) if self._printAfterSelect: self.start_print() def on_comm_print_job_done(self): self._fileManager.log_print(FileDestinations.SDCARD if self._selectedFile["sd"] else FileDestinations.LOCAL, self._selectedFile["filename"], time.time(), self._comm.getPrintTime(), True, self._printerProfileManager.get_current_or_default()["id"]) self._setProgressData(1.0, self._selectedFile["filesize"], self._comm.getPrintTime(), 0) self._stateMonitor.set_state({"text": self.get_state_string(), "flags": self._getStateFlags()}) def on_comm_file_transfer_started(self, filename, filesize): self._sdStreaming = True self._setJobData(filename, filesize, True) self._setProgressData(0.0, 0, 0, None) self._stateMonitor.set_state({"text": self.get_state_string(), "flags": self._getStateFlags()}) def on_comm_file_transfer_done(self, filename): self._sdStreaming = False if self._streamingFinishedCallback is not None: # in case of SD files, both filename and absolutePath are the same, so we set the (remote) filename for # both parameters self._streamingFinishedCallback(filename, filename, FileDestinations.SDCARD) self._setCurrentZ(None) self._setJobData(None, None, None) self._setProgressData(None, None, None, None) self._stateMonitor.set_state({"text": self.get_state_string(), "flags": self._getStateFlags()}) def on_comm_force_disconnect(self): self.disconnect() class StateMonitor(object): def __init__(self, interval=0.5, on_update=None, on_add_temperature=None, on_add_log=None, on_add_message=None): self._interval = interval self._update_callback = on_update self._on_add_temperature = on_add_temperature self._on_add_log = on_add_log self._on_add_message = on_add_message self._state = None self._job_data = None self._gcode_data = None self._sd_upload_data = None self._current_z = None self._progress = None self._offsets = {} self._change_event = threading.Event() self._state_lock = threading.Lock() self._last_update = time.time() self._worker = threading.Thread(target=self._work) self._worker.daemon = True self._worker.start() def reset(self, state=None, job_data=None, progress=None, current_z=None): self.set_state(state) self.set_job_data(job_data) self.set_progress(progress) self.set_current_z(current_z) def add_temperature(self, temperature): self._on_add_temperature(temperature) self._change_event.set() def add_log(self, log): self._on_add_log(log) self._change_event.set() def add_message(self, message): self._on_add_message(message) self._change_event.set() def set_current_z(self, current_z): self._current_z = current_z self._change_event.set() def set_state(self, state): with self._state_lock: self._state = state self._change_event.set() def set_job_data(self, job_data): self._job_data = job_data self._change_event.set() def set_progress(self, progress): self._progress = progress self._change_event.set() def set_temp_offsets(self, offsets): self._offsets = offsets self._change_event.set() def 
_work(self): while True: self._change_event.wait() with self._state_lock: now = time.time() delta = now - self._last_update additional_wait_time = self._interval - delta if additional_wait_time > 0: time.sleep(additional_wait_time) data = self.get_current_data() self._update_callback(data) self._last_update = time.time() self._change_event.clear() def get_current_data(self): return { "state": self._state, "job": self._job_data, "currentZ": self._current_z, "progress": self._progress, "offsets": self._offsets } class TemperatureHistory(InvariantContainer): def __init__(self, cutoff=30 * 60): def temperature_invariant(data): data.sort(key=lambda x: x["time"]) now = int(time.time()) return [item for item in data if item["time"] >= now - cutoff] InvariantContainer.__init__(self, guarantee_invariant=temperature_invariant)
agpl-3.0
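The _setProgressData path in the OctoPrint printer code above combines two total-print-time sources: a statistical estimate taken from the selected file's metadata and a live estimate derived from printTime / progress (smoothed by TimeEstimationHelper). Below 50% progress the statistical value still carries weight; from 50% onward only the live estimate is used, and the time left is the blended total minus the cleaned print time. A minimal standalone sketch of that weighting follows; blended_total_time is a hypothetical helper name, not part of OctoPrint's API.

# Minimal sketch of the time-left weighting used in _setProgressData above.
# blended_total_time is an illustrative name, not an OctoPrint function.
def blended_total_time(progress, statistical_total, live_total):
    """Blend a statistical estimate with a live (printTime / progress) estimate.

    progress is a fraction in (0, 1]. Below 50% progress the statistical value
    still dominates; from 50% on only the live estimate is used.
    """
    if live_total is None:
        return statistical_total
    sub_progress = min(progress * 2.0, 1.0)
    return (1 - sub_progress) * statistical_total + sub_progress * live_total


# Example: at 25% progress the result is the midpoint of both estimates.
assert blended_total_time(0.25, 4000, 2000) == 3000

In the code above, printTimeLeft is then this blended total minus cleanedPrintTime, guarded so it stays None whenever either value is unavailable.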
stevekuznetsov/ansible
test/units/modules/network/nxos/test_nxos_system.py
51
6189
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import json

from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_system
from .nxos_module import TestNxosModule, load_fixture, set_module_args


class TestNxosSystemModule(TestNxosModule):

    module = nxos_system

    def setUp(self):
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_system.get_config')
        self.get_config = self.mock_get_config.start()

        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_system.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None):
        self.get_config.return_value = load_fixture('nxos_system_config.cfg')
        self.load_config.return_value = None

    def test_nxos_system_hostname_changed(self):
        set_module_args(dict(hostname='foo'))
        commands = ['hostname foo']
        self.execute_module(changed=True, commands=commands)

    def test_nxos_system_domain_lookup(self):
        set_module_args(dict(domain_lookup=True))
        commands = ['ip domain-lookup']
        self.execute_module(changed=True, commands=commands)

    def test_nxos_system_missing_vrf(self):
        domain_name = dict(name='example.com', vrf='example')
        set_module_args(dict(domain_name=domain_name))
        self.execute_module(failed=True)

    def test_nxos_system_domain_name(self):
        set_module_args(dict(domain_name=['example.net']))
        commands = ['no ip domain-name ansible.com', 'vrf context management',
                    'no ip domain-name eng.ansible.com', 'exit',
                    'ip domain-name example.net']
        self.execute_module(changed=True, commands=commands)

    def test_nxos_system_domain_name_complex(self):
        domain_name = dict(name='example.net', vrf='management')
        set_module_args(dict(domain_name=[domain_name]))
        commands = ['no ip domain-name ansible.com', 'vrf context management',
                    'no ip domain-name eng.ansible.com', 'exit',
                    'vrf context management', 'ip domain-name example.net', 'exit']
        self.execute_module(changed=True, commands=commands)

    def test_nxos_system_domain_search(self):
        set_module_args(dict(domain_search=['example.net']))
        commands = ['vrf context management', 'no ip domain-list ansible.com',
                    'exit', 'vrf context management', 'no ip domain-list redhat.com',
                    'exit', 'no ip domain-list ansible.com',
                    'no ip domain-list redhat.com', 'ip domain-list example.net']
        self.execute_module(changed=True, commands=commands)

    def test_nxos_system_domain_search_complex(self):
        domain_search = dict(name='example.net', vrf='management')
        set_module_args(dict(domain_search=[domain_search]))
        commands = ['vrf context management', 'no ip domain-list ansible.com',
                    'exit', 'vrf context management', 'no ip domain-list redhat.com',
                    'exit', 'no ip domain-list ansible.com',
                    'no ip domain-list redhat.com', 'vrf context management',
                    'ip domain-list example.net', 'exit']
        self.execute_module(changed=True, commands=commands)

    def test_nxos_system_name_servers(self):
        set_module_args(dict(name_servers=['1.2.3.4', '8.8.8.8']))
        commands = ['no ip name-server 172.26.1.1', 'vrf context management',
                    'no ip name-server 8.8.8.8', 'exit', 'vrf context management',
                    'no ip name-server 172.26.1.1', 'exit', 'ip name-server 1.2.3.4']
        self.execute_module(changed=True, commands=commands)

    def test_nxos_system_name_servers_complex(self):
        name_servers = dict(server='1.2.3.4', vrf='management')
        set_module_args(dict(name_servers=[name_servers]))
        commands = ['no ip name-server 8.8.8.8', 'no ip name-server 172.26.1.1',
                    'vrf context management', 'no ip name-server 8.8.8.8', 'exit',
                    'vrf context management', 'no ip name-server 172.26.1.1', 'exit',
                    'vrf context management', 'ip name-server 1.2.3.4', 'exit']
        self.execute_module(changed=True, commands=commands)

    def test_nxos_system_system_mtu(self):
        set_module_args(dict(system_mtu=2000))
        commands = ['system jumbomtu 2000']
        self.execute_module(changed=True, commands=commands)

    def test_nxos_system_state_absent(self):
        set_module_args(dict(state='absent'))
        commands = ['no hostname', 'no ip domain-name ansible.com',
                    'vrf context management', 'no ip domain-name eng.ansible.com', 'exit',
                    'no ip domain-list ansible.com', 'no ip domain-list redhat.com',
                    'vrf context management', 'no ip domain-list ansible.com', 'exit',
                    'vrf context management', 'no ip domain-list redhat.com', 'exit',
                    'no ip name-server 8.8.8.8', 'no ip name-server 172.26.1.1',
                    'vrf context management', 'no ip name-server 8.8.8.8', 'exit',
                    'vrf context management', 'no ip name-server 172.26.1.1', 'exit',
                    'no system jumbomtu']
        self.execute_module(changed=True, commands=commands)
gpl-3.0
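The test case above follows the usual Ansible network-module unit-test pattern: patch() objects started in setUp replace get_config/load_config so no NX-OS device is needed, load_fixtures feeds a canned running-config, and execute_module asserts on the changed flag and the generated command list. A minimal sketch of that start/stop patch lifecycle outside the Ansible harness is shown below, using the standard library's unittest.mock (the Python 3 equivalent of ansible.compat.tests.mock); the patched target and the 'fake_device_config' value are purely illustrative.

# Minimal sketch, standard library only: the same setUp/tearDown patch lifecycle
# used by TestNxosSystemModule above, demonstrated against an arbitrary target.
import unittest
from unittest import mock


class PatchLifecycleExample(unittest.TestCase):
    def setUp(self):
        # Start the patch for the duration of each test and keep the mock handle
        # so individual tests can set return values on it.
        self.mock_get_config = mock.patch('os.getcwd')
        self.get_config = self.mock_get_config.start()
        self.get_config.return_value = 'fake_device_config'

    def tearDown(self):
        # Always stop what was started, otherwise the patch leaks across tests.
        self.mock_get_config.stop()

    def test_patched_value_is_returned(self):
        import os
        self.assertEqual(os.getcwd(), 'fake_device_config')


if __name__ == '__main__':
    unittest.main()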
vponomaryov/manila
manila/share/drivers/hpe/hpe_3par_mediator.py
1
70220
# Copyright 2015 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HPE 3PAR Mediator for OpenStack Manila. This 'mediator' de-couples the 3PAR focused client from the OpenStack focused driver. """ from oslo_log import log from oslo_utils import importutils from oslo_utils import units import six from manila.data import utils as data_utils from manila import exception from manila import utils from manila.i18n import _, _LE, _LI, _LW hpe3parclient = importutils.try_import("hpe3parclient") if hpe3parclient: from hpe3parclient import file_client LOG = log.getLogger(__name__) MIN_CLIENT_VERSION = (4, 0, 0) DENY = '-' ALLOW = '+' OPEN_STACK_MANILA = 'OpenStack Manila' FULL = 1 THIN = 2 DEDUPE = 6 ENABLED = 1 DISABLED = 2 CACHE = 'cache' CONTINUOUS_AVAIL = 'continuous_avail' ACCESS_BASED_ENUM = 'access_based_enum' SMB_EXTRA_SPECS_MAP = { CACHE: CACHE, CONTINUOUS_AVAIL: 'ca', ACCESS_BASED_ENUM: 'abe', } IP_ALREADY_EXISTS = 'IP address %s already exists' USER_ALREADY_EXISTS = '"allow" permission already exists for "%s"' DOES_NOT_EXIST = 'does not exist, cannot' LOCAL_IP = '127.0.0.1' LOCAL_IP_RO = '127.0.0.2' SUPER_SHARE = 'OPENSTACK_SUPER_SHARE' TMP_RO_SNAP_EXPORT = "Temp RO snapshot export as source for creating RW share." class HPE3ParMediator(object): """3PAR client-facing code for the 3PAR driver. Version history: 1.0.0 - Begin Liberty development (post-Kilo) 1.0.1 - Report thin/dedup/hp_flash_cache capabilities 1.0.2 - Add share server/share network support 1.0.3 - Use hp3par prefix for share types and capabilities 2.0.0 - Rebranded HP to HPE 2.0.1 - Add access_level (e.g. 
read-only support) 2.0.2 - Add extend/shrink 2.0.3 - Fix SMB read-only access (added in 2.0.1) 2.0.4 - Remove file tree on delete when using nested shares #1538800 2.0.5 - Reduce the fsquota by share size when a share is deleted #1582931 2.0.6 - Read-write share from snapshot (using driver mount and copy) 2.0.7 - Add update_access support 2.0.8 - Multi pools support per backend 2.0.9 - Fix get_vfs() to correctly validate conf IP addresses at boot up #1621016 """ VERSION = "2.0.9" def __init__(self, **kwargs): self.hpe3par_username = kwargs.get('hpe3par_username') self.hpe3par_password = kwargs.get('hpe3par_password') self.hpe3par_api_url = kwargs.get('hpe3par_api_url') self.hpe3par_debug = kwargs.get('hpe3par_debug') self.hpe3par_san_ip = kwargs.get('hpe3par_san_ip') self.hpe3par_san_login = kwargs.get('hpe3par_san_login') self.hpe3par_san_password = kwargs.get('hpe3par_san_password') self.hpe3par_san_ssh_port = kwargs.get('hpe3par_san_ssh_port') self.hpe3par_san_private_key = kwargs.get('hpe3par_san_private_key') self.hpe3par_fstore_per_share = kwargs.get('hpe3par_fstore_per_share') self.hpe3par_require_cifs_ip = kwargs.get('hpe3par_require_cifs_ip') self.hpe3par_cifs_admin_access_username = ( kwargs.get('hpe3par_cifs_admin_access_username')) self.hpe3par_cifs_admin_access_password = ( kwargs.get('hpe3par_cifs_admin_access_password')) self.hpe3par_cifs_admin_access_domain = ( kwargs.get('hpe3par_cifs_admin_access_domain')) self.hpe3par_share_mount_path = kwargs.get('hpe3par_share_mount_path') self.my_ip = kwargs.get('my_ip') self.ssh_conn_timeout = kwargs.get('ssh_conn_timeout') self._client = None self.client_version = None @staticmethod def no_client(): return hpe3parclient is None def do_setup(self): if self.no_client(): msg = _('You must install hpe3parclient before using the 3PAR ' 'driver. Run "pip install --upgrade python-3parclient" ' 'to upgrade the hpe3parclient.') LOG.error(msg) raise exception.HPE3ParInvalidClient(message=msg) self.client_version = hpe3parclient.version_tuple if self.client_version < MIN_CLIENT_VERSION: msg = (_('Invalid hpe3parclient version found (%(found)s). ' 'Version %(minimum)s or greater required. 
Run "pip' ' install --upgrade python-3parclient" to upgrade' ' the hpe3parclient.') % {'found': '.'.join(map(six.text_type, self.client_version)), 'minimum': '.'.join(map(six.text_type, MIN_CLIENT_VERSION))}) LOG.error(msg) raise exception.HPE3ParInvalidClient(message=msg) try: self._client = file_client.HPE3ParFilePersonaClient( self.hpe3par_api_url) except Exception as e: msg = (_('Failed to connect to HPE 3PAR File Persona Client: %s') % six.text_type(e)) LOG.exception(msg) raise exception.ShareBackendException(message=msg) try: ssh_kwargs = {} if self.hpe3par_san_ssh_port: ssh_kwargs['port'] = self.hpe3par_san_ssh_port if self.ssh_conn_timeout: ssh_kwargs['conn_timeout'] = self.ssh_conn_timeout if self.hpe3par_san_private_key: ssh_kwargs['privatekey'] = self.hpe3par_san_private_key self._client.setSSHOptions( self.hpe3par_san_ip, self.hpe3par_san_login, self.hpe3par_san_password, **ssh_kwargs ) except Exception as e: msg = (_('Failed to set SSH options for HPE 3PAR File Persona ' 'Client: %s') % six.text_type(e)) LOG.exception(msg) raise exception.ShareBackendException(message=msg) LOG.info(_LI("HPE3ParMediator %(version)s, " "hpe3parclient %(client_version)s"), {"version": self.VERSION, "client_version": hpe3parclient.get_version_string()}) try: wsapi_version = self._client.getWsApiVersion()['build'] LOG.info(_LI("3PAR WSAPI %s"), wsapi_version) except Exception as e: msg = (_('Failed to get 3PAR WSAPI version: %s') % six.text_type(e)) LOG.exception(msg) raise exception.ShareBackendException(message=msg) if self.hpe3par_debug: self._client.debug_rest(True) # Includes SSH debug (setSSH above) def _wsapi_login(self): try: self._client.login(self.hpe3par_username, self.hpe3par_password) except Exception as e: msg = (_("Failed to Login to 3PAR (%(url)s) as %(user)s " "because: %(err)s") % {'url': self.hpe3par_api_url, 'user': self.hpe3par_username, 'err': six.text_type(e)}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def _wsapi_logout(self): try: self._client.http.unauthenticate() except Exception as e: msg = _LW("Failed to Logout from 3PAR (%(url)s) because %(err)s") LOG.warning(msg, {'url': self.hpe3par_api_url, 'err': six.text_type(e)}) # don't raise exception on logout() @staticmethod def build_export_locations(protocol, ips, path): if not ips: message = _('Failed to build export location due to missing IP.') raise exception.InvalidInput(reason=message) if not path: message = _('Failed to build export location due to missing path.') raise exception.InvalidInput(reason=message) share_proto = HPE3ParMediator.ensure_supported_protocol(protocol) if share_proto == 'nfs': return ['%s:%s' % (ip, path) for ip in ips] else: return [r'\\%s\%s' % (ip, path) for ip in ips] def get_provisioned_gb(self, fpg): total_mb = 0 try: result = self._client.getfsquota(fpg=fpg) except Exception as e: result = {'message': six.text_type(e)} error_msg = result.get('message') if error_msg: message = (_('Error while getting fsquotas for FPG ' '%(fpg)s: %(msg)s') % {'fpg': fpg, 'msg': error_msg}) LOG.error(message) raise exception.ShareBackendException(msg=message) for fsquota in result['members']: total_mb += float(fsquota['hardBlock']) return total_mb / units.Ki def get_fpg_status(self, fpg): """Get capacity and capabilities for FPG.""" try: result = self._client.getfpg(fpg) except Exception as e: msg = (_('Failed to get capacity for fpg %(fpg)s: %(e)s') % {'fpg': fpg, 'e': six.text_type(e)}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) if result['total'] != 1: msg = (_('Failed 
to get capacity for fpg %s.') % fpg) LOG.error(msg) raise exception.ShareBackendException(msg=msg) member = result['members'][0] total_capacity_gb = float(member['capacityKiB']) / units.Mi free_capacity_gb = float(member['availCapacityKiB']) / units.Mi volumes = member['vvs'] if isinstance(volumes, list): volume = volumes[0] # Use first name from list else: volume = volumes # There is just a name self._wsapi_login() try: volume_info = self._client.getVolume(volume) volume_set = self._client.getVolumeSet(fpg) finally: self._wsapi_logout() provisioning_type = volume_info['provisioningType'] if provisioning_type not in (THIN, FULL, DEDUPE): msg = (_('Unexpected provisioning type for FPG %(fpg)s: ' '%(ptype)s.') % {'fpg': fpg, 'ptype': provisioning_type}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) dedupe = provisioning_type == DEDUPE thin_provisioning = provisioning_type in (THIN, DEDUPE) flash_cache_policy = volume_set.get('flashCachePolicy', DISABLED) hpe3par_flash_cache = flash_cache_policy == ENABLED status = { 'pool_name': fpg, 'total_capacity_gb': total_capacity_gb, 'free_capacity_gb': free_capacity_gb, 'thin_provisioning': thin_provisioning, 'dedupe': dedupe, 'hpe3par_flash_cache': hpe3par_flash_cache, 'hp3par_flash_cache': hpe3par_flash_cache, } if thin_provisioning: status['provisioned_capacity_gb'] = self.get_provisioned_gb(fpg) return status @staticmethod def ensure_supported_protocol(share_proto): protocol = share_proto.lower() if protocol == 'cifs': protocol = 'smb' if protocol not in ['smb', 'nfs']: message = (_('Invalid protocol. Expected nfs or smb. Got %s.') % protocol) LOG.error(message) raise exception.InvalidShareAccess(reason=message) return protocol @staticmethod def other_protocol(share_proto): """Given 'nfs' or 'smb' (or equivalent) return the other one.""" protocol = HPE3ParMediator.ensure_supported_protocol(share_proto) return 'nfs' if protocol == 'smb' else 'smb' @staticmethod def ensure_prefix(uid, protocol=None, readonly=False): if uid.startswith('osf-'): return uid if protocol: proto = '-%s' % HPE3ParMediator.ensure_supported_protocol(protocol) else: proto = '' if readonly: ro = '-ro' else: ro = '' # Format is osf[-ro]-{nfs|smb}-uid return 'osf%s%s-%s' % (proto, ro, uid) @staticmethod def _get_nfs_options(extra_specs, readonly): """Validate the NFS extra_specs and return the options to use.""" nfs_options = extra_specs.get('hpe3par:nfs_options') if nfs_options is None: nfs_options = extra_specs.get('hp3par:nfs_options') if nfs_options: msg = _LW("hp3par:nfs_options is deprecated. Use " "hpe3par:nfs_options instead.") LOG.warning(msg) if nfs_options: options = nfs_options.split(',') else: options = [] # rw, ro, and (no)root_squash (in)secure options are not allowed in # extra_specs because they will be forcibly set below. # no_subtree_check and fsid are not allowed per 3PAR support. # Other strings will be allowed to be sent to the 3PAR which will do # further validation. options_not_allowed = ['ro', 'rw', 'no_root_squash', 'root_squash', 'secure', 'insecure', 'no_subtree_check', 'fsid'] invalid_options = [ option for option in options if option in options_not_allowed ] if invalid_options: raise exception.InvalidInput(_('Invalid hp3par:nfs_options or ' 'hpe3par:nfs_options in ' 'extra-specs. 
The following ' 'options are not allowed: %s') % invalid_options) options.append('ro' if readonly else 'rw') options.append('no_root_squash') options.append('insecure') return ','.join(options) def _build_createfshare_kwargs(self, protocol, fpg, fstore, readonly, sharedir, extra_specs, comment, client_ip=None): createfshare_kwargs = dict(fpg=fpg, fstore=fstore, sharedir=sharedir, comment=comment) if 'hp3par_flash_cache' in extra_specs: msg = _LW("hp3par_flash_cache is deprecated. Use " "hpe3par_flash_cache instead.") LOG.warning(msg) if protocol == 'nfs': if client_ip: createfshare_kwargs['clientip'] = client_ip else: # New NFS shares needs seed IP to prevent "all" access. # Readonly and readwrite NFS shares client IPs cannot overlap. if readonly: createfshare_kwargs['clientip'] = LOCAL_IP_RO else: createfshare_kwargs['clientip'] = LOCAL_IP options = self._get_nfs_options(extra_specs, readonly) createfshare_kwargs['options'] = options else: # To keep the original (Kilo, Liberty) behavior where CIFS IP # access rules were required in addition to user rules enable # this to use a seed IP instead of the default (all allowed). if self.hpe3par_require_cifs_ip: if client_ip: createfshare_kwargs['allowip'] = client_ip else: createfshare_kwargs['allowip'] = LOCAL_IP smb_opts = (ACCESS_BASED_ENUM, CONTINUOUS_AVAIL, CACHE) for smb_opt in smb_opts: opt_value = extra_specs.get('hpe3par:smb_%s' % smb_opt) if opt_value is None: opt_value = extra_specs.get('hp3par:smb_%s' % smb_opt) if opt_value: msg = _LW("hp3par:smb_* is deprecated. Use " "hpe3par:smb_* instead.") LOG.warning(msg) if opt_value: opt_key = SMB_EXTRA_SPECS_MAP[smb_opt] createfshare_kwargs[opt_key] = opt_value return createfshare_kwargs def _update_capacity_quotas(self, fstore, new_size, old_size, fpg, vfs): @utils.synchronized('hpe3par-update-quota-' + fstore) def _sync_update_capacity_quotas(fstore, new_size, old_size, fpg, vfs): """Update 3PAR quotas and return setfsquota output.""" if self.hpe3par_fstore_per_share: hcapacity = six.text_type(new_size * units.Ki) scapacity = hcapacity else: hard_size_mb = (new_size - old_size) * units.Ki soft_size_mb = hard_size_mb result = self._client.getfsquota( fpg=fpg, vfs=vfs, fstore=fstore) LOG.debug("getfsquota result=%s", result) quotas = result['members'] if len(quotas) == 1: hard_size_mb += int(quotas[0].get('hardBlock', '0')) soft_size_mb += int(quotas[0].get('softBlock', '0')) hcapacity = six.text_type(hard_size_mb) scapacity = six.text_type(soft_size_mb) return self._client.setfsquota(vfs, fpg=fpg, fstore=fstore, scapacity=scapacity, hcapacity=hcapacity) try: result = _sync_update_capacity_quotas( fstore, new_size, old_size, fpg, vfs) LOG.debug("setfsquota result=%s", result) except Exception as e: msg = (_('Failed to update capacity quota ' '%(size)s on %(fstore)s with exception: %(e)s') % {'size': new_size - old_size, 'fstore': fstore, 'e': six.text_type(e)}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) # Non-empty result is an error message returned from the 3PAR if result: msg = (_('Failed to update capacity quota ' '%(size)s on %(fstore)s with error: %(error)s') % {'size': new_size - old_size, 'fstore': fstore, 'error': result}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def _create_share(self, project_id, share_id, protocol, extra_specs, fpg, vfs, fstore, sharedir, readonly, size, comment, client_ip=None): share_name = self.ensure_prefix(share_id, readonly=readonly) if not (sharedir or self.hpe3par_fstore_per_share): sharedir = share_name if fstore: 
use_existing_fstore = True else: use_existing_fstore = False if self.hpe3par_fstore_per_share: # Do not use -ro in the fstore name. fstore = self.ensure_prefix(share_id, readonly=False) else: fstore = self.ensure_prefix(project_id, protocol) createfshare_kwargs = self._build_createfshare_kwargs( protocol, fpg, fstore, readonly, sharedir, extra_specs, comment, client_ip=client_ip) if not use_existing_fstore: try: result = self._client.createfstore( vfs, fstore, fpg=fpg, comment=comment) LOG.debug("createfstore result=%s", result) except Exception as e: msg = (_('Failed to create fstore %(fstore)s: %(e)s') % {'fstore': fstore, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) if size: self._update_capacity_quotas(fstore, size, 0, fpg, vfs) try: if readonly and protocol == 'nfs': # For NFS, RO is a 2nd 3PAR share pointing to same sharedir share_name = self.ensure_prefix(share_id, readonly=readonly) result = self._client.createfshare(protocol, vfs, share_name, **createfshare_kwargs) LOG.debug("createfshare result=%s", result) except Exception as e: msg = (_('Failed to create share %(share_name)s: %(e)s') % {'share_name': share_name, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) try: result = self._client.getfshare( protocol, share_name, fpg=fpg, vfs=vfs, fstore=fstore) LOG.debug("getfshare result=%s", result) except Exception as e: msg = (_('Failed to get fshare %(share_name)s after creating it: ' '%(e)s') % {'share_name': share_name, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) if result['total'] != 1: msg = (_('Failed to get fshare %(share_name)s after creating it. ' 'Expected to get 1 fshare. Got %(total)s.') % {'share_name': share_name, 'total': result['total']}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) return result['members'][0] def create_share(self, project_id, share_id, share_proto, extra_specs, fpg, vfs, fstore=None, sharedir=None, readonly=False, size=None, comment=OPEN_STACK_MANILA, client_ip=None): """Create the share and return its path. This method can create a share when called by the driver or when called locally from create_share_from_snapshot(). The optional parameters allow re-use. :param project_id: The tenant ID. :param share_id: The share-id with or without osf- prefix. :param share_proto: The protocol (to map to smb or nfs) :param extra_specs: The share type extra-specs :param fpg: The file provisioning group :param vfs: The virtual file system :param fstore: (optional) The file store. When provided, an existing file store is used. Otherwise one is created. :param sharedir: (optional) Share directory. :param readonly: (optional) Create share as read-only. :param size: (optional) Size limit for file store if creating one. :param comment: (optional) Comment to set on the share. :param client_ip: (optional) IP address to give access to. 
:return: share path string """ protocol = self.ensure_supported_protocol(share_proto) share = self._create_share(project_id, share_id, protocol, extra_specs, fpg, vfs, fstore, sharedir, readonly, size, comment, client_ip=client_ip) if protocol == 'nfs': return share['sharePath'] else: return share['shareName'] def create_share_from_snapshot(self, share_id, share_proto, extra_specs, orig_project_id, orig_share_id, snapshot_id, fpg, vfs, ips, size=None, comment=OPEN_STACK_MANILA): protocol = self.ensure_supported_protocol(share_proto) snapshot_tag = self.ensure_prefix(snapshot_id) orig_share_name = self.ensure_prefix(orig_share_id) snapshot = self._find_fsnap(orig_project_id, orig_share_name, protocol, snapshot_tag, fpg, vfs) if not snapshot: msg = (_('Failed to create share from snapshot for ' 'FPG/VFS/tag %(fpg)s/%(vfs)s/%(tag)s. ' 'Snapshot not found.') % { 'fpg': fpg, 'vfs': vfs, 'tag': snapshot_tag}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) fstore = snapshot['fstoreName'] if fstore == orig_share_name: # No subdir for original share created with fstore_per_share sharedir = '.snapshot/%s' % snapshot['snapName'] else: sharedir = '.snapshot/%s/%s' % (snapshot['snapName'], orig_share_name) if protocol == "smb" and (not self.hpe3par_cifs_admin_access_username or not self.hpe3par_cifs_admin_access_password): LOG.warning(_LW("hpe3par_cifs_admin_access_username and " "hpe3par_cifs_admin_access_password must be " "provided in order for CIFS shares created from " "snapshots to be writable.")) return self.create_share( orig_project_id, share_id, protocol, extra_specs, fpg, vfs, fstore=fstore, sharedir=sharedir, readonly=True, comment=comment, ) # Export the snapshot as read-only to copy from. temp = ' '.join((comment, TMP_RO_SNAP_EXPORT)) source_path = self.create_share( orig_project_id, share_id, protocol, extra_specs, fpg, vfs, fstore=fstore, sharedir=sharedir, readonly=True, comment=temp, client_ip=self.my_ip ) try: share_name = self.ensure_prefix(share_id) dest_path = self.create_share( orig_project_id, share_id, protocol, extra_specs, fpg, vfs, fstore=fstore, readonly=False, size=size, comment=comment, client_ip=','.join((self.my_ip, LOCAL_IP)) ) try: if protocol == 'smb': self._grant_admin_smb_access( protocol, fpg, vfs, fstore, comment, share=share_name) ro_share_name = self.ensure_prefix(share_id, readonly=True) self._grant_admin_smb_access( protocol, fpg, vfs, fstore, temp, share=ro_share_name) source_locations = self.build_export_locations( protocol, ips, source_path) dest_locations = self.build_export_locations( protocol, ips, dest_path) self._copy_share_data( share_id, source_locations[0], dest_locations[0], protocol) # Revoke the admin access that was needed to copy to the dest. 
if protocol == 'nfs': self._change_access(DENY, orig_project_id, share_id, protocol, 'ip', self.my_ip, 'rw', fpg, vfs) else: self._revoke_admin_smb_access( protocol, fpg, vfs, fstore, comment) except Exception as e: msg = _LE('Exception during mount and copy from RO snapshot ' 'to RW share: %s') LOG.error(msg, e) self._delete_share(share_name, protocol, fpg, vfs, fstore) raise finally: self._delete_ro_share( orig_project_id, share_id, protocol, fpg, vfs, fstore) return dest_path def _copy_share_data(self, dest_id, source_location, dest_location, protocol): mount_location = "%s%s" % (self.hpe3par_share_mount_path, dest_id) source_share_dir = '/'.join((mount_location, "source_snap")) dest_share_dir = '/'.join((mount_location, "dest_share")) dirs_to_remove = [] dirs_to_unmount = [] try: utils.execute('mkdir', '-p', source_share_dir, run_as_root=True) dirs_to_remove.append(source_share_dir) self._mount_share(protocol, source_location, source_share_dir) dirs_to_unmount.append(source_share_dir) utils.execute('mkdir', dest_share_dir, run_as_root=True) dirs_to_remove.append(dest_share_dir) self._mount_share(protocol, dest_location, dest_share_dir) dirs_to_unmount.append(dest_share_dir) self._copy_data(source_share_dir, dest_share_dir) finally: for d in dirs_to_unmount: self._unmount_share(d) if dirs_to_remove: dirs_to_remove.append(mount_location) utils.execute('rmdir', *dirs_to_remove, run_as_root=True) def _copy_data(self, source_share_dir, dest_share_dir): err_msg = None err_data = None try: copy = data_utils.Copy(source_share_dir, dest_share_dir, '') copy.run() progress = copy.get_progress()['total_progress'] if progress != 100: err_msg = _("Failed to copy data, reason: " "Total progress %d != 100.") err_data = progress except Exception as err: err_msg = _("Failed to copy data, reason: %s.") err_data = six.text_type(err) if err_msg: raise exception.ShareBackendException(msg=err_msg % err_data) def _delete_share(self, share_name, protocol, fpg, vfs, fstore): try: self._client.removefshare( protocol, vfs, share_name, fpg=fpg, fstore=fstore) except Exception as e: msg = (_('Failed to remove share %(share_name)s: %(e)s') % {'share_name': share_name, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) def _delete_ro_share(self, project_id, share_id, protocol, fpg, vfs, fstore): share_name_ro = self.ensure_prefix(share_id, readonly=True) if not fstore: fstore = self._find_fstore(project_id, share_name_ro, protocol, fpg, vfs, allow_cross_protocol=True) if fstore: self._delete_share(share_name_ro, protocol, fpg, vfs, fstore) return fstore def delete_share(self, project_id, share_id, share_size, share_proto, fpg, vfs, share_ip): protocol = self.ensure_supported_protocol(share_proto) share_name = self.ensure_prefix(share_id) fstore = self._find_fstore(project_id, share_name, protocol, fpg, vfs, allow_cross_protocol=True) removed_writable = False if fstore: self._delete_share(share_name, protocol, fpg, vfs, fstore) removed_writable = True # Try to delete the read-only twin share, too. fstore = self._delete_ro_share( project_id, share_id, protocol, fpg, vfs, fstore) if fstore == share_name: try: self._client.removefstore(vfs, fstore, fpg=fpg) except Exception as e: msg = (_('Failed to remove fstore %(fstore)s: %(e)s') % {'fstore': fstore, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) elif removed_writable: try: # Attempt to remove file tree on delete when using nested # shares. 
If the file tree cannot be removed for whatever # reason, we will not treat this as an error_deleting # issue. We will allow the delete to continue as requested. self._delete_file_tree( share_name, protocol, fpg, vfs, fstore, share_ip) # reduce the fsquota by share size when a tree is deleted. self._update_capacity_quotas( fstore, 0, share_size, fpg, vfs) except Exception as e: msg = _LW('Exception during cleanup of deleted ' 'share %(share)s in filestore %(fstore)s: %(e)s') data = { 'fstore': fstore, 'share': share_name, 'e': six.text_type(e), } LOG.warning(msg, data) def _delete_file_tree(self, share_name, protocol, fpg, vfs, fstore, share_ip): # If the share protocol is CIFS, we need to make sure the admin # provided the proper config values. If they have not, we can simply # return out and log a warning. if protocol == "smb" and (not self.hpe3par_cifs_admin_access_username or not self.hpe3par_cifs_admin_access_password): LOG.warning(_LW("hpe3par_cifs_admin_access_username and " "hpe3par_cifs_admin_access_password must be " "provided in order for the file tree to be " "properly deleted.")) return mount_location = "%s%s" % (self.hpe3par_share_mount_path, share_name) share_dir = mount_location + "/%s" % share_name # Create the super share. self._create_super_share(protocol, fpg, vfs, fstore) # Create the mount directory. self._create_mount_directory(mount_location) # Mount the super share. self._mount_super_share(protocol, mount_location, fpg, vfs, fstore, share_ip) # Delete the share from the super share. self._delete_share_directory(share_dir) # Unmount the super share. self._unmount_share(mount_location) # Delete the mount directory. self._delete_share_directory(mount_location) def _grant_admin_smb_access(self, protocol, fpg, vfs, fstore, comment, share=SUPER_SHARE): user = '+%s:fullcontrol' % self.hpe3par_cifs_admin_access_username setfshare_kwargs = { 'fpg': fpg, 'fstore': fstore, 'comment': comment, 'allowperm': user, } try: self._client.setfshare( protocol, vfs, share, **setfshare_kwargs) except Exception as err: raise exception.ShareBackendException( msg=_("There was an error adding permissions: %s") % err) def _revoke_admin_smb_access(self, protocol, fpg, vfs, fstore, comment, share=SUPER_SHARE): user = '-%s:fullcontrol' % self.hpe3par_cifs_admin_access_username setfshare_kwargs = { 'fpg': fpg, 'fstore': fstore, 'comment': comment, 'allowperm': user, } try: self._client.setfshare( protocol, vfs, share, **setfshare_kwargs) except Exception as err: raise exception.ShareBackendException( msg=_("There was an error revoking permissions: %s") % err) def _create_super_share(self, protocol, fpg, vfs, fstore, readonly=False): sharedir = '' extra_specs = {} comment = 'OpenStack super share used to delete nested shares.' createfshare_kwargs = self._build_createfshare_kwargs(protocol, fpg, fstore, readonly, sharedir, extra_specs, comment) # If the share is NFS, we need to give the host access to the share in # order to properly mount it. 
if protocol == 'nfs': createfshare_kwargs['clientip'] = self.my_ip else: createfshare_kwargs['allowip'] = self.my_ip try: result = self._client.createfshare(protocol, vfs, SUPER_SHARE, **createfshare_kwargs) LOG.debug("createfshare for %(name)s, result=%(result)s", {'name': SUPER_SHARE, 'result': result}) except Exception as e: msg = (_('Failed to create share %(share_name)s: %(e)s'), {'share_name': SUPER_SHARE, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) # If the share is CIFS, we need to grant access to the specified admin. if protocol == 'smb': self._grant_admin_smb_access(protocol, fpg, vfs, fstore, comment) def _create_mount_directory(self, mount_location): try: utils.execute('mkdir', mount_location, run_as_root=True) except Exception as err: message = (_LW("There was an error creating mount directory: " "%s. The nested file tree will not be deleted."), six.text_type(err)) LOG.warning(message) def _mount_share(self, protocol, export_location, mount_dir): if protocol == 'nfs': cmd = ('mount', '-t', 'nfs', export_location, mount_dir) utils.execute(*cmd, run_as_root=True) else: export_location = export_location.replace('\\', '/') cred = ('username=' + self.hpe3par_cifs_admin_access_username + ',password=' + self.hpe3par_cifs_admin_access_password + ',domain=' + self.hpe3par_cifs_admin_access_domain) cmd = ('mount', '-t', 'cifs', export_location, mount_dir, '-o', cred) utils.execute(*cmd, run_as_root=True) def _mount_super_share(self, protocol, mount_dir, fpg, vfs, fstore, share_ip): try: mount_location = self._generate_mount_path( protocol, fpg, vfs, fstore, share_ip) self._mount_share(protocol, mount_location, mount_dir) except Exception as err: message = (_LW("There was an error mounting the super share: " "%s. The nested file tree will not be deleted."), six.text_type(err)) LOG.warning(message) def _unmount_share(self, mount_location): try: utils.execute('umount', mount_location, run_as_root=True) except Exception as err: message = _LW("There was an error unmounting the share at " "%(mount_location)s: %(error)s") msg_data = { 'mount_location': mount_location, 'error': six.text_type(err), } LOG.warning(message, msg_data) def _delete_share_directory(self, directory): try: utils.execute('rm', '-rf', directory, run_as_root=True) except Exception as err: message = (_LW("There was an error removing the share: " "%s. 
The nested file tree will not be deleted."), six.text_type(err)) LOG.warning(message) def _generate_mount_path(self, protocol, fpg, vfs, fstore, share_ip): path = None if protocol == 'nfs': path = (("%(share_ip)s:/%(fpg)s/%(vfs)s/%(fstore)s/") % {'share_ip': share_ip, 'fpg': fpg, 'vfs': vfs, 'fstore': fstore}) else: path = (("//%(share_ip)s/%(share_name)s/") % {'share_ip': share_ip, 'share_name': SUPER_SHARE}) return path def get_vfs(self, fpg, vfs=None): """Get the VFS or raise an exception.""" try: result = self._client.getvfs(fpg=fpg, vfs=vfs) except Exception as e: msg = (_('Exception during getvfs %(vfs)s: %(e)s') % {'vfs': vfs, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) if result['total'] != 1: error_msg = result.get('message') if error_msg: message = (_('Error while validating FPG/VFS ' '(%(fpg)s/%(vfs)s): %(msg)s') % {'fpg': fpg, 'vfs': vfs, 'msg': error_msg}) LOG.error(message) raise exception.ShareBackendException(msg=message) else: message = (_('Error while validating FPG/VFS ' '(%(fpg)s/%(vfs)s): Expected 1, ' 'got %(total)s.') % {'fpg': fpg, 'vfs': vfs, 'total': result['total']}) LOG.error(message) raise exception.ShareBackendException(msg=message) value = result['members'][0] if isinstance(value['vfsip'], dict): # This is for 3parclient returning only one VFS entry LOG.debug("3parclient version up to 4.2.1 is in use. Client " "upgrade may be needed if using a VFS with multiple " "IP addresses.") value['vfsip']['address'] = [value['vfsip']['address']] else: # This is for 3parclient returning list of VFS entries # Format get_vfs ret value to combine all IP addresses discovered_vfs_ips = [] for vfs_entry in value['vfsip']: if vfs_entry['address']: discovered_vfs_ips.append(vfs_entry['address']) value['vfsip'] = value['vfsip'][0] value['vfsip']['address'] = discovered_vfs_ips return value @staticmethod def _is_share_from_snapshot(fshare): path = fshare.get('shareDir') if path: return '.snapshot' in path.split('/') path = fshare.get('sharePath') return path and '.snapshot' in path.split('/') def create_snapshot(self, orig_project_id, orig_share_id, orig_share_proto, snapshot_id, fpg, vfs): """Creates a snapshot of a share.""" fshare = self._find_fshare(orig_project_id, orig_share_id, orig_share_proto, fpg, vfs) if not fshare: msg = (_('Failed to create snapshot for FPG/VFS/fshare ' '%(fpg)s/%(vfs)s/%(fshare)s: Failed to find fshare.') % {'fpg': fpg, 'vfs': vfs, 'fshare': orig_share_id}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) if self._is_share_from_snapshot(fshare): msg = (_('Failed to create snapshot for FPG/VFS/fshare ' '%(fpg)s/%(vfs)s/%(fshare)s: Share is a read-only ' 'share of an existing snapshot.') % {'fpg': fpg, 'vfs': vfs, 'fshare': orig_share_id}) LOG.error(msg) raise exception.ShareBackendException(msg=msg) fstore = fshare.get('fstoreName') snapshot_tag = self.ensure_prefix(snapshot_id) try: result = self._client.createfsnap( vfs, fstore, snapshot_tag, fpg=fpg) LOG.debug("createfsnap result=%s", result) except Exception as e: msg = (_('Failed to create snapshot for FPG/VFS/fstore ' '%(fpg)s/%(vfs)s/%(fstore)s: %(e)s') % {'fpg': fpg, 'vfs': vfs, 'fstore': fstore, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) def delete_snapshot(self, orig_project_id, orig_share_id, orig_proto, snapshot_id, fpg, vfs): """Deletes a snapshot of a share.""" snapshot_tag = self.ensure_prefix(snapshot_id) snapshot = self._find_fsnap(orig_project_id, orig_share_id, orig_proto, 
snapshot_tag, fpg, vfs) if not snapshot: return fstore = snapshot.get('fstoreName') for protocol in ('nfs', 'smb'): try: shares = self._client.getfshare(protocol, fpg=fpg, vfs=vfs, fstore=fstore) except Exception as e: msg = (_('Unexpected exception while getting share list. ' 'Cannot delete snapshot without checking for ' 'dependent shares first: %s') % six.text_type(e)) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) for share in shares['members']: if protocol == 'nfs': path = share['sharePath'][1:].split('/') dot_snapshot_index = 3 else: if share['shareDir']: path = share['shareDir'].split('/') else: path = None dot_snapshot_index = 0 snapshot_index = dot_snapshot_index + 1 if path and len(path) > snapshot_index: if (path[dot_snapshot_index] == '.snapshot' and path[snapshot_index].endswith(snapshot_tag)): msg = (_('Cannot delete snapshot because it has a ' 'dependent share.')) raise exception.Invalid(msg) snapname = snapshot['snapName'] try: result = self._client.removefsnap( vfs, fstore, snapname=snapname, fpg=fpg) LOG.debug("removefsnap result=%s", result) except Exception as e: msg = (_('Failed to delete snapshot for FPG/VFS/fstore/snapshot ' '%(fpg)s/%(vfs)s/%(fstore)s/%(snapname)s: %(e)s') % { 'fpg': fpg, 'vfs': vfs, 'fstore': fstore, 'snapname': snapname, 'e': six.text_type(e)}) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) # Try to reclaim the space try: self._client.startfsnapclean(fpg, reclaimStrategy='maxspeed') except Exception: # Remove already happened so only log this. LOG.exception(_LE('Unexpected exception calling startfsnapclean ' 'for FPG %(fpg)s.'), {'fpg': fpg}) @staticmethod def _validate_access_type(protocol, access_type): if access_type not in ('ip', 'user'): msg = (_("Invalid access type. Expected 'ip' or 'user'. " "Actual '%s'.") % access_type) LOG.error(msg) raise exception.InvalidInput(reason=msg) if protocol == 'nfs' and access_type != 'ip': msg = (_("Invalid NFS access type. HPE 3PAR NFS supports 'ip'. " "Actual '%s'.") % access_type) LOG.error(msg) raise exception.HPE3ParInvalid(err=msg) return protocol @staticmethod def _validate_access_level(protocol, access_type, access_level, fshare): readonly = access_level == 'ro' snapshot = HPE3ParMediator._is_share_from_snapshot(fshare) if snapshot and not readonly: reason = _('3PAR shares from snapshots require read-only access') LOG.error(reason) raise exception.InvalidShareAccess(reason=reason) if protocol == 'smb' and access_type == 'ip' and snapshot != readonly: msg = (_("Invalid CIFS access rule. HPE 3PAR optionally supports " "IP access rules for CIFS shares, but they must be " "read-only for shares from snapshots and read-write for " "other shares. Use the required CIFS 'user' access rules " "to refine access.")) LOG.error(msg) raise exception.InvalidShareAccess(reason=msg) @staticmethod def ignore_benign_access_results(plus_or_minus, access_type, access_to, result): # TODO(markstur): Remove the next line when hpe3parclient is fixed. result = [x for x in result if x != '\r'] if result: if plus_or_minus == DENY: if DOES_NOT_EXIST in result[0]: return None else: if access_type == 'user': if USER_ALREADY_EXISTS % access_to in result[0]: return None elif IP_ALREADY_EXISTS % access_to in result[0]: return None return result def _change_access(self, plus_or_minus, project_id, share_id, share_proto, access_type, access_to, access_level, fpg, vfs, extra_specs=None): """Allow or deny access to a share. 
Plus_or_minus character indicates add to allow list (+) or remove from allow list (-). """ readonly = access_level == 'ro' protocol = self.ensure_supported_protocol(share_proto) try: self._validate_access_type(protocol, access_type) except Exception: if plus_or_minus == DENY: # Catch invalid rules for deny. Allow them to be deleted. return else: raise fshare = self._find_fshare(project_id, share_id, protocol, fpg, vfs, readonly=readonly) if not fshare: # Change access might apply to the share with the name that # does not match the access_level prefix. other_fshare = self._find_fshare(project_id, share_id, protocol, fpg, vfs, readonly=not readonly) if other_fshare: if plus_or_minus == DENY: # Try to deny rule from 'other' share for SMB or legacy. fshare = other_fshare elif self._is_share_from_snapshot(other_fshare): # Found a share-from-snapshot from before # "-ro" was added to the name. Use it. fshare = other_fshare elif protocol == 'nfs': # We don't have the RO|RW share we need, but the # opposite one already exists. It is OK to create # the one we need for ALLOW with NFS (not from snapshot). fstore = other_fshare.get('fstoreName') sharedir = other_fshare.get('shareDir') comment = other_fshare.get('comment') fshare = self._create_share(project_id, share_id, protocol, extra_specs, fpg, vfs, fstore=fstore, sharedir=sharedir, readonly=readonly, size=None, comment=comment) else: # SMB only has one share for RO and RW. Try to use it. fshare = other_fshare if not fshare: msg = _('Failed to change (%(change)s) access ' 'to FPG/share %(fpg)s/%(share)s ' 'for %(type)s %(to)s %(level)s): ' 'Share does not exist on 3PAR.') msg_data = { 'change': plus_or_minus, 'fpg': fpg, 'share': share_id, 'type': access_type, 'to': access_to, 'level': access_level, } if plus_or_minus == DENY: LOG.warning(msg, msg_data) return else: raise exception.HPE3ParInvalid(err=msg % msg_data) try: self._validate_access_level( protocol, access_type, access_level, fshare) except exception.InvalidShareAccess as e: if plus_or_minus == DENY: # Allow invalid access rules to be deleted. 
msg = _('Ignoring deny invalid access rule ' 'for FPG/share %(fpg)s/%(share)s ' 'for %(type)s %(to)s %(level)s): %(e)s') msg_data = { 'change': plus_or_minus, 'fpg': fpg, 'share': share_id, 'type': access_type, 'to': access_to, 'level': access_level, 'e': six.text_type(e), } LOG.info(msg, msg_data) return else: raise share_name = fshare.get('shareName') setfshare_kwargs = { 'fpg': fpg, 'fstore': fshare.get('fstoreName'), 'comment': fshare.get('comment'), } if protocol == 'nfs': access_change = '%s%s' % (plus_or_minus, access_to) setfshare_kwargs['clientip'] = access_change elif protocol == 'smb': if access_type == 'ip': access_change = '%s%s' % (plus_or_minus, access_to) setfshare_kwargs['allowip'] = access_change else: access_str = 'read' if readonly else 'fullcontrol' perm = '%s%s:%s' % (plus_or_minus, access_to, access_str) setfshare_kwargs['allowperm'] = perm try: result = self._client.setfshare( protocol, vfs, share_name, **setfshare_kwargs) result = self.ignore_benign_access_results( plus_or_minus, access_type, access_to, result) except Exception as e: result = six.text_type(e) LOG.debug("setfshare result=%s", result) if result: msg = (_('Failed to change (%(change)s) access to FPG/share ' '%(fpg)s/%(share)s for %(type)s %(to)s %(level)s: ' '%(error)s') % {'change': plus_or_minus, 'fpg': fpg, 'share': share_id, 'type': access_type, 'to': access_to, 'level': access_level, 'error': result}) raise exception.ShareBackendException(msg=msg) def _find_fstore(self, project_id, share_id, share_proto, fpg, vfs, allow_cross_protocol=False): share = self._find_fshare(project_id, share_id, share_proto, fpg, vfs, allow_cross_protocol=allow_cross_protocol) return share.get('fstoreName') if share else None def _find_fshare(self, project_id, share_id, share_proto, fpg, vfs, allow_cross_protocol=False, readonly=False): share = self._find_fshare_with_proto(project_id, share_id, share_proto, fpg, vfs, readonly=readonly) if not share and allow_cross_protocol: other_proto = self.other_protocol(share_proto) share = self._find_fshare_with_proto(project_id, share_id, other_proto, fpg, vfs, readonly=readonly) return share def _find_fshare_with_proto(self, project_id, share_id, share_proto, fpg, vfs, readonly=False): protocol = self.ensure_supported_protocol(share_proto) share_name = self.ensure_prefix(share_id, readonly=readonly) project_fstore = self.ensure_prefix(project_id, share_proto) search_order = [ {'fpg': fpg, 'vfs': vfs, 'fstore': project_fstore}, {'fpg': fpg, 'vfs': vfs, 'fstore': share_name}, {'fpg': fpg}, {} ] try: for search_params in search_order: result = self._client.getfshare(protocol, share_name, **search_params) shares = result.get('members', []) if len(shares) == 1: return shares[0] except Exception as e: msg = (_('Unexpected exception while getting share list: %s') % six.text_type(e)) raise exception.ShareBackendException(msg=msg) def _find_fsnap(self, project_id, share_id, orig_proto, snapshot_tag, fpg, vfs): share_name = self.ensure_prefix(share_id) osf_project_id = self.ensure_prefix(project_id, orig_proto) pattern = '*_%s' % self.ensure_prefix(snapshot_tag) search_order = [ {'pat': True, 'fpg': fpg, 'vfs': vfs, 'fstore': osf_project_id}, {'pat': True, 'fpg': fpg, 'vfs': vfs, 'fstore': share_name}, {'pat': True, 'fpg': fpg}, {'pat': True}, ] try: for search_params in search_order: result = self._client.getfsnap(pattern, **search_params) snapshots = result.get('members', []) if len(snapshots) == 1: return snapshots[0] except Exception as e: msg = (_('Unexpected exception while getting 
snapshots: %s') % six.text_type(e)) raise exception.ShareBackendException(msg=msg) def update_access(self, project_id, share_id, share_proto, extra_specs, access_rules, add_rules, delete_rules, fpg, vfs): """Update access to a share.""" protocol = self.ensure_supported_protocol(share_proto) if not (delete_rules or add_rules): # We need to re add all the rules. Check with 3PAR on it's current # list and only add the deltas. share = self._find_fshare(project_id, share_id, share_proto, fpg, vfs) ref_users = [] ro_ref_rules = [] if protocol == 'nfs': ref_rules = share['clients'] # Check for RO rules. ro_share = self._find_fshare(project_id, share_id, share_proto, fpg, vfs, readonly=True) if ro_share: ro_ref_rules = ro_share['clients'] else: ref_rules = [x[0] for x in share['allowPerm']] ref_users = ref_rules[:] # Get IP access as well ips = share['allowIP'] if not isinstance(ips, list): # If there is only one IP, the API returns a string # rather than a list. We need to account for that. ips = [ips] ref_rules += ips # Retrieve base rules. base_rules = [] for rule in access_rules: base_rules.append(rule['access_to']) # Check if we need to remove any rules from 3PAR. for rule in ref_rules: if rule in ref_users: rule_type = 'user' else: rule_type = 'ip' if rule not in base_rules + [LOCAL_IP, LOCAL_IP_RO]: self._change_access(DENY, project_id, share_id, share_proto, rule_type, rule, None, fpg, vfs) # Check to see if there are any RO rules to remove. for rule in ro_ref_rules: if rule not in base_rules + [LOCAL_IP, LOCAL_IP_RO]: self._change_access(DENY, project_id, share_id, share_proto, rule_type, rule, 'ro', fpg, vfs) # Check the rules we need to add. for rule in access_rules: if rule['access_to'] not in ref_rules and ( rule['access_to'] not in ro_ref_rules): # Rule does not exist, we need to add it self._change_access(ALLOW, project_id, share_id, share_proto, rule['access_type'], rule['access_to'], rule['access_level'], fpg, vfs, extra_specs=extra_specs) else: # We have deltas of the rules that need to be added and deleted. for rule in delete_rules: self._change_access(DENY, project_id, share_id, share_proto, rule['access_type'], rule['access_to'], rule['access_level'], fpg, vfs) for rule in add_rules: self._change_access(ALLOW, project_id, share_id, share_proto, rule['access_type'], rule['access_to'], rule['access_level'], fpg, vfs, extra_specs=extra_specs) def resize_share(self, project_id, share_id, share_proto, new_size, old_size, fpg, vfs): """Extends or shrinks size of existing share.""" share_name = self.ensure_prefix(share_id) fstore = self._find_fstore(project_id, share_name, share_proto, fpg, vfs, allow_cross_protocol=False) if not fstore: msg = (_('Cannot resize share because it was not found.')) raise exception.InvalidShare(reason=msg) self._update_capacity_quotas(fstore, new_size, old_size, fpg, vfs) def fsip_exists(self, fsip): """Try to get FSIP. Return True if it exists.""" vfs = fsip['vfs'] fpg = fsip['fspool'] try: result = self._client.getfsip(vfs, fpg=fpg) LOG.debug("getfsip result: %s", result) except Exception: msg = (_('Failed to get FSIPs for FPG/VFS %(fspool)s/%(vfs)s.') % fsip) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) for member in result['members']: if all(item in member.items() for item in fsip.items()): return True return False def create_fsip(self, ip, subnet, vlantag, fpg, vfs): vlantag_str = six.text_type(vlantag) if vlantag else '0' # Try to create it. It's OK if it already exists. 
try: result = self._client.createfsip(ip, subnet, vfs, fpg=fpg, vlantag=vlantag_str) LOG.debug("createfsip result: %s", result) except Exception: msg = (_('Failed to create FSIP for %s') % ip) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) # Verify that it really exists. fsip = { 'fspool': fpg, 'vfs': vfs, 'address': ip, 'prefixLen': subnet, 'vlanTag': vlantag_str, } if not self.fsip_exists(fsip): msg = (_('Failed to get FSIP after creating it for ' 'FPG/VFS/IP/subnet/VLAN ' '%(fspool)s/%(vfs)s/' '%(address)s/%(prefixLen)s/%(vlanTag)s.') % fsip) LOG.error(msg) raise exception.ShareBackendException(msg=msg) def remove_fsip(self, ip, fpg, vfs): if not (vfs and ip): # If there is no VFS and/or IP, then there is no FSIP to remove. return try: result = self._client.removefsip(vfs, ip, fpg=fpg) LOG.debug("removefsip result: %s", result) except Exception: msg = (_('Failed to remove FSIP %s') % ip) LOG.exception(msg) raise exception.ShareBackendException(msg=msg) # Verify that it really no longer exists. fsip = { 'fspool': fpg, 'vfs': vfs, 'address': ip, } if self.fsip_exists(fsip): msg = (_('Failed to remove FSIP for FPG/VFS/IP ' '%(fspool)s/%(vfs)s/%(address)s.') % fsip) LOG.error(msg) raise exception.ShareBackendException(msg=msg)
apache-2.0
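The HPE 3PAR mediator above detects shares created from snapshots by looking for a '.snapshot' component in the share's directory or path, and delete_snapshot refuses to proceed while any share path still references the snapshot tag. Below is a minimal, standalone sketch of that path logic; the dictionaries are hypothetical stand-ins for 3PAR getfshare output, and has_dependent_share is a simplified illustration (the driver itself uses fixed per-protocol path indices rather than searching for '.snapshot').

def is_share_from_snapshot(fshare):
    """True if the share's directory or path contains a '.snapshot' component,
    i.e. it is a read-only share backed by a snapshot."""
    path = fshare.get('shareDir')
    if path:
        return '.snapshot' in path.split('/')
    path = fshare.get('sharePath')
    return bool(path) and '.snapshot' in path.split('/')


def has_dependent_share(shares, snapshot_tag):
    """True if any share path points under a .snapshot/<...snapshot_tag> directory."""
    for share in shares:
        path = (share.get('shareDir') or share.get('sharePath', '')).lstrip('/')
        parts = path.split('/')
        if '.snapshot' in parts:
            idx = parts.index('.snapshot')
            if len(parts) > idx + 1 and parts[idx + 1].endswith(snapshot_tag):
                return True
    return False


# Hypothetical sample data, for illustration only.
print(is_share_from_snapshot({'shareDir': 'share_dir/.snapshot/snap1'}))          # True
print(has_dependent_share([{'shareDir': 'dir/.snapshot/osf-snap1'}], 'snap1'))    # True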
Opentaste/bombolone
bombolone/routes/content.py
1
5760
# -*- coding: utf-8 -*- """ content.py ~~~~~~ :copyright: (c) 2014 by @zizzamia :license: BSD (See LICENSE for details) """ from flask import (Blueprint, abort, request, session, g, current_app, render_template, send_from_directory) # Imports inside Bombolone import bombolone.model.pages from bombolone.core.utils import get_content_dict content = Blueprint('content', __name__) def get_page_content(code_lan, num_of_path, path): """ By passing the language code and path, is return the page content object """ # Inside any page it saved the path with this format url = "url_{}.{}".format(num_of_path, code_lan) # Create a list of pages list_pages = [ page for page in bombolone.model.pages.find(field=url, field_value={ "$exists" : True })] for page in list_pages: count = 0 # Any time the "path" is the same or we have some # value like "<i_am_variable>" increase the counter for i in range(num_of_path): print page["url_"+str(num_of_path)] page_by_lang = page["url_"+str(num_of_path)].get(code_lan, None) if page_by_lang: word = page_by_lang[i] if path[i] == word: count += 1 #if word[0] == '<' and word[-1] == '>': # count += 1 # If the counter is the same of num_of_path # means we found the page we need it if count == num_of_path: return page return None def render_content_page(num_of_path, path): """ Using the path of the url, look inside the collection of pages that matches the page. If it matches, then it is rendered. The main for loop is searching the "page_document" by the languages "code_lan", inside every page we serch the kind of url with a specific "num_of_path", like url_1.en or url_2.it { "_id" : ObjectId("123456"), ... "url" : { "en" : "about/story", "it" : "chi_siamo/storia" }, "url_2" : { "en" : [ "about", "story" ], "it" : [ "chi_siamo", "storia" ] }, ... } """ languages = g.languages_object.available_lang_code # Retrive page document by g.lan code = g.lang page_document = get_page_content(code, num_of_path, path) if page_document is None: # Retrive page document by one of the available languages for code_lan in languages: code = code_lan page_document = get_page_content(code, num_of_path, path) if page_document is not None: break # If page is None then there doesn't exist # the page for that url if page_document is None: abort(404) else: # 1) dinamic page # =============================================================== page_from = page_document['from'] page_import = page_document['import'] if page_from and page_import: page_from = "pages."+page_from modules = page_from.split(".") if len(modules) == 1: module = __import__(page_from, globals(), locals(), [], -1) method_to_call = getattr(module, page_import) else: module = __import__(page_from, globals(), locals(), [], -1) module_called = getattr(module, modules[1]) method_to_call = getattr(module_called, page_import) return method_to_call(page_document, path, code) # 2) static page # =============================================================== title = page_document['title'].get(code, '') description = page_document['description'].get(code, '') content = {} if page_document['content']: content = get_content_dict(page_document, code) # For every page you must specify the file where you want # to use the contents stored in the database. 
template_file = 'pages/{0}.html'.format(page_document['file']) return render_template(template_file, **locals()) @content.route('/api/1.0/<three>/', methods=['POST', 'GET']) @content.route('/api/1.0/<three>/<four>', methods=['POST', 'GET']) def api_404(three, four): abort(404) @content.route('/', methods=['POST', 'GET']) def home(): """Path home page level deep""" path = [''] return render_content_page(1, path) @content.route('/robots.txt/') @content.route('/sitemap.xml/') @content.route('/favicon.ico/') def static_from_root(): return send_from_directory(current_app.static_folder, request.path[1:]) @content.route('/<regex("((?!static).*)"):one>/', methods=['POST', 'GET']) def one(one): """Path one level deep""" path = [one] return render_content_page(1, path) @content.route('/<regex("((?!static).*)"):one>/<two>/', methods=['POST', 'GET']) def two(one, two): """Path two level deep""" path = [one, two] return render_content_page(2, path) @content.route('/<regex("((?!static).*)"):one>/<two>/<three>/', methods=['POST', 'GET']) def three(one, two, three): """Path three level deep""" path = [one, two, three] return render_content_page(3, path) @content.route('/<regex("((?!static).*)"):one>/<two>/<three>/<four>/', methods=['POST', 'GET']) def four(one, two, three, four): """Path four level deep""" path = [one, two, three, four] return render_content_page(4, path)
bsd-3-clause
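The content routing above resolves a request by comparing the request path, segment by segment, against the url_<N>.<lang> lists stored with each page, and accepting a page only when every segment matches. A simplified, database-free sketch of that matching step, assuming page dictionaries shaped like the MongoDB documents shown in render_content_page's docstring:

def match_page(pages, code_lan, path):
    """Return the first page whose stored URL segments all equal the request path."""
    num_of_path = len(path)
    url_field = 'url_{}'.format(num_of_path)
    for page in pages:
        segments = page.get(url_field, {}).get(code_lan)
        if not segments or len(segments) != num_of_path:
            continue
        count = sum(1 for i in range(num_of_path) if path[i] == segments[i])
        if count == num_of_path:
            return page
    return None


# Hypothetical page documents shaped like the ones Bombolone stores.
pages = [
    {'url_2': {'en': ['about', 'story'], 'it': ['chi_siamo', 'storia']}},
    {'url_1': {'en': ['contact']}},
]
print(match_page(pages, 'en', ['about', 'story']) is pages[0])  # True
print(match_page(pages, 'it', ['about', 'story']))              # None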
fujunwei/chromium-crosswalk
tools/resources/find_used_resources.py
24
2073
#!/usr/bin/env python # Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import argparse import re import sys USAGE = """find_used_resources.py [-h] [-i INPUT] [-o OUTPUT] Outputs the sorted list of resource ids that are part of unknown pragma warning in the given build log. This script is used to find the resources that are actually compiled in Chrome in order to only include the needed strings/images in Chrome PAK files. The script parses out the list of used resource ids. These resource ids show up in the build output after building Chrome with gyp variable enable_resource_whitelist_generation set to 1. This gyp flag causes the compiler to print out a UnknownPragma message every time a resource id is used. E.g.: foo.cc:22:0: warning: ignoring #pragma whitelisted_resource_12345 [-Wunknown-pragmas] On Windows, the message is simply a message via __pragma(message(...)). """ def GetResourceIdsInPragmaWarnings(input): """Returns sorted set of resource ids that are inside unknown pragma warnings for the given input. """ used_resources = set() unknown_pragma_warning_pattern = re.compile( 'whitelisted_resource_(?P<resource_id>[0-9]+)') for ln in input: match = unknown_pragma_warning_pattern.search(ln) if match: resource_id = int(match.group('resource_id')) used_resources.add(resource_id) return sorted(used_resources) def Main(): parser = argparse.ArgumentParser(usage=USAGE) parser.add_argument( '-i', '--input', type=argparse.FileType('r'), default=sys.stdin, help='The build log to read (default stdin)') parser.add_argument( '-o', '--output', type=argparse.FileType('w'), default=sys.stdout, help='The resource list path to write (default stdout)') args = parser.parse_args() used_resources = GetResourceIdsInPragmaWarnings(args.input) for resource_id in used_resources: args.output.write('%d\n' % resource_id) if __name__ == '__main__': Main()
bsd-3-clause
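Because find_used_resources.py only scans build output for whitelisted_resource_<id> pragma warnings, its core can be exercised without an actual Chrome build by feeding it fabricated log lines. The warnings below are invented for illustration; the regex is the same one the script compiles.

import io
import re

pattern = re.compile('whitelisted_resource_(?P<resource_id>[0-9]+)')

fake_build_log = io.StringIO(u"\n".join([
    u"foo.cc:22:0: warning: ignoring #pragma whitelisted_resource_12345 [-Wunknown-pragmas]",
    u"bar.cc:7:0: warning: ignoring #pragma whitelisted_resource_678 [-Wunknown-pragmas]",
    u"unrelated compiler chatter",
    u"foo.cc:40:0: warning: ignoring #pragma whitelisted_resource_12345 [-Wunknown-pragmas]",
]))

used = set()
for line in fake_build_log:
    match = pattern.search(line)
    if match:
        used.add(int(match.group('resource_id')))

print(sorted(used))  # [678, 12345] -- duplicates collapse, output is sorted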
mavenlin/tensorflow
tensorflow/contrib/slim/python/slim/data/data_decoder.py
146
2302
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Contains helper functions and classes necessary for decoding data. While data providers read data from disk, sstables or other formats, data decoders decode the data (if necessary). A data decoder is provided with a serialized or encoded piece of data as well as a list of items and returns a set of tensors, each of which correspond to the requested list of items extracted from the data: def Decode(self, data, items): ... For example, if data is a compressed map, the implementation might be: def Decode(self, data, items): decompressed_map = _Decompress(data) outputs = [] for item in items: outputs.append(decompressed_map[item]) return outputs. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc class DataDecoder(object): """An abstract class which is used to decode data for a provider.""" __metaclass__ = abc.ABCMeta @abc.abstractmethod def decode(self, data, items): """Decodes the data to returns the tensors specified by the list of items. Args: data: A possibly encoded data format. items: A list of strings, each of which indicate a particular data type. Returns: A list of `Tensors`, whose length matches the length of `items`, where each `Tensor` corresponds to each item. Raises: ValueError: If any of the items cannot be satisfied. """ pass @abc.abstractmethod def list_items(self): """Lists the names of the items that the decoder can decode. Returns: A list of string names. """ pass
apache-2.0
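The module docstring above describes the DataDecoder contract in prose: decode(data, items) returns one value per requested item, and list_items() names what the decoder can produce. As a concrete, TensorFlow-free illustration of that contract, here is a toy decoder over plain dict records; it mirrors the interface shape rather than subclassing the real base class.

class DictDecoder(object):
    """Toy decoder following the DataDecoder contract: decode(data, items)
    returns one value per requested item, list_items() names the known items."""

    def __init__(self, known_items):
        self._known_items = list(known_items)

    def decode(self, data, items):
        # 'data' here is already a dict; a real decoder might first decompress
        # or parse a serialized record, as the docstring's example suggests.
        missing = [item for item in items if item not in data]
        if missing:
            raise ValueError('Cannot satisfy items: %s' % missing)
        return [data[item] for item in items]

    def list_items(self):
        return list(self._known_items)


decoder = DictDecoder(['image', 'label'])
record = {'image': [[0, 1], [1, 0]], 'label': 3}
print(decoder.decode(record, ['label', 'image']))  # [3, [[0, 1], [1, 0]]]
print(decoder.list_items())                        # ['image', 'label']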
BryceBrown/LinkstrDjango
rest_framework/tests/hyperlinkedserializers.py
1
9456
from __future__ import unicode_literals import json from django.test import TestCase from django.test.client import RequestFactory from rest_framework import generics, status, serializers from rest_framework.compat import patterns, url from rest_framework.tests.models import Anchor, BasicModel, ManyToManyModel, BlogPost, BlogPostComment, Album, Photo, OptionalRelationModel factory = RequestFactory() class BlogPostCommentSerializer(serializers.ModelSerializer): url = serializers.HyperlinkedIdentityField(view_name='blogpostcomment-detail') text = serializers.CharField() blog_post_url = serializers.HyperlinkedRelatedField(source='blog_post', view_name='blogpost-detail') class Meta: model = BlogPostComment fields = ('text', 'blog_post_url', 'url') class PhotoSerializer(serializers.Serializer): description = serializers.CharField() album_url = serializers.HyperlinkedRelatedField(source='album', view_name='album-detail', queryset=Album.objects.all(), slug_field='title', slug_url_kwarg='title') def restore_object(self, attrs, instance=None): return Photo(**attrs) class BasicList(generics.ListCreateAPIView): model = BasicModel model_serializer_class = serializers.HyperlinkedModelSerializer class BasicDetail(generics.RetrieveUpdateDestroyAPIView): model = BasicModel model_serializer_class = serializers.HyperlinkedModelSerializer class AnchorDetail(generics.RetrieveAPIView): model = Anchor model_serializer_class = serializers.HyperlinkedModelSerializer class ManyToManyList(generics.ListAPIView): model = ManyToManyModel model_serializer_class = serializers.HyperlinkedModelSerializer class ManyToManyDetail(generics.RetrieveAPIView): model = ManyToManyModel model_serializer_class = serializers.HyperlinkedModelSerializer class BlogPostCommentListCreate(generics.ListCreateAPIView): model = BlogPostComment serializer_class = BlogPostCommentSerializer class BlogPostCommentDetail(generics.RetrieveAPIView): model = BlogPostComment serializer_class = BlogPostCommentSerializer class BlogPostDetail(generics.RetrieveAPIView): model = BlogPost class PhotoListCreate(generics.ListCreateAPIView): model = Photo model_serializer_class = PhotoSerializer class AlbumDetail(generics.RetrieveAPIView): model = Album class OptionalRelationDetail(generics.RetrieveUpdateDestroyAPIView): model = OptionalRelationModel model_serializer_class = serializers.HyperlinkedModelSerializer urlpatterns = patterns('', url(r'^basic/$', BasicList.as_view(), name='basicmodel-list'), url(r'^basic/(?P<pk>\d+)/$', BasicDetail.as_view(), name='basicmodel-detail'), url(r'^anchor/(?P<pk>\d+)/$', AnchorDetail.as_view(), name='anchor-detail'), url(r'^manytomany/$', ManyToManyList.as_view(), name='manytomanymodel-list'), url(r'^manytomany/(?P<pk>\d+)/$', ManyToManyDetail.as_view(), name='manytomanymodel-detail'), url(r'^posts/(?P<pk>\d+)/$', BlogPostDetail.as_view(), name='blogpost-detail'), url(r'^comments/$', BlogPostCommentListCreate.as_view(), name='blogpostcomment-list'), url(r'^comments/(?P<pk>\d+)/$', BlogPostCommentDetail.as_view(), name='blogpostcomment-detail'), url(r'^albums/(?P<title>\w[\w-]*)/$', AlbumDetail.as_view(), name='album-detail'), url(r'^photos/$', PhotoListCreate.as_view(), name='photo-list'), url(r'^optionalrelation/(?P<pk>\d+)/$', OptionalRelationDetail.as_view(), name='optionalrelationmodel-detail'), ) class TestBasicHyperlinkedView(TestCase): urls = 'rest_framework.tests.hyperlinkedserializers' def setUp(self): """ Create 3 BasicModel instances. 
""" items = ['foo', 'bar', 'baz'] for item in items: BasicModel(text=item).save() self.objects = BasicModel.objects self.data = [ {'url': 'http://testserver/basic/%d/' % obj.id, 'text': obj.text} for obj in self.objects.all() ] self.list_view = BasicList.as_view() self.detail_view = BasicDetail.as_view() def test_get_list_view(self): """ GET requests to ListCreateAPIView should return list of objects. """ request = factory.get('/basic/') response = self.list_view(request).render() self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data, self.data) def test_get_detail_view(self): """ GET requests to ListCreateAPIView should return list of objects. """ request = factory.get('/basic/1') response = self.detail_view(request, pk=1).render() self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data, self.data[0]) class TestManyToManyHyperlinkedView(TestCase): urls = 'rest_framework.tests.hyperlinkedserializers' def setUp(self): """ Create 3 BasicModel instances. """ items = ['foo', 'bar', 'baz'] anchors = [] for item in items: anchor = Anchor(text=item) anchor.save() anchors.append(anchor) manytomany = ManyToManyModel() manytomany.save() manytomany.rel.add(*anchors) self.data = [{ 'url': 'http://testserver/manytomany/1/', 'rel': [ 'http://testserver/anchor/1/', 'http://testserver/anchor/2/', 'http://testserver/anchor/3/', ] }] self.list_view = ManyToManyList.as_view() self.detail_view = ManyToManyDetail.as_view() def test_get_list_view(self): """ GET requests to ListCreateAPIView should return list of objects. """ request = factory.get('/manytomany/') response = self.list_view(request) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data, self.data) def test_get_detail_view(self): """ GET requests to ListCreateAPIView should return list of objects. 
""" request = factory.get('/manytomany/1/') response = self.detail_view(request, pk=1) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data, self.data[0]) class TestCreateWithForeignKeys(TestCase): urls = 'rest_framework.tests.hyperlinkedserializers' def setUp(self): """ Create a blog post """ self.post = BlogPost.objects.create(title="Test post") self.create_view = BlogPostCommentListCreate.as_view() def test_create_comment(self): data = { 'text': 'A test comment', 'blog_post_url': 'http://testserver/posts/1/' } request = factory.post('/comments/', data=data) response = self.create_view(request) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response['Location'], 'http://testserver/comments/1/') self.assertEqual(self.post.blogpostcomment_set.count(), 1) self.assertEqual(self.post.blogpostcomment_set.all()[0].text, 'A test comment') class TestCreateWithForeignKeysAndCustomSlug(TestCase): urls = 'rest_framework.tests.hyperlinkedserializers' def setUp(self): """ Create an Album """ self.post = Album.objects.create(title='test-album') self.list_create_view = PhotoListCreate.as_view() def test_create_photo(self): data = { 'description': 'A test photo', 'album_url': 'http://testserver/albums/test-album/' } request = factory.post('/photos/', data=data) response = self.list_create_view(request) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertNotIn('Location', response, msg='Location should only be included if there is a "url" field on the serializer') self.assertEqual(self.post.photo_set.count(), 1) self.assertEqual(self.post.photo_set.all()[0].description, 'A test photo') class TestOptionalRelationHyperlinkedView(TestCase): urls = 'rest_framework.tests.hyperlinkedserializers' def setUp(self): """ Create 1 OptionalRelationModel instances. """ OptionalRelationModel().save() self.objects = OptionalRelationModel.objects self.detail_view = OptionalRelationDetail.as_view() self.data = {"url": "http://testserver/optionalrelation/1/", "other": None} def test_get_detail_view(self): """ GET requests to RetrieveAPIView with optional relations should return None for non existing relations. """ request = factory.get('/optionalrelationmodel-detail/1') response = self.detail_view(request, pk=1) self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.data, self.data) def test_put_detail_view(self): """ PUT requests to RetrieveUpdateDestroyAPIView with optional relations should accept None for non existing relations. """ response = self.client.put('/optionalrelation/1/', data=json.dumps(self.data), content_type='application/json') self.assertEqual(response.status_code, status.HTTP_200_OK)
apache-2.0
bisphon/pontiac
settings.py
1
5654
import multiprocessing from six.moves import queue DEBUG = True LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'simple': { 'format': '%(levelname)s %(message)s' }, 'standard': { 'format': '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s', # 'datefmt': '%Y-%m-%d %H:%M:%S.%f %z' }, 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s(%(name)s:%(lineno)s) P %(process)d (%(processName)s) T %(thread)d (%(threadName)s) %(message)s' }, 'email': { 'format': 'Timestamp: %(asctime)s\nModule: %(module)s\nLine: %(lineno)d\nMessage: %(message)s', }, }, 'filters': { 'require_debug_true': { '()': 'log_utils.RequireDebugTrue', }, 'require_debug_false': { '()': 'log_utils.RequireDebugFalse' }, }, 'handlers': { 'null': { 'level': 'DEBUG', 'class': 'logging.NullHandler', }, 'stderr': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple', # 'filters': ['require_debug_true'], }, 'file_watched': { 'level': 'DEBUG', 'class': 'logging.handlers.WatchedFileHandler', 'filename': './logs/pontiac.log', 'formatter': 'verbose', }, 'file_rotating': { 'level': 'DEBUG', 'class': 'logging.handlers.RotatingFileHandler', 'filename': './logs/pontiac.log', 'maxBytes': 1024 * 1024, 'backupCount': 5, 'formatter': 'verbose', }, 'socket_tcp': { 'level': 'DEBUG', 'class': 'logging.handlers.SocketHandler', 'host': 'localhost', 'port': '12345', 'formatter': 'standard', 'filters': ['require_debug_false'], }, 'syslog': { 'level': 'DEBUG', 'class': 'logging.handlers.SysLogHandler', 'address': '/dev/log', 'facility': 'LOG_USER', 'formatter': 'standard', 'filters': ['require_debug_false'], }, 'smtp': { 'level': 'DEBUG', 'class': 'logging.handlers.SMTPHandler', 'mailhost': '(localhost, 25)', 'fromaddr': '[email protected]', 'toaddrs': ['[email protected]'], 'subject': 'Pontiac Message', 'credentials': '(username, password)', 'formatter': 'email', 'filters': ['require_debug_false'], }, 'http': { 'level': 'DEBUG', 'class': 'logging.handlers.HTTPHandler', 'host': 'localhost', 'url': '/log', 'method': 'GET', 'filters': ['require_debug_false'], }, # 'queue': { # only available on python 3.2+ # 'level': 'DEBUG', # 'class': 'logging.handlers.QueueHandler', # 'filters': ['require_debug_false'], # }, 'logutils_queue': { 'level': 'DEBUG', 'class': 'logutils.queue.QueueHandler', 'queue': queue.Queue() }, # 'logutils_redis': { # 'level': 'DEBUG', # 'class': 'logutils.redis.RedisQueueHandler', # 'key': 'pontiac.logging', # }, 'redis': { 'level': 'DEBUG', 'class': 'log_utils.RedisLogHandler', 'host': 'localhost', 'port': 6379, 'log_key': 'pontiac.logging', }, 'rlog_redis': { 'level': 'DEBUG', 'class': 'rlog.RedisHandler', 'host': 'localhost', 'password': 'password', 'port': 6379, 'channel': 'pontiac_logs' }, 'logstash': { 'level': 'DEBUG', 'class': 'logstash.LogstashHandler', 'host': 'localhost', 'port': 5959, 'version': 1, 'message_type': 'logstash', 'fqdn': False, 'tags': None, }, }, 'loggers': { '': { 'handlers': ['stderr'], 'level': 'DEBUG', 'propagate': True }, 'webservice': { 'handlers': ['stderr', 'file_watched'], 'level': 'DEBUG', 'propagate': False }, 'notifier': { 'handlers': ['stderr', 'file_watched'], 'level': 'DEBUG', 'propagate': False, }, }, 'root': { 'handlers': ['stderr', 'file_watched'], 'level': 'NOTSET', } } HTTP_SOCKET = { 'host': '0.0.0.0', 'port': 1234 } SCHEMA = { 'NOTIFICATION': 'schemas/notification.schema.json', } QUEUE_MAX_SIZE = 1000000 REDIS = { 'host': 'localhost', 'port': 6379, 'password': '', # empty string or None disables 'db': 0, 'max_size': 0, # 
0 disables 'expires': 300 # in seconds } try: CPU_COUNT = multiprocessing.cpu_count() except NotImplementedError: CPU_COUNT = 1 THREAD_COUNT = { 'WEBSERVICE': 1, 'NOTIFICATION': CPU_COUNT * 2, } FCM = { #'proxy': 'http://localhost:8000', 'api_key': 'AIzaSyDKsu9nrr9YRVzwNPw7XamW1x6zoYkIjBo', 'proto': 'xmpp', # low_priority # delay_while_idle # time_to_live # restricted_package_name # dry_run } APNS = { #'proxy': 'http://localhost:8000', 'cert': 'certs/cert.pem', 'key': 'certs/key.pem', 'dist': False, }
mit
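The LOGGING dictionary above follows the standard library's dictConfig schema, so applying it is a single logging.config.dictConfig call. The sketch below keeps only the console pieces so it runs without the optional redis/logstash/logutils handlers referenced in the full config.

import logging.config

# Stripped-down config in the same dictConfig schema as settings.LOGGING;
# only console output is kept, so no third-party handler classes are needed.
MINIMAL_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s',
        },
    },
    'handlers': {
        'stderr': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'standard',
        },
    },
    'loggers': {
        'notifier': {'handlers': ['stderr'], 'level': 'DEBUG', 'propagate': False},
    },
    'root': {'handlers': ['stderr'], 'level': 'INFO'},
}

logging.config.dictConfig(MINIMAL_LOGGING)
logging.getLogger('notifier').debug('notification worker started')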
sontek/rethinkdb
test/interface/table_wait.py
13
5807
#!/usr/bin/env python # Copyright 2014 RethinkDB, all rights reserved. """The `interface.table_wait` test checks that waiting for a table returns when the table is available for writing.""" from __future__ import print_function import multiprocessing, os, sys, time, traceback, pprint startTime = time.time() sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common'))) import driver, scenario_common, utils, vcoptparse r = utils.import_python_driver() op = vcoptparse.OptParser() scenario_common.prepare_option_parser_mode_flags(op) _, command_prefix, serve_options = scenario_common.parse_mode_flags(op.parse(sys.argv)) db = "test" tables = ["table1", "table2", "table3"] delete_table = "delete" def check_table_states(conn, ready): statuses = r.expr(tables).map(r.db(db).table(r.row).status()).run(conn) return all(map(lambda s: (s["status"]['ready_for_writes'] == ready), statuses)) def wait_for_table_states(conn, ready): while not check_table_states(conn, ready=ready): time.sleep(0.1) def create_tables(conn): r.expr(tables).for_each(r.db(db).table_create(r.row)).run(conn) r.db(db).table_create(delete_table).run(conn) # An extra table to be deleted during a wait r.db(db).table_list().for_each(r.db(db).table(r.row).insert(r.range(200).map(lambda i: {'id':i}))).run(conn) r.db(db).reconfigure(shards=2, replicas=2).run(conn) r.db(db).wait().run(conn) assert check_table_states(conn, ready=True), \ "Wait after reconfigure returned before tables were ready, statuses: %s" % str(statuses) def spawn_table_wait(port, tbl): def do_table_wait(port, tbl, done_event): conn = r.connect("localhost", port) try: if tbl is None: r.db(db).wait().run(conn) else: res = r.db(db).table(tbl).wait().run(conn) assert res["ready"] == 1 finally: done_event.set() def do_post_write(port, tbl, start_event): conn = r.connect("localhost", port) start_event.wait() if tbl is None: r.db(db).table_list().for_each(r.db(db).table(r.row).insert({})).run(conn) else: r.db(db).table(tbl).insert({}).run(conn) sync_event = multiprocessing.Event() wait_proc = multiprocessing.Process(target=do_table_wait, args=(port, tbl, sync_event)) write_proc = multiprocessing.Process(target=do_post_write, args=(port, tbl, sync_event)) wait_proc.start() write_proc.start() return write_proc print("Spinning up two servers (%.2fs)" % (time.time() - startTime)) with driver.Cluster(initial_servers=['a', 'b'], output_folder='.', command_prefix=command_prefix, extra_options=serve_options) as cluster: cluster.check() proc1 = cluster[0] proc2 = cluster[1] files2 = proc2.files print("Establishing ReQL connection (%.2fs)" % (time.time() - startTime)) conn = r.connect("localhost", proc1.driver_port) if db not in r.db_list().run(conn): print("Creating db (%.2fs)" % (time.time() - startTime)) r.db_create(db).run(conn) print("Testing simple table (several times) (%.2fs)" % (time.time() - startTime)) for i in xrange(5): res = r.db(db).table_create("simple").run(conn) assert res["tables_created"] == 1 r.db(db).table("simple").reconfigure(shards=12, replicas=1).run(conn) r.db(db).table("simple").wait().run(conn) count = r.db(db).table("simple").count().run(conn) assert count == 0 res = r.db(db).table_drop("simple").run(conn) assert res["tables_dropped"] == 1 print("Creating %d tables (%.2fs)" % (len(tables) + 1, time.time() - startTime)) create_tables(conn) print("Killing second server (%.2fs)" % (time.time() - startTime)) proc2.close() wait_for_table_states(conn, ready=False) print("Spawning waiters (%.2fs)" % (time.time() - startTime)) 
waiter_procs = [ spawn_table_wait(proc1.driver_port, tables[0]), spawn_table_wait(proc1.driver_port, tables[1]), spawn_table_wait(proc1.driver_port, None) # Wait on all tables ] print("Waiting on a deleted table (%.2fs)" % (time.time() - startTime)) def wait_for_deleted_table(port, db, table): c = r.connect("localhost", port) try: r.db(db).table(table).wait().run(c) raise RuntimeError("`table_wait` did not error when waiting on a deleted table.") except r.ReqlRuntimeError as ex: assert ex.message == "Table `%s.%s` does not exist." % (db, table), \ "Unexpected error when waiting for a deleted table: %s" % ex.message error_wait_proc = multiprocessing.Process(target=wait_for_deleted_table, args=(proc1.driver_port, db, delete_table)) error_wait_proc.start() r.db(db).table_drop(delete_table).run(conn) error_wait_proc.join() print("Waiting 15 seconds (%.2fs)" % (time.time() - startTime)) # Wait some time to make sure the wait doesn't return early waiter_procs[0].join(15) assert all(map(lambda w: w.is_alive(), waiter_procs)), "Wait returned while a server was still down." print("Restarting second server (%.2fs)" % (time.time() - startTime)) proc2 = driver.Process(cluster, files2, console_output=True, command_prefix=command_prefix, extra_options=serve_options) proc2.wait_until_started_up() print("Waiting for table readiness (%.2fs)" % (time.time() - startTime)) map(lambda w: w.join(), waiter_procs) assert check_table_states(conn, ready=True), "`wait` returned, but not all tables are ready" print("Cleaning up (%.2fs)" % (time.time() - startTime)) print("Done. (%.2fs)" % (time.time() - startTime))
agpl-3.0
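spawn_table_wait above pairs a waiter process with a writer process and synchronizes them through a multiprocessing.Event so the write only happens after the wait returns. The same pattern, with the RethinkDB calls replaced by placeholders, looks like this:

import multiprocessing
import time


def waiter(done_event):
    # Stand-in for r.db(db).table(tbl).wait(): block until some condition holds.
    time.sleep(0.5)
    print('waiter: table is ready')
    done_event.set()            # release the writer


def writer(start_event):
    start_event.wait()          # do not write until the wait has returned
    print('writer: performing post-wait insert')


if __name__ == '__main__':
    sync_event = multiprocessing.Event()
    wait_proc = multiprocessing.Process(target=waiter, args=(sync_event,))
    write_proc = multiprocessing.Process(target=writer, args=(sync_event,))
    wait_proc.start()
    write_proc.start()
    write_proc.join()
    wait_proc.join()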
Impactstory/sherlockoa
endpoint.py
1
25387
import datetime import json import os from random import random from time import sleep from time import time import requests import shortuuid from sickle import Sickle, oaiexceptions from sickle.iterator import OAIItemIterator from sickle.models import ResumptionToken from sickle.oaiexceptions import NoRecordsMatch, BadArgument from sickle.response import OAIResponse from sqlalchemy import or_ import pmh_record from app import db from app import logger from http_cache import request_ua_headers from repository import Repository from util import elapsed from util import safe_commit def lookup_endpoint_by_pmh_url(pmh_url_query=None): endpoints = Endpoint.query.filter(Endpoint.pmh_url.ilike(u"%{}%".format(pmh_url_query))).all() return endpoints class Endpoint(db.Model): id = db.Column(db.Text, primary_key=True) id_old = db.Column(db.Text) repo_unique_id = db.Column(db.Text, db.ForeignKey(Repository.id)) pmh_url = db.Column(db.Text) pmh_set = db.Column(db.Text) last_harvest_started = db.Column(db.DateTime) last_harvest_finished = db.Column(db.DateTime) most_recent_year_harvested = db.Column(db.DateTime) earliest_timestamp = db.Column(db.DateTime) email = db.Column(db.Text) # to help us figure out what kind of repo it is error = db.Column(db.Text) repo_request_id = db.Column(db.Text) harvest_identify_response = db.Column(db.Text) harvest_test_recent_dates = db.Column(db.Text) sample_pmh_record = db.Column(db.Text) contacted = db.Column(db.DateTime) contacted_text = db.Column(db.Text) policy_promises_no_submitted = db.Column(db.Boolean) policy_promises_no_submitted_evidence = db.Column(db.Text) ready_to_run = db.Column(db.Boolean) metadata_prefix = db.Column(db.Text) retry_interval = db.Column(db.Interval) retry_at = db.Column(db.DateTime) meta = db.relationship( 'Repository', lazy='subquery', cascade="all", backref=db.backref("endpoints", lazy="subquery") ) def __init__(self, **kwargs): super(self.__class__, self).__init__(**kwargs) if not self.id: self.id = shortuuid.uuid()[0:20].lower() if not self.metadata_prefix: self.metadata_prefix = 'oai_dc' @property def repo(self): return self.meta def run_diagnostics(self): response = test_harvest_url(self.pmh_url) self.harvest_identify_response = response["harvest_identify_response"] # self.harvest_test_initial_dates = response["harvest_test_initial_dates"] self.harvest_test_recent_dates = response["harvest_test_recent_dates"] self.sample_pmh_record = response["sample_pmh_record"] def harvest(self): if not self.harvest_identify_response or not self.harvest_test_recent_dates: self.set_identify_and_initial_query() today = datetime.datetime.utcnow().date() tomorrow = today + datetime.timedelta(days=1) yesterday = today - datetime.timedelta(days=1) first = (self.most_recent_year_harvested or datetime.datetime(2000, 1, 1)).date() first = min(first, yesterday) if self.id_old in ['citeseerx.ist.psu.edu/oai2', 'europepmc.org/oai.cgi']: first_plus_delta = first + datetime.timedelta(days=1) elif self.id_old in ['www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi', 'www.ncbi.nlm.nih.gov/pmc/oai/oai.cgi2']: first_plus_delta = first + datetime.timedelta(days=7) elif self.id == '4bd6f8f5107c0df6f48': first_plus_delta = first + datetime.timedelta(days=1) elif self.id == '0d27b133730393e00e1': first_plus_delta = first + datetime.timedelta(days=1) elif self.id == 'jmpfmmfru5pzhy4lbrdm': first_plus_delta = first + datetime.timedelta(days=1) elif self.id_old in ['export.arxiv.org/oai2']: first_plus_delta = first + datetime.timedelta(days=1) elif 'osti.gov/oai' in self.pmh_url: 
first_plus_delta = first + datetime.timedelta(days=1) elif 'share.osf.io' in self.pmh_url: first_plus_delta = first + datetime.timedelta(days=1) else: first_plus_delta = first + datetime.timedelta(days=7) last = min(first_plus_delta, tomorrow) # now do the harvesting self.call_pmh_endpoint(first=first, last=last) # if success, update so we start at next point next time base_retry_interval = datetime.timedelta(minutes=5) if self.error: logger.info(u"error so not saving finished info: {}".format(self.error)) retry_interval = self.retry_interval or base_retry_interval self.retry_at = datetime.datetime.utcnow() + retry_interval self.retry_interval = retry_interval * 2 self.last_harvest_started = None else: logger.info(u"success! saving info") self.last_harvest_finished = datetime.datetime.utcnow().isoformat() self.most_recent_year_harvested = min(yesterday, last) self.last_harvest_started = None self.retry_at = None self.retry_interval = base_retry_interval def get_pmh_record(self, record_id): my_sickle = _get_my_sickle(self.pmh_url) pmh_input_record = my_sickle.GetRecord(identifier=record_id, metadataPrefix=self.metadata_prefix) my_pmh_record = pmh_record.PmhRecord() my_pmh_record.populate(self.id, pmh_input_record, metadata_prefix=self.metadata_prefix) my_pmh_record.repo_id = self.id_old # delete once endpoint_id is populated return my_pmh_record def set_identify_and_initial_query(self): if not self.pmh_url: self.harvest_identify_response = u"error, no pmh_url given" return my_sickle = None try: # set timeout quick... if it can't do this quickly, won't be good for harvesting logger.debug(u"getting my_sickle for {}".format(self)) my_sickle = _get_my_sickle(self.pmh_url, timeout=10) my_sickle.Identify() self.harvest_identify_response = "SUCCESS!" except Exception as e: logger.exception(u"in set_identify_and_initial_query") self.error = u"error in calling identify: {} {}".format( e.__class__.__name__, unicode(e.message).encode("utf-8")) if my_sickle: self.error += u" calling {}".format(my_sickle.get_http_response_url()) self.harvest_identify_response = self.error self.sample_pmh_record = None try: sample_pmh_record = self.get_recent_pmh_record() if sample_pmh_record: self.harvest_test_recent_dates = "SUCCESS!" 
self.sample_pmh_record = json.dumps(sample_pmh_record.metadata) else: self.harvest_test_recent_dates = "error, no pmh_input_records returned" except Exception as e: self.error = u"error in get_recent_pmh_record: {} {}".format( e.__class__.__name__, unicode(e.message).encode("utf-8")) self.harvest_test_recent_dates = self.error def get_recent_pmh_record(self): last = datetime.datetime.utcnow() first = last - datetime.timedelta(days=30) args = {'metadataPrefix': self.metadata_prefix} my_sickle = _get_my_sickle(self.pmh_url) logger.info(u"connected to sickle with {}".format(self.pmh_url)) args['from'] = first.isoformat()[0:10] args["until"] = last.isoformat()[0:10] if self.pmh_set: args["set"] = self.pmh_set logger.info(u"calling ListIdentifiers with {} {}".format(self.pmh_url, args)) try: pmh_identifiers = my_sickle.ListIdentifiers(ignore_deleted=True, **args) pmh_identifier = self.safe_get_next_record(pmh_identifiers) if pmh_identifier: return my_sickle.GetRecord(identifier=pmh_identifier.identifier, metadataPrefix=self.metadata_prefix) else: return None except NoRecordsMatch: logger.info(u"no records with {} {}".format(self.pmh_url, args)) return None def get_pmh_input_record(self, first, last, use_date_default_format=True): args = {'metadataPrefix': self.metadata_prefix} pmh_records = [] self.error = None my_sickle = _get_my_sickle(self.pmh_url) logger.info(u"connected to sickle with {}".format(self.pmh_url)) args['from'] = first.isoformat()[0:10] if not use_date_default_format: args['from'] += "T00:00:00Z" if last: args["until"] = last.isoformat()[0:10] if not use_date_default_format: args['until'] += "T00:00:00Z" if self.pmh_set: args["set"] = self.pmh_set logger.info(u"calling ListRecords with {} {}".format(self.pmh_url, args)) try: try: pmh_records = my_sickle.ListRecords(ignore_deleted=True, **args) pmh_input_record = self.safe_get_next_record(pmh_records) except NoRecordsMatch: logger.info(u"no records with {} {}".format(self.pmh_url, args)) pmh_input_record = None except BadArgument as e: if use_date_default_format: return self.get_pmh_input_record(first, last, use_date_default_format=False) else: raise e except Exception as e: logger.exception(u"error with {} {}".format(self.pmh_url, args)) pmh_input_record = None self.error = u"error in get_pmh_input_record: {} {}".format( e.__class__.__name__, unicode(e.message).encode("utf-8")) if my_sickle: self.error += u" calling {}".format(my_sickle.get_http_response_url()) return pmh_input_record, pmh_records, self.error def call_pmh_endpoint(self, first=None, last=None, chunk_size=50, scrape=False): start_time = time() records_to_save = [] num_records_updated = 0 loop_counter = 0 self.error = None (pmh_input_record, pmh_records, error) = self.get_pmh_input_record(first, last) if error: self.error = u"error in get_pmh_input_record: {}".format(error) return while pmh_input_record: loop_counter += 1 # create the record my_pmh_record = pmh_record.PmhRecord() # set its vars my_pmh_record.repo_id = self.id_old # delete once endpoint_ids are all populated my_pmh_record.rand = random() my_pmh_record.populate(self.id, pmh_input_record, metadata_prefix=self.metadata_prefix) if is_complete(my_pmh_record): my_pages = my_pmh_record.mint_pages() my_pmh_record.pages = my_pages if scrape: for my_page in my_pages: my_page.scrape_if_matches_pub() records_to_save.append(my_pmh_record) my_pmh_record.delete_old_record() db.session.merge(my_pmh_record) else: logger.info(u"pmh record is not complete") # print my_pmh_record pass if len(records_to_save) >= 
chunk_size: num_records_updated += len(records_to_save) safe_commit(db) records_to_save = [] if loop_counter % 100 == 0: logger.info(u"iterated through 100 more items, loop_counter={} for {}".format(loop_counter, self.id)) pmh_input_record = self.safe_get_next_record(pmh_records) # make sure to get the last ones if records_to_save: num_records_updated += len(records_to_save) last_record = records_to_save[-1] logger.info(u"saving {} last ones, last record saved: {} for {}, loop_counter={}".format( len(records_to_save), last_record.id, self.id, loop_counter)) safe_commit(db) else: logger.info(u"finished loop, but no records to save, loop_counter={}".format(loop_counter)) logger.info(u"updated {} PMH records for endpoint_id={}, took {} seconds".format( num_records_updated, self.id, elapsed(start_time, 2))) def safe_get_next_record(self, current_record, tries=3): self.error = None try: next_record = current_record.next() except (requests.exceptions.HTTPError, requests.exceptions.SSLError) as e: if tries > 0: logger.info(u"requests exception! trying again {}".format(e)) return self.safe_get_next_record(current_record, tries-1) else: logger.info(u"requests exception! skipping {}".format(e)) self.error = u"requests error in safe_get_next_record; try again" return None except (KeyboardInterrupt, SystemExit): # done return None except StopIteration: logger.info(u"stop iteration! stopping") return None except Exception as e: logger.exception(u"misc exception!: {} skipping".format(e)) self.error = u"error in safe_get_next_record" return None return next_record def get_num_pmh_records(self): from pmh_record import PmhRecord num = db.session.query(PmhRecord.id).filter(PmhRecord.endpoint_id == self.id).count() return num def get_num_pages(self): from page import PageNew num = db.session.query(PageNew.id).filter(PageNew.endpoint_id == self.id).count() return num def get_num_open_with_dois(self): from page import PageNew num = db.session.query(PageNew.id).\ distinct(PageNew.normalized_title).\ filter(PageNew.endpoint_id == self.id).\ filter(PageNew.num_pub_matches.isnot(None), PageNew.num_pub_matches >= 1).\ filter(or_(PageNew.scrape_pdf_url.isnot(None), PageNew.scrape_metadata_url.isnot(None))).\ count() return num def get_num_title_matching_dois(self): from page import PageNew num = db.session.query(PageNew.id).\ distinct(PageNew.normalized_title).\ filter(PageNew.endpoint_id == self.id).\ filter(PageNew.num_pub_matches.isnot(None), PageNew.num_pub_matches >= 1).\ count() return num def get_open_pages(self, limit=10): from page import PageNew pages = db.session.query(PageNew).\ distinct(PageNew.normalized_title).\ filter(PageNew.endpoint_id == self.id).\ filter(PageNew.num_pub_matches.isnot(None), PageNew.num_pub_matches >= 1).\ filter(or_(PageNew.scrape_pdf_url.isnot(None), PageNew.scrape_metadata_url.isnot(None))).\ limit(limit).all() return [(p.id, p.url, p.normalized_title, p.pub.url, p.pub.unpaywall_api_url, p.scrape_version) for p in pages] def get_closed_pages(self, limit=10): from page import PageNew pages = db.session.query(PageNew).\ distinct(PageNew.normalized_title).\ filter(PageNew.endpoint_id == self.id).\ filter(PageNew.num_pub_matches.isnot(None), PageNew.num_pub_matches >= 1).\ filter(PageNew.scrape_updated.isnot(None), PageNew.scrape_pdf_url.is_(None), PageNew.scrape_metadata_url .is_(None)).\ limit(limit).all() return [(p.id, p.url, p.normalized_title, p.pub.url, p.pub.unpaywall_api_url, p.scrape_updated) for p in pages] def get_num_pages_still_processing(self): from page import 
PageNew num = db.session.query(PageNew.id).filter(PageNew.endpoint_id == self.id, PageNew.num_pub_matches.is_(None)).count() return num def __repr__(self): return u"<Endpoint ( {} ) {}>".format(self.id, self.pmh_url) def to_dict(self): response = { "_endpoint_id": self.id, "_pmh_url": self.pmh_url, "num_pmh_records": self.get_num_pmh_records(), "num_pages": self.get_num_pages(), "num_open_with_dois": self.get_num_open_with_dois(), "num_title_matching_dois": self.get_num_title_matching_dois(), "num_pages_still_processing": self.get_num_pages_still_processing(), "pages_open": u"{}/debug/repo/{}/examples/open".format("http://localhost:5000", self.repo_unique_id), # self.get_open_pages(), "pages_closed": u"{}/debug/repo/{}/examples/closed".format("http://localhost:5000", self.repo_unique_id), # self.get_closed_pages(), "metadata": {} } if self.meta: response.update({ "metadata": { "home_page": self.repo.home_page, "institution_name": self.repo.institution_name, "repository_name": self.repo.repository_name } }) return response def to_dict_status(self): response = { "results": {}, "metadata": {} } for field in ["id", "repo_unique_id", "pmh_url", "email"]: response[field] = getattr(self, field) for field in ["harvest_identify_response", "harvest_test_recent_dates", "sample_pmh_record"]: response["results"][field] = getattr(self, field) if self.meta: for field in ["home_page", "institution_name", "repository_name"]: response["metadata"][field] = getattr(self.meta, field) return response def to_dict_repo_pulse(self): return { "metadata": { "endpoint_id": self.id, "repository_name": self.repo.repository_name, "institution_name": self.repo.institution_name, "pmh_url": self.pmh_url }, "status": { "check0_identify_status": self.harvest_identify_response, "check1_query_status": self.harvest_test_recent_dates, "num_pmh_records": None, "last_harvest": self.most_recent_year_harvested, "num_pmh_records_matching_dois": None, "num_pmh_records_matching_dois_with_fulltext": None }, "by_version_distinct_pmh_records_matching_dois": {} } def test_harvest_url(pmh_url): response = {} temp_endpoint = Endpoint() temp_endpoint.pmh_url = pmh_url temp_endpoint.set_identify_and_initial_query() response["harvest_identify_response"] = temp_endpoint.harvest_identify_response response["sample_pmh_record"] = temp_endpoint.sample_pmh_record response["harvest_test_recent_dates"] = temp_endpoint.harvest_test_recent_dates return response def is_complete(record): if not record.pmh_id: return False if not record.title: return False if not record.urls: return False if record.oa == "0": logger.info(u"record {} is closed access. 
skipping.".format(record["id"])) return False return True class MyOAIItemIterator(OAIItemIterator): def _get_resumption_token(self): """Extract and store the resumptionToken from the last response.""" resumption_token_element = self.oai_response.xml.find( './/' + self.sickle.oai_namespace + 'resumptionToken') if resumption_token_element is None: return None token = resumption_token_element.text cursor = resumption_token_element.attrib.get('cursor', None) complete_list_size = resumption_token_element.attrib.get( 'completeListSize', None) expiration_date = resumption_token_element.attrib.get( 'expirationDate', None) resumption_token = ResumptionToken( token=token, cursor=cursor, complete_list_size=complete_list_size, expiration_date=expiration_date ) return resumption_token def get_complete_list_size(self): """Extract and store the resumptionToken from the last response.""" resumption_token_element = self.oai_response.xml.find( './/' + self.sickle.oai_namespace + 'resumptionToken') if resumption_token_element is None: return None complete_list_size = resumption_token_element.attrib.get( 'completeListSize', None) if complete_list_size: return int(complete_list_size) return complete_list_size class OSTIOAIItemIterator(MyOAIItemIterator): def _next_response(self): """Get the next response from the OAI server. Copy-pasted from OAIItemIterator._next_response but adds metadataPrefix to params. """ params = self.params if self.resumption_token: params = { 'resumptionToken': self.resumption_token.token, 'verb': self.verb, 'metadataPrefix': params.get('metadataPrefix') } self.oai_response = self.sickle.harvest(**params) error = self.oai_response.xml.find( './/' + self.sickle.oai_namespace + 'error') if error is not None: code = error.attrib.get('code', 'UNKNOWN') description = error.text or '' try: raise getattr( oaiexceptions, code[0].upper() + code[1:])(description) except AttributeError: raise oaiexceptions.OAIError(description) self.resumption_token = self._get_resumption_token() self._items = self.oai_response.xml.iterfind( './/' + self.sickle.oai_namespace + self.element) def _get_my_sickle(repo_pmh_url, timeout=120): if not repo_pmh_url: return None proxy_url = None if any(fragment in repo_pmh_url for fragment in ["citeseerx"]): proxy_url = os.getenv("STATIC_IP_PROXY") elif any(fragment in repo_pmh_url for fragment in ["pure.coventry.ac.uk"]): proxy_url = os.getenv("VERY_STATIC_IP_PROXY") if proxy_url: proxies = {"https": proxy_url, "http": proxy_url} else: proxies = {} iterator = OSTIOAIItemIterator if 'osti.gov/oai' in repo_pmh_url else MyOAIItemIterator sickle = EuropePMCSickle if 'europepmc.org' in repo_pmh_url else MySickle my_sickle = sickle(repo_pmh_url, proxies=proxies, timeout=timeout, iterator=iterator) return my_sickle # subclass so we can customize the number of retry seconds class MySickle(Sickle): RETRY_SECONDS = 120 def __init__(self, *args, **kwargs): self.http_response_url = None super(MySickle, self).__init__(*args, **kwargs) def get_http_response_url(self): if hasattr(self, "http_response_url"): return self.http_response_url return None def _massage_http_response(self, http_response): return http_response def harvest(self, **kwargs): # pragma: no cover """Make HTTP requests to the OAI server. :param kwargs: OAI HTTP parameters. 
:rtype: :class:`sickle.OAIResponse` """ start_time = time() verify = not self.endpoint.startswith(u'https://rcin.org.pl') for _ in range(self.max_retries): if self.http_method == 'GET': payload_str = "&".join("%s=%s" % (k, v) for k, v in kwargs.items()) url_without_encoding = u"{}?{}".format(self.endpoint, payload_str) http_response = requests.get(url_without_encoding, headers=request_ua_headers(), verify=verify, **self.request_args) self.http_response_url = http_response.url else: http_response = requests.post(self.endpoint, headers=request_ua_headers(), data=kwargs, **self.request_args) self.http_response_url = http_response.url if http_response.status_code == 503: retry_after = self.RETRY_SECONDS logger.info("HTTP 503! Retrying after %d seconds..." % retry_after) sleep(retry_after) else: logger.info("took {} seconds to call pmh url: {}".format(elapsed(start_time), http_response.url)) http_response = self._massage_http_response(http_response) http_response.raise_for_status() if self.encoding: http_response.encoding = self.encoding return OAIResponse(http_response, params=kwargs) class EuropePMCSickle(MySickle): def _massage_http_response(self, http_response): # server returns a 404 with NoRecordsMatch responses # treat this as a successful http request and handle the OAI-PMH error further up the stack if http_response.status_code == 404: http_response.status_code = 200 return http_response
mit
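A short usage sketch may help orient readers of the harvester code above. It is not part of the file: the endpoint URL is an arbitrary example, and it assumes the module's remaining imports are in place plus the standard sickle ListRecords interface.

# Illustrative sketch, not part of the file above: fetch records through the
# helper defined there. The endpoint URL is only an example.
my_sickle = _get_my_sickle("https://export.arxiv.org/oai2", timeout=120)
if my_sickle:
    for record in my_sickle.ListRecords(metadataPrefix="oai_dc", ignore_deleted=True):
        print(record.header.identifier)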
xavfernandez/pip
tests/functional/test_search.py
2
5507
import logging

import pretend
import pytest

from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS
from pip._internal.commands import create_command
from pip._internal.commands.search import (
    highest_version,
    print_results,
    transform_hits,
)
from tests.lib import pyversion

if pyversion >= '3':
    VERBOSE_FALSE = False
else:
    VERBOSE_FALSE = 0


def test_version_compare():
    """
    Test version comparison.
    """
    assert highest_version(['1.0', '2.0', '0.1']) == '2.0'
    assert highest_version(['1.0a1', '1.0']) == '1.0'


def test_pypi_xml_transformation():
    """
    Test transformation of data structures (PyPI xmlrpc to custom list).
    """
    pypi_hits = [
        {
            'name': 'foo',
            'summary': 'foo summary',
            'version': '1.0',
        },
        {
            'name': 'foo',
            'summary': 'foo summary v2',
            'version': '2.0',
        },
        {
            '_pypi_ordering': 50,
            'name': 'bar',
            'summary': 'bar summary',
            'version': '1.0',
        },
    ]
    expected = [
        {
            'versions': ['1.0', '2.0'],
            'name': 'foo',
            'summary': 'foo summary v2',
        },
        {
            'versions': ['1.0'],
            'name': 'bar',
            'summary': 'bar summary',
        },
    ]
    assert transform_hits(pypi_hits) == expected


@pytest.mark.network
def test_basic_search(script):
    """
    End to end test of search command.
    """
    output = script.pip('search', 'pip')
    assert (
        'The PyPA recommended tool for installing '
        'Python packages.' in output.stdout
    )


@pytest.mark.network
@pytest.mark.skip(
    reason=("Warehouse search behavior is different and no longer returns "
            "multiple results. See "
            "https://github.com/pypa/warehouse/issues/3717 for more "
            "information."),
)
def test_multiple_search(script):
    """
    Test searching for multiple packages at once.
    """
    output = script.pip('search', 'pip', 'INITools')
    assert (
        'The PyPA recommended tool for installing '
        'Python packages.' in output.stdout
    )
    assert 'Tools for parsing and using INI-style files' in output.stdout


def test_search_missing_argument(script):
    """
    Test missing required argument for search
    """
    result = script.pip('search', expect_error=True)
    assert 'ERROR: Missing required argument (search query).' in result.stderr


@pytest.mark.network
def test_run_method_should_return_success_when_find_packages():
    """
    Test SearchCommand.run for found package
    """
    command = create_command('search')
    cmdline = "--index=https://pypi.org/pypi pip"
    with command.main_context():
        options, args = command.parse_args(cmdline.split())
        status = command.run(options, args)
    assert status == SUCCESS


@pytest.mark.network
def test_run_method_should_return_no_matches_found_when_does_not_find_pkgs():
    """
    Test SearchCommand.run for no matches
    """
    command = create_command('search')
    cmdline = "--index=https://pypi.org/pypi nonexistentpackage"
    with command.main_context():
        options, args = command.parse_args(cmdline.split())
        status = command.run(options, args)
    assert status == NO_MATCHES_FOUND


@pytest.mark.network
def test_search_should_exit_status_code_zero_when_find_packages(script):
    """
    Test search exit status code for package found
    """
    result = script.pip('search', 'pip')
    assert result.returncode == SUCCESS


@pytest.mark.network
def test_search_exit_status_code_when_finds_no_package(script):
    """
    Test search exit status code for no matches
    """
    result = script.pip('search', 'nonexistentpackage', expect_error=True)
    assert result.returncode == NO_MATCHES_FOUND, result.returncode


def test_latest_prerelease_install_message(caplog, monkeypatch):
    """
    Test documentation for installing pre-release packages is displayed
    """
    hits = [
        {
            'name': 'ni',
            'summary': 'For knights who say Ni!',
            'versions': ['1.0.0', '1.0.1a']
        }
    ]

    installed_package = pretend.stub(project_name="ni")
    monkeypatch.setattr("pip._vendor.pkg_resources.working_set",
                        [installed_package])

    dist = pretend.stub(version="1.0.0")
    get_dist = pretend.call_recorder(lambda x: dist)
    monkeypatch.setattr("pip._vendor.pkg_resources.get_distribution", get_dist)
    with caplog.at_level(logging.INFO):
        print_results(hits)

    message = caplog.records[-1].getMessage()
    assert 'pre-release; install with "pip install --pre"' in message
    assert get_dist.calls == [pretend.call('ni')]


def test_search_print_results_should_contain_latest_versions(caplog):
    """
    Test that printed search results contain the latest package versions
    """
    hits = [
        {
            'name': 'testlib1',
            'summary': 'Test library 1.',
            'versions': ['1.0.5', '1.0.3']
        },
        {
            'name': 'testlib2',
            'summary': 'Test library 1.',
            'versions': ['2.0.1', '2.0.3']
        }
    ]
    with caplog.at_level(logging.INFO):
        print_results(hits)
    log_messages = sorted([r.getMessage() for r in caplog.records])
    assert log_messages[0].startswith('testlib1 (1.0.5)')
    assert log_messages[1].startswith('testlib2 (2.0.3)')
mit
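As an aside on the helpers the tests above exercise: transform_hits collapses the per-version XML-RPC hits into one entry per project, and highest_version picks the newest of the collected versions. The snippet below is not part of the test file; it simply replays those semantics using the same private pip imports the tests rely on.

# Not part of the test file above; mirrors the behaviour the tests assert.
from pip._internal.commands.search import highest_version, transform_hits

hits = [
    {'name': 'foo', 'summary': 'first', 'version': '1.0'},
    {'name': 'foo', 'summary': 'second', 'version': '2.0'},
]
merged = transform_hits(hits)                  # one entry per project name
print(highest_version(merged[0]['versions']))  # -> '2.0'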
kbrebanov/ansible
lib/ansible/modules/monitoring/honeybadger_deployment.py
49
3829
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright 2014 Benjamin Curtis <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}


DOCUMENTATION = '''
---
module: honeybadger_deployment
author: "Benjamin Curtis (@stympy)"
version_added: "2.2"
short_description: Notify Honeybadger.io about app deployments
description:
  - Notify Honeybadger.io about app deployments (see http://docs.honeybadger.io/article/188-deployment-tracking)
options:
  token:
    description:
      - API token.
    required: true
  environment:
    description:
      - The environment name, typically 'production', 'staging', etc.
    required: true
  user:
    description:
      - The username of the person doing the deployment
    required: false
    default: None
  repo:
    description:
      - URL of the project repository
    required: false
    default: None
  revision:
    description:
      - A hash, number, tag, or other identifier showing what revision was deployed
    required: false
    default: None
  url:
    description:
      - Optional URL to submit the notification to.
    required: false
    default: "https://api.honeybadger.io/v1/deploys"
  validate_certs:
    description:
      - If C(no), SSL certificates for the target url will not be validated.
        This should only be used on personally controlled sites using
        self-signed certificates.
    required: false
    default: 'yes'
    choices: ['yes', 'no']
requirements: []
'''

EXAMPLES = '''
- honeybadger_deployment:
    token: AAAAAA
    environment: staging
    user: ansible
    revision: b6826b8
    repo: '[email protected]:user/repo.git'
'''

RETURN = '''# '''

import traceback

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url


# ===========================================
# Module execution.
#

def main():

    module = AnsibleModule(
        argument_spec=dict(
            token=dict(required=True, no_log=True),
            environment=dict(required=True),
            user=dict(required=False),
            repo=dict(required=False),
            revision=dict(required=False),
            url=dict(required=False, default='https://api.honeybadger.io/v1/deploys'),
            validate_certs=dict(default='yes', type='bool'),
        ),
        supports_check_mode=True
    )

    params = {}

    if module.params["environment"]:
        params["deploy[environment]"] = module.params["environment"]

    if module.params["user"]:
        params["deploy[local_username]"] = module.params["user"]

    if module.params["repo"]:
        params["deploy[repository]"] = module.params["repo"]

    if module.params["revision"]:
        params["deploy[revision]"] = module.params["revision"]

    params["api_key"] = module.params["token"]

    url = module.params.get('url')

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=True)

    try:
        data = urlencode(params)
        response, info = fetch_url(module, url, data=data)
    except Exception as e:
        module.fail_json(msg='Unable to notify Honeybadger: %s' % to_native(e), exception=traceback.format_exc())
    else:
        if info['status'] == 201:
            module.exit_json(changed=True)
        else:
            module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))


if __name__ == '__main__':
    main()
gpl-3.0
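For orientation, the module above boils down to a single form-encoded POST. The sketch below is not part of the module and uses requests purely for illustration (the module itself goes through Ansible's fetch_url); the field values are the placeholders from the EXAMPLES block.

# Illustrative only: the module builds a payload like this and POSTs it;
# a 201 response is what it reports as "changed".
import requests

payload = {
    "deploy[environment]": "staging",
    "deploy[local_username]": "ansible",
    "deploy[repository]": "[email protected]:user/repo.git",
    "deploy[revision]": "b6826b8",
    "api_key": "AAAAAA",  # placeholder token from the EXAMPLES block
}
response = requests.post("https://api.honeybadger.io/v1/deploys", data=payload)
print(response.status_code)  # the module treats 201 as success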
youdonghai/intellij-community
python/lib/Lib/posixfile.py
87
7843
"""Extended file operations available in POSIX. f = posixfile.open(filename, [mode, [bufsize]]) will create a new posixfile object f = posixfile.fileopen(fileobject) will create a posixfile object from a builtin file object f.file() will return the original builtin file object f.dup() will return a new file object based on a new filedescriptor f.dup2(fd) will return a new file object based on the given filedescriptor f.flags(mode) will turn on the associated flag (merge) mode can contain the following characters: (character representing a flag) a append only flag c close on exec flag n no delay flag s synchronization flag (modifiers) ! turn flags 'off' instead of default 'on' = copy flags 'as is' instead of default 'merge' ? return a string in which the characters represent the flags that are set note: - the '!' and '=' modifiers are mutually exclusive. - the '?' modifier will return the status of the flags after they have been changed by other characters in the mode string f.lock(mode [, len [, start [, whence]]]) will (un)lock a region mode can contain the following characters: (character representing type of lock) u unlock r read lock w write lock (modifiers) | wait until the lock can be granted ? return the first lock conflicting with the requested lock or 'None' if there is no conflict. The lock returned is in the format (mode, len, start, whence, pid) where mode is a character representing the type of lock ('r' or 'w') note: - the '?' modifier prevents a region from being locked; it is query only """ class _posixfile_: """File wrapper class that provides extra POSIX file routines.""" states = ['open', 'closed'] # # Internal routines # def __repr__(self): file = self._file_ return "<%s posixfile '%s', mode '%s' at %s>" % \ (self.states[file.closed], file.name, file.mode, \ hex(id(self))[2:]) # # Initialization routines # def open(self, name, mode='r', bufsize=-1): import __builtin__ return self.fileopen(__builtin__.open(name, mode, bufsize)) def fileopen(self, file): import types if repr(type(file)) != "<type 'file'>": raise TypeError, 'posixfile.fileopen() arg must be file object' self._file_ = file # Copy basic file methods for maybemethod in dir(file): if not maybemethod.startswith('_'): attr = getattr(file, maybemethod) if isinstance(attr, types.BuiltinMethodType): setattr(self, maybemethod, attr) return self # # New methods # def file(self): return self._file_ def dup(self): import posix if not hasattr(posix, 'fdopen'): raise AttributeError, 'dup() method unavailable' return posix.fdopen(posix.dup(self._file_.fileno()), self._file_.mode) def dup2(self, fd): import posix if not hasattr(posix, 'fdopen'): raise AttributeError, 'dup() method unavailable' posix.dup2(self._file_.fileno(), fd) return posix.fdopen(fd, self._file_.mode) def flags(self, *which): import fcntl, os if which: if len(which) > 1: raise TypeError, 'Too many arguments' which = which[0] else: which = '?' l_flags = 0 if 'n' in which: l_flags = l_flags | os.O_NDELAY if 'a' in which: l_flags = l_flags | os.O_APPEND if 's' in which: l_flags = l_flags | os.O_SYNC file = self._file_ if '=' not in which: cur_fl = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0) if '!' in which: l_flags = cur_fl & ~ l_flags else: l_flags = cur_fl | l_flags l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFL, l_flags) if 'c' in which: arg = ('!' not in which) # 0 is don't, 1 is do close on exec l_flags = fcntl.fcntl(file.fileno(), fcntl.F_SETFD, arg) if '?' 
in which: which = '' # Return current flags l_flags = fcntl.fcntl(file.fileno(), fcntl.F_GETFL, 0) if os.O_APPEND & l_flags: which = which + 'a' if fcntl.fcntl(file.fileno(), fcntl.F_GETFD, 0) & 1: which = which + 'c' if os.O_NDELAY & l_flags: which = which + 'n' if os.O_SYNC & l_flags: which = which + 's' return which def lock(self, how, *args): import struct, fcntl if 'w' in how: l_type = fcntl.F_WRLCK elif 'r' in how: l_type = fcntl.F_RDLCK elif 'u' in how: l_type = fcntl.F_UNLCK else: raise TypeError, 'no type of lock specified' if '|' in how: cmd = fcntl.F_SETLKW elif '?' in how: cmd = fcntl.F_GETLK else: cmd = fcntl.F_SETLK l_whence = 0 l_start = 0 l_len = 0 if len(args) == 1: l_len = args[0] elif len(args) == 2: l_len, l_start = args elif len(args) == 3: l_len, l_start, l_whence = args elif len(args) > 3: raise TypeError, 'too many arguments' # Hack by [email protected] to get locking to go on freebsd; # additions for AIX by [email protected] import sys, os if sys.platform in ('netbsd1', 'openbsd2', 'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5', 'freebsd6', 'freebsd7', 'bsdos2', 'bsdos3', 'bsdos4'): flock = struct.pack('lxxxxlxxxxlhh', \ l_start, l_len, os.getpid(), l_type, l_whence) elif sys.platform in ('aix3', 'aix4'): flock = struct.pack('hhlllii', \ l_type, l_whence, l_start, l_len, 0, 0, 0) else: flock = struct.pack('hhllhh', \ l_type, l_whence, l_start, l_len, 0, 0) flock = fcntl.fcntl(self._file_.fileno(), cmd, flock) if '?' in how: if sys.platform in ('netbsd1', 'openbsd2', 'freebsd2', 'freebsd3', 'freebsd4', 'freebsd5', 'bsdos2', 'bsdos3', 'bsdos4'): l_start, l_len, l_pid, l_type, l_whence = \ struct.unpack('lxxxxlxxxxlhh', flock) elif sys.platform in ('aix3', 'aix4'): l_type, l_whence, l_start, l_len, l_sysid, l_pid, l_vfs = \ struct.unpack('hhlllii', flock) elif sys.platform == "linux2": l_type, l_whence, l_start, l_len, l_pid, l_sysid = \ struct.unpack('hhllhh', flock) else: l_type, l_whence, l_start, l_len, l_sysid, l_pid = \ struct.unpack('hhllhh', flock) if l_type != fcntl.F_UNLCK: if l_type == fcntl.F_RDLCK: return 'r', l_len, l_start, l_whence, l_pid else: return 'w', l_len, l_start, l_whence, l_pid def open(name, mode='r', bufsize=-1): """Public routine to open a file as a posixfile object.""" return _posixfile_().open(name, mode, bufsize) def fileopen(file): """Public routine to get a posixfile object from a Python file object.""" return _posixfile_().fileopen(file) # # Constants # SEEK_SET = 0 SEEK_CUR = 1 SEEK_END = 2 # # End of posixfile.py #
apache-2.0
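Because the module docstring above describes the lock/flags mini-language, a brief usage sketch may help. It is not part of the file and, like the module itself, only runs on Python 2 / Jython; the filename is arbitrary.

# Hypothetical usage sketch (Python 2 / Jython only, matching the module above).
import posixfile

f = posixfile.open("spool.txt", "w")
f.lock("w|")            # write lock, wait until it can be granted
f.write("record\n")
f.lock("u")             # release the lock
print f.flags("?")      # query current flags, e.g. 'a' if append-only is set
f.close()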
Distrotech/qtwebkit
Tools/Scripts/webkitpy/w3c/test_importer_unittest.py
115
3011
#!/usr/bin/env python
# Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
#    copyright notice, this list of conditions and the following
#    disclaimer.
# 2. Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials
#    provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.

import optparse
import shutil
import tempfile
import unittest2 as unittest

from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive2, ScriptError
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.w3c.test_importer import TestImporter


FAKE_SOURCE_DIR = '/blink/w3c'
FAKE_REPO_DIR = '/blink'

FAKE_FILES = {
    '/blink/w3c/empty_dir/README.txt': '',
    '/mock-checkout/LayoutTests/w3c/README.txt': '',
}

class TestImporterTest(unittest.TestCase):

    def test_import_dir_with_no_tests_and_no_hg(self):
        host = MockHost()
        host.executive = MockExecutive2(exception=OSError())
        host.filesystem = MockFileSystem(files=FAKE_FILES)

        importer = TestImporter(host, FAKE_SOURCE_DIR, FAKE_REPO_DIR, optparse.Values({"overwrite": False}))
        oc = OutputCapture()
        oc.capture_output()
        try:
            importer.do_import()
        finally:
            oc.restore_output()

    def test_import_dir_with_no_tests(self):
        host = MockHost()
        host.executive = MockExecutive2(exception=ScriptError("abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"))
        host.filesystem = MockFileSystem(files=FAKE_FILES)

        importer = TestImporter(host, FAKE_SOURCE_DIR, FAKE_REPO_DIR, optparse.Values({"overwrite": False}))
        oc = OutputCapture()
        oc.capture_output()
        try:
            importer.do_import()
        finally:
            oc.restore_output()

    # FIXME: Needs more tests.
lgpl-3.0
jaehyuk/High-Frequency-Trading-Model-with-IB
params/ib_data_types.py
7
1835
""" Author: James Ma Email stuff here: [email protected] """ """ API doumentation: https://www.interactivebrokers.com/en/software/api/apiguide/java/reqhistoricaldata.htm https://www.interactivebrokers.com/en/software/api/apiguide/tables/tick_types.htm """ FIELD_BID_SIZE = 0 FIELD_BID_PRICE = 1 FIELD_ASK_PRICE = 2 FIELD_ASK_SIZE = 3 FIELD_LAST_PRICE = 4 FIELD_LAST_SIZE = 5 FIELD_HIGH = 6 FIELD_LOW = 7 FIELD_VOLUME = 8 FIELD_CLOSE_PRICE = 9 FIELD_AVG_VOLUME = 21 FIELD_BID_EXCH = 32 FIELD_ASK_EXCH = 33 FIELD_AUCTION_VOLUME = 34 FIELD_AUCTION_PRICE = 35 FIELD_LAST_TIMESTAMP = 45 FIELD_HALTED = 49 FIELD_TRADE_COUNT = 54 FIELD_TRADE_RATE = 55 FIELD_VOLUME_RATE = 56 FIELD_HALTED_NOT_HALTED = 0 FIELD_HALTED_IS_HALTED = 1 FIELD_HALTED_BY_VOLATILITY = 2 DURATION_1_HR = '3600 S' DURATION_1_MIN = "60 S" DURATION_1_DAY = '1 D' BAR_SIZE_5_SEC = '5 secs' BAR_SIZE_1_MIN = '1 min' RTH_ALL = 0 RTH_ONLY_TRADING_HRS = 1 WHAT_TO_SHOW_TRADES = "TRADES" WHAT_TO_SHOW_MID_PT = "MIDPOINT" WHAT_TO_SHOW_BID = "BID" WHAT_TO_SHOW_ASK = "ASK" WHAT_TO_SHOW_BID_ASK = "BID_ASK" WHAT_TO_SHOW_HVOL = "HISTORICAL_VOLATILITY" WHAT_TO_SHOW_OPT_IMPV = "OPTION_IMPLIED_VOLATILITY" DATEFORMAT_STRING = 1 DATEFORMAT_UNIX_TS = 2 MSG_TYPE_HISTORICAL_DATA = "historicalData" MSG_TYPE_UPDATE_PORTFOLIO = "updatePortfolio" MSG_TYPE_MANAGED_ACCOUNTS = "managedAccounts" MSG_TYPE_NEXT_ORDER_ID = "nextValidId" MSG_TYPE_TICK_PRICE = "tickPrice" MSG_TYPE_TICK_STRING = "tickString" MSG_TYPE_STICK_SIZE = "tickSize" DATE_TIME_FORMAT = "%Y%m%d %H:%M:%S" DATE_TIME_FORMAT_LONG = "%Y-%m-%d %H:%M:%S" DATE_TIME_FORMAT_LONG_MILLISECS = "%Y-%m-%d %H:%M:%S.%f" GENERIC_TICKS_NONE = '' GENERIC_TICKS_RTVOLUME = "233" SNAPSHOT_NONE = False SNAPSHOT_TRUE = True ORDER_TYPE_MARKET = "MKT" ORDER_TYPE_LIMIT = "LMT" ORDER_ACTION_SELL = "SELL" ORDER_ACTION_BUY = "BUY"
mit
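The constants above are meant to be threaded into Interactive Brokers historical-data and tick requests. The sketch below only assembles request arguments; it does not call the IB API, whose exact reqHistoricalData signature varies by version and is documented at the URLs in the module docstring. The params package import is an assumption based on the file path.

# Illustrative only: assemble historical-data request arguments from the
# constants above. Assumes the file is importable as params.ib_data_types.
from datetime import datetime
import params.ib_data_types as datatype

end_time = datetime.now().strftime(datatype.DATE_TIME_FORMAT)
request_args = dict(
    endDateTime=end_time,
    durationStr=datatype.DURATION_1_HR,
    barSizeSetting=datatype.BAR_SIZE_5_SEC,
    whatToShow=datatype.WHAT_TO_SHOW_TRADES,
    useRTH=datatype.RTH_ALL,
    formatDate=datatype.DATEFORMAT_STRING,
)
print(request_args)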
arante/pyloc
microblog/flask/lib/python3.5/site-packages/whoosh/support/base85.py
95
2473
""" This module contains generic base85 encoding and decoding functions. The whoosh.util.numeric module contains faster variants for encoding and decoding integers. Modified from: http://paste.lisp.org/display/72815 """ import struct from whoosh.compat import xrange # Instead of using the character set from the ascii85 algorithm, I put the # characters in order so that the encoded text sorts properly (my life would be # a lot easier if they had just done that from the start) b85chars = ("!$%&*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ" "^_abcdefghijklmnopqrstuvwxyz{|}~") b85dec = {} for i in range(len(b85chars)): b85dec[b85chars[i]] = i # Integer encoding and decoding functions def to_base85(x, islong=False): "Encodes the given integer using base 85." size = 10 if islong else 5 rems = "" for i in xrange(size): rems = b85chars[x % 85] + rems x //= 85 return rems def from_base85(text): "Decodes the given base 85 text into an integer." acc = 0 for c in text: acc = acc * 85 + b85dec[c] return acc # Bytes encoding and decoding functions def b85encode(text, pad=False): l = len(text) r = l % 4 if r: text += '\0' * (4 - r) longs = len(text) >> 2 out = [] words = struct.unpack('>' + 'L' * longs, text[0:longs * 4]) for word in words: rems = [0, 0, 0, 0, 0] for i in range(4, -1, -1): rems[i] = b85chars[word % 85] word /= 85 out.extend(rems) out = ''.join(out) if pad: return out # Trim padding olen = l % 4 if olen: olen += 1 olen += l / 4 * 5 return out[0:olen] def b85decode(text): l = len(text) out = [] for i in range(0, len(text), 5): chunk = text[i:i + 5] acc = 0 for j in range(len(chunk)): try: acc = acc * 85 + b85dec[chunk[j]] except KeyError: raise TypeError('Bad base85 character at byte %d' % (i + j)) if acc > 4294967295: raise OverflowError('Base85 overflow in hunk starting at byte %d' % i) out.append(acc) # Pad final chunk if necessary cl = l % 5 if cl: acc *= 85 ** (5 - cl) if cl > 1: acc += 0xffffff >> (cl - 2) * 8 out[-1] = acc out = struct.pack('>' + 'L' * ((l + 4) / 5), *out) if cl: out = out[:-(5 - cl)] return out
gpl-3.0
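A quick check of the integer helpers above, not part of the module: values round-trip through from_base85, and because the character set is re-ordered, encoded strings of equal length sort in the same order as the integers they encode.

# Not part of the module above; exercises to_base85/from_base85 directly.
from whoosh.support.base85 import to_base85, from_base85

a, b = to_base85(1000), to_base85(70000)
assert from_base85(a) == 1000 and from_base85(b) == 70000
assert (a < b) == (1000 < 70000)   # string order matches numeric order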