repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
aut-sepanta/Sepanta3 | DCM/Speech/Audio/sound_play/scripts/say.py | 2 | 3101 | #!/usr/bin/env python
#***********************************************************
#* Software License Agreement (BSD License)
#*
#* Copyright (c) 2009, Willow Garage, Inc.
#* All rights reserved.
#*
#* Redistribution and use in source and binary forms, with or without
#* modification, are permitted provided that the following conditions
#* are met:
#*
#* * Redistributions of source code must retain the above copyright
#* notice, this list of conditions and the following disclaimer.
#* * Redistributions in binary form must reproduce the above
#* copyright notice, this list of conditions and the following
#* disclaimer in the documentation and/or other materials provided
#* with the distribution.
#* * Neither the name of the Willow Garage nor the names of its
#* contributors may be used to endorse or promote products derived
#* from this software without specific prior written permission.
#*
#* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#* POSSIBILITY OF SUCH DAMAGE.
#***********************************************************
# Author: Blaise Gassend
import sys
if __name__ == '__main__':
    if len(sys.argv) > 1 and sys.argv[1] == '--help':
        print 'Usage: %s \'String to say.\''%sys.argv[0]
        print '       %s < file_to_say.txt'%sys.argv[0]
        print
        print 'Says a string. For a string on the command line, you must use quotes as'
        print 'appropriate. For a string on standard input, the command will wait for'
        print 'EOF before saying anything.'
        exit(-1)

    # Import after printing usage for speed.
    import rospy
    from sound_play.msg import SoundRequest
    from sound_play.libsoundplay import SoundClient

    if len(sys.argv) == 1:
        print 'Awaiting something to say on standard input.'

    # Ordered this way to minimize wait time.
    rospy.init_node('say', anonymous = True)
    soundhandle = SoundClient()
    rospy.sleep(1)

    voice = 'voice_kal_diphone'
    volume = 1.0

    if len(sys.argv) == 1:
        s = sys.stdin.read()
    else:
        s = sys.argv[1]

    if len(sys.argv) > 2:
        voice = sys.argv[2]

    if len(sys.argv) > 3:
        volume = float(sys.argv[3])

    print 'Saying: %s' % s
    print 'Voice: %s' % voice
    print 'Volume: %s' % volume

    soundhandle.say(s, voice, volume)
    rospy.sleep(1)
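
# Editor's sketch (illustrative, not part of the original script): the same
# request the script publishes can be issued from another ROS node through
# SoundClient, mirroring the soundhandle.say() call above.
#
#   import rospy
#   from sound_play.libsoundplay import SoundClient
#
#   rospy.init_node('say_example', anonymous=True)
#   handle = SoundClient()
#   rospy.sleep(1)                         # give the publisher time to connect
#   handle.say('Hello world', 'voice_kal_diphone', 1.0)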
| mit |
kbdick/RecycleTracker | recyclecollector/scrap/gdata-2.0.18/build/lib.linux-x86_64-2.7/gdata/tlslite/integration/XMLRPCTransport.py | 271 | 5812 | """TLS Lite + xmlrpclib."""
import xmlrpclib
import httplib
from gdata.tlslite.integration.HTTPTLSConnection import HTTPTLSConnection
from gdata.tlslite.integration.ClientHelper import ClientHelper
class XMLRPCTransport(xmlrpclib.Transport, ClientHelper):
"""Handles an HTTPS transaction to an XML-RPC server."""
def __init__(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings=None):
"""Create a new XMLRPCTransport.
An instance of this class can be passed to L{xmlrpclib.ServerProxy}
to use TLS with XML-RPC calls::
from tlslite.api import XMLRPCTransport
from xmlrpclib import ServerProxy
transport = XMLRPCTransport(user="alice", password="abra123")
server = ServerProxy("https://localhost", transport)
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Thus you should be prepared to handle TLS-specific
exceptions when calling methods of L{xmlrpclib.ServerProxy}. See the
client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
ClientHelper.__init__(self,
username, password, sharedKey,
certChain, privateKey,
cryptoID, protocol,
x509Fingerprint,
x509TrustList, x509CommonName,
settings)
def make_connection(self, host):
# create a HTTPS connection object from a host descriptor
host, extra_headers, x509 = self.get_host_info(host)
http = HTTPTLSConnection(host, None,
self.username, self.password,
self.sharedKey,
self.certChain, self.privateKey,
self.checker.cryptoID,
self.checker.protocol,
self.checker.x509Fingerprint,
self.checker.x509TrustList,
self.checker.x509CommonName,
self.settings)
http2 = httplib.HTTP()
http2._setup(http)
return http2
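
# Editor's sketch (assumption, not part of the original module): the class
# docstring demonstrates SRP authentication; certificate-based client
# authentication would look roughly like this, assuming certChain and
# privateKey were already loaded with the tlslite utilities.
#
#   from xmlrpclib import ServerProxy
#
#   transport = XMLRPCTransport(certChain=certChain, privateKey=privateKey)
#   server = ServerProxy("https://localhost", transport)
#   server.some_method()                   # hypothetical remote method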
| gpl-3.0 |
kapt/django-oscar | oscar/apps/offer/south_migrations/0006_auto__add_field_conditionaloffer_max_applications.py | 17 | 15049 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ConditionalOffer.max_applications'
db.add_column('offer_conditionaloffer', 'max_applications', self.gf('django.db.models.fields.PositiveIntegerField')(null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'ConditionalOffer.max_applications'
db.delete_column('offer_conditionaloffer', 'max_applications')
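# Editor's note (illustrative, assuming a standard South setup): this schema
# change would typically be applied with
#   python manage.py migrate offer 0006_auto__add_field_conditionaloffer_max_applications
# and rolled back by migrating to the previous migration, e.g.
#   python manage.py migrate offer 0005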
models = {
'catalogue.attributeentity': {
'Meta': {'object_name': 'AttributeEntity'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"})
},
'catalogue.attributeentitytype': {
'Meta': {'object_name': 'AttributeEntityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'})
},
'catalogue.attributeoption': {
'Meta': {'object_name': 'AttributeOption'},
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'catalogue.attributeoptiongroup': {
'Meta': {'object_name': 'AttributeOptionGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1024', 'db_index': 'True'})
},
'catalogue.option': {
'Meta': {'object_name': 'Option'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
},
'catalogue.product': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}),
'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'catalogue.productattribute': {
'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
'code': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'db_index': 'True'}),
'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
},
'catalogue.productattributevalue': {
'Meta': {'object_name': 'ProductAttributeValue'},
'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductAttribute']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'catalogue.productcategory': {
'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'catalogue.productclass': {
'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'catalogue.productrecommendation': {
'Meta': {'object_name': 'ProductRecommendation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}),
'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"})
},
'offer.benefit': {
'Meta': {'object_name': 'Benefit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_affected_items': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'})
},
'offer.condition': {
'Meta': {'object_name': 'Condition'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('oscar.models.fields.PositiveDecimalField', [], {'max_digits': '12', 'decimal_places': '2'})
},
'offer.conditionaloffer': {
'Meta': {'ordering': "['-priority']", 'object_name': 'ConditionalOffer'},
'benefit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Benefit']"}),
'condition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Condition']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'offer_type': ('django.db.models.fields.CharField', [], {'default': "'Site'", 'max_length': '128'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'redirect_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'})
},
'offer.range': {
'Meta': {'object_name': 'Range'},
'classes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'classes'", 'blank': 'True', 'to': "orm['catalogue.ProductClass']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'excluded_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'excludes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'included_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Category']"}),
'included_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}),
'includes_all_products': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
}
}
complete_apps = ['offer']
| bsd-3-clause |
yasserglez/tagfs | packages/tagfs/contrib/magic.py | 2 | 58944 | #!/usr/bin/env python
'''
magic.py
determines a file type by its magic number
(C)opyright 2000 Jason Petrone <[email protected]>
All Rights Reserved
Command Line Usage: running as `python magic.py file` will print
a description of what 'file' is.
Module Usage:
magic.whatis(data): when passed a string 'data' containing
binary or text data, a description of
what the data is will be returned.
magic.file(filename): returns a description of what the file
'filename' contains.
'''
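# Usage sketch (editor's addition, illustrative only): the two entry points
# described in the docstring would be called roughly like this; the example
# results are taken from the magic table below.
#
#   import magic
#
#   print magic.file('/usr/bin/env')                      # e.g. 'application/x-executable-file'
#   print magic.whatis(open('picture.png', 'rb').read())  # e.g. 'image/x-png'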
import re, struct, string
__version__ = '0.1'
magic = [
[0L, 'leshort', '=', 1538L, 'application/x-alan-adventure-game'],
[0L, 'string', '=', 'TADS', 'application/x-tads-game'],
[0L, 'short', '=', 420L, 'application/x-executable-file'],
[0L, 'short', '=', 421L, 'application/x-executable-file'],
[0L, 'leshort', '=', 603L, 'application/x-executable-file'],
[0L, 'string', '=', 'Core\001', 'application/x-executable-file'],
[0L, 'string', '=', 'AMANDA: TAPESTART DATE', 'application/x-amanda-header'],
[0L, 'belong', '=', 1011L, 'application/x-executable-file'],
[0L, 'belong', '=', 999L, 'application/x-library-file'],
[0L, 'belong', '=', 435L, 'video/mpeg'],
[0L, 'belong', '=', 442L, 'video/mpeg'],
[0L, 'beshort&0xfff0', '=', 65520L, 'audio/mpeg'],
[4L, 'leshort', '=', 44817L, 'video/fli'],
[4L, 'leshort', '=', 44818L, 'video/flc'],
[0L, 'string', '=', 'MOVI', 'video/x-sgi-movie'],
[4L, 'string', '=', 'moov', 'video/quicktime'],
[4L, 'string', '=', 'mdat', 'video/quicktime'],
[0L, 'long', '=', 100554L, 'application/x-apl-workspace'],
[0L, 'string', '=', 'FiLeStArTfIlEsTaRt', 'text/x-apple-binscii'],
[0L, 'string', '=', '\012GL', 'application/data'],
[0L, 'string', '=', 'v\377', 'application/data'],
[0L, 'string', '=', 'NuFile', 'application/data'],
[0L, 'string', '=', 'N\365F\351l\345', 'application/data'],
[0L, 'belong', '=', 333312L, 'application/data'],
[0L, 'belong', '=', 333319L, 'application/data'],
[257L, 'string', '=', 'ustar\000', 'application/x-tar'],
[257L, 'string', '=', 'ustar \000', 'application/x-gtar'],
[0L, 'short', '=', 70707L, 'application/x-cpio'],
[0L, 'short', '=', 143561L, 'application/x-bcpio'],
[0L, 'string', '=', '070707', 'application/x-cpio'],
[0L, 'string', '=', '070701', 'application/x-cpio'],
[0L, 'string', '=', '070702', 'application/x-cpio'],
[0L, 'string', '=', '!<arch>\012debian', 'application/x-dpkg'],
[0L, 'long', '=', 177555L, 'application/x-ar'],
[0L, 'short', '=', 177555L, 'application/data'],
[0L, 'long', '=', 177545L, 'application/data'],
[0L, 'short', '=', 177545L, 'application/data'],
[0L, 'long', '=', 100554L, 'application/x-apl-workspace'],
[0L, 'string', '=', '<ar>', 'application/x-ar'],
[0L, 'string', '=', '!<arch>\012__________E', 'application/x-ar'],
[0L, 'string', '=', '-h-', 'application/data'],
[0L, 'string', '=', '!<arch>', 'application/x-ar'],
[0L, 'string', '=', '<ar>', 'application/x-ar'],
[0L, 'string', '=', '<ar>', 'application/x-ar'],
[0L, 'belong', '=', 1711210496L, 'application/x-ar'],
[0L, 'belong', '=', 1013019198L, 'application/x-ar'],
[0L, 'long', '=', 557605234L, 'application/x-ar'],
[0L, 'lelong', '=', 177555L, 'application/data'],
[0L, 'leshort', '=', 177555L, 'application/data'],
[0L, 'lelong', '=', 177545L, 'application/data'],
[0L, 'leshort', '=', 177545L, 'application/data'],
[0L, 'lelong', '=', 236525L, 'application/data'],
[0L, 'lelong', '=', 236526L, 'application/data'],
[0L, 'lelong&0x8080ffff', '=', 2074L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 2330L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 538L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 794L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 1050L, 'application/x-arc'],
[0L, 'lelong&0x8080ffff', '=', 1562L, 'application/x-arc'],
[0L, 'string', '=', '\032archive', 'application/data'],
[0L, 'leshort', '=', 60000L, 'application/x-arj'],
[0L, 'string', '=', 'HPAK', 'application/data'],
[0L, 'string', '=', '\351,\001JAM application/data', ''],
[2L, 'string', '=', '-lh0-', 'application/x-lha'],
[2L, 'string', '=', '-lh1-', 'application/x-lha'],
[2L, 'string', '=', '-lz4-', 'application/x-lha'],
[2L, 'string', '=', '-lz5-', 'application/x-lha'],
[2L, 'string', '=', '-lzs-', 'application/x-lha'],
[2L, 'string', '=', '-lh -', 'application/x-lha'],
[2L, 'string', '=', '-lhd-', 'application/x-lha'],
[2L, 'string', '=', '-lh2-', 'application/x-lha'],
[2L, 'string', '=', '-lh3-', 'application/x-lha'],
[2L, 'string', '=', '-lh4-', 'application/x-lha'],
[2L, 'string', '=', '-lh5-', 'application/x-lha'],
[0L, 'string', '=', 'Rar!', 'application/x-rar'],
[0L, 'string', '=', 'SQSH', 'application/data'],
[0L, 'string', '=', 'UC2\032', 'application/data'],
[0L, 'string', '=', 'PK\003\004', 'application/zip'],
[20L, 'lelong', '=', 4257523676L, 'application/x-zoo'],
[10L, 'string', '=', '# This is a shell archive', 'application/x-shar'],
[0L, 'string', '=', '*STA', 'application/data'],
[0L, 'string', '=', '2278', 'application/data'],
[0L, 'beshort', '=', 560L, 'application/x-executable-file'],
[0L, 'beshort', '=', 561L, 'application/x-executable-file'],
[0L, 'string', '=', '\000\004\036\212\200', 'application/core'],
[0L, 'string', '=', '.snd', 'audio/basic'],
[0L, 'lelong', '=', 6583086L, 'audio/basic'],
[0L, 'string', '=', 'MThd', 'audio/midi'],
[0L, 'string', '=', 'CTMF', 'audio/x-cmf'],
[0L, 'string', '=', 'SBI', 'audio/x-sbi'],
[0L, 'string', '=', 'Creative Voice File', 'audio/x-voc'],
[0L, 'belong', '=', 1314148939L, 'audio/x-multitrack'],
[0L, 'string', '=', 'RIFF', 'audio/x-wav'],
[0L, 'string', '=', 'EMOD', 'audio/x-emod'],
[0L, 'belong', '=', 779248125L, 'audio/x-pn-realaudio'],
[0L, 'string', '=', 'MTM', 'audio/x-multitrack'],
[0L, 'string', '=', 'if', 'audio/x-669-mod'],
[0L, 'string', '=', 'FAR', 'audio/mod'],
[0L, 'string', '=', 'MAS_U', 'audio/x-multimate-mod'],
[44L, 'string', '=', 'SCRM', 'audio/x-st3-mod'],
[0L, 'string', '=', 'GF1PATCH110\000ID#000002\000', 'audio/x-gus-patch'],
[0L, 'string', '=', 'GF1PATCH100\000ID#000002\000', 'audio/x-gus-patch'],
[0L, 'string', '=', 'JN', 'audio/x-669-mod'],
[0L, 'string', '=', 'UN05', 'audio/x-mikmod-uni'],
[0L, 'string', '=', 'Extended Module:', 'audio/x-ft2-mod'],
[21L, 'string', '=', '!SCREAM!', 'audio/x-st2-mod'],
[1080L, 'string', '=', 'M.K.', 'audio/x-protracker-mod'],
[1080L, 'string', '=', 'M!K!', 'audio/x-protracker-mod'],
[1080L, 'string', '=', 'FLT4', 'audio/x-startracker-mod'],
[1080L, 'string', '=', '4CHN', 'audio/x-fasttracker-mod'],
[1080L, 'string', '=', '6CHN', 'audio/x-fasttracker-mod'],
[1080L, 'string', '=', '8CHN', 'audio/x-fasttracker-mod'],
[1080L, 'string', '=', 'CD81', 'audio/x-oktalyzer-mod'],
[1080L, 'string', '=', 'OKTA', 'audio/x-oktalyzer-mod'],
[1080L, 'string', '=', '16CN', 'audio/x-taketracker-mod'],
[1080L, 'string', '=', '32CN', 'audio/x-taketracker-mod'],
[0L, 'string', '=', 'TOC', 'audio/x-toc'],
[0L, 'short', '=', 3401L, 'application/x-executable-file'],
[0L, 'long', '=', 406L, 'application/x-executable-file'],
[0L, 'short', '=', 406L, 'application/x-executable-file'],
[0L, 'short', '=', 3001L, 'application/x-executable-file'],
[0L, 'lelong', '=', 314L, 'application/x-executable-file'],
[0L, 'string', '=', '//', 'text/cpp'],
[0L, 'string', '=', '\\\\1cw\\', 'application/data'],
[0L, 'string', '=', '\\\\1cw', 'application/data'],
[0L, 'belong&0xffffff00', '=', 2231440384L, 'application/data'],
[0L, 'belong&0xffffff00', '=', 2231487232L, 'application/data'],
[0L, 'short', '=', 575L, 'application/x-executable-file'],
[0L, 'short', '=', 577L, 'application/x-executable-file'],
[4L, 'string', '=', 'pipe', 'application/data'],
[4L, 'string', '=', 'prof', 'application/data'],
[0L, 'string', '=', ': shell', 'application/data'],
[0L, 'string', '=', '#!/bin/sh', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/sh', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/sh', 'application/x-sh'],
[0L, 'string', '=', '#!/bin/csh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/csh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/csh', 'application/x-csh'],
[0L, 'string', '=', '#!/bin/ksh', 'application/x-ksh'],
[0L, 'string', '=', '#! /bin/ksh', 'application/x-ksh'],
[0L, 'string', '=', '#! /bin/ksh', 'application/x-ksh'],
[0L, 'string', '=', '#!/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#!/usr/local/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /usr/local/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#!/usr/local/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /usr/local/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#! /usr/local/bin/tcsh', 'application/x-csh'],
[0L, 'string', '=', '#!/usr/local/bin/zsh', 'application/x-zsh'],
[0L, 'string', '=', '#! /usr/local/bin/zsh', 'application/x-zsh'],
[0L, 'string', '=', '#! /usr/local/bin/zsh', 'application/x-zsh'],
[0L, 'string', '=', '#!/usr/local/bin/ash', 'application/x-sh'],
[0L, 'string', '=', '#! /usr/local/bin/ash', 'application/x-zsh'],
[0L, 'string', '=', '#! /usr/local/bin/ash', 'application/x-zsh'],
[0L, 'string', '=', '#!/usr/local/bin/ae', 'text/script'],
[0L, 'string', '=', '#! /usr/local/bin/ae', 'text/script'],
[0L, 'string', '=', '#! /usr/local/bin/ae', 'text/script'],
[0L, 'string', '=', '#!/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/local/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/nawk', 'application/x-awk'],
[0L, 'string', '=', '#!/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/local/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/local/bin/gawk', 'application/x-awk'],
[0L, 'string', '=', '#!/bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#!/usr/bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/awk', 'application/x-awk'],
[0L, 'string', '=', '#! /usr/bin/awk', 'application/x-awk'],
[0L, 'string', '=', 'BEGIN', 'application/x-awk'],
[0L, 'string', '=', '#!/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /bin/perl', 'application/x-perl'],
[0L, 'string', '=', 'eval "exec /bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#!/usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', 'eval "exec /usr/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#!/usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#! /usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', 'eval "exec /usr/local/bin/perl', 'application/x-perl'],
[0L, 'string', '=', '#!/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /bin/python', 'application/x-python'],
[0L, 'string', '=', 'eval "exec /bin/python', 'application/x-python'],
[0L, 'string', '=', '#!/usr/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/bin/python', 'application/x-python'],
[0L, 'string', '=', 'eval "exec /usr/bin/python', 'application/x-python'],
[0L, 'string', '=', '#!/usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', 'eval "exec /usr/local/bin/python', 'application/x-python'],
[0L, 'string', '=', '#!/usr/bin/env python', 'application/x-python'],
[0L, 'string', '=', '#! /usr/bin/env python', 'application/x-python'],
[0L, 'string', '=', '#!/bin/rc', 'text/script'],
[0L, 'string', '=', '#! /bin/rc', 'text/script'],
[0L, 'string', '=', '#! /bin/rc', 'text/script'],
[0L, 'string', '=', '#!/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#!/usr/local/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /usr/local/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /usr/local/bin/bash', 'application/x-sh'],
[0L, 'string', '=', '#! /', 'text/script'],
[0L, 'string', '=', '#! /', 'text/script'],
[0L, 'string', '=', '#!/', 'text/script'],
[0L, 'string', '=', '#! text/script', ''],
[0L, 'string', '=', '\037\235', 'application/compress'],
[0L, 'string', '=', '\037\213', 'application/x-gzip'],
[0L, 'string', '=', '\037\036', 'application/data'],
[0L, 'short', '=', 17437L, 'application/data'],
[0L, 'short', '=', 8191L, 'application/data'],
[0L, 'string', '=', '\377\037', 'application/data'],
[0L, 'short', '=', 145405L, 'application/data'],
[0L, 'string', '=', 'BZh', 'application/x-bzip2'],
[0L, 'leshort', '=', 65398L, 'application/data'],
[0L, 'leshort', '=', 65142L, 'application/data'],
[0L, 'leshort', '=', 64886L, 'application/x-lzh'],
[0L, 'string', '=', '\037\237', 'application/data'],
[0L, 'string', '=', '\037\236', 'application/data'],
[0L, 'string', '=', '\037\240', 'application/data'],
[0L, 'string', '=', 'BZ', 'application/x-bzip'],
[0L, 'string', '=', '\211LZO\000\015\012\032\012', 'application/data'],
[0L, 'belong', '=', 507L, 'application/x-object-file'],
[0L, 'belong', '=', 513L, 'application/x-executable-file'],
[0L, 'belong', '=', 515L, 'application/x-executable-file'],
[0L, 'belong', '=', 517L, 'application/x-executable-file'],
[0L, 'belong', '=', 70231L, 'application/core'],
[24L, 'belong', '=', 60011L, 'application/data'],
[24L, 'belong', '=', 60012L, 'application/data'],
[24L, 'belong', '=', 60013L, 'application/data'],
[24L, 'belong', '=', 60014L, 'application/data'],
[0L, 'belong', '=', 601L, 'application/x-object-file'],
[0L, 'belong', '=', 607L, 'application/data'],
[0L, 'belong', '=', 324508366L, 'application/x-gdbm'],
[0L, 'lelong', '=', 324508366L, 'application/x-gdbm'],
[0L, 'string', '=', 'GDBM', 'application/x-gdbm'],
[0L, 'belong', '=', 398689L, 'application/x-db'],
[0L, 'belong', '=', 340322L, 'application/x-db'],
[0L, 'string', '=', '<list>\012<protocol bbn-m', 'application/data'],
[0L, 'string', '=', 'diff text/x-patch', ''],
[0L, 'string', '=', '*** text/x-patch', ''],
[0L, 'string', '=', 'Only in text/x-patch', ''],
[0L, 'string', '=', 'Common subdirectories: text/x-patch', ''],
[0L, 'string', '=', '!<arch>\012________64E', 'application/data'],
[0L, 'leshort', '=', 387L, 'application/x-executable-file'],
[0L, 'leshort', '=', 392L, 'application/x-executable-file'],
[0L, 'leshort', '=', 399L, 'application/x-object-file'],
[0L, 'string', '=', '\377\377\177', 'application/data'],
[0L, 'string', '=', '\377\377|', 'application/data'],
[0L, 'string', '=', '\377\377~', 'application/data'],
[0L, 'string', '=', '\033c\033', 'application/data'],
[0L, 'long', '=', 4553207L, 'image/x11'],
[0L, 'string', '=', '!<PDF>!\012', 'application/x-prof'],
[0L, 'short', '=', 1281L, 'application/x-locale'],
[24L, 'belong', '=', 60012L, 'application/x-dump'],
[24L, 'belong', '=', 60011L, 'application/x-dump'],
[24L, 'lelong', '=', 60012L, 'application/x-dump'],
[24L, 'lelong', '=', 60011L, 'application/x-dump'],
[0L, 'string', '=', '\177ELF', 'application/x-executable-file'],
[0L, 'short', '=', 340L, 'application/data'],
[0L, 'short', '=', 341L, 'application/x-executable-file'],
[1080L, 'leshort', '=', 61267L, 'application/x-linux-ext2fs'],
[0L, 'string', '=', '\366\366\366\366', 'application/x-pc-floppy'],
[774L, 'beshort', '=', 55998L, 'application/data'],
[510L, 'leshort', '=', 43605L, 'application/data'],
[1040L, 'leshort', '=', 4991L, 'application/x-filesystem'],
[1040L, 'leshort', '=', 5007L, 'application/x-filesystem'],
[1040L, 'leshort', '=', 9320L, 'application/x-filesystem'],
[1040L, 'leshort', '=', 9336L, 'application/x-filesystem'],
[0L, 'string', '=', '-rom1fs-\000', 'application/x-filesystem'],
[395L, 'string', '=', 'OS/2', 'application/x-bootable'],
[0L, 'string', '=', 'FONT', 'font/x-vfont'],
[0L, 'short', '=', 436L, 'font/x-vfont'],
[0L, 'short', '=', 17001L, 'font/x-vfont'],
[0L, 'string', '=', '%!PS-AdobeFont-1.0', 'font/type1'],
[6L, 'string', '=', '%!PS-AdobeFont-1.0', 'font/type1'],
[0L, 'belong', '=', 4L, 'font/x-snf'],
[0L, 'lelong', '=', 4L, 'font/x-snf'],
[0L, 'string', '=', 'STARTFONT font/x-bdf', ''],
[0L, 'string', '=', '\001fcp', 'font/x-pcf'],
[0L, 'string', '=', 'D1.0\015', 'font/x-speedo'],
[0L, 'string', '=', 'flf', 'font/x-figlet'],
[0L, 'string', '=', 'flc', 'application/x-font'],
[0L, 'belong', '=', 335698201L, 'font/x-libgrx'],
[0L, 'belong', '=', 4282797902L, 'font/x-dos'],
[7L, 'belong', '=', 4540225L, 'font/x-dos'],
[7L, 'belong', '=', 5654852L, 'font/x-dos'],
[4098L, 'string', '=', 'DOSFONT', 'font/x-dos'],
[0L, 'string', '=', '<MakerFile', 'application/x-framemaker'],
[0L, 'string', '=', '<MIFFile', 'application/x-framemaker'],
[0L, 'string', '=', '<MakerDictionary', 'application/x-framemaker'],
[0L, 'string', '=', '<MakerScreenFont', 'font/x-framemaker'],
[0L, 'string', '=', '<MML', 'application/x-framemaker'],
[0L, 'string', '=', '<BookFile', 'application/x-framemaker'],
[0L, 'string', '=', '<Maker', 'application/x-framemaker'],
[0L, 'lelong&0377777777', '=', 41400407L, 'application/x-executable-file'],
[0L, 'lelong&0377777777', '=', 41400410L, 'application/x-executable-file'],
[0L, 'lelong&0377777777', '=', 41400413L, 'application/x-executable-file'],
[0L, 'lelong&0377777777', '=', 41400314L, 'application/x-executable-file'],
[7L, 'string', '=', '\357\020\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000', 'application/core'],
[0L, 'lelong', '=', 11421044151L, 'application/data'],
[0L, 'string', '=', 'GIMP Gradient', 'application/x-gimp-gradient'],
[0L, 'string', '=', 'gimp xcf', 'application/x-gimp-image'],
[20L, 'string', '=', 'GPAT', 'application/x-gimp-pattern'],
[20L, 'string', '=', 'GIMP', 'application/x-gimp-brush'],
[0L, 'string', '=', '\336\022\004\225', 'application/x-locale'],
[0L, 'string', '=', '\225\004\022\336', 'application/x-locale'],
[0L, 'beshort', '=', 627L, 'application/x-executable-file'],
[0L, 'beshort', '=', 624L, 'application/x-executable-file'],
[0L, 'string', '=', '\000\001\000\000\000', 'font/ttf'],
[0L, 'long', '=', 1203604016L, 'application/data'],
[0L, 'long', '=', 1702407010L, 'application/data'],
[0L, 'long', '=', 1003405017L, 'application/data'],
[0L, 'long', '=', 1602007412L, 'application/data'],
[0L, 'belong', '=', 34603270L, 'application/x-object-file'],
[0L, 'belong', '=', 34603271L, 'application/x-executable-file'],
[0L, 'belong', '=', 34603272L, 'application/x-executable-file'],
[0L, 'belong', '=', 34603275L, 'application/x-executable-file'],
[0L, 'belong', '=', 34603278L, 'application/x-library-file'],
[0L, 'belong', '=', 34603277L, 'application/x-library-file'],
[0L, 'belong', '=', 34865414L, 'application/x-object-file'],
[0L, 'belong', '=', 34865415L, 'application/x-executable-file'],
[0L, 'belong', '=', 34865416L, 'application/x-executable-file'],
[0L, 'belong', '=', 34865419L, 'application/x-executable-file'],
[0L, 'belong', '=', 34865422L, 'application/x-library-file'],
[0L, 'belong', '=', 34865421L, 'application/x-object-file'],
[0L, 'belong', '=', 34275590L, 'application/x-object-file'],
[0L, 'belong', '=', 34275591L, 'application/x-executable-file'],
[0L, 'belong', '=', 34275592L, 'application/x-executable-file'],
[0L, 'belong', '=', 34275595L, 'application/x-executable-file'],
[0L, 'belong', '=', 34275598L, 'application/x-library-file'],
[0L, 'belong', '=', 34275597L, 'application/x-library-file'],
[0L, 'belong', '=', 557605234L, 'application/x-ar'],
[0L, 'long', '=', 34078982L, 'application/x-executable-file'],
[0L, 'long', '=', 34078983L, 'application/x-executable-file'],
[0L, 'long', '=', 34078984L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341128L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341127L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341131L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341126L, 'application/x-executable-file'],
[0L, 'belong', '=', 34210056L, 'application/x-executable-file'],
[0L, 'belong', '=', 34210055L, 'application/x-executable-file'],
[0L, 'belong', '=', 34341134L, 'application/x-library-file'],
[0L, 'belong', '=', 34341133L, 'application/x-library-file'],
[0L, 'long', '=', 65381L, 'application/x-library-file'],
[0L, 'long', '=', 34275173L, 'application/x-library-file'],
[0L, 'long', '=', 34406245L, 'application/x-library-file'],
[0L, 'long', '=', 34144101L, 'application/x-library-file'],
[0L, 'long', '=', 22552998L, 'application/core'],
[0L, 'long', '=', 1302851304L, 'font/x-hp-windows'],
[0L, 'string', '=', 'Bitmapfile', 'image/unknown'],
[0L, 'string', '=', 'IMGfile', 'CIS image/unknown'],
[0L, 'long', '=', 34341132L, 'application/x-lisp'],
[0L, 'string', '=', 'msgcat01', 'application/x-locale'],
[0L, 'string', '=', 'HPHP48-', 'HP48 binary'],
[0L, 'string', '=', '%%HP:', 'HP48 text'],
[0L, 'beshort', '=', 200L, 'hp200 (68010) BSD'],
[0L, 'beshort', '=', 300L, 'hp300 (68020+68881) BSD'],
[0L, 'beshort', '=', 537L, '370 XA sysV executable'],
[0L, 'beshort', '=', 532L, '370 XA sysV pure executable'],
[0L, 'beshort', '=', 54001L, '370 sysV pure executable'],
[0L, 'beshort', '=', 55001L, '370 XA sysV pure executable'],
[0L, 'beshort', '=', 56401L, '370 sysV executable'],
[0L, 'beshort', '=', 57401L, '370 XA sysV executable'],
[0L, 'beshort', '=', 531L, 'SVR2 executable (Amdahl-UTS)'],
[0L, 'beshort', '=', 534L, 'SVR2 pure executable (Amdahl-UTS)'],
[0L, 'beshort', '=', 530L, 'SVR2 pure executable (USS/370)'],
[0L, 'beshort', '=', 535L, 'SVR2 executable (USS/370)'],
[0L, 'beshort', '=', 479L, 'executable (RISC System/6000 V3.1) or obj module'],
[0L, 'beshort', '=', 260L, 'shared library'],
[0L, 'beshort', '=', 261L, 'ctab data'],
[0L, 'beshort', '=', 65028L, 'structured file'],
[0L, 'string', '=', '0xabcdef', 'AIX message catalog'],
[0L, 'belong', '=', 505L, 'AIX compiled message catalog'],
[0L, 'string', '=', '<aiaff>', 'archive'],
[0L, 'string', '=', 'FORM', 'IFF data'],
[0L, 'string', '=', 'P1', 'image/x-portable-bitmap'],
[0L, 'string', '=', 'P2', 'image/x-portable-graymap'],
[0L, 'string', '=', 'P3', 'image/x-portable-pixmap'],
[0L, 'string', '=', 'P4', 'image/x-portable-bitmap'],
[0L, 'string', '=', 'P5', 'image/x-portable-graymap'],
[0L, 'string', '=', 'P6', 'image/x-portable-pixmap'],
[0L, 'string', '=', 'IIN1', 'image/tiff'],
[0L, 'string', '=', 'MM\000*', 'image/tiff'],
[0L, 'string', '=', 'II*\000', 'image/tiff'],
[0L, 'string', '=', '\211PNG', 'image/x-png'],
[1L, 'string', '=', 'PNG', 'image/x-png'],
[0L, 'string', '=', 'GIF8', 'image/gif'],
[0L, 'string', '=', '\361\000@\273', 'image/x-cmu-raster'],
[0L, 'string', '=', 'id=ImageMagick', 'MIFF image data'],
[0L, 'long', '=', 1123028772L, 'Artisan image data'],
[0L, 'string', '=', '#FIG', 'FIG image text'],
[0L, 'string', '=', 'ARF_BEGARF', 'PHIGS clear text archive'],
[0L, 'string', '=', '@(#)SunPHIGS', 'SunPHIGS'],
[0L, 'string', '=', 'GKSM', 'GKS Metafile'],
[0L, 'string', '=', 'BEGMF', 'clear text Computer Graphics Metafile'],
[0L, 'beshort&0xffe0', '=', 32L, 'binary Computer Graphics Metafile'],
[0L, 'beshort', '=', 12320L, 'character Computer Graphics Metafile'],
[0L, 'string', '=', 'yz', 'MGR bitmap, modern format, 8-bit aligned'],
[0L, 'string', '=', 'zz', 'MGR bitmap, old format, 1-bit deep, 16-bit aligned'],
[0L, 'string', '=', 'xz', 'MGR bitmap, old format, 1-bit deep, 32-bit aligned'],
[0L, 'string', '=', 'yx', 'MGR bitmap, modern format, squeezed'],
[0L, 'string', '=', '%bitmap\000', 'FBM image data'],
[1L, 'string', '=', 'PC Research, Inc', 'group 3 fax data'],
[0L, 'beshort', '=', 65496L, 'image/jpeg'],
[0L, 'string', '=', 'hsi1', 'image/x-jpeg-proprietary'],
[0L, 'string', '=', 'BM', 'image/x-bmp'],
[0L, 'string', '=', 'IC', 'image/x-ico'],
[0L, 'string', '=', 'PI', 'PC pointer image data'],
[0L, 'string', '=', 'CI', 'PC color icon data'],
[0L, 'string', '=', 'CP', 'PC color pointer image data'],
[0L, 'string', '=', '/* XPM */', 'X pixmap image text'],
[0L, 'leshort', '=', 52306L, 'RLE image data,'],
[0L, 'string', '=', 'Imagefile version-', 'iff image data'],
[0L, 'belong', '=', 1504078485L, 'x/x-image-sun-raster'],
[0L, 'beshort', '=', 474L, 'x/x-image-sgi'],
[0L, 'string', '=', 'IT01', 'FIT image data'],
[0L, 'string', '=', 'IT02', 'FIT image data'],
[2048L, 'string', '=', 'PCD_IPI', 'x/x-photo-cd-pack-file'],
[0L, 'string', '=', 'PCD_OPA', 'x/x-photo-cd-overfiew-file'],
[0L, 'string', '=', 'SIMPLE =', 'FITS image data'],
[0L, 'string', '=', 'This is a BitMap file', 'Lisp Machine bit-array-file'],
[0L, 'string', '=', '!!', 'Bennet Yee\'s "face" format'],
[0L, 'beshort', '=', 4112L, 'PEX Binary Archive'],
[3000L, 'string', '=', 'Visio (TM) Drawing', '%s'],
[0L, 'leshort', '=', 502L, 'basic-16 executable'],
[0L, 'leshort', '=', 503L, 'basic-16 executable (TV)'],
[0L, 'leshort', '=', 510L, 'application/x-executable-file'],
[0L, 'leshort', '=', 511L, 'application/x-executable-file'],
[0L, 'leshort', '=', 512L, 'application/x-executable-file'],
[0L, 'leshort', '=', 522L, 'application/x-executable-file'],
[0L, 'leshort', '=', 514L, 'application/x-executable-file'],
[0L, 'string', '=', '\210OPS', 'Interleaf saved data'],
[0L, 'string', '=', '<!OPS', 'Interleaf document text'],
[4L, 'string', '=', 'pgscriptver', 'IslandWrite document'],
[13L, 'string', '=', 'DrawFile', 'IslandDraw document'],
[0L, 'leshort&0xFFFC', '=', 38400L, 'little endian ispell'],
[0L, 'beshort&0xFFFC', '=', 38400L, 'big endian ispell'],
[0L, 'belong', '=', 3405691582L, 'compiled Java class data,'],
[0L, 'beshort', '=', 44269L, 'Java serialization data'],
[0L, 'string', '=', 'KarmaRHD', 'Version Karma Data Structure Version'],
[0L, 'string', '=', 'lect', 'DEC SRC Virtual Paper Lectern file'],
[53L, 'string', '=', 'yyprevious', 'C program text (from lex)'],
[21L, 'string', '=', 'generated by flex', 'C program text (from flex)'],
[0L, 'string', '=', '%{', 'lex description text'],
[0L, 'short', '=', 32768L, 'lif file'],
[0L, 'lelong', '=', 6553863L, 'Linux/i386 impure executable (OMAGIC)'],
[0L, 'lelong', '=', 6553864L, 'Linux/i386 pure executable (NMAGIC)'],
[0L, 'lelong', '=', 6553867L, 'Linux/i386 demand-paged executable (ZMAGIC)'],
[0L, 'lelong', '=', 6553804L, 'Linux/i386 demand-paged executable (QMAGIC)'],
[0L, 'string', '=', '\007\001\000', 'Linux/i386 object file'],
[0L, 'string', '=', '\001\003\020\004', 'Linux-8086 impure executable'],
[0L, 'string', '=', '\001\003 \004', 'Linux-8086 executable'],
[0L, 'string', '=', '\243\206\001\000', 'Linux-8086 object file'],
[0L, 'string', '=', '\001\003\020\020', 'Minix-386 impure executable'],
[0L, 'string', '=', '\001\003 \020', 'Minix-386 executable'],
[0L, 'string', '=', '*nazgul*', 'Linux compiled message catalog'],
[216L, 'lelong', '=', 421L, 'Linux/i386 core file'],
[2L, 'string', '=', 'LILO', 'Linux/i386 LILO boot/chain loader'],
[0L, 'string', '=', '0.9', ''],
[0L, 'leshort', '=', 1078L, 'font/linux-psf'],
[4086L, 'string', '=', 'SWAP-SPACE', 'Linux/i386 swap file'],
[0L, 'leshort', '=', 387L, 'ECOFF alpha'],
[514L, 'string', '=', 'HdrS', 'Linux kernel'],
[0L, 'belong', '=', 3099592590L, 'Linux kernel'],
[0L, 'string', '=', 'Begin3', 'Linux Software Map entry text'],
[0L, 'string', '=', ';;', 'Lisp/Scheme program text'],
[0L, 'string', '=', '\012(', 'byte-compiled Emacs-Lisp program data'],
[0L, 'string', '=', ';ELC\023\000\000\000', 'byte-compiled Emacs-Lisp program data'],
[0L, 'string', '=', "(SYSTEM::VERSION '", 'CLISP byte-compiled Lisp program text'],
[0L, 'long', '=', 1886817234L, 'CLISP memory image data'],
[0L, 'long', '=', 3532355184L, 'CLISP memory image data, other endian'],
[0L, 'long', '=', 3725722773L, 'GNU-format message catalog data'],
[0L, 'long', '=', 2500072158L, 'GNU-format message catalog data'],
[0L, 'belong', '=', 3405691582L, 'mach-o fat file'],
[0L, 'belong', '=', 4277009102L, 'mach-o'],
[11L, 'string', '=', 'must be converted with BinHex', 'BinHex binary text'],
[0L, 'string', '=', 'SIT!', 'StuffIt Archive (data)'],
[65L, 'string', '=', 'SIT!', 'StuffIt Archive (rsrc + data)'],
[0L, 'string', '=', 'SITD', 'StuffIt Deluxe (data)'],
[65L, 'string', '=', 'SITD', 'StuffIt Deluxe (rsrc + data)'],
[0L, 'string', '=', 'Seg', 'StuffIt Deluxe Segment (data)'],
[65L, 'string', '=', 'Seg', 'StuffIt Deluxe Segment (rsrc + data)'],
[0L, 'string', '=', 'APPL', 'Macintosh Application (data)'],
[65L, 'string', '=', 'APPL', 'Macintosh Application (rsrc + data)'],
[0L, 'string', '=', 'zsys', 'Macintosh System File (data)'],
[65L, 'string', '=', 'zsys', 'Macintosh System File(rsrc + data)'],
[0L, 'string', '=', 'FNDR', 'Macintosh Finder (data)'],
[65L, 'string', '=', 'FNDR', 'Macintosh Finder(rsrc + data)'],
[0L, 'string', '=', 'libr', 'Macintosh Library (data)'],
[65L, 'string', '=', 'libr', 'Macintosh Library(rsrc + data)'],
[0L, 'string', '=', 'shlb', 'Macintosh Shared Library (data)'],
[65L, 'string', '=', 'shlb', 'Macintosh Shared Library(rsrc + data)'],
[0L, 'string', '=', 'cdev', 'Macintosh Control Panel (data)'],
[65L, 'string', '=', 'cdev', 'Macintosh Control Panel(rsrc + data)'],
[0L, 'string', '=', 'INIT', 'Macintosh Extension (data)'],
[65L, 'string', '=', 'INIT', 'Macintosh Extension(rsrc + data)'],
[0L, 'string', '=', 'FFIL', 'font/ttf'],
[65L, 'string', '=', 'FFIL', 'font/ttf'],
[0L, 'string', '=', 'LWFN', 'font/type1'],
[65L, 'string', '=', 'LWFN', 'font/type1'],
[0L, 'string', '=', 'PACT', 'Macintosh Compact Pro Archive (data)'],
[65L, 'string', '=', 'PACT', 'Macintosh Compact Pro Archive(rsrc + data)'],
[0L, 'string', '=', 'ttro', 'Macintosh TeachText File (data)'],
[65L, 'string', '=', 'ttro', 'Macintosh TeachText File(rsrc + data)'],
[0L, 'string', '=', 'TEXT', 'Macintosh TeachText File (data)'],
[65L, 'string', '=', 'TEXT', 'Macintosh TeachText File(rsrc + data)'],
[0L, 'string', '=', 'PDF', 'Macintosh PDF File (data)'],
[65L, 'string', '=', 'PDF', 'Macintosh PDF File(rsrc + data)'],
[0L, 'string', '=', '# Magic', 'magic text file for file(1) cmd'],
[0L, 'string', '=', 'Relay-Version:', 'old news text'],
[0L, 'string', '=', '#! rnews', 'batched news text'],
[0L, 'string', '=', 'N#! rnews', 'mailed, batched news text'],
[0L, 'string', '=', 'Forward to', 'mail forwarding text'],
[0L, 'string', '=', 'Pipe to', 'mail piping text'],
[0L, 'string', '=', 'Return-Path:', 'message/rfc822'],
[0L, 'string', '=', 'Path:', 'message/news'],
[0L, 'string', '=', 'Xref:', 'message/news'],
[0L, 'string', '=', 'From:', 'message/rfc822'],
[0L, 'string', '=', 'Article', 'message/news'],
[0L, 'string', '=', 'BABYL', 'message/x-gnu-rmail'],
[0L, 'string', '=', 'Received:', 'message/rfc822'],
[0L, 'string', '=', 'MIME-Version:', 'MIME entity text'],
[0L, 'string', '=', 'Content-Type: ', ''],
[0L, 'string', '=', 'Content-Type:', ''],
[0L, 'long', '=', 31415L, 'Mirage Assembler m.out executable'],
[0L, 'string', '=', '\311\304', 'ID tags data'],
[0L, 'string', '=', '\001\001\001\001', 'MMDF mailbox'],
[4L, 'string', '=', 'Research,', 'Digifax-G3-File'],
[0L, 'short', '=', 256L, 'raw G3 data, byte-padded'],
[0L, 'short', '=', 5120L, 'raw G3 data'],
[0L, 'string', '=', 'RMD1', 'raw modem data'],
[0L, 'string', '=', 'PVF1\012', 'portable voice format'],
[0L, 'string', '=', 'PVF2\012', 'portable voice format'],
[0L, 'beshort', '=', 520L, 'mc68k COFF'],
[0L, 'beshort', '=', 521L, 'mc68k executable (shared)'],
[0L, 'beshort', '=', 522L, 'mc68k executable (shared demand paged)'],
[0L, 'beshort', '=', 554L, '68K BCS executable'],
[0L, 'beshort', '=', 555L, '88K BCS executable'],
[0L, 'string', '=', 'S0', 'Motorola S-Record; binary data in text format'],
[0L, 'string', '=', '@echo off', 'MS-DOS batch file text'],
[128L, 'string', '=', 'PE\000\000', 'MS Windows PE'],
[0L, 'leshort', '=', 332L, 'MS Windows COFF Intel 80386 object file'],
[0L, 'leshort', '=', 358L, 'MS Windows COFF MIPS R4000 object file'],
[0L, 'leshort', '=', 388L, 'MS Windows COFF Alpha object file'],
[0L, 'leshort', '=', 616L, 'MS Windows COFF Motorola 68000 object file'],
[0L, 'leshort', '=', 496L, 'MS Windows COFF PowerPC object file'],
[0L, 'leshort', '=', 656L, 'MS Windows COFF PA-RISC object file'],
[0L, 'string', '=', 'MZ', 'application/x-ms-dos-executable'],
[0L, 'string', '=', 'LZ', 'MS-DOS executable (built-in)'],
[0L, 'string', '=', 'regf', 'Windows NT Registry file'],
[2080L, 'string', '=', 'Microsoft Word 6.0 Document', 'text/vnd.ms-word'],
[2080L, 'string', '=', 'Documento Microsoft Word 6', 'text/vnd.ms-word'],
[2112L, 'string', '=', 'MSWordDoc', 'text/vnd.ms-word'],
[0L, 'belong', '=', 834535424L, 'text/vnd.ms-word'],
[0L, 'string', '=', 'PO^Q`', 'text/vnd.ms-word'],
[2080L, 'string', '=', 'Microsoft Excel 5.0 Worksheet', 'application/vnd.ms-excel'],
[2114L, 'string', '=', 'Biff5', 'application/vnd.ms-excel'],
[0L, 'belong', '=', 6656L, 'Lotus 1-2-3'],
[0L, 'belong', '=', 512L, 'Lotus 1-2-3'],
[1L, 'string', '=', 'WPC', 'text/vnd.wordperfect'],
[0L, 'beshort', '=', 610L, 'Tower/XP rel 2 object'],
[0L, 'beshort', '=', 615L, 'Tower/XP rel 2 object'],
[0L, 'beshort', '=', 620L, 'Tower/XP rel 3 object'],
[0L, 'beshort', '=', 625L, 'Tower/XP rel 3 object'],
[0L, 'beshort', '=', 630L, 'Tower32/600/400 68020 object'],
[0L, 'beshort', '=', 640L, 'Tower32/800 68020'],
[0L, 'beshort', '=', 645L, 'Tower32/800 68010'],
[0L, 'lelong', '=', 407L, 'NetBSD little-endian object file'],
[0L, 'belong', '=', 407L, 'NetBSD big-endian object file'],
[0L, 'belong&0377777777', '=', 41400413L, 'NetBSD/i386 demand paged'],
[0L, 'belong&0377777777', '=', 41400410L, 'NetBSD/i386 pure'],
[0L, 'belong&0377777777', '=', 41400407L, 'NetBSD/i386'],
[0L, 'belong&0377777777', '=', 41400507L, 'NetBSD/i386 core'],
[0L, 'belong&0377777777', '=', 41600413L, 'NetBSD/m68k demand paged'],
[0L, 'belong&0377777777', '=', 41600410L, 'NetBSD/m68k pure'],
[0L, 'belong&0377777777', '=', 41600407L, 'NetBSD/m68k'],
[0L, 'belong&0377777777', '=', 41600507L, 'NetBSD/m68k core'],
[0L, 'belong&0377777777', '=', 42000413L, 'NetBSD/m68k4k demand paged'],
[0L, 'belong&0377777777', '=', 42000410L, 'NetBSD/m68k4k pure'],
[0L, 'belong&0377777777', '=', 42000407L, 'NetBSD/m68k4k'],
[0L, 'belong&0377777777', '=', 42000507L, 'NetBSD/m68k4k core'],
[0L, 'belong&0377777777', '=', 42200413L, 'NetBSD/ns32532 demand paged'],
[0L, 'belong&0377777777', '=', 42200410L, 'NetBSD/ns32532 pure'],
[0L, 'belong&0377777777', '=', 42200407L, 'NetBSD/ns32532'],
[0L, 'belong&0377777777', '=', 42200507L, 'NetBSD/ns32532 core'],
[0L, 'belong&0377777777', '=', 42400413L, 'NetBSD/sparc demand paged'],
[0L, 'belong&0377777777', '=', 42400410L, 'NetBSD/sparc pure'],
[0L, 'belong&0377777777', '=', 42400407L, 'NetBSD/sparc'],
[0L, 'belong&0377777777', '=', 42400507L, 'NetBSD/sparc core'],
[0L, 'belong&0377777777', '=', 42600413L, 'NetBSD/pmax demand paged'],
[0L, 'belong&0377777777', '=', 42600410L, 'NetBSD/pmax pure'],
[0L, 'belong&0377777777', '=', 42600407L, 'NetBSD/pmax'],
[0L, 'belong&0377777777', '=', 42600507L, 'NetBSD/pmax core'],
[0L, 'belong&0377777777', '=', 43000413L, 'NetBSD/vax demand paged'],
[0L, 'belong&0377777777', '=', 43000410L, 'NetBSD/vax pure'],
[0L, 'belong&0377777777', '=', 43000407L, 'NetBSD/vax'],
[0L, 'belong&0377777777', '=', 43000507L, 'NetBSD/vax core'],
[0L, 'lelong', '=', 459141L, 'ECOFF NetBSD/alpha binary'],
[0L, 'belong&0377777777', '=', 43200507L, 'NetBSD/alpha core'],
[0L, 'belong&0377777777', '=', 43400413L, 'NetBSD/mips demand paged'],
[0L, 'belong&0377777777', '=', 43400410L, 'NetBSD/mips pure'],
[0L, 'belong&0377777777', '=', 43400407L, 'NetBSD/mips'],
[0L, 'belong&0377777777', '=', 43400507L, 'NetBSD/mips core'],
[0L, 'belong&0377777777', '=', 43600413L, 'NetBSD/arm32 demand paged'],
[0L, 'belong&0377777777', '=', 43600410L, 'NetBSD/arm32 pure'],
[0L, 'belong&0377777777', '=', 43600407L, 'NetBSD/arm32'],
[0L, 'belong&0377777777', '=', 43600507L, 'NetBSD/arm32 core'],
[0L, 'string', '=', 'StartFontMetrics', 'font/x-sunos-news'],
[0L, 'string', '=', 'StartFont', 'font/x-sunos-news'],
[0L, 'belong', '=', 326773060L, 'font/x-sunos-news'],
[0L, 'belong', '=', 326773063L, 'font/x-sunos-news'],
[0L, 'belong', '=', 326773072L, 'font/x-sunos-news'],
[0L, 'belong', '=', 326773073L, 'font/x-sunos-news'],
[8L, 'belong', '=', 326773573L, 'font/x-sunos-news'],
[8L, 'belong', '=', 326773576L, 'font/x-sunos-news'],
[0L, 'string', '=', 'Octave-1-L', 'Octave binary data (little endian)'],
[0L, 'string', '=', 'Octave-1-B', 'Octave binary data (big endian)'],
[0L, 'string', '=', '\177OLF', 'OLF'],
[0L, 'beshort', '=', 34765L, 'OS9/6809 module:'],
[0L, 'beshort', '=', 19196L, 'OS9/68K module:'],
[0L, 'long', '=', 61374L, 'OSF/Rose object'],
[0L, 'short', '=', 565L, 'i386 COFF object'],
[0L, 'short', '=', 10775L, '"compact bitmap" format (Poskanzer)'],
[0L, 'string', '=', '%PDF-', 'PDF document'],
[0L, 'lelong', '=', 101555L, 'PDP-11 single precision APL workspace'],
[0L, 'lelong', '=', 101554L, 'PDP-11 double precision APL workspace'],
[0L, 'leshort', '=', 407L, 'PDP-11 executable'],
[0L, 'leshort', '=', 401L, 'PDP-11 UNIX/RT ldp'],
[0L, 'leshort', '=', 405L, 'PDP-11 old overlay'],
[0L, 'leshort', '=', 410L, 'PDP-11 pure executable'],
[0L, 'leshort', '=', 411L, 'PDP-11 separate I&D executable'],
[0L, 'leshort', '=', 437L, 'PDP-11 kernel overlay'],
[0L, 'beshort', '=', 39168L, 'PGP key public ring'],
[0L, 'beshort', '=', 38145L, 'PGP key security ring'],
[0L, 'beshort', '=', 38144L, 'PGP key security ring'],
[0L, 'beshort', '=', 42496L, 'PGP encrypted data'],
[0L, 'string', '=', '-----BEGIN PGP', 'PGP armored data'],
[0L, 'string', '=', '# PaCkAgE DaTaStReAm', 'pkg Datastream (SVR4)'],
[0L, 'short', '=', 601L, 'mumps avl global'],
[0L, 'short', '=', 602L, 'mumps blt global'],
[0L, 'string', '=', '%!', 'application/postscript'],
[0L, 'string', '=', '\004%!', 'application/postscript'],
[0L, 'belong', '=', 3318797254L, 'DOS EPS Binary File'],
[0L, 'string', '=', '*PPD-Adobe:', 'PPD file'],
[0L, 'string', '=', '\033%-12345X@PJL', 'HP Printer Job Language data'],
[0L, 'string', '=', '\033%-12345X@PJL', 'HP Printer Job Language data'],
[0L, 'string', '=', '\033E\033', 'image/x-pcl-hp'],
[0L, 'string', '=', '@document(', 'Imagen printer'],
[0L, 'string', '=', 'Rast', 'RST-format raster font data'],
[0L, 'belong&0xff00ffff', '=', 1442840576L, 'ps database'],
[0L, 'long', '=', 1351614727L, 'Pyramid 90x family executable'],
[0L, 'long', '=', 1351614728L, 'Pyramid 90x family pure executable'],
[0L, 'long', '=', 1351614731L, 'Pyramid 90x family demand paged pure executable'],
[0L, 'beshort', '=', 60843L, ''],
[0L, 'string', '=', '{\\\\rtf', 'Rich Text Format data,'],
[38L, 'string', '=', 'Spreadsheet', 'sc spreadsheet file'],
[8L, 'string', '=', '\001s SCCS', 'archive data'],
[0L, 'byte', '=', 46L, 'Sendmail frozen configuration'],
[0L, 'short', '=', 10012L, 'Sendmail frozen configuration'],
[0L, 'lelong', '=', 234L, 'BALANCE NS32000 .o'],
[0L, 'lelong', '=', 4330L, 'BALANCE NS32000 executable (0 @ 0)'],
[0L, 'lelong', '=', 8426L, 'BALANCE NS32000 executable (invalid @ 0)'],
[0L, 'lelong', '=', 12522L, 'BALANCE NS32000 standalone executable'],
[0L, 'leshort', '=', 4843L, 'SYMMETRY i386 .o'],
[0L, 'leshort', '=', 8939L, 'SYMMETRY i386 executable (0 @ 0)'],
[0L, 'leshort', '=', 13035L, 'SYMMETRY i386 executable (invalid @ 0)'],
[0L, 'leshort', '=', 17131L, 'SYMMETRY i386 standalone executable'],
[0L, 'string', '=', 'kbd!map', 'kbd map file'],
[0L, 'belong', '=', 407L, 'old SGI 68020 executable'],
[0L, 'belong', '=', 410L, 'old SGI 68020 pure executable'],
[0L, 'beshort', '=', 34661L, 'disk quotas file'],
[0L, 'beshort', '=', 1286L, 'IRIS Showcase file'],
[0L, 'beshort', '=', 550L, 'IRIS Showcase template'],
[0L, 'belong', '=', 1396917837L, 'IRIS Showcase file'],
[0L, 'belong', '=', 1413695053L, 'IRIS Showcase template'],
[0L, 'belong', '=', 3735927486L, 'IRIX Parallel Arena'],
[0L, 'beshort', '=', 352L, 'MIPSEB COFF executable'],
[0L, 'beshort', '=', 354L, 'MIPSEL COFF executable'],
[0L, 'beshort', '=', 24577L, 'MIPSEB-LE COFF executable'],
[0L, 'beshort', '=', 25089L, 'MIPSEL-LE COFF executable'],
[0L, 'beshort', '=', 355L, 'MIPSEB MIPS-II COFF executable'],
[0L, 'beshort', '=', 358L, 'MIPSEL MIPS-II COFF executable'],
[0L, 'beshort', '=', 25345L, 'MIPSEB-LE MIPS-II COFF executable'],
[0L, 'beshort', '=', 26113L, 'MIPSEL-LE MIPS-II COFF executable'],
[0L, 'beshort', '=', 320L, 'MIPSEB MIPS-III COFF executable'],
[0L, 'beshort', '=', 322L, 'MIPSEL MIPS-III COFF executable'],
[0L, 'beshort', '=', 16385L, 'MIPSEB-LE MIPS-III COFF executable'],
[0L, 'beshort', '=', 16897L, 'MIPSEL-LE MIPS-III COFF executable'],
[0L, 'beshort', '=', 384L, 'MIPSEB Ucode'],
[0L, 'beshort', '=', 386L, 'MIPSEL Ucode'],
[0L, 'belong', '=', 3735924144L, 'IRIX core dump'],
[0L, 'belong', '=', 3735924032L, 'IRIX 64-bit core dump'],
[0L, 'belong', '=', 3133063355L, 'IRIX N32 core dump'],
[0L, 'string', '=', 'CrshDump', 'IRIX vmcore dump of'],
[0L, 'string', '=', 'SGIAUDIT', 'SGI Audit file'],
[0L, 'string', '=', 'WNGZWZSC', 'Wingz compiled script'],
[0L, 'string', '=', 'WNGZWZSS', 'Wingz spreadsheet'],
[0L, 'string', '=', 'WNGZWZHP', 'Wingz help file'],
[0L, 'string', '=', '\\#Inventor', 'V IRIS Inventor 1.0 file'],
[0L, 'string', '=', '\\#Inventor', 'V2 Open Inventor 2.0 file'],
[0L, 'string', '=', 'glfHeadMagic();', 'GLF_TEXT'],
[4L, 'belong', '=', 1090584576L, 'GLF_BINARY_LSB_FIRST'],
[4L, 'belong', '=', 321L, 'GLF_BINARY_MSB_FIRST'],
[0L, 'string', '=', '<!DOCTYPE HTML', 'text/html'],
[0L, 'string', '=', '<!doctype html', 'text/html'],
[0L, 'string', '=', '<HEAD', 'text/html'],
[0L, 'string', '=', '<head', 'text/html'],
[0L, 'string', '=', '<TITLE', 'text/html'],
[0L, 'string', '=', '<title', 'text/html'],
[0L, 'string', '=', '<html', 'text/html'],
[0L, 'string', '=', '<HTML', 'text/html'],
[0L, 'string', '=', '<!DOCTYPE', 'exported SGML document text'],
[0L, 'string', '=', '<!doctype', 'exported SGML document text'],
[0L, 'string', '=', '<!SUBDOC', 'exported SGML subdocument text'],
[0L, 'string', '=', '<!subdoc', 'exported SGML subdocument text'],
[0L, 'string', '=', '<!--', 'exported SGML document text'],
[0L, 'string', '=', 'RTSS', 'NetMon capture file'],
[0L, 'string', '=', 'TRSNIFF data \032', 'Sniffer capture file'],
[0L, 'string', '=', 'XCP\000', 'NetXRay capture file'],
[0L, 'ubelong', '=', 2712847316L, 'tcpdump capture file (big-endian)'],
[0L, 'ulelong', '=', 2712847316L, 'tcpdump capture file (little-endian)'],
[0L, 'string', '=', '<!SQ DTD>', 'Compiled SGML rules file'],
[0L, 'string', '=', '<!SQ A/E>', 'A/E SGML Document binary'],
[0L, 'string', '=', '<!SQ STS>', 'A/E SGML binary styles file'],
[0L, 'short', '=', 49374L, 'Compiled PSI (v1) data'],
[0L, 'short', '=', 49370L, 'Compiled PSI (v2) data'],
[0L, 'short', '=', 125252L, 'SoftQuad DESC or font file binary'],
[0L, 'string', '=', 'SQ BITMAP1', 'SoftQuad Raster Format text'],
[0L, 'string', '=', 'X SoftQuad', 'troff Context intermediate'],
[0L, 'belong&077777777', '=', 600413L, 'sparc demand paged'],
[0L, 'belong&077777777', '=', 600410L, 'sparc pure'],
[0L, 'belong&077777777', '=', 600407L, 'sparc'],
[0L, 'belong&077777777', '=', 400413L, 'mc68020 demand paged'],
[0L, 'belong&077777777', '=', 400410L, 'mc68020 pure'],
[0L, 'belong&077777777', '=', 400407L, 'mc68020'],
[0L, 'belong&077777777', '=', 200413L, 'mc68010 demand paged'],
[0L, 'belong&077777777', '=', 200410L, 'mc68010 pure'],
[0L, 'belong&077777777', '=', 200407L, 'mc68010'],
[0L, 'belong', '=', 407L, 'old sun-2 executable'],
[0L, 'belong', '=', 410L, 'old sun-2 pure executable'],
[0L, 'belong', '=', 413L, 'old sun-2 demand paged executable'],
[0L, 'belong', '=', 525398L, 'SunOS core file'],
[0L, 'long', '=', 4197695630L, 'SunPC 4.0 Hard Disk'],
[0L, 'string', '=', '#SUNPC_CONFIG', 'SunPC 4.0 Properties Values'],
[0L, 'string', '=', 'snoop', 'Snoop capture file'],
[36L, 'string', '=', 'acsp', 'Kodak Color Management System, ICC Profile'],
[0L, 'string', '=', '#!teapot\012xdr', 'teapot work sheet (XDR format)'],
[0L, 'string', '=', '\032\001', 'Compiled terminfo entry'],
[0L, 'short', '=', 433L, 'Curses screen image'],
[0L, 'short', '=', 434L, 'Curses screen image'],
[0L, 'string', '=', '\367\002', 'TeX DVI file'],
[0L, 'string', '=', '\367\203', 'font/x-tex'],
[0L, 'string', '=', '\367Y', 'font/x-tex'],
[0L, 'string', '=', '\367\312', 'font/x-tex'],
[0L, 'string', '=', 'This is TeX,', 'TeX transcript text'],
[0L, 'string', '=', 'This is METAFONT,', 'METAFONT transcript text'],
[2L, 'string', '=', '\000\021', 'font/x-tex-tfm'],
[2L, 'string', '=', '\000\022', 'font/x-tex-tfm'],
[0L, 'string', '=', '\\\\input\\', 'texinfo Texinfo source text'],
[0L, 'string', '=', 'This is Info file', 'GNU Info text'],
[0L, 'string', '=', '\\\\input', 'TeX document text'],
[0L, 'string', '=', '\\\\section', 'LaTeX document text'],
[0L, 'string', '=', '\\\\setlength', 'LaTeX document text'],
[0L, 'string', '=', '\\\\documentstyle', 'LaTeX document text'],
[0L, 'string', '=', '\\\\chapter', 'LaTeX document text'],
[0L, 'string', '=', '\\\\documentclass', 'LaTeX 2e document text'],
[0L, 'string', '=', '\\\\relax', 'LaTeX auxiliary file'],
[0L, 'string', '=', '\\\\contentsline', 'LaTeX table of contents'],
[0L, 'string', '=', '\\\\indexentry', 'LaTeX raw index file'],
[0L, 'string', '=', '\\\\begin{theindex}', 'LaTeX sorted index'],
[0L, 'string', '=', '\\\\glossaryentry', 'LaTeX raw glossary'],
[0L, 'string', '=', '\\\\begin{theglossary}', 'LaTeX sorted glossary'],
[0L, 'string', '=', 'This is makeindex', 'Makeindex log file'],
[0L, 'string', '=', '**TI82**', 'TI-82 Graphing Calculator'],
[0L, 'string', '=', '**TI83**', 'TI-83 Graphing Calculator'],
[0L, 'string', '=', '**TI85**', 'TI-85 Graphing Calculator'],
[0L, 'string', '=', '**TI92**', 'TI-92 Graphing Calculator'],
[0L, 'string', '=', '**TI80**', 'TI-80 Graphing Calculator File.'],
[0L, 'string', '=', '**TI81**', 'TI-81 Graphing Calculator File.'],
[0L, 'string', '=', 'TZif', 'timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\001\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\002\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\003\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\004\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\005\000', 'old timezone data'],
[0L, 'string', '=', '\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\006\000', 'old timezone data'],
[0L, 'string', '=', '.\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', '\'\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', '\'.\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', '\\\\"', 'troff or preprocessor input text'],
[0L, 'string', '=', 'x T', 'ditroff text'],
[0L, 'string', '=', '@\357', 'very old (C/A/T) troff output data'],
[0L, 'string', '=', 'Interpress/Xerox', 'Xerox InterPress data'],
[0L, 'short', '=', 263L, 'unknown machine executable'],
[0L, 'short', '=', 264L, 'unknown pure executable'],
[0L, 'short', '=', 265L, 'PDP-11 separate I&D'],
[0L, 'short', '=', 267L, 'unknown pure executable'],
[0L, 'long', '=', 268L, 'unknown demand paged pure executable'],
[0L, 'long', '=', 269L, 'unknown demand paged pure executable'],
[0L, 'long', '=', 270L, 'unknown readable demand paged pure executable'],
[0L, 'string', '=', 'begin uuencoded', 'or xxencoded text'],
[0L, 'string', '=', 'xbtoa Begin', "btoa'd text"],
[0L, 'string', '=', '$\012ship', "ship'd binary text"],
[0L, 'string', '=', 'Decode the following with bdeco', 'bencoded News text'],
[11L, 'string', '=', 'must be converted with BinHex', 'BinHex binary text'],
[0L, 'short', '=', 610L, 'Perkin-Elmer executable'],
[0L, 'beshort', '=', 572L, 'amd 29k coff noprebar executable'],
[0L, 'beshort', '=', 1572L, 'amd 29k coff prebar executable'],
[0L, 'beshort', '=', 160007L, 'amd 29k coff archive'],
[6L, 'beshort', '=', 407L, 'unicos (cray) executable'],
[596L, 'string', '=', 'X\337\377\377', 'Ultrix core file'],
[0L, 'string', '=', 'Joy!peffpwpc', 'header for PowerPC PEF executable'],
[0L, 'lelong', '=', 101557L, 'VAX single precision APL workspace'],
[0L, 'lelong', '=', 101556L, 'VAX double precision APL workspace'],
[0L, 'lelong', '=', 407L, 'VAX executable'],
[0L, 'lelong', '=', 410L, 'VAX pure executable'],
[0L, 'lelong', '=', 413L, 'VAX demand paged pure executable'],
[0L, 'leshort', '=', 570L, 'VAX COFF executable'],
[0L, 'leshort', '=', 575L, 'VAX COFF pure executable'],
[0L, 'string', '=', 'LBLSIZE=', 'VICAR image data'],
[43L, 'string', '=', 'SFDU_LABEL', 'VICAR label file'],
[0L, 'short', '=', 21845L, 'VISX image file'],
[0L, 'string', '=', '\260\0000\000', 'VMS VAX executable'],
[0L, 'belong', '=', 50331648L, 'VMS Alpha executable'],
[1L, 'string', '=', 'WPC', '(Corel/WP)'],
[0L, 'string', '=', 'core', 'core file (Xenix)'],
[0L, 'byte', '=', 128L, '8086 relocatable (Microsoft)'],
[0L, 'leshort', '=', 65381L, 'x.out'],
[0L, 'leshort', '=', 518L, 'Microsoft a.out'],
[0L, 'leshort', '=', 320L, 'old Microsoft 8086 x.out'],
[0L, 'lelong', '=', 518L, 'b.out'],
[0L, 'leshort', '=', 1408L, 'XENIX 8086 relocatable or 80286 small model'],
[0L, 'long', '=', 59399L, 'object file (z8000 a.out)'],
[0L, 'long', '=', 59400L, 'pure object file (z8000 a.out)'],
[0L, 'long', '=', 59401L, 'separate object file (z8000 a.out)'],
[0L, 'long', '=', 59397L, 'overlay object file (z8000 a.out)'],
[0L, 'string', '=', 'ZyXEL\002', 'ZyXEL voice data'],
]
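# Note added for clarity: each entry in the table above is
#   [offset, type, operator, value, message]
# e.g. [0L, 'string', '=', '%PDF-', 'PDF document'] means "if the string at
# offset 0 equals '%PDF-', report 'PDF document'". The raw entries are wrapped
# into magicTest objects at import time by the loop near the end of this file.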
magicNumbers = []
def strToNum(n):
val = 0
col = long(1)
if n[:1] == 'x': n = '0' + n
if n[:2] == '0x':
# hex
n = string.lower(n[2:])
while len(n) > 0:
l = n[len(n) - 1]
val = val + string.hexdigits.index(l) * col
col = col * 16
n = n[:len(n)-1]
elif n[0] == '\\':
# octal
n = n[1:]
while len(n) > 0:
l = n[len(n) - 1]
if ord(l) < 48 or ord(l) > 57: break
val = val + int(l) * col
col = col * 8
n = n[:len(n)-1]
else:
val = string.atol(n)
return val
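# Illustrative example (added; not part of the original module): strToNum
# accepts hexadecimal ('0x1f' or 'x1f'), octal ('\037') and decimal ('31')
# strings. Assuming the implementation above, each call below yields 31.
def _strToNum_examples():
    assert strToNum('0x1f') == 31
    assert strToNum('\\037') == 31
    assert strToNum('31') == 31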
def unescape(s):
# replace string escape sequences
while 1:
m = re.search(r'\\', s)
if not m: break
x = m.start()+1
if m.end() == len(s):
# escaped space at end
s = s[:len(s)-1] + ' '
elif s[x:x+2] == '0x':
# hex ascii value
c = chr(strToNum(s[x:x+4]))
s = s[:x-1] + c + s[x+4:]
elif s[m.start()+1] == 'x':
# hex ascii value
c = chr(strToNum(s[x:x+3]))
s = s[:x-1] + c + s[x+3:]
elif ord(s[x]) > 47 and ord(s[x]) < 58:
# octal ascii value
end = x
while (ord(s[end]) > 47 and ord(s[end]) < 58):
end = end + 1
if end > len(s) - 1: break
c = chr(strToNum(s[x-1:end]))
s = s[:x-1] + c + s[end:]
elif s[x] == 'n':
# newline
s = s[:x-1] + '\n' + s[x+1:]
else:
break
return s
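# Illustrative example (added; not part of the original module): unescape()
# expands magic(5)-style escape sequences into raw characters.
def _unescape_examples():
    assert unescape('PK\\003\\004') == 'PK\x03\x04'  # octal escapes
    assert unescape('foo\\n') == 'foo\n'             # newline escape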
class magicTest:
def __init__(self, offset, t, op, value, msg, mask = None):
if t.count('&') > 0:
mask = strToNum(t[t.index('&')+1:])
t = t[:t.index('&')]
if type(offset) == type('a'):
self.offset = strToNum(offset)
else:
self.offset = offset
self.type = t
self.msg = msg
self.subTests = []
self.op = op
self.mask = mask
self.value = value
def test(self, data):
if self.mask:
data = data & self.mask
if self.op == '=':
if self.value == data: return self.msg
elif self.op == '<':
pass
elif self.op == '>':
pass
elif self.op == '&':
pass
elif self.op == '^':
pass
return None
def compare(self, data):
#print str([self.type, self.value, self.msg])
try:
if self.type == 'string':
c = ''; s = ''
for i in range(0, len(self.value)+1):
if i + self.offset > len(data) - 1: break
s = s + c
[c] = struct.unpack('c', data[self.offset + i])
data = s
elif self.type == 'short':
[data] = struct.unpack('h', data[self.offset : self.offset + 2])
elif self.type == 'leshort':
[data] = struct.unpack('<h', data[self.offset : self.offset + 2])
elif self.type == 'beshort':
[data] = struct.unpack('>H', data[self.offset : self.offset + 2])
elif self.type == 'long':
[data] = struct.unpack('l', data[self.offset : self.offset + 4])
elif self.type == 'lelong':
[data] = struct.unpack('<l', data[self.offset : self.offset + 4])
elif self.type == 'belong':
[data] = struct.unpack('>l', data[self.offset : self.offset + 4])
else:
#print 'UNKNOWN TYPE: ' + self.type
pass
except:
return None
# print str([self.msg, self.value, data])
return self.test(data)
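# Illustrative example (added; not part of the original module): a magicTest
# can be evaluated directly against an in-memory buffer. The constructor
# arguments mirror one entry of the magic table above.
def _magicTest_example():
    pdf = magicTest(0L, 'string', '=', '%PDF-', 'PDF document')
    assert pdf.compare('%PDF-1.4 sample') == 'PDF document'
    assert pdf.compare('GIF89a data') is None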
def load(file):
global magicNumbers
lines = open(file).readlines()
last = { 0: None }
for line in lines:
if re.match(r'\s*#', line):
# comment
continue
else:
# split up by space delimiters, and remove trailing space
line = string.rstrip(line)
line = re.split(r'\s*', line)
if len(line) < 3:
# bad line
continue
offset = line[0]
type = line[1]
value = line[2]
level = 0
while offset[0] == '>':
# count the level of the type
level = level + 1
offset = offset[1:]
l = magicNumbers
if level > 0:
l = last[level - 1].subTests
if offset[0] == '(':
# don't handle indirect offsets just yet
print 'SKIPPING ' + string.join(list(line[3:]))
pass
elif offset[0] == '&':
# don't handle relative offsets just yet
print 'SKIPPING ' + string.join(list(line[3:]))
pass
else:
operands = ['=', '<', '>', '&']
if operands.count(value[0]) > 0:
# a comparison operator is specified
op = value[0]
value = value[1:]
else:
print str([value, operands])
if len(value) >1 and value[0] == '\\' and operands.count(value[1]) >0:
# literal value that collides with operands is escaped
value = value[1:]
op = '='
mask = None
if type == 'string':
while 1:
value = unescape(value)
if value[len(value)-1] == ' ' and len(line) > 3:
# last value was an escaped space, join
value = value + line[3]
del line[3]
else:
break
else:
if value.count('&') != 0:
mask = value[(value.index('&') + 1):]
print 'MASK: ' + mask
value = value[:(value.index('&')+1)]
try: value = strToNum(value)
except: continue
msg = string.join(list(line[3:]))
new = magicTest(offset, type, op, value, msg, mask)
last[level] = new
l.append(new)
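# Note added for clarity: load() parses lines in the classic magic(5) format,
# for example (whitespace-delimited: offset, type, test value, message):
#
#   0       string          %PDF-           PDF document
#   >5      string          1.              version 1.x
#
# A leading '>' on the offset increases the nesting level (sub-tests); lines
# with '(' (indirect) or '&' (relative) offsets are currently skipped.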
def whatis(data):
for test in magicNumbers:
m = test.compare(data)
if m: return m
    # no matching magic number; is the data binary or text?
    for c in data:
        if ord(c) > 128:
            return 'data'
    # it's ASCII, so fall back to simple text heuristics on the first 8 kB
    if string.find(data, 'The', 0, 8192) > -1:
        return 'English text'
    if string.find(data, 'def', 0, 8192) > -1:
        return 'Python Source'
return 'ASCII text'
def file(file):
try:
return whatis(open(file, 'r').read(8192))
except Exception, e:
if str(e) == '[Errno 21] Is a directory':
return 'directory'
else:
raise e
#### BUILD DATA ####
#load('mime-magic')
#f = open('out', 'w')
#for m in magicNumbers:
# f.write(str([m.offset, m.type, m.op, m.value, m.msg]) + ',\n')
#f.close
import sys
for m in magic:
magicNumbers.append(magicTest(m[0], m[1], m[2], m[3], m[4]))
if __name__ == '__main__':
for arg in sys.argv[1:]:
msg = file(arg)
if msg:
print arg + ': ' + msg
else:
print arg + ': unknown'
| mit |
m2candre/ansible-modules-extras | cloud/cloudstack/cs_account.py | 14 | 12533 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_account
short_description: Manages accounts on Apache CloudStack based clouds.
description:
- Create, disable, lock, enable and remove accounts.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of account.
required: true
username:
description:
- Username of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
password:
description:
- Password of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
first_name:
description:
- First name of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
last_name:
description:
- Last name of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
email:
description:
- Email of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
timezone:
description:
- Timezone of the user to be created if account did not exist.
required: false
default: null
network_domain:
description:
- Network domain of the account.
required: false
default: null
account_type:
description:
- Type of the account.
required: false
default: 'user'
choices: [ 'user', 'root_admin', 'domain_admin' ]
domain:
description:
- Domain the account is related to.
required: false
default: 'ROOT'
state:
description:
- State of the account.
required: false
default: 'present'
choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked' ]
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# create an account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
username: customer_xy
password: S3Cur3
last_name: Doe
first_name: John
email: [email protected]
domain: CUSTOMERS
# Lock an existing account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
domain: CUSTOMERS
state: locked
# Disable an existing account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
domain: CUSTOMERS
state: disabled
# Enable an existing account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
domain: CUSTOMERS
state: enabled
# Remove an account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
domain: CUSTOMERS
state: absent
'''
RETURN = '''
---
name:
description: Name of the account.
returned: success
type: string
sample: [email protected]
account_type:
description: Type of the account.
returned: success
type: string
sample: user
account_state:
description: State of the account.
returned: success
type: string
sample: enabled
network_domain:
description: Network domain of the account.
returned: success
type: string
sample: example.local
domain:
description: Domain the account is related.
returned: success
type: string
sample: ROOT
'''
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackAccount(AnsibleCloudStack):
def __init__(self, module):
AnsibleCloudStack.__init__(self, module)
self.account = None
self.account_types = {
'user': 0,
'root_admin': 1,
'domain_admin': 2,
}
def get_account_type(self):
account_type = self.module.params.get('account_type')
return self.account_types[account_type]
def get_account(self):
if not self.account:
args = {}
args['listall'] = True
args['domainid'] = self.get_domain('id')
accounts = self.cs.listAccounts(**args)
if accounts:
account_name = self.module.params.get('name')
for a in accounts['account']:
if account_name in [ a['name'] ]:
self.account = a
break
return self.account
def enable_account(self):
account = self.get_account()
if not account:
self.module.fail_json(msg="Failed: account not present")
if account['state'].lower() != 'enabled':
self.result['changed'] = True
args = {}
args['id'] = account['id']
args['account'] = self.module.params.get('name')
args['domainid'] = self.get_domain('id')
if not self.module.check_mode:
res = self.cs.enableAccount(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
account = res['account']
return account
def lock_account(self):
return self.lock_or_disable_account(lock=True)
def disable_account(self):
return self.lock_or_disable_account()
def lock_or_disable_account(self, lock=False):
account = self.get_account()
if not account:
self.module.fail_json(msg="Failed: account not present")
# we need to enable the account to lock it.
if lock and account['state'].lower() == 'disabled':
account = self.enable_account()
if lock and account['state'].lower() != 'locked' \
or not lock and account['state'].lower() != 'disabled':
self.result['changed'] = True
args = {}
args['id'] = account['id']
args['account'] = self.module.params.get('name')
args['domainid'] = self.get_domain('id')
args['lock'] = lock
if not self.module.check_mode:
account = self.cs.disableAccount(**args)
if 'errortext' in account:
self.module.fail_json(msg="Failed: '%s'" % account['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
account = self._poll_job(account, 'account')
return account
def present_account(self):
missing_params = []
if not self.module.params.get('email'):
missing_params.append('email')
if not self.module.params.get('username'):
missing_params.append('username')
if not self.module.params.get('password'):
missing_params.append('password')
if not self.module.params.get('first_name'):
missing_params.append('first_name')
if not self.module.params.get('last_name'):
missing_params.append('last_name')
if missing_params:
self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
account = self.get_account()
if not account:
self.result['changed'] = True
args = {}
args['account'] = self.module.params.get('name')
args['domainid'] = self.get_domain('id')
args['accounttype'] = self.get_account_type()
args['networkdomain'] = self.module.params.get('network_domain')
args['username'] = self.module.params.get('username')
args['password'] = self.module.params.get('password')
args['firstname'] = self.module.params.get('first_name')
args['lastname'] = self.module.params.get('last_name')
args['email'] = self.module.params.get('email')
args['timezone'] = self.module.params.get('timezone')
if not self.module.check_mode:
res = self.cs.createAccount(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
account = res['account']
return account
def absent_account(self):
account = self.get_account()
if account:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.deleteAccount(id=account['id'])
                if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
res = self._poll_job(res, 'account')
return account
def get_result(self, account):
if account:
if 'name' in account:
self.result['name'] = account['name']
if 'accounttype' in account:
for key,value in self.account_types.items():
if value == account['accounttype']:
self.result['account_type'] = key
break
if 'state' in account:
self.result['account_state'] = account['state']
if 'domain' in account:
self.result['domain'] = account['domain']
if 'networkdomain' in account:
self.result['network_domain'] = account['networkdomain']
return self.result
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked' ], default='present'),
account_type = dict(choices=['user', 'root_admin', 'domain_admin'], default='user'),
network_domain = dict(default=None),
domain = dict(default='ROOT'),
email = dict(default=None),
first_name = dict(default=None),
last_name = dict(default=None),
username = dict(default=None),
password = dict(default=None),
timezone = dict(default=None),
poll_async = dict(choices=BOOLEANS, default=True),
api_key = dict(default=None),
api_secret = dict(default=None, no_log=True),
api_url = dict(default=None),
api_http_method = dict(choices=['get', 'post'], default='get'),
api_timeout = dict(type='int', default=10),
),
required_together = (
['api_key', 'api_secret', 'api_url'],
),
supports_check_mode=True
)
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
try:
acs_acc = AnsibleCloudStackAccount(module)
state = module.params.get('state')
if state in ['absent']:
account = acs_acc.absent_account()
elif state in ['enabled']:
account = acs_acc.enable_account()
elif state in ['disabled']:
account = acs_acc.disable_account()
elif state in ['locked']:
account = acs_acc.lock_account()
else:
account = acs_acc.present_account()
result = acs_acc.get_result(account)
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
The-Compiler/qutebrowser | tests/unit/scripts/test_run_vulture.py | 1 | 2574 | #!/usr/bin/env python3
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2020 Florian Bruhin (The Compiler) <[email protected]>
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
import sys
import textwrap
import pytest
from tests.helpers import utils
try:
from scripts.dev import run_vulture
except ImportError:
if hasattr(sys, 'frozen'):
# Tests aren't going to run anyways because of the mark
pass
else:
raise
pytestmark = [pytest.mark.not_frozen]
class VultureDir:
"""Fixture similar to pytest's testdir fixture for vulture.
Attributes:
_tmp_path: The pytest tmp_path fixture.
"""
def __init__(self, tmp_path):
self._tmp_path = tmp_path
def run(self):
"""Run vulture over all generated files and return the output."""
names = [p.name for p in self._tmp_path.glob('*')]
assert names
with utils.change_cwd(self._tmp_path):
return run_vulture.run(names)
def makepyfile(self, **kwargs):
"""Create a python file, similar to TestDir.makepyfile."""
for filename, data in kwargs.items():
text = textwrap.dedent(data)
(self._tmp_path / (filename + '.py')).write_text(text, 'utf-8')
@pytest.fixture
def vultdir(tmp_path):
return VultureDir(tmp_path)
def test_used(vultdir):
vultdir.makepyfile(foo="""
def foo():
pass
foo()
""")
assert not vultdir.run()
def test_unused_func(vultdir):
vultdir.makepyfile(foo="""
def foo():
pass
""")
msg = "foo.py:2: unused function 'foo' (60% confidence)"
assert vultdir.run() == [msg]
def test_unused_method_camelcase(vultdir):
"""Should be ignored because those are Qt methods."""
vultdir.makepyfile(foo="""
class Foo():
def fooBar(self):
pass
Foo()
""")
assert not vultdir.run()
| gpl-3.0 |
loli/semisupervisedforests | examples/model_selection/plot_roc_crossval.py | 247 | 3253 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
desmovalvo/virtualsib-part2 | manager/lib/SSAPLib.py | 8 | 2366 | #!/usr/bin/python
############################################################
#
# SSAP message templates
#
############################################################
SSAP_MESSAGE_CONFIRM_TEMPLATE = '''<SSAP_message>
<node_id>%s</node_id>
<space_id>%s</space_id>
<transaction_type>%s</transaction_type>
<message_type>CONFIRM</message_type>
<transaction_id>%s</transaction_id>
%s
</SSAP_message>'''
SSAP_SUCCESS_PARAM_TEMPLATE = '<parameter name = "status">%s</parameter>'
SSAP_BNODES_PARAM_TEMPLATE = '<parameter name = "bnodes"><urllist>%s</urllist></parameter>'
### Templates used to build query results
SSAP_RESULTS_SPARQL_PARAM_TEMPLATE = """
<parameter name="status">m3:Success</parameter>
<parameter name="results">
<sparql xmlns="http://www.w3.org/2005/sparql-results#">
%s
</sparql>
</parameter>
"""
SSAP_HEAD_TEMPLATE = """<head>
%s</head>"""
SSAP_VARIABLE_TEMPLATE = """<variable name="%s"/>
"""
SSAP_RESULTS_TEMPLATE = """<results>
%s</results>
"""
SSAP_RESULT_TEMPLATE = """<result>
%s</result>
"""
SSAP_BINDING_TEMPLATE = """<binding name="%s"><uri>%s</uri>
</binding>
"""
SSAP_MESSAGE_REQUEST_TEMPLATE = '''<SSAP_message>
<node_id>%s</node_id>
<space_id>%s</space_id>
<transaction_type>%s</transaction_type>
<message_type>REQUEST</message_type>
<transaction_id>%s</transaction_id>
%s
</SSAP_message>'''
SSAP_SUCCESS_PARAM_TEMPLATE = '<parameter name = "status">%s</parameter>'
SSAP_RESULTS_RDF_PARAM_TEMPLATE = """
<parameter name="status">m3:Success</parameter>
<parameter name="results">
%s
</parameter>
"""
SSAP_RESULTS_SUB_RDF_PARAM_TEMPLATE = """
<parameter name="status">m3:Success</parameter>
<parameter name="subscription_id">%s</parameter>
<parameter name="results">
%s
</parameter>
"""
SSAP_TRIPLE_TEMPLATE = """
<triple>
<subject type="uri">%s</subject>
<predicate>%s</predicate>
<object type="uri">%s</object>
</triple>
"""
SSAP_TRIPLE_LIST_TEMPLATE = """
<triple_list>
%s
</triple_list>
"""
SSAP_INDICATION_TEMPLATE = """
<SSAP_message>
<message_type>INDICATION</message_type>
<transaction_type>SUBSCRIBE</transaction_type>
<space_id>%s</space_id>
<node_id>%s</node_id>
<transaction_id>%s</transaction_id>
<parameter name="ind_sequence">%s</parameter>
<parameter name="subscription_id">%s</parameter>
<parameter name="new_results">%s</parameter>
<parameter name="obsolete_results">%s</parameter>
</SSAP_message>
"""
| lgpl-3.0 |
JioCloud/heat | heat/api/aws/ec2token.py | 4 | 9426 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import requests
from heat.openstack.common import gettextutils
from heat.api.aws.exception import HeatAPIException
gettextutils.install('heat')
from heat.common import wsgi
from heat.openstack.common import jsonutils as json
from oslo.config import cfg
from heat.openstack.common import importutils
import webob
from heat.api.aws import exception
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
opts = [
cfg.StrOpt('auth_uri',
default=None,
help=_("Authentication Endpoint URI")),
cfg.BoolOpt('multi_cloud',
default=False,
help=_('Allow orchestration of multiple clouds')),
cfg.ListOpt('allowed_auth_uris',
default=[],
help=_('Allowed keystone endpoints for auth_uri when '
'multi_cloud is enabled. At least one endpoint needs '
'to be specified.'))
]
cfg.CONF.register_opts(opts, group='ec2authtoken')
class EC2Token(wsgi.Middleware):
"""Authenticate an EC2 request with keystone and convert to token."""
def __init__(self, app, conf):
self.conf = conf
self.application = app
def _conf_get(self, name):
# try config from paste-deploy first
if name in self.conf:
return self.conf[name]
else:
return cfg.CONF.ec2authtoken[name]
def _conf_get_auth_uri(self):
auth_uri = self._conf_get('auth_uri')
if auth_uri:
return auth_uri
else:
# Import auth_token to have keystone_authtoken settings setup.
# We can use the auth_uri from the keystone_authtoken section
importutils.import_module('keystoneclient.middleware.auth_token')
return cfg.CONF.keystone_authtoken['auth_uri']
@staticmethod
def _conf_get_keystone_ec2_uri(auth_uri):
if auth_uri.endswith('/'):
return '%sec2tokens' % auth_uri
return '%s/ec2tokens' % auth_uri
def _get_signature(self, req):
"""
        Extract the signature from the request; it can be a GET/POST
        parameter or, for version 4, part of the 'Authorization' header.
- params['Signature'] == version 0,1,2,3
- params['X-Amz-Signature'] == version 4
- header 'Authorization' == version 4
see http://docs.aws.amazon.com/general/latest/gr/
sigv4-signed-request-examples.html
"""
sig = req.params.get('Signature') or req.params.get('X-Amz-Signature')
if sig is None and 'Authorization' in req.headers:
auth_str = req.headers['Authorization']
sig = auth_str.partition("Signature=")[2].split(',')[0]
return sig
def _get_access(self, req):
"""
        Extract the access key identifier. For v0/1/2/3 it is passed as the
        AWSAccessKeyId parameter; for v4 it is either an X-Amz-Credential
        parameter or a Credential= field in the 'Authorization' header string.
"""
access = req.params.get('AWSAccessKeyId')
if access is None:
cred_param = req.params.get('X-Amz-Credential')
if cred_param:
access = cred_param.split("/")[0]
if access is None and 'Authorization' in req.headers:
auth_str = req.headers['Authorization']
cred_str = auth_str.partition("Credential=")[2].split(',')[0]
access = cred_str.split("/")[0]
return access
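    # Illustrative example (added for clarity): for a SigV4 request the header
    # looks roughly like
    #   Authorization: AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/...,
    #       SignedHeaders=host;x-amz-date, Signature=ced6826de92d2bdeed8f846f...
    # so _get_access() returns 'AKIDEXAMPLE' and _get_signature() returns the
    # trailing hex digest.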
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if not self._conf_get('multi_cloud'):
return self._authorize(req, self._conf_get_auth_uri())
else:
# attempt to authorize for each configured allowed_auth_uris
# until one is successful.
# This is safe for the following reasons:
# 1. AWSAccessKeyId is a randomly generated sequence
# 2. No secret is transferred to validate a request
last_failure = None
for auth_uri in self._conf_get('allowed_auth_uris'):
try:
logger.debug("Attempt authorize on %s" % auth_uri)
return self._authorize(req, auth_uri)
except HeatAPIException as e:
logger.debug("Authorize failed: %s" % e.__class__)
last_failure = e
raise last_failure or exception.HeatAccessDeniedError()
def _authorize(self, req, auth_uri):
# Read request signature and access id.
# If we find X-Auth-User in the headers we ignore a key error
# here so that we can use both authentication methods.
# Returning here just means the user didn't supply AWS
# authentication and we'll let the app try native keystone next.
logger.info("Checking AWS credentials..")
signature = self._get_signature(req)
if not signature:
if 'X-Auth-User' in req.headers:
return self.application
else:
logger.info("No AWS Signature found.")
raise exception.HeatIncompleteSignatureError()
access = self._get_access(req)
if not access:
if 'X-Auth-User' in req.headers:
return self.application
else:
logger.info("No AWSAccessKeyId/Authorization Credential")
raise exception.HeatMissingAuthenticationTokenError()
logger.info("AWS credentials found, checking against keystone.")
if not auth_uri:
logger.error("Ec2Token authorization failed, no auth_uri "
"specified in config file")
raise exception.HeatInternalFailureError("Service misconfigured")
# Make a copy of args for authentication and signature verification.
auth_params = dict(req.params)
# 'Signature' param Not part of authentication args
auth_params.pop('Signature', None)
# Authenticate the request.
# AWS v4 authentication requires a hash of the body
body_hash = hashlib.sha256(req.body).hexdigest()
creds = {'ec2Credentials': {'access': access,
'signature': signature,
'host': req.host,
'verb': req.method,
'path': req.path,
'params': auth_params,
'headers': req.headers,
'body_hash': body_hash
}}
creds_json = json.dumps(creds)
headers = {'Content-Type': 'application/json'}
keystone_ec2_uri = self._conf_get_keystone_ec2_uri(auth_uri)
logger.info('Authenticating with %s' % keystone_ec2_uri)
response = requests.post(keystone_ec2_uri, data=creds_json,
headers=headers)
result = response.json()
try:
token_id = result['access']['token']['id']
tenant = result['access']['token']['tenant']['name']
tenant_id = result['access']['token']['tenant']['id']
logger.info("AWS authentication successful.")
except (AttributeError, KeyError):
logger.info("AWS authentication failure.")
# Try to extract the reason for failure so we can return the
# appropriate AWS error via raising an exception
try:
reason = result['error']['message']
except KeyError:
reason = None
if reason == "EC2 access key not found.":
raise exception.HeatInvalidClientTokenIdError()
elif reason == "EC2 signature not supplied.":
raise exception.HeatSignatureError()
else:
raise exception.HeatAccessDeniedError()
# Authenticated!
ec2_creds = {'ec2Credentials': {'access': access,
'signature': signature}}
req.headers['X-Auth-EC2-Creds'] = json.dumps(ec2_creds)
req.headers['X-Auth-Token'] = token_id
req.headers['X-Tenant-Name'] = tenant
req.headers['X-Tenant-Id'] = tenant_id
req.headers['X-Auth-URL'] = auth_uri
metadata = result['access'].get('metadata', {})
roles = metadata.get('roles', [])
req.headers['X-Roles'] = ','.join(roles)
return self.application
def EC2Token_filter_factory(global_conf, **local_conf):
"""
Factory method for paste.deploy
"""
conf = global_conf.copy()
conf.update(local_conf)
def filter(app):
return EC2Token(app, conf)
return filter
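# Illustrative paste.deploy snippet (an assumption -- adjust to your deployment):
#
#   [filter:ec2authtoken]
#   paste.filter_factory = heat.api.aws.ec2token:EC2Token_filter_factory
#   auth_uri = http://127.0.0.1:5000/v2.0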
| apache-2.0 |
txm/potato | django/contrib/gis/db/models/sql/where.py | 309 | 3938 | from django.db.models.fields import Field, FieldDoesNotExist
from django.db.models.sql.constants import LOOKUP_SEP
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.where import Constraint, WhereNode
from django.contrib.gis.db.models.fields import GeometryField
class GeoConstraint(Constraint):
"""
This subclass overrides `process` to better handle geographic SQL
construction.
"""
def __init__(self, init_constraint):
self.alias = init_constraint.alias
self.col = init_constraint.col
self.field = init_constraint.field
def process(self, lookup_type, value, connection):
if isinstance(value, SQLEvaluator):
# Make sure the F Expression destination field exists, and
# set an `srid` attribute with the same as that of the
# destination.
geo_fld = GeoWhereNode._check_geo_field(value.opts, value.expression.name)
if not geo_fld:
raise ValueError('No geographic field found in expression.')
value.srid = geo_fld.srid
db_type = self.field.db_type(connection=connection)
params = self.field.get_db_prep_lookup(lookup_type, value, connection=connection)
return (self.alias, self.col, db_type), params
class GeoWhereNode(WhereNode):
"""
    Used to represent the SQL where-clause for spatial databases --
    it is tied to the GeoQuery class that created it.
"""
def add(self, data, connector):
if isinstance(data, (list, tuple)):
obj, lookup_type, value = data
if ( isinstance(obj, Constraint) and
isinstance(obj.field, GeometryField) ):
data = (GeoConstraint(obj), lookup_type, value)
super(GeoWhereNode, self).add(data, connector)
def make_atom(self, child, qn, connection):
lvalue, lookup_type, value_annot, params_or_value = child
if isinstance(lvalue, GeoConstraint):
data, params = lvalue.process(lookup_type, params_or_value, connection)
spatial_sql = connection.ops.spatial_lookup_sql(data, lookup_type, params_or_value, lvalue.field, qn)
return spatial_sql, params
else:
return super(GeoWhereNode, self).make_atom(child, qn, connection)
@classmethod
def _check_geo_field(cls, opts, lookup):
"""
Utility for checking the given lookup with the given model options.
The lookup is a string either specifying the geographic field, e.g.
        'point' or 'the_geom', or a related lookup on a geographic field like
'address__point'.
If a GeometryField exists according to the given lookup on the model
        options, it will be returned. Otherwise returns False.
"""
# This takes into account the situation where the lookup is a
# lookup to a related geographic field, e.g., 'address__point'.
field_list = lookup.split(LOOKUP_SEP)
# Reversing so list operates like a queue of related lookups,
# and popping the top lookup.
field_list.reverse()
fld_name = field_list.pop()
try:
geo_fld = opts.get_field(fld_name)
# If the field list is still around, then it means that the
# lookup was for a geometry field across a relationship --
# thus we keep on getting the related model options and the
# model field associated with the next field in the list
# until there's no more left.
while len(field_list):
opts = geo_fld.rel.to._meta
geo_fld = opts.get_field(field_list.pop())
except (FieldDoesNotExist, AttributeError):
return False
# Finally, make sure we got a Geographic field and return.
if isinstance(geo_fld, GeometryField):
return geo_fld
else:
return False
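# Illustrative example (added; model names are hypothetical): given
#
#   class Address(models.Model):
#       point = PointField()
#   class House(models.Model):
#       address = models.ForeignKey(Address)
#
# GeoWhereNode._check_geo_field(House._meta, 'address__point') follows the
# relation and returns the PointField, while a non-geographic lookup such as
# 'address' returns False.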
| bsd-3-clause |
iAmMrinal0/CouchPotatoServer | libs/chardet/utf8prober.py | 2919 | 2652 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == constants.eError:
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
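# Worked example (added for clarity): with fewer than 6 multi-byte sequences the
# confidence is 1 - 0.99 * 0.5**n, so n = 1 gives 0.505 and n = 3 gives ~0.876;
# from n = 6 onwards the prober simply reports 0.99.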
| gpl-3.0 |
sarthakmeh03/django | tests/select_related_regress/models.py | 282 | 3677 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Building(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return "Building: %s" % self.name
@python_2_unicode_compatible
class Device(models.Model):
building = models.ForeignKey('Building', models.CASCADE)
name = models.CharField(max_length=10)
def __str__(self):
return "device '%s' in building %s" % (self.name, self.building)
@python_2_unicode_compatible
class Port(models.Model):
device = models.ForeignKey('Device', models.CASCADE)
port_number = models.CharField(max_length=10)
def __str__(self):
return "%s/%s" % (self.device.name, self.port_number)
@python_2_unicode_compatible
class Connection(models.Model):
start = models.ForeignKey(
Port,
models.CASCADE,
related_name='connection_start',
unique=True,
)
end = models.ForeignKey(
Port,
models.CASCADE,
related_name='connection_end',
unique=True,
)
def __str__(self):
return "%s to %s" % (self.start, self.end)
# Another non-tree hierarchy that exercises code paths similar to the above
# example, but in a slightly different configuration.
class TUser(models.Model):
name = models.CharField(max_length=200)
class Person(models.Model):
user = models.ForeignKey(TUser, models.CASCADE, unique=True)
class Organizer(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
class Student(models.Model):
person = models.ForeignKey(Person, models.CASCADE)
class Class(models.Model):
org = models.ForeignKey(Organizer, models.CASCADE)
class Enrollment(models.Model):
std = models.ForeignKey(Student, models.CASCADE)
cls = models.ForeignKey(Class, models.CASCADE)
# Models for testing bug #8036.
class Country(models.Model):
name = models.CharField(max_length=50)
class State(models.Model):
name = models.CharField(max_length=50)
country = models.ForeignKey(Country, models.CASCADE)
class ClientStatus(models.Model):
name = models.CharField(max_length=50)
class Client(models.Model):
name = models.CharField(max_length=50)
state = models.ForeignKey(State, models.SET_NULL, null=True)
status = models.ForeignKey(ClientStatus, models.CASCADE)
class SpecialClient(Client):
value = models.IntegerField()
# Some model inheritance exercises
@python_2_unicode_compatible
class Parent(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
class Child(Parent):
value = models.IntegerField()
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=10)
child = models.ForeignKey(Child, models.SET_NULL, null=True)
def __str__(self):
return self.name
# Models for testing bug #19870.
@python_2_unicode_compatible
class Fowl(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
class Hen(Fowl):
pass
class Chick(Fowl):
mother = models.ForeignKey(Hen, models.CASCADE)
class Base(models.Model):
name = models.CharField(max_length=10)
lots_of_text = models.TextField()
class Meta:
abstract = True
class A(Base):
a_field = models.CharField(max_length=10)
class B(Base):
b_field = models.CharField(max_length=10)
class C(Base):
c_a = models.ForeignKey(A, models.CASCADE)
c_b = models.ForeignKey(B, models.CASCADE)
is_published = models.BooleanField(default=False)
| bsd-3-clause |
dimroc/tensorflow-mnist-tutorial | lib/python3.6/site-packages/numpy/polynomial/hermite.py | 22 | 57847 | """
Objects for dealing with Hermite series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite series, including a `Hermite` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermdomain` -- Hermite series default domain, [-1,1].
- `hermzero` -- Hermite series that evaluates identically to 0.
- `hermone` -- Hermite series that evaluates identically to 1.
- `hermx` -- Hermite series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``.
- `hermadd` -- add two Hermite series.
- `hermsub` -- subtract one Hermite series from another.
- `hermmul` -- multiply two Hermite series.
- `hermdiv` -- divide one Hermite series by another.
- `hermval` -- evaluate a Hermite series at given points.
- `hermval2d` -- evaluate a 2D Hermite series at given points.
- `hermval3d` -- evaluate a 3D Hermite series at given points.
- `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product.
- `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product.
Calculus
--------
- `hermder` -- differentiate a Hermite series.
- `hermint` -- integrate a Hermite series.
Misc Functions
--------------
- `hermfromroots` -- create a Hermite series with specified roots.
- `hermroots` -- find the roots of a Hermite series.
- `hermvander` -- Vandermonde-like matrix for Hermite polynomials.
- `hermvander2d` -- Vandermonde-like matrix for 2D power series.
- `hermvander3d` -- Vandermonde-like matrix for 3D power series.
- `hermgauss` -- Gauss-Hermite quadrature, points and weights.
- `hermweight` -- Hermite weight function.
- `hermcompanion` -- symmetrized companion matrix in Hermite form.
- `hermfit` -- least-squares fit returning a Hermite series.
- `hermtrim` -- trim leading coefficients from a Hermite series.
- `hermline` -- Hermite series of given straight line.
- `herm2poly` -- convert a Hermite series to a polynomial.
- `poly2herm` -- convert a polynomial to a Hermite series.
Classes
-------
- `Hermite` -- A Hermite series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline', 'hermadd',
'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow', 'hermval',
'hermder', 'hermint', 'herm2poly', 'poly2herm', 'hermfromroots',
'hermvander', 'hermfit', 'hermtrim', 'hermroots', 'Hermite',
'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d', 'hermvander2d',
'hermvander3d', 'hermcompanion', 'hermgauss', 'hermweight']
hermtrim = pu.trimcoef
def poly2herm(pol):
"""
poly2herm(pol)
Convert a polynomial to a Hermite series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Hermite series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Hermite
series.
See Also
--------
herm2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite import poly2herm
>>> poly2herm(np.arange(4))
array([ 1. , 2.75 , 0.5 , 0.375])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = hermadd(hermmulx(res), pol[i])
return res
def herm2poly(c):
"""
Convert a Hermite series to a polynomial.
Convert an array representing the coefficients of a Hermite series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Hermite series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2herm
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite import herm2poly
>>> herm2poly([ 1. , 2.75 , 0.5 , 0.375])
array([ 0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
if n == 2:
c[1] *= 2
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], c1*(2*(i - 1)))
c1 = polyadd(tmp, polymulx(c1)*2)
return polyadd(c0, polymulx(c1)*2)
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Hermite
hermdomain = np.array([-1, 1])
# Hermite coefficients representing zero.
hermzero = np.array([0])
# Hermite coefficients representing one.
hermone = np.array([1])
# Hermite coefficients representing the identity x.
hermx = np.array([0, 1/2])
def hermline(off, scl):
"""
Hermite series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Hermite series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> from numpy.polynomial.hermite import hermline, hermval
>>> hermval(0,hermline(3, 2))
3.0
>>> hermval(1,hermline(3, 2))
5.0
"""
if scl != 0:
return np.array([off, scl/2])
else:
return np.array([off])
def hermfromroots(roots):
"""
Generate a Hermite series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Hermite form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Hermite form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, chebfromroots,
hermefromroots.
Examples
--------
>>> from numpy.polynomial.hermite import hermfromroots, hermval
>>> coef = hermfromroots((-1, 0, 1))
>>> hermval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = hermfromroots((-1j, 1j))
>>> hermval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [hermline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [hermmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = hermmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def hermadd(c1, c2):
"""
Add one Hermite series to another.
Returns the sum of two Hermite series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Hermite series of their sum.
See Also
--------
hermsub, hermmul, hermdiv, hermpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Hermite series
is a Hermite series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite import hermadd
>>> hermadd([1, 2, 3], [1, 2, 3, 4])
array([ 2., 4., 6., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermsub(c1, c2):
"""
Subtract one Hermite series from another.
Returns the difference of two Hermite series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their difference.
See Also
--------
hermadd, hermmul, hermdiv, hermpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Hermite
series is a Hermite series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite import hermsub
>>> hermsub([1, 2, 3, 4], [1, 2, 3])
array([ 0., 0., 0., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermmulx(c):
"""Multiply a Hermite series by x.
Multiply the Hermite series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Hermite
polynomials in the form
.. math::
xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x))
Examples
--------
>>> from numpy.polynomial.hermite import hermmulx
>>> hermmulx([1, 2, 3])
array([ 2. , 6.5, 1. , 1.5])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]/2
for i in range(1, len(c)):
prd[i + 1] = c[i]/2
prd[i - 1] += c[i]*i
return prd
def hermmul(c1, c2):
"""
Multiply one Hermite series by another.
Returns the product of two Hermite series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their product.
See Also
--------
hermadd, hermsub, hermdiv, hermpow
Notes
-----
In general, the (polynomial) product of two Hermite series results in terms
that are not in the Hermite polynomial basis set. Thus, to express
the product as a Hermite series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite import hermmul
>>> hermmul([1, 2, 3], [0, 1, 2])
array([ 52., 29., 52., 7., 6.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1)))
c1 = hermadd(tmp, hermmulx(c1)*2)
return hermadd(c0, hermmulx(c1)*2)
def hermdiv(c1, c2):
"""
Divide one Hermite series by another.
Returns the quotient-with-remainder of two Hermite series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Hermite series coefficients representing the quotient and
remainder.
See Also
--------
hermadd, hermsub, hermmul, hermpow
Notes
-----
In general, the (polynomial) division of one Hermite series by another
results in quotient and remainder terms that are not in the Hermite
polynomial basis set. Thus, to express these results as a Hermite
series, it is necessary to "reproject" the results onto the Hermite
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite import hermdiv
>>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 0.]))
>>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 2., 2.]))
>>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 1., 1.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = hermmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def hermpow(c, pow, maxpower=16):
"""Raise a Hermite series to a power.
Returns the Hermite series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Hermite series of power.
See Also
--------
hermadd, hermsub, hermmul, hermdiv
Examples
--------
>>> from numpy.polynomial.hermite import hermpow
>>> hermpow([1, 2, 3], 2)
array([ 81., 52., 82., 12., 9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = hermmul(prd, c)
return prd
def hermder(c, m=1, scl=1, axis=0):
"""
Differentiate a Hermite series.
Returns the Hermite series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*H_0 + 2*H_1 + 3*H_2``
while [[1,2],[1,2]] represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) +
2*H_0(x)*H_1(y) + 2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Hermite series coefficients. If `c` is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Hermite series of the derivative.
See Also
--------
hermint
Notes
-----
In general, the result of differentiating a Hermite series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.hermite import hermder
>>> hermder([ 1. , 0.5, 0.5, 0.5])
array([ 1., 2., 3.])
>>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2)
array([ 1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 0, -1):
der[j - 1] = (2*j)*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Hermite series.
Returns the Hermite series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
hermder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a Hermite series needs
to be "reprojected" onto the Hermite basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite import hermint
>>> hermint([1,2,3]) # integrate once, value 0 at 0.
array([ 1. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ])
>>> hermint([1,2,3], k=1) # integrate once, value 1 at 0.
array([ 2. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1
array([-2. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1)
array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]/2
for j in range(1, n):
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[0] += k[i] - hermval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def hermval(x, c, tensor=True):
"""
Evaluate an Hermite series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
hermval2d, hermgrid2d, hermval3d, hermgrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.hermite import hermval
>>> coef = [1,2,3]
>>> hermval(1, coef)
11.0
>>> hermval([[1,2],[3,4]], coef)
array([[ 11., 51.],
[ 115., 203.]])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
x2 = x*2
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - c1*(2*(nd - 1))
c1 = tmp + c1*x2
return c0 + c1*x2
def hermval2d(x, y, c):
"""
Evaluate a 2-D Hermite series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
hermval, hermgrid2d, hermval3d, hermgrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = hermval(x, c)
c = hermval(y, c, tensor=False)
return c
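# Minimal check sketch (illustrative editorial example; the helper name below
# is not part of the module's API): hermval2d should agree with an explicit
# double sum over the basis products H_i(x)*H_j(y).
def _example_hermval2d_check():
    c = np.array([[1., 2.], [3., 4.]])
    x, y = 0.5, -0.25
    direct = sum(c[i, j]
                 * hermval(x, [0]*i + [1])    # H_i(x)
                 * hermval(y, [0]*j + [1])    # H_j(y)
                 for i in range(c.shape[0])
                 for j in range(c.shape[1]))
    return np.allclose(hermval2d(x, y, c), direct)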
def hermgrid2d(x, y, c):
"""
Evaluate a 2-D Hermite series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
hermval, hermval2d, hermval3d, hermgrid3d
Notes
-----
.. versionadded::1.7.0
"""
c = hermval(x, c)
c = hermval(y, c)
return c
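# Usage sketch (illustrative editorial example; helper name is hypothetical):
# the grid evaluation is equivalent to hermval2d applied to a meshgrid of the
# same points.
def _example_hermgrid2d_check():
    c = np.array([[1., 2.], [3., 4.]])
    x = np.array([0., 1.])
    y = np.array([-1., 0., 1.])
    grid = hermgrid2d(x, y, c)                      # shape (2, 3)
    xx, yy = np.meshgrid(x, y, indexing='ij')
    return np.allclose(grid, hermval2d(xx, yy, c))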
def hermval3d(x, y, z, c):
"""
Evaluate a 3-D Hermite series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
hermval, hermval2d, hermgrid2d, hermgrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = hermval(x, c)
c = hermval(y, c, tensor=False)
c = hermval(z, c, tensor=False)
return c
def hermgrid3d(x, y, z, c):
"""
Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the three dimensional polynomial at points in the Cartesian
product of `x`, `y`, and `z`.
See Also
--------
hermval, hermval2d, hermgrid2d, hermval3d
Notes
-----
.. versionadded::1.7.0
"""
c = hermval(x, c)
c = hermval(y, c)
c = hermval(z, c)
return c
def hermvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = H_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Hermite polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
``hermval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Hermite series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Hermite polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.hermite import hermvander
>>> x = np.array([-1, 0, 1])
>>> hermvander(x, 3)
array([[ 1., -2., 2., 4.],
[ 1., 0., -2., -0.],
[ 1., 2., 2., -4.]])
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
x2 = x*2
v[1] = x2
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1)))
return np.rollaxis(v, 0, v.ndim)
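# Check sketch (illustrative editorial example; helper name is hypothetical)
# of the equivalence stated in the docstring above: np.dot(V, c) matches
# hermval(x, c) up to roundoff.
def _example_hermvander_check():
    x = np.linspace(-1, 1, 5)
    c = np.array([1., 2., 3.])
    V = hermvander(x, len(c) - 1)
    return np.allclose(np.dot(V, c), hermval(x, c))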
def hermvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = H_i(x) * H_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Hermite polynomials.
If ``V = hermvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``hermval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Hermite
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
hermvander, hermvander3d, hermval2d, hermval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = hermvander(x, degx)
vy = hermvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
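# Check sketch (illustrative editorial example; helper name is hypothetical)
# of the documented equivalence np.dot(V, c.flat) == hermval2d(x, y, c) for a
# 2-D coefficient array.
def _example_hermvander2d_check():
    x = np.array([0., 0.5, 1.])
    y = np.array([-1., 0., 1.])
    c = np.arange(6.).reshape(2, 3)                 # shape (xdeg+1, ydeg+1)
    V = hermvander2d(x, y, [1, 2])
    return np.allclose(np.dot(V, c.flat), hermval2d(x, y, c))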
def hermvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z),
where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Hermite polynomials.
If ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Hermite
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
hermvander, hermvander2d, hermval2d, hermval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = hermvander(x, degx)
vy = hermvander(y, degy)
vz = hermvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def hermfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Hermite series to data.
Return the coefficients of a Hermite series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Hermite coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, legfit, lagfit, polyfit, hermefit
hermval : Evaluates a Hermite series.
hermvander : Vandermonde matrix of Hermite series.
hermweight : Hermite weight function
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Hermite series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Hermite series are probably most useful when the data can be
approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the Hermite
weight. In that case the weight ``sqrt(w(x[i]))`` should be used
together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `hermweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
>>> from numpy.polynomial.hermite import hermfit, hermval
>>> x = np.linspace(-10, 10)
>>> err = np.random.randn(len(x))/10
>>> y = hermval(x, [1, 2, 3]) + err
>>> hermfit(x, y, 2)
array([ 0.97902637, 1.99849131, 3.00006 ])
"""
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
deg = np.asarray(deg)
# check arguments.
if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
raise TypeError("deg must be an int or non-empty 1-D array of int")
if deg.min() < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
if deg.ndim == 0:
lmax = deg
order = lmax + 1
van = hermvander(x, lmax)
else:
deg = np.sort(deg)
lmax = deg[-1]
order = len(deg)
van = hermvander(x, lmax)[:, deg]
# set up the least squares matrices in transposed form
lhs = van.T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# Expand c to include non-fitted coefficients which are set to zero
if deg.ndim > 0:
if c.ndim == 2:
cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
else:
cc = np.zeros(lmax+1, dtype=c.dtype)
cc[deg] = c
c = cc
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning, stacklevel=2)
if full:
return c, [resids, rank, s, rcond]
else:
return c
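# Usage sketch (illustrative editorial example; helper name is hypothetical):
# pass weights derived from the Hermite weight function, as suggested in the
# Notes above. The data here are exact, so the fit recovers [1, 2, 3].
def _example_hermfit_weighted():
    x = np.linspace(-3, 3, 50)
    y = hermval(x, [1., 2., 3.])
    w = np.sqrt(np.exp(-x**2))        # sqrt of the Hermite weight at x
    return hermfit(x, y, 2, w=w)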
def hermcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is an Hermite basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-.5*c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1))))
scl = np.multiply.accumulate(scl)[::-1]
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.sqrt(.5*np.arange(1, n))
bot[...] = top
mat[:, -1] -= scl*c[:-1]/(2.0*c[-1])
return mat
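# Check sketch (illustrative editorial example; helper name is hypothetical):
# the eigenvalues of the scaled companion matrix are the roots of the series,
# here built from the known roots -1, 0, 1 via hermfromroots.
def _example_hermcompanion_check():
    c = hermfromroots([-1., 0., 1.])
    eig = np.sort(np.real(la.eigvals(hermcompanion(c))))
    return np.allclose(eig, [-1., 0., 1.])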
def hermroots(c):
"""
Compute the roots of a Hermite series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * H_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, chebroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The Hermite series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.hermite import hermroots, hermfromroots
>>> coef = hermfromroots([-1, 0, 1])
>>> coef
array([ 0. , 0.25 , 0. , 0.125])
>>> hermroots(coef)
array([ -1.00000000e+00, -1.38777878e-17, 1.00000000e+00])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-.5*c[0]/c[1]])
m = hermcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def _normed_hermite_n(x, n):
"""
Evaluate a normalized Hermite polynomial.
Compute the value of the normalized Hermite polynomial of degree ``n``
at the points ``x``.
Parameters
----------
x : ndarray of double.
Points at which to evaluate the function
n : int
Degree of the normalized Hermite function to be evaluated.
Returns
-------
values : ndarray
Array of the same shape as `x` containing the values of the normalized
Hermite polynomial of degree `n`.
Notes
-----
.. versionadded:: 1.10.0
This function is needed for finding the Gauss points and integration
weights for high degrees. The values of the standard Hermite functions
overflow when n >= 207.
"""
if n == 0:
return np.ones(x.shape)/np.sqrt(np.sqrt(np.pi))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(np.pi))
nd = float(n)
for i in range(n - 1):
tmp = c0
c0 = -c1*np.sqrt((nd - 1.)/nd)
c1 = tmp + c1*x*np.sqrt(2./nd)
nd = nd - 1.0
return c0 + c1*x*np.sqrt(2)
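# Check sketch (illustrative editorial example; helper name is hypothetical,
# and it assumes normalization with respect to the weight exp(-x**2)): for
# modest n the result should match H_n(x) / sqrt(2**n * n! * sqrt(pi))
# evaluated via hermval.
def _example_normed_hermite_check(n=5):
    import math
    x = np.linspace(-2., 2., 9)
    norm = np.sqrt(2.0**n * math.factorial(n) * np.sqrt(np.pi))
    return np.allclose(_normed_hermite_n(x, n),
                       hermval(x, [0]*n + [1])/norm)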
def hermgauss(deg):
"""
Gauss-Hermite quadrature.
Computes the sample points and weights for Gauss-Hermite quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-\\infty, \\infty]`
with the weight function :math:`f(x) = \\exp(-x^2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded::1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`H_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a non-negative integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1], dtype=np.float64)
m = hermcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = _normed_hermite_n(x, ideg)
df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg)
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = _normed_hermite_n(x, ideg - 1)
fm /= np.abs(fm).max()
w = 1/(fm * fm)
# for Hermite we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= np.sqrt(np.pi) / w.sum()
return x, w
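# Usage sketch (illustrative editorial example; helper name is hypothetical):
# a 3-point rule integrates x**2 * exp(-x**2) over the real line exactly; the
# true value is sqrt(pi)/2.
def _example_hermgauss_quadrature():
    x, w = hermgauss(3)
    return np.allclose(np.sum(w * x**2), np.sqrt(np.pi)/2)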
def hermweight(x):
"""
Weight function of the Hermite polynomials.
The weight function is :math:`\\exp(-x^2)` and the interval of
integration is :math:`[-\\infty, \\infty]`. The Hermite polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded::1.7.0
"""
w = np.exp(-x**2)
return w
#
# Hermite series class
#
class Hermite(ABCPolyBase):
"""An Hermite series class.
The Hermite class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Hermite coefficients in order of increasing degree, i.e,
``(1, 2, 3)`` gives ``1*H_0(x) + 2*H_1(x) + 3*H_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(hermadd)
_sub = staticmethod(hermsub)
_mul = staticmethod(hermmul)
_div = staticmethod(hermdiv)
_pow = staticmethod(hermpow)
_val = staticmethod(hermval)
_int = staticmethod(hermint)
_der = staticmethod(hermder)
_fit = staticmethod(hermfit)
_line = staticmethod(hermline)
_roots = staticmethod(hermroots)
_fromroots = staticmethod(hermfromroots)
# Virtual properties
nickname = 'herm'
domain = np.array(hermdomain)
window = np.array(hermdomain)
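# Usage sketch (illustrative editorial example; helper name is hypothetical)
# of the class interface inherited from ABCPolyBase: fit data, evaluate, and
# inspect the coefficients.
def _example_hermite_class():
    x = np.linspace(-1., 1., 21)
    y = x**2
    p = Hermite.fit(x, y, 2)      # least-squares fit of degree 2
    return p(0.5), p.coef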
| apache-2.0 |
klueska/django-filebrowser | filebrowser/management/commands/fb_version_remove.py | 2 | 5312 | # coding: utf-8
# PYTHON IMPORTS
import os, re
# DJANGO IMPORTS
from django.core.management.base import BaseCommand, CommandError
# FILEBROWSER IMPORTS
from filebrowser.settings import EXTENSION_LIST, EXCLUDE, MEDIA_ROOT, DIRECTORY, VERSIONS, EXTENSIONS
class Command(BaseCommand):
args = '<media_path>'
help = "Remove Image-Versions within FILEBROWSER_DIRECTORY/MEDIA_ROOT."
def handle(self, *args, **options):
media_path = ""
if len(args):
media_path = args[0]
path = os.path.join(MEDIA_ROOT, media_path)
if not os.path.isdir(path):
raise CommandError('<media_path> must be a directory in MEDIA_ROOT. "%s" is not a directory.' % path)
self.stdout.write("\n%s\n" % self.help)
self.stdout.write("in this case: %s\n" % path)
# get suffix or prefix
default_prefix_or_suffix = "s"
while 1:
self.stdout.write('\nOlder versions of the FileBrowser used to prefix the filename with the version name.\n')
self.stdout.write('Current version of the FileBrowser adds the version name as suffix.\n')
prefix_or_suffix = raw_input('"p" for prefix or "s" for suffix (leave blank for "%s"): ' % default_prefix_or_suffix)
if default_prefix_or_suffix and prefix_or_suffix == '':
prefix_or_suffix = default_prefix_or_suffix
if prefix_or_suffix != "s" and prefix_or_suffix != "p":
self.stderr.write('Error: "p" and "s" are the only valid inputs.\n')
prefix_or_suffix = None
continue
break
# get version name
while 1:
version_name = raw_input('\nversion name as defined with VERSIONS: ')
if version_name == "":
self.stderr.write('Error: You have to enter a version name.\n')
version_name = None
continue
else:
break
# get list of all matching files
files = self.get_files(path, version_name, (prefix_or_suffix == "p"))
# output (short version) of files to be deleted
if len(files) > 15:
self.stdout.write('\nFirst/Last 5 files to remove:\n')
for current_file in files[:5]:
self.stdout.write('%s\n' % current_file)
self.stdout.write('...\n')
self.stdout.write('...\n')
for current_file in files[len(files)-5:]:
self.stdout.write('%s\n' % current_file)
else:
self.stdout.write('\nFiles to remove:\n')
for current_file in files:
self.stdout.write('%s\n' % current_file)
# no files...done
if len(files) == 0:
self.stdout.write('0 files removed.\n\n')
return
else:
self.stdout.write('%d file(s) will be removed.\n\n' % len(files))
# ask to make sure
do_remove = ""
self.stdout.write('Are you sure you want to delete these files?\n')
do_remove = raw_input('"y" for Yes or "n" for No (leave blank for "n"): ')
# if "yes" we delete. any different case we finish without removing anything
if do_remove == "y":
for current_file in files:
os.remove(current_file)
self.stdout.write('%d file(s) removed.\n\n' % len(files))
else:
self.stdout.write('No files removed.\n\n')
return
# get files matching:
# path: search recursively in this path (os.walk)
# version_name: string used as prefix/suffix of the filename
# search_for_prefix: if true we match against the start of the filename (default is the end)
def get_files(self, path, version_name, search_for_prefix):
file_list = []
# Precompile regular expressions
filter_re = []
for exp in EXCLUDE:
filter_re.append(re.compile(exp))
# walk through the filebrowser directory
# for all files (except file versions themselves and excludes)
for dirpath,dirnames,filenames in os.walk(path, followlinks=True):
for filename in filenames:
filtered = False
# no "hidden" files (stating with ".")
if filename.startswith('.'):
continue
# check the exclude list
for re_prefix in filter_re:
if re_prefix.search(filename):
filtered = True
if filtered:
continue
(filename_noext, extension) = os.path.splitext(filename)
# images only
if extension in EXTENSIONS["Image"]:
# if image matches with version_name we add it to the file_list
if search_for_prefix:
if filename_noext.startswith(version_name + "_"):
file_list.append(os.path.join(dirpath, filename))
elif filename_noext.endswith("_" + version_name):
file_list.append(os.path.join(dirpath, filename))
return file_list
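# Usage sketch (illustrative only, assuming a configured Django project with
# django-filebrowser installed):
#
#   python manage.py fb_version_remove uploads/
#
# The command then prompts interactively for prefix/suffix handling and the
# version name before listing and (optionally) deleting the matching files.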
| bsd-3-clause |
40223136/-2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/html/entities.py | 814 | 75240 | """HTML character entity references."""
# maps the HTML entity name to the Unicode codepoint
name2codepoint = {
'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1
'Acirc': 0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1
'Agrave': 0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1
'Alpha': 0x0391, # greek capital letter alpha, U+0391
'Aring': 0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1
'Atilde': 0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1
'Auml': 0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1
'Beta': 0x0392, # greek capital letter beta, U+0392
'Ccedil': 0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1
'Chi': 0x03a7, # greek capital letter chi, U+03A7
'Dagger': 0x2021, # double dagger, U+2021 ISOpub
'Delta': 0x0394, # greek capital letter delta, U+0394 ISOgrk3
'ETH': 0x00d0, # latin capital letter ETH, U+00D0 ISOlat1
'Eacute': 0x00c9, # latin capital letter E with acute, U+00C9 ISOlat1
'Ecirc': 0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1
'Egrave': 0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1
'Epsilon': 0x0395, # greek capital letter epsilon, U+0395
'Eta': 0x0397, # greek capital letter eta, U+0397
'Euml': 0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1
'Gamma': 0x0393, # greek capital letter gamma, U+0393 ISOgrk3
'Iacute': 0x00cd, # latin capital letter I with acute, U+00CD ISOlat1
'Icirc': 0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1
'Igrave': 0x00cc, # latin capital letter I with grave, U+00CC ISOlat1
'Iota': 0x0399, # greek capital letter iota, U+0399
'Iuml': 0x00cf, # latin capital letter I with diaeresis, U+00CF ISOlat1
'Kappa': 0x039a, # greek capital letter kappa, U+039A
'Lambda': 0x039b, # greek capital letter lambda, U+039B ISOgrk3
'Mu': 0x039c, # greek capital letter mu, U+039C
'Ntilde': 0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1
'Nu': 0x039d, # greek capital letter nu, U+039D
'OElig': 0x0152, # latin capital ligature OE, U+0152 ISOlat2
'Oacute': 0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1
'Ocirc': 0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1
'Ograve': 0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1
'Omega': 0x03a9, # greek capital letter omega, U+03A9 ISOgrk3
'Omicron': 0x039f, # greek capital letter omicron, U+039F
'Oslash': 0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1
'Otilde': 0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1
'Ouml': 0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1
'Phi': 0x03a6, # greek capital letter phi, U+03A6 ISOgrk3
'Pi': 0x03a0, # greek capital letter pi, U+03A0 ISOgrk3
'Prime': 0x2033, # double prime = seconds = inches, U+2033 ISOtech
'Psi': 0x03a8, # greek capital letter psi, U+03A8 ISOgrk3
'Rho': 0x03a1, # greek capital letter rho, U+03A1
'Scaron': 0x0160, # latin capital letter S with caron, U+0160 ISOlat2
'Sigma': 0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3
'THORN': 0x00de, # latin capital letter THORN, U+00DE ISOlat1
'Tau': 0x03a4, # greek capital letter tau, U+03A4
'Theta': 0x0398, # greek capital letter theta, U+0398 ISOgrk3
'Uacute': 0x00da, # latin capital letter U with acute, U+00DA ISOlat1
'Ucirc': 0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1
'Ugrave': 0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1
'Upsilon': 0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3
'Uuml': 0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1
'Xi': 0x039e, # greek capital letter xi, U+039E ISOgrk3
'Yacute': 0x00dd, # latin capital letter Y with acute, U+00DD ISOlat1
'Yuml': 0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2
'Zeta': 0x0396, # greek capital letter zeta, U+0396
'aacute': 0x00e1, # latin small letter a with acute, U+00E1 ISOlat1
'acirc': 0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1
'acute': 0x00b4, # acute accent = spacing acute, U+00B4 ISOdia
'aelig': 0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1
'agrave': 0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1
'alefsym': 0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW
'alpha': 0x03b1, # greek small letter alpha, U+03B1 ISOgrk3
'amp': 0x0026, # ampersand, U+0026 ISOnum
'and': 0x2227, # logical and = wedge, U+2227 ISOtech
'ang': 0x2220, # angle, U+2220 ISOamso
'aring': 0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1
'asymp': 0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr
'atilde': 0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1
'auml': 0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1
'bdquo': 0x201e, # double low-9 quotation mark, U+201E NEW
'beta': 0x03b2, # greek small letter beta, U+03B2 ISOgrk3
'brvbar': 0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum
'bull': 0x2022, # bullet = black small circle, U+2022 ISOpub
'cap': 0x2229, # intersection = cap, U+2229 ISOtech
'ccedil': 0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1
'cedil': 0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia
'cent': 0x00a2, # cent sign, U+00A2 ISOnum
'chi': 0x03c7, # greek small letter chi, U+03C7 ISOgrk3
'circ': 0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub
'clubs': 0x2663, # black club suit = shamrock, U+2663 ISOpub
'cong': 0x2245, # approximately equal to, U+2245 ISOtech
'copy': 0x00a9, # copyright sign, U+00A9 ISOnum
'crarr': 0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW
'cup': 0x222a, # union = cup, U+222A ISOtech
'curren': 0x00a4, # currency sign, U+00A4 ISOnum
'dArr': 0x21d3, # downwards double arrow, U+21D3 ISOamsa
'dagger': 0x2020, # dagger, U+2020 ISOpub
'darr': 0x2193, # downwards arrow, U+2193 ISOnum
'deg': 0x00b0, # degree sign, U+00B0 ISOnum
'delta': 0x03b4, # greek small letter delta, U+03B4 ISOgrk3
'diams': 0x2666, # black diamond suit, U+2666 ISOpub
'divide': 0x00f7, # division sign, U+00F7 ISOnum
'eacute': 0x00e9, # latin small letter e with acute, U+00E9 ISOlat1
'ecirc': 0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1
'egrave': 0x00e8, # latin small letter e with grave, U+00E8 ISOlat1
'empty': 0x2205, # empty set = null set = diameter, U+2205 ISOamso
'emsp': 0x2003, # em space, U+2003 ISOpub
'ensp': 0x2002, # en space, U+2002 ISOpub
'epsilon': 0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3
'equiv': 0x2261, # identical to, U+2261 ISOtech
'eta': 0x03b7, # greek small letter eta, U+03B7 ISOgrk3
'eth': 0x00f0, # latin small letter eth, U+00F0 ISOlat1
'euml': 0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1
'euro': 0x20ac, # euro sign, U+20AC NEW
'exist': 0x2203, # there exists, U+2203 ISOtech
'fnof': 0x0192, # latin small f with hook = function = florin, U+0192 ISOtech
'forall': 0x2200, # for all, U+2200 ISOtech
'frac12': 0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum
'frac14': 0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum
'frac34': 0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum
'frasl': 0x2044, # fraction slash, U+2044 NEW
'gamma': 0x03b3, # greek small letter gamma, U+03B3 ISOgrk3
'ge': 0x2265, # greater-than or equal to, U+2265 ISOtech
'gt': 0x003e, # greater-than sign, U+003E ISOnum
'hArr': 0x21d4, # left right double arrow, U+21D4 ISOamsa
'harr': 0x2194, # left right arrow, U+2194 ISOamsa
'hearts': 0x2665, # black heart suit = valentine, U+2665 ISOpub
'hellip': 0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub
'iacute': 0x00ed, # latin small letter i with acute, U+00ED ISOlat1
'icirc': 0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1
'iexcl': 0x00a1, # inverted exclamation mark, U+00A1 ISOnum
'igrave': 0x00ec, # latin small letter i with grave, U+00EC ISOlat1
'image': 0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso
'infin': 0x221e, # infinity, U+221E ISOtech
'int': 0x222b, # integral, U+222B ISOtech
'iota': 0x03b9, # greek small letter iota, U+03B9 ISOgrk3
'iquest': 0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum
'isin': 0x2208, # element of, U+2208 ISOtech
'iuml': 0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1
'kappa': 0x03ba, # greek small letter kappa, U+03BA ISOgrk3
'lArr': 0x21d0, # leftwards double arrow, U+21D0 ISOtech
'lambda': 0x03bb, # greek small letter lambda, U+03BB ISOgrk3
'lang': 0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech
'laquo': 0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum
'larr': 0x2190, # leftwards arrow, U+2190 ISOnum
'lceil': 0x2308, # left ceiling = apl upstile, U+2308 ISOamsc
'ldquo': 0x201c, # left double quotation mark, U+201C ISOnum
'le': 0x2264, # less-than or equal to, U+2264 ISOtech
'lfloor': 0x230a, # left floor = apl downstile, U+230A ISOamsc
'lowast': 0x2217, # asterisk operator, U+2217 ISOtech
'loz': 0x25ca, # lozenge, U+25CA ISOpub
'lrm': 0x200e, # left-to-right mark, U+200E NEW RFC 2070
'lsaquo': 0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed
'lsquo': 0x2018, # left single quotation mark, U+2018 ISOnum
'lt': 0x003c, # less-than sign, U+003C ISOnum
'macr': 0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia
'mdash': 0x2014, # em dash, U+2014 ISOpub
'micro': 0x00b5, # micro sign, U+00B5 ISOnum
'middot': 0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum
'minus': 0x2212, # minus sign, U+2212 ISOtech
'mu': 0x03bc, # greek small letter mu, U+03BC ISOgrk3
'nabla': 0x2207, # nabla = backward difference, U+2207 ISOtech
'nbsp': 0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum
'ndash': 0x2013, # en dash, U+2013 ISOpub
'ne': 0x2260, # not equal to, U+2260 ISOtech
'ni': 0x220b, # contains as member, U+220B ISOtech
'not': 0x00ac, # not sign, U+00AC ISOnum
'notin': 0x2209, # not an element of, U+2209 ISOtech
'nsub': 0x2284, # not a subset of, U+2284 ISOamsn
'ntilde': 0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1
'nu': 0x03bd, # greek small letter nu, U+03BD ISOgrk3
'oacute': 0x00f3, # latin small letter o with acute, U+00F3 ISOlat1
'ocirc': 0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1
'oelig': 0x0153, # latin small ligature oe, U+0153 ISOlat2
'ograve': 0x00f2, # latin small letter o with grave, U+00F2 ISOlat1
'oline': 0x203e, # overline = spacing overscore, U+203E NEW
'omega': 0x03c9, # greek small letter omega, U+03C9 ISOgrk3
'omicron': 0x03bf, # greek small letter omicron, U+03BF NEW
'oplus': 0x2295, # circled plus = direct sum, U+2295 ISOamsb
'or': 0x2228, # logical or = vee, U+2228 ISOtech
'ordf': 0x00aa, # feminine ordinal indicator, U+00AA ISOnum
'ordm': 0x00ba, # masculine ordinal indicator, U+00BA ISOnum
'oslash': 0x00f8, # latin small letter o with stroke = latin small letter o slash, U+00F8 ISOlat1
'otilde': 0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1
'otimes': 0x2297, # circled times = vector product, U+2297 ISOamsb
'ouml': 0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1
'para': 0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum
'part': 0x2202, # partial differential, U+2202 ISOtech
'permil': 0x2030, # per mille sign, U+2030 ISOtech
'perp': 0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech
'phi': 0x03c6, # greek small letter phi, U+03C6 ISOgrk3
'pi': 0x03c0, # greek small letter pi, U+03C0 ISOgrk3
'piv': 0x03d6, # greek pi symbol, U+03D6 ISOgrk3
'plusmn': 0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum
'pound': 0x00a3, # pound sign, U+00A3 ISOnum
'prime': 0x2032, # prime = minutes = feet, U+2032 ISOtech
'prod': 0x220f, # n-ary product = product sign, U+220F ISOamsb
'prop': 0x221d, # proportional to, U+221D ISOtech
'psi': 0x03c8, # greek small letter psi, U+03C8 ISOgrk3
'quot': 0x0022, # quotation mark = APL quote, U+0022 ISOnum
'rArr': 0x21d2, # rightwards double arrow, U+21D2 ISOtech
'radic': 0x221a, # square root = radical sign, U+221A ISOtech
'rang': 0x232a, # right-pointing angle bracket = ket, U+232A ISOtech
'raquo': 0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum
'rarr': 0x2192, # rightwards arrow, U+2192 ISOnum
'rceil': 0x2309, # right ceiling, U+2309 ISOamsc
'rdquo': 0x201d, # right double quotation mark, U+201D ISOnum
'real': 0x211c, # blackletter capital R = real part symbol, U+211C ISOamso
'reg': 0x00ae, # registered sign = registered trade mark sign, U+00AE ISOnum
'rfloor': 0x230b, # right floor, U+230B ISOamsc
'rho': 0x03c1, # greek small letter rho, U+03C1 ISOgrk3
'rlm': 0x200f, # right-to-left mark, U+200F NEW RFC 2070
'rsaquo': 0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed
'rsquo': 0x2019, # right single quotation mark, U+2019 ISOnum
'sbquo': 0x201a, # single low-9 quotation mark, U+201A NEW
'scaron': 0x0161, # latin small letter s with caron, U+0161 ISOlat2
'sdot': 0x22c5, # dot operator, U+22C5 ISOamsb
'sect': 0x00a7, # section sign, U+00A7 ISOnum
'shy': 0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum
'sigma': 0x03c3, # greek small letter sigma, U+03C3 ISOgrk3
'sigmaf': 0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3
'sim': 0x223c, # tilde operator = varies with = similar to, U+223C ISOtech
'spades': 0x2660, # black spade suit, U+2660 ISOpub
'sub': 0x2282, # subset of, U+2282 ISOtech
'sube': 0x2286, # subset of or equal to, U+2286 ISOtech
'sum': 0x2211, # n-ary summation, U+2211 ISOamsb
'sup': 0x2283, # superset of, U+2283 ISOtech
'sup1': 0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum
'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum
'sup3': 0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum
'supe': 0x2287, # superset of or equal to, U+2287 ISOtech
'szlig': 0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1
'tau': 0x03c4, # greek small letter tau, U+03C4 ISOgrk3
'there4': 0x2234, # therefore, U+2234 ISOtech
'theta': 0x03b8, # greek small letter theta, U+03B8 ISOgrk3
'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW
'thinsp': 0x2009, # thin space, U+2009 ISOpub
'thorn': 0x00fe, # latin small letter thorn, U+00FE ISOlat1
'tilde': 0x02dc, # small tilde, U+02DC ISOdia
'times': 0x00d7, # multiplication sign, U+00D7 ISOnum
'trade': 0x2122, # trade mark sign, U+2122 ISOnum
'uArr': 0x21d1, # upwards double arrow, U+21D1 ISOamsa
'uacute': 0x00fa, # latin small letter u with acute, U+00FA ISOlat1
'uarr': 0x2191, # upwards arrow, U+2191 ISOnum
'ucirc': 0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1
'ugrave': 0x00f9, # latin small letter u with grave, U+00F9 ISOlat1
'uml': 0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia
'upsih': 0x03d2, # greek upsilon with hook symbol, U+03D2 NEW
'upsilon': 0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3
'uuml': 0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1
'weierp': 0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso
'xi': 0x03be, # greek small letter xi, U+03BE ISOgrk3
'yacute': 0x00fd, # latin small letter y with acute, U+00FD ISOlat1
'yen': 0x00a5, # yen sign = yuan sign, U+00A5 ISOnum
'yuml': 0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1
'zeta': 0x03b6, # greek small letter zeta, U+03B6 ISOgrk3
'zwj': 0x200d, # zero width joiner, U+200D NEW RFC 2070
'zwnj': 0x200c, # zero width non-joiner, U+200C NEW RFC 2070
}
# maps the HTML5 named character references to the equivalent Unicode character(s)
html5 = {
'Aacute': '\xc1',
'aacute': '\xe1',
'Aacute;': '\xc1',
'aacute;': '\xe1',
'Abreve;': '\u0102',
'abreve;': '\u0103',
'ac;': '\u223e',
'acd;': '\u223f',
'acE;': '\u223e\u0333',
'Acirc': '\xc2',
'acirc': '\xe2',
'Acirc;': '\xc2',
'acirc;': '\xe2',
'acute': '\xb4',
'acute;': '\xb4',
'Acy;': '\u0410',
'acy;': '\u0430',
'AElig': '\xc6',
'aelig': '\xe6',
'AElig;': '\xc6',
'aelig;': '\xe6',
'af;': '\u2061',
'Afr;': '\U0001d504',
'afr;': '\U0001d51e',
'Agrave': '\xc0',
'agrave': '\xe0',
'Agrave;': '\xc0',
'agrave;': '\xe0',
'alefsym;': '\u2135',
'aleph;': '\u2135',
'Alpha;': '\u0391',
'alpha;': '\u03b1',
'Amacr;': '\u0100',
'amacr;': '\u0101',
'amalg;': '\u2a3f',
'AMP': '&',
'amp': '&',
'AMP;': '&',
'amp;': '&',
'And;': '\u2a53',
'and;': '\u2227',
'andand;': '\u2a55',
'andd;': '\u2a5c',
'andslope;': '\u2a58',
'andv;': '\u2a5a',
'ang;': '\u2220',
'ange;': '\u29a4',
'angle;': '\u2220',
'angmsd;': '\u2221',
'angmsdaa;': '\u29a8',
'angmsdab;': '\u29a9',
'angmsdac;': '\u29aa',
'angmsdad;': '\u29ab',
'angmsdae;': '\u29ac',
'angmsdaf;': '\u29ad',
'angmsdag;': '\u29ae',
'angmsdah;': '\u29af',
'angrt;': '\u221f',
'angrtvb;': '\u22be',
'angrtvbd;': '\u299d',
'angsph;': '\u2222',
'angst;': '\xc5',
'angzarr;': '\u237c',
'Aogon;': '\u0104',
'aogon;': '\u0105',
'Aopf;': '\U0001d538',
'aopf;': '\U0001d552',
'ap;': '\u2248',
'apacir;': '\u2a6f',
'apE;': '\u2a70',
'ape;': '\u224a',
'apid;': '\u224b',
'apos;': "'",
'ApplyFunction;': '\u2061',
'approx;': '\u2248',
'approxeq;': '\u224a',
'Aring': '\xc5',
'aring': '\xe5',
'Aring;': '\xc5',
'aring;': '\xe5',
'Ascr;': '\U0001d49c',
'ascr;': '\U0001d4b6',
'Assign;': '\u2254',
'ast;': '*',
'asymp;': '\u2248',
'asympeq;': '\u224d',
'Atilde': '\xc3',
'atilde': '\xe3',
'Atilde;': '\xc3',
'atilde;': '\xe3',
'Auml': '\xc4',
'auml': '\xe4',
'Auml;': '\xc4',
'auml;': '\xe4',
'awconint;': '\u2233',
'awint;': '\u2a11',
'backcong;': '\u224c',
'backepsilon;': '\u03f6',
'backprime;': '\u2035',
'backsim;': '\u223d',
'backsimeq;': '\u22cd',
'Backslash;': '\u2216',
'Barv;': '\u2ae7',
'barvee;': '\u22bd',
'Barwed;': '\u2306',
'barwed;': '\u2305',
'barwedge;': '\u2305',
'bbrk;': '\u23b5',
'bbrktbrk;': '\u23b6',
'bcong;': '\u224c',
'Bcy;': '\u0411',
'bcy;': '\u0431',
'bdquo;': '\u201e',
'becaus;': '\u2235',
'Because;': '\u2235',
'because;': '\u2235',
'bemptyv;': '\u29b0',
'bepsi;': '\u03f6',
'bernou;': '\u212c',
'Bernoullis;': '\u212c',
'Beta;': '\u0392',
'beta;': '\u03b2',
'beth;': '\u2136',
'between;': '\u226c',
'Bfr;': '\U0001d505',
'bfr;': '\U0001d51f',
'bigcap;': '\u22c2',
'bigcirc;': '\u25ef',
'bigcup;': '\u22c3',
'bigodot;': '\u2a00',
'bigoplus;': '\u2a01',
'bigotimes;': '\u2a02',
'bigsqcup;': '\u2a06',
'bigstar;': '\u2605',
'bigtriangledown;': '\u25bd',
'bigtriangleup;': '\u25b3',
'biguplus;': '\u2a04',
'bigvee;': '\u22c1',
'bigwedge;': '\u22c0',
'bkarow;': '\u290d',
'blacklozenge;': '\u29eb',
'blacksquare;': '\u25aa',
'blacktriangle;': '\u25b4',
'blacktriangledown;': '\u25be',
'blacktriangleleft;': '\u25c2',
'blacktriangleright;': '\u25b8',
'blank;': '\u2423',
'blk12;': '\u2592',
'blk14;': '\u2591',
'blk34;': '\u2593',
'block;': '\u2588',
'bne;': '=\u20e5',
'bnequiv;': '\u2261\u20e5',
'bNot;': '\u2aed',
'bnot;': '\u2310',
'Bopf;': '\U0001d539',
'bopf;': '\U0001d553',
'bot;': '\u22a5',
'bottom;': '\u22a5',
'bowtie;': '\u22c8',
'boxbox;': '\u29c9',
'boxDL;': '\u2557',
'boxDl;': '\u2556',
'boxdL;': '\u2555',
'boxdl;': '\u2510',
'boxDR;': '\u2554',
'boxDr;': '\u2553',
'boxdR;': '\u2552',
'boxdr;': '\u250c',
'boxH;': '\u2550',
'boxh;': '\u2500',
'boxHD;': '\u2566',
'boxHd;': '\u2564',
'boxhD;': '\u2565',
'boxhd;': '\u252c',
'boxHU;': '\u2569',
'boxHu;': '\u2567',
'boxhU;': '\u2568',
'boxhu;': '\u2534',
'boxminus;': '\u229f',
'boxplus;': '\u229e',
'boxtimes;': '\u22a0',
'boxUL;': '\u255d',
'boxUl;': '\u255c',
'boxuL;': '\u255b',
'boxul;': '\u2518',
'boxUR;': '\u255a',
'boxUr;': '\u2559',
'boxuR;': '\u2558',
'boxur;': '\u2514',
'boxV;': '\u2551',
'boxv;': '\u2502',
'boxVH;': '\u256c',
'boxVh;': '\u256b',
'boxvH;': '\u256a',
'boxvh;': '\u253c',
'boxVL;': '\u2563',
'boxVl;': '\u2562',
'boxvL;': '\u2561',
'boxvl;': '\u2524',
'boxVR;': '\u2560',
'boxVr;': '\u255f',
'boxvR;': '\u255e',
'boxvr;': '\u251c',
'bprime;': '\u2035',
'Breve;': '\u02d8',
'breve;': '\u02d8',
'brvbar': '\xa6',
'brvbar;': '\xa6',
'Bscr;': '\u212c',
'bscr;': '\U0001d4b7',
'bsemi;': '\u204f',
'bsim;': '\u223d',
'bsime;': '\u22cd',
'bsol;': '\\',
'bsolb;': '\u29c5',
'bsolhsub;': '\u27c8',
'bull;': '\u2022',
'bullet;': '\u2022',
'bump;': '\u224e',
'bumpE;': '\u2aae',
'bumpe;': '\u224f',
'Bumpeq;': '\u224e',
'bumpeq;': '\u224f',
'Cacute;': '\u0106',
'cacute;': '\u0107',
'Cap;': '\u22d2',
'cap;': '\u2229',
'capand;': '\u2a44',
'capbrcup;': '\u2a49',
'capcap;': '\u2a4b',
'capcup;': '\u2a47',
'capdot;': '\u2a40',
'CapitalDifferentialD;': '\u2145',
'caps;': '\u2229\ufe00',
'caret;': '\u2041',
'caron;': '\u02c7',
'Cayleys;': '\u212d',
'ccaps;': '\u2a4d',
'Ccaron;': '\u010c',
'ccaron;': '\u010d',
'Ccedil': '\xc7',
'ccedil': '\xe7',
'Ccedil;': '\xc7',
'ccedil;': '\xe7',
'Ccirc;': '\u0108',
'ccirc;': '\u0109',
'Cconint;': '\u2230',
'ccups;': '\u2a4c',
'ccupssm;': '\u2a50',
'Cdot;': '\u010a',
'cdot;': '\u010b',
'cedil': '\xb8',
'cedil;': '\xb8',
'Cedilla;': '\xb8',
'cemptyv;': '\u29b2',
'cent': '\xa2',
'cent;': '\xa2',
'CenterDot;': '\xb7',
'centerdot;': '\xb7',
'Cfr;': '\u212d',
'cfr;': '\U0001d520',
'CHcy;': '\u0427',
'chcy;': '\u0447',
'check;': '\u2713',
'checkmark;': '\u2713',
'Chi;': '\u03a7',
'chi;': '\u03c7',
'cir;': '\u25cb',
'circ;': '\u02c6',
'circeq;': '\u2257',
'circlearrowleft;': '\u21ba',
'circlearrowright;': '\u21bb',
'circledast;': '\u229b',
'circledcirc;': '\u229a',
'circleddash;': '\u229d',
'CircleDot;': '\u2299',
'circledR;': '\xae',
'circledS;': '\u24c8',
'CircleMinus;': '\u2296',
'CirclePlus;': '\u2295',
'CircleTimes;': '\u2297',
'cirE;': '\u29c3',
'cire;': '\u2257',
'cirfnint;': '\u2a10',
'cirmid;': '\u2aef',
'cirscir;': '\u29c2',
'ClockwiseContourIntegral;': '\u2232',
'CloseCurlyDoubleQuote;': '\u201d',
'CloseCurlyQuote;': '\u2019',
'clubs;': '\u2663',
'clubsuit;': '\u2663',
'Colon;': '\u2237',
'colon;': ':',
'Colone;': '\u2a74',
'colone;': '\u2254',
'coloneq;': '\u2254',
'comma;': ',',
'commat;': '@',
'comp;': '\u2201',
'compfn;': '\u2218',
'complement;': '\u2201',
'complexes;': '\u2102',
'cong;': '\u2245',
'congdot;': '\u2a6d',
'Congruent;': '\u2261',
'Conint;': '\u222f',
'conint;': '\u222e',
'ContourIntegral;': '\u222e',
'Copf;': '\u2102',
'copf;': '\U0001d554',
'coprod;': '\u2210',
'Coproduct;': '\u2210',
'COPY': '\xa9',
'copy': '\xa9',
'COPY;': '\xa9',
'copy;': '\xa9',
'copysr;': '\u2117',
'CounterClockwiseContourIntegral;': '\u2233',
'crarr;': '\u21b5',
'Cross;': '\u2a2f',
'cross;': '\u2717',
'Cscr;': '\U0001d49e',
'cscr;': '\U0001d4b8',
'csub;': '\u2acf',
'csube;': '\u2ad1',
'csup;': '\u2ad0',
'csupe;': '\u2ad2',
'ctdot;': '\u22ef',
'cudarrl;': '\u2938',
'cudarrr;': '\u2935',
'cuepr;': '\u22de',
'cuesc;': '\u22df',
'cularr;': '\u21b6',
'cularrp;': '\u293d',
'Cup;': '\u22d3',
'cup;': '\u222a',
'cupbrcap;': '\u2a48',
'CupCap;': '\u224d',
'cupcap;': '\u2a46',
'cupcup;': '\u2a4a',
'cupdot;': '\u228d',
'cupor;': '\u2a45',
'cups;': '\u222a\ufe00',
'curarr;': '\u21b7',
'curarrm;': '\u293c',
'curlyeqprec;': '\u22de',
'curlyeqsucc;': '\u22df',
'curlyvee;': '\u22ce',
'curlywedge;': '\u22cf',
'curren': '\xa4',
'curren;': '\xa4',
'curvearrowleft;': '\u21b6',
'curvearrowright;': '\u21b7',
'cuvee;': '\u22ce',
'cuwed;': '\u22cf',
'cwconint;': '\u2232',
'cwint;': '\u2231',
'cylcty;': '\u232d',
'Dagger;': '\u2021',
'dagger;': '\u2020',
'daleth;': '\u2138',
'Darr;': '\u21a1',
'dArr;': '\u21d3',
'darr;': '\u2193',
'dash;': '\u2010',
'Dashv;': '\u2ae4',
'dashv;': '\u22a3',
'dbkarow;': '\u290f',
'dblac;': '\u02dd',
'Dcaron;': '\u010e',
'dcaron;': '\u010f',
'Dcy;': '\u0414',
'dcy;': '\u0434',
'DD;': '\u2145',
'dd;': '\u2146',
'ddagger;': '\u2021',
'ddarr;': '\u21ca',
'DDotrahd;': '\u2911',
'ddotseq;': '\u2a77',
'deg': '\xb0',
'deg;': '\xb0',
'Del;': '\u2207',
'Delta;': '\u0394',
'delta;': '\u03b4',
'demptyv;': '\u29b1',
'dfisht;': '\u297f',
'Dfr;': '\U0001d507',
'dfr;': '\U0001d521',
'dHar;': '\u2965',
'dharl;': '\u21c3',
'dharr;': '\u21c2',
'DiacriticalAcute;': '\xb4',
'DiacriticalDot;': '\u02d9',
'DiacriticalDoubleAcute;': '\u02dd',
'DiacriticalGrave;': '`',
'DiacriticalTilde;': '\u02dc',
'diam;': '\u22c4',
'Diamond;': '\u22c4',
'diamond;': '\u22c4',
'diamondsuit;': '\u2666',
'diams;': '\u2666',
'die;': '\xa8',
'DifferentialD;': '\u2146',
'digamma;': '\u03dd',
'disin;': '\u22f2',
'div;': '\xf7',
'divide': '\xf7',
'divide;': '\xf7',
'divideontimes;': '\u22c7',
'divonx;': '\u22c7',
'DJcy;': '\u0402',
'djcy;': '\u0452',
'dlcorn;': '\u231e',
'dlcrop;': '\u230d',
'dollar;': '$',
'Dopf;': '\U0001d53b',
'dopf;': '\U0001d555',
'Dot;': '\xa8',
'dot;': '\u02d9',
'DotDot;': '\u20dc',
'doteq;': '\u2250',
'doteqdot;': '\u2251',
'DotEqual;': '\u2250',
'dotminus;': '\u2238',
'dotplus;': '\u2214',
'dotsquare;': '\u22a1',
'doublebarwedge;': '\u2306',
'DoubleContourIntegral;': '\u222f',
'DoubleDot;': '\xa8',
'DoubleDownArrow;': '\u21d3',
'DoubleLeftArrow;': '\u21d0',
'DoubleLeftRightArrow;': '\u21d4',
'DoubleLeftTee;': '\u2ae4',
'DoubleLongLeftArrow;': '\u27f8',
'DoubleLongLeftRightArrow;': '\u27fa',
'DoubleLongRightArrow;': '\u27f9',
'DoubleRightArrow;': '\u21d2',
'DoubleRightTee;': '\u22a8',
'DoubleUpArrow;': '\u21d1',
'DoubleUpDownArrow;': '\u21d5',
'DoubleVerticalBar;': '\u2225',
'DownArrow;': '\u2193',
'Downarrow;': '\u21d3',
'downarrow;': '\u2193',
'DownArrowBar;': '\u2913',
'DownArrowUpArrow;': '\u21f5',
'DownBreve;': '\u0311',
'downdownarrows;': '\u21ca',
'downharpoonleft;': '\u21c3',
'downharpoonright;': '\u21c2',
'DownLeftRightVector;': '\u2950',
'DownLeftTeeVector;': '\u295e',
'DownLeftVector;': '\u21bd',
'DownLeftVectorBar;': '\u2956',
'DownRightTeeVector;': '\u295f',
'DownRightVector;': '\u21c1',
'DownRightVectorBar;': '\u2957',
'DownTee;': '\u22a4',
'DownTeeArrow;': '\u21a7',
'drbkarow;': '\u2910',
'drcorn;': '\u231f',
'drcrop;': '\u230c',
'Dscr;': '\U0001d49f',
'dscr;': '\U0001d4b9',
'DScy;': '\u0405',
'dscy;': '\u0455',
'dsol;': '\u29f6',
'Dstrok;': '\u0110',
'dstrok;': '\u0111',
'dtdot;': '\u22f1',
'dtri;': '\u25bf',
'dtrif;': '\u25be',
'duarr;': '\u21f5',
'duhar;': '\u296f',
'dwangle;': '\u29a6',
'DZcy;': '\u040f',
'dzcy;': '\u045f',
'dzigrarr;': '\u27ff',
'Eacute': '\xc9',
'eacute': '\xe9',
'Eacute;': '\xc9',
'eacute;': '\xe9',
'easter;': '\u2a6e',
'Ecaron;': '\u011a',
'ecaron;': '\u011b',
'ecir;': '\u2256',
'Ecirc': '\xca',
'ecirc': '\xea',
'Ecirc;': '\xca',
'ecirc;': '\xea',
'ecolon;': '\u2255',
'Ecy;': '\u042d',
'ecy;': '\u044d',
'eDDot;': '\u2a77',
'Edot;': '\u0116',
'eDot;': '\u2251',
'edot;': '\u0117',
'ee;': '\u2147',
'efDot;': '\u2252',
'Efr;': '\U0001d508',
'efr;': '\U0001d522',
'eg;': '\u2a9a',
'Egrave': '\xc8',
'egrave': '\xe8',
'Egrave;': '\xc8',
'egrave;': '\xe8',
'egs;': '\u2a96',
'egsdot;': '\u2a98',
'el;': '\u2a99',
'Element;': '\u2208',
'elinters;': '\u23e7',
'ell;': '\u2113',
'els;': '\u2a95',
'elsdot;': '\u2a97',
'Emacr;': '\u0112',
'emacr;': '\u0113',
'empty;': '\u2205',
'emptyset;': '\u2205',
'EmptySmallSquare;': '\u25fb',
'emptyv;': '\u2205',
'EmptyVerySmallSquare;': '\u25ab',
'emsp13;': '\u2004',
'emsp14;': '\u2005',
'emsp;': '\u2003',
'ENG;': '\u014a',
'eng;': '\u014b',
'ensp;': '\u2002',
'Eogon;': '\u0118',
'eogon;': '\u0119',
'Eopf;': '\U0001d53c',
'eopf;': '\U0001d556',
'epar;': '\u22d5',
'eparsl;': '\u29e3',
'eplus;': '\u2a71',
'epsi;': '\u03b5',
'Epsilon;': '\u0395',
'epsilon;': '\u03b5',
'epsiv;': '\u03f5',
'eqcirc;': '\u2256',
'eqcolon;': '\u2255',
'eqsim;': '\u2242',
'eqslantgtr;': '\u2a96',
'eqslantless;': '\u2a95',
'Equal;': '\u2a75',
'equals;': '=',
'EqualTilde;': '\u2242',
'equest;': '\u225f',
'Equilibrium;': '\u21cc',
'equiv;': '\u2261',
'equivDD;': '\u2a78',
'eqvparsl;': '\u29e5',
'erarr;': '\u2971',
'erDot;': '\u2253',
'Escr;': '\u2130',
'escr;': '\u212f',
'esdot;': '\u2250',
'Esim;': '\u2a73',
'esim;': '\u2242',
'Eta;': '\u0397',
'eta;': '\u03b7',
'ETH': '\xd0',
'eth': '\xf0',
'ETH;': '\xd0',
'eth;': '\xf0',
'Euml': '\xcb',
'euml': '\xeb',
'Euml;': '\xcb',
'euml;': '\xeb',
'euro;': '\u20ac',
'excl;': '!',
'exist;': '\u2203',
'Exists;': '\u2203',
'expectation;': '\u2130',
'ExponentialE;': '\u2147',
'exponentiale;': '\u2147',
'fallingdotseq;': '\u2252',
'Fcy;': '\u0424',
'fcy;': '\u0444',
'female;': '\u2640',
'ffilig;': '\ufb03',
'fflig;': '\ufb00',
'ffllig;': '\ufb04',
'Ffr;': '\U0001d509',
'ffr;': '\U0001d523',
'filig;': '\ufb01',
'FilledSmallSquare;': '\u25fc',
'FilledVerySmallSquare;': '\u25aa',
'fjlig;': 'fj',
'flat;': '\u266d',
'fllig;': '\ufb02',
'fltns;': '\u25b1',
'fnof;': '\u0192',
'Fopf;': '\U0001d53d',
'fopf;': '\U0001d557',
'ForAll;': '\u2200',
'forall;': '\u2200',
'fork;': '\u22d4',
'forkv;': '\u2ad9',
'Fouriertrf;': '\u2131',
'fpartint;': '\u2a0d',
'frac12': '\xbd',
'frac12;': '\xbd',
'frac13;': '\u2153',
'frac14': '\xbc',
'frac14;': '\xbc',
'frac15;': '\u2155',
'frac16;': '\u2159',
'frac18;': '\u215b',
'frac23;': '\u2154',
'frac25;': '\u2156',
'frac34': '\xbe',
'frac34;': '\xbe',
'frac35;': '\u2157',
'frac38;': '\u215c',
'frac45;': '\u2158',
'frac56;': '\u215a',
'frac58;': '\u215d',
'frac78;': '\u215e',
'frasl;': '\u2044',
'frown;': '\u2322',
'Fscr;': '\u2131',
'fscr;': '\U0001d4bb',
'gacute;': '\u01f5',
'Gamma;': '\u0393',
'gamma;': '\u03b3',
'Gammad;': '\u03dc',
'gammad;': '\u03dd',
'gap;': '\u2a86',
'Gbreve;': '\u011e',
'gbreve;': '\u011f',
'Gcedil;': '\u0122',
'Gcirc;': '\u011c',
'gcirc;': '\u011d',
'Gcy;': '\u0413',
'gcy;': '\u0433',
'Gdot;': '\u0120',
'gdot;': '\u0121',
'gE;': '\u2267',
'ge;': '\u2265',
'gEl;': '\u2a8c',
'gel;': '\u22db',
'geq;': '\u2265',
'geqq;': '\u2267',
'geqslant;': '\u2a7e',
'ges;': '\u2a7e',
'gescc;': '\u2aa9',
'gesdot;': '\u2a80',
'gesdoto;': '\u2a82',
'gesdotol;': '\u2a84',
'gesl;': '\u22db\ufe00',
'gesles;': '\u2a94',
'Gfr;': '\U0001d50a',
'gfr;': '\U0001d524',
'Gg;': '\u22d9',
'gg;': '\u226b',
'ggg;': '\u22d9',
'gimel;': '\u2137',
'GJcy;': '\u0403',
'gjcy;': '\u0453',
'gl;': '\u2277',
'gla;': '\u2aa5',
'glE;': '\u2a92',
'glj;': '\u2aa4',
'gnap;': '\u2a8a',
'gnapprox;': '\u2a8a',
'gnE;': '\u2269',
'gne;': '\u2a88',
'gneq;': '\u2a88',
'gneqq;': '\u2269',
'gnsim;': '\u22e7',
'Gopf;': '\U0001d53e',
'gopf;': '\U0001d558',
'grave;': '`',
'GreaterEqual;': '\u2265',
'GreaterEqualLess;': '\u22db',
'GreaterFullEqual;': '\u2267',
'GreaterGreater;': '\u2aa2',
'GreaterLess;': '\u2277',
'GreaterSlantEqual;': '\u2a7e',
'GreaterTilde;': '\u2273',
'Gscr;': '\U0001d4a2',
'gscr;': '\u210a',
'gsim;': '\u2273',
'gsime;': '\u2a8e',
'gsiml;': '\u2a90',
'GT': '>',
'gt': '>',
'GT;': '>',
'Gt;': '\u226b',
'gt;': '>',
'gtcc;': '\u2aa7',
'gtcir;': '\u2a7a',
'gtdot;': '\u22d7',
'gtlPar;': '\u2995',
'gtquest;': '\u2a7c',
'gtrapprox;': '\u2a86',
'gtrarr;': '\u2978',
'gtrdot;': '\u22d7',
'gtreqless;': '\u22db',
'gtreqqless;': '\u2a8c',
'gtrless;': '\u2277',
'gtrsim;': '\u2273',
'gvertneqq;': '\u2269\ufe00',
'gvnE;': '\u2269\ufe00',
'Hacek;': '\u02c7',
'hairsp;': '\u200a',
'half;': '\xbd',
'hamilt;': '\u210b',
'HARDcy;': '\u042a',
'hardcy;': '\u044a',
'hArr;': '\u21d4',
'harr;': '\u2194',
'harrcir;': '\u2948',
'harrw;': '\u21ad',
'Hat;': '^',
'hbar;': '\u210f',
'Hcirc;': '\u0124',
'hcirc;': '\u0125',
'hearts;': '\u2665',
'heartsuit;': '\u2665',
'hellip;': '\u2026',
'hercon;': '\u22b9',
'Hfr;': '\u210c',
'hfr;': '\U0001d525',
'HilbertSpace;': '\u210b',
'hksearow;': '\u2925',
'hkswarow;': '\u2926',
'hoarr;': '\u21ff',
'homtht;': '\u223b',
'hookleftarrow;': '\u21a9',
'hookrightarrow;': '\u21aa',
'Hopf;': '\u210d',
'hopf;': '\U0001d559',
'horbar;': '\u2015',
'HorizontalLine;': '\u2500',
'Hscr;': '\u210b',
'hscr;': '\U0001d4bd',
'hslash;': '\u210f',
'Hstrok;': '\u0126',
'hstrok;': '\u0127',
'HumpDownHump;': '\u224e',
'HumpEqual;': '\u224f',
'hybull;': '\u2043',
'hyphen;': '\u2010',
'Iacute': '\xcd',
'iacute': '\xed',
'Iacute;': '\xcd',
'iacute;': '\xed',
'ic;': '\u2063',
'Icirc': '\xce',
'icirc': '\xee',
'Icirc;': '\xce',
'icirc;': '\xee',
'Icy;': '\u0418',
'icy;': '\u0438',
'Idot;': '\u0130',
'IEcy;': '\u0415',
'iecy;': '\u0435',
'iexcl': '\xa1',
'iexcl;': '\xa1',
'iff;': '\u21d4',
'Ifr;': '\u2111',
'ifr;': '\U0001d526',
'Igrave': '\xcc',
'igrave': '\xec',
'Igrave;': '\xcc',
'igrave;': '\xec',
'ii;': '\u2148',
'iiiint;': '\u2a0c',
'iiint;': '\u222d',
'iinfin;': '\u29dc',
'iiota;': '\u2129',
'IJlig;': '\u0132',
'ijlig;': '\u0133',
'Im;': '\u2111',
'Imacr;': '\u012a',
'imacr;': '\u012b',
'image;': '\u2111',
'ImaginaryI;': '\u2148',
'imagline;': '\u2110',
'imagpart;': '\u2111',
'imath;': '\u0131',
'imof;': '\u22b7',
'imped;': '\u01b5',
'Implies;': '\u21d2',
'in;': '\u2208',
'incare;': '\u2105',
'infin;': '\u221e',
'infintie;': '\u29dd',
'inodot;': '\u0131',
'Int;': '\u222c',
'int;': '\u222b',
'intcal;': '\u22ba',
'integers;': '\u2124',
'Integral;': '\u222b',
'intercal;': '\u22ba',
'Intersection;': '\u22c2',
'intlarhk;': '\u2a17',
'intprod;': '\u2a3c',
'InvisibleComma;': '\u2063',
'InvisibleTimes;': '\u2062',
'IOcy;': '\u0401',
'iocy;': '\u0451',
'Iogon;': '\u012e',
'iogon;': '\u012f',
'Iopf;': '\U0001d540',
'iopf;': '\U0001d55a',
'Iota;': '\u0399',
'iota;': '\u03b9',
'iprod;': '\u2a3c',
'iquest': '\xbf',
'iquest;': '\xbf',
'Iscr;': '\u2110',
'iscr;': '\U0001d4be',
'isin;': '\u2208',
'isindot;': '\u22f5',
'isinE;': '\u22f9',
'isins;': '\u22f4',
'isinsv;': '\u22f3',
'isinv;': '\u2208',
'it;': '\u2062',
'Itilde;': '\u0128',
'itilde;': '\u0129',
'Iukcy;': '\u0406',
'iukcy;': '\u0456',
'Iuml': '\xcf',
'iuml': '\xef',
'Iuml;': '\xcf',
'iuml;': '\xef',
'Jcirc;': '\u0134',
'jcirc;': '\u0135',
'Jcy;': '\u0419',
'jcy;': '\u0439',
'Jfr;': '\U0001d50d',
'jfr;': '\U0001d527',
'jmath;': '\u0237',
'Jopf;': '\U0001d541',
'jopf;': '\U0001d55b',
'Jscr;': '\U0001d4a5',
'jscr;': '\U0001d4bf',
'Jsercy;': '\u0408',
'jsercy;': '\u0458',
'Jukcy;': '\u0404',
'jukcy;': '\u0454',
'Kappa;': '\u039a',
'kappa;': '\u03ba',
'kappav;': '\u03f0',
'Kcedil;': '\u0136',
'kcedil;': '\u0137',
'Kcy;': '\u041a',
'kcy;': '\u043a',
'Kfr;': '\U0001d50e',
'kfr;': '\U0001d528',
'kgreen;': '\u0138',
'KHcy;': '\u0425',
'khcy;': '\u0445',
'KJcy;': '\u040c',
'kjcy;': '\u045c',
'Kopf;': '\U0001d542',
'kopf;': '\U0001d55c',
'Kscr;': '\U0001d4a6',
'kscr;': '\U0001d4c0',
'lAarr;': '\u21da',
'Lacute;': '\u0139',
'lacute;': '\u013a',
'laemptyv;': '\u29b4',
'lagran;': '\u2112',
'Lambda;': '\u039b',
'lambda;': '\u03bb',
'Lang;': '\u27ea',
'lang;': '\u27e8',
'langd;': '\u2991',
'langle;': '\u27e8',
'lap;': '\u2a85',
'Laplacetrf;': '\u2112',
'laquo': '\xab',
'laquo;': '\xab',
'Larr;': '\u219e',
'lArr;': '\u21d0',
'larr;': '\u2190',
'larrb;': '\u21e4',
'larrbfs;': '\u291f',
'larrfs;': '\u291d',
'larrhk;': '\u21a9',
'larrlp;': '\u21ab',
'larrpl;': '\u2939',
'larrsim;': '\u2973',
'larrtl;': '\u21a2',
'lat;': '\u2aab',
'lAtail;': '\u291b',
'latail;': '\u2919',
'late;': '\u2aad',
'lates;': '\u2aad\ufe00',
'lBarr;': '\u290e',
'lbarr;': '\u290c',
'lbbrk;': '\u2772',
'lbrace;': '{',
'lbrack;': '[',
'lbrke;': '\u298b',
'lbrksld;': '\u298f',
'lbrkslu;': '\u298d',
'Lcaron;': '\u013d',
'lcaron;': '\u013e',
'Lcedil;': '\u013b',
'lcedil;': '\u013c',
'lceil;': '\u2308',
'lcub;': '{',
'Lcy;': '\u041b',
'lcy;': '\u043b',
'ldca;': '\u2936',
'ldquo;': '\u201c',
'ldquor;': '\u201e',
'ldrdhar;': '\u2967',
'ldrushar;': '\u294b',
'ldsh;': '\u21b2',
'lE;': '\u2266',
'le;': '\u2264',
'LeftAngleBracket;': '\u27e8',
'LeftArrow;': '\u2190',
'Leftarrow;': '\u21d0',
'leftarrow;': '\u2190',
'LeftArrowBar;': '\u21e4',
'LeftArrowRightArrow;': '\u21c6',
'leftarrowtail;': '\u21a2',
'LeftCeiling;': '\u2308',
'LeftDoubleBracket;': '\u27e6',
'LeftDownTeeVector;': '\u2961',
'LeftDownVector;': '\u21c3',
'LeftDownVectorBar;': '\u2959',
'LeftFloor;': '\u230a',
'leftharpoondown;': '\u21bd',
'leftharpoonup;': '\u21bc',
'leftleftarrows;': '\u21c7',
'LeftRightArrow;': '\u2194',
'Leftrightarrow;': '\u21d4',
'leftrightarrow;': '\u2194',
'leftrightarrows;': '\u21c6',
'leftrightharpoons;': '\u21cb',
'leftrightsquigarrow;': '\u21ad',
'LeftRightVector;': '\u294e',
'LeftTee;': '\u22a3',
'LeftTeeArrow;': '\u21a4',
'LeftTeeVector;': '\u295a',
'leftthreetimes;': '\u22cb',
'LeftTriangle;': '\u22b2',
'LeftTriangleBar;': '\u29cf',
'LeftTriangleEqual;': '\u22b4',
'LeftUpDownVector;': '\u2951',
'LeftUpTeeVector;': '\u2960',
'LeftUpVector;': '\u21bf',
'LeftUpVectorBar;': '\u2958',
'LeftVector;': '\u21bc',
'LeftVectorBar;': '\u2952',
'lEg;': '\u2a8b',
'leg;': '\u22da',
'leq;': '\u2264',
'leqq;': '\u2266',
'leqslant;': '\u2a7d',
'les;': '\u2a7d',
'lescc;': '\u2aa8',
'lesdot;': '\u2a7f',
'lesdoto;': '\u2a81',
'lesdotor;': '\u2a83',
'lesg;': '\u22da\ufe00',
'lesges;': '\u2a93',
'lessapprox;': '\u2a85',
'lessdot;': '\u22d6',
'lesseqgtr;': '\u22da',
'lesseqqgtr;': '\u2a8b',
'LessEqualGreater;': '\u22da',
'LessFullEqual;': '\u2266',
'LessGreater;': '\u2276',
'lessgtr;': '\u2276',
'LessLess;': '\u2aa1',
'lesssim;': '\u2272',
'LessSlantEqual;': '\u2a7d',
'LessTilde;': '\u2272',
'lfisht;': '\u297c',
'lfloor;': '\u230a',
'Lfr;': '\U0001d50f',
'lfr;': '\U0001d529',
'lg;': '\u2276',
'lgE;': '\u2a91',
'lHar;': '\u2962',
'lhard;': '\u21bd',
'lharu;': '\u21bc',
'lharul;': '\u296a',
'lhblk;': '\u2584',
'LJcy;': '\u0409',
'ljcy;': '\u0459',
'Ll;': '\u22d8',
'll;': '\u226a',
'llarr;': '\u21c7',
'llcorner;': '\u231e',
'Lleftarrow;': '\u21da',
'llhard;': '\u296b',
'lltri;': '\u25fa',
'Lmidot;': '\u013f',
'lmidot;': '\u0140',
'lmoust;': '\u23b0',
'lmoustache;': '\u23b0',
'lnap;': '\u2a89',
'lnapprox;': '\u2a89',
'lnE;': '\u2268',
'lne;': '\u2a87',
'lneq;': '\u2a87',
'lneqq;': '\u2268',
'lnsim;': '\u22e6',
'loang;': '\u27ec',
'loarr;': '\u21fd',
'lobrk;': '\u27e6',
'LongLeftArrow;': '\u27f5',
'Longleftarrow;': '\u27f8',
'longleftarrow;': '\u27f5',
'LongLeftRightArrow;': '\u27f7',
'Longleftrightarrow;': '\u27fa',
'longleftrightarrow;': '\u27f7',
'longmapsto;': '\u27fc',
'LongRightArrow;': '\u27f6',
'Longrightarrow;': '\u27f9',
'longrightarrow;': '\u27f6',
'looparrowleft;': '\u21ab',
'looparrowright;': '\u21ac',
'lopar;': '\u2985',
'Lopf;': '\U0001d543',
'lopf;': '\U0001d55d',
'loplus;': '\u2a2d',
'lotimes;': '\u2a34',
'lowast;': '\u2217',
'lowbar;': '_',
'LowerLeftArrow;': '\u2199',
'LowerRightArrow;': '\u2198',
'loz;': '\u25ca',
'lozenge;': '\u25ca',
'lozf;': '\u29eb',
'lpar;': '(',
'lparlt;': '\u2993',
'lrarr;': '\u21c6',
'lrcorner;': '\u231f',
'lrhar;': '\u21cb',
'lrhard;': '\u296d',
'lrm;': '\u200e',
'lrtri;': '\u22bf',
'lsaquo;': '\u2039',
'Lscr;': '\u2112',
'lscr;': '\U0001d4c1',
'Lsh;': '\u21b0',
'lsh;': '\u21b0',
'lsim;': '\u2272',
'lsime;': '\u2a8d',
'lsimg;': '\u2a8f',
'lsqb;': '[',
'lsquo;': '\u2018',
'lsquor;': '\u201a',
'Lstrok;': '\u0141',
'lstrok;': '\u0142',
'LT': '<',
'lt': '<',
'LT;': '<',
'Lt;': '\u226a',
'lt;': '<',
'ltcc;': '\u2aa6',
'ltcir;': '\u2a79',
'ltdot;': '\u22d6',
'lthree;': '\u22cb',
'ltimes;': '\u22c9',
'ltlarr;': '\u2976',
'ltquest;': '\u2a7b',
'ltri;': '\u25c3',
'ltrie;': '\u22b4',
'ltrif;': '\u25c2',
'ltrPar;': '\u2996',
'lurdshar;': '\u294a',
'luruhar;': '\u2966',
'lvertneqq;': '\u2268\ufe00',
'lvnE;': '\u2268\ufe00',
'macr': '\xaf',
'macr;': '\xaf',
'male;': '\u2642',
'malt;': '\u2720',
'maltese;': '\u2720',
'Map;': '\u2905',
'map;': '\u21a6',
'mapsto;': '\u21a6',
'mapstodown;': '\u21a7',
'mapstoleft;': '\u21a4',
'mapstoup;': '\u21a5',
'marker;': '\u25ae',
'mcomma;': '\u2a29',
'Mcy;': '\u041c',
'mcy;': '\u043c',
'mdash;': '\u2014',
'mDDot;': '\u223a',
'measuredangle;': '\u2221',
'MediumSpace;': '\u205f',
'Mellintrf;': '\u2133',
'Mfr;': '\U0001d510',
'mfr;': '\U0001d52a',
'mho;': '\u2127',
'micro': '\xb5',
'micro;': '\xb5',
'mid;': '\u2223',
'midast;': '*',
'midcir;': '\u2af0',
'middot': '\xb7',
'middot;': '\xb7',
'minus;': '\u2212',
'minusb;': '\u229f',
'minusd;': '\u2238',
'minusdu;': '\u2a2a',
'MinusPlus;': '\u2213',
'mlcp;': '\u2adb',
'mldr;': '\u2026',
'mnplus;': '\u2213',
'models;': '\u22a7',
'Mopf;': '\U0001d544',
'mopf;': '\U0001d55e',
'mp;': '\u2213',
'Mscr;': '\u2133',
'mscr;': '\U0001d4c2',
'mstpos;': '\u223e',
'Mu;': '\u039c',
'mu;': '\u03bc',
'multimap;': '\u22b8',
'mumap;': '\u22b8',
'nabla;': '\u2207',
'Nacute;': '\u0143',
'nacute;': '\u0144',
'nang;': '\u2220\u20d2',
'nap;': '\u2249',
'napE;': '\u2a70\u0338',
'napid;': '\u224b\u0338',
'napos;': '\u0149',
'napprox;': '\u2249',
'natur;': '\u266e',
'natural;': '\u266e',
'naturals;': '\u2115',
'nbsp': '\xa0',
'nbsp;': '\xa0',
'nbump;': '\u224e\u0338',
'nbumpe;': '\u224f\u0338',
'ncap;': '\u2a43',
'Ncaron;': '\u0147',
'ncaron;': '\u0148',
'Ncedil;': '\u0145',
'ncedil;': '\u0146',
'ncong;': '\u2247',
'ncongdot;': '\u2a6d\u0338',
'ncup;': '\u2a42',
'Ncy;': '\u041d',
'ncy;': '\u043d',
'ndash;': '\u2013',
'ne;': '\u2260',
'nearhk;': '\u2924',
'neArr;': '\u21d7',
'nearr;': '\u2197',
'nearrow;': '\u2197',
'nedot;': '\u2250\u0338',
'NegativeMediumSpace;': '\u200b',
'NegativeThickSpace;': '\u200b',
'NegativeThinSpace;': '\u200b',
'NegativeVeryThinSpace;': '\u200b',
'nequiv;': '\u2262',
'nesear;': '\u2928',
'nesim;': '\u2242\u0338',
'NestedGreaterGreater;': '\u226b',
'NestedLessLess;': '\u226a',
'NewLine;': '\n',
'nexist;': '\u2204',
'nexists;': '\u2204',
'Nfr;': '\U0001d511',
'nfr;': '\U0001d52b',
'ngE;': '\u2267\u0338',
'nge;': '\u2271',
'ngeq;': '\u2271',
'ngeqq;': '\u2267\u0338',
'ngeqslant;': '\u2a7e\u0338',
'nges;': '\u2a7e\u0338',
'nGg;': '\u22d9\u0338',
'ngsim;': '\u2275',
'nGt;': '\u226b\u20d2',
'ngt;': '\u226f',
'ngtr;': '\u226f',
'nGtv;': '\u226b\u0338',
'nhArr;': '\u21ce',
'nharr;': '\u21ae',
'nhpar;': '\u2af2',
'ni;': '\u220b',
'nis;': '\u22fc',
'nisd;': '\u22fa',
'niv;': '\u220b',
'NJcy;': '\u040a',
'njcy;': '\u045a',
'nlArr;': '\u21cd',
'nlarr;': '\u219a',
'nldr;': '\u2025',
'nlE;': '\u2266\u0338',
'nle;': '\u2270',
'nLeftarrow;': '\u21cd',
'nleftarrow;': '\u219a',
'nLeftrightarrow;': '\u21ce',
'nleftrightarrow;': '\u21ae',
'nleq;': '\u2270',
'nleqq;': '\u2266\u0338',
'nleqslant;': '\u2a7d\u0338',
'nles;': '\u2a7d\u0338',
'nless;': '\u226e',
'nLl;': '\u22d8\u0338',
'nlsim;': '\u2274',
'nLt;': '\u226a\u20d2',
'nlt;': '\u226e',
'nltri;': '\u22ea',
'nltrie;': '\u22ec',
'nLtv;': '\u226a\u0338',
'nmid;': '\u2224',
'NoBreak;': '\u2060',
'NonBreakingSpace;': '\xa0',
'Nopf;': '\u2115',
'nopf;': '\U0001d55f',
'not': '\xac',
'Not;': '\u2aec',
'not;': '\xac',
'NotCongruent;': '\u2262',
'NotCupCap;': '\u226d',
'NotDoubleVerticalBar;': '\u2226',
'NotElement;': '\u2209',
'NotEqual;': '\u2260',
'NotEqualTilde;': '\u2242\u0338',
'NotExists;': '\u2204',
'NotGreater;': '\u226f',
'NotGreaterEqual;': '\u2271',
'NotGreaterFullEqual;': '\u2267\u0338',
'NotGreaterGreater;': '\u226b\u0338',
'NotGreaterLess;': '\u2279',
'NotGreaterSlantEqual;': '\u2a7e\u0338',
'NotGreaterTilde;': '\u2275',
'NotHumpDownHump;': '\u224e\u0338',
'NotHumpEqual;': '\u224f\u0338',
'notin;': '\u2209',
'notindot;': '\u22f5\u0338',
'notinE;': '\u22f9\u0338',
'notinva;': '\u2209',
'notinvb;': '\u22f7',
'notinvc;': '\u22f6',
'NotLeftTriangle;': '\u22ea',
'NotLeftTriangleBar;': '\u29cf\u0338',
'NotLeftTriangleEqual;': '\u22ec',
'NotLess;': '\u226e',
'NotLessEqual;': '\u2270',
'NotLessGreater;': '\u2278',
'NotLessLess;': '\u226a\u0338',
'NotLessSlantEqual;': '\u2a7d\u0338',
'NotLessTilde;': '\u2274',
'NotNestedGreaterGreater;': '\u2aa2\u0338',
'NotNestedLessLess;': '\u2aa1\u0338',
'notni;': '\u220c',
'notniva;': '\u220c',
'notnivb;': '\u22fe',
'notnivc;': '\u22fd',
'NotPrecedes;': '\u2280',
'NotPrecedesEqual;': '\u2aaf\u0338',
'NotPrecedesSlantEqual;': '\u22e0',
'NotReverseElement;': '\u220c',
'NotRightTriangle;': '\u22eb',
'NotRightTriangleBar;': '\u29d0\u0338',
'NotRightTriangleEqual;': '\u22ed',
'NotSquareSubset;': '\u228f\u0338',
'NotSquareSubsetEqual;': '\u22e2',
'NotSquareSuperset;': '\u2290\u0338',
'NotSquareSupersetEqual;': '\u22e3',
'NotSubset;': '\u2282\u20d2',
'NotSubsetEqual;': '\u2288',
'NotSucceeds;': '\u2281',
'NotSucceedsEqual;': '\u2ab0\u0338',
'NotSucceedsSlantEqual;': '\u22e1',
'NotSucceedsTilde;': '\u227f\u0338',
'NotSuperset;': '\u2283\u20d2',
'NotSupersetEqual;': '\u2289',
'NotTilde;': '\u2241',
'NotTildeEqual;': '\u2244',
'NotTildeFullEqual;': '\u2247',
'NotTildeTilde;': '\u2249',
'NotVerticalBar;': '\u2224',
'npar;': '\u2226',
'nparallel;': '\u2226',
'nparsl;': '\u2afd\u20e5',
'npart;': '\u2202\u0338',
'npolint;': '\u2a14',
'npr;': '\u2280',
'nprcue;': '\u22e0',
'npre;': '\u2aaf\u0338',
'nprec;': '\u2280',
'npreceq;': '\u2aaf\u0338',
'nrArr;': '\u21cf',
'nrarr;': '\u219b',
'nrarrc;': '\u2933\u0338',
'nrarrw;': '\u219d\u0338',
'nRightarrow;': '\u21cf',
'nrightarrow;': '\u219b',
'nrtri;': '\u22eb',
'nrtrie;': '\u22ed',
'nsc;': '\u2281',
'nsccue;': '\u22e1',
'nsce;': '\u2ab0\u0338',
'Nscr;': '\U0001d4a9',
'nscr;': '\U0001d4c3',
'nshortmid;': '\u2224',
'nshortparallel;': '\u2226',
'nsim;': '\u2241',
'nsime;': '\u2244',
'nsimeq;': '\u2244',
'nsmid;': '\u2224',
'nspar;': '\u2226',
'nsqsube;': '\u22e2',
'nsqsupe;': '\u22e3',
'nsub;': '\u2284',
'nsubE;': '\u2ac5\u0338',
'nsube;': '\u2288',
'nsubset;': '\u2282\u20d2',
'nsubseteq;': '\u2288',
'nsubseteqq;': '\u2ac5\u0338',
'nsucc;': '\u2281',
'nsucceq;': '\u2ab0\u0338',
'nsup;': '\u2285',
'nsupE;': '\u2ac6\u0338',
'nsupe;': '\u2289',
'nsupset;': '\u2283\u20d2',
'nsupseteq;': '\u2289',
'nsupseteqq;': '\u2ac6\u0338',
'ntgl;': '\u2279',
'Ntilde': '\xd1',
'ntilde': '\xf1',
'Ntilde;': '\xd1',
'ntilde;': '\xf1',
'ntlg;': '\u2278',
'ntriangleleft;': '\u22ea',
'ntrianglelefteq;': '\u22ec',
'ntriangleright;': '\u22eb',
'ntrianglerighteq;': '\u22ed',
'Nu;': '\u039d',
'nu;': '\u03bd',
'num;': '#',
'numero;': '\u2116',
'numsp;': '\u2007',
'nvap;': '\u224d\u20d2',
'nVDash;': '\u22af',
'nVdash;': '\u22ae',
'nvDash;': '\u22ad',
'nvdash;': '\u22ac',
'nvge;': '\u2265\u20d2',
'nvgt;': '>\u20d2',
'nvHarr;': '\u2904',
'nvinfin;': '\u29de',
'nvlArr;': '\u2902',
'nvle;': '\u2264\u20d2',
'nvlt;': '<\u20d2',
'nvltrie;': '\u22b4\u20d2',
'nvrArr;': '\u2903',
'nvrtrie;': '\u22b5\u20d2',
'nvsim;': '\u223c\u20d2',
'nwarhk;': '\u2923',
'nwArr;': '\u21d6',
'nwarr;': '\u2196',
'nwarrow;': '\u2196',
'nwnear;': '\u2927',
'Oacute': '\xd3',
'oacute': '\xf3',
'Oacute;': '\xd3',
'oacute;': '\xf3',
'oast;': '\u229b',
'ocir;': '\u229a',
'Ocirc': '\xd4',
'ocirc': '\xf4',
'Ocirc;': '\xd4',
'ocirc;': '\xf4',
'Ocy;': '\u041e',
'ocy;': '\u043e',
'odash;': '\u229d',
'Odblac;': '\u0150',
'odblac;': '\u0151',
'odiv;': '\u2a38',
'odot;': '\u2299',
'odsold;': '\u29bc',
'OElig;': '\u0152',
'oelig;': '\u0153',
'ofcir;': '\u29bf',
'Ofr;': '\U0001d512',
'ofr;': '\U0001d52c',
'ogon;': '\u02db',
'Ograve': '\xd2',
'ograve': '\xf2',
'Ograve;': '\xd2',
'ograve;': '\xf2',
'ogt;': '\u29c1',
'ohbar;': '\u29b5',
'ohm;': '\u03a9',
'oint;': '\u222e',
'olarr;': '\u21ba',
'olcir;': '\u29be',
'olcross;': '\u29bb',
'oline;': '\u203e',
'olt;': '\u29c0',
'Omacr;': '\u014c',
'omacr;': '\u014d',
'Omega;': '\u03a9',
'omega;': '\u03c9',
'Omicron;': '\u039f',
'omicron;': '\u03bf',
'omid;': '\u29b6',
'ominus;': '\u2296',
'Oopf;': '\U0001d546',
'oopf;': '\U0001d560',
'opar;': '\u29b7',
'OpenCurlyDoubleQuote;': '\u201c',
'OpenCurlyQuote;': '\u2018',
'operp;': '\u29b9',
'oplus;': '\u2295',
'Or;': '\u2a54',
'or;': '\u2228',
'orarr;': '\u21bb',
'ord;': '\u2a5d',
'order;': '\u2134',
'orderof;': '\u2134',
'ordf': '\xaa',
'ordf;': '\xaa',
'ordm': '\xba',
'ordm;': '\xba',
'origof;': '\u22b6',
'oror;': '\u2a56',
'orslope;': '\u2a57',
'orv;': '\u2a5b',
'oS;': '\u24c8',
'Oscr;': '\U0001d4aa',
'oscr;': '\u2134',
'Oslash': '\xd8',
'oslash': '\xf8',
'Oslash;': '\xd8',
'oslash;': '\xf8',
'osol;': '\u2298',
'Otilde': '\xd5',
'otilde': '\xf5',
'Otilde;': '\xd5',
'otilde;': '\xf5',
'Otimes;': '\u2a37',
'otimes;': '\u2297',
'otimesas;': '\u2a36',
'Ouml': '\xd6',
'ouml': '\xf6',
'Ouml;': '\xd6',
'ouml;': '\xf6',
'ovbar;': '\u233d',
'OverBar;': '\u203e',
'OverBrace;': '\u23de',
'OverBracket;': '\u23b4',
'OverParenthesis;': '\u23dc',
'par;': '\u2225',
'para': '\xb6',
'para;': '\xb6',
'parallel;': '\u2225',
'parsim;': '\u2af3',
'parsl;': '\u2afd',
'part;': '\u2202',
'PartialD;': '\u2202',
'Pcy;': '\u041f',
'pcy;': '\u043f',
'percnt;': '%',
'period;': '.',
'permil;': '\u2030',
'perp;': '\u22a5',
'pertenk;': '\u2031',
'Pfr;': '\U0001d513',
'pfr;': '\U0001d52d',
'Phi;': '\u03a6',
'phi;': '\u03c6',
'phiv;': '\u03d5',
'phmmat;': '\u2133',
'phone;': '\u260e',
'Pi;': '\u03a0',
'pi;': '\u03c0',
'pitchfork;': '\u22d4',
'piv;': '\u03d6',
'planck;': '\u210f',
'planckh;': '\u210e',
'plankv;': '\u210f',
'plus;': '+',
'plusacir;': '\u2a23',
'plusb;': '\u229e',
'pluscir;': '\u2a22',
'plusdo;': '\u2214',
'plusdu;': '\u2a25',
'pluse;': '\u2a72',
'PlusMinus;': '\xb1',
'plusmn': '\xb1',
'plusmn;': '\xb1',
'plussim;': '\u2a26',
'plustwo;': '\u2a27',
'pm;': '\xb1',
'Poincareplane;': '\u210c',
'pointint;': '\u2a15',
'Popf;': '\u2119',
'popf;': '\U0001d561',
'pound': '\xa3',
'pound;': '\xa3',
'Pr;': '\u2abb',
'pr;': '\u227a',
'prap;': '\u2ab7',
'prcue;': '\u227c',
'prE;': '\u2ab3',
'pre;': '\u2aaf',
'prec;': '\u227a',
'precapprox;': '\u2ab7',
'preccurlyeq;': '\u227c',
'Precedes;': '\u227a',
'PrecedesEqual;': '\u2aaf',
'PrecedesSlantEqual;': '\u227c',
'PrecedesTilde;': '\u227e',
'preceq;': '\u2aaf',
'precnapprox;': '\u2ab9',
'precneqq;': '\u2ab5',
'precnsim;': '\u22e8',
'precsim;': '\u227e',
'Prime;': '\u2033',
'prime;': '\u2032',
'primes;': '\u2119',
'prnap;': '\u2ab9',
'prnE;': '\u2ab5',
'prnsim;': '\u22e8',
'prod;': '\u220f',
'Product;': '\u220f',
'profalar;': '\u232e',
'profline;': '\u2312',
'profsurf;': '\u2313',
'prop;': '\u221d',
'Proportion;': '\u2237',
'Proportional;': '\u221d',
'propto;': '\u221d',
'prsim;': '\u227e',
'prurel;': '\u22b0',
'Pscr;': '\U0001d4ab',
'pscr;': '\U0001d4c5',
'Psi;': '\u03a8',
'psi;': '\u03c8',
'puncsp;': '\u2008',
'Qfr;': '\U0001d514',
'qfr;': '\U0001d52e',
'qint;': '\u2a0c',
'Qopf;': '\u211a',
'qopf;': '\U0001d562',
'qprime;': '\u2057',
'Qscr;': '\U0001d4ac',
'qscr;': '\U0001d4c6',
'quaternions;': '\u210d',
'quatint;': '\u2a16',
'quest;': '?',
'questeq;': '\u225f',
'QUOT': '"',
'quot': '"',
'QUOT;': '"',
'quot;': '"',
'rAarr;': '\u21db',
'race;': '\u223d\u0331',
'Racute;': '\u0154',
'racute;': '\u0155',
'radic;': '\u221a',
'raemptyv;': '\u29b3',
'Rang;': '\u27eb',
'rang;': '\u27e9',
'rangd;': '\u2992',
'range;': '\u29a5',
'rangle;': '\u27e9',
'raquo': '\xbb',
'raquo;': '\xbb',
'Rarr;': '\u21a0',
'rArr;': '\u21d2',
'rarr;': '\u2192',
'rarrap;': '\u2975',
'rarrb;': '\u21e5',
'rarrbfs;': '\u2920',
'rarrc;': '\u2933',
'rarrfs;': '\u291e',
'rarrhk;': '\u21aa',
'rarrlp;': '\u21ac',
'rarrpl;': '\u2945',
'rarrsim;': '\u2974',
'Rarrtl;': '\u2916',
'rarrtl;': '\u21a3',
'rarrw;': '\u219d',
'rAtail;': '\u291c',
'ratail;': '\u291a',
'ratio;': '\u2236',
'rationals;': '\u211a',
'RBarr;': '\u2910',
'rBarr;': '\u290f',
'rbarr;': '\u290d',
'rbbrk;': '\u2773',
'rbrace;': '}',
'rbrack;': ']',
'rbrke;': '\u298c',
'rbrksld;': '\u298e',
'rbrkslu;': '\u2990',
'Rcaron;': '\u0158',
'rcaron;': '\u0159',
'Rcedil;': '\u0156',
'rcedil;': '\u0157',
'rceil;': '\u2309',
'rcub;': '}',
'Rcy;': '\u0420',
'rcy;': '\u0440',
'rdca;': '\u2937',
'rdldhar;': '\u2969',
'rdquo;': '\u201d',
'rdquor;': '\u201d',
'rdsh;': '\u21b3',
'Re;': '\u211c',
'real;': '\u211c',
'realine;': '\u211b',
'realpart;': '\u211c',
'reals;': '\u211d',
'rect;': '\u25ad',
'REG': '\xae',
'reg': '\xae',
'REG;': '\xae',
'reg;': '\xae',
'ReverseElement;': '\u220b',
'ReverseEquilibrium;': '\u21cb',
'ReverseUpEquilibrium;': '\u296f',
'rfisht;': '\u297d',
'rfloor;': '\u230b',
'Rfr;': '\u211c',
'rfr;': '\U0001d52f',
'rHar;': '\u2964',
'rhard;': '\u21c1',
'rharu;': '\u21c0',
'rharul;': '\u296c',
'Rho;': '\u03a1',
'rho;': '\u03c1',
'rhov;': '\u03f1',
'RightAngleBracket;': '\u27e9',
'RightArrow;': '\u2192',
'Rightarrow;': '\u21d2',
'rightarrow;': '\u2192',
'RightArrowBar;': '\u21e5',
'RightArrowLeftArrow;': '\u21c4',
'rightarrowtail;': '\u21a3',
'RightCeiling;': '\u2309',
'RightDoubleBracket;': '\u27e7',
'RightDownTeeVector;': '\u295d',
'RightDownVector;': '\u21c2',
'RightDownVectorBar;': '\u2955',
'RightFloor;': '\u230b',
'rightharpoondown;': '\u21c1',
'rightharpoonup;': '\u21c0',
'rightleftarrows;': '\u21c4',
'rightleftharpoons;': '\u21cc',
'rightrightarrows;': '\u21c9',
'rightsquigarrow;': '\u219d',
'RightTee;': '\u22a2',
'RightTeeArrow;': '\u21a6',
'RightTeeVector;': '\u295b',
'rightthreetimes;': '\u22cc',
'RightTriangle;': '\u22b3',
'RightTriangleBar;': '\u29d0',
'RightTriangleEqual;': '\u22b5',
'RightUpDownVector;': '\u294f',
'RightUpTeeVector;': '\u295c',
'RightUpVector;': '\u21be',
'RightUpVectorBar;': '\u2954',
'RightVector;': '\u21c0',
'RightVectorBar;': '\u2953',
'ring;': '\u02da',
'risingdotseq;': '\u2253',
'rlarr;': '\u21c4',
'rlhar;': '\u21cc',
'rlm;': '\u200f',
'rmoust;': '\u23b1',
'rmoustache;': '\u23b1',
'rnmid;': '\u2aee',
'roang;': '\u27ed',
'roarr;': '\u21fe',
'robrk;': '\u27e7',
'ropar;': '\u2986',
'Ropf;': '\u211d',
'ropf;': '\U0001d563',
'roplus;': '\u2a2e',
'rotimes;': '\u2a35',
'RoundImplies;': '\u2970',
'rpar;': ')',
'rpargt;': '\u2994',
'rppolint;': '\u2a12',
'rrarr;': '\u21c9',
'Rrightarrow;': '\u21db',
'rsaquo;': '\u203a',
'Rscr;': '\u211b',
'rscr;': '\U0001d4c7',
'Rsh;': '\u21b1',
'rsh;': '\u21b1',
'rsqb;': ']',
'rsquo;': '\u2019',
'rsquor;': '\u2019',
'rthree;': '\u22cc',
'rtimes;': '\u22ca',
'rtri;': '\u25b9',
'rtrie;': '\u22b5',
'rtrif;': '\u25b8',
'rtriltri;': '\u29ce',
'RuleDelayed;': '\u29f4',
'ruluhar;': '\u2968',
'rx;': '\u211e',
'Sacute;': '\u015a',
'sacute;': '\u015b',
'sbquo;': '\u201a',
'Sc;': '\u2abc',
'sc;': '\u227b',
'scap;': '\u2ab8',
'Scaron;': '\u0160',
'scaron;': '\u0161',
'sccue;': '\u227d',
'scE;': '\u2ab4',
'sce;': '\u2ab0',
'Scedil;': '\u015e',
'scedil;': '\u015f',
'Scirc;': '\u015c',
'scirc;': '\u015d',
'scnap;': '\u2aba',
'scnE;': '\u2ab6',
'scnsim;': '\u22e9',
'scpolint;': '\u2a13',
'scsim;': '\u227f',
'Scy;': '\u0421',
'scy;': '\u0441',
'sdot;': '\u22c5',
'sdotb;': '\u22a1',
'sdote;': '\u2a66',
'searhk;': '\u2925',
'seArr;': '\u21d8',
'searr;': '\u2198',
'searrow;': '\u2198',
'sect': '\xa7',
'sect;': '\xa7',
'semi;': ';',
'seswar;': '\u2929',
'setminus;': '\u2216',
'setmn;': '\u2216',
'sext;': '\u2736',
'Sfr;': '\U0001d516',
'sfr;': '\U0001d530',
'sfrown;': '\u2322',
'sharp;': '\u266f',
'SHCHcy;': '\u0429',
'shchcy;': '\u0449',
'SHcy;': '\u0428',
'shcy;': '\u0448',
'ShortDownArrow;': '\u2193',
'ShortLeftArrow;': '\u2190',
'shortmid;': '\u2223',
'shortparallel;': '\u2225',
'ShortRightArrow;': '\u2192',
'ShortUpArrow;': '\u2191',
'shy': '\xad',
'shy;': '\xad',
'Sigma;': '\u03a3',
'sigma;': '\u03c3',
'sigmaf;': '\u03c2',
'sigmav;': '\u03c2',
'sim;': '\u223c',
'simdot;': '\u2a6a',
'sime;': '\u2243',
'simeq;': '\u2243',
'simg;': '\u2a9e',
'simgE;': '\u2aa0',
'siml;': '\u2a9d',
'simlE;': '\u2a9f',
'simne;': '\u2246',
'simplus;': '\u2a24',
'simrarr;': '\u2972',
'slarr;': '\u2190',
'SmallCircle;': '\u2218',
'smallsetminus;': '\u2216',
'smashp;': '\u2a33',
'smeparsl;': '\u29e4',
'smid;': '\u2223',
'smile;': '\u2323',
'smt;': '\u2aaa',
'smte;': '\u2aac',
'smtes;': '\u2aac\ufe00',
'SOFTcy;': '\u042c',
'softcy;': '\u044c',
'sol;': '/',
'solb;': '\u29c4',
'solbar;': '\u233f',
'Sopf;': '\U0001d54a',
'sopf;': '\U0001d564',
'spades;': '\u2660',
'spadesuit;': '\u2660',
'spar;': '\u2225',
'sqcap;': '\u2293',
'sqcaps;': '\u2293\ufe00',
'sqcup;': '\u2294',
'sqcups;': '\u2294\ufe00',
'Sqrt;': '\u221a',
'sqsub;': '\u228f',
'sqsube;': '\u2291',
'sqsubset;': '\u228f',
'sqsubseteq;': '\u2291',
'sqsup;': '\u2290',
'sqsupe;': '\u2292',
'sqsupset;': '\u2290',
'sqsupseteq;': '\u2292',
'squ;': '\u25a1',
'Square;': '\u25a1',
'square;': '\u25a1',
'SquareIntersection;': '\u2293',
'SquareSubset;': '\u228f',
'SquareSubsetEqual;': '\u2291',
'SquareSuperset;': '\u2290',
'SquareSupersetEqual;': '\u2292',
'SquareUnion;': '\u2294',
'squarf;': '\u25aa',
'squf;': '\u25aa',
'srarr;': '\u2192',
'Sscr;': '\U0001d4ae',
'sscr;': '\U0001d4c8',
'ssetmn;': '\u2216',
'ssmile;': '\u2323',
'sstarf;': '\u22c6',
'Star;': '\u22c6',
'star;': '\u2606',
'starf;': '\u2605',
'straightepsilon;': '\u03f5',
'straightphi;': '\u03d5',
'strns;': '\xaf',
'Sub;': '\u22d0',
'sub;': '\u2282',
'subdot;': '\u2abd',
'subE;': '\u2ac5',
'sube;': '\u2286',
'subedot;': '\u2ac3',
'submult;': '\u2ac1',
'subnE;': '\u2acb',
'subne;': '\u228a',
'subplus;': '\u2abf',
'subrarr;': '\u2979',
'Subset;': '\u22d0',
'subset;': '\u2282',
'subseteq;': '\u2286',
'subseteqq;': '\u2ac5',
'SubsetEqual;': '\u2286',
'subsetneq;': '\u228a',
'subsetneqq;': '\u2acb',
'subsim;': '\u2ac7',
'subsub;': '\u2ad5',
'subsup;': '\u2ad3',
'succ;': '\u227b',
'succapprox;': '\u2ab8',
'succcurlyeq;': '\u227d',
'Succeeds;': '\u227b',
'SucceedsEqual;': '\u2ab0',
'SucceedsSlantEqual;': '\u227d',
'SucceedsTilde;': '\u227f',
'succeq;': '\u2ab0',
'succnapprox;': '\u2aba',
'succneqq;': '\u2ab6',
'succnsim;': '\u22e9',
'succsim;': '\u227f',
'SuchThat;': '\u220b',
'Sum;': '\u2211',
'sum;': '\u2211',
'sung;': '\u266a',
'sup1': '\xb9',
'sup1;': '\xb9',
'sup2': '\xb2',
'sup2;': '\xb2',
'sup3': '\xb3',
'sup3;': '\xb3',
'Sup;': '\u22d1',
'sup;': '\u2283',
'supdot;': '\u2abe',
'supdsub;': '\u2ad8',
'supE;': '\u2ac6',
'supe;': '\u2287',
'supedot;': '\u2ac4',
'Superset;': '\u2283',
'SupersetEqual;': '\u2287',
'suphsol;': '\u27c9',
'suphsub;': '\u2ad7',
'suplarr;': '\u297b',
'supmult;': '\u2ac2',
'supnE;': '\u2acc',
'supne;': '\u228b',
'supplus;': '\u2ac0',
'Supset;': '\u22d1',
'supset;': '\u2283',
'supseteq;': '\u2287',
'supseteqq;': '\u2ac6',
'supsetneq;': '\u228b',
'supsetneqq;': '\u2acc',
'supsim;': '\u2ac8',
'supsub;': '\u2ad4',
'supsup;': '\u2ad6',
'swarhk;': '\u2926',
'swArr;': '\u21d9',
'swarr;': '\u2199',
'swarrow;': '\u2199',
'swnwar;': '\u292a',
'szlig': '\xdf',
'szlig;': '\xdf',
'Tab;': '\t',
'target;': '\u2316',
'Tau;': '\u03a4',
'tau;': '\u03c4',
'tbrk;': '\u23b4',
'Tcaron;': '\u0164',
'tcaron;': '\u0165',
'Tcedil;': '\u0162',
'tcedil;': '\u0163',
'Tcy;': '\u0422',
'tcy;': '\u0442',
'tdot;': '\u20db',
'telrec;': '\u2315',
'Tfr;': '\U0001d517',
'tfr;': '\U0001d531',
'there4;': '\u2234',
'Therefore;': '\u2234',
'therefore;': '\u2234',
'Theta;': '\u0398',
'theta;': '\u03b8',
'thetasym;': '\u03d1',
'thetav;': '\u03d1',
'thickapprox;': '\u2248',
'thicksim;': '\u223c',
'ThickSpace;': '\u205f\u200a',
'thinsp;': '\u2009',
'ThinSpace;': '\u2009',
'thkap;': '\u2248',
'thksim;': '\u223c',
'THORN': '\xde',
'thorn': '\xfe',
'THORN;': '\xde',
'thorn;': '\xfe',
'Tilde;': '\u223c',
'tilde;': '\u02dc',
'TildeEqual;': '\u2243',
'TildeFullEqual;': '\u2245',
'TildeTilde;': '\u2248',
'times': '\xd7',
'times;': '\xd7',
'timesb;': '\u22a0',
'timesbar;': '\u2a31',
'timesd;': '\u2a30',
'tint;': '\u222d',
'toea;': '\u2928',
'top;': '\u22a4',
'topbot;': '\u2336',
'topcir;': '\u2af1',
'Topf;': '\U0001d54b',
'topf;': '\U0001d565',
'topfork;': '\u2ada',
'tosa;': '\u2929',
'tprime;': '\u2034',
'TRADE;': '\u2122',
'trade;': '\u2122',
'triangle;': '\u25b5',
'triangledown;': '\u25bf',
'triangleleft;': '\u25c3',
'trianglelefteq;': '\u22b4',
'triangleq;': '\u225c',
'triangleright;': '\u25b9',
'trianglerighteq;': '\u22b5',
'tridot;': '\u25ec',
'trie;': '\u225c',
'triminus;': '\u2a3a',
'TripleDot;': '\u20db',
'triplus;': '\u2a39',
'trisb;': '\u29cd',
'tritime;': '\u2a3b',
'trpezium;': '\u23e2',
'Tscr;': '\U0001d4af',
'tscr;': '\U0001d4c9',
'TScy;': '\u0426',
'tscy;': '\u0446',
'TSHcy;': '\u040b',
'tshcy;': '\u045b',
'Tstrok;': '\u0166',
'tstrok;': '\u0167',
'twixt;': '\u226c',
'twoheadleftarrow;': '\u219e',
'twoheadrightarrow;': '\u21a0',
'Uacute': '\xda',
'uacute': '\xfa',
'Uacute;': '\xda',
'uacute;': '\xfa',
'Uarr;': '\u219f',
'uArr;': '\u21d1',
'uarr;': '\u2191',
'Uarrocir;': '\u2949',
'Ubrcy;': '\u040e',
'ubrcy;': '\u045e',
'Ubreve;': '\u016c',
'ubreve;': '\u016d',
'Ucirc': '\xdb',
'ucirc': '\xfb',
'Ucirc;': '\xdb',
'ucirc;': '\xfb',
'Ucy;': '\u0423',
'ucy;': '\u0443',
'udarr;': '\u21c5',
'Udblac;': '\u0170',
'udblac;': '\u0171',
'udhar;': '\u296e',
'ufisht;': '\u297e',
'Ufr;': '\U0001d518',
'ufr;': '\U0001d532',
'Ugrave': '\xd9',
'ugrave': '\xf9',
'Ugrave;': '\xd9',
'ugrave;': '\xf9',
'uHar;': '\u2963',
'uharl;': '\u21bf',
'uharr;': '\u21be',
'uhblk;': '\u2580',
'ulcorn;': '\u231c',
'ulcorner;': '\u231c',
'ulcrop;': '\u230f',
'ultri;': '\u25f8',
'Umacr;': '\u016a',
'umacr;': '\u016b',
'uml': '\xa8',
'uml;': '\xa8',
'UnderBar;': '_',
'UnderBrace;': '\u23df',
'UnderBracket;': '\u23b5',
'UnderParenthesis;': '\u23dd',
'Union;': '\u22c3',
'UnionPlus;': '\u228e',
'Uogon;': '\u0172',
'uogon;': '\u0173',
'Uopf;': '\U0001d54c',
'uopf;': '\U0001d566',
'UpArrow;': '\u2191',
'Uparrow;': '\u21d1',
'uparrow;': '\u2191',
'UpArrowBar;': '\u2912',
'UpArrowDownArrow;': '\u21c5',
'UpDownArrow;': '\u2195',
'Updownarrow;': '\u21d5',
'updownarrow;': '\u2195',
'UpEquilibrium;': '\u296e',
'upharpoonleft;': '\u21bf',
'upharpoonright;': '\u21be',
'uplus;': '\u228e',
'UpperLeftArrow;': '\u2196',
'UpperRightArrow;': '\u2197',
'Upsi;': '\u03d2',
'upsi;': '\u03c5',
'upsih;': '\u03d2',
'Upsilon;': '\u03a5',
'upsilon;': '\u03c5',
'UpTee;': '\u22a5',
'UpTeeArrow;': '\u21a5',
'upuparrows;': '\u21c8',
'urcorn;': '\u231d',
'urcorner;': '\u231d',
'urcrop;': '\u230e',
'Uring;': '\u016e',
'uring;': '\u016f',
'urtri;': '\u25f9',
'Uscr;': '\U0001d4b0',
'uscr;': '\U0001d4ca',
'utdot;': '\u22f0',
'Utilde;': '\u0168',
'utilde;': '\u0169',
'utri;': '\u25b5',
'utrif;': '\u25b4',
'uuarr;': '\u21c8',
'Uuml': '\xdc',
'uuml': '\xfc',
'Uuml;': '\xdc',
'uuml;': '\xfc',
'uwangle;': '\u29a7',
'vangrt;': '\u299c',
'varepsilon;': '\u03f5',
'varkappa;': '\u03f0',
'varnothing;': '\u2205',
'varphi;': '\u03d5',
'varpi;': '\u03d6',
'varpropto;': '\u221d',
'vArr;': '\u21d5',
'varr;': '\u2195',
'varrho;': '\u03f1',
'varsigma;': '\u03c2',
'varsubsetneq;': '\u228a\ufe00',
'varsubsetneqq;': '\u2acb\ufe00',
'varsupsetneq;': '\u228b\ufe00',
'varsupsetneqq;': '\u2acc\ufe00',
'vartheta;': '\u03d1',
'vartriangleleft;': '\u22b2',
'vartriangleright;': '\u22b3',
'Vbar;': '\u2aeb',
'vBar;': '\u2ae8',
'vBarv;': '\u2ae9',
'Vcy;': '\u0412',
'vcy;': '\u0432',
'VDash;': '\u22ab',
'Vdash;': '\u22a9',
'vDash;': '\u22a8',
'vdash;': '\u22a2',
'Vdashl;': '\u2ae6',
'Vee;': '\u22c1',
'vee;': '\u2228',
'veebar;': '\u22bb',
'veeeq;': '\u225a',
'vellip;': '\u22ee',
'Verbar;': '\u2016',
'verbar;': '|',
'Vert;': '\u2016',
'vert;': '|',
'VerticalBar;': '\u2223',
'VerticalLine;': '|',
'VerticalSeparator;': '\u2758',
'VerticalTilde;': '\u2240',
'VeryThinSpace;': '\u200a',
'Vfr;': '\U0001d519',
'vfr;': '\U0001d533',
'vltri;': '\u22b2',
'vnsub;': '\u2282\u20d2',
'vnsup;': '\u2283\u20d2',
'Vopf;': '\U0001d54d',
'vopf;': '\U0001d567',
'vprop;': '\u221d',
'vrtri;': '\u22b3',
'Vscr;': '\U0001d4b1',
'vscr;': '\U0001d4cb',
'vsubnE;': '\u2acb\ufe00',
'vsubne;': '\u228a\ufe00',
'vsupnE;': '\u2acc\ufe00',
'vsupne;': '\u228b\ufe00',
'Vvdash;': '\u22aa',
'vzigzag;': '\u299a',
'Wcirc;': '\u0174',
'wcirc;': '\u0175',
'wedbar;': '\u2a5f',
'Wedge;': '\u22c0',
'wedge;': '\u2227',
'wedgeq;': '\u2259',
'weierp;': '\u2118',
'Wfr;': '\U0001d51a',
'wfr;': '\U0001d534',
'Wopf;': '\U0001d54e',
'wopf;': '\U0001d568',
'wp;': '\u2118',
'wr;': '\u2240',
'wreath;': '\u2240',
'Wscr;': '\U0001d4b2',
'wscr;': '\U0001d4cc',
'xcap;': '\u22c2',
'xcirc;': '\u25ef',
'xcup;': '\u22c3',
'xdtri;': '\u25bd',
'Xfr;': '\U0001d51b',
'xfr;': '\U0001d535',
'xhArr;': '\u27fa',
'xharr;': '\u27f7',
'Xi;': '\u039e',
'xi;': '\u03be',
'xlArr;': '\u27f8',
'xlarr;': '\u27f5',
'xmap;': '\u27fc',
'xnis;': '\u22fb',
'xodot;': '\u2a00',
'Xopf;': '\U0001d54f',
'xopf;': '\U0001d569',
'xoplus;': '\u2a01',
'xotime;': '\u2a02',
'xrArr;': '\u27f9',
'xrarr;': '\u27f6',
'Xscr;': '\U0001d4b3',
'xscr;': '\U0001d4cd',
'xsqcup;': '\u2a06',
'xuplus;': '\u2a04',
'xutri;': '\u25b3',
'xvee;': '\u22c1',
'xwedge;': '\u22c0',
'Yacute': '\xdd',
'yacute': '\xfd',
'Yacute;': '\xdd',
'yacute;': '\xfd',
'YAcy;': '\u042f',
'yacy;': '\u044f',
'Ycirc;': '\u0176',
'ycirc;': '\u0177',
'Ycy;': '\u042b',
'ycy;': '\u044b',
'yen': '\xa5',
'yen;': '\xa5',
'Yfr;': '\U0001d51c',
'yfr;': '\U0001d536',
'YIcy;': '\u0407',
'yicy;': '\u0457',
'Yopf;': '\U0001d550',
'yopf;': '\U0001d56a',
'Yscr;': '\U0001d4b4',
'yscr;': '\U0001d4ce',
'YUcy;': '\u042e',
'yucy;': '\u044e',
'yuml': '\xff',
'Yuml;': '\u0178',
'yuml;': '\xff',
'Zacute;': '\u0179',
'zacute;': '\u017a',
'Zcaron;': '\u017d',
'zcaron;': '\u017e',
'Zcy;': '\u0417',
'zcy;': '\u0437',
'Zdot;': '\u017b',
'zdot;': '\u017c',
'zeetrf;': '\u2128',
'ZeroWidthSpace;': '\u200b',
'Zeta;': '\u0396',
'zeta;': '\u03b6',
'Zfr;': '\u2128',
'zfr;': '\U0001d537',
'ZHcy;': '\u0416',
'zhcy;': '\u0436',
'zigrarr;': '\u21dd',
'Zopf;': '\u2124',
'zopf;': '\U0001d56b',
'Zscr;': '\U0001d4b5',
'zscr;': '\U0001d4cf',
'zwj;': '\u200d',
'zwnj;': '\u200c',
}
# maps the Unicode codepoint to the HTML entity name
codepoint2name = {}
# maps the HTML entity name to the character
# (or a character reference if the character is outside the Latin-1 range)
entitydefs = {}
for (name, codepoint) in name2codepoint.items():
codepoint2name[codepoint] = name
entitydefs[name] = chr(codepoint)
del name, codepoint
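# Illustrative check (not part of the original module): since name2codepoint
# maps e.g. 'gt' to 0x3E, the loop above yields codepoint2name[0x3E] == 'gt'
# and entitydefs['gt'] == '>'.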
| gpl-3.0 |
talhajaved/nyuadmarket | flask/lib/python2.7/site-packages/werkzeug/debug/__init__.py | 310 | 7800 | # -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import json
import mimetypes
from os.path import join, dirname, basename, isfile
from werkzeug.wrappers import BaseRequest as Request, BaseResponse as Response
from werkzeug.debug.tbtools import get_current_traceback, render_console_html
from werkzeug.debug.console import Console
from werkzeug.security import gen_salt
#: import this here because it once was documented as being available
#: from this module. In case there are users left ...
from werkzeug.debug.repr import debug_repr
class _ConsoleFrame(object):
"""Helper class so that we can reuse the frame console code for the
standalone console.
"""
def __init__(self, namespace):
self.console = Console(namespace)
self.id = 0
class DebuggedApplication(object):
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
.. versionadded:: 0.9
The `lodgeit_url` parameter was deprecated.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
    :param request_key: The key that points to the request object in the
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
"""
# this class is public
__module__ = 'werkzeug'
def __init__(self, app, evalex=False, request_key='werkzeug.request',
console_path='/console', console_init_func=None,
show_hidden_frames=False, lodgeit_url=None):
if lodgeit_url is not None:
from warnings import warn
warn(DeprecationWarning('Werkzeug now pastes into gists.'))
if not console_init_func:
console_init_func = dict
self.app = app
self.evalex = evalex
self.frames = {}
self.tracebacks = {}
self.request_key = request_key
self.console_path = console_path
self.console_init_func = console_init_func
self.show_hidden_frames = show_hidden_frames
self.secret = gen_salt(20)
def debug_application(self, environ, start_response):
"""Run the application and conserve the traceback frames."""
app_iter = None
try:
app_iter = self.app(environ, start_response)
for item in app_iter:
yield item
if hasattr(app_iter, 'close'):
app_iter.close()
except Exception:
if hasattr(app_iter, 'close'):
app_iter.close()
traceback = get_current_traceback(skip=1, show_hidden_frames=
self.show_hidden_frames,
ignore_system_exceptions=True)
for frame in traceback.frames:
self.frames[frame.id] = frame
self.tracebacks[traceback.id] = traceback
try:
start_response('500 INTERNAL SERVER ERROR', [
('Content-Type', 'text/html; charset=utf-8'),
# Disable Chrome's XSS protection, the debug
# output can cause false-positives.
('X-XSS-Protection', '0'),
])
except Exception:
# if we end up here there has been output but an error
# occurred. in that situation we can do nothing fancy any
# more, better log something into the error log and fall
# back gracefully.
environ['wsgi.errors'].write(
'Debugging middleware caught exception in streamed '
'response at a point where response headers were already '
'sent.\n')
else:
yield traceback.render_full(evalex=self.evalex,
secret=self.secret) \
.encode('utf-8', 'replace')
traceback.log(environ['wsgi.errors'])
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype='text/html')
def display_console(self, request):
"""Display a standalone shell."""
if 0 not in self.frames:
self.frames[0] = _ConsoleFrame(self.console_init_func())
return Response(render_console_html(secret=self.secret),
mimetype='text/html')
def paste_traceback(self, request, traceback):
"""Paste the traceback and return a JSON response."""
rv = traceback.paste()
return Response(json.dumps(rv), mimetype='application/json')
def get_source(self, request, frame):
"""Render the source viewer."""
return Response(frame.render_source(), mimetype='text/html')
def get_resource(self, request, filename):
"""Return a static resource from the shared folder."""
filename = join(dirname(__file__), 'shared', basename(filename))
if isfile(filename):
mimetype = mimetypes.guess_type(filename)[0] \
or 'application/octet-stream'
f = open(filename, 'rb')
try:
return Response(f.read(), mimetype=mimetype)
finally:
f.close()
return Response('Not Found', status=404)
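    # Illustrative request shapes handled by __call__ below (not exhaustive;
    # parameter names taken from the dispatch code that follows):
    #   GET /any/path?__debugger__=yes&cmd=resource&f=debugger.js      -> static asset
    #   GET /any/path?__debugger__=yes&cmd=<expr>&frm=<id>&s=<secret>  -> console eval (evalex only)
    #   GET <console_path>                                             -> standalone shell (evalex only)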
def __call__(self, environ, start_response):
"""Dispatch the requests."""
# important: don't ever access a function here that reads the incoming
# form data! Otherwise the application won't have access to that data
# any more!
request = Request(environ)
response = self.debug_application
if request.args.get('__debugger__') == 'yes':
cmd = request.args.get('cmd')
arg = request.args.get('f')
secret = request.args.get('s')
traceback = self.tracebacks.get(request.args.get('tb', type=int))
frame = self.frames.get(request.args.get('frm', type=int))
if cmd == 'resource' and arg:
response = self.get_resource(request, arg)
elif cmd == 'paste' and traceback is not None and \
secret == self.secret:
response = self.paste_traceback(request, traceback)
elif cmd == 'source' and frame and self.secret == secret:
response = self.get_source(request, frame)
elif self.evalex and cmd is not None and frame is not None and \
self.secret == secret:
response = self.execute_command(request, cmd, frame)
elif self.evalex and self.console_path is not None and \
request.path == self.console_path:
response = self.display_console(request)
return response(environ, start_response)
| mit |
fevxie/odoo | addons/membership/report/report_membership.py | 313 | 5267 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
import openerp.addons.decimal_precision as dp
STATE = [
('none', 'Non Member'),
('canceled', 'Cancelled Member'),
('old', 'Old Member'),
('waiting', 'Waiting Member'),
('invoiced', 'Invoiced Member'),
('free', 'Free Member'),
('paid', 'Paid Member'),
]
class report_membership(osv.osv):
'''Membership Analysis'''
_name = 'report.membership'
_description = __doc__
_auto = False
_rec_name = 'start_date'
_columns = {
'start_date': fields.date('Start Date', readonly=True),
'date_to': fields.date('End Date', readonly=True, help="End membership date"),
'num_waiting': fields.integer('# Waiting', readonly=True),
'num_invoiced': fields.integer('# Invoiced', readonly=True),
'num_paid': fields.integer('# Paid', readonly=True),
'tot_pending': fields.float('Pending Amount', digits_compute= dp.get_precision('Account'), readonly=True),
'tot_earned': fields.float('Earned Amount', digits_compute= dp.get_precision('Account'), readonly=True),
'partner_id': fields.many2one('res.partner', 'Member', readonly=True),
'associate_member_id': fields.many2one('res.partner', 'Associate Member', readonly=True),
'membership_id': fields.many2one('product.product', 'Membership Product', readonly=True),
'membership_state': fields.selection(STATE, 'Current Membership State', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'quantity': fields.integer("Quantity", readonly=True),
}
def init(self, cr):
'''Create the view'''
tools.drop_view_if_exists(cr, 'report_membership')
cr.execute("""
CREATE OR REPLACE VIEW report_membership AS (
SELECT
MIN(id) AS id,
partner_id,
count(membership_id) as quantity,
user_id,
membership_state,
associate_member_id,
membership_amount,
date_to,
start_date,
COUNT(num_waiting) AS num_waiting,
COUNT(num_invoiced) AS num_invoiced,
COUNT(num_paid) AS num_paid,
SUM(tot_pending) AS tot_pending,
SUM(tot_earned) AS tot_earned,
membership_id,
company_id
FROM
(SELECT
MIN(p.id) AS id,
p.id AS partner_id,
p.user_id AS user_id,
p.membership_state AS membership_state,
p.associate_member AS associate_member_id,
p.membership_amount AS membership_amount,
p.membership_stop AS date_to,
p.membership_start AS start_date,
CASE WHEN ml.state = 'waiting' THEN ml.id END AS num_waiting,
CASE WHEN ml.state = 'invoiced' THEN ml.id END AS num_invoiced,
CASE WHEN ml.state = 'paid' THEN ml.id END AS num_paid,
CASE WHEN ml.state IN ('waiting', 'invoiced') THEN SUM(il.price_subtotal) ELSE 0 END AS tot_pending,
CASE WHEN ml.state = 'paid' OR p.membership_state = 'old' THEN SUM(il.price_subtotal) ELSE 0 END AS tot_earned,
ml.membership_id AS membership_id,
p.company_id AS company_id
FROM res_partner p
LEFT JOIN membership_membership_line ml ON (ml.partner = p.id)
LEFT JOIN account_invoice_line il ON (ml.account_invoice_line = il.id)
LEFT JOIN account_invoice ai ON (il.invoice_id = ai.id)
WHERE p.membership_state != 'none' and p.active = 'true'
GROUP BY
p.id,
p.user_id,
p.membership_state,
p.associate_member,
p.membership_amount,
p.membership_start,
ml.membership_id,
p.company_id,
ml.state,
ml.id
) AS foo
GROUP BY
start_date,
date_to,
partner_id,
user_id,
membership_id,
company_id,
membership_state,
associate_member_id,
membership_amount
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shnizzedy/SM_openSMILE | openSMILE_preprocessing/mxf_to_wav.py | 1 | 1348 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
mxf_to_wav.py
Script to quickly convert an mxf file to a waveform file.
Author:
– Jon Clucas, 2016 ([email protected])
© 2016, Child Mind Institute, Apache v2.0 License
Created on Fri Dec 23 12:43:40 2016
@author: jon.clucas
"""
import argparse, subprocess
from os import path
def mxf_to_wav(in_file):
# make an output filename
    # str.strip() removes characters, not a suffix, so drop the .mxf/.MXF
    # extension with splitext instead of strip('.mxf')
    out_base = path.splitext(path.basename(in_file))[0]
out_i = 0
out_file = path.join(path.dirname(in_file), ''.join([out_base, '.wav']))
while path.exists(out_file):
out_file = path.join(path.dirname(in_file), ''.join([out_base, '_',
str(out_i), '.wav']))
out_i = out_i + 1
# do the conversion verbosely
to_convert = ''.join(["ffmpeg -i ", in_file, " -ac 2 -acodec pcm_s16le ",
out_file])
print(''.join(["Converting ", in_file, " to ", out_file]))
subprocess.call(to_convert, shell = True)
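# Example invocation (assumes ffmpeg is available on PATH):
#   python mxf_to_wav.py /path/to/clip.MXF
# which writes /path/to/clip.wav (or clip_0.wav, clip_1.wav, ... if that name
# is already taken).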
def main():
# script can be run from the command line
parser = argparse.ArgumentParser(description='get mxf')
parser.add_argument('in_file', metavar='in_file', type=str)
arg = parser.parse_args()
mxf_to_wav(arg.in_file)
# ============================================================================
if __name__ == '__main__':
main()
| apache-2.0 |
mistercrunch/airflow | airflow/contrib/sensors/aws_glue_catalog_partition_sensor.py | 7 | 1222 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.glue_catalog_partition`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.sensors.glue_catalog_partition import AwsGlueCatalogPartitionSensor # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.sensors.glue_catalog_partition`.",
DeprecationWarning,
stacklevel=2,
)
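# Migration note (illustrative): imports such as
#   from airflow.contrib.sensors.aws_glue_catalog_partition_sensor import AwsGlueCatalogPartitionSensor
# keep working through this shim, but new code should import the sensor from
# airflow.providers.amazon.aws.sensors.glue_catalog_partition instead.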
| apache-2.0 |
vmax-feihu/hue | desktop/core/ext-py/requests-2.6.0/requests/packages/chardet/universaldetector.py | 1776 | 6840 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2122, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
            sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
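# Illustrative usage (not part of the original module):
#   detector = UniversalDetector()
#   detector.feed(b'\xef\xbb\xbfhello')   # UTF-8 BOM is recognised immediately
#   detector.close()
#   detector.result  # -> {'encoding': 'UTF-8-SIG', 'confidence': 1.0}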
| apache-2.0 |
Plain-Andy-legacy/android_external_chromium_org | tools/valgrind/asan/asan_symbolize.py | 26 | 1774 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from third_party import asan_symbolize
import os
import sys
class LineBuffered(object):
"""Disable buffering on a file object."""
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
if '\n' in data:
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
def disable_buffering():
"""Makes this process and child processes stdout unbuffered."""
if not os.environ.get('PYTHONUNBUFFERED'):
# Since sys.stdout is a C++ object, it's impossible to do
# sys.stdout.write = lambda...
sys.stdout = LineBuffered(sys.stdout)
os.environ['PYTHONUNBUFFERED'] = 'x'
def set_symbolizer_path():
"""Set the path to the llvm-symbolize binary in the Chromium source tree."""
if not os.environ.get('LLVM_SYMBOLIZER_PATH'):
script_dir = os.path.dirname(os.path.abspath(__file__))
# Assume this script resides three levels below src/ (i.e.
# src/tools/valgrind/asan/).
src_root = os.path.join(script_dir, "..", "..", "..")
symbolizer_path = os.path.join(src_root, 'third_party',
'llvm-build', 'Release+Asserts', 'bin', 'llvm-symbolizer')
assert(os.path.isfile(symbolizer_path))
os.environ['LLVM_SYMBOLIZER_PATH'] = os.path.abspath(symbolizer_path)
def main():
disable_buffering()
set_symbolizer_path()
asan_symbolize.demangle = True
asan_symbolize.fix_filename_patterns = sys.argv[1:]
asan_symbolize.logfile = sys.stdin
loop = asan_symbolize.SymbolizationLoop()
loop.process_logfile()
if __name__ == '__main__':
main()
| bsd-3-clause |
nanditav/15712-TensorFlow | tensorflow/python/kernel_tests/summary_image_op_test.py | 14 | 4001 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for summary image op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.ops import image_ops
class SummaryImageOpTest(tf.test.TestCase):
def _AsSummary(self, s):
summ = tf.Summary()
summ.ParseFromString(s)
return summ
def _CheckProto(self, image_summ, shape):
"""Verify that the non-image parts of the image_summ proto match shape."""
# Only the first 3 images are returned.
for v in image_summ.value:
v.image.ClearField("encoded_image_string")
expected = '\n'.join("""
value {
tag: "img/image/%d"
image { height: %d width: %d colorspace: %d }
}""" % ((i,) + shape[1:]) for i in xrange(3))
self.assertProtoEquals(expected, image_summ)
def testImageSummary(self):
np.random.seed(7)
for depth in (1, 3, 4):
for positive in False, True:
with self.test_session(graph=tf.Graph()) as sess:
shape = (4, 5, 7) + (depth,)
bad_color = [255, 0, 0, 255][:depth]
# Build a mostly random image with one nan
const = np.random.randn(*shape).astype(np.float32)
const[0, 1, 2] = 0 # Make the nan entry not the max
if positive:
const = 1 + np.maximum(const, 0)
scale = 255 / const.reshape(4, -1).max(axis=1)
offset = 0
else:
scale = 127 / np.abs(const.reshape(4, -1)).max(axis=1)
offset = 128
adjusted = np.floor(scale[:, None, None, None] * const + offset)
const[0, 1, 2, depth // 2] = np.nan
# Summarize
summ = tf.summary.image("img", const)
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
image_summ = self._AsSummary(value)
# Decode the first image and check consistency
image = image_ops.decode_png(
image_summ.value[0].image.encoded_image_string).eval()
self.assertAllEqual(image[1, 2], bad_color)
image[1, 2] = adjusted[0, 1, 2]
self.assertAllClose(image, adjusted[0])
# Check the rest of the proto
self._CheckProto(image_summ, shape)
def testImageSummaryUint8(self):
np.random.seed(7)
for depth in (1, 3, 4):
with self.test_session(graph=tf.Graph()) as sess:
shape = (4, 5, 7) + (depth,)
# Build a random uint8 image
images = np.random.randint(256, size=shape).astype(np.uint8)
tf_images = tf.convert_to_tensor(images)
self.assertEqual(tf_images.dtype, tf.uint8)
# Summarize
summ = tf.summary.image("img", tf_images)
value = sess.run(summ)
self.assertEqual([], summ.get_shape())
image_summ = self._AsSummary(value)
# Decode the first image and check consistency.
# Since we're uint8, everything should be exact.
image = image_ops.decode_png(
image_summ.value[0].image.encoded_image_string).eval()
self.assertAllEqual(image, images[0])
# Check the rest of the proto
self._CheckProto(image_summ, shape)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
StephenWeber/ansible | lib/ansible/modules/cloud/openstack/os_server.py | 11 | 23393 | #!/usr/bin/python
# coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <[email protected]>
# Copyright (c) 2013, John Dewey <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: os_server
short_description: Create/Delete Compute Instances from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Create or Remove compute instances from OpenStack.
options:
name:
description:
- Name that has to be given to the instance
required: true
image:
description:
- The name or id of the base image to boot.
required: true
image_exclude:
description:
- Text to use to filter image names, for the case, such as HP, where
there are multiple image names matching the common identifying
portions. image_exclude is a negative match filter - it is text that
may not exist in the image name. Defaults to "(deprecated)"
flavor:
description:
- The name or id of the flavor in which the new instance has to be
created. Mutually exclusive with flavor_ram
required: false
default: 1
flavor_ram:
description:
- The minimum amount of ram in MB that the flavor in which the new
instance has to be created must have. Mutually exclusive with flavor.
required: false
default: 1
flavor_include:
description:
- Text to use to filter flavor names, for the case, such as Rackspace,
where there are multiple flavors that have the same ram count.
flavor_include is a positive match filter - it must exist in the
flavor name.
key_name:
description:
       - The key pair name to be used when creating an instance
required: false
default: None
security_groups:
description:
- Names of the security groups to which the instance should be
added. This may be a YAML list or a comma separated string.
required: false
default: None
network:
description:
- Name or ID of a network to attach this instance to. A simpler
version of the nics parameter, only one of network or nics should
be supplied.
required: false
default: None
nics:
description:
- A list of networks to which the instance's interface should
be attached. Networks may be referenced by net-id/net-name/port-id
or port-name.
- 'Also this accepts a string containing a list of (net/port)-(id/name)
Eg: nics: "net-id=uuid-1,port-name=myport"
Only one of network or nics should be supplied.'
required: false
default: None
auto_ip:
description:
- Ensure instance has public ip however the cloud wants to do that
required: false
default: 'yes'
aliases: ['auto_floating_ip', 'public_ip']
floating_ips:
description:
- list of valid floating IPs that pre-exist to assign to this node
required: false
default: None
floating_ip_pools:
description:
- Name of floating IP pool from which to choose a floating IP
required: false
default: None
meta:
description:
- 'A list of key value pairs that should be provided as a metadata to
the new instance or a string containing a list of key-value pairs.
Eg: meta: "key1=value1,key2=value2"'
required: false
default: None
wait:
description:
- If the module should wait for the instance to be created.
required: false
default: 'yes'
timeout:
description:
- The amount of time the module should wait for the instance to get
into active state.
required: false
default: 180
config_drive:
description:
- Whether to boot the server with config drive enabled
required: false
default: 'no'
userdata:
description:
- Opaque blob of data which is made available to the instance
required: false
default: None
boot_from_volume:
description:
- Should the instance boot from a persistent volume created based on
         the image given. Mutually exclusive with boot_volume.
required: false
default: false
volume_size:
description:
- The size of the volume to create in GB if booting from volume based
on an image.
boot_volume:
description:
- Volume name or id to use as the volume to boot from. Implies
boot_from_volume. Mutually exclusive with image and boot_from_volume.
required: false
default: None
aliases: ['root_volume']
terminate_volume:
description:
- If true, delete volume when deleting instance (if booted from volume)
default: false
volumes:
description:
- A list of preexisting volumes names or ids to attach to the instance
required: false
default: []
scheduler_hints:
description:
- Arbitrary key/value pairs to the scheduler for custom use
required: false
default: None
version_added: "2.1"
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
delete_fip:
description:
- When I(state) is absent and this option is true, any floating IP
associated with the instance will be deleted along with the instance.
required: false
default: false
version_added: "2.2"
reuse_ips:
description:
- When I(auto_ip) is true and this option is true, the I(auto_ip) code
will attempt to re-use unassigned floating ips in the project before
creating a new one. It is important to note that it is impossible
to safely do this concurrently, so if your use case involves
concurrent server creation, it is highly recommended to set this to
false and to delete the floating ip associated with a server when
the server is deleted using I(delete_fip).
required: false
default: true
version_added: "2.2"
availability_zone:
description:
- Availability zone in which to create the server.
required: false
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
- name: Create a new instance and attaches to a network and passes metadata to the instance
os_server:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: vm1
image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
key_name: ansible_key
timeout: 200
flavor: 4
nics:
- net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
- net-name: another_network
meta:
hostname: test1
group: uge_master
# Create a new instance in HP Cloud AE1 region availability zone az2 and
# automatically assigns a floating IP
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: username
password: Equality7-2521
project_name: username-project1
name: vm1
region_name: region-b.geo-1
availability_zone: az2
image: 9302692b-b787-4b52-a3a6-daebb79cb498
key_name: test
timeout: 200
flavor: 101
security_groups: default
auto_ip: yes
# Create a new instance in named cloud mordred availability zone az2
# and assigns a pre-known floating IP
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
state: present
cloud: mordred
name: vm1
availability_zone: az2
image: 9302692b-b787-4b52-a3a6-daebb79cb498
key_name: test
timeout: 200
flavor: 101
floating_ips:
- 12.34.56.79
# Create a new instance with 4G of RAM on Ubuntu Trusty, ignoring
# deprecated images
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
name: vm1
state: present
cloud: mordred
region_name: region-b.geo-1
image: Ubuntu Server 14.04
image_exclude: deprecated
flavor_ram: 4096
# Create a new instance with 4G of RAM on Ubuntu Trusty on a Performance node
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
name: vm1
cloud: rax-dfw
state: present
image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
flavor_ram: 4096
flavor_include: Performance
# Creates a new instance and attaches to multiple network
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance with a string
os_server:
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: vm1
image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
key_name: ansible_key
timeout: 200
flavor: 4
nics: "net-id=4cb08b20-62fe-11e5-9d70-feff819cdc9f,net-id=542f0430-62fe-11e5-9d70-feff819cdc9f..."
- name: Creates a new instance and attaches to a network and passes metadata to the instance
os_server:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: vm1
image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
key_name: ansible_key
timeout: 200
flavor: 4
nics:
- net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
- net-name: another_network
meta: "hostname=test1,group=uge_master"
- name: Creates a new instance and attaches to a specific network
os_server:
state: present
auth:
auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
username: admin
password: admin
project_name: admin
name: vm1
image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
key_name: ansible_key
timeout: 200
flavor: 4
network: another_network
# Create a new instance with 4G of RAM on a 75G Ubuntu Trusty volume
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
name: vm1
state: present
cloud: mordred
region_name: ams01
image: Ubuntu Server 14.04
flavor_ram: 4096
boot_from_volume: True
volume_size: 75
# Creates a new instance with 2 volumes attached
- name: launch a compute instance
hosts: localhost
tasks:
- name: launch an instance
os_server:
name: vm1
state: present
cloud: mordred
region_name: ams01
image: Ubuntu Server 14.04
flavor_ram: 4096
volumes:
- photos
- music
'''
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _exit_hostvars(module, cloud, server, changed=True):
hostvars = meta.get_hostvars_from_server(cloud, server)
module.exit_json(
changed=changed, server=server, id=server.id, openstack=hostvars)
def _parse_nics(nics):
for net in nics:
if isinstance(net, str):
for nic in net.split(','):
yield dict((nic.split('='),))
else:
yield net
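# Illustrative: _parse_nics(["net-id=uuid-1,port-name=myport"]) yields
# {'net-id': 'uuid-1'} and {'port-name': 'myport'}; dict entries pass through
# unchanged.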
def _network_args(module, cloud):
args = []
nics = module.params['nics']
if not isinstance(nics, list):
module.fail_json(msg='The \'nics\' parameter must be a list.')
for net in _parse_nics(nics):
if not isinstance(net, dict):
module.fail_json(
msg='Each entry in the \'nics\' parameter must be a dict.')
if net.get('net-id'):
args.append(net)
elif net.get('net-name'):
by_name = cloud.get_network(net['net-name'])
if not by_name:
module.fail_json(
msg='Could not find network by net-name: %s' %
net['net-name'])
args.append({'net-id': by_name['id']})
elif net.get('port-id'):
args.append(net)
elif net.get('port-name'):
by_name = cloud.get_port(net['port-name'])
if not by_name:
module.fail_json(
msg='Could not find port by port-name: %s' %
net['port-name'])
args.append({'port-id': by_name['id']})
return args
def _parse_meta(meta):
if isinstance(meta, str):
metas = {}
for kv_str in meta.split(","):
k, v = kv_str.split("=")
metas[k] = v
return metas
if not meta:
return {}
return meta
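# Illustrative: _parse_meta("hostname=test1,group=uge_master") returns
# {'hostname': 'test1', 'group': 'uge_master'}; a dict passes through
# unchanged and an empty value becomes {}.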
def _delete_server(module, cloud):
try:
cloud.delete_server(
module.params['name'], wait=module.params['wait'],
timeout=module.params['timeout'],
delete_ips=module.params['delete_fip'])
except Exception as e:
module.fail_json(msg="Error in deleting vm: %s" % e.message)
module.exit_json(changed=True, result='deleted')
def _create_server(module, cloud):
flavor = module.params['flavor']
flavor_ram = module.params['flavor_ram']
flavor_include = module.params['flavor_include']
image_id = None
if not module.params['boot_volume']:
image_id = cloud.get_image_id(
module.params['image'], module.params['image_exclude'])
if not image_id:
module.fail_json(msg="Could not find image %s" %
module.params['image'])
if flavor:
flavor_dict = cloud.get_flavor(flavor)
if not flavor_dict:
module.fail_json(msg="Could not find flavor %s" % flavor)
else:
flavor_dict = cloud.get_flavor_by_ram(flavor_ram, flavor_include)
if not flavor_dict:
module.fail_json(msg="Could not find any matching flavor")
nics = _network_args(module, cloud)
module.params['meta'] = _parse_meta(module.params['meta'])
bootkwargs = dict(
name=module.params['name'],
image=image_id,
flavor=flavor_dict['id'],
nics=nics,
meta=module.params['meta'],
security_groups=module.params['security_groups'],
userdata=module.params['userdata'],
config_drive=module.params['config_drive'],
)
for optional_param in (
'key_name', 'availability_zone', 'network',
'scheduler_hints', 'volume_size', 'volumes'):
if module.params[optional_param]:
bootkwargs[optional_param] = module.params[optional_param]
server = cloud.create_server(
ip_pool=module.params['floating_ip_pools'],
ips=module.params['floating_ips'],
auto_ip=module.params['auto_ip'],
boot_volume=module.params['boot_volume'],
boot_from_volume=module.params['boot_from_volume'],
terminate_volume=module.params['terminate_volume'],
reuse_ips=module.params['reuse_ips'],
wait=module.params['wait'], timeout=module.params['timeout'],
**bootkwargs
)
_exit_hostvars(module, cloud, server)
def _update_server(module, cloud, server):
changed = False
module.params['meta'] = _parse_meta(module.params['meta'])
# cloud.set_server_metadata only updates the key=value pairs, it doesn't
# touch existing ones
update_meta = {}
for (k, v) in module.params['meta'].items():
if k not in server.metadata or server.metadata[k] != v:
update_meta[k] = v
if update_meta:
cloud.set_server_metadata(server, update_meta)
changed = True
# Refresh server vars
server = cloud.get_server(module.params['name'])
return (changed, server)
def _delete_floating_ip_list(cloud, server, extra_ips):
for ip in extra_ips:
cloud.nova_client.servers.remove_floating_ip(
server=server.id, address=ip)
def _check_floating_ips(module, cloud, server):
changed = False
auto_ip = module.params['auto_ip']
floating_ips = module.params['floating_ips']
floating_ip_pools = module.params['floating_ip_pools']
if floating_ip_pools or floating_ips or auto_ip:
ips = openstack_find_nova_addresses(server.addresses, 'floating')
if not ips:
# If we're configured to have a floating but we don't have one,
# let's add one
server = cloud.add_ips_to_server(
server,
auto_ip=auto_ip,
ips=floating_ips,
ip_pool=floating_ip_pools,
wait=module.params['wait'],
timeout=module.params['timeout'],
)
changed = True
elif floating_ips:
# we were configured to have specific ips, let's make sure we have
# those
missing_ips = []
for ip in floating_ips:
if ip not in ips:
missing_ips.append(ip)
if missing_ips:
server = cloud.add_ip_list(server, missing_ips,
wait=module.params['wait'],
timeout=module.params['timeout'])
changed = True
extra_ips = []
for ip in ips:
if ip not in floating_ips:
extra_ips.append(ip)
if extra_ips:
_delete_floating_ip_list(cloud, server, extra_ips)
changed = True
return (changed, server)
def _get_server_state(module, cloud):
state = module.params['state']
server = cloud.get_server(module.params['name'])
if server and state == 'present':
if server.status not in ('ACTIVE', 'SHUTOFF', 'PAUSED', 'SUSPENDED'):
module.fail_json(
msg="The instance is available but not Active state: "
+ server.status)
(ip_changed, server) = _check_floating_ips(module, cloud, server)
(server_changed, server) = _update_server(module, cloud, server)
_exit_hostvars(module, cloud, server, ip_changed or server_changed)
if server and state == 'absent':
return True
if state == 'absent':
module.exit_json(changed=False, result="not present")
return True
def main():
argument_spec = openstack_full_argument_spec(
name = dict(required=True),
image = dict(default=None),
image_exclude = dict(default='(deprecated)'),
flavor = dict(default=None),
flavor_ram = dict(default=None, type='int'),
flavor_include = dict(default=None),
key_name = dict(default=None),
security_groups = dict(default=['default'], type='list'),
network = dict(default=None),
nics = dict(default=[], type='list'),
meta = dict(default=None, type='raw'),
userdata = dict(default=None, aliases=['user_data']),
config_drive = dict(default=False, type='bool'),
auto_ip = dict(default=True, type='bool', aliases=['auto_floating_ip', 'public_ip']),
floating_ips = dict(default=None, type='list'),
floating_ip_pools = dict(default=None, type='list'),
volume_size = dict(default=False, type='int'),
boot_from_volume = dict(default=False, type='bool'),
boot_volume = dict(default=None, aliases=['root_volume']),
terminate_volume = dict(default=False, type='bool'),
volumes = dict(default=[], type='list'),
scheduler_hints = dict(default=None, type='dict'),
state = dict(default='present', choices=['absent', 'present']),
delete_fip = dict(default=False, type='bool'),
reuse_ips = dict(default=True, type='bool'),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['auto_ip', 'floating_ips'],
['auto_ip', 'floating_ip_pools'],
['floating_ips', 'floating_ip_pools'],
['flavor', 'flavor_ram'],
['image', 'boot_volume'],
['boot_from_volume', 'boot_volume'],
['nics', 'network'],
],
required_if=[
('boot_from_volume', True, ['volume_size', 'image']),
],
)
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
image = module.params['image']
boot_volume = module.params['boot_volume']
flavor = module.params['flavor']
flavor_ram = module.params['flavor_ram']
if state == 'present':
if not (image or boot_volume):
module.fail_json(
msg="Parameter 'image' or 'boot_volume' is required "
"if state == 'present'"
)
if not flavor and not flavor_ram:
module.fail_json(
msg="Parameter 'flavor' or 'flavor_ram' is required "
"if state == 'present'"
)
try:
cloud_params = dict(module.params)
cloud_params.pop('userdata', None)
cloud = shade.openstack_cloud(**cloud_params)
if state == 'present':
_get_server_state(module, cloud)
_create_server(module, cloud)
elif state == 'absent':
_get_server_state(module, cloud)
_delete_server(module, cloud)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e), extra_data=e.extra_data)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
VIkramx89/Flights-and-Hotels | functionality/advanced_search.py | 2 | 4423 | '''
'''
from validations import ViewValidations
from exceptions import CustomExceptions
from utility import DBConnectivity
list_of_flight=[]
def search_advance():
try:
p_source=input("Enter the source:")
ViewValidations.validate_source(p_source)
p_destination=input("Enter the Destination:")
ViewValidations.validate_destination(p_destination)
print("Available options are:\n")
print("=====================")
res1=get_flight_with1Hop(p_source,p_destination)
res2=get_flight_with2Hop(p_source,p_destination)
if(res1==False and res2==False):
print("No option")
except CustomExceptions.InvalidSourceException as e:
print(e)
except CustomExceptions.InvalidDestinationException as e:
print(e)
def get_flight_with1Hop(p_source,p_destination):
try:
flag=0
con=DBConnectivity.create_connection()
cur=DBConnectivity.create_cursor(con)
cur.execute("select t1.flightid,t1.flightname,t1.source,t1.destination,t1.departuretime,t1.arrivaltime,t2.flightid,t2.flightname,t2.source,t2.destination,t2.departuretime,t2.arrivaltime from flight_details t1,flight_details t2 where t1.destination=t2.source and t1.source=:source and t2.destination=:destination",{"source":p_source,"destination":p_destination})
for row in cur:
flag=1
arr1=row[5]
dep2=row[10]
if(validate_timing(arr1,dep2)):
print_flight(row,1)
if(flag==1):
return True
else:
return False
finally:
cur.close()
con.close()
def get_flight_with2Hop(p_source,p_destination):
try:
flag=0
con=DBConnectivity.create_connection()
cur=DBConnectivity.create_cursor(con)
cur.execute("select t1.flightid,t1.flightname,t1.source,t1.destination,t1.departuretime,t1.arrivaltime,t2.flightid,t2.flightname,t2.source,t2.destination,t2.departuretime,t2.arrivaltime,t3.flightid,t3.flightname,t3.source,t3.destination,t3.departuretime,t3.arrivaltime from flight_details t1,flight_details t2,flight_details t3 where t1.destination=t2.source and t2.destination=t3.source and t1.source=:source and t3.destination=:destination",{"source":p_source,"destination":p_destination})
for row in cur:
flag=1
arr1=row[5]
dep2=row[10]
if(validate_timing(arr1,dep2)):
arr2=row[11]
dep3=row[16]
if(validate_timing(arr2,dep3)):
print_flight(row,2)
if(flag==1):
return True
else:
return False
finally:
cur.close()
con.close()
def validate_timing(arr1,dep2):
dephr=int(dep2[0:2])
depmin=int(dep2[3:])
deptime=dephr*60+depmin
arrhr=int(arr1[0:2])
arrmin=int(arr1[3:])
arrtime=arrhr*60+arrmin
if(arrtime<deptime):
return True
else:
return False
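# Illustrative: with "HH:MM" strings, validate_timing("10:30", "12:05") is True
# (the first leg lands at 10:30 and the connecting flight leaves at 12:05),
# while validate_timing("12:05", "10:30") is False.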
def print_flight(row,flag):
if(flag==1):
dur1=cal_duration(row[4],row[5])
dur2=cal_duration(row[10],row[11])
print("Flightid Dep Time Duration Source Destination")
print(row[0],"\t",row[4],"\t",dur1,"\t",row[2],"\t",row[3])
print("\n")
print(row[6],"\t",row[10],"\t",dur2,"\t",row[8],"\t",row[9])
print("--------------------------------------------------------------------------------------------------")
else:
dur1=cal_duration(row[4],row[5])
dur2=cal_duration(row[10],row[11])
dur3=cal_duration(row[16],row[17])
print("Flightid Dep Time Duration Source Destination")
print(row[0],"\t",row[4],"\t",dur1,"\t",row[2],"\t",row[3])
print("\n")
print(row[6],"\t",row[10],"\t",dur2,"\t",row[8],"\t",row[9])
print("\n")
print(row[12],"\t",row[16],"\t",dur3,"\t",row[14],"\t",row[15])
print("--------------------------------------------------------------------------------------------------")
def cal_duration(dep,arr):
dephr=int(dep[0:2])
depmin=int(dep[3:])
arrhr=int(arr[0:2])
arrmin=int(arr[3:])
duration=(arrhr*60+arrmin)-(dephr*60+depmin)
durhr=int(duration/60)
durmin=duration%60
if(int(durmin)!=0):
duration1=str(durhr)+"hrs "+str(durmin)+"mins"
else:
duration1=str(durhr)+"hrs "
return duration1
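# Illustrative: cal_duration("10:30", "12:05") returns "1hrs 35mins" and
# cal_duration("10:00", "12:00") returns "2hrs " (minutes are omitted when zero).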
| epl-1.0 |
user-none/calibre | src/calibre/web/feeds/recipes/__init__.py | 13 | 1888 | #!/usr/bin/env python2
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net>'
'''
Builtin recipes.
'''
import re, time, io
from calibre.web.feeds.news import (BasicNewsRecipe, CustomIndexRecipe,
AutomaticNewsRecipe, CalibrePeriodical)
from calibre.web.feeds.jsnews import JavascriptRecipe
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.utils.config import JSONConfig
basic_recipes = (BasicNewsRecipe, AutomaticNewsRecipe, CustomIndexRecipe,
CalibrePeriodical, JavascriptRecipe)
custom_recipes = JSONConfig('custom_recipes/index.json')
def custom_recipe_filename(id_, title):
from calibre.utils.filenames import ascii_filename
return ascii_filename(title[:50]) + \
('_%s.recipe'%id_)
def compile_recipe(src):
'''
Compile the code in src and return a recipe object, if found.
:param src: Python source code as bytestring or unicode object
:return: Recipe class or None, if no such class was found in src
'''
if not isinstance(src, unicode):
match = re.search(r'coding[:=]\s*([-\w.]+)', src[:200])
enc = match.group(1) if match else 'utf-8'
src = src.decode(enc)
# Python complains if there is a coding declaration in a unicode string
src = re.sub(r'^#.*coding\s*[:=]\s*([-\w.]+)', '#', src.lstrip(u'\ufeff'), flags=re.MULTILINE)
# Translate newlines to \n
src = io.StringIO(src, newline=None).getvalue()
namespace = {
'BasicNewsRecipe':BasicNewsRecipe,
'AutomaticNewsRecipe':AutomaticNewsRecipe,
'time':time, 're':re,
'BeautifulSoup':BeautifulSoup
}
exec src in namespace
for x in namespace.itervalues():
if (isinstance(x, type) and issubclass(x, BasicNewsRecipe) and x not
in basic_recipes):
return x
return None
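# Illustrative usage (assuming a minimal recipe source string):
#   src = "class MyRecipe(BasicNewsRecipe):\n    title = 'Example'"
#   recipe_class = compile_recipe(src)  # -> MyRecipe, or None if no recipe class is found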
| gpl-3.0 |
solvcon/solvcon | solvcon/parcel/vewave/solver.py | 2 | 10148 | # -*- coding: UTF-8 -*-
#
# Copyright (c) 2012, Yung-Yu Chen <[email protected]>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
A two-/three-dimensional, second order CESE solver for generic linear PDEs. It
uses :py:mod:`solvcon./`.
"""
__all__ = [
'VewaveSolver', 'VewavePeriodic', 'VewaveBC', 'VewaveNonRefl',
'VewaveLongSineX',
]
import os
import warnings
import numpy as np
import solvcon as sc
from solvcon import solver
from solvcon import boundcond
# for readthedocs to work.
sc.import_module_may_fail('._algorithm')
class VewaveSolver(solver.MeshSolver):
"""This class controls the underneath algorithm :py:class:`VewaveAlgorithm
<._algorithm.VewaveAlgorithm>`.
"""
_interface_init_ = ['cecnd', 'cevol']
_solution_array_ = ['solt', 'sol', 'soln', 'dsol', 'dsoln']
def __init__(self, blk, mtrldict, **kw):
"""
A linear solver needs a :py:class:`Block <solvcon.block.Block>` and a
dictionary for mapping names to :py:class:`~.material.Material`:
>>> from solvcon import testing
>>> blk = testing.create_trivial_2d_blk()
>>> blk.clgrp.fill(0)
>>> blk.grpnames.append('blank')
>>> svr = VewaveSolver(blk, {}) # doctest: +ELLIPSIS
"""
super(VewaveSolver, self).__init__(blk, **kw)
# meta data.
ndim = blk.ndim
ncell = blk.ncell
ngstcell = blk.ngstcell
self.clcnd = blk.clcnd
fpdtype = 'float64'
self.neq = self.determine_neq(ndim)
#: A :py:class:`dict` that maps names to :py:class:`Material
#: <.material.Material>` object.
self.mtrldict = mtrldict if mtrldict else {}
#: A :py:class:`list` of all :py:class:`Material <.material.Material>`
#: objects.
self.mtrllist = None
# scheme parameters.
self.substep_run = 2
self.alpha = int(kw.pop('alpha', 1))
self.sigma0 = int(kw.pop('sigma0', 3.0))
self.taylor = float(kw.pop('taylor', 1)) # dirty hack.
self.cnbfac = float(kw.pop('cnbfac', 1.0)) # dirty hack.
self.sftfac = float(kw.pop('sftfac', 1.0)) # dirty hack.
self.taumin = float(kw.pop('taumin', 0.0))
self.tauscale = float(kw.pop('tauscale', 1.0))
# dual mesh.
self.cecnd = np.empty(
(ngstcell+ncell, blk.CLMFC+1, ndim), dtype=fpdtype)
self.cevol = np.empty(
(ngstcell+ncell, blk.CLMFC+1), dtype=fpdtype)
self.sfmrc = np.empty((ncell, blk.CLMFC, blk.FCMND, 2, ndim),
dtype=fpdtype)
# parameters.
self.grpda = np.empty((self.ngroup, self.gdlen), dtype=fpdtype)
nsca = kw.pop('nsca', 4)
nvec = kw.pop('nvec', 0)
self.amsca = np.empty((ngstcell+ncell, nsca), dtype=fpdtype)
self.amvec = np.empty((ngstcell+ncell, nvec, ndim), dtype=fpdtype)
# solutions.
neq = self.neq
self.sol = np.empty((ngstcell+ncell, neq), dtype=fpdtype)
self.soln = np.empty((ngstcell+ncell, neq), dtype=fpdtype)
self.solt = np.empty((ngstcell+ncell, neq), dtype=fpdtype)
self.dsol = np.empty((ngstcell+ncell, neq, ndim), dtype=fpdtype)
self.dsoln = np.empty((ngstcell+ncell, neq, ndim), dtype=fpdtype)
self.stm = np.empty((ngstcell+ncell, neq), dtype=fpdtype)
self.cfl = np.empty(ngstcell+ncell, dtype=fpdtype)
self.ocfl = np.empty(ngstcell+ncell, dtype=fpdtype)
alg = _algorithm.VewaveAlgorithm()
alg.setup_mesh(blk)
alg.setup_algorithm(self)
self.alg = alg
@staticmethod
def determine_neq(ndim):
return 45
@property
def gdlen(self):
return self.determine_neq(self.ndim)**2 * self.ndim
def init(self, **kw):
self.cevol.fill(0.0)
self.cecnd.fill(0.0)
self.alg.prepare_ce()
super(VewaveSolver, self).init(**kw)
self.sfmrc.fill(0.0)
self.alg.prepare_sf()
self._debug_check_array('sfmrc')
def provide(self):
super(VewaveSolver, self).provide()
self.grpda.fill(0)
def preloop(self):
# fill group data array.
self.mtrllist = self._build_mtrllist(self.grpnames, self.mtrldict)
for igrp in range(len(self.grpnames)):
mtrl = self.mtrllist[igrp]
jaco = self.grpda[igrp].reshape(self.neq, self.neq, self.ndim)
mjacos = mtrl.get_jacos(self.ndim)
for idm in range(self.ndim):
jaco[:,:,idm] = mjacos[idm,:,:]
# pre-calculate CFL.
self.alg.calc_cfl()
self.ocfl[:] = self.cfl[:]
# super method.
super(VewaveSolver, self).preloop()
@staticmethod
def _build_mtrllist(grpnames, mtrldict):
"""
Build the material list out of the mapping dict.
@type grpnames: list
@param mtrldict: the map from names to material objects.
@type mtrldict: dict
@return: the list of material object.
@rtype: Material
"""
mtrllist = list()
default_mtuple = mtrldict.get(None, None)
for grpname in grpnames:
try:
mtrl = mtrldict.get(grpname, default_mtuple)
except KeyError as e:
args = e.args[:]
args.append('no material named %s in mtrldict'%grpname)
e.args = args
raise
mtrllist.append(mtrl)
return mtrllist
def apply_bc(self):
super(VewaveSolver, self).apply_bc()
self.call_non_interface_bc('soln')
self.call_non_interface_bc('dsoln')
###########################################################################
# Begin marching algorithm.
@sc.MeshSolver.register_marcher
def update(self, worker=None):
self.alg.update(self.time, self.time_increment)
self.sol[:,:] = self.soln[:,:]
self.dsol[:,:,:] = self.dsoln[:,:,:]
@sc.MeshSolver.register_marcher
def calcsolt(self, worker=None):
#self.create_alg().calc_solt()
self.alg.calc_solt()
@sc.MeshSolver.register_marcher
def calcsoln(self, worker=None):
#self.create_alg().calc_soln()
self.alg.calc_soln()
@sc.MeshSolver.register_marcher
def ibcsoln(self, worker=None):
if worker: self.exchangeibc('soln', worker=worker)
@sc.MeshSolver.register_marcher
def bcsoln(self, worker=None):
self.call_non_interface_bc('soln')
@sc.MeshSolver.register_marcher
def calcdsoln(self, worker=None):
self.alg.calc_dsoln()
@sc.MeshSolver.register_marcher
def ibcdsoln(self, worker=None):
if worker: self.exchangeibc('dsoln', worker=worker)
@sc.MeshSolver.register_marcher
def bcdsoln(self, worker=None):
self.call_non_interface_bc('dsoln')
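    # For reference, the marchers registered in this section are, in
    # declaration order: update, calcsolt, calcsoln, ibcsoln, bcsoln,
    # calcdsoln, ibcdsoln, bcdsoln; the time-marching loop presumably invokes
    # them in this order for each of the two substeps (substep_run = 2).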
# End marching algorithm.
###########################################################################
class VewavePeriodic(boundcond.periodic):
"""
General periodic boundary condition for sequential runs.
"""
def init(self, **kw):
svr = self.svr
blk = svr.blk
ngstcell = blk.ngstcell
ngstface = blk.ngstface
facn = self.facn
slctm = self.rclp[:,0] + ngstcell
slctr = self.rclp[:,1] + ngstcell
# move coordinates.
shf = svr.cecnd[slctr,0,:] - blk.shfccnd[facn[:,2]+ngstface,:]
svr.cecnd[slctm,0,:] = blk.shfccnd[facn[:,0]+ngstface,:] + shf
def soln(self):
svr = self.svr
blk = svr.blk
slctm = self.rclp[:,0] + blk.ngstcell
slctr = self.rclp[:,1] + blk.ngstcell
svr.soln[slctm,:] = svr.soln[slctr,:]
def dsoln(self):
svr = self.svr
blk = svr.blk
slctm = self.rclp[:,0] + blk.ngstcell
slctr = self.rclp[:,1] + blk.ngstcell
svr.dsoln[slctm,:,:] = svr.dsoln[slctr,:,:]
class VewaveBC(boundcond.BC):
#: Ghost geometry calculator type.
_ghostgeom_ = None
@property
def alg(self):
return self.svr.alg
def init(self, **kw):
getattr(self.alg, 'ghostgeom_'+self._ghostgeom_)(self.facn)
class VewaveNonRefl(VewaveBC):
_ghostgeom_ = 'mirror'
def soln(self):
self.alg.bound_nonrefl_soln(self.facn)
def dsoln(self):
self.alg.bound_nonrefl_dsoln(self.facn)
class VewaveLongSineX(VewaveBC):
"""
Provide longitudinal wave in x-direction.
Wave is a sinusoidal wave.
"""
_ghostgeom_ = 'mirror'
def soln(self):
self.alg.bound_longsinex_soln(self.facn)
def dsoln(self):
self.alg.bound_longsinex_dsoln(self.facn)
# vim: set ff=unix fenc=utf8 ft=python ai et sw=4 ts=4 tw=79:
| bsd-3-clause |
acsone/odoo | addons/hr/res_config.py | 377 | 3452 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class hr_config_settings(osv.osv_memory):
_name = 'hr.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_hr_timesheet_sheet': fields.boolean('Allow timesheets validation by managers',
help ="""This installs the module hr_timesheet_sheet."""),
'module_hr_attendance': fields.boolean('Install attendances feature',
help ="""This installs the module hr_attendance."""),
'module_hr_timesheet': fields.boolean('Manage timesheets',
help ="""This installs the module hr_timesheet."""),
'module_hr_holidays': fields.boolean('Manage holidays, leaves and allocation requests',
help ="""This installs the module hr_holidays."""),
'module_hr_expense': fields.boolean('Manage employees expenses',
help ="""This installs the module hr_expense."""),
'module_hr_recruitment': fields.boolean('Manage the recruitment process',
help ="""This installs the module hr_recruitment."""),
'module_hr_contract': fields.boolean('Record contracts per employee',
help ="""This installs the module hr_contract."""),
'module_hr_evaluation': fields.boolean('Organize employees periodic evaluation',
help ="""This installs the module hr_evaluation."""),
'module_hr_gamification': fields.boolean('Drive engagement with challenges and badges',
help ="""This installs the module hr_gamification."""),
'module_account_analytic_analysis': fields.boolean('Allow invoicing based on timesheets (the sale application will be installed)',
help ="""This installs the module account_analytic_analysis, which will install sales management too."""),
'module_hr_payroll': fields.boolean('Manage payroll',
help ="""This installs the module hr_payroll."""),
}
def onchange_hr_timesheet(self, cr, uid, ids, timesheet, context=None):
""" module_hr_timesheet implies module_hr_attendance """
if timesheet:
return {'value': {'module_hr_attendance': True}}
return {}
def onchange_hr_attendance(self, cr, uid, ids, attendance, context=None):
""" module_hr_timesheet implies module_hr_attendance """
if not attendance:
return {'value': {'module_hr_timesheet': False}}
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
google/tritious | server/app.py | 1 | 15022 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webapp2
import logging
import json
from google.appengine.ext import ndb
from player import *
from map import *
from npc import *
def get_tiles():
return {
'404': { 'passable': False, 'img': 'img/404.png' },
'corner-bl': { 'passable': False, 'img': 'img/corner-bl.png' },
'corner-br': { 'passable': False, 'img': 'img/corner-br.png' },
'corner-tl': { 'passable': False, 'img': 'img/corner-tl.png' },
'corner-tr': { 'passable': False, 'img': 'img/corner-tr.png' },
'floor': { 'passable': True, 'img': 'img/floor.png' },
'grass': { 'passable': True, 'img': 'img/grass.png' },
'rock': { 'passable': False, 'img': 'img/rock.png' },
'wall-bottom': { 'passable': False, 'img': 'img/wall-bottom.png' },
'wall-left': { 'passable': True, 'img': 'img/wall-left.png' },
'wall-top': { 'passable': False, 'img': 'img/wall-top.png' },
'wall-right': { 'passable': True, 'img': 'img/wall-right.png' },
}
def handle_500(request, response, exception):
response.out.write("error")
def print_json(handler, obj):
# If for some reason you're copying my code, be careful with the next line;
# it shouldn't be used on sites with any private data!
handler.response.headers['Access-Control-Allow-Origin'] = '*'
handler.response.headers['Content-Type'] = 'application/json'
handler.response.out.write(json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': ')))
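# Example (made-up payload): print_json(handler, {'status': 0}) writes
# '{\n    "status": 0\n}' to the response with the JSON and CORS headers set.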
class ErrorRouter(webapp2.RequestHandler):
def get(self):
self.response.out.write("error")
class ApiErrorRouter(webapp2.RequestHandler):
def get(self):
print_json(self,
{
'status' : 404,
'error' : "not found"
}
)
class ApiMeRouter(webapp2.RequestHandler):
def get(self):
p = Player.get_current_player()
me = {
'status' : 0,
'name' : p.name,
#'name' : 'MyCharacter',
"position" : {"map": p.map, "x": p.x, "y": p.y },
"img" : "img/character.png",
"token" : p.create_token(),
}
p.put()
print_json(self, me)
class ApiMoveRouter(webapp2.RequestHandler):
def get(self):
p = Player.get_current_player()
if(p.verify_token(self.request.GET.get("token"))):
x = int(self.request.GET.get("x"))
y = int(self.request.GET.get("y"))
map = self.request.GET.get("map")
if(x < 0 or x > 100 or y < 0 or y > 100):
print_json(self, { 'status': 400, 'error': 'bad coordinates' })
else:
p.x = x
p.y = y
p.map = map
p.put()
print_json(self, { 'status': 0, 'msg': 'Location updated!' })
else:
print_json(self, { 'status': 400, 'error': 'Bad token' })
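# An illustrative request for the handler above (values are hypothetical):
#   GET /api/move?token=<player-token>&x=3&y=4&map=main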
class ApiRegisterRouter(webapp2.RequestHandler):
def get(self):
print_json(self, me)
class ApiEditMapRouter(webapp2.RequestHandler):
def get(self, map_id):
map = Map.get_map(map_id)
details = self.request.get("details")
Map.set_map(map_id, details)
print_json(self, { 'status': 0, 'msg': 'Saved!' })
class ApiEditNpcRouter(webapp2.RequestHandler):
def get(self, npc_id):
npc = Npc.get_npc(npc_id)
details = self.request.get("details")
Npc.set_npc(npc_id, details)
print_json(self, { 'status': 0, 'msg': 'Saved!' })
class ApiMapRouter(webapp2.RequestHandler):
def get(self, map_id):
map = Map.get_map(map_id)
if(map):
self.response.out.write(map)
return
if(map_id == "main"):
print_json(self, {
# The array of tiles
"map": [
# ["rock", "rock", "rock", "rock", "rock", "rock", "rock", "rock", "rock", "rock"],
# ["rock", "grass", "grass", "grass", "grass", "grass", "grass", "grass", "grass", "rock"],
# ["rock", "grass", "rock", "rock", "rock", "rock", "rock", "rock", "grass", "rock"],
# ["rock", "grass", "grass", "grass", "grass", "grass", "rock", "rock", "grass", "rock"],
# ["rock", "rock", "rock", "rock", "grass", "grass", "grass", "rock", "grass", "rock"],
# ["rock", "rock", "rock", "grass", "grass", "grass", "grass", "grass", "grass", "rock"],
# ["rock", "grass", "grass", "grass", "grass", "rock", "rock", "rock", "rock", "rock"],
# ["rock", "grass", "grass", "rock", "rock", "rock", "rock", "rock", "rock", "rock"],
# ["rock", "grass", "rock", "rock", "rock", "rock", "rock", "rock", "rock", "rock"],
# ["rock", "rock", "rock", "rock", "rock", "rock", "rock", "rock", "rock", "rock"],
["corner-tl", "wall-top", "wall-top", "wall-top", "wall-top", "wall-top", "wall-top", "wall-top", "wall-top", "corner-tr"],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "wall-bottom"], "wall-bottom", "wall-bottom", ["wall-right", "wall-bottom"], "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
["corner-bl", "wall-bottom", "wall-bottom", "wall-bottom", "wall-bottom", "wall-bottom", "wall-bottom", "wall-bottom", "wall-bottom", "corner-br"],
],
# NPCs
"npcs": [
{ "id": "knight", "x": 6, "y": 4, "img": "img/knight.png" },
{ "id": "desert", "x": 1, "y": 1, "img": "img/desertnpc.png" },
],
# Objects
"objects": [
{
"type": "exit",
"details": {
"newmap": "map2",
"newx": 1,
"newy": 1,
},
"x": 1,
"y": 8,
"img": "img/pipe.png",
}
],
# Tiles
"tiles": get_tiles(),
})
elif(map_id == "map2"):
print_json(self, {
# The array of tiles
"map": [
["corner-tl", "wall-top", "wall-top", "wall-top", "wall-top", "wall-top", "wall-top", "wall-top", "wall-top", "corner-tr"],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
[["wall-left", "floor"], "floor", "floor", "floor", "floor", "floor", "floor", "floor", "floor", ["wall-right", "floor"]],
["corner-bl", "wall-bottom", "wall-bottom", "wall-bottom", "wall-bottom", "wall-bottom", "wall-bottom", "wall-bottom", "wall-bottom", "corner-br"],
],
# NPCs
"npcs": [
{ "id": "agent", "x": 8, "y": 8, "img": "img/agent.png" },
],
# Objects
"objects": [
{
"type": "exit",
"details": {
"newmap": "main",
"newx": 4,
"newy": 4,
},
"x": 8,
"y": 1,
"img": "img/pipe.png",
}
],
# Tiles
"tiles": get_tiles(),
})
else:
print_json(self, {
# The array of tiles
"map": [
["404", "404", "404", "404", "404", "404", "404", "404", "404", "404"],
["404", "404", "404", "404", "404", "404", "404", "404", "404", "404"],
["404", "404", "404", "404", "404", "404", "404", "404", "404", "404"],
["404", "404", "404", "404", "404", "404", "404", "404", "404", "404"],
["404", "404", "404", "404", "404", "404", "404", "404", "404", "404"],
["404", "404", "404", "404", "404", "404", "404", "404", "404", "404"],
["404", "404", "404", "404", "404", "404", "404", "404", "404", "404"],
["404", "404", "404", "404", "404", "404", "404", "404", "404", "404"],
["404", "404", "404", "404", "404", "404", "404", "404", "404", "404"],
["404", "404", "404", "404", "404", "404", "404", "404", "404", "404"],
],
# NPCs
"npcs": [],
# Objects
"objects": [],
# Tiles
"tiles": {
"404": {
"img": "img/404.png",
"passable": False,
},
}
})
class ApiTilesRouter(webapp2.RequestHandler):
def get(self):
print_json(self, get_tiles())
class ApiNpcRouter(webapp2.RequestHandler):
def get(self, id):
result = {}
npc = Npc.get_npc(id)
if(npc):
self.response.out.write(npc)
return
if(id == "knight"):
            result = {
                "text": [
                    "Hello, player!",
                    "I may look scary...",
                    "...but I'm just a normal NPC.",
                    "...",
                    "Sorry if I disappointed you :(",
],
"name": "Knight",
}
elif(id == "desert"):
result = {
"text": [
"Hi, I think I got lost on my way to the desert",
":("
],
"name": "Desert explorer",
}
elif(id == "agent"):
result = {
"text": [
"Wow, you found the other map!"
],
"name": "Secret Agent",
}
else:
result = {
"text": [
"I may look like an NPC...",
"but I'm actually a 404",
],
"img": "img/404.png"
}
print_json(self, result)
class ApiPropertiesRouter(webapp2.RequestHandler):
def get(self):
print_json(self, {})
application = webapp2.WSGIApplication([
# Experimental API
('^/api/me', ApiMeRouter),
('^/api/move', ApiMoveRouter),
('^/api/editmap/(\w+)', ApiEditMapRouter),
('^/api/editnpc/(\w+)', ApiEditNpcRouter),
('^/api/map/(\w+)', ApiMapRouter),
('^/api/npc/(\w+)', ApiNpcRouter),
('^/api/tiles', ApiTilesRouter),
('^/api/register', ApiRegisterRouter),
('^/api/properties', ApiPropertiesRouter),
('^/api.*', ApiErrorRouter),
# Catch-all
('/.*', ErrorRouter),
], debug=False)
#application.error_handlers[500] = handle_500
| apache-2.0 |
junhuac/MQUIC | depot_tools/external_bin/gsutil/gsutil_4.15/gsutil/third_party/boto/tests/integration/iam/test_password_policy.py | 100 | 3943 | # Copyright (c) 2014 Rocket Internet AG.
# Luca Bruno <[email protected]>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
import time
from tests.compat import unittest
class IAMAccountPasswordPolicy(unittest.TestCase):
iam = True
def test_password_policy(self):
# A series of tests to check the password policy API
iam = boto.connect_iam()
# First preserve what is the current password policy
try:
initial_policy_result = iam.get_account_password_policy()
except boto.exception.BotoServerError as srv_error:
initial_policy = None
if srv_error.status != 404:
raise srv_error
# Update the policy and check it back
test_min_length = 88
iam.update_account_password_policy(minimum_password_length=test_min_length)
new_policy = iam.get_account_password_policy()
new_min_length = new_policy['get_account_password_policy_response']\
['get_account_password_policy_result']['password_policy']\
['minimum_password_length']
if test_min_length != int(new_min_length):
raise Exception("Failed to update account password policy")
# Delete the policy and check the correct deletion
test_policy = ''
iam.delete_account_password_policy()
try:
test_policy = iam.get_account_password_policy()
except boto.exception.BotoServerError as srv_error:
test_policy = None
if srv_error.status != 404:
raise srv_error
if test_policy is not None:
raise Exception("Failed to delete account password policy")
# Restore initial account password policy
if initial_policy:
p = initial_policy['get_account_password_policy_response']\
['get_account_password_policy_result']['password_policy']
iam.update_account_password_policy(minimum_password_length=int(p['minimum_password_length']),
allow_users_to_change_password=bool(p['allow_users_to_change_password']),
hard_expiry=bool(p['hard_expiry']),
max_password_age=int(p['max_password_age']),
password_reuse_prevention=int(p['password_reuse_prevention']),
require_lowercase_characters=bool(p['require_lowercase_characters']),
require_numbers=bool(p['require_numbers']),
require_symbols=bool(p['require_symbols']),
require_uppercase_characters=bool(p['require_uppercase_characters']))
| mit |
heke123/chromium-crosswalk | tools/usb_gadget/usb_descriptors_test.py | 95 | 6817 | #!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import hid_constants
import usb_descriptors
class DescriptorWithField(usb_descriptors.Descriptor):
pass
DescriptorWithField.AddField('bField', 'B')
class DescriptorWithDefault(usb_descriptors.Descriptor):
pass
DescriptorWithDefault.AddField('bDefault', 'B', default=42)
class DescriptorWithFixed(usb_descriptors.Descriptor):
pass
DescriptorWithFixed.AddFixedField('bFixed', 'B', 42)
class DescriptorWithComputed(usb_descriptors.Descriptor):
@property
def foo(self):
return 42
DescriptorWithComputed.AddComputedField('bComputed', 'B', 'foo')
class DescriptorWithDescriptors(usb_descriptors.DescriptorContainer):
pass
DescriptorWithDescriptors.AddField('bType', 'B')
class DescriptorTest(unittest.TestCase):
def test_default(self):
obj = DescriptorWithDefault()
self.assertEquals(obj.bDefault, 42)
def test_change_default(self):
obj = DescriptorWithDefault()
obj.bDefault = 1
self.assertEquals(obj.bDefault, 1)
def test_override_default(self):
obj = DescriptorWithDefault(bDefault=56)
self.assertEquals(obj.bDefault, 56)
def test_fixed(self):
obj = DescriptorWithFixed()
self.assertEquals(obj.bFixed, 42)
def test_set_fixed(self):
with self.assertRaises(RuntimeError):
DescriptorWithFixed(bFixed=1)
def test_modify_fixed(self):
obj = DescriptorWithFixed()
with self.assertRaises(RuntimeError):
obj.bFixed = 1
def test_computed(self):
obj = DescriptorWithComputed()
self.assertEquals(obj.bComputed, 42)
def test_set_computed(self):
with self.assertRaises(RuntimeError):
DescriptorWithComputed(bComputed=1)
def test_modify_computed(self):
obj = DescriptorWithComputed()
with self.assertRaises(RuntimeError):
obj.bComputed = 1
def test_unexpected(self):
with self.assertRaisesRegexp(TypeError, 'Unexpected'):
DescriptorWithField(bUnexpected=1)
def test_missing(self):
with self.assertRaisesRegexp(TypeError, 'Missing'):
DescriptorWithField()
def test_size(self):
obj = DescriptorWithField(bField=42)
self.assertEquals(obj.struct_size, 1)
self.assertEquals(obj.total_size, 1)
def test_encode(self):
obj = DescriptorWithField(bField=0xff)
self.assertEquals(obj.Encode(), '\xff')
def test_string(self):
obj = DescriptorWithField(bField=42)
string = str(obj)
self.assertIn('bField', string)
self.assertIn('42', string)
def test_container(self):
parent = DescriptorWithDescriptors(bType=0)
child1 = DescriptorWithField(bField=1)
parent.Add(child1)
child2 = DescriptorWithField(bField=2)
parent.Add(child2)
self.assertEquals(parent.total_size, 3)
self.assertEquals(parent.Encode(), '\x00\x01\x02')
string = str(parent)
self.assertIn('bType', string)
self.assertIn('bField', string)
class TestUsbDescriptors(unittest.TestCase):
def test_device_descriptor(self):
device_desc = usb_descriptors.DeviceDescriptor(
idVendor=0xDEAD,
idProduct=0xBEEF,
bcdDevice=0x0100,
bNumConfigurations=1)
self.assertEquals(
device_desc.Encode(),
'\x12\x01\x00\x02\x00\x00\x00\x40\xAD\xDE\xEF\xBE\x00\x01\x00\x00\x00'
'\x01')
def test_unique_interfaces(self):
interface_desc1 = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1)
interface_desc2 = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1,
bAlternateSetting=1)
interface_desc3 = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1)
configuration_desc = usb_descriptors.ConfigurationDescriptor(
bmAttributes=0xC0,
MaxPower=100)
configuration_desc.AddInterface(interface_desc1)
configuration_desc.AddInterface(interface_desc2)
with self.assertRaisesRegexp(RuntimeError, r'Interface 1 \(alternate 0\)'):
configuration_desc.AddInterface(interface_desc3)
def test_unique_endpoints(self):
endpoint_desc1 = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x01,
bmAttributes=0x02,
wMaxPacketSize=64,
bInterval=1)
endpoint_desc2 = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x81,
bmAttributes=0x02,
wMaxPacketSize=64,
bInterval=1)
endpoint_desc3 = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x01,
bmAttributes=0x01,
wMaxPacketSize=32,
bInterval=10)
interface_desc = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1)
interface_desc.AddEndpoint(endpoint_desc1)
interface_desc.AddEndpoint(endpoint_desc2)
with self.assertRaisesRegexp(RuntimeError, 'Endpoint 0x01 already defined'):
interface_desc.AddEndpoint(endpoint_desc3)
def test_configuration_descriptor(self):
endpoint_desc = usb_descriptors.EndpointDescriptor(
bEndpointAddress=0x01,
bmAttributes=0x02,
wMaxPacketSize=64,
bInterval=1)
encoded_endpoint = '\x07\x05\x01\x02\x40\x00\x01'
self.assertEquals(endpoint_desc.Encode(), encoded_endpoint)
interface_desc = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1)
interface_desc.AddEndpoint(endpoint_desc)
self.assertEquals([endpoint_desc], interface_desc.GetEndpoints())
encoded_interface = ('\x09\x04\x01\x00\x01\xFF\xFF\xFF\x00' +
encoded_endpoint)
self.assertEquals(interface_desc.Encode(), encoded_interface)
configuration_desc = usb_descriptors.ConfigurationDescriptor(
bmAttributes=0xC0,
MaxPower=100)
configuration_desc.AddInterface(interface_desc)
self.assertEquals([interface_desc], configuration_desc.GetInterfaces())
encoded_configuration = ('\x09\x02\x19\x00\x01\x01\x00\xC0\x64' +
encoded_interface)
self.assertEquals(configuration_desc.Encode(), encoded_configuration)
def test_encode_hid_descriptor(self):
hid_desc = usb_descriptors.HidDescriptor()
hid_desc.AddDescriptor(hid_constants.DescriptorType.REPORT, 0x80)
hid_desc.AddDescriptor(hid_constants.DescriptorType.PHYSICAL, 0x60)
encoded_desc = '\x0C\x21\x11\x01\x00\x02\x22\x80\x00\x23\x60\x00'
self.assertEquals(hid_desc.Encode(), encoded_desc)
def test_print_hid_descriptor(self):
hid_desc = usb_descriptors.HidDescriptor()
hid_desc.AddDescriptor(hid_constants.DescriptorType.REPORT, 0x80)
hid_desc.AddDescriptor(hid_constants.DescriptorType.PHYSICAL, 0x60)
string = str(hid_desc)
self.assertIn('0x22', string)
self.assertIn('0x23', string)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
iti-luebeck/BEEP | Software/beep_framework/beep_msgs/src/beep_msgs/msg/_MyColor.py | 1 | 3197 | """autogenerated by genpy from beep_msgs/MyColor.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class MyColor(genpy.Message):
_md5sum = "e0a9a6ca70c4e4476c27f745dfa4c480"
_type = "beep_msgs/MyColor"
_has_header = False #flag to mark the presence of a Header object
_full_text = """uint8 r
uint8 g
uint8 b
uint8 w
"""
__slots__ = ['r','g','b','w']
_slot_types = ['uint8','uint8','uint8','uint8']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
r,g,b,w
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(MyColor, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.r is None:
self.r = 0
if self.g is None:
self.g = 0
if self.b is None:
self.b = 0
if self.w is None:
self.w = 0
else:
self.r = 0
self.g = 0
self.b = 0
self.w = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_4B.pack(_x.r, _x.g, _x.b, _x.w))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 4
(_x.r, _x.g, _x.b, _x.w,) = _struct_4B.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_4B.pack(_x.r, _x.g, _x.b, _x.w))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 4
(_x.r, _x.g, _x.b, _x.w,) = _struct_4B.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_4B = struct.Struct("<4B")
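# Illustrative use (buf is a hypothetical StringIO/BytesIO-like buffer):
#   MyColor(r=255, g=128, b=0, w=0).serialize(buf)
# packs the four uint8 fields into buf via _struct_4B.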
| bsd-3-clause |
krux/zookeeper-pkg | src/contrib/huebrowser/zkui/src/zkui/urls.py | 114 | 1304 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('zkui',
url(r'^$', 'views.index'),
url(r'view/(?P<id>\d+)$', 'views.view'),
url(r'clients/(?P<host>.+)$', 'views.clients'),
url(r'tree/(?P<id>\d+)(?P<path>.+)$', 'views.tree'),
url(r'create/(?P<id>\d+)(?P<path>.*)$', 'views.create'),
url(r'delete/(?P<id>\d+)(?P<path>.*)$', 'views.delete'),
url(r'edit/base64/(?P<id>\d+)(?P<path>.*)$', 'views.edit_as_base64'),
url(r'edit/text/(?P<id>\d+)(?P<path>.*)$', 'views.edit_as_text')
)
| apache-2.0 |
icloudrnd/automation_tools | openstack_dashboard/dashboards/project/data_processing/data_sources/urls.py | 39 | 1311 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns
from django.conf.urls import url
import openstack_dashboard.dashboards.project.data_processing. \
data_sources.views as views
urlpatterns = patterns('',
url(r'^$', views.DataSourcesView.as_view(),
name='index'),
url(r'^$', views.DataSourcesView.as_view(),
name='data-sources'),
url(r'^create-data-source$',
views.CreateDataSourceView.as_view(),
name='create-data-source'),
url(r'^(?P<data_source_id>[^/]+)$',
views.DataSourceDetailsView.as_view(),
name='details'))
| apache-2.0 |
was4444/chromium.src | third_party/WebKit/Tools/Scripts/webkitpy/w3c/test_importer_unittest.py | 1 | 3217 | # Copyright (C) 2013 Adobe Systems Incorporated. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import optparse
import shutil
import tempfile
import unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive2, ScriptError
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.w3c.test_importer import TestImporter
FAKE_SOURCE_DIR = '/blink/w3c'
FAKE_REPO_DIR = '/blink'
FAKE_FILES = {
'/blink/w3c/empty_dir/README.txt': '',
'/mock-checkout/third_party/WebKit/LayoutTests/w3c/README.txt': '',
'/mock-checkout/third_party/WebKit/LayoutTests/W3CImportExpectations': '',
}
class TestImporterTest(unittest.TestCase):
def test_import_dir_with_no_tests_and_no_hg(self):
host = MockHost()
host.executive = MockExecutive2(exception=OSError())
host.filesystem = MockFileSystem(files=FAKE_FILES)
importer = TestImporter(host, FAKE_SOURCE_DIR, FAKE_REPO_DIR, optparse.Values(
{"overwrite": False, 'destination': 'w3c', 'ignore_expectations': False}))
oc = OutputCapture()
oc.capture_output()
try:
importer.do_import()
finally:
oc.restore_output()
def test_import_dir_with_no_tests(self):
host = MockHost()
host.executive = MockExecutive2(exception=ScriptError(
"abort: no repository found in '/Volumes/Source/src/wk/Tools/Scripts/webkitpy/w3c' (.hg not found)!"))
host.filesystem = MockFileSystem(files=FAKE_FILES)
importer = TestImporter(host, FAKE_SOURCE_DIR, FAKE_REPO_DIR, optparse.Values(
{"overwrite": False, 'destination': 'w3c', 'ignore_expectations': False}))
oc = OutputCapture()
oc.capture_output()
try:
importer.do_import()
finally:
oc.restore_output()
# FIXME: Needs more tests.
| bsd-3-clause |
myusuf3/courtside | game/factories.py | 8 | 1871 | import factory
import datetime
from game import models
from register.models import Player, Sport
from django.contrib.auth.models import User
class SportFactory(factory.django.DjangoModelFactory):
FACTORY_FOR = Sport
sport = factory.Iterator(['soccer', 'basketball', 'volleyball', 'hockey', 'baseball'], cycle=False)
class UserFactory(factory.django.DjangoModelFactory):
FACTORY_FOR = User
username = factory.Sequence(lambda n: 'username{0}'.format(n))
first_name = factory.Sequence(lambda n: 'Mahdi{0}'.format(n))
last_name = factory.Sequence(lambda n: 'Yusuf{0}'.format(n))
email = factory.Sequence(lambda n: 'mahdi{0}@gmail.com'.format(n))
class PlayerFactory(factory.django.DjangoModelFactory):
FACTORY_FOR = Player
user = factory.SubFactory(UserFactory)
@factory.sequence
def gender(n):
if n % 2 == 0:
return 'F'
else:
return 'M'
@factory.post_generation
def sports(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for sport in extracted:
self.sports.add(sport)
class GameFactory(factory.django.DjangoModelFactory):
FACTORY_FOR = models.Game
FACTORY_HIDDEN_ARGS = ('now',)
now = factory.LazyAttribute(lambda o: datetime.datetime.utcnow())
start_date_and_time = factory.LazyAttribute(lambda o: o.now + datetime.timedelta(days=4))
latitude = factory.Sequence(lambda n: '45.{0}'.format(n))
longitude = factory.Sequence(lambda n: '75.{0}'.format(n))
minimum_players = 3
@factory.sequence
def active(n):
return True
@factory.post_generation
def players(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for player in extracted:
self.players.add(player)
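# Rough usage sketch (assumed, not taken from a test suite):
#   sport = SportFactory()
#   players = PlayerFactory.create_batch(2, sports=[sport])
#   game = GameFactory(players=players)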
| mit |
amaozhao/basecms | unidecode/x0d3.py | 253 | 4705 | data = (
'tim', # 0x00
'tib', # 0x01
'tibs', # 0x02
'tis', # 0x03
'tiss', # 0x04
'ting', # 0x05
'tij', # 0x06
'tic', # 0x07
'tik', # 0x08
'tit', # 0x09
'tip', # 0x0a
'tih', # 0x0b
'pa', # 0x0c
'pag', # 0x0d
'pagg', # 0x0e
'pags', # 0x0f
'pan', # 0x10
'panj', # 0x11
'panh', # 0x12
'pad', # 0x13
'pal', # 0x14
'palg', # 0x15
'palm', # 0x16
'palb', # 0x17
'pals', # 0x18
'palt', # 0x19
'palp', # 0x1a
'palh', # 0x1b
'pam', # 0x1c
'pab', # 0x1d
'pabs', # 0x1e
'pas', # 0x1f
'pass', # 0x20
'pang', # 0x21
'paj', # 0x22
'pac', # 0x23
'pak', # 0x24
'pat', # 0x25
'pap', # 0x26
'pah', # 0x27
'pae', # 0x28
'paeg', # 0x29
'paegg', # 0x2a
'paegs', # 0x2b
'paen', # 0x2c
'paenj', # 0x2d
'paenh', # 0x2e
'paed', # 0x2f
'pael', # 0x30
'paelg', # 0x31
'paelm', # 0x32
'paelb', # 0x33
'paels', # 0x34
'paelt', # 0x35
'paelp', # 0x36
'paelh', # 0x37
'paem', # 0x38
'paeb', # 0x39
'paebs', # 0x3a
'paes', # 0x3b
'paess', # 0x3c
'paeng', # 0x3d
'paej', # 0x3e
'paec', # 0x3f
'paek', # 0x40
'paet', # 0x41
'paep', # 0x42
'paeh', # 0x43
'pya', # 0x44
'pyag', # 0x45
'pyagg', # 0x46
'pyags', # 0x47
'pyan', # 0x48
'pyanj', # 0x49
'pyanh', # 0x4a
'pyad', # 0x4b
'pyal', # 0x4c
'pyalg', # 0x4d
'pyalm', # 0x4e
'pyalb', # 0x4f
'pyals', # 0x50
'pyalt', # 0x51
'pyalp', # 0x52
'pyalh', # 0x53
'pyam', # 0x54
'pyab', # 0x55
'pyabs', # 0x56
'pyas', # 0x57
'pyass', # 0x58
'pyang', # 0x59
'pyaj', # 0x5a
'pyac', # 0x5b
'pyak', # 0x5c
'pyat', # 0x5d
'pyap', # 0x5e
'pyah', # 0x5f
'pyae', # 0x60
'pyaeg', # 0x61
'pyaegg', # 0x62
'pyaegs', # 0x63
'pyaen', # 0x64
'pyaenj', # 0x65
'pyaenh', # 0x66
'pyaed', # 0x67
'pyael', # 0x68
'pyaelg', # 0x69
'pyaelm', # 0x6a
'pyaelb', # 0x6b
'pyaels', # 0x6c
'pyaelt', # 0x6d
'pyaelp', # 0x6e
'pyaelh', # 0x6f
'pyaem', # 0x70
'pyaeb', # 0x71
'pyaebs', # 0x72
'pyaes', # 0x73
'pyaess', # 0x74
'pyaeng', # 0x75
'pyaej', # 0x76
'pyaec', # 0x77
'pyaek', # 0x78
'pyaet', # 0x79
'pyaep', # 0x7a
'pyaeh', # 0x7b
'peo', # 0x7c
'peog', # 0x7d
'peogg', # 0x7e
'peogs', # 0x7f
'peon', # 0x80
'peonj', # 0x81
'peonh', # 0x82
'peod', # 0x83
'peol', # 0x84
'peolg', # 0x85
'peolm', # 0x86
'peolb', # 0x87
'peols', # 0x88
'peolt', # 0x89
'peolp', # 0x8a
'peolh', # 0x8b
'peom', # 0x8c
'peob', # 0x8d
'peobs', # 0x8e
'peos', # 0x8f
'peoss', # 0x90
'peong', # 0x91
'peoj', # 0x92
'peoc', # 0x93
'peok', # 0x94
'peot', # 0x95
'peop', # 0x96
'peoh', # 0x97
'pe', # 0x98
'peg', # 0x99
'pegg', # 0x9a
'pegs', # 0x9b
'pen', # 0x9c
'penj', # 0x9d
'penh', # 0x9e
'ped', # 0x9f
'pel', # 0xa0
'pelg', # 0xa1
'pelm', # 0xa2
'pelb', # 0xa3
'pels', # 0xa4
'pelt', # 0xa5
'pelp', # 0xa6
'pelh', # 0xa7
'pem', # 0xa8
'peb', # 0xa9
'pebs', # 0xaa
'pes', # 0xab
'pess', # 0xac
'peng', # 0xad
'pej', # 0xae
'pec', # 0xaf
'pek', # 0xb0
'pet', # 0xb1
'pep', # 0xb2
'peh', # 0xb3
'pyeo', # 0xb4
'pyeog', # 0xb5
'pyeogg', # 0xb6
'pyeogs', # 0xb7
'pyeon', # 0xb8
'pyeonj', # 0xb9
'pyeonh', # 0xba
'pyeod', # 0xbb
'pyeol', # 0xbc
'pyeolg', # 0xbd
'pyeolm', # 0xbe
'pyeolb', # 0xbf
'pyeols', # 0xc0
'pyeolt', # 0xc1
'pyeolp', # 0xc2
'pyeolh', # 0xc3
'pyeom', # 0xc4
'pyeob', # 0xc5
'pyeobs', # 0xc6
'pyeos', # 0xc7
'pyeoss', # 0xc8
'pyeong', # 0xc9
'pyeoj', # 0xca
'pyeoc', # 0xcb
'pyeok', # 0xcc
'pyeot', # 0xcd
'pyeop', # 0xce
'pyeoh', # 0xcf
'pye', # 0xd0
'pyeg', # 0xd1
'pyegg', # 0xd2
'pyegs', # 0xd3
'pyen', # 0xd4
'pyenj', # 0xd5
'pyenh', # 0xd6
'pyed', # 0xd7
'pyel', # 0xd8
'pyelg', # 0xd9
'pyelm', # 0xda
'pyelb', # 0xdb
'pyels', # 0xdc
'pyelt', # 0xdd
'pyelp', # 0xde
'pyelh', # 0xdf
'pyem', # 0xe0
'pyeb', # 0xe1
'pyebs', # 0xe2
'pyes', # 0xe3
'pyess', # 0xe4
'pyeng', # 0xe5
'pyej', # 0xe6
'pyec', # 0xe7
'pyek', # 0xe8
'pyet', # 0xe9
'pyep', # 0xea
'pyeh', # 0xeb
'po', # 0xec
'pog', # 0xed
'pogg', # 0xee
'pogs', # 0xef
'pon', # 0xf0
'ponj', # 0xf1
'ponh', # 0xf2
'pod', # 0xf3
'pol', # 0xf4
'polg', # 0xf5
'polm', # 0xf6
'polb', # 0xf7
'pols', # 0xf8
'polt', # 0xf9
'polp', # 0xfa
'polh', # 0xfb
'pom', # 0xfc
'pob', # 0xfd
'pobs', # 0xfe
'pos', # 0xff
)
| mit |
echogreens/crab | scikits/crab/recommenders/knn/neighborhood_strategies.py | 10 | 4860 | """
Strategies for users selection to be a
possible candidate to be member of a user neighborhood.
Please check the base.BaseUserNeighborhoodStrategy before
implement your own strategy.
"""
# Author: Marcel Caraciolo <[email protected]>
#
# License: BSD Style.
from base import BaseUserNeighborhoodStrategy
import numpy as np
from ...similarities.basic_similarities import UserSimilarity
from ...metrics.pairwise import euclidean_distances
class AllNeighborsStrategy(BaseUserNeighborhoodStrategy):
'''
Returns
--------
Returns all users in the model.
This strategy is not recommended for large datasets and
it is the dummiest one.
'''
def user_neighborhood(self, user_id, data_model, similarity='user_similarity',
distance=None, nhood_size=None, **params):
'''
Computes a neighborhood consisting of the n users to a given user
based on the strategy implemented in this method.
Parameters
-----------
user_id: int or string
ID of user for which to find most similar other users
data_model: DataModel instance
The data model that will be the source for the possible
candidates
similarity: string
The similarity to compute the neighborhood (default = 'user_similarity')
            ['user_similarity']
distance: function
Pairwise metric to compute the similarity between the users.
nhood_size: int
The neighborhood size (default = None all users)
'''
user_ids = data_model.user_ids()
return user_ids[user_ids != user_id] if user_ids.size else user_ids
class NearestNeighborsStrategy(BaseUserNeighborhoodStrategy):
'''
Returns
--------
Returns the neighborhood consisting of the nearest n
users to a given user. "Nearest" in this context is
defined by the Similarity.
Parameters
-----------
user_id: int or string
ID of user for which to find most similar other users
data_model: DataModel instance
The data model that will be the source for the possible
candidates
similarity: string
The similarity to compute the neighborhood (default = 'user_similarity')
        ['user_similarity']
distance: function
Pairwise metric to compute the similarity between the users.
nhood_size: int
The neighborhood size (default = None all users)
'''
def __init__(self):
self.similarity = None
def _sampling(self, data_model, sampling_rate):
        # TODO: still to be implemented in a better way
return data_model
def _set_similarity(self, data_model, similarity, distance, nhood_size):
if not isinstance(self.similarity, UserSimilarity) \
or not distance == self.similarity.distance:
nhood_size = nhood_size if not nhood_size else nhood_size + 1
self.similarity = UserSimilarity(data_model, distance, nhood_size)
def user_neighborhood(self, user_id, data_model, n_similarity='user_similarity',
distance=None, nhood_size=None, **params):
'''
Computes a neighborhood consisting of the n users to a given
user based on the strategy implemented in this method.
Parameters
-----------
user_id: int or string
ID of user for which to find most similar other users
data_model: DataModel instance
The data model that will be the source for the possible
candidates
n_similarity: string
The similarity to compute the neighborhood (Default = 'user_similarity')
nhood_size: int
The neighborhood size (default = None all users)
Optional Parameters
--------------------
minimal_similarity: float
minimal similarity required for neighbors (default = 0.0)
sampling_rate: int
percentage of users to consider when building neighborhood
(default = 1)
'''
minimal_similarity = params.get('minimal_similarity', 0.0)
sampling_rate = params.get('sampling_rate', 1.0)
data_model = self._sampling(data_model, sampling_rate)
#set the nhood_size at Similarity , and use Similarity to get the top_users
if distance is None:
distance = euclidean_distances
if n_similarity == 'user_similarity':
self._set_similarity(data_model, n_similarity, distance, nhood_size)
else:
raise ValueError('similarity argument must be user_similarity')
neighborhood = [to_user_id for to_user_id, score in self.similarity[user_id] \
if not np.isnan(score) and score >= minimal_similarity and user_id != to_user_id]
return neighborhood
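# Minimal usage sketch (assumes a populated data_model; values illustrative):
#   strategy = NearestNeighborsStrategy()
#   nhood = strategy.user_neighborhood('user_1', data_model, nhood_size=5)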
| bsd-3-clause |
iblis17/slick | slick/blueprints/dns/views.py | 4 | 5622 | import json
from flask import redirect, url_for, flash, render_template
from slick.utils.core import get_client
from slick.utils.session import login_required
from .forms import ZoneRecordForm
import manager
# @login_required
# def delete(key_id):
# mgr = SshKeyManager(get_client())
# mgr.delete_key(key_id)
# flash("SSH key deleted.", 'success')
# return redirect(url_for('.index'))
@login_required
def index():
payload = {
'title': 'List DNS Zones',
'zones': manager.all_zones(),
}
return render_template("zone_index.html", **payload)
@login_required
def quick_register(domain, hostname, ip):
""" This method attempts to immediately register the specified hostname in
the specified zone as an A record with a TTL of 3600.
:param string domain: The domain in which the record should be registered.
:param string hostname: The hostname for the new record.
:param string ip: The IP address to associate with the new record.
"""
existing_record = manager.search_record(domain, hostname)
already_exists = False
success = True
    # Check to see if the record already exists. This is a quick, slightly weak
# attempt to avoid registering duplicate or conflicting records. If we want
# to support round robin A records, this code should be removed.
if existing_record:
if existing_record[0].get('data') == ip:
already_exists = 'Record already registered in DNS.'
elif existing_record[0].get('type') == 'a':
success = False
already_exists = 'Record registered with a different IP. Aborting.'
else:
success = False
            already_exists = 'A non-A record already exists for this name. ' \
                'Aborting.'
if already_exists:
return json.dumps({
'success': success,
'message': already_exists
})
domain_id = manager.get_zone_id_by_name(domain)
if not domain_id:
return json.dumps({
'success': False,
'message': 'Invalid domain specified.',
})
# Create the dictionary that will be used to create the new record.
# This method hardcodes some values to make the process a single click.
fields = {
'zone_id': domain_id,
'rec_type': 'A',
'host': hostname,
'data': ip,
'ttl': 3600,
}
(success, message) = manager.add_record(**fields)
return json.dumps({
'success': success,
'message': message,
})
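# For example (hypothetical values), a request routed here with
# domain='example.com', hostname='web01' and ip='10.0.0.5' attempts to add an
# A record web01.example.com -> 10.0.0.5 with a TTL of 3600.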
@login_required
def record_add(zone_id):
zone = manager.get_zone(zone_id)
if not zone:
flash('DNS zone not found.', 'error')
return redirect(url_for('.index'))
form = ZoneRecordForm(zone_id=zone_id)
if form.validate_on_submit():
fields = {}
for field in form:
if 'csrf_token' == field.name:
continue
fields[field.name] = field.data
fields['rec_type'] = fields['type']
del(fields['type'])
manager.add_record(**fields)
flash('Zone record created.', 'success')
return redirect(url_for('.zone_view', zone_id=zone_id))
payload = {
'title': 'Add Zone Record',
'form': form,
'zone': zone,
'action': url_for('.record_add', zone_id=zone_id),
'action_name': 'Add',
}
return render_template('zone_update_record.html', **payload)
@login_required
def record_delete(record_id):
""" This function will remove the record ID from the specified zone.
    :param int record_id: The ID of the record to remove
"""
record = manager.get_record(record_id)
if not record:
flash('DNS record not found.', 'error')
return redirect(url_for('.index'))
(success, message) = manager.delete_record(record_id)
if success:
flash(message, 'success')
else:
flash(message, 'error')
return redirect(url_for('.zone_view', zone_id=record['domain']['id']))
@login_required
def record_edit(record_id):
record = manager.get_record(record_id)
if not record:
flash('DNS record not found.', 'error')
return redirect(url_for('.index'))
defaults = record
defaults['zone_id'] = record['domain']['id']
form = ZoneRecordForm(**defaults)
if form.validate_on_submit():
fields = {'id': record_id}
for field in form:
if 'csrf_token' == field.name:
continue
fields[field.name] = field.data
(success, message) = manager.update_record(fields)
if success:
flash(message, 'success')
else:
flash(message, 'error')
return redirect(url_for('.zone_view', zone_id=record['domain']['id']))
payload = {
'title': 'Edit Zone Record',
'subheader': '%s.%s' % (record['host'], record['domain']['name']),
'record': record,
'form': form,
'action': url_for('.record_edit', record_id=record_id),
'action_name': 'Edit',
'zone': record['domain'],
}
return render_template('zone_update_record.html', **payload)
@login_required
def zone_view(zone_id):
zone = manager.get_zone(zone_id)
if not zone:
flash('DNS zone not found.', 'error')
return redirect(url_for('.index'))
payload = {
'title': 'View Zone',
'subheader': zone['name'],
'submenu': [(url_for('.record_add', zone_id=zone_id), 'Add Record')],
'zone': zone,
}
return render_template("zone_view.html", **payload)
| mit |
yfried/ansible | lib/ansible/modules/network/aci/aci_tenant.py | 15 | 6198 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_tenant
short_description: Manage tenants (fv:Tenant)
description:
- Manage tenants on Cisco ACI fabrics.
notes:
- More information about the internal APIC class B(fv:Tenant) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
tenant:
description:
- The name of the tenant.
required: yes
aliases: [ name, tenant_name ]
description:
description:
- Description for the tenant.
aliases: [ descr ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new tenant
aci_tenant:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
description: Production tenant
state: present
delegate_to: localhost
- name: Remove a tenant
aci_tenant:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
state: absent
delegate_to: localhost
- name: Query a tenant
aci_tenant:
host: apic
username: admin
password: SomeSecretPassword
tenant: production
state: query
delegate_to: localhost
register: query_result
- name: Query all tenants
aci_tenant:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
tenant=dict(type='str', required=False, aliases=['name', 'tenant_name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['tenant']],
['state', 'present', ['tenant']],
],
)
description = module.params['description']
state = module.params['state']
tenant = module.params['tenant']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
module_object=tenant,
target_filter={'name': tenant},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='fvTenant',
class_config=dict(
name=tenant,
descr=description,
),
)
aci.get_diff(aci_class='fvTenant')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
Rona111/sale_commission | __unported__/stock_block_prodlots/product.py | 5 | 1697 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 Pexego (<www.pexego.es>). All Rights Reserved
# $Omar Castiñeira Saavedra$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""adds functionally on product object from block prodlots"""
from osv import fields, osv
class product_template(osv.osv):
"""adds functionally on product object from block prodlots"""
_inherit = "product.template"
_columns = {
'property_waste': fields.property(
'stock.location',
type='many2one',
relation='stock.location',
string="Waste Location",
method=True,
view_load=True,
            help="For the current product (template), this stock location will be used, instead of the default one, as a virtual location where the products go when removed"),
}
product_template() | agpl-3.0 |
pieroproietti/penguins-eggs | conf/distros/bionic/calamares/calamares-modules/grubcfg/main.py | 1 | 8013 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# === This file is part of Calamares - <http://github.com/calamares> ===
#
# Copyright 2014-2015, Philip Müller <[email protected]>
# Copyright 2015-2017, Teo Mrnjavac <[email protected]>
# Copyright 2017, Alf Gaida <[email protected]>
# Copyright 2017, Adriaan de Groot <[email protected]>
# Copyright 2017, Gabriel Craciunescu <[email protected]>
#
# Calamares is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Calamares is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Calamares. If not, see <http://www.gnu.org/licenses/>.
import libcalamares
import os
import re
def modify_grub_default(partitions, root_mount_point, distributor):
"""
Configures '/etc/default/grub' for hibernation and plymouth.
@see bootloader/main.py, for similar handling of kernel parameters
:param partitions:
:param root_mount_point:
:param distributor:
:return:
"""
default_dir = os.path.join(root_mount_point, "etc/default")
default_grub = os.path.join(default_dir, "grub")
distributor_replace = distributor.replace("'", "'\\''")
dracut_bin = libcalamares.utils.target_env_call(
["sh", "-c", "which dracut"]
)
plymouth_bin = libcalamares.utils.target_env_call(
["sh", "-c", "which plymouth"]
)
# Shell exit value 0 means success
have_plymouth = plymouth_bin == 0
have_dracut = dracut_bin == 0 # Shell exit value 0 means success
use_splash = ""
swap_uuid = ""
swap_outer_uuid = ""
swap_outer_mappername = None
if have_plymouth:
use_splash = "splash"
cryptdevice_params = []
if have_dracut:
for partition in partitions:
has_luks = "luksMapperName" in partition
if partition["fs"] == "linuxswap" and not has_luks:
swap_uuid = partition["uuid"]
if (partition["fs"] == "linuxswap" and has_luks):
swap_outer_uuid = partition["luksUuid"]
swap_outer_mappername = partition["luksMapperName"]
if (partition["mountPoint"] == "/" and has_luks):
cryptdevice_params = [
"rd.luks.uuid={!s}".format(partition["luksUuid"])
]
else:
for partition in partitions:
has_luks = "luksMapperName" in partition
if partition["fs"] == "linuxswap" and not has_luks:
swap_uuid = partition["uuid"]
if (partition["mountPoint"] == "/" and has_luks):
cryptdevice_params = [
"cryptdevice=UUID={!s}:{!s}".format(
partition["luksUuid"], partition["luksMapperName"]
),
"root=/dev/mapper/{!s}".format(
partition["luksMapperName"]
),
"resume=/dev/mapper/{!s}".format(
partition["luksMapperName"]
)
]
kernel_params = ["quiet"]
if cryptdevice_params:
kernel_params.extend(cryptdevice_params)
if use_splash:
kernel_params.append(use_splash)
if swap_uuid:
kernel_params.append("resume=UUID={!s}".format(swap_uuid))
if have_dracut and swap_outer_uuid:
kernel_params.append("rd.luks.uuid={!s}".format(swap_outer_uuid))
if have_dracut and swap_outer_mappername:
kernel_params.append("resume=/dev/mapper/{!s}".format(
swap_outer_mappername))
distributor_line = "GRUB_DISTRIBUTOR='{!s}'".format(distributor_replace)
if not os.path.exists(default_dir):
os.mkdir(default_dir)
have_kernel_cmd = False
have_distributor_line = False
if "overwrite" in libcalamares.job.configuration:
overwrite = libcalamares.job.configuration["overwrite"]
else:
overwrite = False
if os.path.exists(default_grub) and not overwrite:
with open(default_grub, 'r') as grub_file:
lines = [x.strip() for x in grub_file.readlines()]
for i in range(len(lines)):
if lines[i].startswith("#GRUB_CMDLINE_LINUX_DEFAULT"):
kernel_cmd = "GRUB_CMDLINE_LINUX_DEFAULT=\"{!s}\"".format(
" ".join(kernel_params)
)
lines[i] = kernel_cmd
have_kernel_cmd = True
elif lines[i].startswith("GRUB_CMDLINE_LINUX_DEFAULT"):
regex = re.compile(r"^GRUB_CMDLINE_LINUX_DEFAULT\s*=\s*")
line = regex.sub("", lines[i])
line = line.lstrip()
line = line.lstrip("\"")
line = line.lstrip("'")
line = line.rstrip()
line = line.rstrip("\"")
line = line.rstrip("'")
existing_params = line.split()
for existing_param in existing_params:
existing_param_name = existing_param.split("=")[0]
# the only ones we ever add
if existing_param_name not in [
"quiet", "resume", "splash"]:
kernel_params.append(existing_param)
kernel_cmd = "GRUB_CMDLINE_LINUX_DEFAULT=\"{!s}\"".format(
" ".join(kernel_params)
)
lines[i] = kernel_cmd
have_kernel_cmd = True
elif (lines[i].startswith("#GRUB_DISTRIBUTOR")
or lines[i].startswith("GRUB_DISTRIBUTOR")):
lines[i] = distributor_line
have_distributor_line = True
else:
lines = []
if "defaults" in libcalamares.job.configuration:
for key, value in libcalamares.job.configuration[
"defaults"].items():
if value.__class__.__name__ == "bool":
if value:
escaped_value = "true"
else:
escaped_value = "false"
else:
escaped_value = str(value).replace("'", "'\\''")
lines.append("{!s}='{!s}'".format(key, escaped_value))
if not have_kernel_cmd:
kernel_cmd = "GRUB_CMDLINE_LINUX_DEFAULT=\"{!s}\"".format(
" ".join(kernel_params)
)
lines.append(kernel_cmd)
if not have_distributor_line:
lines.append(distributor_line)
if cryptdevice_params:
lines.append("GRUB_ENABLE_CRYPTODISK=y")
with open(default_grub, 'w') as grub_file:
grub_file.write("\n".join(lines) + "\n")
return None
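# Illustrative sketch (not part of the original module): for a hypothetical
# install where plymouth is available and a plain (unencrypted) swap partition
# was found, the two lines that modify_grub_default() above writes into
# /etc/default/grub would look roughly like this (the UUID and distributor
# name are made-up values):
#
#   GRUB_CMDLINE_LINUX_DEFAULT="quiet splash resume=UUID=0fda5e3c-4a7b-4f2d-9c1e-2b3a4d5e6f70"
#   GRUB_DISTRIBUTOR='ExampleOS'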
def run():
"""
Calls routine with given parameters to modify '/etc/default/grub'.
:return:
"""
fw_type = libcalamares.globalstorage.value("firmwareType")
if (libcalamares.globalstorage.value("bootLoader") is None
and fw_type != "efi"):
return None
partitions = libcalamares.globalstorage.value("partitions")
if fw_type == "efi":
esp_found = False
for partition in partitions:
if (partition["mountPoint"]
== libcalamares.globalstorage.value("efiSystemPartition")):
esp_found = True
if not esp_found:
return None
root_mount_point = libcalamares.globalstorage.value("rootMountPoint")
branding = libcalamares.globalstorage.value("branding")
distributor = branding["bootloaderEntryName"]
return modify_grub_default(partitions, root_mount_point, distributor)
| gpl-2.0 |
jorsea/odoomrp-wip | stock_invoicing_type/models/procurement_order.py | 16 | 1700 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import models, api
class ProcurementOrder(models.Model):
_inherit = 'procurement.order'
@api.multi
def run(self, autocommit=False):
sale_obj = self.env['sale.order']
res = super(ProcurementOrder, self).run(autocommit=autocommit)
for rec in self:
for stock_move in rec.move_ids:
if stock_move.picking_id and \
not stock_move.picking_id.invoice_type_id:
orders = sale_obj.search([('procurement_group_id', '=',
stock_move.group_id.id)])
if orders:
invoice_type = orders[0].invoice_type_id.id
picking = stock_move.picking_id
picking.invoice_type_id = invoice_type
return res
| agpl-3.0 |
martonw/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/path_unittest.py | 124 | 3544 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
import sys
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.system.platforminfo import PlatformInfo
from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
from webkitpy.common.system import path
class AbspathTest(unittest.TestCase):
def platforminfo(self):
return SystemHost().platform
def test_abspath_to_uri_cygwin(self):
if sys.platform != 'cygwin':
return
self.assertEqual(path.abspath_to_uri(self.platforminfo(), '/cygdrive/c/foo/bar.html'),
'file:///C:/foo/bar.html')
def test_abspath_to_uri_unixy(self):
self.assertEqual(path.abspath_to_uri(MockPlatformInfo(), "/foo/bar.html"),
'file:///foo/bar.html')
def test_abspath_to_uri_win(self):
if sys.platform != 'win32':
return
self.assertEqual(path.abspath_to_uri(self.platforminfo(), 'c:\\foo\\bar.html'),
'file:///c:/foo/bar.html')
def test_abspath_to_uri_escaping_unixy(self):
self.assertEqual(path.abspath_to_uri(MockPlatformInfo(), '/foo/bar + baz%?.html'),
'file:///foo/bar%20+%20baz%25%3F.html')
# Note that you can't have '?' in a filename on windows.
def test_abspath_to_uri_escaping_cygwin(self):
if sys.platform != 'cygwin':
return
self.assertEqual(path.abspath_to_uri(self.platforminfo(), '/cygdrive/c/foo/bar + baz%.html'),
'file:///C:/foo/bar%20+%20baz%25.html')
def test_stop_cygpath_subprocess(self):
if sys.platform != 'cygwin':
return
# Call cygpath to ensure the subprocess is running.
path.cygpath("/cygdrive/c/foo.txt")
self.assertTrue(path._CygPath._singleton.is_running())
# Stop it.
path._CygPath.stop_cygpath_subprocess()
# Ensure that it is stopped.
self.assertFalse(path._CygPath._singleton.is_running())
| bsd-3-clause |
LucasRoesler/django-archive-mixin | django_archive_mixin/utils.py | 1 | 2502 | from distutils.version import StrictVersion
import django
from django.db import models
from django.utils import timezone
def get_field_by_name(model, field):
"""
Retrieve a field instance from a model by its name.
"""
field_dict = {x.name: x for x in model._meta.get_fields()}
return field_dict[field]
def cascade_archive(inst_or_qs, using, keep_parents=False):
"""
    Return a collector instance that has marked ArchiveMixin instances for
    archiving (i.e. update) instead of actual deletion.
Arguments:
inst_or_qs (models.Model or models.QuerySet): the instance(s) that
are to be deleted.
using (db connection/router): the db to delete from.
        keep_parents (bool): defaults to False. Passed through to the
            collector to control whether parent model instances are also
            collected.
    Returns:
        models.deletion.Collector: this is a standard Collector instance, but
            the ArchiveMixin instances have been moved to its field-update
            list.
"""
from .mixins import ArchiveMixin
if not isinstance(inst_or_qs, models.QuerySet):
instances = [inst_or_qs]
else:
instances = inst_or_qs
deleted_ts = timezone.now()
# The collector will iteratively crawl the relationships and
# create a list of models and instances that are connected to
# this instance.
collector = models.deletion.Collector(using=using)
if StrictVersion(django.get_version()) < StrictVersion('1.9.0'):
collector.collect(instances)
else:
collector.collect(instances, keep_parents=keep_parents)
collector.sort()
    # Iterate over a copy of the items (dict.items() returns a list in
    # Python 2) so that keys can safely be deleted from collector.data below.
    for model, instances in collector.data.items():
# remove archive mixin models from the delete list and put
# them in the update list. If we do this, we can just call
# the collector.delete method.
inst_list = list(instances)
if issubclass(model, ArchiveMixin):
deleted_on_field = get_field_by_name(model, 'deleted_on')
collector.add_field_update(
deleted_on_field, deleted_ts, inst_list)
del collector.data[model]
for i, qs in enumerate(collector.fast_deletes):
# make sure that we do archive on fast deletable models as
# well.
model = qs.model
if issubclass(model, ArchiveMixin):
deleted_on_field = get_field_by_name(model, 'deleted_on')
collector.add_field_update(deleted_on_field, deleted_ts, qs)
collector.fast_deletes[i] = qs.none()
return collector
| mit |
mattjhayes/nmeta_systemtest | nmeta_systemtest/nmeta_systemtest.py | 1 | 26321 | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Run full suite of nmeta system regression tests
in an easy automated manner to make regression testing
nmeta updates a breeze...
Designed to fail as soon as there is an issue, to avoid
unnecessary time spent waiting to be advised of a regression issue that
needs fixing
Provides quantitative (performance) and qualitative (pass test) data
Requires Ansible and test network environment. See documentation for
details.
"""
import datetime
import os
import time
from os.path import expanduser
import sys
#*** Logging imports:
import logging
import logging.handlers
import coloredlogs
#*** Regular Expressions import:
import re
#*** Filename for results to be written to:
RESULTS_DIR = 'nmeta_systemtest_results'
LOGGING_FILENAME = 'test_results.txt'
LOGGING_FILE_LEVEL = logging.INFO
LOGGING_FILE_FORMAT = "%(asctime)s %(levelname)s: " \
"%(funcName)s: %(message)s"
#*** Parameters for capture of environment configuration:
ENVIRONMENT_PLAYBOOK = 'nmeta-full-regression-environment-template.yml'
#*** Parameters for regression of Locations classification:
LOCATIONS_REPEATS = 1
LOCATIONS_TESTS = ["lg1-constrained-bw", "pc1-constrained-bw"]
LOCATIONS_POLICY_1 = 'main_policy_regression_locations.yaml'
LOCATIONS_POLICY_2 = 'main_policy_regression_locations_2.yaml'
LOCATIONS_DURATION = 10
LOCATIONS_PLAYBOOK = 'nmeta-full-regression-locations-template.yml'
LOCATIONS_TCP_PORT = 5555
LOCATIONS_PAUSE1_SWITCH2CONTROLLER = 10
LOCATIONS_PAUSE3_INTERTEST = 6
LOCATIONS_SLEEP = 30
LOCATIONS_TEST_FILES = ['lg1.example.com-iperf_result.txt',
'pc1.example.com-iperf_result.txt']
LOCATIONS_THRESHOLD_CONSTRAINED = 200000
LOCATIONS_THRESHOLD_UNCONSTRAINED = 1000000
#*** Parameters for regression of Static classification:
STATIC_REPEATS = 1
STATIC_TESTS = ["constrained-bw-tcp1234", "constrained-bw-tcp5555"]
STATIC_POLICY_1 = 'main_policy_regression_static.yaml'
STATIC_POLICY_2 = 'main_policy_regression_static_2.yaml'
STATIC_DURATION = 10
STATIC_PLAYBOOK = 'nmeta-full-regression-static-template.yml'
STATIC_PAUSE_SWITCH2CONTROLLER = 30
STATIC_SLEEP = 30
STATIC_TEST_FILES = ["pc1.example.com-1234-iperf_result.txt",
"pc1.example.com-5555-iperf_result.txt"]
STATIC_THRESHOLD_CONSTRAINED = 200000
STATIC_THRESHOLD_UNCONSTRAINED = 1000000
#*** Parameters for regression of Identity classification:
IDENTITY_REPEATS = 1
IDENTITY_TESTS = ["lg1-constrained-bw", "pc1-constrained-bw"]
IDENTITY_POLICY_1 = 'main_policy_regression_identity.yaml'
IDENTITY_POLICY_2 = 'main_policy_regression_identity_2.yaml'
IDENTITY_DURATION = 10
IDENTITY_PLAYBOOK = 'nmeta-full-regression-identity-template.yml'
IDENTITY_TCP_PORT = 5555
IDENTITY_PAUSE1_SWITCH2CONTROLLER = 10
IDENTITY_PAUSE2_LLDPLEARN = 30
IDENTITY_PAUSE3_INTERTEST = 6
IDENTITY_SLEEP = 30
IDENTITY_TEST_FILES = ['lg1.example.com-iperf_result.txt',
'pc1.example.com-iperf_result.txt']
IDENTITY_THRESHOLD_CONSTRAINED = 200000
IDENTITY_THRESHOLD_UNCONSTRAINED = 1000000
#*** Parameters for regression of Statistical classification:
STATISTICAL_REPEATS = 1
STATISTICAL_TESTS = ['constrained-bw-iperf', 'unconstrained-bw-iperf']
STATISTICAL_POLICY_1 = 'main_policy_regression_statistical.yaml'
STATISTICAL_POLICY_2 = 'main_policy_regression_statistical_control.yaml'
STATISTICAL_DURATION = 10
STATISTICAL_PLAYBOOK = 'nmeta-full-regression-statistical-template.yml'
STATISTICAL_TCP_PORT = 5555
STATISTICAL_PAUSE_SWITCH2CONTROLLER = 10
STATISTICAL_SLEEP = 30
STATISTICAL_TEST_FILES = ['pc1.example.com-iperf_result.txt',]
STATISTICAL_THRESHOLD_CONSTRAINED = 400000
STATISTICAL_THRESHOLD_UNCONSTRAINED = 1000000
#*** Parameters for performance testing:
#*** Test effect of different classification policies on performance:
PERFORMANCE_TESTS = ['static', 'identity', 'statistical']
PERFORMANCE_COUNT = 30
PERFORMANCE_PLAYBOOK = 'nmeta-full-regression-performance-template.yml'
PERFORMANCE_PAUSE_SWITCH2CONTROLLER = 10
PERFORMANCE_HPING3_FILENAME = 'pc1.example.com-hping3_output.txt'
PERFORMANCE_SLEEP = 30
#*** Parameters for analysis of nmeta syslog events:
LOGROTATE_PLAYBOOK = 'nmeta-full-regression-logrotate-template.yml'
LOGCHECK_PLAYBOOK = 'nmeta-full-regression-logcheck-template.yml'
LOG_ERROR_FILENAME = 'errors_logged.txt'
#*** Ansible Playbook directory:
HOME_DIR = expanduser("~")
PLAYBOOK_DIR = os.path.join(HOME_DIR,
'nmeta_systemtest/nmeta_systemtest')
def main():
"""
Main function of nmeta regression tests.
Sets up logging, creates the timestamped directory
and runs functions for the various regression test types
"""
#*** Set up logging:
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
coloredlogs.install(level="DEBUG",
logger=logger,
fmt="%(asctime)s.%(msecs)03d %(name)s[%(process)d] " \
"%(funcName)s %(levelname)s %(message)s", datefmt='%H:%M:%S')
logger.info("Running full regression test of nmeta")
#*** Timestamp for results root directory:
timenow = datetime.datetime.now()
timestamp = timenow.strftime("%Y%m%d%H%M%S")
logger.info("root results timestamp is %s", timestamp)
#*** Directory base path to write results to:
results_dir = os.path.join(HOME_DIR, RESULTS_DIR)
#*** Create root directory for results:
os.chdir(results_dir)
logger.debug("creating subdirectory %s", timestamp)
os.mkdir(timestamp)
basedir = os.path.join(results_dir, timestamp)
logger.info("base directory is %s", basedir)
#*** Set up logging to file in the root dir for these results:
logging_file = os.path.join(basedir, LOGGING_FILENAME)
logging_fh = logging.FileHandler(logging_file)
logging_fh.setLevel(LOGGING_FILE_LEVEL)
formatter = logging.Formatter(LOGGING_FILE_FORMAT)
logging_fh.setFormatter(formatter)
logger.addHandler(logging_fh)
#*** Capture environment settings:
record_environment(logger, basedir)
#*** Run performance baseline tests:
regression_performance(logger, basedir)
#*** Run locations traffic classification testing:
test_locations_tc(logger, basedir)
#*** Run static traffic classification testing:
test_static_tc(logger, basedir)
#*** Run identity traffic classification testing:
test_identity_tc(logger, basedir)
#*** Run statistical regression testing:
test_statistical_tc(logger, basedir)
#*** And we're done!:
logger.info("All testing finished, that's a PASS!")
logger.info("See test report at %s/%s", basedir, LOGGING_FILENAME)
def record_environment(logger, basedir):
"""
Capture details of the environment including info
on the nmeta build
"""
extra_vars = {'results_dir': basedir + "/"}
playbook_cmd = build_playbook(ENVIRONMENT_PLAYBOOK, extra_vars,
logger)
os.system(playbook_cmd)
def test_locations_tc(logger, basedir):
"""
Run nmeta locations traffic classification test(s)
"""
logger.info("running locations regression testing")
subdir = 'locations'
#*** Create subdirectory to write results to:
os.chdir(basedir)
os.mkdir(subdir)
test_basedir = os.path.join(basedir, subdir)
#*** Run tests
for i in range(LOCATIONS_REPEATS):
logger.debug("iteration %s of %s", i+1, LOCATIONS_REPEATS)
for test in LOCATIONS_TESTS:
#*** Timestamp for specific test subdirectory:
timenow = datetime.datetime.now()
testdir_timestamp = timenow.strftime("%Y%m%d%H%M%S")
logger.info("running test=%s", test)
test_dir = os.path.join(test_basedir, test,
testdir_timestamp)
rotate_log(logger)
if test == "lg1-constrained-bw":
policy_name = LOCATIONS_POLICY_1
elif test == "pc1-constrained-bw":
policy_name = LOCATIONS_POLICY_2
else:
logger.critical("ERROR: unknown locations test %s",
test)
sys.exit()
extra_vars = {'duration': str(LOCATIONS_DURATION),
'results_dir': test_dir + "/",
'policy_name': policy_name,
'tcp_port': str(LOCATIONS_TCP_PORT),
'pause1':
str(LOCATIONS_PAUSE1_SWITCH2CONTROLLER),
'pause3': str(LOCATIONS_PAUSE3_INTERTEST)}
playbook_cmd = build_playbook(LOCATIONS_PLAYBOOK,
extra_vars, logger)
logger.debug("running Ansible playbook for locations...")
os.system(playbook_cmd)
#*** Analyse locations regression results:
logger.debug("Reading results in directory %s", test_dir)
results = {}
for filename in LOCATIONS_TEST_FILES:
results[filename] = get_iperf_bw(test_dir, filename)
#*** Validate that the results are as expected:
if test == LOCATIONS_TESTS[0]:
constrained = results[LOCATIONS_TEST_FILES[0]]
unconstrained = results[LOCATIONS_TEST_FILES[1]]
elif test == LOCATIONS_TESTS[1]:
constrained = results[LOCATIONS_TEST_FILES[1]]
unconstrained = results[LOCATIONS_TEST_FILES[0]]
else:
#*** Unknown error condition:
logger.critical("UNKNOWN TEST TYPE. test=%s", test)
sys.exit("Please fix this test code. Exiting...")
logger.info("validating bw constrained=%s unconstrained=%s",
constrained, unconstrained)
assert constrained < LOCATIONS_THRESHOLD_CONSTRAINED
assert unconstrained > LOCATIONS_THRESHOLD_UNCONSTRAINED
logger.info("LOCATIONS TC TEST PASSED. test=%s", test)
logger.info("bandwidth constrained=%s unconstrained=%s",
constrained, unconstrained)
#*** Check for any logs that are CRITICAL or ERROR:
check_log(logger, test_dir)
logger.debug("Sleeping... zzzz")
time.sleep(LOCATIONS_SLEEP)
def test_static_tc(logger, basedir):
"""
Run nmeta static traffic classification test(s)
"""
logger.info("running static regression testing")
subdir = 'static'
#*** Create subdirectory to write results to:
os.chdir(basedir)
os.mkdir(subdir)
test_basedir = os.path.join(basedir, subdir)
#*** Run tests
for i in range(STATIC_REPEATS):
logger.debug("iteration %s of %s", i+1, STATIC_REPEATS)
for test in STATIC_TESTS:
#*** Timestamp for specific test subdirectory:
timenow = datetime.datetime.now()
testdir_timestamp = timenow.strftime("%Y%m%d%H%M%S")
logger.info("running test=%s", test)
test_dir = os.path.join(test_basedir, test,
testdir_timestamp)
rotate_log(logger)
if test == 'constrained-bw-tcp1234':
policy_name = STATIC_POLICY_1
elif test == 'constrained-bw-tcp5555':
policy_name = STATIC_POLICY_2
else:
logger.critical("ERROR: unknown static test %s", test)
sys.exit()
extra_vars = {'duration': str(STATIC_DURATION),
'results_dir': test_dir + "/",
'policy_name': policy_name,
'pause1': str(STATIC_PAUSE_SWITCH2CONTROLLER)}
playbook_cmd = build_playbook(STATIC_PLAYBOOK,
extra_vars, logger)
logger.debug("running Ansible playbook...")
os.system(playbook_cmd)
#*** Analyse static regression results:
logger.debug("Reading results in directory %s", test_dir)
results = {}
for filename in STATIC_TEST_FILES:
results[filename] = get_iperf_bw(test_dir, filename)
#*** Validate that the results are as expected:
if test == STATIC_TESTS[0]:
constrained = results[STATIC_TEST_FILES[0]]
unconstrained = results[STATIC_TEST_FILES[1]]
elif test == STATIC_TESTS[1]:
constrained = results[STATIC_TEST_FILES[1]]
unconstrained = results[STATIC_TEST_FILES[0]]
else:
#*** Unknown error condition:
logger.critical("UNKNOWN TEST TYPE. test=%s", test)
sys.exit("Please fix this test code. Exiting...")
logger.info("validating bw constrained=%s unconstrained=%s",
constrained, unconstrained)
assert constrained < STATIC_THRESHOLD_CONSTRAINED
assert unconstrained > STATIC_THRESHOLD_UNCONSTRAINED
logger.info("STATIC TC TEST PASSED. test=%s", test)
logger.info("bandwidth constrained=%s unconstrained=%s",
constrained, unconstrained)
#*** Check for any logs that are CRITICAL or ERROR:
check_log(logger, test_dir)
logger.debug("Sleeping... zzzz")
time.sleep(STATIC_SLEEP)
def test_identity_tc(logger, basedir):
"""
Run nmeta identity traffic classification test(s)
"""
logger.info("running identity regression testing")
subdir = 'identity'
#*** Create subdirectory to write results to:
os.chdir(basedir)
os.mkdir(subdir)
test_basedir = os.path.join(basedir, subdir)
#*** Run tests
for i in range(IDENTITY_REPEATS):
logger.debug("iteration %s of %s", i+1, IDENTITY_REPEATS)
for test in IDENTITY_TESTS:
#*** Timestamp for specific test subdirectory:
timenow = datetime.datetime.now()
testdir_timestamp = timenow.strftime("%Y%m%d%H%M%S")
logger.info("running test=%s", test)
test_dir = os.path.join(test_basedir, test,
testdir_timestamp)
rotate_log(logger)
if test == "lg1-constrained-bw":
policy_name = IDENTITY_POLICY_1
elif test == "pc1-constrained-bw":
policy_name = IDENTITY_POLICY_2
else:
logger.critical("ERROR: unknown identity test %s", test)
sys.exit()
extra_vars = {'duration': str(IDENTITY_DURATION),
'results_dir': test_dir + "/",
'policy_name': policy_name,
'tcp_port': str(IDENTITY_TCP_PORT),
'pause1':
str(IDENTITY_PAUSE1_SWITCH2CONTROLLER),
'pause2': str(IDENTITY_PAUSE2_LLDPLEARN),
'pause3': str(IDENTITY_PAUSE3_INTERTEST)}
playbook_cmd = build_playbook(IDENTITY_PLAYBOOK,
extra_vars, logger)
logger.debug("running Ansible playbook...")
os.system(playbook_cmd)
#*** Analyse identity regression results:
logger.debug("Reading results in directory %s", test_dir)
results = {}
for filename in IDENTITY_TEST_FILES:
results[filename] = get_iperf_bw(test_dir, filename)
#*** Validate that the results are as expected:
if test == IDENTITY_TESTS[0]:
constrained = results[IDENTITY_TEST_FILES[0]]
unconstrained = results[IDENTITY_TEST_FILES[1]]
elif test == IDENTITY_TESTS[1]:
constrained = results[IDENTITY_TEST_FILES[1]]
unconstrained = results[IDENTITY_TEST_FILES[0]]
else:
#*** Unknown error condition:
logger.critical("UNKNOWN TEST TYPE. test=%s", test)
sys.exit("Please fix this test code. Exiting...")
logger.info("validating bw constrained=%s unconstrained=%s",
constrained, unconstrained)
assert constrained < IDENTITY_THRESHOLD_CONSTRAINED
assert unconstrained > IDENTITY_THRESHOLD_UNCONSTRAINED
logger.info("IDENTITY TC TEST PASSED. test=%s", test)
logger.info("bandwidth constrained=%s unconstrained=%s",
constrained, unconstrained)
#*** Check for any logs that are CRITICAL or ERROR:
check_log(logger, test_dir)
logger.debug("Sleeping... zzzz")
time.sleep(IDENTITY_SLEEP)
def test_statistical_tc(logger, basedir):
"""
Run nmeta statistical traffic classification test(s)
"""
logger.info("running statistical regression testing")
subdir = 'statistical'
#*** Create subdirectory to write results to:
os.chdir(basedir)
os.mkdir(subdir)
test_basedir = os.path.join(basedir, subdir)
#*** Run tests
for i in range(STATISTICAL_REPEATS):
logger.debug("iteration %s of %s", i+1, STATISTICAL_REPEATS)
for test in STATISTICAL_TESTS:
#*** Timestamp for specific test subdirectory:
timenow = datetime.datetime.now()
testdir_timestamp = timenow.strftime("%Y%m%d%H%M%S")
logger.info("running test=%s", test)
test_dir = os.path.join(test_basedir, test,
testdir_timestamp)
rotate_log(logger)
if test == 'constrained-bw-iperf':
policy_name = STATISTICAL_POLICY_1
elif test == 'unconstrained-bw-iperf':
policy_name = STATISTICAL_POLICY_2
else:
logger.critical("ERROR: unknown statistical test %s",
test)
sys.exit()
extra_vars = {'duration': str(STATISTICAL_DURATION),
'results_dir': test_dir + "/",
'policy_name': policy_name,
'tcp_port': str(STATISTICAL_TCP_PORT),
'pause1':
str(STATISTICAL_PAUSE_SWITCH2CONTROLLER)}
playbook_cmd = build_playbook(STATISTICAL_PLAYBOOK,
extra_vars, logger)
logger.debug("running Ansible playbook...")
os.system(playbook_cmd)
#*** Analyse statistical regression results:
logger.debug("Reading results in directory %s", test_dir)
results = {}
for filename in STATISTICAL_TEST_FILES:
results[filename] = get_iperf_bw(test_dir, filename)
#*** Validate that the results are as expected:
if test == STATISTICAL_TESTS[0]:
constrained = results[STATISTICAL_TEST_FILES[0]]
logger.info("validating statistical bw constrained=%s",
constrained)
assert constrained < STATISTICAL_THRESHOLD_CONSTRAINED
elif test == STATISTICAL_TESTS[1]:
unconstrained = results[STATISTICAL_TEST_FILES[0]]
logger.info("validating statistical bw unconstrained=%s"
, unconstrained)
assert unconstrained > \
STATISTICAL_THRESHOLD_UNCONSTRAINED
else:
#*** Unknown error condition:
logger.critical("UNKNOWN TEST TYPE. test=%s", test)
sys.exit("Please fix this test code. Exiting...")
logger.info("STATISTICAL TC TEST PASSED. test=%s", test)
#*** Check for any logs that are CRITICAL or ERROR:
check_log(logger, test_dir)
logger.debug("Sleeping... zzzz")
time.sleep(STATISTICAL_SLEEP)
def regression_performance(logger, basedir):
"""
Nmeta performance regression testing
"""
logger.info("running performance regression testing")
subdir = 'performance'
#*** Create subdirectory to write results to:
os.chdir(basedir)
os.mkdir(subdir)
test_basedir = os.path.join(basedir, subdir)
#*** Run tests:
for test in PERFORMANCE_TESTS:
logger.info("running test=%s", test)
test_dir = os.path.join(test_basedir, test)
rotate_log(logger)
if test == "static":
policy_name = STATIC_POLICY_1
elif test == "identity":
policy_name = IDENTITY_POLICY_1
elif test == "statistical":
policy_name = STATISTICAL_POLICY_1
else:
logger.critical("ERROR: unknown performance test %s", test)
sys.exit()
extra_vars = {'count': str(PERFORMANCE_COUNT),
'results_dir': test_dir + "/",
'policy_name': policy_name,
'pause1':
str(PERFORMANCE_PAUSE_SWITCH2CONTROLLER)}
playbook_cmd = build_playbook(PERFORMANCE_PLAYBOOK,
extra_vars, logger)
logger.debug("running Ansible playbook...")
os.system(playbook_cmd)
#*** Read in and analyse hping3 RTT performance results:
rtt_results = hping3_read_results(os.path.join(test_dir,
PERFORMANCE_HPING3_FILENAME))
logger.debug("Performance results are %s", rtt_results)
rtt_avg = sum(rtt_results) / float(len(rtt_results))
logger.info("rtt_avg=%s rtt_max=%s", rtt_avg, max(rtt_results))
#*** Check for any logs that are CRITICAL or ERROR:
check_log(logger, test_dir)
logger.debug("Sleeping... zzzz")
time.sleep(PERFORMANCE_SLEEP)
#==================== helper functions ====================
def get_iperf_bw(test_dir, filename):
"""
    Passed the directory and filename of an iperf CSV result
    file and return the reported bandwidth (the 9th comma-separated field)
"""
filename_full = os.path.join(test_dir, filename)
with open(filename_full) as filehandle:
data = filehandle.read()
data = data.split(",")
#*** The result is position 8 and remove trailing newline:
return int(str(data[8]).rstrip())
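# Illustrative sketch (not part of the original script): get_iperf_bw() above
# assumes iperf was run with CSV output (-y C), where the reported bandwidth is
# the ninth comma-separated field. The sample line below is made up.
def _example_parse_iperf_csv_line():
    """Show how the bandwidth field is pulled out of one iperf CSV line."""
    sample = ("20160101120000,10.1.0.1,49152,10.1.0.2,5555,3,"
              "0.0-10.0,1250000,1000000\n")
    fields = sample.split(",")
    return int(str(fields[8]).rstrip())  # -> 1000000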
def build_playbook(playbook_name, extra_vars, logger):
"""
Passed an Ansible Playbook name, and a dictionary of extra
vars to pass to it, and return a properly formatted string that
will run the Playbook from the command line
"""
playbook = os.path.join(PLAYBOOK_DIR, playbook_name)
logger.debug("playbook is %s", playbook)
playbook_cmd = "ansible-playbook " + playbook
if extra_vars:
playbook_cmd += " --extra-vars \""
for key, value in extra_vars.iteritems():
playbook_cmd += key + "=" + value + " "
playbook_cmd += "\""
logger.debug("playbook_cmd=%s", playbook_cmd)
return playbook_cmd
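# Illustrative sketch (not part of the original script): a quick way to see the
# command string that build_playbook() above produces. The playbook name and
# extra vars here are made up; the ordering of the vars depends on dict order.
def _example_build_playbook_command():
    """Return a sample ansible-playbook command string."""
    example_logger = logging.getLogger(__name__ + '.example')
    return build_playbook('example-playbook.yml',
                          {'duration': '10', 'results_dir': '/tmp/results/'},
                          example_logger)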
def hping3_read_results(filename):
"""
Passed a full path filename. Open this file and process
it line by line, accumulating valid RTT results into a
list and returning it
"""
results = []
with open(filename, 'r') as filehandle:
for line in filehandle.readlines():
hping3_result = hping3_read_line(line)
if hping3_result:
results.append(hping3_result)
return results
def hping3_read_line(hping3_line):
"""
Passed a line from the hping3 output file and return the
result if it exists, otherwise 0.
"""
#*** Extract hping3 time from the line:
#*** Example: len=46 ip=10.1.0.2 ttl=64 DF id=36185 sport=0
#*** flags=RA seq=6 win=0 rtt=9.1 ms
hping3_match = \
re.search(r"rtt=(\S+)", hping3_line)
if hping3_match:
result = hping3_match.groups()[0]
#*** Turn ms into seconds:
result = float(result) / 1000
return result
else:
return 0
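# Illustrative sketch (not part of the original script): applying
# hping3_read_line() above to the sample line quoted in its docstring converts
# the reported RTT from milliseconds to seconds.
def _example_hping3_rtt():
    """Parse one hypothetical hping3 output line; returns 0.0091 (seconds)."""
    sample = ('len=46 ip=10.1.0.2 ttl=64 DF id=36185 sport=0 '
              'flags=RA seq=6 win=0 rtt=9.1 ms')
    return hping3_read_line(sample)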
def rotate_log(logger):
"""
Run an Ansible playbook to rotate the nmeta log
so that it is fresh for analysis post test
"""
logger.debug("Rotating nmeta syslog output for freshness")
extra_vars = {}
playbook_cmd = build_playbook(LOGROTATE_PLAYBOOK, extra_vars,
logger)
logger.debug("running Ansible playbook...")
os.system(playbook_cmd)
def check_log(logger, test_dir):
"""
Check the nmeta log file to see if it has any log events that
should cause the test to fail so that code can be fixed
"""
logger.debug("Checking nmeta syslog for error or critical messages")
extra_vars = {'results_dir': test_dir + "/",
'error_filename': LOG_ERROR_FILENAME}
playbook_cmd = build_playbook(LOGCHECK_PLAYBOOK, extra_vars,
logger)
logger.debug("running Ansible playbook...")
os.system(playbook_cmd)
#*** Presence of non-zero file indicates ERROR and/or CRITICAL logs:
error_file = os.path.join(test_dir, LOG_ERROR_FILENAME)
if os.path.isfile(error_file):
if os.stat(error_file).st_size > 0:
logger.critical("ERROR and/or CRITICAL logs need attention")
logger.info("Check file %s", error_file)
sys.exit()
if __name__ == "__main__":
#*** Run the main function
main()
| apache-2.0 |
yeyanchao/calibre | src/calibre/ebooks/markdown/blockparser.py | 133 | 3358 |
import markdown
class State(list):
""" Track the current and nested state of the parser.
This utility class is used to track the state of the BlockParser and
    support multiple levels of nesting. It's just a simple API wrapped around
a list. Each time a state is set, that state is appended to the end of the
list. Each time a state is reset, that state is removed from the end of
the list.
Therefore, each time a state is set for a nested block, that state must be
reset when we back out of that level of nesting or the state could be
corrupted.
While all the methods of a list object are available, only the three
defined below need be used.
"""
def set(self, state):
""" Set a new state. """
self.append(state)
def reset(self):
""" Step back one step in nested state. """
self.pop()
def isstate(self, state):
""" Test that top (current) level is of given state. """
if len(self):
return self[-1] == state
else:
return False
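# Illustrative sketch (not part of the original module): typical use of the
# State helper above when descending into and backing out of a nested block.
# The state name used here is arbitrary.
def _example_state_usage():
    """Demonstrate set/isstate/reset; returns (True, False)."""
    state = State()
    state.set('blockquote')                # entering a nested block
    inside = state.isstate('blockquote')   # True while nested
    state.reset()                          # backing out again
    outside = state.isstate('blockquote')  # False once unwound
    return inside, outside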
class BlockParser:
""" Parse Markdown blocks into an ElementTree object.
A wrapper class that stitches the various BlockProcessors together,
looping through them and creating an ElementTree object.
"""
def __init__(self):
self.blockprocessors = markdown.odict.OrderedDict()
self.state = State()
def parseDocument(self, lines):
""" Parse a markdown document into an ElementTree.
Given a list of lines, an ElementTree object (not just a parent Element)
is created and the root element is passed to the parser as the parent.
The ElementTree object is returned.
This should only be called on an entire document, not pieces.
"""
# Create a ElementTree from the lines
self.root = markdown.etree.Element(markdown.DOC_TAG)
self.parseChunk(self.root, '\n'.join(lines))
return markdown.etree.ElementTree(self.root)
def parseChunk(self, parent, text):
""" Parse a chunk of markdown text and attach to given etree node.
While the ``text`` argument is generally assumed to contain multiple
blocks which will be split on blank lines, it could contain only one
block. Generally, this method would be called by extensions when
block parsing is required.
The ``parent`` etree Element passed in is altered in place.
Nothing is returned.
"""
self.parseBlocks(parent, text.split('\n\n'))
def parseBlocks(self, parent, blocks):
""" Process blocks of markdown text and attach to given etree node.
Given a list of ``blocks``, each blockprocessor is stepped through
until there are no blocks left. While an extension could potentially
call this method directly, it's generally expected to be used internally.
This is a public method as an extension may need to add/alter additional
BlockProcessors which call this method to recursively parse a nested
block.
"""
while blocks:
for processor in self.blockprocessors.values():
if processor.test(parent, blocks[0]):
processor.run(parent, blocks)
break
| gpl-3.0 |
Aaron1992/flaskbb | flaskbb/utils/populate.py | 1 | 6801 | # -*- coding: utf-8 -*-
"""
flaskbb.utils.populate
~~~~~~~~~~~~~~~~~~~~
    A module that makes creating data easier
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from flaskbb.management.models import Setting, SettingsGroup
from flaskbb.user.models import User, Group
from flaskbb.forum.models import Post, Topic, Forum, Category
def delete_settings_from_fixture(fixture):
"""
    Deletes the settings defined in a fixture from the database.
"""
for settingsgroup in fixture:
group = SettingsGroup.query.filter_by(key=settingsgroup[0]).first()
for settings in settingsgroup[1]['settings']:
setting = Setting.query.filter_by(key=settings[0]).first()
setting.delete()
group.delete()
def create_settings_from_fixture(fixture):
"""
Inserts the settings from a fixture into the database.
"""
for settingsgroup in fixture:
group = SettingsGroup(
key=settingsgroup[0],
name=settingsgroup[1]['name'],
description=settingsgroup[1]['description']
)
group.save()
for settings in settingsgroup[1]['settings']:
setting = Setting(
key=settings[0],
value=settings[1]['value'],
value_type=settings[1]['value_type'],
name=settings[1]['name'],
description=settings[1]['description'],
extra=settings[1].get('extra', ""), # Optional field
settingsgroup=group.key
)
setting.save()
def update_settings_from_fixture(fixture, overwrite_group=False,
overwrite_setting=False):
"""
Updates the database settings from a fixture.
Returns the number of updated groups and settings.
"""
groups_count = 0
settings_count = 0
for settingsgroup in fixture:
group = SettingsGroup.query.filter_by(key=settingsgroup[0]).first()
if group is not None and overwrite_group or group is None:
groups_count += 1
group = SettingsGroup(
key=settingsgroup[0],
name=settingsgroup[1]['name'],
description=settingsgroup[1]['description']
)
group.save()
for settings in settingsgroup[1]['settings']:
setting = Setting.query.filter_by(key=settings[0]).first()
if setting is not None and overwrite_setting or setting is None:
settings_count += 1
setting = Setting(
key=settings[0],
value=settings[1]['value'],
value_type=settings[1]['value_type'],
name=settings[1]['name'],
description=settings[1]['description'],
extra=settings[1].get('extra', ""),
settingsgroup=group.key
)
setting.save()
return groups_count, settings_count
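# Illustrative sketch (not part of the original module): the shape of the
# fixture expected by the three functions above, shown with made-up keys and
# values. The real fixture is defined in flaskbb.fixtures.settings.
_EXAMPLE_SETTINGS_FIXTURE = (
    ('example_group', {
        'name': "Example Group",
        'description': "A made-up settings group.",
        'settings': (
            ('example_setting', {
                'value': True,
                'value_type': "boolean",
                'name': "Example Setting",
                'description': "A made-up boolean setting.",
            }),
        ),
    }),
)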
def create_default_settings():
"""
Creates the default settings
"""
from flaskbb.fixtures.settings import fixture
create_settings_from_fixture(fixture)
def create_default_groups():
"""
This will create the 5 default groups
"""
from flaskbb.fixtures.groups import fixture
result = []
for key, value in fixture.items():
group = Group(name=key)
for k, v in value.items():
setattr(group, k, v)
group.save()
result.append(group)
return result
def create_admin_user(username, password, email):
"""
Creates the administrator user
"""
admin_group = Group.query.filter_by(admin=True).first()
user = User()
user.username = username
user.password = password
user.email = email
user.primary_group_id = admin_group.id
user.save()
def create_welcome_forum():
"""
    This will create the `welcome forum` that nearly all forum
    software has after the installation process is finished
"""
if User.query.count() < 1:
raise "You need to create the admin user first!"
user = User.query.filter_by(id=1).first()
category = Category(title="My Category", position=1)
category.save()
forum = Forum(title="Welcome", description="Your first forum",
category_id=category.id)
forum.save()
topic = Topic(title="Welcome!")
post = Post(content="Have fun with your new FlaskBB Forum!")
topic.save(user=user, forum=forum, post=post)
def create_test_data():
"""
Creates 5 users, 2 categories and 2 forums in each category. It also opens
    a new topic in each forum with a post.
"""
create_default_groups()
create_default_settings()
# create 5 users
for u in range(1, 6):
username = "test%s" % u
email = "test%[email protected]" % u
user = User(username=username, password="test", email=email)
user.primary_group_id = u
user.save()
user1 = User.query.filter_by(id=1).first()
user2 = User.query.filter_by(id=2).first()
# create 2 categories
for i in range(1, 3):
category_title = "Test Category %s" % i
category = Category(title=category_title,
description="Test Description")
category.save()
# create 2 forums in each category
for j in range(1, 3):
if i == 2:
j += 2
forum_title = "Test Forum %s %s" % (j, i)
forum = Forum(title=forum_title, description="Test Description",
category_id=i)
forum.save()
# create a topic
topic = Topic()
post = Post()
topic.title = "Test Title %s" % j
post.content = "Test Content"
topic.save(post=post, user=user1, forum=forum)
# create a second post in the forum
post = Post()
post.content = "Test Post"
post.save(user=user2, topic=topic)
def insert_mass_data():
"""
Creates 100 topics in the first forum and each topic has 100 posts.
"""
user1 = User.query.filter_by(id=1).first()
user2 = User.query.filter_by(id=2).first()
forum = Forum.query.filter_by(id=1).first()
    # create 100 topics
for i in range(1, 101):
# create a topic
topic = Topic()
post = Post()
topic.title = "Test Title %s" % i
post.content = "Test Content"
topic.save(post=post, user=user1, forum=forum)
        # create 99 more posts in each topic (100 in total, counting the initial post)
for j in range(1, 100):
post = Post()
post.content = "Test Post"
post.save(user=user2, topic=topic)
| bsd-3-clause |
akumar21NCSU/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/mux.py | 636 | 71218 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides classes and helper functions for multiplexing extension.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06
"""
import collections
import copy
import email
import email.parser
import logging
import math
import struct
import threading
import traceback
from mod_pywebsocket import common
from mod_pywebsocket import handshake
from mod_pywebsocket import util
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_hybi import Frame
from mod_pywebsocket._stream_hybi import Stream
from mod_pywebsocket._stream_hybi import StreamOptions
from mod_pywebsocket._stream_hybi import create_binary_frame
from mod_pywebsocket._stream_hybi import create_closing_handshake_body
from mod_pywebsocket._stream_hybi import create_header
from mod_pywebsocket._stream_hybi import create_length_header
from mod_pywebsocket._stream_hybi import parse_frame
from mod_pywebsocket.handshake import hybi
_CONTROL_CHANNEL_ID = 0
_DEFAULT_CHANNEL_ID = 1
_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
_MUX_OPCODE_FLOW_CONTROL = 2
_MUX_OPCODE_DROP_CHANNEL = 3
_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
_MAX_CHANNEL_ID = 2 ** 29 - 1
_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64
_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024
_HANDSHAKE_ENCODING_IDENTITY = 0
_HANDSHAKE_ENCODING_DELTA = 1
# We need only these status code for now.
_HTTP_BAD_RESPONSE_MESSAGES = {
common.HTTP_STATUS_BAD_REQUEST: 'Bad Request',
}
# DropChannel reason code
# TODO(bashi): Define all reason code defined in -05 draft.
_DROP_CODE_NORMAL_CLOSURE = 1000
_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001
_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003
_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 2010
_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
_DROP_CODE_SEND_QUOTA_OVERFLOW = 3006
_DROP_CODE_ACKNOWLEDGED = 3008
_DROP_CODE_BAD_FRAGMENTATION = 3009
class MuxUnexpectedException(Exception):
"""Exception in handling multiplexing extension."""
pass
# Temporary
class MuxNotImplementedException(Exception):
"""Raised when a flow enters unimplemented code path."""
pass
class LogicalConnectionClosedException(Exception):
"""Raised when logical connection is gracefully closed."""
pass
class PhysicalConnectionError(Exception):
"""Raised when there is a physical connection error."""
def __init__(self, drop_code, message=''):
super(PhysicalConnectionError, self).__init__(
'code=%d, message=%r' % (drop_code, message))
self.drop_code = drop_code
self.message = message
class LogicalChannelError(Exception):
"""Raised when there is a logical channel error."""
def __init__(self, channel_id, drop_code, message=''):
super(LogicalChannelError, self).__init__(
'channel_id=%d, code=%d, message=%r' % (
channel_id, drop_code, message))
self.channel_id = channel_id
self.drop_code = drop_code
self.message = message
def _encode_channel_id(channel_id):
if channel_id < 0:
raise ValueError('Channel id %d must not be negative' % channel_id)
if channel_id < 2 ** 7:
return chr(channel_id)
if channel_id < 2 ** 14:
return struct.pack('!H', 0x8000 + channel_id)
if channel_id < 2 ** 21:
first = chr(0xc0 + (channel_id >> 16))
return first + struct.pack('!H', channel_id & 0xffff)
if channel_id < 2 ** 29:
return struct.pack('!L', 0xe0000000 + channel_id)
raise ValueError('Channel id %d is too large' % channel_id)
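# Illustrative sketch (not part of the original module): the variable-length
# encoding above uses 1, 2, 3 or 4 bytes depending on the channel id range.
# The sample ids below are arbitrary values chosen to hit each branch.
def _example_channel_id_lengths():
    """Return (channel_id, encoded length in bytes) for one id per range."""
    return [(channel_id, len(_encode_channel_id(channel_id)))
            for channel_id in (5, 1000, 100000, 2 ** 28)]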
def _encode_number(number):
return create_length_header(number, False)
def _create_add_channel_response(channel_id, encoded_handshake,
encoding=0, rejected=False):
if encoding != 0 and encoding != 1:
raise ValueError('Invalid encoding %d' % encoding)
first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) |
(rejected << 4) | encoding)
block = (chr(first_byte) +
_encode_channel_id(channel_id) +
_encode_number(len(encoded_handshake)) +
encoded_handshake)
return block
def _create_drop_channel(channel_id, code=None, message=''):
if len(message) > 0 and code is None:
raise ValueError('Code must be specified if message is specified')
first_byte = _MUX_OPCODE_DROP_CHANNEL << 5
block = chr(first_byte) + _encode_channel_id(channel_id)
if code is None:
block += _encode_number(0) # Reason size
else:
reason = struct.pack('!H', code) + message
reason_size = _encode_number(len(reason))
block += reason_size + reason
return block
def _create_flow_control(channel_id, replenished_quota):
first_byte = _MUX_OPCODE_FLOW_CONTROL << 5
block = (chr(first_byte) +
_encode_channel_id(channel_id) +
_encode_number(replenished_quota))
return block
def _create_new_channel_slot(slots, send_quota):
if slots < 0 or send_quota < 0:
raise ValueError('slots and send_quota must be non-negative.')
first_byte = _MUX_OPCODE_NEW_CHANNEL_SLOT << 5
block = (chr(first_byte) +
_encode_number(slots) +
_encode_number(send_quota))
return block
def _create_fallback_new_channel_slot():
first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1 # Set the F flag
block = (chr(first_byte) + _encode_number(0) + _encode_number(0))
return block
def _parse_request_text(request_text):
request_line, header_lines = request_text.split('\r\n', 1)
words = request_line.split(' ')
if len(words) != 3:
raise ValueError('Bad Request-Line syntax %r' % request_line)
[command, path, version] = words
if version != 'HTTP/1.1':
raise ValueError('Bad request version %r' % version)
# email.parser.Parser() parses RFC 2822 (RFC 822) style headers.
# RFC 6455 refers RFC 2616 for handshake parsing, and RFC 2616 refers
# RFC 822.
headers = email.parser.Parser().parsestr(header_lines)
return command, path, version, headers
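# Illustrative sketch (not part of the original module): _parse_request_text()
# above splits a decoded handshake into its Request-Line parts and RFC 2822
# style headers. The handshake text below is a made-up minimal example.
def _example_parse_request_text():
    """Parse a sample handshake; returns ('GET', '/chat', 'HTTP/1.1', headers)."""
    sample = ('GET /chat HTTP/1.1\r\n'
              'Host: example.com\r\n'
              'Upgrade: websocket\r\n'
              '\r\n')
    return _parse_request_text(sample)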
class _ControlBlock(object):
"""A structure that holds parsing result of multiplexing control block.
Control block specific attributes will be added by _MuxFramePayloadParser.
(e.g. encoded_handshake will be added for AddChannelRequest and
AddChannelResponse)
"""
def __init__(self, opcode):
self.opcode = opcode
class _MuxFramePayloadParser(object):
"""A class that parses multiplexed frame payload."""
def __init__(self, payload):
self._data = payload
self._read_position = 0
self._logger = util.get_class_logger(self)
def read_channel_id(self):
"""Reads channel id.
Raises:
ValueError: when the payload doesn't contain
valid channel id.
"""
remaining_length = len(self._data) - self._read_position
pos = self._read_position
if remaining_length == 0:
raise ValueError('Invalid channel id format')
channel_id = ord(self._data[pos])
channel_id_length = 1
if channel_id & 0xe0 == 0xe0:
if remaining_length < 4:
raise ValueError('Invalid channel id format')
channel_id = struct.unpack('!L',
self._data[pos:pos+4])[0] & 0x1fffffff
channel_id_length = 4
elif channel_id & 0xc0 == 0xc0:
if remaining_length < 3:
raise ValueError('Invalid channel id format')
channel_id = (((channel_id & 0x1f) << 16) +
struct.unpack('!H', self._data[pos+1:pos+3])[0])
channel_id_length = 3
elif channel_id & 0x80 == 0x80:
if remaining_length < 2:
raise ValueError('Invalid channel id format')
channel_id = struct.unpack('!H',
self._data[pos:pos+2])[0] & 0x3fff
channel_id_length = 2
self._read_position += channel_id_length
return channel_id
def read_inner_frame(self):
"""Reads an inner frame.
Raises:
PhysicalConnectionError: when the inner frame is invalid.
"""
if len(self._data) == self._read_position:
raise PhysicalConnectionError(
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED)
bits = ord(self._data[self._read_position])
self._read_position += 1
fin = (bits & 0x80) == 0x80
rsv1 = (bits & 0x40) == 0x40
rsv2 = (bits & 0x20) == 0x20
rsv3 = (bits & 0x10) == 0x10
opcode = bits & 0xf
payload = self.remaining_data()
# Consume rest of the message which is payload data of the original
# frame.
self._read_position = len(self._data)
return fin, rsv1, rsv2, rsv3, opcode, payload
def _read_number(self):
if self._read_position + 1 > len(self._data):
raise ValueError(
'Cannot read the first byte of number field')
number = ord(self._data[self._read_position])
if number & 0x80 == 0x80:
raise ValueError(
'The most significant bit of the first byte of number should '
'be unset')
self._read_position += 1
pos = self._read_position
if number == 127:
if pos + 8 > len(self._data):
raise ValueError('Invalid number field')
self._read_position += 8
number = struct.unpack('!Q', self._data[pos:pos+8])[0]
if number > 0x7FFFFFFFFFFFFFFF:
raise ValueError('Encoded number(%d) >= 2^63' % number)
if number <= 0xFFFF:
raise ValueError(
'%d should not be encoded by 9 bytes encoding' % number)
return number
if number == 126:
if pos + 2 > len(self._data):
raise ValueError('Invalid number field')
self._read_position += 2
number = struct.unpack('!H', self._data[pos:pos+2])[0]
if number <= 125:
raise ValueError(
'%d should not be encoded by 3 bytes encoding' % number)
return number
def _read_size_and_contents(self):
"""Reads data that consists of followings:
- the size of the contents encoded the same way as payload length
of the WebSocket Protocol with 1 bit padding at the head.
- the contents.
"""
try:
size = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
pos = self._read_position
if pos + size > len(self._data):
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Cannot read %d bytes data' % size)
self._read_position += size
return self._data[pos:pos+size]
def _read_add_channel_request(self, first_byte, control_block):
reserved = (first_byte >> 2) & 0x7
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
# Invalid encoding will be handled by MuxHandler.
encoding = first_byte & 0x3
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
control_block.encoding = encoding
encoded_handshake = self._read_size_and_contents()
control_block.encoded_handshake = encoded_handshake
return control_block
def _read_add_channel_response(self, first_byte, control_block):
reserved = (first_byte >> 2) & 0x3
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
control_block.accepted = (first_byte >> 4) & 1
control_block.encoding = first_byte & 0x3
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
control_block.encoded_handshake = self._read_size_and_contents()
return control_block
def _read_flow_control(self, first_byte, control_block):
reserved = first_byte & 0x1f
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
try:
control_block.channel_id = self.read_channel_id()
control_block.send_quota = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
return control_block
def _read_drop_channel(self, first_byte, control_block):
reserved = first_byte & 0x1f
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
reason = self._read_size_and_contents()
if len(reason) == 0:
control_block.drop_code = None
control_block.drop_message = ''
elif len(reason) >= 2:
control_block.drop_code = struct.unpack('!H', reason[:2])[0]
control_block.drop_message = reason[2:]
else:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Received DropChannel that contains only 1-byte reason')
return control_block
def _read_new_channel_slot(self, first_byte, control_block):
reserved = first_byte & 0x1e
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
control_block.fallback = first_byte & 1
try:
control_block.slots = self._read_number()
control_block.send_quota = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
return control_block
def read_control_blocks(self):
"""Reads control block(s).
Raises:
PhysicalConnectionError: when the payload contains invalid control
block(s).
StopIteration: when no control blocks left.
"""
while self._read_position < len(self._data):
first_byte = ord(self._data[self._read_position])
self._read_position += 1
opcode = (first_byte >> 5) & 0x7
control_block = _ControlBlock(opcode=opcode)
if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
yield self._read_add_channel_request(first_byte, control_block)
elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
yield self._read_add_channel_response(
first_byte, control_block)
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
yield self._read_flow_control(first_byte, control_block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
yield self._read_drop_channel(first_byte, control_block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
yield self._read_new_channel_slot(first_byte, control_block)
else:
raise PhysicalConnectionError(
_DROP_CODE_UNKNOWN_MUX_OPCODE,
'Invalid opcode %d' % opcode)
assert self._read_position == len(self._data)
raise StopIteration
def remaining_data(self):
"""Returns remaining data."""
return self._data[self._read_position:]
class _LogicalRequest(object):
"""Mimics mod_python request."""
def __init__(self, channel_id, command, path, protocol, headers,
connection):
"""Constructs an instance.
Args:
channel_id: the channel id of the logical channel.
command: HTTP request command.
path: HTTP request path.
headers: HTTP headers.
connection: _LogicalConnection instance.
"""
self.channel_id = channel_id
self.method = command
self.uri = path
self.protocol = protocol
self.headers_in = headers
self.connection = connection
self.server_terminated = False
self.client_terminated = False
def is_https(self):
"""Mimics request.is_https(). Returns False because this method is
used only by old protocols (hixie and hybi00).
"""
return False
class _LogicalConnection(object):
"""Mimics mod_python mp_conn."""
# For details, see the comment of set_read_state().
STATE_ACTIVE = 1
STATE_GRACEFULLY_CLOSED = 2
STATE_TERMINATED = 3
def __init__(self, mux_handler, channel_id):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
channel_id: channel id of this connection.
"""
self._mux_handler = mux_handler
self._channel_id = channel_id
self._incoming_data = ''
# - Protects _waiting_write_completion
# - Signals the thread waiting for completion of write by mux handler
self._write_condition = threading.Condition()
self._waiting_write_completion = False
self._read_condition = threading.Condition()
self._read_state = self.STATE_ACTIVE
def get_local_addr(self):
"""Getter to mimic mp_conn.local_addr."""
return self._mux_handler.physical_connection.get_local_addr()
local_addr = property(get_local_addr)
def get_remote_addr(self):
"""Getter to mimic mp_conn.remote_addr."""
return self._mux_handler.physical_connection.get_remote_addr()
remote_addr = property(get_remote_addr)
def get_memorized_lines(self):
"""Gets memorized lines. Not supported."""
raise MuxUnexpectedException('_LogicalConnection does not support '
'get_memorized_lines')
def write(self, data):
"""Writes data. mux_handler sends data asynchronously. The caller will
        be suspended until the write is done.
Args:
data: data to be written.
Raises:
MuxUnexpectedException: when called before finishing the previous
write.
"""
try:
self._write_condition.acquire()
if self._waiting_write_completion:
raise MuxUnexpectedException(
'Logical connection %d is already waiting the completion '
'of write' % self._channel_id)
self._waiting_write_completion = True
self._mux_handler.send_data(self._channel_id, data)
self._write_condition.wait()
# TODO(tyoshino): Raise an exception if woke up by on_writer_done.
finally:
self._write_condition.release()
def write_control_data(self, data):
"""Writes data via the control channel. Don't wait finishing write
because this method can be called by mux dispatcher.
Args:
data: data to be written.
"""
self._mux_handler.send_control_data(data)
def on_write_data_done(self):
"""Called when sending data is completed."""
try:
self._write_condition.acquire()
if not self._waiting_write_completion:
raise MuxUnexpectedException(
'Invalid call of on_write_data_done for logical '
'connection %d' % self._channel_id)
self._waiting_write_completion = False
self._write_condition.notify()
finally:
self._write_condition.release()
def on_writer_done(self):
"""Called by the mux handler when the writer thread has finished."""
try:
self._write_condition.acquire()
self._waiting_write_completion = False
self._write_condition.notify()
finally:
self._write_condition.release()
def append_frame_data(self, frame_data):
"""Appends incoming frame data. Called when mux_handler dispatches
frame data to the corresponding application.
Args:
frame_data: incoming frame data.
"""
self._read_condition.acquire()
self._incoming_data += frame_data
self._read_condition.notify()
self._read_condition.release()
def read(self, length):
"""Reads data. Blocks until enough data has arrived via physical
connection.
Args:
length: length of data to be read.
Raises:
LogicalConnectionClosedException: when closing handshake for this
logical channel has been received.
ConnectionTerminatedException: when the physical connection has
closed, or an error is caused on the reader thread.
"""
self._read_condition.acquire()
while (self._read_state == self.STATE_ACTIVE and
len(self._incoming_data) < length):
self._read_condition.wait()
try:
if self._read_state == self.STATE_GRACEFULLY_CLOSED:
raise LogicalConnectionClosedException(
'Logical channel %d has closed.' % self._channel_id)
elif self._read_state == self.STATE_TERMINATED:
raise ConnectionTerminatedException(
'Receiving %d byte failed. Logical channel (%d) closed' %
(length, self._channel_id))
value = self._incoming_data[:length]
self._incoming_data = self._incoming_data[length:]
finally:
self._read_condition.release()
return value
def set_read_state(self, new_state):
"""Sets the state of this connection. Called when an event for this
connection has occurred.
Args:
new_state: state to be set. new_state must be one of followings:
- STATE_GRACEFULLY_CLOSED: when closing handshake for this
connection has been received.
- STATE_TERMINATED: when the physical connection has closed or
                DropChannel for this connection has been received.
"""
self._read_condition.acquire()
self._read_state = new_state
self._read_condition.notify()
self._read_condition.release()
class _InnerMessage(object):
"""Holds the result of _InnerMessageBuilder.build().
"""
def __init__(self, opcode, payload):
self.opcode = opcode
self.payload = payload
class _InnerMessageBuilder(object):
"""A class that holds the context of inner message fragmentation and
builds a message from fragmented inner frame(s).
"""
def __init__(self):
self._control_opcode = None
self._pending_control_fragments = []
self._message_opcode = None
self._pending_message_fragments = []
self._frame_handler = self._handle_first
def _handle_first(self, frame):
if frame.opcode == common.OPCODE_CONTINUATION:
raise InvalidFrameException('Sending invalid continuation opcode')
if common.is_control_opcode(frame.opcode):
return self._process_first_fragmented_control(frame)
else:
return self._process_first_fragmented_message(frame)
def _process_first_fragmented_control(self, frame):
self._control_opcode = frame.opcode
self._pending_control_fragments.append(frame.payload)
if not frame.fin:
self._frame_handler = self._handle_fragmented_control
return None
return self._reassemble_fragmented_control()
def _process_first_fragmented_message(self, frame):
self._message_opcode = frame.opcode
self._pending_message_fragments.append(frame.payload)
if not frame.fin:
self._frame_handler = self._handle_fragmented_message
return None
return self._reassemble_fragmented_message()
def _handle_fragmented_control(self, frame):
if frame.opcode != common.OPCODE_CONTINUATION:
raise InvalidFrameException(
'Sending invalid opcode %d while sending fragmented control '
'message' % frame.opcode)
self._pending_control_fragments.append(frame.payload)
if not frame.fin:
return None
return self._reassemble_fragmented_control()
def _reassemble_fragmented_control(self):
opcode = self._control_opcode
payload = ''.join(self._pending_control_fragments)
self._control_opcode = None
self._pending_control_fragments = []
if self._message_opcode is not None:
self._frame_handler = self._handle_fragmented_message
else:
self._frame_handler = self._handle_first
return _InnerMessage(opcode, payload)
def _handle_fragmented_message(self, frame):
# Sender can interleave a control message while sending fragmented
# messages.
if common.is_control_opcode(frame.opcode):
if self._control_opcode is not None:
raise MuxUnexpectedException(
'Should not reach here(Bug in builder)')
return self._process_first_fragmented_control(frame)
if frame.opcode != common.OPCODE_CONTINUATION:
raise InvalidFrameException(
'Sending invalid opcode %d while sending fragmented message' %
frame.opcode)
self._pending_message_fragments.append(frame.payload)
if not frame.fin:
return None
return self._reassemble_fragmented_message()
def _reassemble_fragmented_message(self):
opcode = self._message_opcode
payload = ''.join(self._pending_message_fragments)
self._message_opcode = None
self._pending_message_fragments = []
self._frame_handler = self._handle_first
return _InnerMessage(opcode, payload)
def build(self, frame):
"""Build an inner message. Returns an _InnerMessage instance when
the given frame is the last fragmented frame. Returns None otherwise.
Args:
frame: an inner frame.
Raises:
            InvalidFrameException: when an invalid opcode is received (e.g.
                a non-continuation data opcode arrives while the fin flag of
                the previous inner frame was not set).
"""
return self._frame_handler(frame)
class _LogicalStream(Stream):
"""Mimics the Stream class. This class interprets multiplexed WebSocket
frames.
"""
def __init__(self, request, stream_options, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
stream_options: StreamOptions instance.
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
# Physical stream is responsible for masking.
stream_options.unmask_receive = False
Stream.__init__(self, request, stream_options)
self._send_closed = False
self._send_quota = send_quota
# - Protects _send_closed and _send_quota
# - Signals the thread waiting for send quota replenished
self._send_condition = threading.Condition()
# The opcode of the first frame in messages.
self._message_opcode = common.OPCODE_TEXT
# True when the last message was fragmented.
self._last_message_was_fragmented = False
self._receive_quota = receive_quota
self._write_inner_frame_semaphore = threading.Semaphore()
self._inner_message_builder = _InnerMessageBuilder()
def _create_inner_frame(self, opcode, payload, end=True):
frame = Frame(fin=end, opcode=opcode, payload=payload)
for frame_filter in self._options.outgoing_frame_filters:
frame_filter.filter(frame)
if len(payload) != len(frame.payload):
raise MuxUnexpectedException(
                'Mux extension must not be used after extensions which '
                'change frame boundaries')
first_byte = ((frame.fin << 7) | (frame.rsv1 << 6) |
(frame.rsv2 << 5) | (frame.rsv3 << 4) | frame.opcode)
return chr(first_byte) + frame.payload
def _write_inner_frame(self, opcode, payload, end=True):
payload_length = len(payload)
write_position = 0
try:
            # An inner frame will be fragmented if there is not enough send
# quota. This semaphore ensures that fragmented inner frames are
# sent in order on the logical channel.
# Note that frames that come from other logical channels or
# multiplexing control blocks can be inserted between fragmented
# inner frames on the physical channel.
self._write_inner_frame_semaphore.acquire()
            # Consume one octet of quota when this is the first fragmented frame.
if opcode != common.OPCODE_CONTINUATION:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
self._request.channel_id)
self._send_quota -= 1
finally:
self._send_condition.release()
while write_position < payload_length:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._logger.debug(
'No quota. Waiting FlowControl message for %d.' %
self._request.channel_id)
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
                            self._request.channel_id)
remaining = payload_length - write_position
write_length = min(self._send_quota, remaining)
inner_frame_end = (
end and
(write_position + write_length == payload_length))
inner_frame = self._create_inner_frame(
opcode,
payload[write_position:write_position+write_length],
inner_frame_end)
self._send_quota -= write_length
self._logger.debug('Consumed quota=%d, remaining=%d' %
(write_length, self._send_quota))
finally:
self._send_condition.release()
# Writing data will block the worker so we need to release
# _send_condition before writing.
self._logger.debug('Sending inner frame: %r' % inner_frame)
self._request.connection.write(inner_frame)
write_position += write_length
opcode = common.OPCODE_CONTINUATION
except ValueError, e:
raise BadOperationException(e)
finally:
self._write_inner_frame_semaphore.release()
def replenish_send_quota(self, send_quota):
"""Replenish send quota."""
try:
self._send_condition.acquire()
if self._send_quota + send_quota > 0x7FFFFFFFFFFFFFFF:
self._send_quota = 0
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_SEND_QUOTA_OVERFLOW)
self._send_quota += send_quota
self._logger.debug('Replenished send quota for channel id %d: %d' %
(self._request.channel_id, self._send_quota))
finally:
self._send_condition.notify()
self._send_condition.release()
def consume_receive_quota(self, amount):
"""Consumes receive quota. Returns False on failure."""
if self._receive_quota < amount:
self._logger.debug('Violate quota on channel id %d: %d < %d' %
(self._request.channel_id,
self._receive_quota, amount))
return False
self._receive_quota -= amount
return True
def send_message(self, message, end=True, binary=False):
"""Override Stream.send_message."""
if self._request.server_terminated:
raise BadOperationException(
'Requested send_message after sending out a closing handshake')
if binary and isinstance(message, unicode):
raise BadOperationException(
'Message for binary frame must be instance of str')
if binary:
opcode = common.OPCODE_BINARY
else:
opcode = common.OPCODE_TEXT
message = message.encode('utf-8')
for message_filter in self._options.outgoing_message_filters:
message = message_filter.filter(message, end, binary)
if self._last_message_was_fragmented:
if opcode != self._message_opcode:
raise BadOperationException('Message types are different in '
'frames for the same message')
opcode = common.OPCODE_CONTINUATION
else:
self._message_opcode = opcode
self._write_inner_frame(opcode, message, end)
self._last_message_was_fragmented = not end
def _receive_frame(self):
"""Overrides Stream._receive_frame.
        In addition to calling Stream._receive_frame, this method adds the
        amount of the payload to the receive quota and sends FlowControl to
        the client. We need to do it here because Stream.receive_message()
        handles control frames internally.
"""
opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
amount = len(payload)
        # Replenish one extra octet when receiving the first fragmented frame.
if opcode != common.OPCODE_CONTINUATION:
amount += 1
self._receive_quota += amount
frame_data = _create_flow_control(self._request.channel_id,
amount)
self._logger.debug('Sending flow control for %d, replenished=%d' %
(self._request.channel_id, amount))
self._request.connection.write_control_data(frame_data)
return opcode, payload, fin, rsv1, rsv2, rsv3
def _get_message_from_frame(self, frame):
"""Overrides Stream._get_message_from_frame.
"""
try:
inner_message = self._inner_message_builder.build(frame)
except InvalidFrameException:
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_BAD_FRAGMENTATION)
if inner_message is None:
return None
self._original_opcode = inner_message.opcode
return inner_message.payload
def receive_message(self):
"""Overrides Stream.receive_message."""
# Just call Stream.receive_message(), but catch
# LogicalConnectionClosedException, which is raised when the logical
# connection has closed gracefully.
try:
return Stream.receive_message(self)
except LogicalConnectionClosedException, e:
self._logger.debug('%s', e)
return None
def _send_closing_handshake(self, code, reason):
"""Overrides Stream._send_closing_handshake."""
body = create_closing_handshake_body(code, reason)
self._logger.debug('Sending closing handshake for %d: (%r, %r)' %
(self._request.channel_id, code, reason))
self._write_inner_frame(common.OPCODE_CLOSE, body, end=True)
self._request.server_terminated = True
def send_ping(self, body=''):
"""Overrides Stream.send_ping"""
self._logger.debug('Sending ping on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PING, body, end=True)
self._ping_queue.append(body)
def _send_pong(self, body):
"""Overrides Stream._send_pong"""
self._logger.debug('Sending pong on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PONG, body, end=True)
def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
"""Overrides Stream.close_connection."""
# TODO(bashi): Implement
self._logger.debug('Closing logical connection %d' %
self._request.channel_id)
self._request.server_terminated = True
def stop_sending(self):
"""Stops accepting new send operation (_write_inner_frame)."""
self._send_condition.acquire()
self._send_closed = True
self._send_condition.notify()
self._send_condition.release()
class _OutgoingData(object):
"""A structure that holds data to be sent via physical connection and
origin of the data.
"""
def __init__(self, channel_id, data):
self.channel_id = channel_id
self.data = data
class _PhysicalConnectionWriter(threading.Thread):
"""A thread that is responsible for writing data to physical connection.
TODO(bashi): Make sure there is no thread-safety problem when the reader
thread reads data from the same socket at a time.
"""
def __init__(self, mux_handler):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
# When set, make this thread stop accepting new data, flush pending
# data and exit.
self._stop_requested = False
# The close code of the physical connection.
self._close_code = common.STATUS_NORMAL_CLOSURE
# Deque for passing write data. It's protected by _deque_condition
# until _stop_requested is set.
self._deque = collections.deque()
# - Protects _deque, _stop_requested and _close_code
# - Signals threads waiting for them to be available
self._deque_condition = threading.Condition()
def put_outgoing_data(self, data):
"""Puts outgoing data.
Args:
data: _OutgoingData instance.
Raises:
BadOperationException: when the thread has been requested to
terminate.
"""
try:
self._deque_condition.acquire()
if self._stop_requested:
raise BadOperationException('Cannot write data anymore')
self._deque.append(data)
self._deque_condition.notify()
finally:
self._deque_condition.release()
def _write_data(self, outgoing_data):
message = (_encode_channel_id(outgoing_data.channel_id) +
outgoing_data.data)
try:
self._mux_handler.physical_stream.send_message(
message=message, end=True, binary=True)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
(self._mux_handler.physical_connection.remote_addr,), e)
raise
# TODO(bashi): It would be better to block the thread that sends
# control data as well.
if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
self._mux_handler.notify_write_data_done(outgoing_data.channel_id)
def run(self):
try:
self._deque_condition.acquire()
while not self._stop_requested:
if len(self._deque) == 0:
self._deque_condition.wait()
continue
outgoing_data = self._deque.popleft()
self._deque_condition.release()
self._write_data(outgoing_data)
self._deque_condition.acquire()
# Flush deque.
#
# At this point, self._deque_condition is always acquired.
try:
while len(self._deque) > 0:
outgoing_data = self._deque.popleft()
self._write_data(outgoing_data)
finally:
self._deque_condition.release()
# Close physical connection.
try:
                # Don't wait for the response here. The response will be read
# by the reader thread.
self._mux_handler.physical_stream.close_connection(
self._close_code, wait_response=False)
except Exception, e:
util.prepend_message_to_exception(
'Failed to close the physical connection: %r' % e)
raise
finally:
self._mux_handler.notify_writer_done()
def stop(self, close_code=common.STATUS_NORMAL_CLOSURE):
"""Stops the writer thread."""
self._deque_condition.acquire()
self._stop_requested = True
self._close_code = close_code
self._deque_condition.notify()
self._deque_condition.release()
class _PhysicalConnectionReader(threading.Thread):
"""A thread that is responsible for reading data from physical connection.
"""
def __init__(self, mux_handler):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
def run(self):
while True:
try:
physical_stream = self._mux_handler.physical_stream
message = physical_stream.receive_message()
if message is None:
break
                # The code below runs only when a data message has been received.
opcode = physical_stream.get_last_received_opcode()
if opcode != common.OPCODE_BINARY:
self._mux_handler.fail_physical_connection(
_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
'Received a text message on physical connection')
break
except ConnectionTerminatedException, e:
self._logger.debug('%s', e)
break
try:
self._mux_handler.dispatch_message(message)
except PhysicalConnectionError, e:
self._mux_handler.fail_physical_connection(
e.drop_code, e.message)
break
except LogicalChannelError, e:
self._mux_handler.fail_logical_channel(
e.channel_id, e.drop_code, e.message)
except Exception, e:
self._logger.debug(traceback.format_exc())
break
self._mux_handler.notify_reader_done()
class _Worker(threading.Thread):
"""A thread that is responsible for running the corresponding application
handler.
"""
def __init__(self, mux_handler, request):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
request: _LogicalRequest instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self._request = request
self.setDaemon(True)
def run(self):
self._logger.debug('Logical channel worker started. (id=%d)' %
self._request.channel_id)
try:
# Non-critical exceptions will be handled by dispatcher.
self._mux_handler.dispatcher.transfer_data(self._request)
except LogicalChannelError, e:
self._mux_handler.fail_logical_channel(
e.channel_id, e.drop_code, e.message)
finally:
self._mux_handler.notify_worker_done(self._request.channel_id)
class _MuxHandshaker(hybi.Handshaker):
"""Opening handshake processor for multiplexing."""
_DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ=='
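    # Note: this is the base64 encoding of "the sample nonce", the example
    # key from RFC 6455. A real client key is presumably unnecessary here
    # because a logical channel never performs a real HTTP upgrade.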
def __init__(self, request, dispatcher, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
dispatcher: Dispatcher instance (dispatch.Dispatcher).
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
hybi.Handshaker.__init__(self, request, dispatcher)
self._send_quota = send_quota
self._receive_quota = receive_quota
# Append headers which should not be included in handshake field of
# AddChannelRequest.
# TODO(bashi): Make sure whether we should raise exception when
# these headers are included already.
request.headers_in[common.UPGRADE_HEADER] = (
common.WEBSOCKET_UPGRADE_TYPE)
request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
str(common.VERSION_HYBI_LATEST))
request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
self._DUMMY_WEBSOCKET_KEY)
def _create_stream(self, stream_options):
"""Override hybi.Handshaker._create_stream."""
self._logger.debug('Creating logical stream for %d' %
self._request.channel_id)
return _LogicalStream(
self._request, stream_options, self._send_quota,
self._receive_quota)
def _create_handshake_response(self, accept):
"""Override hybi._create_handshake_response."""
response = []
response.append('HTTP/1.1 101 Switching Protocols\r\n')
# Upgrade and Sec-WebSocket-Accept should be excluded.
response.append('%s: %s\r\n' % (
common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
if self._request.ws_protocol is not None:
response.append('%s: %s\r\n' % (
common.SEC_WEBSOCKET_PROTOCOL_HEADER,
self._request.ws_protocol))
if (self._request.ws_extensions is not None and
len(self._request.ws_extensions) != 0):
response.append('%s: %s\r\n' % (
common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
common.format_extensions(self._request.ws_extensions)))
response.append('\r\n')
return ''.join(response)
def _send_handshake(self, accept):
"""Override hybi.Handshaker._send_handshake."""
# Don't send handshake response for the default channel
if self._request.channel_id == _DEFAULT_CHANNEL_ID:
return
handshake_response = self._create_handshake_response(accept)
frame_data = _create_add_channel_response(
self._request.channel_id,
handshake_response)
self._logger.debug('Sending handshake response for %d: %r' %
(self._request.channel_id, frame_data))
self._request.connection.write_control_data(frame_data)
class _LogicalChannelData(object):
"""A structure that holds information about logical channel.
"""
def __init__(self, request, worker):
self.request = request
self.worker = worker
self.drop_code = _DROP_CODE_NORMAL_CLOSURE
self.drop_message = ''
class _HandshakeDeltaBase(object):
"""A class that holds information for delta-encoded handshake."""
def __init__(self, headers):
self._headers = headers
def create_headers(self, delta=None):
"""Creates request headers for an AddChannelRequest that has
delta-encoded handshake.
Args:
            delta: headers that override the base headers; an empty value
                removes the header from the base.
"""
headers = copy.copy(self._headers)
if delta:
for key, value in delta.items():
# The spec requires that a header with an empty value is
# removed from the delta base.
if len(value) == 0 and headers.has_key(key):
del headers[key]
else:
headers[key] = value
return headers
class _MuxHandler(object):
"""Multiplexing handler. When a handler starts, it launches three
threads; the reader thread, the writer thread, and a worker thread.
The reader thread reads data from the physical stream, i.e., the
ws_stream object of the underlying websocket connection. The reader
thread interprets multiplexed frames and dispatches them to logical
channels. Methods of this class are mostly called by the reader thread.
The writer thread sends multiplexed frames which are created by
logical channels via the physical connection.
The worker thread launched at the starting point handles the
"Implicitly Opened Connection". If multiplexing handler receives
an AddChannelRequest and accepts it, the handler will launch a new worker
thread and dispatch the request to it.
"""
def __init__(self, request, dispatcher):
"""Constructs an instance.
Args:
request: mod_python request of the physical connection.
dispatcher: Dispatcher instance (dispatch.Dispatcher).
"""
self.original_request = request
self.dispatcher = dispatcher
self.physical_connection = request.connection
self.physical_stream = request.ws_stream
self._logger = util.get_class_logger(self)
self._logical_channels = {}
self._logical_channels_condition = threading.Condition()
# Holds client's initial quota
self._channel_slots = collections.deque()
self._handshake_base = None
self._worker_done_notify_received = False
self._reader = None
self._writer = None
def start(self):
"""Starts the handler.
Raises:
MuxUnexpectedException: when the handler already started, or when
opening handshake of the default channel fails.
"""
if self._reader or self._writer:
raise MuxUnexpectedException('MuxHandler already started')
self._reader = _PhysicalConnectionReader(self)
self._writer = _PhysicalConnectionWriter(self)
self._reader.start()
self._writer.start()
# Create "Implicitly Opened Connection".
logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
headers = copy.copy(self.original_request.headers_in)
# Add extensions for logical channel.
headers[common.SEC_WEBSOCKET_EXTENSIONS_HEADER] = (
common.format_extensions(
self.original_request.mux_processor.extensions()))
self._handshake_base = _HandshakeDeltaBase(headers)
logical_request = _LogicalRequest(
_DEFAULT_CHANNEL_ID,
self.original_request.method,
self.original_request.uri,
self.original_request.protocol,
self._handshake_base.create_headers(),
logical_connection)
# Client's send quota for the implicitly opened connection is zero,
# but we will send FlowControl later so set the initial quota to
# _INITIAL_QUOTA_FOR_CLIENT.
self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
send_quota = self.original_request.mux_processor.quota()
if not self._do_handshake_for_logical_request(
logical_request, send_quota=send_quota):
raise MuxUnexpectedException(
'Failed handshake on the default channel id')
self._add_logical_channel(logical_request)
# Send FlowControl for the implicitly opened connection.
frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID,
_INITIAL_QUOTA_FOR_CLIENT)
logical_request.connection.write_control_data(frame_data)
def add_channel_slots(self, slots, send_quota):
"""Adds channel slots.
Args:
slots: number of slots to be added.
send_quota: initial send quota for slots.
"""
self._channel_slots.extend([send_quota] * slots)
# Send NewChannelSlot to client.
frame_data = _create_new_channel_slot(slots, send_quota)
self.send_control_data(frame_data)
def wait_until_done(self, timeout=None):
"""Waits until all workers are done. Returns False when timeout has
occurred. Returns True on success.
Args:
timeout: timeout in sec.
"""
self._logical_channels_condition.acquire()
try:
while len(self._logical_channels) > 0:
self._logger.debug('Waiting workers(%d)...' %
len(self._logical_channels))
self._worker_done_notify_received = False
self._logical_channels_condition.wait(timeout)
if not self._worker_done_notify_received:
self._logger.debug('Waiting worker(s) timed out')
return False
finally:
self._logical_channels_condition.release()
# Flush pending outgoing data
self._writer.stop()
self._writer.join()
return True
def notify_write_data_done(self, channel_id):
"""Called by the writer thread when a write operation has done.
Args:
channel_id: objective channel id.
"""
try:
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
channel_data.request.connection.on_write_data_done()
else:
self._logger.debug('Seems that logical channel for %d has gone'
% channel_id)
finally:
self._logical_channels_condition.release()
def send_control_data(self, data):
"""Sends data via the control channel.
Args:
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=_CONTROL_CHANNEL_ID, data=data))
def send_data(self, channel_id, data):
"""Sends data via given logical channel. This method is called by
worker threads.
Args:
channel_id: objective channel id.
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=channel_id, data=data))
def _send_drop_channel(self, channel_id, code=None, message=''):
frame_data = _create_drop_channel(channel_id, code, message)
self._logger.debug(
'Sending drop channel for channel id %d' % channel_id)
self.send_control_data(frame_data)
def _send_error_add_channel_response(self, channel_id, status=None):
if status is None:
status = common.HTTP_STATUS_BAD_REQUEST
if status in _HTTP_BAD_RESPONSE_MESSAGES:
message = _HTTP_BAD_RESPONSE_MESSAGES[status]
else:
self._logger.debug('Response message for %d is not found' % status)
message = '???'
response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message)
frame_data = _create_add_channel_response(channel_id,
encoded_handshake=response,
encoding=0, rejected=True)
self.send_control_data(frame_data)
def _create_logical_request(self, block):
if block.channel_id == _CONTROL_CHANNEL_ID:
# TODO(bashi): Raise PhysicalConnectionError with code 2006
# instead of MuxUnexpectedException.
raise MuxUnexpectedException(
'Received the control channel id (0) as objective channel '
'id for AddChannel')
if block.encoding > _HANDSHAKE_ENCODING_DELTA:
raise PhysicalConnectionError(
_DROP_CODE_UNKNOWN_REQUEST_ENCODING)
method, path, version, headers = _parse_request_text(
block.encoded_handshake)
if block.encoding == _HANDSHAKE_ENCODING_DELTA:
headers = self._handshake_base.create_headers(headers)
connection = _LogicalConnection(self, block.channel_id)
request = _LogicalRequest(block.channel_id, method, path, version,
headers, connection)
return request
def _do_handshake_for_logical_request(self, request, send_quota=0):
try:
receive_quota = self._channel_slots.popleft()
except IndexError:
raise LogicalChannelError(
request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION)
handshaker = _MuxHandshaker(request, self.dispatcher,
send_quota, receive_quota)
try:
handshaker.do_handshake()
except handshake.VersionException, e:
self._logger.info('%s', e)
self._send_error_add_channel_response(
request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
return False
except handshake.HandshakeException, e:
# TODO(bashi): Should we _Fail the Logical Channel_ with 3001
# instead?
self._logger.info('%s', e)
self._send_error_add_channel_response(request.channel_id,
status=e.status)
return False
except handshake.AbortedByUserException, e:
self._logger.info('%s', e)
self._send_error_add_channel_response(request.channel_id)
return False
return True
def _add_logical_channel(self, logical_request):
try:
self._logical_channels_condition.acquire()
if logical_request.channel_id in self._logical_channels:
self._logger.debug('Channel id %d already exists' %
logical_request.channel_id)
raise PhysicalConnectionError(
_DROP_CODE_CHANNEL_ALREADY_EXISTS,
'Channel id %d already exists' %
logical_request.channel_id)
worker = _Worker(self, logical_request)
channel_data = _LogicalChannelData(logical_request, worker)
self._logical_channels[logical_request.channel_id] = channel_data
worker.start()
finally:
self._logical_channels_condition.release()
def _process_add_channel_request(self, block):
try:
logical_request = self._create_logical_request(block)
except ValueError, e:
self._logger.debug('Failed to create logical request: %r' % e)
self._send_error_add_channel_response(
block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
return
if self._do_handshake_for_logical_request(logical_request):
if block.encoding == _HANDSHAKE_ENCODING_IDENTITY:
# Update handshake base.
# TODO(bashi): Make sure this is the right place to update
# handshake base.
self._handshake_base = _HandshakeDeltaBase(
logical_request.headers_in)
self._add_logical_channel(logical_request)
else:
self._send_error_add_channel_response(
block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
def _process_flow_control(self, block):
try:
self._logical_channels_condition.acquire()
if not block.channel_id in self._logical_channels:
return
channel_data = self._logical_channels[block.channel_id]
channel_data.request.ws_stream.replenish_send_quota(
block.send_quota)
finally:
self._logical_channels_condition.release()
def _process_drop_channel(self, block):
self._logger.debug(
'DropChannel received for %d: code=%r, reason=%r' %
(block.channel_id, block.drop_code, block.drop_message))
try:
self._logical_channels_condition.acquire()
if not block.channel_id in self._logical_channels:
return
channel_data = self._logical_channels[block.channel_id]
channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED
# Close the logical channel
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
channel_data.request.ws_stream.stop_sending()
finally:
self._logical_channels_condition.release()
def _process_control_blocks(self, parser):
for control_block in parser.read_control_blocks():
opcode = control_block.opcode
self._logger.debug('control block received, opcode: %d' % opcode)
if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
self._process_add_channel_request(control_block)
elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received AddChannelResponse')
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
self._process_flow_control(control_block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
self._process_drop_channel(control_block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received NewChannelSlot')
else:
raise MuxUnexpectedException(
'Unexpected opcode %r' % opcode)
def _process_logical_frame(self, channel_id, parser):
self._logger.debug('Received a frame. channel id=%d' % channel_id)
try:
self._logical_channels_condition.acquire()
if not channel_id in self._logical_channels:
# We must ignore the message for an inactive channel.
return
channel_data = self._logical_channels[channel_id]
fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
consuming_byte = len(payload)
if opcode != common.OPCODE_CONTINUATION:
consuming_byte += 1
if not channel_data.request.ws_stream.consume_receive_quota(
consuming_byte):
# The client violates quota. Close logical channel.
raise LogicalChannelError(
channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3,
mask=False)
frame_data = header + payload
channel_data.request.connection.append_frame_data(frame_data)
finally:
self._logical_channels_condition.release()
def dispatch_message(self, message):
"""Dispatches message. The reader thread calls this method.
Args:
message: a message that contains encapsulated frame.
Raises:
PhysicalConnectionError: if the message contains physical
connection level errors.
LogicalChannelError: if the message contains logical channel
level errors.
"""
parser = _MuxFramePayloadParser(message)
try:
channel_id = parser.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED)
if channel_id == _CONTROL_CHANNEL_ID:
self._process_control_blocks(parser)
else:
self._process_logical_frame(channel_id, parser)
def notify_worker_done(self, channel_id):
"""Called when a worker has finished.
Args:
            channel_id: channel id corresponding to the worker.
"""
self._logger.debug('Worker for channel id %d terminated' % channel_id)
try:
self._logical_channels_condition.acquire()
if not channel_id in self._logical_channels:
raise MuxUnexpectedException(
'Channel id %d not found' % channel_id)
channel_data = self._logical_channels.pop(channel_id)
finally:
self._worker_done_notify_received = True
self._logical_channels_condition.notify()
self._logical_channels_condition.release()
if not channel_data.request.server_terminated:
self._send_drop_channel(
channel_id, code=channel_data.drop_code,
message=channel_data.drop_message)
def notify_reader_done(self):
"""This method is called by the reader thread when the reader has
finished.
"""
self._logger.debug(
            'Terminating all logical connections waiting for incoming data '
'...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def notify_writer_done(self):
"""This method is called by the writer thread when the writer has
finished.
"""
self._logger.debug(
            'Terminating all logical connections waiting for write '
'completion ...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.on_writer_done()
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def fail_physical_connection(self, code, message):
"""Fail the physical connection.
Args:
code: drop reason code.
message: drop message.
"""
self._logger.debug('Failing the physical connection...')
self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
self._writer.stop(common.STATUS_INTERNAL_ENDPOINT_ERROR)
def fail_logical_channel(self, channel_id, code, message):
"""Fail a logical channel.
Args:
channel_id: channel id.
code: drop reason code.
message: drop message.
"""
self._logger.debug('Failing logical channel %d...' % channel_id)
try:
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
# Close the logical channel. notify_worker_done() will be
# called later and it will send DropChannel.
channel_data.drop_code = code
channel_data.drop_message = message
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
channel_data.request.ws_stream.stop_sending()
else:
self._send_drop_channel(channel_id, code, message)
finally:
self._logical_channels_condition.release()
def use_mux(request):
return hasattr(request, 'mux_processor') and (
request.mux_processor.is_active())
def start(request, dispatcher):
mux_handler = _MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
_INITIAL_QUOTA_FOR_CLIENT)
mux_handler.wait_until_done()
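# Typical call site (sketch; the `request` and `dispatcher` objects are the
# ones provided by the surrounding pywebsocket server and are assumed here):
#
#   if use_mux(request):
#       start(request, dispatcher)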
# vi:sts=4 sw=4 et
| mpl-2.0 |
cruzegoodin/TSC-ShippingDetails | flask/lib/python2.7/site-packages/flask/json.py | 428 | 8113 | # -*- coding: utf-8 -*-
"""
    flask.json
    ~~~~~~~~~~
Implementation helpers for the JSON support in Flask.
:copyright: (c) 2012 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import io
import uuid
from datetime import datetime
from .globals import current_app, request
from ._compat import text_type, PY2
from werkzeug.http import http_date
from jinja2 import Markup
# Use the same json implementation as itsdangerous on which we
# depend anyways.
try:
from itsdangerous import simplejson as _json
except ImportError:
from itsdangerous import json as _json
# figure out if simplejson escapes slashes. This behavior was changed
# from one version to another without reason.
_slash_escape = '\\/' not in _json.dumps('/')
__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump',
'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder',
'jsonify']
def _wrap_reader_for_text(fp, encoding):
if isinstance(fp.read(0), bytes):
fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
return fp
def _wrap_writer_for_text(fp, encoding):
try:
fp.write('')
except TypeError:
fp = io.TextIOWrapper(fp, encoding)
return fp
class JSONEncoder(_json.JSONEncoder):
"""The default Flask JSON encoder. This one extends the default simplejson
encoder by also supporting ``datetime`` objects, ``UUID`` as well as
``Markup`` objects which are serialized as RFC 822 datetime strings (same
as the HTTP date format). In order to support more data types override the
:meth:`default` method.
"""
def default(self, o):
"""Implement this method in a subclass such that it returns a
serializable object for ``o``, or calls the base implementation (to
raise a ``TypeError``).
For example, to support arbitrary iterators, you could implement
default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
if isinstance(o, datetime):
return http_date(o)
if isinstance(o, uuid.UUID):
return str(o)
if hasattr(o, '__html__'):
return text_type(o.__html__())
return _json.JSONEncoder.default(self, o)
class JSONDecoder(_json.JSONDecoder):
"""The default JSON decoder. This one does not change the behavior from
the default simplejson encoder. Consult the :mod:`json` documentation
for more information. This decoder is not only used for the load
functions of this module but also :attr:`~flask.Request`.
"""
def _dump_arg_defaults(kwargs):
"""Inject default arguments for dump functions."""
if current_app:
kwargs.setdefault('cls', current_app.json_encoder)
if not current_app.config['JSON_AS_ASCII']:
kwargs.setdefault('ensure_ascii', False)
kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
else:
kwargs.setdefault('sort_keys', True)
kwargs.setdefault('cls', JSONEncoder)
def _load_arg_defaults(kwargs):
"""Inject default arguments for load functions."""
if current_app:
kwargs.setdefault('cls', current_app.json_decoder)
else:
kwargs.setdefault('cls', JSONDecoder)
def dumps(obj, **kwargs):
"""Serialize ``obj`` to a JSON formatted ``str`` by using the application's
configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an
application on the stack.
This function can return ``unicode`` strings or ascii-only bytestrings by
default which coerce into unicode strings automatically. That behavior by
default is controlled by the ``JSON_AS_ASCII`` configuration variable
    and can be overridden by the simplejson ``ensure_ascii`` parameter.
"""
_dump_arg_defaults(kwargs)
encoding = kwargs.pop('encoding', None)
rv = _json.dumps(obj, **kwargs)
if encoding is not None and isinstance(rv, text_type):
rv = rv.encode(encoding)
return rv
def dump(obj, fp, **kwargs):
"""Like :func:`dumps` but writes into a file object."""
_dump_arg_defaults(kwargs)
encoding = kwargs.pop('encoding', None)
if encoding is not None:
fp = _wrap_writer_for_text(fp, encoding)
_json.dump(obj, fp, **kwargs)
def loads(s, **kwargs):
"""Unserialize a JSON object from a string ``s`` by using the application's
configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an
application on the stack.
"""
_load_arg_defaults(kwargs)
if isinstance(s, bytes):
s = s.decode(kwargs.pop('encoding', None) or 'utf-8')
return _json.loads(s, **kwargs)
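# Usage sketch (assumes an existing Flask application object named ``app``):
# inside an application context these helpers pick up the app's JSON settings.
#
#   with app.app_context():
#       dumps({'answer': 42})    # -> '{"answer": 42}'
#       loads('{"answer": 42}')  # -> {'answer': 42}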
def load(fp, **kwargs):
"""Like :func:`loads` but reads from a file object.
"""
_load_arg_defaults(kwargs)
if not PY2:
fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8')
return _json.load(fp, **kwargs)
def htmlsafe_dumps(obj, **kwargs):
"""Works exactly like :func:`dumps` but is safe for use in ``<script>``
tags. It accepts the same arguments and returns a JSON string. Note that
this is available in templates through the ``|tojson`` filter which will
also mark the result as safe. Due to how this function escapes certain
characters this is safe even if used outside of ``<script>`` tags.
The following characters are escaped in strings:
- ``<``
- ``>``
- ``&``
- ``'``
This makes it safe to embed such strings in any place in HTML with the
notable exception of double quoted attributes. In that case single
quote your attributes or HTML escape it in addition.
.. versionchanged:: 0.10
This function's return value is now always safe for HTML usage, even
if outside of script tags or if used in XHTML. This rule does not
hold true when using this function in HTML attributes that are double
quoted. Always single quote attributes if you use the ``|tojson``
filter. Alternatively use ``|tojson|forceescape``.
"""
rv = dumps(obj, **kwargs) \
.replace(u'<', u'\\u003c') \
.replace(u'>', u'\\u003e') \
.replace(u'&', u'\\u0026') \
.replace(u"'", u'\\u0027')
if not _slash_escape:
rv = rv.replace('\\/', '/')
return rv
def htmlsafe_dump(obj, fp, **kwargs):
"""Like :func:`htmlsafe_dumps` but writes into a file object."""
fp.write(unicode(htmlsafe_dumps(obj, **kwargs)))
def jsonify(*args, **kwargs):
"""Creates a :class:`~flask.Response` with the JSON representation of
the given arguments with an `application/json` mimetype. The arguments
to this function are the same as to the :class:`dict` constructor.
Example usage::
from flask import jsonify
@app.route('/_get_current_user')
def get_current_user():
return jsonify(username=g.user.username,
email=g.user.email,
id=g.user.id)
This will send a JSON response like this to the browser::
{
"username": "admin",
"email": "admin@localhost",
"id": 42
}
For security reasons only objects are supported toplevel. For more
information about this, have a look at :ref:`json-security`.
This function's response will be pretty printed if it was not requested
with ``X-Requested-With: XMLHttpRequest`` to simplify debugging unless
the ``JSONIFY_PRETTYPRINT_REGULAR`` config parameter is set to false.
.. versionadded:: 0.2
"""
indent = None
if current_app.config['JSONIFY_PRETTYPRINT_REGULAR'] \
and not request.is_xhr:
indent = 2
return current_app.response_class(dumps(dict(*args, **kwargs),
indent=indent),
mimetype='application/json')
def tojson_filter(obj, **kwargs):
return Markup(htmlsafe_dumps(obj, **kwargs))
| bsd-3-clause |
thanhacun/odoo | addons/base_report_designer/base_report_designer.py | 314 | 3433 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from StringIO import StringIO
from openerp.modules.module import get_module_resource
import openerp.modules.registry
from openerp.osv import osv
from openerp_sxw2rml import sxw2rml
class report_xml(osv.osv):
_inherit = 'ir.actions.report.xml'
def sxwtorml(self, cr, uid, file_sxw, file_type):
'''
The use of this function is to get rml file from sxw file.
'''
sxwval = StringIO(base64.decodestring(file_sxw))
if file_type=='sxw':
fp = open(get_module_resource('base_report_designer','openerp_sxw2rml', 'normalized_oo2rml.xsl'),'rb')
if file_type=='odt':
fp = open(get_module_resource('base_report_designer','openerp_sxw2rml', 'normalized_odt2rml.xsl'),'rb')
return {'report_rml_content': str(sxw2rml(sxwval, xsl=fp.read()))}
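    # Usage sketch (illustrative values): both sxwtorml() and upload_report()
    # expect `file_sxw` to be the base64-encoded OpenOffice document, e.g.
    #   data = base64.encodestring(open('report.sxw', 'rb').read())
    #   report_xml_obj.sxwtorml(cr, uid, data, 'sxw')
    # where report_xml_obj is the 'ir.actions.report.xml' model.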
def upload_report(self, cr, uid, report_id, file_sxw, file_type, context=None):
'''
Untested function
'''
sxwval = StringIO(base64.decodestring(file_sxw))
if file_type=='sxw':
fp = open(get_module_resource('base_report_designer','openerp_sxw2rml', 'normalized_oo2rml.xsl'),'rb')
if file_type=='odt':
fp = open(get_module_resource('base_report_designer','openerp_sxw2rml', 'normalized_odt2rml.xsl'),'rb')
report = self.pool['ir.actions.report.xml'].write(cr, uid, [report_id], {
'report_sxw_content': base64.decodestring(file_sxw),
'report_rml_content': str(sxw2rml(sxwval, xsl=fp.read())),
})
return True
def report_get(self, cr, uid, report_id, context=None):
# skip osv.fields.sanitize_binary_value() because we want the raw bytes in all cases
context = dict(context or {}, bin_raw=True)
report = self.browse(cr, uid, report_id, context=context)
sxw_data = report.report_sxw_content
rml_data = report.report_rml_content
if isinstance(sxw_data, unicode):
sxw_data = sxw_data.encode("iso-8859-1", "replace")
if isinstance(rml_data, unicode):
rml_data = rml_data.encode("iso-8859-1", "replace")
return {
'file_type' : report.report_type,
'report_sxw_content': sxw_data and base64.encodestring(sxw_data) or False,
'report_rml_content': rml_data and base64.encodestring(rml_data) or False
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yelongyu/chihu | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py | 1093 | 8936 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
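    # Usage sketch (illustrative):
    #   d = OrderedDict([('b', 1), ('a', 2)])
    #   d['c'] = 3
    #   d.keys()     # -> ['b', 'a', 'c']  (insertion order is preserved)
    #   d.popitem()  # -> ('c', 3)         (LIFO by default)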
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
| gpl-3.0 |
rahul-c1/scrapy | scrapy/tests/test_commands.py | 3 | 8057 | import os
import sys
import subprocess
from time import sleep
from os.path import exists, join, abspath
from shutil import rmtree
from tempfile import mkdtemp
from twisted.trial import unittest
from scrapy.utils.python import retry_on_eintr
from scrapy.utils.test import get_testenv
class ProjectTest(unittest.TestCase):
project_name = 'testproject'
def setUp(self):
self.temp_path = mkdtemp()
self.cwd = self.temp_path
self.proj_path = join(self.temp_path, self.project_name)
self.proj_mod_path = join(self.proj_path, self.project_name)
self.env = get_testenv()
def tearDown(self):
rmtree(self.temp_path)
def call(self, *new_args, **kwargs):
out = os.tmpfile()
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
return subprocess.call(args, stdout=out, stderr=out, cwd=self.cwd, \
env=self.env, **kwargs)
def proc(self, *new_args, **kwargs):
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
p = subprocess.Popen(args, cwd=self.cwd, env=self.env,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
**kwargs)
waited = 0
interval = 0.2
while p.poll() is None:
sleep(interval)
waited += interval
if waited > 15:
p.kill()
assert False, 'Command took too much time to complete'
return p
class StartprojectTest(ProjectTest):
def test_startproject(self):
self.assertEqual(0, self.call('startproject', self.project_name))
assert exists(join(self.proj_path, 'scrapy.cfg'))
assert exists(join(self.proj_path, 'testproject'))
assert exists(join(self.proj_mod_path, '__init__.py'))
assert exists(join(self.proj_mod_path, 'items.py'))
assert exists(join(self.proj_mod_path, 'pipelines.py'))
assert exists(join(self.proj_mod_path, 'settings.py'))
assert exists(join(self.proj_mod_path, 'spiders', '__init__.py'))
self.assertEqual(1, self.call('startproject', self.project_name))
self.assertEqual(1, self.call('startproject', 'wrong---project---name'))
class CommandTest(ProjectTest):
def setUp(self):
super(CommandTest, self).setUp()
self.call('startproject', self.project_name)
self.cwd = join(self.temp_path, self.project_name)
self.env['SCRAPY_SETTINGS_MODULE'] = '%s.settings' % self.project_name
class GenspiderCommandTest(CommandTest):
def test_arguments(self):
# only pass one argument. spider script shouldn't be created
self.assertEqual(2, self.call('genspider', 'test_name'))
assert not exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
# pass two arguments <name> <domain>. spider script should be created
self.assertEqual(0, self.call('genspider', 'test_name', 'test.com'))
assert exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
def test_template(self, tplname='crawl'):
args = ['--template=%s' % tplname] if tplname else []
spname = 'test_spider'
p = self.proc('genspider', spname, 'test.com', *args)
out = retry_on_eintr(p.stdout.read)
self.assert_("Created spider %r using template %r in module" % (spname, tplname) in out)
self.assert_(exists(join(self.proj_mod_path, 'spiders', 'test_spider.py')))
p = self.proc('genspider', spname, 'test.com', *args)
out = retry_on_eintr(p.stdout.read)
self.assert_("Spider %r already exists in module" % spname in out)
def test_template_basic(self):
self.test_template('basic')
def test_template_csvfeed(self):
self.test_template('csvfeed')
def test_template_xmlfeed(self):
self.test_template('xmlfeed')
def test_list(self):
self.assertEqual(0, self.call('genspider', '--list'))
def test_dump(self):
self.assertEqual(0, self.call('genspider', '--dump=basic'))
self.assertEqual(0, self.call('genspider', '-d', 'basic'))
def test_same_name_as_project(self):
self.assertEqual(2, self.call('genspider', self.project_name))
assert not exists(join(self.proj_mod_path, 'spiders', '%s.py' % self.project_name))
class MiscCommandsTest(CommandTest):
def test_list(self):
self.assertEqual(0, self.call('list'))
class RunSpiderCommandTest(CommandTest):
def test_runspider(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy import log
from scrapy.spider import BaseSpider
class MySpider(BaseSpider):
name = 'myspider'
def start_requests(self):
self.log("It Works!")
return []
""")
p = self.proc('runspider', fname)
log = p.stderr.read()
self.assert_("[myspider] DEBUG: It Works!" in log, log)
self.assert_("[myspider] INFO: Spider opened" in log, log)
self.assert_("[myspider] INFO: Closing spider (finished)" in log, log)
self.assert_("[myspider] INFO: Spider closed (finished)" in log, log)
def test_runspider_no_spider_found(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy import log
from scrapy.spider import BaseSpider
""")
p = self.proc('runspider', fname)
log = p.stderr.read()
self.assert_("No spider found in file" in log)
def test_runspider_file_not_found(self):
p = self.proc('runspider', 'some_non_existent_file')
log = p.stderr.read()
self.assert_("File not found: some_non_existent_file" in log)
def test_runspider_unable_to_load(self):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
fname = abspath(join(tmpdir, 'myspider.txt'))
with open(fname, 'w') as f:
f.write("")
p = self.proc('runspider', fname)
log = p.stderr.read()
self.assert_("Unable to load" in log)
class ParseCommandTest(CommandTest):
def setUp(self):
super(ParseCommandTest, self).setUp()
self.spider_name = 'parse_spider'
fname = abspath(join(self.proj_mod_path, 'spiders', 'myspider.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy import log
from scrapy.spider import BaseSpider
from scrapy.item import Item
class MySpider(BaseSpider):
name = '{0}'
def parse(self, response):
if getattr(self, 'test_arg', None):
self.log('It Works!')
return [Item()]
""".format(self.spider_name))
fname = abspath(join(self.proj_mod_path, 'pipelines.py'))
with open(fname, 'w') as f:
f.write("""
from scrapy import log
class MyPipeline(object):
component_name = 'my_pipeline'
def process_item(self, item, spider):
log.msg('It Works!')
return item
""")
fname = abspath(join(self.proj_mod_path, 'settings.py'))
with open(fname, 'a') as f:
f.write("""
ITEM_PIPELINES = {'%s.pipelines.MyPipeline': 1}
""" % self.project_name)
def test_spider_arguments(self):
p = self.proc('parse', '--spider', self.spider_name, '-a', 'test_arg=1',
'-c', 'parse', 'http://scrapinghub.com')
log = p.stderr.read()
self.assert_("[parse_spider] DEBUG: It Works!" in log, log)
def test_pipelines(self):
p = self.proc('parse', '--spider', self.spider_name, '--pipelines',
'-c', 'parse', 'http://scrapinghub.com')
log = p.stderr.read()
self.assert_("[scrapy] INFO: It Works!" in log, log)
class BenchCommandTest(CommandTest):
def test_run(self):
p = self.proc('bench', '-s', 'LOGSTATS_INTERVAL=0.001',
'-s', 'CLOSESPIDER_TIMEOUT=0.01')
log = p.stderr.read()
self.assert_('INFO: Crawled' in log, log)
| bsd-3-clause |
chemelnucfin/tensorflow | tensorflow/python/data/kernel_tests/range_test.py | 17 | 2767 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.range()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class RangeTest(test_base.DatasetTestBase):
def testStop(self):
dataset = dataset_ops.Dataset.range(5)
self.assertDatasetProduces(dataset, expected_output=range(5))
def testStartStop(self):
start, stop = 2, 5
dataset = dataset_ops.Dataset.range(start, stop)
self.assertDatasetProduces(dataset, expected_output=range(2, 5))
def testStartStopStep(self):
start, stop, step = 2, 10, 2
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(2, 10, 2))
def testZeroStep(self):
start, stop, step = 2, 10, 0
with self.assertRaises(errors.InvalidArgumentError):
dataset = dataset_ops.Dataset.range(start, stop, step)
self.evaluate(dataset._variant_tensor)
def testNegativeStep(self):
start, stop, step = 2, 10, -1
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(2, 10, -1))
def testStopLessThanStart(self):
start, stop = 10, 2
dataset = dataset_ops.Dataset.range(start, stop)
self.assertDatasetProduces(dataset, expected_output=range(10, 2))
def testStopLessThanStartWithPositiveStep(self):
start, stop, step = 10, 2, 2
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(10, 2, 2))
def testStopLessThanStartWithNegativeStep(self):
start, stop, step = 10, 2, -1
dataset = dataset_ops.Dataset.range(start, stop, step)
self.assertDatasetProduces(dataset, expected_output=range(10, 2, -1))
if __name__ == "__main__":
test.main()
| apache-2.0 |
tridge/ardupilot | Tools/ardupilotwaf/embed.py | 16 | 3286 | #!/usr/bin/env python
'''
script to create ap_romfs_embedded.h from a set of static files
Andrew Tridgell
May 2017
'''
import os, sys, tempfile, gzip
def write_encode(out, s):
out.write(s.encode())
def embed_file(out, f, idx, embedded_name, uncompressed):
'''embed one file'''
try:
contents = open(f,'rb').read()
except Exception:
raise Exception("Failed to embed %s" % f)
pad = 0
if embedded_name.endswith("bootloader.bin"):
        # round size to a multiple of 32 bytes for the bootloader; this ensures
        # it can be flashed on an STM32H7 chip
blen = len(contents)
pad = (32 - (blen % 32)) % 32
if pad != 0:
if sys.version_info[0] >= 3:
contents += bytes([0xff]*pad)
else:
for i in range(pad):
contents += bytes(chr(0xff))
print("Padded %u bytes for %s to %u" % (pad, embedded_name, len(contents)))
crc = crc32(bytearray(contents))
write_encode(out, 'static const uint8_t ap_romfs_%u[] = {' % idx)
compressed = tempfile.NamedTemporaryFile()
if uncompressed:
# ensure nul termination
if sys.version_info[0] >= 3:
nul = bytearray(0)
else:
nul = chr(0)
if contents[-1] != nul:
contents += nul
compressed.write(contents)
else:
# compress it
f = open(compressed.name, "wb")
with gzip.GzipFile(fileobj=f, mode='wb', filename='', compresslevel=9, mtime=0) as g:
g.write(contents)
f.close()
compressed.seek(0)
b = bytearray(compressed.read())
compressed.close()
for c in b:
write_encode(out, '%u,' % c)
write_encode(out, '};\n\n');
return crc
def crc32(bytes, crc=0):
'''crc32 equivalent to crc32_small() from AP_Math/crc.cpp'''
for byte in bytes:
crc ^= byte
for i in range(8):
mask = (-(crc & 1)) & 0xFFFFFFFF
crc >>= 1
crc ^= (0xEDB88320 & mask)
return crc
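# Sanity-check sketch (illustrative only, not used by the build): the loop above
# is the plain reflected-polynomial update, so seeding it with 0xFFFFFFFF and
# inverting the result should reproduce the standard zlib/binascii CRC-32:
#   import binascii
#   data = b'hello world'
#   assert (crc32(bytearray(data), 0xFFFFFFFF) ^ 0xFFFFFFFF) == (binascii.crc32(data) & 0xFFFFFFFF)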
def create_embedded_h(filename, files, uncompressed=False):
'''create a ap_romfs_embedded.h file'''
out = open(filename, "wb")
write_encode(out, '''// generated embedded files for AP_ROMFS\n\n''')
# remove duplicates and sort
files = sorted(list(set(files)))
crc = {}
for i in range(len(files)):
(name, filename) = files[i]
try:
crc[filename] = embed_file(out, filename, i, name, uncompressed)
except Exception as e:
print(e)
return False
write_encode(out, '''const AP_ROMFS::embedded_file AP_ROMFS::files[] = {\n''')
for i in range(len(files)):
(name, filename) = files[i]
if uncompressed:
ustr = ' (uncompressed)'
else:
ustr = ''
print("Embedding file %s:%s%s" % (name, filename, ustr))
write_encode(out, '{ "%s", sizeof(ap_romfs_%u), 0x%08x, ap_romfs_%u },\n' % (name, i, crc[filename], i))
write_encode(out, '};\n')
out.close()
return True
if __name__ == '__main__':
import sys
flist = []
for i in range(1, len(sys.argv)):
f = sys.argv[i]
flist.append((f, f))
create_embedded_h("/tmp/ap_romfs_embedded.h", flist)
| gpl-3.0 |
jean/sentry | src/sentry/web/frontend/debug/debug_oauth_authorize.py | 4 | 1255 | from __future__ import absolute_import, print_function
from django.views.generic import View
from sentry.models import ApiApplication
from sentry.web.helpers import render_to_response
class DebugOAuthAuthorizeView(View):
def get(self, request):
application = ApiApplication(
name='Example Application',
homepage_url='http://example.com',
terms_url='http://example.com/terms',
privacy_url='http://example.com/privacy',
)
return render_to_response(
'sentry/oauth-authorize.html', {
'user':
request.user,
'application':
application,
'scopes': ['org:read', 'project:write'],
'permissions': [
'Read access to organization details.',
'Read and write access to projects.',
],
}, request
)
class DebugOAuthAuthorizeErrorView(View):
def get(self, request):
return render_to_response(
'sentry/oauth-error.html', {
'error':
'We were unable to complete your request. Please re-initiate the authorization flow.',
}, request
)
| bsd-3-clause |
deisi/home-assistant | homeassistant/components/wink.py | 1 | 3737 | """
Support for Wink hubs.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/wink/
"""
import logging
import json
from homeassistant.helpers import validate_config, discovery
from homeassistant.const import CONF_ACCESS_TOKEN, ATTR_BATTERY_LEVEL
from homeassistant.helpers.entity import Entity
DOMAIN = "wink"
REQUIREMENTS = ['python-wink==0.7.10', 'pubnub==3.8.2']
SUBSCRIPTION_HANDLER = None
CHANNELS = []
def setup(hass, config):
"""Setup the Wink component."""
logger = logging.getLogger(__name__)
if not validate_config(config, {DOMAIN: [CONF_ACCESS_TOKEN]}, logger):
return False
import pywink
from pubnub import Pubnub
pywink.set_bearer_token(config[DOMAIN][CONF_ACCESS_TOKEN])
global SUBSCRIPTION_HANDLER
SUBSCRIPTION_HANDLER = Pubnub("N/A", pywink.get_subscription_key(),
ssl_on=True)
SUBSCRIPTION_HANDLER.set_heartbeat(120)
# Load components for the devices in the Wink that we support
for component_name, func_exists in (
('light', pywink.get_bulbs),
('switch', lambda: pywink.get_switches or pywink.get_sirens or
pywink.get_powerstrip_outlets),
('binary_sensor', pywink.get_sensors),
('sensor', lambda: pywink.get_sensors or pywink.get_eggtrays),
('lock', pywink.get_locks),
('rollershutter', pywink.get_shades),
('garage_door', pywink.get_garage_doors)):
if func_exists():
discovery.load_platform(hass, component_name, DOMAIN, {}, config)
return True
class WinkDevice(Entity):
"""Represents a base Wink device."""
def __init__(self, wink):
"""Initialize the Wink device."""
from pubnub import Pubnub
self.wink = wink
self._battery = self.wink.battery_level
if self.wink.pubnub_channel in CHANNELS:
pubnub = Pubnub("N/A", self.wink.pubnub_key, ssl_on=True)
pubnub.set_heartbeat(120)
pubnub.subscribe(self.wink.pubnub_channel,
self._pubnub_update,
error=self._pubnub_error)
else:
CHANNELS.append(self.wink.pubnub_channel)
SUBSCRIPTION_HANDLER.subscribe(self.wink.pubnub_channel,
self._pubnub_update,
error=self._pubnub_error)
def _pubnub_update(self, message, channel):
self.wink.pubnub_update(json.loads(message))
self.update_ha_state()
def _pubnub_error(self, message):
logging.getLogger(__name__).error(
"Error on pubnub update for " + self.wink.name())
@property
def unique_id(self):
"""Return the ID of this Wink device."""
return "{}.{}".format(self.__class__, self.wink.device_id())
@property
def name(self):
"""Return the name of the device."""
return self.wink.name()
@property
def available(self):
"""True if connection == True."""
return self.wink.available
def update(self):
"""Update state of the device."""
self.wink.update_state()
@property
def should_poll(self):
"""Only poll if we are not subscribed to pubnub."""
return self.wink.pubnub_channel is None
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._battery:
return {
ATTR_BATTERY_LEVEL: self._battery_level,
}
@property
def _battery_level(self):
"""Return the battery level."""
return self.wink.battery_level * 100
| mit |
akash1808/nova | nova/tests/unit/virt/vmwareapi/stubs.py | 52 | 2525 | # Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Stubouts for the test suite
"""
from oslo_vmware import exceptions as vexc
from nova.tests.unit.virt.vmwareapi import fake
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import network_util
def fake_get_vim_object(arg):
"""Stubs out the VMwareAPISession's get_vim_object method."""
return fake.FakeVim()
@property
def fake_vim_prop(arg):
"""Stubs out the VMwareAPISession's vim property access method."""
return fake.get_fake_vim_object(arg)
def fake_is_vim_object(arg, module):
"""Stubs out the VMwareAPISession's is_vim_object method."""
return isinstance(module, fake.FakeVim)
def fake_temp_method_exception():
raise vexc.VimFaultException(
[vexc.NOT_AUTHENTICATED],
"Session Empty/Not Authenticated")
def fake_temp_session_exception():
raise vexc.VimConnectionException("it's a fake!",
"Session Exception")
def fake_session_file_exception():
fault_list = [vexc.FILE_ALREADY_EXISTS]
raise vexc.VimFaultException(fault_list,
Exception('fake'))
def fake_session_permission_exception():
fault_list = [vexc.NO_PERMISSION]
fault_string = 'Permission to perform this operation was denied.'
details = {'privilegeId': 'Resource.AssignVMToPool', 'object': 'domain-c7'}
raise vexc.VimFaultException(fault_list, fault_string, details=details)
def set_stubs(stubs):
"""Set the stubs."""
stubs.Set(network_util, 'get_network_with_the_name',
fake.fake_get_network)
stubs.Set(images, 'upload_image_stream_optimized', fake.fake_upload_image)
stubs.Set(images, 'fetch_image', fake.fake_fetch_image)
stubs.Set(driver.VMwareAPISession, "vim", fake_vim_prop)
stubs.Set(driver.VMwareAPISession, "_is_vim_object",
fake_is_vim_object)
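# Typical use from a test case (sketch; the class name is hypothetical and a
# stubout-style "stubs" attribute is assumed to be provided by the test base):
#   from nova.tests.unit.virt.vmwareapi import stubs
#   class MyVMwareTestCase(test.TestCase):
#       def setUp(self):
#           super(MyVMwareTestCase, self).setUp()
#           stubs.set_stubs(self.stubs)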
| apache-2.0 |
azumimuo/family-xbmc-addon | plugin.video.wolfpack/resources/lib/modules/pyaes/__init__.py | 138 | 2032 | # The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# This is a pure-Python implementation of the AES algorithm and AES common
# modes of operation.
# See: https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
# See: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation
# Supported key sizes:
# 128-bit
# 192-bit
# 256-bit
# Supported modes of operation:
# ECB - Electronic Codebook
# CBC - Cipher-Block Chaining
# CFB - Cipher Feedback
# OFB - Output Feedback
# CTR - Counter
# See the README.md for API details and general information.
# Also useful, PyCrypto, a crypto library implemented in C with Python bindings:
# https://www.dlitz.net/software/pycrypto/
VERSION = [1, 3, 0]
from .aes import AES, AESModeOfOperationCTR, AESModeOfOperationCBC, AESModeOfOperationCFB, AESModeOfOperationECB, AESModeOfOperationOFB, AESModesOfOperation, Counter
from .blockfeeder import decrypt_stream, Decrypter, encrypt_stream, Encrypter
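# Minimal usage sketch (the key below is only an example value; any 16-, 24- or
# 32-byte key works; see README.md for the full API):
#
#     from pyaes import AESModeOfOperationCTR
#     key = b'sixteen byte key'                       # 128-bit key
#     ciphertext = AESModeOfOperationCTR(key).encrypt(b'a secret message')
#     plaintext = AESModeOfOperationCTR(key).decrypt(ciphertext)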
| gpl-2.0 |
twz915/django | tests/forms_tests/field_tests/test_typedmultiplechoicefield.py | 106 | 3520 | import decimal
from django.forms import TypedMultipleChoiceField, ValidationError
from django.test import SimpleTestCase
class TypedMultipleChoiceFieldTest(SimpleTestCase):
def test_typedmultiplechoicefield_1(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1], f.clean(['1']))
msg = "'Select a valid choice. 2 is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(['2'])
def test_typedmultiplechoicefield_2(self):
# Different coercion, same validation.
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=float)
self.assertEqual([1.0], f.clean(['1']))
def test_typedmultiplechoicefield_3(self):
# This can also cause weirdness: be careful (bool(-1) == True, remember)
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=bool)
self.assertEqual([True], f.clean(['-1']))
def test_typedmultiplechoicefield_4(self):
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int)
self.assertEqual([1, -1], f.clean(['1', '-1']))
msg = "'Select a valid choice. 2 is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(['1', '2'])
def test_typedmultiplechoicefield_5(self):
# Even more weirdness: if you have a valid choice but your coercion function
# can't coerce, you'll still get a validation error. Don't do this!
f = TypedMultipleChoiceField(choices=[('A', 'A'), ('B', 'B')], coerce=int)
msg = "'Select a valid choice. B is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(['B'])
# Required fields require values
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean([])
def test_typedmultiplechoicefield_6(self):
# Non-required fields aren't required
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False)
self.assertEqual([], f.clean([]))
def test_typedmultiplechoicefield_7(self):
# If you want cleaning an empty value to return a different type, tell the field
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=False, empty_value=None)
self.assertIsNone(f.clean([]))
def test_typedmultiplechoicefield_has_changed(self):
# has_changed should not trigger required validation
f = TypedMultipleChoiceField(choices=[(1, "+1"), (-1, "-1")], coerce=int, required=True)
self.assertFalse(f.has_changed(None, ''))
def test_typedmultiplechoicefield_special_coerce(self):
"""
A coerce function which results in a value not present in choices
should raise an appropriate error (#21397).
"""
def coerce_func(val):
return decimal.Decimal('1.%s' % val)
f = TypedMultipleChoiceField(
choices=[(1, "1"), (2, "2")], coerce=coerce_func, required=True)
self.assertEqual([decimal.Decimal('1.2')], f.clean(['2']))
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean([])
msg = "'Select a valid choice. 3 is not one of the available choices.'"
with self.assertRaisesMessage(ValidationError, msg):
f.clean(['3'])
| bsd-3-clause |
moocowmoo/dashman | lib/pycoin/pycoin/tx/TxIn.py | 14 | 3804 | # -*- coding: utf-8 -*-
"""
Deal with the part of a Tx that specifies where the Bitcoin comes from.
The MIT License (MIT)
Copyright (c) 2013 by Richard Kiss
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import encoding
from ..serialize import b2h, b2h_rev, h2b
from ..serialize.bitcoin_streamer import parse_struct, stream_struct
from .script.tools import disassemble, opcode_list
from .script.vm import verify_script
ZERO = b'\0' * 32
class TxIn(object):
"""
The part of a Tx that specifies where the Bitcoin comes from.
"""
def __init__(self, previous_hash, previous_index, script=b'', sequence=4294967295):
self.previous_hash = previous_hash
self.previous_index = previous_index
self.script = script
self.sequence = sequence
@classmethod
def coinbase_tx_in(self, script):
tx = TxIn(previous_hash=ZERO, previous_index=4294967295, script=script)
return tx
def stream(self, f, blank_solutions=False):
script = b'' if blank_solutions else self.script
stream_struct("#LSL", f, self.previous_hash, self.previous_index, script, self.sequence)
@classmethod
def parse(self, f):
return self(*parse_struct("#LSL", f))
def is_coinbase(self):
return self.previous_hash == ZERO
def public_key_sec(self):
"""Return the public key as sec, or None in case of failure."""
if self.is_coinbase():
return None
opcodes = opcode_list(self.script)
if len(opcodes) == 2 and opcodes[0].startswith("30"):
# the second opcode is probably the public key as sec
sec = h2b(opcodes[1])
return sec
return None
def bitcoin_address(self, address_prefix=b'\0'):
if self.is_coinbase():
return "(coinbase)"
# attempt to return the source address
sec = self.public_key_sec()
if sec:
bitcoin_address = encoding.hash160_sec_to_bitcoin_address(
encoding.hash160(sec), address_prefix=address_prefix)
return bitcoin_address
return "(unknown)"
def verify(self, tx_out_script, signature_for_hash_type_f, expected_hash_type=None):
"""
Return True or False depending upon whether this TxIn verifies.
tx_out_script: the script of the TxOut that corresponds to this input
        signature_for_hash_type_f: a function that returns the hash of the partial transaction for a given hash type
"""
return verify_script(self.script, tx_out_script, signature_for_hash_type_f, expected_hash_type=expected_hash_type)
def __str__(self):
if self.is_coinbase():
return 'TxIn<COINBASE: %s>' % b2h(self.script)
return 'TxIn<%s[%d] "%s">' % (
b2h_rev(self.previous_hash), self.previous_index, disassemble(self.script))
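# Minimal usage sketch (illustrative script bytes only):
#
#     tx_in = TxIn.coinbase_tx_in(script=b'\x03abc')
#     tx_in.is_coinbase()   # True
#     str(tx_in)            # "TxIn<COINBASE: 03616263>"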
| mit |
racker/cloud-init-debian-pkg | tests/unittests/test_datasource/test_openstack.py | 6 | 11742 | # vi: ts=4 expandtab
#
# Copyright (C) 2014 Yahoo! Inc.
#
# Author: Joshua Harlow <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import copy
import json
import re
from StringIO import StringIO
from urlparse import urlparse
from tests.unittests import helpers as test_helpers
from cloudinit import helpers
from cloudinit import settings
from cloudinit.sources import DataSourceOpenStack as ds
from cloudinit.sources.helpers import openstack
from cloudinit import util
import httpretty as hp
BASE_URL = "http://169.254.169.254"
PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
EC2_META = {
'ami-id': 'ami-00000001',
'ami-launch-index': '0',
'ami-manifest-path': 'FIXME',
'hostname': 'sm-foo-test.novalocal',
'instance-action': 'none',
'instance-id': 'i-00000001',
'instance-type': 'm1.tiny',
'local-hostname': 'sm-foo-test.novalocal',
'local-ipv4': '0.0.0.0',
'public-hostname': 'sm-foo-test.novalocal',
'public-ipv4': '0.0.0.1',
'reservation-id': 'r-iru5qm4m',
}
USER_DATA = '#!/bin/sh\necho This is user data\n'
VENDOR_DATA = {
'magic': '',
}
OSTACK_META = {
'availability_zone': 'nova',
'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
{'content_path': '/content/0001', 'path': '/etc/bar/bar.cfg'}],
'hostname': 'sm-foo-test.novalocal',
'meta': {'dsmode': 'local', 'my-meta': 'my-value'},
'name': 'sm-foo-test',
'public_keys': {'mykey': PUBKEY},
'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c',
}
CONTENT_0 = 'This is contents of /etc/foo.cfg\n'
CONTENT_1 = '# this is /etc/bar/bar.cfg\n'
OS_FILES = {
'openstack/2012-08-10/meta_data.json': json.dumps(OSTACK_META),
'openstack/2012-08-10/user_data': USER_DATA,
'openstack/content/0000': CONTENT_0,
'openstack/content/0001': CONTENT_1,
'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
'openstack/latest/user_data': USER_DATA,
'openstack/latest/vendor_data.json': json.dumps(VENDOR_DATA),
}
EC2_FILES = {
'latest/user-data': USER_DATA,
}
def _register_uris(version, ec2_files, ec2_meta, os_files):
"""Registers a set of url patterns into httpretty that will mimic the
same data returned by the openstack metadata service (and ec2 service)."""
def match_ec2_url(uri, headers):
path = uri.path.lstrip("/")
if path in ec2_files:
return (200, headers, ec2_files.get(path))
if path == 'latest/meta-data':
buf = StringIO()
for (k, v) in ec2_meta.items():
if isinstance(v, (list, tuple)):
buf.write("%s/" % (k))
else:
buf.write("%s" % (k))
buf.write("\n")
return (200, headers, buf.getvalue())
if path.startswith('latest/meta-data'):
value = None
pieces = path.split("/")
if path.endswith("/"):
pieces = pieces[2:-1]
value = util.get_cfg_by_path(ec2_meta, pieces)
else:
pieces = pieces[2:]
value = util.get_cfg_by_path(ec2_meta, pieces)
if value is not None:
return (200, headers, str(value))
return (404, headers, '')
def get_request_callback(method, uri, headers):
uri = urlparse(uri)
path = uri.path.lstrip("/")
if path in os_files:
return (200, headers, os_files.get(path))
return match_ec2_url(uri, headers)
hp.register_uri(hp.GET, re.compile(r'http://169.254.169.254/.*'),
body=get_request_callback)
class TestOpenStackDataSource(test_helpers.TestCase):
VERSION = 'latest'
@hp.activate
def test_successful(self):
_register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
f = ds.read_metadata_service(BASE_URL, version=self.VERSION)
self.assertEquals(VENDOR_DATA, f.get('vendordata'))
self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertEquals(2, len(f['files']))
self.assertEquals(USER_DATA, f.get('userdata'))
self.assertEquals(EC2_META, f.get('ec2-metadata'))
self.assertEquals(2, f.get('version'))
metadata = f['metadata']
self.assertEquals('nova', metadata.get('availability_zone'))
self.assertEquals('sm-foo-test.novalocal', metadata.get('hostname'))
self.assertEquals('sm-foo-test.novalocal',
metadata.get('local-hostname'))
self.assertEquals('sm-foo-test', metadata.get('name'))
self.assertEquals('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
metadata.get('uuid'))
self.assertEquals('b0fa911b-69d4-4476-bbe2-1c92bff6535c',
metadata.get('instance-id'))
@hp.activate
def test_no_ec2(self):
_register_uris(self.VERSION, {}, {}, OS_FILES)
f = ds.read_metadata_service(BASE_URL, version=self.VERSION)
self.assertEquals(VENDOR_DATA, f.get('vendordata'))
self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertEquals(USER_DATA, f.get('userdata'))
self.assertEquals({}, f.get('ec2-metadata'))
self.assertEquals(2, f.get('version'))
@hp.activate
def test_bad_metadata(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
if k.endswith('meta_data.json'):
os_files.pop(k, None)
_register_uris(self.VERSION, {}, {}, os_files)
self.assertRaises(openstack.NonReadable, ds.read_metadata_service,
BASE_URL, version=self.VERSION)
@hp.activate
def test_bad_uuid(self):
os_files = copy.deepcopy(OS_FILES)
os_meta = copy.deepcopy(OSTACK_META)
os_meta.pop('uuid')
for k in list(os_files.keys()):
if k.endswith('meta_data.json'):
os_files[k] = json.dumps(os_meta)
_register_uris(self.VERSION, {}, {}, os_files)
self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service,
BASE_URL, version=self.VERSION)
@hp.activate
def test_userdata_empty(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
if k.endswith('user_data'):
os_files.pop(k, None)
_register_uris(self.VERSION, {}, {}, os_files)
f = ds.read_metadata_service(BASE_URL, version=self.VERSION)
self.assertEquals(VENDOR_DATA, f.get('vendordata'))
self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertFalse(f.get('userdata'))
@hp.activate
def test_vendordata_empty(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
if k.endswith('vendor_data.json'):
os_files.pop(k, None)
_register_uris(self.VERSION, {}, {}, os_files)
f = ds.read_metadata_service(BASE_URL, version=self.VERSION)
self.assertEquals(CONTENT_0, f['files']['/etc/foo.cfg'])
self.assertEquals(CONTENT_1, f['files']['/etc/bar/bar.cfg'])
self.assertFalse(f.get('vendordata'))
@hp.activate
def test_vendordata_invalid(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
if k.endswith('vendor_data.json'):
os_files[k] = '{' # some invalid json
_register_uris(self.VERSION, {}, {}, os_files)
self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service,
BASE_URL, version=self.VERSION)
@hp.activate
def test_metadata_invalid(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
if k.endswith('meta_data.json'):
os_files[k] = '{' # some invalid json
_register_uris(self.VERSION, {}, {}, os_files)
self.assertRaises(openstack.BrokenMetadata, ds.read_metadata_service,
BASE_URL, version=self.VERSION)
@hp.activate
def test_datasource(self):
_register_uris(self.VERSION, EC2_FILES, EC2_META, OS_FILES)
ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
None,
helpers.Paths({}))
self.assertIsNone(ds_os.version)
found = ds_os.get_data()
self.assertTrue(found)
self.assertEquals(2, ds_os.version)
md = dict(ds_os.metadata)
md.pop('instance-id', None)
md.pop('local-hostname', None)
self.assertEquals(OSTACK_META, md)
self.assertEquals(EC2_META, ds_os.ec2_metadata)
self.assertEquals(USER_DATA, ds_os.userdata_raw)
self.assertEquals(2, len(ds_os.files))
self.assertEquals(VENDOR_DATA, ds_os.vendordata_raw)
@hp.activate
def test_bad_datasource_meta(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
if k.endswith('meta_data.json'):
os_files[k] = '{' # some invalid json
_register_uris(self.VERSION, {}, {}, os_files)
ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
None,
helpers.Paths({}))
self.assertIsNone(ds_os.version)
found = ds_os.get_data()
self.assertFalse(found)
self.assertIsNone(ds_os.version)
@hp.activate
def test_no_datasource(self):
os_files = copy.deepcopy(OS_FILES)
for k in list(os_files.keys()):
if k.endswith('meta_data.json'):
os_files.pop(k)
_register_uris(self.VERSION, {}, {}, os_files)
ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
None,
helpers.Paths({}))
ds_os.ds_cfg = {
'max_wait': 0,
'timeout': 0,
}
self.assertIsNone(ds_os.version)
found = ds_os.get_data()
self.assertFalse(found)
self.assertIsNone(ds_os.version)
@hp.activate
def test_disabled_datasource(self):
os_files = copy.deepcopy(OS_FILES)
os_meta = copy.deepcopy(OSTACK_META)
os_meta['meta'] = {
'dsmode': 'disabled',
}
for k in list(os_files.keys()):
if k.endswith('meta_data.json'):
os_files[k] = json.dumps(os_meta)
_register_uris(self.VERSION, {}, {}, os_files)
ds_os = ds.DataSourceOpenStack(settings.CFG_BUILTIN,
None,
helpers.Paths({}))
ds_os.ds_cfg = {
'max_wait': 0,
'timeout': 0,
}
self.assertIsNone(ds_os.version)
found = ds_os.get_data()
self.assertFalse(found)
self.assertIsNone(ds_os.version)
| gpl-3.0 |
tlakshman26/cinder-bug-fix-volume-conversion-full | cinder/tests/unit/test_hp_xp_fc.py | 19 | 33282 | # Copyright (C) 2015, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume.drivers.san.hp import hp_xp_fc
from cinder.volume.drivers.san.hp import hp_xp_opts
from oslo_config import cfg
from oslo_utils import importutils
CONF = cfg.CONF
NORMAL_LDEV_TYPE = 'Normal'
POOL_INFO = {'30': {'total_gb': 'infinite', 'free_gb': 'infinite'}}
EXISTING_POOL_REF = {
'101': {'size': 128}
}
class HPXPFakeCommon(object):
"""Fake HPXP Common."""
def __init__(self, conf, storage_protocol, **kwargs):
self.conf = conf
self.volumes = {}
self.snapshots = {}
self._stats = {}
self.POOL_SIZE = 1000
self.LDEV_MAX = 1024
self.driver_info = {
'hba_id': 'wwpns',
'hba_id_type': 'World Wide Name',
'msg_id': {'target': 308},
'volume_backend_name': 'HPXPFC',
'volume_opts': hp_xp_opts.FC_VOLUME_OPTS,
'volume_type': 'fibre_channel',
}
self.storage_info = {
'protocol': storage_protocol,
'pool_id': None,
'ldev_range': None,
'ports': [],
'compute_ports': [],
'wwns': {},
'output_first': True
}
def create_volume(self, volume):
if volume['size'] > self.POOL_SIZE:
raise exception.VolumeBackendAPIException(
data='The volume size (%s) exceeds the pool size (%s).' %
(volume['size'], self.POOL_SIZE))
newldev = self._available_ldev()
self.volumes[newldev] = volume
return {
'provider_location': newldev,
'metadata': {
'ldev': newldev,
'type': NORMAL_LDEV_TYPE
}
}
def _available_ldev(self):
for i in range(1, self.LDEV_MAX):
if self.volume_exists({'provider_location': str(i)}) is False:
return str(i)
raise exception.VolumeBackendAPIException(
data='Failed to get an available logical device.')
def volume_exists(self, volume):
return self.volumes.get(volume['provider_location'], None) is not None
def delete_volume(self, volume):
vol = self.volumes.get(volume['provider_location'], None)
if vol is not None:
if vol.get('is_busy') is True:
raise exception.VolumeIsBusy(volume_name=volume['name'])
del self.volumes[volume['provider_location']]
def create_snapshot(self, snapshot):
src_vref = self.volumes.get(snapshot["volume_id"])
if not src_vref:
raise exception.VolumeBackendAPIException(
data='The %(type)s %(id)s source to be replicated was not '
'found.' % {'type': 'snapshot', 'id': (snapshot.get('id'))})
newldev = self._available_ldev()
self.volumes[newldev] = snapshot
return {'provider_location': newldev}
def delete_snapshot(self, snapshot):
snap = self.volumes.get(snapshot['provider_location'], None)
if snap is not None:
if snap.get('is_busy') is True:
raise exception.SnapshotIsBusy(snapshot_name=snapshot['name'])
del self.volumes[snapshot['provider_location']]
def get_volume_stats(self, refresh=False):
if refresh:
d = {}
d['volume_backend_name'] = self.driver_info['volume_backend_name']
d['vendor_name'] = 'Hewlett-Packard'
d['driver_version'] = '1.3.0-0_2015.1'
d['storage_protocol'] = self.storage_info['protocol']
pool_info = POOL_INFO.get(self.conf.hpxp_pool)
if pool_info is None:
return self._stats
d['total_capacity_gb'] = pool_info['total_gb']
d['free_capacity_gb'] = pool_info['free_gb']
d['allocated_capacity_gb'] = 0
d['reserved_percentage'] = 0
d['QoS_support'] = False
self._stats = d
return self._stats
def create_volume_from_snapshot(self, volume, snapshot):
ldev = snapshot.get('provider_location')
if self.volumes.get(ldev) is None:
raise exception.VolumeBackendAPIException(
data='The %(type)s %(id)s source to be replicated '
'was not found.' % {'type': 'snapshot', 'id': snapshot['id']})
if volume['size'] != snapshot['volume_size']:
raise exception.VolumeBackendAPIException(
data='The specified operation is not supported. '
'The volume size must be the same as the source %(type)s. '
'(volume: %(volume_id)s)'
% {'type': 'snapshot', 'volume_id': volume['id']})
newldev = self._available_ldev()
self.volumes[newldev] = volume
return {
'provider_location': newldev,
'metadata': {
'ldev': newldev,
'type': NORMAL_LDEV_TYPE,
'snapshot': snapshot['id']
}
}
def create_cloned_volume(self, volume, src_vref):
ldev = src_vref.get('provider_location')
if self.volumes.get(ldev) is None:
raise exception.VolumeBackendAPIException(
data='The %(type)s %(id)s source to be replicated was not '
'found.' % {'type': 'volume', 'id': src_vref.get('id')})
if volume['size'] != src_vref['size']:
raise exception.VolumeBackendAPIException(
data='The specified operation is not supported. '
'The volume size must be the same as the source %(type)s. '
'(volume: %(volume_id)s)' %
{'type': 'volume', 'volume_id': volume['id']})
newldev = self._available_ldev()
self.volumes[newldev] = volume
return {
'provider_location': newldev,
'metadata': {
'ldev': newldev,
'type': NORMAL_LDEV_TYPE,
'volume': src_vref['id']
}
}
def extend_volume(self, volume, new_size):
ldev = volume.get('provider_location')
if not self.volumes.get(ldev):
raise exception.VolumeBackendAPIException(
data='The volume %(volume_id)s to be extended was not found.' %
{'volume_id': volume['id']})
if new_size > self.POOL_SIZE:
raise exception.VolumeBackendAPIException(
data='The volume size (%s) exceeds the pool size (%s).' %
(new_size, self.POOL_SIZE))
self.volumes[ldev]['size'] = new_size
def manage_existing(self, volume, existing_ref):
ldev = existing_ref.get('source-id')
return {
'provider_location': ldev,
'metadata': {
'ldev': ldev,
'type': NORMAL_LDEV_TYPE
}
}
def manage_existing_get_size(self, dummy_volume, existing_ref):
ldev = existing_ref.get('source-id')
if not EXISTING_POOL_REF.get(ldev):
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason='No valid value is '
'specified for "source-id". A valid value '
'must be specified for "source-id" to manage the volume.')
size = EXISTING_POOL_REF[ldev]['size']
return size
def unmanage(self, volume):
vol = self.volumes.get(volume['provider_location'], None)
if vol is not None:
if vol.get('is_busy') is True:
raise exception.VolumeIsBusy(
volume_name=volume['provider_location'])
del self.volumes[volume['provider_location']]
def get_pool_id(self):
pool = self.conf.hpxp_pool
if pool.isdigit():
return int(pool)
return None
def do_setup(self, context):
self.ctxt = context
self.storage_info['pool_id'] = self.get_pool_id()
if self.storage_info['pool_id'] is None:
raise exception.VolumeBackendAPIException(
data='A pool could not be found. (pool: %(pool)s)' %
{'pool': self.conf.hpxp_pool})
def initialize_connection(self, volume, connector):
ldev = volume.get('provider_location')
if not self.volumes.get(ldev):
raise exception.VolumeBackendAPIException(
data='The volume %(volume_id)s to be mapped was not found.' %
{'volume_id': volume['id']})
self.volumes[ldev]['attached'] = connector
return {
'driver_volume_type': self.driver_info['volume_type'],
'data': {
'target_discovered': True,
'target_lun': volume['id'],
'access_mode': 'rw',
'multipath': True,
'target_wwn': ['50060E801053C2E0'],
'initiator_target_map': {
u'2388000087e1a2e0': ['50060E801053C2E0']},
}
}
def terminate_connection(self, volume, connector):
ldev = volume.get('provider_location')
if not self.volumes.get(ldev):
return
if not self.is_volume_attached(volume, connector):
raise exception.VolumeBackendAPIException(
data='Volume not found for %s' % ldev)
del self.volumes[volume['provider_location']]['attached']
for vol in self.volumes:
if 'attached' in self.volumes[vol]:
return
return {
'driver_volume_type': self.driver_info['volume_type'],
'data': {
'target_lun': volume['id'],
'target_wwn': ['50060E801053C2E0'],
'initiator_target_map': {
u'2388000087e1a2e0': ['50060E801053C2E0']},
}
}
def is_volume_attached(self, volume, connector):
if not self.volume_exists(volume):
return False
return (self.volumes[volume['provider_location']].get('attached', None)
== connector)
def copy_volume_data(self, context, src_vol, dest_vol, remote=None):
pass
def copy_image_to_volume(self, context, volume, image_service, image_id):
pass
def restore_backup(self, context, backup, volume, backup_service):
pass
class HPXPFCDriverTest(test.TestCase):
"""Test HPXPFCDriver."""
_VOLUME = {'size': 128,
'name': 'test1',
'id': 'id1',
'status': 'available'}
_VOLUME2 = {'size': 128,
'name': 'test2',
'id': 'id2',
'status': 'available'}
_VOLUME3 = {'size': 256,
'name': 'test2',
'id': 'id3',
'status': 'available'}
_VOLUME_BACKUP = {'size': 128,
'name': 'backup-test',
'id': 'id-backup',
'provider_location': '0',
'status': 'available'}
_TEST_SNAPSHOT = {'volume_name': 'test',
'size': 128,
'volume_size': 128,
'name': 'test-snap',
'volume_id': '1',
'id': 'test-snap-0',
'status': 'available'}
_TOO_BIG_VOLUME_SIZE = 100000
def __init__(self, *args, **kwargs):
super(HPXPFCDriverTest, self).__init__(*args, **kwargs)
def setUp(self):
self._setup_config()
self._setup_driver()
super(HPXPFCDriverTest, self).setUp()
def _setup_config(self):
self.configuration = mock.Mock(conf.Configuration)
self.configuration.hpxp_storage_id = "00000"
self.configuration.hpxp_pool = "30"
@mock.patch.object(importutils, 'import_object', return_value=None)
def _setup_driver(self, arg1):
self.driver = hp_xp_fc.HPXPFCDriver(configuration=self.configuration)
self.driver.common = HPXPFakeCommon(self.configuration, 'FC')
self.driver.do_setup(None)
# API test cases
def test_create_volume(self):
"""Test create_volume."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc = self.driver.create_volume(volume)
volume['provider_location'] = rc['provider_location']
has_volume = self.driver.common.volume_exists(volume)
self.assertTrue(has_volume)
def test_create_volume_error_on_no_pool_space(self):
"""Test create_volume is error on no pool space."""
update = {
'size': self._TOO_BIG_VOLUME_SIZE,
'name': 'test',
'id': 'id1',
'status': 'available'
}
volume = fake_volume.fake_db_volume(**update)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, volume)
def test_create_volume_error_on_no_available_ldev(self):
"""Test create_volume is error on no available ldev."""
for i in range(1, 1024):
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc = self.driver.create_volume(volume)
self.assertEqual(str(i), rc['provider_location'])
volume = fake_volume.fake_db_volume(**self._VOLUME)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume, volume)
def test_delete_volume(self):
"""Test delete_volume."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc = self.driver.create_volume(volume)
volume['provider_location'] = rc['provider_location']
self.driver.delete_volume(volume)
has_volume = self.driver.common.volume_exists(volume)
self.assertFalse(has_volume)
def test_delete_volume_on_non_existing_volume(self):
"""Test delete_volume on non existing volume."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
volume['provider_location'] = '1'
has_volume = self.driver.common.volume_exists(volume)
self.assertFalse(has_volume)
self.driver.delete_volume(volume)
def test_delete_volume_error_on_busy_volume(self):
"""Test delete_volume is error on busy volume."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc = self.driver.create_volume(volume)
self.driver.common.volumes[rc['provider_location']]['is_busy'] = True
volume['provider_location'] = rc['provider_location']
self.assertRaises(exception.VolumeIsBusy,
self.driver.delete_volume, volume)
def test_create_snapshot(self):
"""Test create_snapshot."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
self.driver.create_volume(volume)
snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT)
rc = self.driver.create_snapshot(snapshot)
snapshot['provider_location'] = rc['provider_location']
has_volume = self.driver.common.volume_exists(snapshot)
self.assertTrue(has_volume)
def test_create_snapshot_error_on_non_src_ref(self):
"""Test create_snapshot is error on non source reference."""
snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_snapshot,
snapshot)
def test_delete_snapshot(self):
"""Test delete_snapshot."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
self.driver.create_volume(volume)
snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT)
rc = self.driver.create_snapshot(snapshot)
snapshot['provider_location'] = rc['provider_location']
rc = self.driver.delete_snapshot(snapshot)
has_volume = self.driver.common.volume_exists(snapshot)
self.assertFalse(has_volume)
def test_delete_snapshot_error_on_busy_snapshot(self):
"""Test delete_snapshot is error on busy snapshot."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
self.driver.create_volume(volume)
snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT)
rc = self.driver.create_snapshot(snapshot)
self.driver.common.volumes[rc['provider_location']]['is_busy'] = True
snapshot['provider_location'] = rc['provider_location']
self.assertRaises(exception.SnapshotIsBusy,
self.driver.delete_snapshot,
snapshot)
def test_delete_snapshot_on_non_existing_snapshot(self):
"""Test delete_snapshot on non existing snapshot."""
snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT)
snapshot['provider_location'] = '1'
self.driver.delete_snapshot(snapshot)
def test_create_volume_from_snapshot(self):
"""Test create_volume_from_snapshot."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
self.driver.create_volume(volume)
snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT)
rc_snap = self.driver.create_snapshot(snapshot)
snapshot['provider_location'] = rc_snap['provider_location']
volume2 = fake_volume.fake_db_volume(**self._VOLUME2)
rc = self.driver.create_volume_from_snapshot(volume2, snapshot)
volume2['provider_location'] = rc['provider_location']
has_volume = self.driver.common.volume_exists(volume2)
self.assertTrue(has_volume)
def test_create_volume_from_snapshot_error_on_non_existing_snapshot(self):
"""Test create_volume_from_snapshot.
Test create_volume_from_snapshot is error on non existing snapshot.
"""
volume2 = fake_volume.fake_db_volume(**self._VOLUME2)
snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT)
snapshot['provider_location'] = '1'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
volume2, snapshot)
def test_create_volume_from_snapshot_error_on_diff_size(self):
"""Test create_volume_from_snapshot is error on different size."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
self.driver.create_volume(volume)
snapshot = fake_snapshot.fake_db_snapshot(**self._TEST_SNAPSHOT)
rc_snap = self.driver.create_snapshot(snapshot)
snapshot['provider_location'] = rc_snap['provider_location']
volume3 = fake_volume.fake_db_volume(**self._VOLUME3)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_volume_from_snapshot,
volume3, snapshot)
def test_create_cloned_volume(self):
"""Test create_cloned_volume."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc_vol = self.driver.create_volume(volume)
volume['provider_location'] = rc_vol['provider_location']
volume2 = fake_volume.fake_db_volume(**self._VOLUME2)
rc = self.driver.create_cloned_volume(volume2, volume)
volume2['provider_location'] = rc['provider_location']
has_volume = self.driver.common.volume_exists(volume2)
self.assertTrue(has_volume)
def test_create_cloned_volume_error_on_non_existing_volume(self):
"""Test create_cloned_volume is error on non existing volume."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
volume['provider_location'] = '1'
volume2 = fake_volume.fake_db_volume(**self._VOLUME2)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
volume2, volume)
def test_create_cloned_volume_error_on_diff_size(self):
"""Test create_cloned_volume is error on different size."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc_vol = self.driver.create_volume(volume)
volume['provider_location'] = rc_vol['provider_location']
volume3 = fake_volume.fake_db_volume(**self._VOLUME3)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_cloned_volume,
volume3, volume)
def test_get_volume_stats(self):
"""Test get_volume_stats."""
rc = self.driver.get_volume_stats(True)
self.assertEqual("Hewlett-Packard", rc['vendor_name'])
def test_get_volume_stats_error_on_non_existing_pool_id(self):
"""Test get_volume_stats is error on non existing pool id."""
self.configuration.hpxp_pool = 29
rc = self.driver.get_volume_stats(True)
self.assertEqual({}, rc)
@mock.patch.object(driver.FibreChannelDriver, 'copy_volume_data')
def test_copy_volume_data(self, arg1):
"""Test copy_volume_data."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc_vol = self.driver.create_volume(volume)
volume['provider_location'] = rc_vol['provider_location']
volume2 = fake_volume.fake_db_volume(**self._VOLUME2)
rc_vol2 = self.driver.create_volume(volume2)
volume2['provider_location'] = rc_vol2['provider_location']
self.driver.copy_volume_data(None, volume, volume2, None)
arg1.assert_called_with(None, volume, volume2, None)
@mock.patch.object(driver.FibreChannelDriver, 'copy_volume_data',
side_effect=exception.CinderException)
def test_copy_volume_data_error(self, arg1):
"""Test copy_volume_data is error."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc_vol = self.driver.create_volume(volume)
volume['provider_location'] = rc_vol['provider_location']
volume2 = fake_volume.fake_db_volume(**self._VOLUME2)
volume2['provider_location'] = '2'
self.assertRaises(exception.CinderException,
self.driver.copy_volume_data,
None, volume, volume2, None)
arg1.assert_called_with(None, volume, volume2, None)
@mock.patch.object(driver.FibreChannelDriver, 'copy_image_to_volume')
def test_copy_image_to_volume(self, arg1):
"""Test copy_image_to_volume."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc_vol = self.driver.create_volume(volume)
volume['provider_location'] = rc_vol['provider_location']
self.driver.copy_image_to_volume(None, volume, None, None)
arg1.assert_called_with(None, volume, None, None)
@mock.patch.object(driver.FibreChannelDriver, 'copy_image_to_volume',
side_effect=exception.CinderException)
def test_copy_image_to_volume_error(self, arg1):
"""Test copy_image_to_volume is error."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
volume['provider_location'] = '1'
self.assertRaises(exception.CinderException,
self.driver.copy_image_to_volume,
None, volume, None, None)
arg1.assert_called_with(None, volume, None, None)
@mock.patch.object(driver.FibreChannelDriver, 'restore_backup')
def test_restore_backup(self, arg1):
"""Test restore_backup."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc_vol = self.driver.create_volume(volume)
volume['provider_location'] = rc_vol['provider_location']
volume_backup = fake_volume.fake_db_volume(**self._VOLUME_BACKUP)
self.driver.restore_backup(None, volume_backup, volume, None)
arg1.assert_called_with(None, volume_backup, volume, None)
@mock.patch.object(driver.FibreChannelDriver, 'restore_backup',
side_effect=exception.CinderException)
def test_restore_backup_error(self, arg1):
"""Test restore_backup is error."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
volume['provider_location'] = '1'
volume_backup = fake_volume.fake_db_volume(**self._VOLUME_BACKUP)
self.assertRaises(exception.CinderException,
self.driver.restore_backup,
None, volume_backup, volume, None)
arg1.assert_called_with(None, volume_backup, volume, None)
def test_extend_volume(self):
"""Test extend_volume."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc = self.driver.create_volume(volume)
volume['provider_location'] = rc['provider_location']
new_size = 256
self.driver.extend_volume(volume, new_size)
actual = self.driver.common.volumes[rc['provider_location']]['size']
self.assertEqual(new_size, actual)
def test_extend_volume_error_on_non_existing_volume(self):
"""Test extend_volume is error on non existing volume."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
volume['provider_location'] = '1'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume, volume, 256)
def test_extend_volume_error_on_no_pool_space(self):
"""Test extend_volume is error on no pool space."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc = self.driver.create_volume(volume)
volume['provider_location'] = rc['provider_location']
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
volume, self._TOO_BIG_VOLUME_SIZE)
def test_manage_existing(self):
"""Test manage_existing."""
existing_ref = {'source-id': '101'}
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc = self.driver.manage_existing(volume, existing_ref)
self.assertEqual('101', rc['provider_location'])
def test_manage_existing_with_none_sourceid(self):
"""Test manage_existing is error with no source-id."""
existing_ref = {'source-id': None}
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc = self.driver.manage_existing(volume, existing_ref)
self.assertEqual(None, rc['provider_location'])
def test_manage_existing_get_size(self):
"""Test manage_existing_get_size."""
existing_ref = {'source-id': '101'}
volume = fake_volume.fake_db_volume(**self._VOLUME)
return_size = self.driver.manage_existing_get_size(
volume, existing_ref)
self.assertEqual(EXISTING_POOL_REF['101']['size'], return_size)
def test_manage_existing_get_size_with_none_sourceid(self):
"""Test manage_existing_get_size is error with no source-id."""
existing_ref = {'source-id': None}
volume = fake_volume.fake_db_volume(**self._VOLUME)
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
volume, existing_ref)
def test_unmanage(self):
"""Test unmanage."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc = self.driver.create_volume(volume)
volume['provider_location'] = rc['provider_location']
self.assertTrue(self.driver.common.volume_exists(volume))
self.driver.unmanage(volume)
self.assertFalse(self.driver.common.volume_exists(volume))
def test_unmanage_error_on_busy_volume(self):
"""Test unmanage is error on busy volume."""
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc = self.driver.create_volume(volume)
ldev = rc['provider_location']
self.driver.common.volumes[ldev]['is_busy'] = True
self.assertRaises(exception.VolumeIsBusy,
self.driver.unmanage,
{'provider_location': ldev})
def test_initialize_connection(self):
"""Test initialize_connection."""
connector = {'wwpns': ['12345678912345aa', '12345678912345bb'],
'ip': '127.0.0.1'}
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc_vol = self.driver.create_volume(volume)
volume['provider_location'] = rc_vol['provider_location']
conn_info = self.driver.initialize_connection(volume, connector)
self.assertIn('data', conn_info)
self.assertIn('initiator_target_map', conn_info['data'])
is_attached = self.driver.common.is_volume_attached(volume, connector)
self.assertTrue(is_attached)
self.driver.terminate_connection(volume, connector)
self.driver.delete_volume(volume)
    def test_initialize_connection_error_on_non_existing_volume(self):
"""Test initialize_connection is error on non existing volume."""
connector = {'wwpns': ['12345678912345aa', '12345678912345bb'],
'ip': '127.0.0.1'}
volume = fake_volume.fake_db_volume(**self._VOLUME)
volume['provider_location'] = '1'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume, connector)
def test_terminate_connection_on_non_last_volume(self):
"""Test terminate_connection on non last volume."""
connector = {'wwpns': ['12345678912345aa', '12345678912345bb'],
'ip': '127.0.0.1'}
last_volume = fake_volume.fake_db_volume(**self._VOLUME)
last_rc_vol = self.driver.create_volume(last_volume)
last_volume['provider_location'] = last_rc_vol['provider_location']
self.driver.initialize_connection(last_volume, connector)
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc_vol = self.driver.create_volume(volume)
volume['provider_location'] = rc_vol['provider_location']
self.driver.initialize_connection(volume, connector)
conn_info = self.driver.terminate_connection(volume, connector)
self.assertNotIn('data', conn_info)
is_attached = self.driver.common.is_volume_attached(volume, connector)
self.assertFalse(is_attached)
self.driver.delete_volume(volume)
self.driver.terminate_connection(last_volume, connector)
self.driver.delete_volume(last_volume)
def test_terminate_connection_on_non_existing_volume(self):
"""Test terminate_connection on non existing volume."""
connector = {'wwpns': ['12345678912345aa', '12345678912345bb'],
'ip': '127.0.0.1'}
volume = fake_volume.fake_db_volume(**self._VOLUME)
volume['provider_location'] = '1'
self.driver.terminate_connection(volume, connector)
def test_terminate_connection_error_on_non_initialized_volume(self):
"""Test terminate_connection is error on non initialized volume."""
connector = {'wwpns': ['12345678912345aa', '12345678912345bb'],
'ip': '127.0.0.1'}
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc_vol = self.driver.create_volume(volume)
volume['provider_location'] = rc_vol['provider_location']
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
volume, connector)
def test_terminate_connection_last_volume(self):
"""Test terminate_connection on last volume on a host."""
connector = {'wwpns': ['12345678912345aa', '12345678912345bb'],
'ip': '127.0.0.1'}
volume = fake_volume.fake_db_volume(**self._VOLUME)
rc_vol = self.driver.create_volume(volume)
volume['provider_location'] = rc_vol['provider_location']
self.driver.initialize_connection(volume, connector)
conn_info = self.driver.terminate_connection(volume, connector)
self.assertIn('data', conn_info)
self.assertIn('initiator_target_map', conn_info['data'])
is_attached = self.driver.common.is_volume_attached(volume, connector)
self.assertFalse(is_attached)
self.driver.delete_volume(volume)
def test_do_setup_error_on_invalid_pool_id(self):
"""Test do_setup is error on invalid pool id."""
self.configuration.hpxp_pool = 'invalid'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, None)
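# Illustrative sketch (added commentary, not part of the original test module).
# It summarizes the attach/detach flow the FC driver tests above exercise; the
# connector values come from the fixtures in this file, and the flow is an
# example of the tested behaviour rather than a definitive driver contract.
#
#     connector = {'wwpns': ['12345678912345aa', '12345678912345bb'],
#                  'ip': '127.0.0.1'}
#     rc = driver.create_volume(volume)             # returns provider_location
#     volume['provider_location'] = rc['provider_location']
#     info = driver.initialize_connection(volume, connector)
#     # info['data'] carries an 'initiator_target_map', conventionally mapping
#     # initiator WWPNs to target ports
#     driver.terminate_connection(volume, connector)
#     driver.delete_volume(volume)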
| apache-2.0 |
mibofra/olifant | usb/_debug.py | 3 | 3220 | # Copyright (C) 2009-2011 Wander Lairson Costa
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
__author__ = 'Wander Lairson Costa'
__all__ = ['methodtrace', 'functiontrace']
import logging
import usb._interop as _interop
_enable_tracing = False
def enable_tracing(enable):
global _enable_tracing
_enable_tracing = enable
def _trace_function_call(logger, fname, *args, **named_args):
logger.debug(
# TODO: check if 'f' is a method or a free function
        fname + '(' +
        ', '.join([str(val) for val in args] +
                  [name + '=' + str(val) for name, val in named_args.items()]) +
        ')'
)
# decorator for tracing method calls
def methodtrace(logger):
def decorator_logging(f):
if not _enable_tracing:
return f
def do_trace(*args, **named_args):
            # this "if" is just an optimization to avoid unnecessary string formatting
if logging.DEBUG >= logger.getEffectiveLevel():
fn = type(args[0]).__name__ + '.' + f.__name__
_trace_function_call(logger, fn, *args[1:], **named_args)
return f(*args, **named_args)
_interop._update_wrapper(do_trace, f)
return do_trace
return decorator_logging
# decorator for tracing free-function calls
def functiontrace(logger):
def decorator_logging(f):
if not _enable_tracing:
return f
def do_trace(*args, **named_args):
            # this "if" is just an optimization to avoid unnecessary string formatting
if logging.DEBUG >= logger.getEffectiveLevel():
_trace_function_call(logger, f.__name__, *args, **named_args)
return f(*args, **named_args)
_interop._update_wrapper(do_trace, f)
return do_trace
return decorator_logging
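# Illustrative usage sketch (added commentary, not part of the original
# module). It shows how the tracing decorators above are typically wired up;
# the logger name and the decorated function are assumptions for the example.
# Note that enable_tracing(True) must run before decoration, because the
# decorators check _enable_tracing at decoration time.
#
#     import logging
#     import usb._debug as _debug
#
#     logging.basicConfig(level=logging.DEBUG)
#     logger = logging.getLogger('usb')
#     _debug.enable_tracing(True)
#
#     @_debug.functiontrace(logger)
#     def claim_interface(dev, intf):      # hypothetical traced function
#         return (dev, intf)
#
#     claim_interface('dev0', 0)           # logs: claim_interface(dev0, 0)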
| gpl-3.0 |
daspecster/google-cloud-python | storage/unit_tests/test_acl.py | 4 | 26727 | # Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class Test_ACLEntity(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.acl import _ACLEntity
return _ACLEntity
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_default_identifier(self):
TYPE = 'type'
entity = self._make_one(TYPE)
self.assertEqual(entity.type, TYPE)
self.assertIsNone(entity.identifier)
self.assertEqual(entity.get_roles(), set())
def test_ctor_w_identifier(self):
TYPE = 'type'
ID = 'id'
entity = self._make_one(TYPE, ID)
self.assertEqual(entity.type, TYPE)
self.assertEqual(entity.identifier, ID)
self.assertEqual(entity.get_roles(), set())
def test___str__no_identifier(self):
TYPE = 'type'
entity = self._make_one(TYPE)
self.assertEqual(str(entity), TYPE)
def test___str__w_identifier(self):
TYPE = 'type'
ID = 'id'
entity = self._make_one(TYPE, ID)
self.assertEqual(str(entity), '%s-%s' % (TYPE, ID))
def test_grant_simple(self):
TYPE = 'type'
ROLE = 'role'
entity = self._make_one(TYPE)
entity.grant(ROLE)
self.assertEqual(entity.get_roles(), set([ROLE]))
def test_grant_duplicate(self):
TYPE = 'type'
ROLE1 = 'role1'
ROLE2 = 'role2'
entity = self._make_one(TYPE)
entity.grant(ROLE1)
entity.grant(ROLE2)
entity.grant(ROLE1)
self.assertEqual(entity.get_roles(), set([ROLE1, ROLE2]))
def test_revoke_miss(self):
TYPE = 'type'
ROLE = 'nonesuch'
entity = self._make_one(TYPE)
entity.revoke(ROLE)
self.assertEqual(entity.get_roles(), set())
def test_revoke_hit(self):
TYPE = 'type'
ROLE1 = 'role1'
ROLE2 = 'role2'
entity = self._make_one(TYPE)
entity.grant(ROLE1)
entity.grant(ROLE2)
entity.revoke(ROLE1)
self.assertEqual(entity.get_roles(), set([ROLE2]))
def test_grant_read(self):
TYPE = 'type'
entity = self._make_one(TYPE)
entity.grant_read()
self.assertEqual(entity.get_roles(), set([entity.READER_ROLE]))
def test_grant_write(self):
TYPE = 'type'
entity = self._make_one(TYPE)
entity.grant_write()
self.assertEqual(entity.get_roles(), set([entity.WRITER_ROLE]))
def test_grant_owner(self):
TYPE = 'type'
entity = self._make_one(TYPE)
entity.grant_owner()
self.assertEqual(entity.get_roles(), set([entity.OWNER_ROLE]))
def test_revoke_read(self):
TYPE = 'type'
entity = self._make_one(TYPE)
entity.grant(entity.READER_ROLE)
entity.revoke_read()
self.assertEqual(entity.get_roles(), set())
def test_revoke_write(self):
TYPE = 'type'
entity = self._make_one(TYPE)
entity.grant(entity.WRITER_ROLE)
entity.revoke_write()
self.assertEqual(entity.get_roles(), set())
def test_revoke_owner(self):
TYPE = 'type'
entity = self._make_one(TYPE)
entity.grant(entity.OWNER_ROLE)
entity.revoke_owner()
self.assertEqual(entity.get_roles(), set())
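# Illustrative sketch (added commentary, not part of the original tests). It
# restates the _ACLEntity behaviour exercised above; the identifier is an
# example value only.
#
#     entity = _ACLEntity('user', 'alice')   # str(entity) == 'user-alice'
#     entity.grant_read()                    # roles == {_ACLEntity.READER_ROLE}
#     entity.revoke_read()                   # roles == set()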
class Test_ACL(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.acl import ACL
return ACL
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
acl = self._make_one()
self.assertEqual(acl.entities, {})
self.assertFalse(acl.loaded)
def test__ensure_loaded(self):
acl = self._make_one()
def _reload():
acl._really_loaded = True
acl.reload = _reload
acl._ensure_loaded()
self.assertTrue(acl._really_loaded)
def test_client_is_abstract(self):
acl = self._make_one()
self.assertRaises(NotImplementedError, lambda: acl.client)
def test_reset(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
acl.entity(TYPE, ID)
acl.reset()
self.assertEqual(acl.entities, {})
self.assertFalse(acl.loaded)
def test___iter___empty_eager(self):
acl = self._make_one()
acl.loaded = True
self.assertEqual(list(acl), [])
def test___iter___empty_lazy(self):
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
self.assertEqual(list(acl), [])
self.assertTrue(acl.loaded)
def test___iter___non_empty_no_roles(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
acl.entity(TYPE, ID)
self.assertEqual(list(acl), [])
def test___iter___non_empty_w_roles(self):
TYPE = 'type'
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
entity.grant(ROLE)
self.assertEqual(list(acl),
[{'entity': '%s-%s' % (TYPE, ID), 'role': ROLE}])
def test___iter___non_empty_w_empty_role(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
entity.grant('')
self.assertEqual(list(acl), [])
def test_entity_from_dict_allUsers_eager(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity_from_dict({'entity': 'allUsers', 'role': ROLE})
self.assertEqual(entity.type, 'allUsers')
self.assertIsNone(entity.identifier)
self.assertEqual(entity.get_roles(), set([ROLE]))
self.assertEqual(list(acl),
[{'entity': 'allUsers', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_from_dict_allAuthenticatedUsers(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity_from_dict({'entity': 'allAuthenticatedUsers',
'role': ROLE})
self.assertEqual(entity.type, 'allAuthenticatedUsers')
self.assertIsNone(entity.identifier)
self.assertEqual(entity.get_roles(), set([ROLE]))
self.assertEqual(list(acl),
[{'entity': 'allAuthenticatedUsers', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_from_dict_string_w_hyphen(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity_from_dict({'entity': 'type-id', 'role': ROLE})
self.assertEqual(entity.type, 'type')
self.assertEqual(entity.identifier, 'id')
self.assertEqual(entity.get_roles(), set([ROLE]))
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_from_dict_string_wo_hyphen(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
self.assertRaises(ValueError,
acl.entity_from_dict,
{'entity': 'bogus', 'role': ROLE})
self.assertEqual(list(acl.get_entities()), [])
def test_has_entity_miss_str_eager(self):
acl = self._make_one()
acl.loaded = True
self.assertFalse(acl.has_entity('nonesuch'))
def test_has_entity_miss_str_lazy(self):
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
self.assertFalse(acl.has_entity('nonesuch'))
self.assertTrue(acl.loaded)
def test_has_entity_miss_entity(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
entity = _ACLEntity(TYPE, ID)
acl = self._make_one()
acl.loaded = True
self.assertFalse(acl.has_entity(entity))
def test_has_entity_hit_str(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
acl.entity(TYPE, ID)
self.assertTrue(acl.has_entity('%s-%s' % (TYPE, ID)))
def test_has_entity_hit_entity(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
self.assertTrue(acl.has_entity(entity))
def test_get_entity_miss_str_no_default_eager(self):
acl = self._make_one()
acl.loaded = True
self.assertIsNone(acl.get_entity('nonesuch'))
def test_get_entity_miss_str_no_default_lazy(self):
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
self.assertIsNone(acl.get_entity('nonesuch'))
self.assertTrue(acl.loaded)
def test_get_entity_miss_entity_no_default(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
entity = _ACLEntity(TYPE, ID)
acl = self._make_one()
acl.loaded = True
self.assertIsNone(acl.get_entity(entity))
def test_get_entity_miss_str_w_default(self):
DEFAULT = object()
acl = self._make_one()
acl.loaded = True
self.assertIs(acl.get_entity('nonesuch', DEFAULT), DEFAULT)
def test_get_entity_miss_entity_w_default(self):
from google.cloud.storage.acl import _ACLEntity
DEFAULT = object()
TYPE = 'type'
ID = 'id'
entity = _ACLEntity(TYPE, ID)
acl = self._make_one()
acl.loaded = True
self.assertIs(acl.get_entity(entity, DEFAULT), DEFAULT)
def test_get_entity_hit_str(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
acl.entity(TYPE, ID)
self.assertTrue(acl.has_entity('%s-%s' % (TYPE, ID)))
def test_get_entity_hit_entity(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
self.assertTrue(acl.has_entity(entity))
def test_add_entity_miss_eager(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
ROLE = 'role'
entity = _ACLEntity(TYPE, ID)
entity.grant(ROLE)
acl = self._make_one()
acl.loaded = True
acl.add_entity(entity)
self.assertTrue(acl.loaded)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_add_entity_miss_lazy(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
ROLE = 'role'
entity = _ACLEntity(TYPE, ID)
entity.grant(ROLE)
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
acl.add_entity(entity)
self.assertTrue(acl.loaded)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
self.assertTrue(acl.loaded)
def test_add_entity_hit(self):
from google.cloud.storage.acl import _ACLEntity
TYPE = 'type'
ID = 'id'
ENTITY_VAL = '%s-%s' % (TYPE, ID)
ROLE = 'role'
entity = _ACLEntity(TYPE, ID)
entity.grant(ROLE)
acl = self._make_one()
acl.loaded = True
before = acl.entity(TYPE, ID)
acl.add_entity(entity)
self.assertTrue(acl.loaded)
self.assertIsNot(acl.get_entity(ENTITY_VAL), before)
self.assertIs(acl.get_entity(ENTITY_VAL), entity)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_miss(self):
TYPE = 'type'
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
self.assertTrue(acl.loaded)
entity.grant(ROLE)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_entity_hit(self):
TYPE = 'type'
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
before = acl.entity(TYPE, ID)
before.grant(ROLE)
entity = acl.entity(TYPE, ID)
self.assertIs(entity, before)
self.assertEqual(list(acl),
[{'entity': 'type-id', 'role': ROLE}])
self.assertEqual(list(acl.get_entities()), [entity])
def test_user(self):
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.user(ID)
entity.grant(ROLE)
self.assertEqual(entity.type, 'user')
self.assertEqual(entity.identifier, ID)
self.assertEqual(list(acl),
[{'entity': 'user-%s' % ID, 'role': ROLE}])
def test_group(self):
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.group(ID)
entity.grant(ROLE)
self.assertEqual(entity.type, 'group')
self.assertEqual(entity.identifier, ID)
self.assertEqual(list(acl),
[{'entity': 'group-%s' % ID, 'role': ROLE}])
def test_domain(self):
ID = 'id'
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.domain(ID)
entity.grant(ROLE)
self.assertEqual(entity.type, 'domain')
self.assertEqual(entity.identifier, ID)
self.assertEqual(list(acl),
[{'entity': 'domain-%s' % ID, 'role': ROLE}])
def test_all(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.all()
entity.grant(ROLE)
self.assertEqual(entity.type, 'allUsers')
self.assertIsNone(entity.identifier)
self.assertEqual(list(acl),
[{'entity': 'allUsers', 'role': ROLE}])
def test_all_authenticated(self):
ROLE = 'role'
acl = self._make_one()
acl.loaded = True
entity = acl.all_authenticated()
entity.grant(ROLE)
self.assertEqual(entity.type, 'allAuthenticatedUsers')
self.assertIsNone(entity.identifier)
self.assertEqual(list(acl),
[{'entity': 'allAuthenticatedUsers', 'role': ROLE}])
def test_get_entities_empty_eager(self):
acl = self._make_one()
acl.loaded = True
self.assertEqual(acl.get_entities(), [])
def test_get_entities_empty_lazy(self):
acl = self._make_one()
def _reload():
acl.loaded = True
acl.reload = _reload
self.assertEqual(acl.get_entities(), [])
self.assertTrue(acl.loaded)
def test_get_entities_nonempty(self):
TYPE = 'type'
ID = 'id'
acl = self._make_one()
acl.loaded = True
entity = acl.entity(TYPE, ID)
self.assertEqual(acl.get_entities(), [entity])
def test_reload_missing(self):
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/652
ROLE = 'role'
connection = _Connection({})
client = _Client(connection)
acl = self._make_one()
acl.reload_path = '/testing/acl'
acl.loaded = True
acl.entity('allUsers', ROLE)
acl.reload(client=client)
self.assertEqual(list(acl), [])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '/testing/acl')
def test_reload_empty_result_clears_local(self):
ROLE = 'role'
connection = _Connection({'items': []})
client = _Client(connection)
acl = self._make_one()
acl.reload_path = '/testing/acl'
acl.loaded = True
acl.entity('allUsers', ROLE)
acl.reload(client=client)
self.assertTrue(acl.loaded)
self.assertEqual(list(acl), [])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '/testing/acl')
def test_reload_nonempty_result(self):
ROLE = 'role'
connection = _Connection(
{'items': [{'entity': 'allUsers', 'role': ROLE}]})
client = _Client(connection)
acl = self._make_one()
acl.reload_path = '/testing/acl'
acl.loaded = True
acl.reload(client=client)
self.assertTrue(acl.loaded)
self.assertEqual(list(acl), [{'entity': 'allUsers', 'role': ROLE}])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '/testing/acl')
def test_save_none_set_none_passed(self):
connection = _Connection()
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.save(client=client)
kw = connection._requested
self.assertEqual(len(kw), 0)
def test_save_existing_missing_none_passed(self):
connection = _Connection({})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl.save(client=client)
self.assertEqual(list(acl), [])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/testing')
self.assertEqual(kw[0]['data'], {'acl': []})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_save_no_acl(self):
ROLE = 'role'
AFTER = [{'entity': 'allUsers', 'role': ROLE}]
connection = _Connection({'acl': AFTER})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl.entity('allUsers').grant(ROLE)
acl.save(client=client)
self.assertEqual(list(acl), AFTER)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/testing')
self.assertEqual(kw[0]['data'], {'acl': AFTER})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_save_w_acl(self):
ROLE1 = 'role1'
ROLE2 = 'role2'
STICKY = {'entity': 'allUsers', 'role': ROLE2}
new_acl = [{'entity': 'allUsers', 'role': ROLE1}]
connection = _Connection({'acl': [STICKY] + new_acl})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl.save(new_acl, client=client)
entries = list(acl)
self.assertEqual(len(entries), 2)
self.assertTrue(STICKY in entries)
self.assertTrue(new_acl[0] in entries)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/testing')
self.assertEqual(kw[0]['data'], {'acl': new_acl})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
    def test_save_predefined_invalid(self):
connection = _Connection()
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
with self.assertRaises(ValueError):
acl.save_predefined('bogus', client=client)
def test_save_predefined_valid(self):
PREDEFINED = 'private'
connection = _Connection({'acl': []})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl.save_predefined(PREDEFINED, client=client)
entries = list(acl)
self.assertEqual(len(entries), 0)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/testing')
self.assertEqual(kw[0]['data'], {'acl': []})
self.assertEqual(kw[0]['query_params'],
{'projection': 'full', 'predefinedAcl': PREDEFINED})
def test_save_predefined_w_XML_alias(self):
PREDEFINED_XML = 'project-private'
PREDEFINED_JSON = 'projectPrivate'
connection = _Connection({'acl': []})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl.save_predefined(PREDEFINED_XML, client=client)
entries = list(acl)
self.assertEqual(len(entries), 0)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/testing')
self.assertEqual(kw[0]['data'], {'acl': []})
self.assertEqual(kw[0]['query_params'],
{'projection': 'full',
'predefinedAcl': PREDEFINED_JSON})
def test_save_predefined_valid_w_alternate_query_param(self):
# Cover case where subclass overrides _PREDEFINED_QUERY_PARAM
PREDEFINED = 'publicRead'
connection = _Connection({'acl': []})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl._PREDEFINED_QUERY_PARAM = 'alternate'
acl.save_predefined(PREDEFINED, client=client)
entries = list(acl)
self.assertEqual(len(entries), 0)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/testing')
self.assertEqual(kw[0]['data'], {'acl': []})
self.assertEqual(kw[0]['query_params'],
{'projection': 'full', 'alternate': PREDEFINED})
def test_clear(self):
ROLE1 = 'role1'
ROLE2 = 'role2'
STICKY = {'entity': 'allUsers', 'role': ROLE2}
connection = _Connection({'acl': [STICKY]})
client = _Client(connection)
acl = self._make_one()
acl.save_path = '/testing'
acl.loaded = True
acl.entity('allUsers', ROLE1)
acl.clear(client=client)
self.assertEqual(list(acl), [STICKY])
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/testing')
self.assertEqual(kw[0]['data'], {'acl': []})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
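# Illustrative sketch (added commentary, not part of the original tests). It
# shows the reload/edit/save cycle the Test_ACL cases above exercise; the
# client, paths, and role string are stand-ins modelled on the fakes in this
# module, not a definitive client API walkthrough.
#
#     acl = ACL()
#     acl.reload_path, acl.save_path = '/testing/acl', '/testing'
#     acl.reload(client=client)            # GET reload_path, sets loaded=True
#     acl.user('alice').grant('READER')    # or acl.all(), acl.group(), ...
#     acl.save(client=client)              # PATCH save_path with {'acl': [...]}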
class Test_BucketACL(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.acl import BucketACL
return BucketACL
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
NAME = 'name'
bucket = _Bucket(NAME)
acl = self._make_one(bucket)
self.assertEqual(acl.entities, {})
self.assertFalse(acl.loaded)
self.assertIs(acl.bucket, bucket)
self.assertEqual(acl.reload_path, '/b/%s/acl' % NAME)
self.assertEqual(acl.save_path, '/b/%s' % NAME)
class Test_DefaultObjectACL(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.acl import DefaultObjectACL
return DefaultObjectACL
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
NAME = 'name'
bucket = _Bucket(NAME)
acl = self._make_one(bucket)
self.assertEqual(acl.entities, {})
self.assertFalse(acl.loaded)
self.assertIs(acl.bucket, bucket)
self.assertEqual(acl.reload_path, '/b/%s/defaultObjectAcl' % NAME)
self.assertEqual(acl.save_path, '/b/%s' % NAME)
class Test_ObjectACL(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.storage.acl import ObjectACL
return ObjectACL
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
NAME = 'name'
BLOB_NAME = 'blob-name'
bucket = _Bucket(NAME)
blob = _Blob(bucket, BLOB_NAME)
acl = self._make_one(blob)
self.assertEqual(acl.entities, {})
self.assertFalse(acl.loaded)
self.assertIs(acl.blob, blob)
self.assertEqual(acl.reload_path, '/b/%s/o/%s/acl' % (NAME, BLOB_NAME))
self.assertEqual(acl.save_path, '/b/%s/o/%s' % (NAME, BLOB_NAME))
class _Blob(object):
def __init__(self, bucket, blob):
self.bucket = bucket
self.blob = blob
@property
def path(self):
return '%s/o/%s' % (self.bucket.path, self.blob)
class _Bucket(object):
def __init__(self, name):
self.name = name
@property
def path(self):
return '/b/%s' % self.name
class _Connection(object):
_delete_ok = False
def __init__(self, *responses):
self._responses = responses
self._requested = []
self._deleted = []
def api_request(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
class _Client(object):
def __init__(self, connection):
self._connection = connection
| apache-2.0 |
flyballlabs/threatdetectionservice | api/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/charsetgroupprober.py | 2929 | 3791 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mActiveNum = 0
self._mProbers = []
self._mBestGuessProber = None
def reset(self):
CharSetProber.reset(self)
self._mActiveNum = 0
for prober in self._mProbers:
if prober:
prober.reset()
prober.active = True
self._mActiveNum += 1
self._mBestGuessProber = None
def get_charset_name(self):
if not self._mBestGuessProber:
self.get_confidence()
if not self._mBestGuessProber:
return None
# self._mBestGuessProber = self._mProbers[0]
return self._mBestGuessProber.get_charset_name()
def feed(self, aBuf):
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
continue
st = prober.feed(aBuf)
if not st:
continue
if st == constants.eFoundIt:
self._mBestGuessProber = prober
return self.get_state()
elif st == constants.eNotMe:
prober.active = False
self._mActiveNum -= 1
if self._mActiveNum <= 0:
self._mState = constants.eNotMe
return self.get_state()
return self.get_state()
def get_confidence(self):
st = self.get_state()
if st == constants.eFoundIt:
return 0.99
elif st == constants.eNotMe:
return 0.01
bestConf = 0.0
self._mBestGuessProber = None
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
if constants._debug:
sys.stderr.write(prober.get_charset_name()
+ ' not active\n')
continue
cf = prober.get_confidence()
if constants._debug:
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(), cf))
if bestConf < cf:
bestConf = cf
self._mBestGuessProber = prober
if not self._mBestGuessProber:
return 0.0
return bestConf
# else:
# self._mBestGuessProber = self._mProbers[0]
# return self._mBestGuessProber.get_confidence()
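# Illustrative driving sketch (added commentary, not part of the original
# module). CharSetGroupProber is normally subclassed, and the subclass fills
# self._mProbers with concrete probers; assuming such a subclass exists, the
# typical feed loop looks like this:
#
#     prober = SomeGroupProber()              # hypothetical concrete subclass
#     prober.reset()
#     for chunk in chunks_of_bytes:
#         if prober.feed(chunk) == constants.eFoundIt:
#             break
#     charset = prober.get_charset_name()     # best guess, or None
#     confidence = prober.get_confidence()    # 0.0 .. 0.99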
| apache-2.0 |
fkorotkov/pants | src/python/pants/source/payload_fields.py | 4 | 2589 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from hashlib import sha1
from pants.base.payload_field import PayloadField
from pants.source.filespec import matches_filespec
from pants.source.source_root import SourceRootConfig
from pants.source.wrapped_globs import FilesetWithSpec
from pants.util.memo import memoized_property
class SourcesField(PayloadField):
"""A PayloadField encapsulating specified sources."""
@staticmethod
def _validate_sources(sources):
if not isinstance(sources, FilesetWithSpec):
raise ValueError('Expected a FilesetWithSpec. `sources` should be '
'instantiated via `create_sources_field`.')
return sources
def __init__(self, sources, ref_address=None):
"""
:param sources: FilesetWithSpec representing the underlying sources.
:param ref_address: optional address spec of target that provides these sources
"""
self._sources = self._validate_sources(sources)
self._ref_address = ref_address
@property
def source_root(self):
""":returns: the source root for these sources, or None if they're not under a source root."""
# TODO: It's a shame that we have to access the singleton directly here, instead of getting
# the SourceRoots instance from context, as tasks do. In the new engine we could inject
# this into the target, rather than have it reach out for global singletons.
return SourceRootConfig.global_instance().get_source_roots().find_by_path(self.rel_path)
def matches(self, path):
return self.sources.matches(path) or matches_filespec(path, self.filespec)
@property
def filespec(self):
return self.sources.filespec
@property
def rel_path(self):
return self.sources.rel_root
@property
def sources(self):
return self._sources
@memoized_property
def source_paths(self):
return list(self.sources)
@property
def address(self):
"""Returns the address this sources field refers to (used by some derived classes)"""
return self._ref_address
def relative_to_buildroot(self):
"""All sources joined with their relative paths."""
return list(self.sources.paths_from_buildroot_iter())
def _compute_fingerprint(self):
hasher = sha1()
hasher.update(self.rel_path)
hasher.update(self.sources.files_hash)
return hasher.hexdigest()
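# Illustrative note (added commentary, not part of the original module): the
# fingerprint above is sha1(rel_path + files_hash) of the wrapped
# FilesetWithSpec, so two SourcesFields over the same relative root and the
# same file contents hash identically. A minimal sketch, assuming `fileset` is
# a FilesetWithSpec produced by the usual create_sources_field machinery:
#
#     field = SourcesField(fileset)
#     digest = field.fingerprint()   # PayloadField API; hex sha1 digest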
| apache-2.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.4/tests/regressiontests/aggregation_regress/tests.py | 29 | 34529 | from __future__ import absolute_import
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db.models import Count, Max, Avg, Sum, StdDev, Variance, F, Q
from django.test import TestCase, Approximate, skipUnlessDBFeature
from .models import Author, Book, Publisher, Clues, Entries, HardbackBook
class AggregationTests(TestCase):
fixtures = ["aggregation_regress.json"]
def assertObjectAttrs(self, obj, **kwargs):
for attr, value in kwargs.iteritems():
self.assertEqual(getattr(obj, attr), value)
def test_aggregates_in_where_clause(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Tests that the subselect works and returns results equivalent to a
query with the IDs listed.
Before the corresponding fix for this bug, this test passed in 1.1 and
failed in 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# don't do anything with the queryset (qs) before including it as a
# subquery
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
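        # Illustrative sketch (added commentary, not part of the original
        # test): the equivalence asserted above boils down to
        #     books.filter(id__in=qs)        # qs kept as a subquery
        #     books.filter(id__in=list(qs))  # qs evaluated to plain ids
        # returning the same rows, i.e. the aggregate stays inside the
        # subselect instead of leaking into the outer WHERE clause.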
def test_aggregates_in_where_clause_pre_eval(self):
"""
Regression test for #12822: DatabaseError: aggregates not allowed in
WHERE clause
Same as the above test, but evaluates the queryset for the subquery
before it's used as a subquery.
Before the corresponding fix for this bug, this test failed in both
1.1 and 1.2-beta (trunk).
"""
qs = Book.objects.values('contact').annotate(Max('id'))
qs = qs.order_by('contact').values_list('id__max', flat=True)
# force the queryset (qs) for the subquery to be evaluated in its
# current state
list(qs)
books = Book.objects.order_by('id')
qs1 = books.filter(id__in=qs)
qs2 = books.filter(id__in=list(qs))
self.assertEqual(list(qs1), list(qs2))
@skipUnlessDBFeature('supports_subqueries_in_group_by')
def test_annotate_with_extra(self):
"""
Regression test for #11916: Extra params + aggregation creates
incorrect SQL.
"""
        # Oracle doesn't support subqueries in the GROUP BY clause
shortest_book_sql = """
SELECT name
FROM aggregation_regress_book b
WHERE b.publisher_id = aggregation_regress_publisher.id
ORDER BY b.pages
LIMIT 1
"""
# tests that this query does not raise a DatabaseError due to the full
# subselect being (erroneously) added to the GROUP BY parameters
qs = Publisher.objects.extra(select={
'name_of_shortest_book': shortest_book_sql,
}).annotate(total_books=Count('book'))
# force execution of the query
list(qs)
def test_aggregate(self):
# Ordering requests are ignored
self.assertEqual(
Author.objects.order_by("name").aggregate(Avg("age")),
{"age__avg": Approximate(37.444, places=1)}
)
# Implicit ordering is also ignored
self.assertEqual(
Book.objects.aggregate(Sum("pages")),
{"pages__sum": 3703},
)
# Baseline results
self.assertEqual(
Book.objects.aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Empty values query doesn't affect grouping or results
self.assertEqual(
Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
{'pages__sum': 3703, 'pages__avg': Approximate(617.166, places=2)}
)
# Aggregate overrides extra selected column
self.assertEqual(
Book.objects.extra(select={'price_per_page' : 'price / pages'}).aggregate(Sum('pages')),
{'pages__sum': 3703}
)
def test_annotation(self):
# Annotations get combined with extra select clauses
obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(select={"manufacture_cost": "price * .5"}).get(pk=2)
self.assertObjectAttrs(obj,
contact_id=3,
id=2,
isbn=u'067232959',
mean_auth_age=45.0,
name='Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=2,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))
# Order of the annotate/extra in the query doesn't matter
obj = Book.objects.extra(select={'manufacture_cost' : 'price * .5'}).annotate(mean_auth_age=Avg('authors__age')).get(pk=2)
self.assertObjectAttrs(obj,
contact_id=3,
id=2,
isbn=u'067232959',
mean_auth_age=45.0,
name=u'Sams Teach Yourself Django in 24 Hours',
pages=528,
price=Decimal("23.09"),
pubdate=datetime.date(2008, 3, 3),
publisher_id=2,
rating=3.0
)
# Different DB backends return different types for the extra select computation
self.assertTrue(obj.manufacture_cost == 11.545 or obj.manufacture_cost == Decimal('11.545'))
# Values queries can be combined with annotate and extra
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).values().get(pk=2)
manufacture_cost = obj['manufacture_cost']
self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
del obj['manufacture_cost']
self.assertEqual(obj, {
"contact_id": 3,
"id": 2,
"isbn": u"067232959",
"mean_auth_age": 45.0,
"name": u"Sams Teach Yourself Django in 24 Hours",
"pages": 528,
"price": Decimal("23.09"),
"pubdate": datetime.date(2008, 3, 3),
"publisher_id": 2,
"rating": 3.0,
})
# The order of the (empty) values, annotate and extra clauses doesn't
# matter
obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(select={'manufacture_cost' : 'price * .5'}).get(pk=2)
manufacture_cost = obj['manufacture_cost']
self.assertTrue(manufacture_cost == 11.545 or manufacture_cost == Decimal('11.545'))
del obj['manufacture_cost']
self.assertEqual(obj, {
'contact_id': 3,
'id': 2,
'isbn': u'067232959',
'mean_auth_age': 45.0,
'name': u'Sams Teach Yourself Django in 24 Hours',
'pages': 528,
'price': Decimal("23.09"),
'pubdate': datetime.date(2008, 3, 3),
'publisher_id': 2,
'rating': 3.0
})
# If the annotation precedes the values clause, it won't be included
# unless it is explicitly named
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).values('name').get(pk=1)
self.assertEqual(obj, {
"name": u'The Definitive Guide to Django: Web Development Done Right',
})
obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).values('name','mean_auth_age').get(pk=1)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': u'The Definitive Guide to Django: Web Development Done Right',
})
# If an annotation isn't included in the values, it can still be used
# in a filter
qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
self.assertQuerysetEqual(
qs, [
{"name": u'Python Web Development with Django'}
],
lambda b: b,
)
# The annotations are added to values output if values() precedes
# annotate()
obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(select={'price_per_page' : 'price / pages'}).get(pk=1)
self.assertEqual(obj, {
'mean_auth_age': 34.5,
'name': u'The Definitive Guide to Django: Web Development Done Right',
})
# Check that all of the objects are getting counted (allow_nulls) and
# that values respects the amount of objects
self.assertEqual(
len(Author.objects.annotate(Avg('friends__age')).values()),
9
)
# Check that consecutive calls to annotate accumulate in the query
qs = Book.objects.values('price').annotate(oldest=Max('authors__age')).order_by('oldest', 'price').annotate(Max('publisher__num_awards'))
self.assertQuerysetEqual(
qs, [
{'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
{'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
{'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
{'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
{'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
],
lambda b: b,
)
    def test_aggregate_annotation(self):
# Aggregates can be composed over annotations.
# The return type is derived from the composed aggregate
vals = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
self.assertEqual(vals, {
'num_authors__sum': 10,
'num_authors__avg': Approximate(1.666, places=2),
'pages__max': 1132,
'price__max': Decimal("82.80")
})
def test_field_error(self):
# Bad field requests in aggregates are caught and reported
self.assertRaises(
FieldError,
lambda: Book.objects.all().aggregate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('foo'))
)
self.assertRaises(
FieldError,
lambda: Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
)
def test_more(self):
# Old-style count aggregations can be mixed with new-style
self.assertEqual(
Book.objects.annotate(num_authors=Count('authors')).count(),
6
)
# Non-ordinal, non-computed Aggregates over annotations correctly
# inherit the annotation's internal type if the annotation is ordinal
# or computed
vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
self.assertEqual(
vals,
{'num_authors__max': 3}
)
vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
self.assertEqual(
vals,
{'avg_price__max': 75.0}
)
        # Aliases are quoted to protect aliases that might be reserved names
vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
self.assertEqual(
vals,
{'number': 1132, 'select': 1132}
)
# Regression for #10064: select_related() plays nice with aggregates
obj = Book.objects.select_related('publisher').annotate(num_authors=Count('authors')).values()[0]
self.assertEqual(obj, {
'contact_id': 8,
'id': 5,
'isbn': u'013790395',
'name': u'Artificial Intelligence: A Modern Approach',
'num_authors': 2,
'pages': 1132,
'price': Decimal("82.8"),
'pubdate': datetime.date(1995, 1, 15),
'publisher_id': 3,
'rating': 4.0,
})
# Regression for #10010: exclude on an aggregate field is correctly
# negated
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors'))),
6
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
1
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
5
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__lt=3).exclude(num_authors__lt=2)),
2
)
self.assertEqual(
len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__lt=2).filter(num_authors__lt=3)),
2
)
def test_aggregate_fexpr(self):
# Aggregates can be used with F() expressions
# ... where the F() is pushed into the HAVING clause
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': u'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': u'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': u'Apress', 'num_awards': 3},
{'num_books': 0, 'name': u"Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': u'Sams', 'num_awards': 1}
],
lambda p: p,
)
# ... and where the F() references an aggregate
qs = Publisher.objects.annotate(num_books=Count('book')).filter(num_awards__gt=2*F('num_books')).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 1, 'name': u'Morgan Kaufmann', 'num_awards': 9},
{'num_books': 2, 'name': u'Prentice Hall', 'num_awards': 7}
],
lambda p: p,
)
qs = Publisher.objects.annotate(num_books=Count('book')).exclude(num_books__lt=F('num_awards')/2).order_by('name').values('name','num_books','num_awards')
self.assertQuerysetEqual(
qs, [
{'num_books': 2, 'name': u'Apress', 'num_awards': 3},
{'num_books': 0, 'name': u"Jonno's House of Books", 'num_awards': 0},
{'num_books': 1, 'name': u'Sams', 'num_awards': 1}
],
lambda p: p,
)
def test_db_col_table(self):
# Tests on fields with non-default table and column names.
qs = Clues.objects.values('EntryID__Entry').annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
self.assertQuerysetEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count('clues__ID'))
self.assertQuerysetEqual(qs, [])
def test_empty(self):
# Regression for #10089: Check handling of empty result sets with
# aggregates
self.assertEqual(
Book.objects.filter(id__in=[]).count(),
0
)
vals = Book.objects.filter(id__in=[]).aggregate(num_authors=Count('authors'), avg_authors=Avg('authors'), max_authors=Max('authors'), max_price=Max('price'), max_rating=Max('rating'))
self.assertEqual(
vals,
{'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
)
qs = Publisher.objects.filter(pk=5).annotate(num_authors=Count('book__authors'), avg_authors=Avg('book__authors'), max_authors=Max('book__authors'), max_price=Max('book__price'), max_rating=Max('book__rating')).values()
self.assertQuerysetEqual(
qs, [
{'max_authors': None, 'name': u"Jonno's House of Books", 'num_awards': 0, 'max_price': None, 'num_authors': 0, 'max_rating': None, 'id': 5, 'avg_authors': None}
],
lambda p: p
)
def test_more_more(self):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
"Practical Django Projects",
"The Definitive Guide to Django: Web Development Done Right",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"Sams Teach Yourself Django in 24 Hours",
],
lambda b: b.name
)
# Regression for #10127 - Empty select_related() works with annotate
qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
self.assertQuerysetEqual(
qs, [
(u'Artificial Intelligence: A Modern Approach', 51.5, u'Prentice Hall', u'Peter Norvig'),
(u'Practical Django Projects', 29.0, u'Apress', u'James Bennett'),
(u'Python Web Development with Django', Approximate(30.333, places=2), u'Prentice Hall', u'Jeffrey Forcier'),
(u'Sams Teach Yourself Django in 24 Hours', 45.0, u'Sams', u'Brad Dayley')
],
lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
)
# Regression for #10132 - If the values() clause only mentioned extra
# (select=) columns, those columns are used for grouping
qs = Book.objects.extra(select={'pub':'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': 1, 'id__count': 2},
{'pub': 2, 'id__count': 1},
{'pub': 3, 'id__count': 2},
{'pub': 4, 'id__count': 1}
],
lambda b: b
)
qs = Book.objects.extra(select={'pub':'publisher_id', 'foo':'pages'}).values('pub').annotate(Count('id')).order_by('pub')
self.assertQuerysetEqual(
qs, [
{'pub': 1, 'id__count': 2},
{'pub': 2, 'id__count': 1},
{'pub': 3, 'id__count': 2},
{'pub': 4, 'id__count': 1}
],
lambda b: b
)
# Regression for #10182 - Queries with aggregate calls are correctly
        # re-aliased when used in a subquery
ids = Book.objects.filter(pages__gt=100).annotate(n_authors=Count('authors')).filter(n_authors__gt=2).order_by('n_authors')
self.assertQuerysetEqual(
Book.objects.filter(id__in=ids), [
"Python Web Development with Django",
],
lambda b: b.name
)
# Regression for #15709 - Ensure each group_by field only exists once
# per query
qs = Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by()
grouping, gb_params = qs.query.get_compiler(qs.db).get_grouping()
self.assertEqual(len(grouping), 1)
def test_duplicate_alias(self):
# Regression for #11256 - duplicating a default alias raises ValueError.
self.assertRaises(ValueError, Book.objects.all().annotate, Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a field name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, age=Avg('friends__age'))
def test_m2m_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with an m2m name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, friends=Count('friends'))
def test_values_queryset_non_conflict(self):
# Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
        # age is a field on Author, so an annotation named 'age' would normally be rejected.
        # But age isn't included in the ValuesQuerySet, so the annotation is allowed here.
results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], u'Adrian Holovaty')
self.assertEqual(results[0]['age'], 1)
# Same problem, but aggregating over m2m fields
results = Author.objects.values('name').annotate(age=Avg('friends__age')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], u'Adrian Holovaty')
self.assertEqual(results[0]['age'], 32.0)
# Same problem, but colliding with an m2m field
results = Author.objects.values('name').annotate(friends=Count('friends')).order_by('name')
self.assertEqual(len(results), 9)
self.assertEqual(results[0]['name'], u'Adrian Holovaty')
self.assertEqual(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
# Regression for #11256 - providing an aggregate name that conflicts with a reverse-related name on the model raises ValueError
self.assertRaises(ValueError, Author.objects.annotate, book_contact_set=Avg('friends__age'))
def test_pickle(self):
# Regression for #10197 -- Queries with aggregates can be pickled.
# First check that pickling is possible at all. No crash = success
qs = Book.objects.annotate(num_authors=Count('authors'))
pickle.dumps(qs)
# Then check that the round trip works.
query = qs.query.get_compiler(qs.db).as_sql()[0]
qs2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(
qs2.query.get_compiler(qs2.db).as_sql()[0],
query,
)
def test_more_more_more(self):
# Regression for #10199 - Aggregate calls clone the original query so
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
books.all(), [
u'Artificial Intelligence: A Modern Approach',
u'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
u'Practical Django Projects',
u'Python Web Development with Django',
u'Sams Teach Yourself Django in 24 Hours',
u'The Definitive Guide to Django: Web Development Done Right'
],
lambda b: b.name
)
# Regression for #10248 - Annotations work with DateQuerySets
qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
self.assertQuerysetEqual(
qs, [
datetime.datetime(1995, 1, 15, 0, 0),
datetime.datetime(2007, 12, 6, 0, 0)
],
lambda b: b
)
# Regression for #10290 - extra selects with parameters can be used for
# grouping.
qs = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(select={'sheets' : '(pages + %s) / %s'}, select_params=[1, 2]).order_by('sheets').values('sheets')
self.assertQuerysetEqual(
qs, [
150,
175,
224,
264,
473,
566
],
lambda b: int(b["sheets"])
)
        # Regression for #10425 - annotations don't get in the way of a count()
# clause
self.assertEqual(
Book.objects.values('publisher').annotate(Count('publisher')).count(),
4
)
self.assertEqual(
Book.objects.annotate(Count('publisher')).values('publisher').count(),
6
)
publishers = Publisher.objects.filter(id__in=[1, 2])
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
publishers = publishers.annotate(n_books=Count("book"))
self.assertEqual(
publishers[0].n_books,
2
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
books, [
"Practical Django Projects",
"Sams Teach Yourself Django in 24 Hours",
"The Definitive Guide to Django: Web Development Done Right",
],
lambda b: b.name
)
self.assertEqual(
sorted(p.name for p in publishers),
[
"Apress",
"Sams"
]
)
        # Regression for #10666 - inherited fields work with annotations and
# aggregations
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
{'n_pages': 2078}
)
self.assertEqual(
HardbackBook.objects.aggregate(n_pages=Sum('pages')),
{'n_pages': 2078},
)
qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': u'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': u'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h
)
qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
self.assertQuerysetEqual(
qs, [
{'n_authors': 2, 'name': u'Artificial Intelligence: A Modern Approach'},
{'n_authors': 1, 'name': u'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'}
],
lambda h: h,
)
        # Regression for #10766 - Shouldn't be able to reference aggregate
        # fields in an aggregate() call.
self.assertRaises(
FieldError,
lambda: Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
)
def test_empty_filter_count(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).count(),
0
)
def test_empty_filter_aggregate(self):
self.assertEqual(
Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
{"pk__count": None}
)
def test_none_call_before_aggregate(self):
# Regression for #11789
self.assertEqual(
Author.objects.none().aggregate(Avg('age')),
{'age__avg': None}
)
def test_annotate_and_join(self):
self.assertEqual(
Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
Author.objects.count()
)
def test_f_expression_annotation(self):
# Books with less than 200 pages per author.
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).filter(
pages__lt=F("n_authors") * 200
).values_list("pk")
self.assertQuerysetEqual(
Book.objects.filter(pk__in=qs), [
"Python Web Development with Django"
],
attrgetter("name")
)
def test_values_annotate_values(self):
qs = Book.objects.values("name").annotate(
n_authors=Count("authors")
).values_list("pk", flat=True)
self.assertEqual(list(qs), list(Book.objects.values_list("pk", flat=True)))
def test_having_group_by(self):
# Test that when a field occurs on the LHS of a HAVING clause that it
# appears correctly in the GROUP BY clause
qs = Book.objects.values_list("name").annotate(
n_authors=Count("authors")
).filter(
pages__gt=F("n_authors")
).values_list("name", flat=True)
# Results should be the same, all Books have more pages than authors
self.assertEqual(
list(qs), list(Book.objects.values_list("name", flat=True))
)
def test_annotation_disjunction(self):
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(n_authors=2) | Q(name="Python Web Development with Django")
)
self.assertQuerysetEqual(
qs, [
"Artificial Intelligence: A Modern Approach",
"Python Web Development with Django",
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Book.objects.annotate(n_authors=Count("authors")).filter(
Q(name="The Definitive Guide to Django: Web Development Done Right") | (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
)
self.assertQuerysetEqual(
qs, [
"The Definitive Guide to Django: Web Development Done Right",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
).order_by('pk')
self.assertQuerysetEqual(
qs, [
"Apress",
"Prentice Hall",
"Jonno's House of Books",
],
attrgetter("name")
)
qs = Publisher.objects.annotate(
rating_sum=Sum("book__rating"),
book_count=Count("book")
).filter(
Q(pk__lt=F("book_count")) | Q(rating_sum=None)
).order_by("pk")
self.assertQuerysetEqual(
qs, [
"Apress",
"Jonno's House of Books",
],
attrgetter("name")
)
def test_quoting_aggregate_order_by(self):
qs = Book.objects.filter(
name="Python Web Development with Django"
).annotate(
authorCount=Count("authors")
).order_by("authorCount")
self.assertQuerysetEqual(
qs, [
("Python Web Development with Django", 3),
],
lambda b: (b.name, b.authorCount)
)
@skipUnlessDBFeature('supports_stddev')
def test_stddev(self):
self.assertEqual(
Book.objects.aggregate(StdDev('pages')),
{'pages__stddev': Approximate(311.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating')),
{'rating__stddev': Approximate(0.60, 1)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price')),
{'price__stddev': Approximate(24.16, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('pages', sample=True)),
{'pages__stddev': Approximate(341.19, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('rating', sample=True)),
{'rating__stddev': Approximate(0.66, 2)}
)
self.assertEqual(
Book.objects.aggregate(StdDev('price', sample=True)),
{'price__stddev': Approximate(26.46, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages')),
{'pages__variance': Approximate(97010.80, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating')),
{'rating__variance': Approximate(0.36, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price')),
{'price__variance': Approximate(583.77, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('pages', sample=True)),
{'pages__variance': Approximate(116412.96, 1)}
)
self.assertEqual(
Book.objects.aggregate(Variance('rating', sample=True)),
{'rating__variance': Approximate(0.44, 2)}
)
self.assertEqual(
Book.objects.aggregate(Variance('price', sample=True)),
{'price__variance': Approximate(700.53, 2)}
)
def test_filtering_by_annotation_name(self):
# Regression test for #14476
        # The explicitly provided annotation name in this case poses no
        # problem
qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# Neither in this case
qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2)
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = Author.objects.annotate(Count('book')).filter(book__count=2)
self.assertQuerysetEqual(
qs,
['Peter Norvig'],
lambda b: b.name
)
| apache-2.0 |
oswalpalash/remoteusermgmt | RUM/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/codingstatemachine.py | 2931 | 2318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
def __init__(self, sm):
self._mModel = sm
self._mCurrentBytePos = 0
self._mCurrentCharLen = 0
self.reset()
def reset(self):
self._mCurrentState = eStart
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
# PY3K: aBuf is a byte stream, so c is an int, not a byte
byteCls = self._mModel['classTable'][wrap_ord(c)]
if self._mCurrentState == eStart:
self._mCurrentBytePos = 0
self._mCurrentCharLen = self._mModel['charLenTable'][byteCls]
# from byte's class and stateTable, we get its next state
curr_state = (self._mCurrentState * self._mModel['classFactor']
+ byteCls)
self._mCurrentState = self._mModel['stateTable'][curr_state]
self._mCurrentBytePos += 1
return self._mCurrentState
def get_current_charlen(self):
return self._mCurrentCharLen
def get_coding_state_machine(self):
return self._mModel['name']
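
# Illustrative sketch, not part of the original module: a hypothetical caller
# (e.g. a charset prober) feeds raw bytes through the machine one at a time
# and reacts to the resulting state. eError and eItsMe are assumed to live in
# .constants alongside eStart.
def _example_probe(sm_model, data):
    from .constants import eError, eItsMe  # assumed constants
    sm = CodingStateMachine(sm_model)
    for byte in data:
        state = sm.next_state(byte)
        if state == eError:
            return False  # invalid byte sequence for this encoding
        if state == eItsMe:
            return True   # byte sequence positively identifies the encoding
    return None           # ran out of data before reaching a decision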
| mit |
dyyi/moneybook | venv/Lib/encodings/idna.py | 215 | 9170 | # This module implements the RFCs 3490 (IDNA) and 3491 (Nameprep)
import stringprep, re, codecs
from unicodedata import ucd_3_2_0 as unicodedata
# IDNA section 3.1
dots = re.compile("[\u002E\u3002\uFF0E\uFF61]")
# IDNA section 5
ace_prefix = b"xn--"
sace_prefix = "xn--"
# This assumes query strings, so AllowUnassigned is true
def nameprep(label):
# Map
newlabel = []
for c in label:
if stringprep.in_table_b1(c):
# Map to nothing
continue
newlabel.append(stringprep.map_table_b2(c))
label = "".join(newlabel)
# Normalize
label = unicodedata.normalize("NFKC", label)
# Prohibit
for c in label:
if stringprep.in_table_c12(c) or \
stringprep.in_table_c22(c) or \
stringprep.in_table_c3(c) or \
stringprep.in_table_c4(c) or \
stringprep.in_table_c5(c) or \
stringprep.in_table_c6(c) or \
stringprep.in_table_c7(c) or \
stringprep.in_table_c8(c) or \
stringprep.in_table_c9(c):
raise UnicodeError("Invalid character %r" % c)
# Check bidi
RandAL = [stringprep.in_table_d1(x) for x in label]
for c in RandAL:
if c:
# There is a RandAL char in the string. Must perform further
# tests:
# 1) The characters in section 5.8 MUST be prohibited.
# This is table C.8, which was already checked
# 2) If a string contains any RandALCat character, the string
# MUST NOT contain any LCat character.
if any(stringprep.in_table_d2(x) for x in label):
raise UnicodeError("Violation of BIDI requirement 2")
# 3) If a string contains any RandALCat character, a
# RandALCat character MUST be the first character of the
# string, and a RandALCat character MUST be the last
# character of the string.
if not RandAL[0] or not RandAL[-1]:
raise UnicodeError("Violation of BIDI requirement 3")
return label
def ToASCII(label):
try:
# Step 1: try ASCII
label = label.encode("ascii")
except UnicodeError:
pass
else:
# Skip to step 3: UseSTD3ASCIIRules is false, so
# Skip to step 8.
if 0 < len(label) < 64:
return label
raise UnicodeError("label empty or too long")
# Step 2: nameprep
label = nameprep(label)
# Step 3: UseSTD3ASCIIRules is false
# Step 4: try ASCII
try:
label = label.encode("ascii")
except UnicodeError:
pass
else:
# Skip to step 8.
if 0 < len(label) < 64:
return label
raise UnicodeError("label empty or too long")
# Step 5: Check ACE prefix
if label.startswith(sace_prefix):
raise UnicodeError("Label starts with ACE prefix")
# Step 6: Encode with PUNYCODE
label = label.encode("punycode")
# Step 7: Prepend ACE prefix
label = ace_prefix + label
# Step 8: Check size
if 0 < len(label) < 64:
return label
raise UnicodeError("label empty or too long")
def ToUnicode(label):
# Step 1: Check for ASCII
if isinstance(label, bytes):
pure_ascii = True
else:
try:
label = label.encode("ascii")
pure_ascii = True
except UnicodeError:
pure_ascii = False
if not pure_ascii:
# Step 2: Perform nameprep
label = nameprep(label)
# It doesn't say this, but apparently, it should be ASCII now
try:
label = label.encode("ascii")
except UnicodeError:
raise UnicodeError("Invalid character in IDN label")
# Step 3: Check for ACE prefix
if not label.startswith(ace_prefix):
return str(label, "ascii")
# Step 4: Remove ACE prefix
label1 = label[len(ace_prefix):]
# Step 5: Decode using PUNYCODE
result = label1.decode("punycode")
# Step 6: Apply ToASCII
label2 = ToASCII(result)
# Step 7: Compare the result of step 6 with the one of step 3
# label2 will already be in lower case.
if str(label, "ascii").lower() != str(label2, "ascii"):
raise UnicodeError("IDNA does not round-trip", label, label2)
# Step 8: return the result of step 5
return result
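
# Illustrative sketch, not part of the original module: the two label helpers
# above are inverses of each other for a single (dot-free) label. The ACE form
# below is the standard punycode encoding of this example label; treat the
# exact byte value as an assumption.
def _example_label_roundtrip():
    ace = ToASCII("b\u00fccher")  # expected: b'xn--bcher-kva'
    assert ToUnicode(ace) == "b\u00fccher"
    return ace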
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
if errors != 'strict':
# IDNA is quite clear that implementations must be strict
raise UnicodeError("unsupported error handling "+errors)
if not input:
return b'', 0
try:
result = input.encode('ascii')
except UnicodeEncodeError:
pass
else:
# ASCII name: fast path
labels = result.split(b'.')
for label in labels[:-1]:
if not (0 < len(label) < 64):
raise UnicodeError("label empty or too long")
if len(labels[-1]) >= 64:
raise UnicodeError("label too long")
return result, len(input)
result = bytearray()
labels = dots.split(input)
if labels and not labels[-1]:
trailing_dot = b'.'
del labels[-1]
else:
trailing_dot = b''
for label in labels:
if result:
# Join with U+002E
result.extend(b'.')
result.extend(ToASCII(label))
return bytes(result+trailing_dot), len(input)
def decode(self, input, errors='strict'):
if errors != 'strict':
raise UnicodeError("Unsupported error handling "+errors)
if not input:
return "", 0
# IDNA allows decoding to operate on Unicode strings, too.
if not isinstance(input, bytes):
# XXX obviously wrong, see #3232
input = bytes(input)
if ace_prefix not in input:
# Fast path
try:
return input.decode('ascii'), len(input)
except UnicodeDecodeError:
pass
labels = input.split(b".")
if labels and len(labels[-1]) == 0:
trailing_dot = '.'
del labels[-1]
else:
trailing_dot = ''
result = []
for label in labels:
result.append(ToUnicode(label))
return ".".join(result)+trailing_dot, len(input)
class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
def _buffer_encode(self, input, errors, final):
if errors != 'strict':
# IDNA is quite clear that implementations must be strict
raise UnicodeError("unsupported error handling "+errors)
if not input:
return (b'', 0)
labels = dots.split(input)
trailing_dot = b''
if labels:
if not labels[-1]:
trailing_dot = b'.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = b'.'
result = bytearray()
size = 0
for label in labels:
if size:
# Join with U+002E
result.extend(b'.')
size += 1
result.extend(ToASCII(label))
size += len(label)
result += trailing_dot
size += len(trailing_dot)
return (bytes(result), size)
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
def _buffer_decode(self, input, errors, final):
if errors != 'strict':
raise UnicodeError("Unsupported error handling "+errors)
if not input:
return ("", 0)
# IDNA allows decoding to operate on Unicode strings, too.
if isinstance(input, str):
labels = dots.split(input)
else:
# Must be ASCII string
input = str(input, "ascii")
labels = input.split(".")
trailing_dot = ''
if labels:
if not labels[-1]:
trailing_dot = '.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = '.'
result = []
size = 0
for label in labels:
result.append(ToUnicode(label))
if size:
size += 1
size += len(label)
result = ".".join(result) + trailing_dot
size += len(trailing_dot)
return (result, size)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='idna',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| apache-2.0 |
endlessm/chromium-browser | third_party/grpc/src/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py | 4 | 3470 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests of grpc.channel_ready_future."""
import threading
import unittest
import logging
import grpc
from tests.unit.framework.common import test_constants
from tests.unit import thread_pool
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._value = None
def accept_value(self, value):
with self._condition:
self._value = value
self._condition.notify_all()
def block_until_called(self):
with self._condition:
while self._value is None:
self._condition.wait()
return self._value
class ChannelReadyFutureTest(unittest.TestCase):
def test_lonely_channel_connectivity(self):
channel = grpc.insecure_channel('localhost:12345')
callback = _Callback()
ready_future = grpc.channel_ready_future(channel)
ready_future.add_done_callback(callback.accept_value)
with self.assertRaises(grpc.FutureTimeoutError):
ready_future.result(timeout=test_constants.SHORT_TIMEOUT)
self.assertFalse(ready_future.cancelled())
self.assertFalse(ready_future.done())
self.assertTrue(ready_future.running())
ready_future.cancel()
value_passed_to_callback = callback.block_until_called()
self.assertIs(ready_future, value_passed_to_callback)
self.assertTrue(ready_future.cancelled())
self.assertTrue(ready_future.done())
self.assertFalse(ready_future.running())
channel.close()
def test_immediately_connectable_channel_connectivity(self):
recording_thread_pool = thread_pool.RecordingThreadPool(
max_workers=None)
server = grpc.server(
recording_thread_pool, options=(('grpc.so_reuseport', 0),))
port = server.add_insecure_port('[::]:0')
server.start()
channel = grpc.insecure_channel('localhost:{}'.format(port))
callback = _Callback()
ready_future = grpc.channel_ready_future(channel)
ready_future.add_done_callback(callback.accept_value)
self.assertIsNone(
ready_future.result(timeout=test_constants.LONG_TIMEOUT))
value_passed_to_callback = callback.block_until_called()
self.assertIs(ready_future, value_passed_to_callback)
self.assertFalse(ready_future.cancelled())
self.assertTrue(ready_future.done())
self.assertFalse(ready_future.running())
# Cancellation after maturity has no effect.
ready_future.cancel()
self.assertFalse(ready_future.cancelled())
self.assertTrue(ready_future.done())
self.assertFalse(ready_future.running())
self.assertFalse(recording_thread_pool.was_used())
channel.close()
server.stop(None)
if __name__ == '__main__':
logging.basicConfig()
unittest.main(verbosity=2)
| bsd-3-clause |
stewartpark/django | django/core/management/commands/startapp.py | 513 | 1040 | from importlib import import_module
from django.core.management.base import CommandError
from django.core.management.templates import TemplateCommand
class Command(TemplateCommand):
help = ("Creates a Django app directory structure for the given app "
"name in the current directory or optionally in the given "
"directory.")
missing_args_message = "You must provide an application name."
def handle(self, **options):
app_name, target = options.pop('name'), options.pop('directory')
self.validate_name(app_name, "app")
# Check that the app_name cannot be imported.
try:
import_module(app_name)
except ImportError:
pass
else:
raise CommandError("%r conflicts with the name of an existing "
"Python module and cannot be used as an app "
"name. Please try another name." % app_name)
super(Command, self).handle('app', app_name, target, **options)
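
# Illustrative usage, not part of the original module (the trailing directory
# argument is optional):
#
#   python manage.py startapp polls
#   python manage.py startapp polls /path/to/existing/dir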
| bsd-3-clause |
zeroSteiner/king-phisher | data/server/king_phisher/alembic/versions/c9a8d520a26_schema_v9.py | 4 | 1908 | """Schema v9
Revision ID: c9a8d520a26
Revises: b8443afcb9e
Create Date: 2018-10-25
"""
# revision identifiers, used by Alembic.
revision = 'c9a8d520a26'
down_revision = 'b8443afcb9e'
import os
import sys
sys.path.insert(1, os.path.abspath(os.path.join(os.path.dirname(__file__), *['..'] * 5)))
from alembic import op
from king_phisher.server.database import manager as db_manager
from king_phisher.server.database import schema_migration as db_schema_migration
import sqlalchemy
def upgrade():
op.add_column('campaigns', sqlalchemy.Column('credential_regex_username', sqlalchemy.String))
op.add_column('campaigns', sqlalchemy.Column('credential_regex_password', sqlalchemy.String))
op.add_column('campaigns', sqlalchemy.Column('credential_regex_mfa_token', sqlalchemy.String))
op.add_column('credentials', sqlalchemy.Column('mfa_token', sqlalchemy.String))
op.add_column('credentials', sqlalchemy.Column('regex_validated', sqlalchemy.Boolean))
op.add_column('users', sqlalchemy.Column('access_level', sqlalchemy.Integer))
op.execute('UPDATE users SET access_level = 1000')
op.alter_column('users', 'access_level', nullable=False)
# adjust the schema version metadata
db_manager.Session.remove()
db_manager.Session.configure(bind=op.get_bind())
session = db_manager.Session()
db_manager.set_metadata('schema_version', 9, session=session)
session.commit()
def downgrade():
db_schema_migration.drop_columns('users', ('access_level',))
db_schema_migration.drop_columns('credentials', ('regex_validated', 'mfa_token'))
db_schema_migration.drop_columns('campaigns', ('credential_regex_mfa_token', 'credential_regex_password', 'credential_regex_username'))
# adjust the schema version metadata
db_manager.Session.remove()
db_manager.Session.configure(bind=op.get_bind())
session = db_manager.Session()
db_manager.set_metadata('schema_version', 8, session=session)
session.commit()
| bsd-3-clause |
ltcmelo/psychec | cnippet/CCompilerFacade.py | 1 | 4589 | # -----------------------------------------------------------------------------
# Copyright (c) 2017 Leandro T. C. Melo ([email protected])
#
# All rights reserved. Unauthorized copying of this file, through any
# medium, is strictly prohibited.
#
# This software is provided on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, explicit or implicit. In no event shall the
# author be liable for any claim or damages.
# -----------------------------------------------------------------------------
import subprocess
import sys
from CommandSummary import CommandSummary
from Diagnostics import DiagnosticReporter, PREPROCESSING_FILE_FAILED
from Process import execute
class CCompilerFacade:
"""
Facade to the host C compiler.
"""
@staticmethod
def ID():
return CCompilerFacade.__name__
GCC = 'GCC'
Clang = 'Clang'
def __init__(self, cnip_opts):
self.cc = cnip_opts['cc']
self.cc_cmd_line = cnip_opts["cc_cmd_line"]
self.cc_family = None
self.cc_cmd_summary = None
@staticmethod
def defined_macros(prefix):
"""
        Define common builtin/platform macros.
"""
macros = [
# Calling conventions
prefix, '__cdecl=',
prefix, '__stdcall=',
prefix, '__thiscall=',
# Nullability attributes
prefix, '_Nullable=',
prefix, '_Nonnull=',
# GNU alternate keywords
prefix, '__extension__=',
# Microsoft
prefix, "'__declspec(a)='"
]
return macros
@staticmethod
def undefined_macros(prefix):
"""
Undefine common builtin/platform macros.
"""
macros = [
            # Clang's block language.
prefix, '__BLOCKS__',
]
return macros
def is_supported(self) -> bool:
"""
Tell whether the host C compiler is supported.
"""
# Look at predefined macros: $ echo | gcc -dM -E -
echo = subprocess.Popen('echo', stdout=subprocess.PIPE)
cmd = [self.cc, '-dM', '-E', '-']
try:
macros = subprocess.check_output(cmd, stdin=echo.stdout)
except:
return False
# __GNU__ is predefined in GCC/Clang; __clang__, only in Clang.
if b'__clang__' in macros:
self.cc_family = CCompilerFacade.Clang
elif b'__GNUC__' in macros:
self.cc_family = CCompilerFacade.GCC
return True
def parse_command(self) -> CommandSummary:
"""
Parse the compiler command to extract compilation options.
"""
assert self.cc_family
self.cc_cmd_summary = CommandSummary(self.cc_cmd_line)
return self.cc_cmd_summary.collect()
def original_options(self):
cmd = self.cc_cmd_summary.defined_macros('-D')
cmd += self.cc_cmd_summary.undefined_macros('-U')
cmd += self.cc_cmd_summary.include_paths('-I')
return cmd
def check_syntax(self, c_file_name):
"""
Check the "syntax" of the file -- this is an abuse of terminology, since
`fsyntax-only' performs symbol lookup, i.e., if a declaration is missing,
an error is thrown.
"""
cmd = [self.cc, '-fsyntax-only', c_file_name]
common_opts = [
'-Werror=incompatible-pointer-types',
'-Werror=implicit-function-declaration'
]
cmd += common_opts
by_cc_opts = {
CCompilerFacade.GCC: '-Werror=builtin-declaration-mismatch',
CCompilerFacade.Clang: '-Werror=incompatible-library-redeclaration'
}
cmd.append(by_cc_opts[self.cc_family])
cmd += self.original_options()
return execute(CCompilerFacade.ID(),
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
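
    # Illustrative sketch, not part of the original sources: for a file
    # compiled with gcc, check_syntax() above assembles a command roughly like
    #
    #   gcc -fsyntax-only file.c \
    #       -Werror=incompatible-pointer-types \
    #       -Werror=implicit-function-declaration \
    #       -Werror=builtin-declaration-mismatch \
    #       <the original -D/-U/-I options>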
def preprocess(self, c_file_name, pp_file_name):
"""
Preprocess the file.
"""
cmd = [self.cc,
'-E',
'-x',
'c',
c_file_name,
'-o',
pp_file_name]
cmd += CCompilerFacade.defined_macros('-D')
cmd += CCompilerFacade.undefined_macros('-U')
cmd += self.original_options()
code = execute(CCompilerFacade.ID(), cmd)
if code != 0:
sys.exit(
DiagnosticReporter.fatal(PREPROCESSING_FILE_FAILED,
c_file_name))
| lgpl-2.1 |
jrutila/django-shop | docs/OLD-out-of-date/snippets/products.py | 20 | 1153 | # This should be read as pseudo-code, it's not meant to be executable
# Just a rough draft/proposal of how the products could be handled, similar to what Satchmo does
class Product():
# Product very general stuff goes here
    name = models.CharField(max_length=255)
class ProductAttribute():
'''
'''
name = 'ISBN number'
type = 'String' # maybe not necessary?
class ProductAttributeValue():
'''
    Not necessarily a real model: it's an M2M stub with a value attached; it may be better to
    implement it in another way.
'''
attribute = models.ForeignKey(ProductAttribute)
product = models.ForeignKey(Products)
value = '0791040984' # Not a good idea to hard-code obviously, it's just an example
# This allows for interesting things, like category Attributes:
class CategoryAttribute():
'''
    With a little manager magic, this makes it possible to define a "Books" category that
adds an ISBN attribute to all the Products it contains.
Another example: having a "Length" attribute for movies and music... etc.
'''
category = models.ForeignKey(Category) # Not defined here, it's easy to figure out
attribute = models.ForeignKey(ProductAttribute)
| bsd-3-clause |
zzzirk/boto | tests/integration/glacier/test_layer2.py | 136 | 2025 | # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import time
from tests.unit import unittest
from boto.glacier.layer2 import Layer1, Layer2
class TestGlacierLayer2(unittest.TestCase):
glacier = True
def setUp(self):
self.layer2 = Layer2()
self.vault_name = 'testvault%s' % int(time.time())
def test_create_delete_vault(self):
vault = self.layer2.create_vault(self.vault_name)
retrieved_vault = self.layer2.get_vault(self.vault_name)
self.layer2.delete_vault(self.vault_name)
self.assertEqual(vault.name, retrieved_vault.name)
self.assertEqual(vault.arn, retrieved_vault.arn)
self.assertEqual(vault.creation_date, retrieved_vault.creation_date)
self.assertEqual(vault.last_inventory_date,
retrieved_vault.last_inventory_date)
self.assertEqual(vault.number_of_archives,
retrieved_vault.number_of_archives)
| mit |
apache/libcloud | docs/examples/storage/create_directory_backup_stream_to_cf.py | 4 | 1099 | import subprocess
from datetime import datetime
from libcloud.storage.types import Provider, ContainerDoesNotExistError
from libcloud.storage.providers import get_driver
driver = get_driver(Provider.CLOUDFILES_US)('username', 'api key')
directory = '/home/some/path'
cmd = 'tar cvzpf - %s' % (directory)
object_name = 'backup-%s.tar.gz' % (datetime.now().strftime('%Y-%m-%d'))
container_name = 'backups'
# Create a container if it doesn't already exist
try:
container = driver.get_container(container_name=container_name)
except ContainerDoesNotExistError:
container = driver.create_container(container_name=container_name)
pipe = subprocess.Popen(cmd, bufsize=0, shell=True, stdout=subprocess.PIPE)
return_code = pipe.poll()
print('Uploading object...')
while return_code is None:
# Compress data in our directory and stream it directly to CF
obj = container.upload_object_via_stream(iterator=pipe.stdout,
object_name=object_name)
return_code = pipe.poll()
print('Upload complete, transferred: %s KB' % ((obj.size / 1024)))
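
# Illustrative follow-up, not part of the original example: restoring the
# backup later by streaming the object back through tar. get_object and
# download_object_as_stream are the libcloud calls assumed here; treat the
# exact pattern as a sketch rather than a tested recipe.
#
# obj = container.get_object(object_name)
# restore = subprocess.Popen('tar xzpf - -C /tmp/restore', bufsize=0,
#                            shell=True, stdin=subprocess.PIPE)
# for chunk in driver.download_object_as_stream(obj):
#     restore.stdin.write(chunk)
# restore.stdin.close()
# restore.wait()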
| apache-2.0 |
EricSchles/python-route53 | route53/xml_generators/change_resource_record_set.py | 1 | 4532 | from io import BytesIO
from lxml import etree
from route53.util import prettyprint_xml
def get_change_values(change):
"""
In the case of deletions, we pull the change values for the XML request
from the ResourceRecordSet._initial_vas dict, since we want the original
values. For creations, we pull from the attributes on ResourceRecordSet.
Since we're dealing with attributes vs. dict key/vals, we'll abstract
this part away here and just always pass a dict to write_change.
:rtype: dict
:returns: A dict of change data, used by :py:func:`write_change` to
write the change request XML.
"""
action, rrset = change
if action == 'CREATE':
# For creations, we want the current values, since they don't need to
# match an existing record set.
values = dict()
for key, val in rrset._initial_vals.items():
# Pull from the record set's attributes, which are the current
# values.
values[key] = getattr(rrset, key)
return values
else:
# We can look at the initial values dict for deletions, since we
# have to match against the values currently in Route53.
return rrset._initial_vals
def write_change(change):
"""
Creates an XML element for the change.
:param tuple change: A change tuple from a ChangeSet. Comes in the form
of ``(action, rrset)``.
:rtype: lxml.etree._Element
:returns: A fully baked Change tag.
"""
action, rrset = change
change_vals = get_change_values(change)
e_change = etree.Element("Change")
e_action = etree.SubElement(e_change, "Action")
e_action.text = action
e_rrset = etree.SubElement(e_change, "ResourceRecordSet")
e_name = etree.SubElement(e_rrset, "Name")
e_name.text = change_vals['name']
e_type = etree.SubElement(e_rrset, "Type")
e_type.text = rrset.rrset_type
if change_vals.get('set_identifier'):
e_set_id = etree.SubElement(e_rrset, "SetIdentifier")
e_set_id.text = change_vals['set_identifier']
if change_vals.get('weight'):
e_weight = etree.SubElement(e_rrset, "Weight")
e_weight.text = change_vals['weight']
if change_vals.get('alias_hosted_zone_id') or change_vals.get('alias_dns_name'):
e_alias_target = etree.SubElement(e_rrset, "AliasTarget")
e_hosted_zone_id = etree.SubElement(e_alias_target, "HostedZoneId")
e_hosted_zone_id.text = change_vals['alias_hosted_zone_id']
e_dns_name = etree.SubElement(e_alias_target, "DNSName")
e_dns_name.text = change_vals['alias_dns_name']
if change_vals.get('region'):
e_weight = etree.SubElement(e_rrset, "Region")
e_weight.text = change_vals['region']
e_ttl = etree.SubElement(e_rrset, "TTL")
e_ttl.text = str(change_vals['ttl'])
if rrset.is_alias_record_set():
# A record sets in Alias mode don't have any resource records.
return e_change
e_resource_records = etree.SubElement(e_rrset, "ResourceRecords")
for value in change_vals['records']:
e_resource_record = etree.SubElement(e_resource_records, "ResourceRecord")
e_value = etree.SubElement(e_resource_record, "Value")
e_value.text = value
return e_change
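
# Illustrative sketch, not part of the original module: for a simple A record
# creation, write_change() above produces an element roughly like the
# following (all values hypothetical):
#
#   <Change>
#     <Action>CREATE</Action>
#     <ResourceRecordSet>
#       <Name>example.com.</Name>
#       <Type>A</Type>
#       <TTL>300</TTL>
#       <ResourceRecords>
#         <ResourceRecord><Value>192.0.2.1</Value></ResourceRecord>
#       </ResourceRecords>
#     </ResourceRecordSet>
#   </Change>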
def change_resource_record_set_writer(connection, change_set, comment=None):
"""
Forms an XML string that we'll send to Route53 in order to change
record sets.
:param Route53Connection connection: The connection instance used to
query the API.
:param change_set.ChangeSet change_set: The ChangeSet object to create the
XML doc from.
:keyword str comment: An optional comment to go along with the request.
"""
e_root = etree.Element(
"ChangeResourceRecordSetsRequest",
xmlns=connection._xml_namespace
)
e_change_batch = etree.SubElement(e_root, "ChangeBatch")
if comment:
e_comment = etree.SubElement(e_change_batch, "Comment")
e_comment.text = comment
e_changes = etree.SubElement(e_change_batch, "Changes")
# Deletions need to come first in the change sets.
for change in change_set.deletions + change_set.creations:
e_changes.append(write_change(change))
e_tree = etree.ElementTree(element=e_root)
#print(prettyprint_xml(e_root))
fobj = BytesIO()
# This writes bytes.
e_tree.write(fobj, xml_declaration=True, encoding='utf-8', method="xml")
return fobj.getvalue().decode('utf-8') | mit |
griest024/PokyrimTools | blender_nif_plugin-develop/io_scene_nif/nif_import.py | 2 | 94492 | """This script imports Netimmerse/Gamebryo nif files to Blender."""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright © 2005-2012, NIF File Format Library and Tools contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the NIF File Format Library and Tools
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
from .nif_common import NifCommon
from .collisionsys.collision_import import bhkshape_import, bound_import
from .armaturesys.skeletal import armature_import
from .materialsys.material import material_import
from .texturesys.texture import texture_import
from functools import reduce
import logging
import math
import operator
import os
import os.path
import bpy
import mathutils
import pyffi.spells.nif
import pyffi.spells.nif.fix
from pyffi.formats.nif import NifFormat
from pyffi.formats.egm import EgmFormat
class NifImportError(Exception):
"""A simple custom exception class for import errors."""
pass
class NifImport(NifCommon):
# degrees to radians conversion constant
D2R = 3.14159265358979/180.0
def execute(self):
"""Main import function."""
# dictionary of names, to map NIF blocks to correct Blender names
self.names = {}
# dictionary of bones, maps Blender name to NIF block
self.blocks = {}
# dictionary of bones, maps Blender bone name to matrix that maps the
# NIF bone matrix on the Blender bone matrix
# B' = X * B, where B' is the Blender bone matrix, and B is the NIF bone matrix
self.bones_extra_matrix = {}
# dictionary of bones that belong to a certain armature
# maps NIF armature name to list of NIF bone name
self.armatures = {}
# bone animation priorities (maps NiNode name to priority number);
# priorities are set in import_kf_root and are stored into the name
# of a NULL constraint (for lack of something better) in
# import_armature
self.bone_priorities = {}
# dictionary mapping bhkRigidBody objects to list of objects imported
# in Blender; after we've imported the tree, we use this dictionary
# to set the physics constraints (ragdoll etc)
self.havok_objects = {}
# Helper systems
# Store references to subsystems as needed.
self.bhkhelper = bhkshape_import(parent=self)
self.boundhelper = bound_import(parent=self)
self.armaturehelper = armature_import(parent=self)
self.texturehelper = texture_import(parent=self)
self.materialhelper = material_import(parent=self)
# catch NifImportError
try:
# check that one armature is selected in 'import geometry + parent
# to armature' mode
if False: #TODO self.properties.skeleton == "GEOMETRY_ONLY":
if (len(self.selected_objects) != 1
or self.selected_objects[0].type != 'ARMATURE'):
raise NifImportError(
"You must select exactly one armature in"
" 'Import Geometry Only + Parent To Selected Armature'"
" mode.")
# open file for binary reading
self.info("Importing %s" % self.properties.filepath)
niffile = open(self.properties.filepath, "rb")
self.data = NifFormat.Data()
try:
# check if nif file is valid
self.data.inspect(niffile)
if self.data.version >= 0:
# it is valid, so read the file
self.info("NIF file version: 0x%08X" % self.data.version)
self.info("Reading file")
self.data.read(niffile)
elif self.data.version == -1:
raise NifImportError("Unsupported NIF version.")
else:
raise NifImportError("Not a NIF file.")
finally:
# the file has been read or an error occurred: close file
niffile.close()
if self.properties.keyframe_file:
# open keyframe file for binary reading
self.info("Importing %s" % self.properties.keyframe_file)
kffile = open(self.properties.keyframe_file, "rb")
self.kfdata = NifFormat.Data()
try:
# check if kf file is valid
self.kfdata.inspect(kffile)
if self.kfdata.version >= 0:
# it is valid, so read the file
self.info(
"KF file version: 0x%08X" % self.kfdata.version)
self.info("Reading keyframe file")
self.kfdata.read(kffile)
elif self.kfdata.version == -1:
raise NifImportError("Unsupported KF version.")
else:
raise NifImportError("Not a KF file.")
finally:
# the file has been read or an error occurred: close file
kffile.close()
else:
self.kfdata = None
if self.properties.egm_file:
# open facegen egm file for binary reading
self.info("Importing %s" % self.properties.egm_file)
egmfile = open(self.properties.egm_file, "rb")
self.egmdata = EgmFormat.Data()
try:
# check if kf file is valid
self.egmdata.inspect(egmfile)
if self.egmdata.version >= 0:
# it is valid, so read the file
self.info("EGM file version: %03i"
% self.egmdata.version)
self.info("Reading FaceGen egm file")
self.egmdata.read(egmfile)
# scale the data
self.egmdata.apply_scale(1 / self.properties.scale_correction)
elif self.egmdata.version == -1:
raise NifImportError("Unsupported EGM version.")
else:
raise NifImportError("Not an EGM file.")
finally:
# the file has been read or an error occurred: close file
egmfile.close()
else:
self.egmdata = None
self.info("Importing data")
# calculate and set frames per second
if self.properties.animation:
self.fps = self.get_frames_per_second(
self.data.roots
+ (self.kfdata.roots if self.kfdata else []))
self.context.scene.render.fps = self.fps
# merge skeleton roots and transform geometry into the rest pose
if self.properties.merge_skeleton_roots:
pyffi.spells.nif.fix.SpellMergeSkeletonRoots(data=self.data).recurse()
if self.properties.send_geoms_to_bind_pos:
pyffi.spells.nif.fix.SpellSendGeometriesToBindPosition(data=self.data).recurse()
if self.properties.send_detached_geoms_to_node_pos:
pyffi.spells.nif.fix.SpellSendDetachedGeometriesToNodePosition(data=self.data).recurse()
if self.properties.send_bones_to_bind_position:
pyffi.spells.nif.fix.SpellSendBonesToBindPosition(data=self.data).recurse()
if self.properties.apply_skin_deformation:
for n_geom in self.data.get_global_iterator():
if not isinstance(n_geom, NifFormat.NiGeometry):
continue
if not n_geom.is_skin():
continue
self.info('Applying skin deformation on geometry %s'
% n_geom.name)
vertices, normals = n_geom.get_skin_deformation()
for vold, vnew in zip(n_geom.data.vertices, vertices):
vold.x = vnew.x
vold.y = vnew.y
vold.z = vnew.z
# scale tree
toaster = pyffi.spells.nif.NifToaster()
toaster.scale = 1 / self.properties.scale_correction
pyffi.spells.nif.fix.SpellScale(data=self.data, toaster=toaster).recurse()
# import all root blocks
for block in self.data.roots:
root = block
# root hack for corrupt better bodies meshes
# and remove geometry from better bodies on skeleton import
for b in (b for b in block.tree()
if isinstance(b, NifFormat.NiGeometry)
and b.is_skin()):
# check if root belongs to the children list of the
# skeleton root (can only happen for better bodies meshes)
if root in [c for c in b.skin_instance.skeleton_root.children]:
# fix parenting and update transform accordingly
b.skin_instance.data.set_transform(
root.get_transform()
* b.skin_instance.data.get_transform())
b.skin_instance.skeleton_root = root
# delete non-skeleton nodes if we're importing
# skeleton only
if self.properties.skeleton == "SKELETON_ONLY":
nonbip_children = (child for child in root.children
if child.name[:6] != 'Bip01 ')
for child in nonbip_children:
root.remove_child(child)
# import this root block
self.debug("Root block: %s" % root.get_global_display())
# merge animation from kf tree into nif tree
if self.properties.animation and self.kfdata:
for kf_root in self.kfdata.roots:
self.import_kf_root(kf_root, root)
# import the nif tree
self.import_root(root)
finally:
# clear progress bar
self.info("Finished")
# XXX no longer needed?
# do a full scene update to ensure that transformations are applied
# self.context.scene.update()
return {'FINISHED'}
def import_root(self, root_block):
"""Main import function."""
# check that this is not a kf file
if isinstance(root_block,
(NifFormat.NiSequence,
NifFormat.NiSequenceStreamHelper)):
raise NifImportError("direct .kf import not supported")
# divinity 2: handle CStreamableAssetData
if isinstance(root_block, NifFormat.CStreamableAssetData):
root_block = root_block.root
# sets the root block parent to None, so that when crawling back the
# script won't barf
root_block._parent = None
# set the block parent through the tree, to ensure I can always move
# backward
self.set_parents(root_block)
# mark armature nodes and bones
self.armaturehelper.mark_armatures_bones(root_block)
# import the keyframe notes
if self.properties.animation:
self.import_text_keys(root_block)
# read the NIF tree
if self.armaturehelper.is_armature_root(root_block):
# special case 1: root node is skeleton root
self.debug("%s is an armature root" % root_block.name)
b_obj = self.import_branch(root_block)
elif self.is_grouping_node(root_block):
# special case 2: root node is grouping node
self.debug("%s is a grouping node" % root_block.name)
b_obj = self.import_branch(root_block)
elif isinstance(root_block, NifFormat.NiTriBasedGeom):
# trishape/tristrips root
b_obj = self.import_branch(root_block)
elif isinstance(root_block, NifFormat.NiNode):
# root node is dummy scene node
# process collision
if root_block.collision_object:
bhk_body = root_block.collision_object.body
if not isinstance(bhk_body, NifFormat.bhkRigidBody):
self.warning(
"Unsupported collision structure under node %s"
% root_block.name)
self.bhkhelper.import_bhk_shape(bhkshape=bhk_body)
#process extra data
for n_extra in root_block.get_extra_datas():
if isinstance(n_extra, NifFormat.BSXFlags):
# get bsx flags so we can attach it to collision object
bsx_flags = n_extra.integer_data
elif isinstance(n_extra, NifFormat.NiStringExtraData):
if n_extra.name == "UPB":
upbflags = n_extra.string_data
elif isinstance(n_extra, NifFormat.BSBound):
self.boundhelper.import_bounding_box(n_extra)
# process all its children
for child in root_block.children:
b_obj = self.import_branch(child)
elif isinstance(root_block, NifFormat.NiCamera):
self.warning('Skipped NiCamera root')
elif isinstance(root_block, NifFormat.NiPhysXProp):
self.warning('Skipped NiPhysXProp root')
else:
self.warning(
"Skipped unsupported root block type '%s' (corrupted nif?)."
% root_block.__class__)
# store bone matrix offsets for re-export
if self.bones_extra_matrix:
self.armaturehelper.store_bones_extra_matrix()
# store original names for re-export
if self.names:
self.armaturehelper.store_names()
# now all havok objects are imported, so we are
# ready to import the havok constraints
for hkbody in self.havok_objects:
self.import_bhk_constraints(hkbody)
# parent selected meshes to imported skeleton
if self.properties.skeleton == "SKELETON_ONLY":
# rename vertex groups to reflect bone names
# (for blends imported with older versions of the scripts!)
for b_child_obj in self.selected_objects:
if b_child_obj.type == 'MESH':
for oldgroupname in b_child_obj.data.getVertGroupNames():
newgroupname = self.get_bone_name_for_blender(oldgroupname)
if oldgroupname != newgroupname:
self.info(
"%s: renaming vertex group %s to %s"
% (b_child_obj, oldgroupname, newgroupname))
b_child_obj.data.renameVertGroup(
oldgroupname, newgroupname)
# set parenting
b_obj.makeParentDeform(self.selected_objects)
def import_branch(self, niBlock, b_armature=None, n_armature=None):
"""Read the content of the current NIF tree branch to Blender
recursively.
:param niBlock: The nif block to import.
:param b_armature: The blender armature for the current branch.
:param n_armature: The corresponding nif block for the armature for
the current branch.
"""
self.info("Importing data")
if not niBlock:
return None
elif (isinstance(niBlock, NifFormat.NiTriBasedGeom)
and self.properties.skeleton != "SKELETON_ONLY"):
# it's a shape node and we're not importing skeleton only
# (self.properties.skeleton == "SKELETON_ONLY")
self.debug("Building mesh in import_branch")
# note: transform matrix is set during import
b_obj = self.import_mesh(niBlock)
# skinning? add armature modifier
if niBlock.skin_instance:
self.armaturehelper.append_armature_modifier(b_obj, b_armature)
return b_obj
elif isinstance(niBlock, NifFormat.NiNode):
children = niBlock.children
# bounding box child?
bsbound = self.find_extra(niBlock, NifFormat.BSBound)
if not (children
or niBlock.collision_object
or bsbound or niBlock.has_bounding_box
or self.IMPORT_EXTRANODES):
# do not import unless the node is "interesting"
return None
# import object
if self.armaturehelper.is_armature_root(niBlock):
# all bones in the tree are also imported by
# import_armature
if self.properties.skeleton != "GEOMETRY_ONLY":
b_obj = self.armaturehelper.import_armature(niBlock)
b_armature = b_obj
n_armature = niBlock
else:
b_obj = self.selected_objects[0]
b_armature = b_obj
n_armature = niBlock
self.info(
"Merging nif tree '%s' with armature '%s'"
% (niBlock.name, b_obj.name))
if niBlock.name != b_obj.name:
self.warning(
"Taking nif block '%s' as armature '%s'"
" but names do not match"
% (niBlock.name, b_obj.name))
# armatures cannot group geometries into a single mesh
geom_group = []
elif self.armaturehelper.is_bone(niBlock):
# bones have already been imported during import_armature
b_obj = b_armature.data.bones[self.names[niBlock]]
# bones cannot group geometries into a single mesh
geom_group = []
else:
# is it a grouping node?
geom_group = self.is_grouping_node(niBlock)
# if importing animation, remove children that have
# morph controllers from geometry group
if self.properties.animation:
for child in geom_group:
if self.find_controller(
child, NifFormat.NiGeomMorpherController):
geom_group.remove(child)
# import geometry/empty
if (not geom_group
or not self.properties.combine_shapes
or len(geom_group) > 16):
# no grouping node, or too many materials to
# group the geometry into a single mesh
# so import it as an empty
if not niBlock.has_bounding_box:
b_obj = self.import_empty(niBlock)
else:
b_obj = self.boundhelper.import_bounding_box(niBlock)
geom_group = []
else:
# node groups geometries, so import it as a mesh
self.info(
"Joining geometries %s to single object '%s'"
%([child.name for child in geom_group],
niBlock.name))
b_obj = None
for child in geom_group:
b_obj = self.import_mesh(child,
group_mesh=b_obj,
applytransform=True)
b_obj.name = self.import_name(niBlock)
# skinning? add armature modifier
if any(child.skin_instance
for child in geom_group):
self.armaturehelper.append_armature_modifier(b_obj, b_armature)
# settings for collision node
if isinstance(niBlock, NifFormat.RootCollisionNode):
b_obj.draw_type = 'BOUNDS'
b_obj.show_wire = True
b_obj.draw_bounds_type = 'POLYHEDERON'
b_obj.game.use_collision_bounds = True
b_obj.game.collision_bounds_type = 'TRIANGLE_MESH'
# also remove duplicate vertices
b_mesh = b_obj.data
numverts = len(b_mesh.vertices)
# 0.005 = 1/200
numdel = b_mesh.remDoubles(0.005)
if numdel:
self.info(
"Removed %i duplicate vertices"
" (out of %i) from collision mesh"
% (numdel, numverts))
# find children that aren't part of the geometry group
b_children_list = []
children = [child for child in niBlock.children
if child not in geom_group]
for n_child in children:
b_child = self.import_branch(
n_child, b_armature=b_armature, n_armature=n_armature)
if b_child:
b_children_list.append((n_child, b_child))
object_children = [
(n_child, b_child) for (n_child, b_child) in b_children_list
if isinstance(b_child, bpy.types.Object)]
# if not importing skeleton only
if self.properties.skeleton != "SKELETON_ONLY":
# import collision objects
if isinstance(niBlock.collision_object, NifFormat.bhkNiCollisionObject):
bhk_body = niBlock.collision_object.body
if not isinstance(bhk_body, NifFormat.bhkRigidBody):
self.warning(
"Unsupported collision structure"
" under node %s" % niBlock.name)
collision_objs = self.bhkhelper.import_bhk_shape(bhkshape=bhk_body)
# register children for parentship
object_children += [
(bhk_body, b_child) for b_child in collision_objs]
# import bounding box
if bsbound:
object_children += [
(bsbound, self.bhkhelper.import_bounding_box(bsbound))]
# fix parentship
if isinstance(b_obj, bpy.types.Object):
# simple object parentship
for (n_child, b_child) in object_children:
b_child.parent = b_obj
elif isinstance(b_obj, bpy.types.Bone):
# bone parentship, is a bit more complicated
# go to rest position
b_armature.data.restPosition = True
# set up transforms
for n_child, b_child in object_children:
# save transform
matrix = mathutils.Matrix(
b_child.getMatrix('localspace'))
# fix transform
# the bone has in the nif file an armature space transform
# given by niBlock.get_transform(relative_to=n_armature)
#
# in detail:
# a vertex in the collision object has global
# coordinates
# v * Z * B
# with v the vertex, Z the object transform
# (currently b_obj_matrix)
# and B the nif bone matrix
# in Blender however a vertex has coordinates
# v * O * T * B'
# with B' the Blender bone matrix
# so we need that
# Z * B = O * T * B' or equivalently
# O = Z * B * B'^{-1} * T^{-1}
# = Z * X^{-1} * T^{-1}
# since
# B' = X * B
# with X = self.bones_extra_matrix[B]
# post multiply Z with X^{-1}
extra = mathutils.Matrix(
self.bones_extra_matrix[niBlock])
extra.invert()
matrix = matrix * extra
# cancel out the tail translation T
# (the tail causes a translation along
# the local Y axis)
matrix[3][1] -= b_obj.length
b_child.matrix_local = matrix
# parent child to the bone
b_armature.makeParentBone(
[b_child], b_obj.name)
b_armature.data.restPosition = False
else:
raise RuntimeError(
"Unexpected object type %s" % b_obj.__class__)
# track camera for billboard nodes
if isinstance(niBlock, NifFormat.NiBillboardNode):
# find camera object
for obj in self.context.scene.objects:
if obj.type == 'CAMERA':
break
else:
raise NifImportError(
"Scene needs camera for billboard node"
" (add a camera and try again)")
# make b_obj track camera object
#b_obj.setEuler(0,0,0)
b_obj.constraints.append(
bpy.types.Constraint.TRACKTO)
self.warning(
"Constraint for billboard node on %s added"
" but target not set due to transform bug"
" in Blender. Set target to Camera manually."
% b_obj)
constr = b_obj.constraints[-1]
constr[Blender.Constraint.Settings.TRACK] = Blender.Constraint.Settings.TRACKZ
constr[Blender.Constraint.Settings.UP] = Blender.Constraint.Settings.UPY
# yields transform bug!
#constr[Blender.Constraint.Settings.TARGET] = obj
# set object transform
# this must be done after all children objects have been
# parented to b_obj
if isinstance(b_obj, bpy.types.Object):
# note: bones already have their matrix set
b_obj.matrix_local = self.import_matrix(niBlock)
# import the animations
if self.properties.animation:
self.set_animation(niBlock, b_obj)
# import the extras
self.import_text_keys(niBlock)
# import vis controller
self.import_object_vis_controller(
b_object=b_obj, n_node=niBlock)
# import extra node data, such as node type
# (other types should be added here too)
if (isinstance(niBlock, NifFormat.NiLODNode)
# XXX additional isinstance(b_obj, bpy.types.Object)
# XXX is a 'workaround' to the limitation that bones
# XXX cannot have properties in Blender 2.4x
# XXX (remove this check with Blender 2.5)
and isinstance(b_obj, bpy.types.Object)):
b_obj.addProperty("Type", "NiLODNode", "STRING")
# import lod data
range_data = niBlock.lod_level_data
for lod_level, (n_child, b_child) in zip(
range_data.lod_levels, b_children_list):
b_child.addProperty(
"Near Extent", lod_level.near_extent, "FLOAT")
b_child.addProperty(
"Far Extent", lod_level.far_extent, "FLOAT")
return b_obj
# all else is currently discarded
return None
def import_name(self, niBlock, max_length=22):
"""Get unique name for an object, preserving existing names.
The maximum name length defaults to 22, since this is the
maximum for Blender objects. Bone names can reach length 32.
:param niBlock: A named nif block.
:type niBlock: :class:`~pyffi.formats.nif.NifFormat.NiObjectNET`
:param max_length: The maximum length of the name.
:type max_length: :class:`int`
"""
if niBlock in self.names:
return self.names[niBlock]
self.debug(
"Importing name for %s block from %s"
% (niBlock.__class__.__name__, niBlock.name))
# find unique name for Blender to use
uniqueInt = 0
# strip null terminator from name
niBlock.name = niBlock.name.strip(b"\x00")
niName = niBlock.name.decode()
# if name is empty, create something non-empty
if not niName:
if isinstance(niBlock, NifFormat.RootCollisionNode):
niName = "collision"
else:
niName = "noname"
for uniqueInt in range(-1, 1000):
# limit name length
if uniqueInt == -1:
shortName = niName[:max_length-1]
else:
shortName = ('%s.%02d'
% (niName[:max_length-4],
uniqueInt))
# bone naming convention for blender
shortName = self.get_bone_name_for_blender(shortName)
# make sure it is unique
if (shortName not in bpy.data.objects
and shortName not in bpy.data.materials
and shortName not in bpy.data.meshes):
# shortName not in use anywhere
break
else:
raise RuntimeError("Ran out of names.")
# save mapping
# block niBlock has Blender name shortName
self.names[niBlock] = shortName
# Blender name shortName corresponds to niBlock
self.blocks[shortName] = niBlock
self.debug("Selected unique name %s" % shortName)
return shortName
def import_matrix(self, niBlock, relative_to=None):
"""Retrieves a niBlock's transform matrix as a Mathutil.Matrix."""
n_scale, n_rot_mat3, n_loc_vec3 = niBlock.get_transform(relative_to).get_scale_rotation_translation()
# create a location matrix
b_loc_vec = mathutils.Vector(n_loc_vec3.as_tuple())
b_loc_vec = mathutils.Matrix.Translation(b_loc_vec)
# create a scale matrix
b_scale_mat = mathutils.Matrix.Scale(n_scale, 4)
# create a rotation matrix
b_rot_mat = mathutils.Matrix()
b_rot_mat[0].xyz = n_rot_mat3.m_11, n_rot_mat3.m_21, n_rot_mat3.m_31
b_rot_mat[1].xyz = n_rot_mat3.m_12, n_rot_mat3.m_22, n_rot_mat3.m_32
b_rot_mat[2].xyz = n_rot_mat3.m_13, n_rot_mat3.m_23, n_rot_mat3.m_33
return b_loc_vec * b_rot_mat * b_scale_mat
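    # Editor's note: a minimal sketch, not part of the original importer,
    # assuming the Blender 2.5x mathutils API used above, where "*" composes
    # matrices; the product is built in the same order as the return
    # statement of import_matrix():
    #   >>> import mathutils
    #   >>> b_loc = mathutils.Matrix.Translation(mathutils.Vector((1.0, 2.0, 3.0)))
    #   >>> b_rot = mathutils.Matrix.Rotation(0.5, 4, 'Z')
    #   >>> b_scale = mathutils.Matrix.Scale(2.0, 4)
    #   >>> b_total = b_loc * b_rot * b_scale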
def import_empty(self, niBlock):
"""Creates and returns a grouping empty."""
shortname = self.import_name(niBlock)
b_empty = bpy.data.objects.new(shortname, None)
# TODO - is longname needed???
b_empty.niftools.longname = niBlock.name.decode()
self.context.scene.objects.link(b_empty)
if niBlock.name in self.bone_priorities:
constr = b_empty.constraints.append(
bpy.types.Constraint.NULL)
constr.name = "priority:%i" % self.bone_priorities[niBlock.name]
return b_empty
def import_material_controllers(self, b_material, n_geom):
"""Import material animation data for given geometry."""
if not self.properties.animation:
return
self.import_material_alpha_controller(b_material, n_geom)
self.import_material_color_controller(
b_material=b_material,
b_channels=("MirR", "MirG", "MirB"),
n_geom=n_geom,
n_target_color=NifFormat.TargetColor.TC_AMBIENT)
self.import_material_color_controller(
b_material=b_material,
b_channels=("R", "G", "B"),
n_geom=n_geom,
n_target_color=NifFormat.TargetColor.TC_DIFFUSE)
self.import_material_color_controller(
b_material=b_material,
b_channels=("SpecR", "SpecG", "SpecB"),
n_geom=n_geom,
n_target_color=NifFormat.TargetColor.TC_SPECULAR)
self.import_material_uv_controller(b_material, n_geom)
def import_material_uv_controller(self, b_material, n_geom):
"""Import UV controller data."""
# search for the block
n_ctrl = self.find_controller(n_geom,
NifFormat.NiUVController)
if not(n_ctrl and n_ctrl.data):
return
self.info("importing UV controller")
b_channels = ("OfsX", "OfsY", "SizeX", "SizeY")
for b_channel, n_uvgroup in zip(b_channels,
n_ctrl.data.uv_groups):
if n_uvgroup.keys:
# create curve in material ipo
b_ipo = self.get_material_ipo(b_material)
b_curve = b_ipo.addCurve(b_channel)
b_curve.interpolation = self.get_b_ipol_from_n_ipol(
n_uvgroup.interpolation)
b_curve.extend = self.get_extend_from_flags(n_ctrl.flags)
for n_key in n_uvgroup.keys:
if b_channel.startswith("Ofs"):
# offsets are negated
b_curve[1 + n_key.time * self.fps] = -n_key.value
else:
b_curve[1 + n_key.time * self.fps] = n_key.value
def import_material_alpha_controller(self, b_material, n_geom):
# find alpha controller
n_matprop = self.find_property(n_geom, NifFormat.NiMaterialProperty)
if not n_matprop:
return
n_alphactrl = self.find_controller(n_matprop,
NifFormat.NiAlphaController)
if not(n_alphactrl and n_alphactrl.data):
return
self.info("importing alpha controller")
b_channel = "Alpha"
b_ipo = self.get_material_ipo(b_material)
b_curve = b_ipo.addCurve(b_channel)
b_curve.interpolation = self.get_b_ipol_from_n_ipol(
n_alphactrl.data.data.interpolation)
b_curve.extend = self.get_extend_from_flags(n_alphactrl.flags)
for n_key in n_alphactrl.data.data.keys:
b_curve[1 + n_key.time * self.fps] = n_key.value
def import_material_color_controller(
self, b_material, b_channels, n_geom, n_target_color):
# find material color controller with matching target color
n_matprop = self.find_property(n_geom, NifFormat.NiMaterialProperty)
if not n_matprop:
return
for ctrl in n_matprop.get_controllers():
if isinstance(ctrl, NifFormat.NiMaterialColorController):
if ctrl.get_target_color() == n_target_color:
n_matcolor_ctrl = ctrl
break
else:
return
self.info(
"importing material color controller for target color %s"
" into blender channels %s"
% (n_target_color, b_channels))
# import data as curves
b_ipo = self.get_material_ipo(b_material)
for i, b_channel in enumerate(b_channels):
b_curve = b_ipo.addCurve(b_channel)
b_curve.interpolation = self.get_b_ipol_from_n_ipol(
n_matcolor_ctrl.data.data.interpolation)
b_curve.extend = self.get_extend_from_flags(n_matcolor_ctrl.flags)
for n_key in n_matcolor_ctrl.data.data.keys:
b_curve[1 + n_key.time * self.fps] = n_key.value.as_list()[i]
def get_material_ipo(self, b_material):
"""Return existing material ipo data, or if none exists, create one
and return that.
"""
if not b_material.ipo:
b_material.ipo = Blender.Ipo.New("Material", "MatIpo")
return b_material.ipo
def import_object_vis_controller(self, b_object, n_node):
"""Import vis controller for blender object."""
n_vis_ctrl = self.find_controller(n_node, NifFormat.NiVisController)
if not(n_vis_ctrl and n_vis_ctrl.data):
return
self.info("importing vis controller")
b_channel = "Layer"
b_ipo = self.get_object_ipo(b_object)
b_curve = b_ipo.addCurve(b_channel)
b_curve.interpolation = Blender.IpoCurve.InterpTypes.CONST
b_curve.extend = self.get_extend_from_flags(n_vis_ctrl.flags)
for n_key in n_vis_ctrl.data.keys:
b_curve[1 + n_key.time * self.fps] = (
2 ** (n_key.value + max([1] + self.context.scene.getLayers()) - 1))
def get_object_ipo(self, b_object):
"""Return existing object ipo data, or if none exists, create one
and return that.
"""
if not b_object.ipo:
b_object.ipo = Blender.Ipo.New("Object", "Ipo")
return b_object.ipo
def import_mesh(self, niBlock,
group_mesh=None,
applytransform=False,
relative_to=None):
"""Creates and returns a raw mesh, or appends geometry data to
group_mesh.
:param niBlock: The nif block whose mesh data to import.
:type niBlock: C{NiTriBasedGeom}
:param group_mesh: The mesh to which to append the geometry
data. If C{None}, a new mesh is created.
:type group_mesh: A Blender object that has mesh data.
:param applytransform: Whether to apply the niBlock's
transformation to the mesh. If group_mesh is not C{None},
then applytransform must be C{True}.
:type applytransform: C{bool}
"""
assert(isinstance(niBlock, NifFormat.NiTriBasedGeom))
self.info("Importing mesh data for geometry %s" % niBlock.name)
if group_mesh:
b_obj = group_mesh
b_mesh = group_mesh.data
else:
# Mesh name -> must be unique, so tag it if needed
b_name = self.import_name(niBlock)
# create mesh data
b_mesh = bpy.data.meshes.new(b_name)
# create mesh object and link to data
b_obj = bpy.data.objects.new(b_name, b_mesh)
# link mesh object to the scene
self.context.scene.objects.link(b_obj)
# save original name as object property, for export
if b_name != niBlock.name.decode():
b_obj['Nif Name'] = niBlock.name.decode()
# Mesh hidden flag
if niBlock.flags & 1 == 1:
b_obj.draw_type = 'WIRE' # hidden: wire
else:
b_obj.draw_type = 'TEXTURED' # not hidden: shaded
# set transform matrix for the mesh
if not applytransform:
if group_mesh:
raise NifImportError(
"BUG: cannot set matrix when importing meshes in groups;"
" use applytransform = True")
b_obj.matrix_local = self.import_matrix(niBlock, relative_to=relative_to)
else:
# used later on
transform = self.import_matrix(niBlock, relative_to=relative_to)
# shortcut for mesh geometry data
niData = niBlock.data
if not niData:
raise NifImportError("no shape data in %s" % b_name)
# vertices
n_verts = niData.vertices
# faces
n_tris = [list(tri) for tri in niData.get_triangles()]
# "sticky" UV coordinates: these are transformed in Blender UV's
n_uvco = niData.uv_sets
# vertex normals
n_norms = niData.normals
'''
Properties
'''
# Stencil (for double sided meshes)
n_stencil_prop = self.find_property(niBlock, NifFormat.NiStencilProperty)
# we don't check flags for now, nothing fancy
if n_stencil_prop:
b_mesh.show_double_sided = True
else:
b_mesh.show_double_sided = False
# Material
# note that NIF files only support one material for each trishape
# find material property
n_mat_prop = self.find_property(niBlock,
NifFormat.NiMaterialProperty)
if n_mat_prop:
# Texture
n_texture_prop = None
if n_uvco:
n_texture_prop = self.find_property(niBlock,
NifFormat.NiTexturingProperty)
# Alpha
n_alpha_prop = self.find_property(niBlock,
NifFormat.NiAlphaProperty)
# Specularity
n_specular_prop = self.find_property(niBlock,
NifFormat.NiSpecularProperty)
# Wireframe
n_wire_prop = self.find_property(niBlock,
NifFormat.NiWireframeProperty)
# bethesda shader
bsShaderProperty = self.find_property(
niBlock, NifFormat.BSShaderPPLightingProperty)
# texturing effect for environment map
# in official files this is activated by a NiTextureEffect child
            # preceding the niBlock
textureEffect = None
if isinstance(niBlock._parent, NifFormat.NiNode):
lastchild = None
for child in niBlock._parent.children:
if child is niBlock:
if isinstance(lastchild, NifFormat.NiTextureEffect):
textureEffect = lastchild
break
lastchild = child
else:
raise RuntimeError("texture effect scanning bug")
# in some mods the NiTextureEffect child follows the niBlock
# but it still works because it is listed in the effect list
# so handle this case separately
if not textureEffect:
for effect in niBlock._parent.effects:
if isinstance(effect, NifFormat.NiTextureEffect):
textureEffect = effect
break
# extra datas (for sid meier's railroads) that have material info
extra_datas = []
for extra in niBlock.get_extra_datas():
if isinstance(extra, NifFormat.NiIntegerExtraData):
if extra.name in self.EXTRA_SHADER_TEXTURES:
# yes, it describes the shader slot number
extra_datas.append(extra)
# create material and assign it to the mesh
# XXX todo: delegate search for properties to import_material
material = self.materialhelper.import_material(n_mat_prop, n_texture_prop,
n_alpha_prop, n_specular_prop,
textureEffect, n_wire_prop,
bsShaderProperty, extra_datas)
# XXX todo: merge this call into import_material
self.import_material_controllers(material, niBlock)
b_mesh_materials = list(b_mesh.materials)
try:
materialIndex = b_mesh_materials.index(material)
except ValueError:
materialIndex = len(b_mesh_materials)
b_mesh.materials.append(material)
'''
# if mesh has one material with n_wire_prop, then make the mesh
# wire in 3D view
if n_wire_prop:
b_obj.draw_type = 'WIRE'
'''
else:
material = None
materialIndex = 0
# if there are no vertices then enable face index shifts
# (this fixes an issue with indexing)
if len(b_mesh.vertices) == 0:
check_shift = True
else:
check_shift = False
# v_map will store the vertex index mapping
# nif vertex i maps to blender vertex v_map[i]
v_map = [0 for i in range(len(n_verts))] # pre-allocate memory, for faster performance
# Following code avoids introducing unwanted cracks in UV seams:
# Construct vertex map to get unique vertex / normal pair list.
# We use a Python dictionary to remove doubles and to keep track of indices.
# While we are at it, we also add vertices while constructing the map.
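        # Editor's note: illustrative sketch only, not part of the original
        # importer. The dictionary key built below quantises the position
        # (and normal) components to integers, so two nif vertices that agree
        # to within the chosen resolution collapse onto a single Blender
        # vertex; e.g. assuming VERTEX_RESOLUTION == 1000:
        #   >>> int(0.12341 * 1000) == int(0.12349 * 1000)
        #   True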
n_map = {}
b_v_index = len(b_mesh.vertices)
for i, v in enumerate(n_verts):
# The key k identifies unique vertex /normal pairs.
# We use a tuple of ints for key, this works MUCH faster than a
# tuple of floats.
if n_norms:
n = n_norms[i]
k = (int(v.x*self.VERTEX_RESOLUTION),
int(v.y*self.VERTEX_RESOLUTION),
int(v.z*self.VERTEX_RESOLUTION),
int(n.x*self.NORMAL_RESOLUTION),
int(n.y*self.NORMAL_RESOLUTION),
int(n.z*self.NORMAL_RESOLUTION))
else:
k = (int(v.x*self.VERTEX_RESOLUTION),
int(v.y*self.VERTEX_RESOLUTION),
int(v.z*self.VERTEX_RESOLUTION))
# check if vertex was already added, and if so, what index
try:
                # this is the bottleneck...
# can we speed this up?
n_map_k = n_map[k]
except KeyError:
n_map_k = None
if not n_map_k:
# not added: new vertex / normal pair
n_map[k] = i # unique vertex / normal pair with key k was added, with NIF index i
v_map[i] = b_v_index # NIF vertex i maps to blender vertex b_v_index
# add the vertex
if applytransform:
v = mathutils.Vector([v.x, v.y, v.z])
v = v * transform
b_mesh.vertices.add(1)
b_mesh.vertices[-1].co = [v.x, v.y, v.z]
else:
b_mesh.vertices.add(1)
b_mesh.vertices[-1].co = [v.x, v.y, v.z]
# adds normal info if present (Blender recalculates these when
# switching between edit mode and object mode, handled further)
#if n_norms:
# mv = b_mesh.vertices[b_v_index]
# n = n_norms[i]
# mv.normal = mathutils.Vector(n.x, n.y, n.z)
b_v_index += 1
else:
# already added
# NIF vertex i maps to Blender vertex v_map[n_map_k]
v_map[i] = v_map[n_map_k]
# report
self.debug("%i unique vertex-normal pairs" % len(n_map))
# release memory
del n_map
# Adds the faces to the mesh
f_map = [None]*len(n_tris)
b_f_index = len(b_mesh.faces)
num_new_faces = 0 # counter for debugging
unique_faces = set() # to avoid duplicate faces
for i, f in enumerate(n_tris):
# get face index
f_verts = [v_map[vert_index] for vert_index in f]
# skip degenerate faces
# we get a ValueError on faces.extend otherwise
if (f_verts[0] == f_verts[1]) or (f_verts[1] == f_verts[2]) or (f_verts[2] == f_verts[0]):
continue
if tuple(f_verts) in unique_faces:
continue
unique_faces.add(tuple(f_verts))
b_mesh.faces.add(1)
if f_verts[2] == 0:
# eeekadoodle fix
f_verts[0], f_verts[1], f_verts[2] = f_verts[2], f_verts[0], f_verts[1]
f[0], f[1], f[2] = f[2], f[0], f[1] # f[0] comes second
b_mesh.faces[-1].vertices_raw = f_verts + [0]
# keep track of added faces, mapping NIF face index to
# Blender face index
f_map[i] = b_f_index
b_f_index += 1
num_new_faces += 1
# at this point, deleted faces (degenerate or duplicate)
# satisfy f_map[i] = None
self.debug("%i unique faces" % num_new_faces)
# set face smoothing and material
for b_f_index in f_map:
if b_f_index is None:
continue
f = b_mesh.faces[b_f_index]
f.use_smooth = True if n_norms else False
f.material_index = materialIndex
# vertex colors
n_vcol = niData.vertex_colors
if n_vcol:
# create vertex_layers
b_meshcolorlayer = b_mesh.vertex_colors.new(name="VertexColor") # color layer
b_meshcolorlayeralpha = b_mesh.vertex_colors.new(name="VertexAlpha") # greyscale
# Mesh Vertex Color / Mesh Face
for n_tri, b_face_index in zip(n_tris, f_map):
if b_face_index is None:
continue
# MeshFace to MeshColor
b_meshcolor = b_meshcolorlayer.data[b_face_index]
b_meshalpha = b_meshcolorlayeralpha.data[b_face_index]
for n_vert_index, n_vert in enumerate(n_tri):
'''TODO: Request index access in the Bpy API
b_meshcolor.color[n_vert_index]'''
# Each MeshColor has n Color's, mapping to (n)_vertex.
b_color = getattr(b_meshcolor, "color%s" % (n_vert_index + 1))
b_colora = getattr(b_meshalpha, "color%s" % (n_vert_index + 1))
b_color.r = n_vcol[n_vert].r
b_color.g = n_vcol[n_vert].g
b_color.b = n_vcol[n_vert].b
b_colora.v = n_vcol[n_vert].a
# vertex colors influence lighting...
# we have to set the use_vertex_color_light flag on the material
# see below
# UV coordinates
        # NIF files only support 'sticky' (per-vertex) UV coordinates, and
        # duplicate vertices to emulate hard edges and UV seams. So whenever
        # a hard edge or a UV seam is present in the mesh, vertices are
        # duplicated. Blender must only duplicate vertices for hard edges;
        # duplicating for UV seams would introduce unnecessary hard edges.
# only import UV if there are faces
# (some corner cases have only one vertex, and no faces,
# and b_mesh.faceUV = 1 on such mesh raises a runtime error)
if b_mesh.faces:
            # blender 2.5+ always uses uv's per face?
#b_mesh.faceUV = 1
#b_mesh.vertexUV = 0
for i, uv_set in enumerate(n_uvco):
# Set the face UV's for the mesh. The NIF format only supports
# vertex UV's, but Blender only allows explicit editing of face
# UV's, so load vertex UV's as face UV's
uvlayer = self.materialhelper.get_uv_layer_name(i)
if not uvlayer in b_mesh.uv_textures:
b_mesh.uv_textures.new(uvlayer)
for f, b_f_index in zip(n_tris, f_map):
if b_f_index is None:
continue
uvlist = [(uv_set[vert_index].u, 1.0 - uv_set[vert_index].v) for vert_index in f]
b_mesh.uv_textures[uvlayer].data[b_f_index].uv1 = uvlist[0]
b_mesh.uv_textures[uvlayer].data[b_f_index].uv2 = uvlist[1]
b_mesh.uv_textures[uvlayer].data[b_f_index].uv3 = uvlist[2]
b_mesh.uv_textures.active_index = 0
if material:
# fix up vertex colors depending on whether we had textures in the
# material
mbasetex = material.texture_slots[0]
mglowtex = material.texture_slots[1]
if b_mesh.vertex_colors:
if mbasetex or mglowtex:
# textured material: vertex colors influence lighting
material.use_vertex_color_light = True
else:
                    # non-textured material: vertex colors influence color
material.use_vertex_color_paint = True
            # if there's a base texture assigned to this material, set it
            # to be displayed in Blender's 3D view
            # (but only if there are UV coordinates)
if mbasetex and mbasetex.texture and n_uvco:
imgobj = mbasetex.texture.image
if imgobj:
for b_f_index in f_map:
if b_f_index is None:
continue
tface = b_mesh.uv_textures.active.data[b_f_index]
# gone in blender 2.5x+?
# f.mode = Blender.Mesh.FaceModes['TEX']
# f.transp = Blender.Mesh.FaceTranspModes['ALPHA']
tface.image = imgobj
# import skinning info, for meshes affected by bones
skininst = niBlock.skin_instance
if skininst:
skindata = skininst.data
bones = skininst.bones
boneWeights = skindata.bone_list
for idx, bone in enumerate(bones):
# skip empty bones (see pyffi issue #3114079)
if not bone:
continue
vertex_weights = boneWeights[idx].vertex_weights
groupname = self.names[bone]
if not groupname in b_mesh.getVertGroupNames():
b_mesh.addVertGroup(groupname)
for skinWeight in vertex_weights:
vert = skinWeight.index
weight = skinWeight.weight
b_mesh.assignVertsToGroup(
groupname, [v_map[vert]], weight,
Blender.Mesh.AssignModes.REPLACE)
# import body parts as vertex groups
if isinstance(skininst, NifFormat.BSDismemberSkinInstance):
skinpart = niBlock.get_skin_partition()
for bodypart, skinpartblock in zip(
skininst.partitions, skinpart.skin_partition_blocks):
bodypart_wrap = NifFormat.BSDismemberBodyPartType()
bodypart_wrap.set_value(bodypart.body_part)
groupname = bodypart_wrap.get_detail_display()
# create vertex group if it did not exist yet
if not(groupname in b_mesh.getVertGroupNames()):
b_mesh.addVertGroup(groupname)
# find vertex indices of this group
groupverts = [v_map[v_index]
for v_index in skinpartblock.vertex_map]
# create the group
b_mesh.assignVertsToGroup(
groupname, groupverts, 1,
Blender.Mesh.AssignModes.ADD)
# import morph controller
# XXX todo: move this to import_mesh_controllers
if self.properties.animation:
morphCtrl = self.find_controller(niBlock, NifFormat.NiGeomMorpherController)
if morphCtrl:
morphData = morphCtrl.data
if morphData.num_morphs:
# insert base key at frame 1, using relative keys
b_mesh.insertKey(1, 'relative')
# get name for base key
keyname = morphData.morphs[0].frame_name
if not keyname:
keyname = 'Base'
# set name for base key
b_mesh.key.blocks[0].name = keyname
# get base vectors and import all morphs
baseverts = morphData.morphs[0].vectors
b_ipo = Blender.Ipo.New('Key' , 'KeyIpo')
b_mesh.key.ipo = b_ipo
for idxMorph in range(1, morphData.num_morphs):
# get name for key
keyname = morphData.morphs[idxMorph].frame_name
if not keyname:
keyname = 'Key %i' % idxMorph
self.info("inserting key '%s'" % keyname)
# get vectors
morphverts = morphData.morphs[idxMorph].vectors
# for each vertex calculate the key position from base
# pos + delta offset
assert(len(baseverts) == len(morphverts) == len(v_map))
for bv, mv, b_v_index in zip(baseverts, morphverts, v_map):
base = mathutils.Vector(bv.x, bv.y, bv.z)
delta = mathutils.Vector(mv.x, mv.y, mv.z)
v = base + delta
if applytransform:
v *= transform
b_mesh.vertices[b_v_index].co[0] = v.x
b_mesh.vertices[b_v_index].co[1] = v.y
b_mesh.vertices[b_v_index].co[2] = v.z
# update the mesh and insert key
b_mesh.insertKey(idxMorph, 'relative')
# set name for key
b_mesh.key.blocks[idxMorph].name = keyname
# set up the ipo key curve
try:
b_curve = b_ipo.addCurve(keyname)
except ValueError:
# this happens when two keys have the same name
# an instance of this is in fallout 3
# meshes/characters/_male/skeleton.nif HeadAnims:0
self.warning(
"skipped duplicate of key '%s'" % keyname)
# no idea how to set up the bezier triples -> switching
# to linear instead
b_curve.interpolation = Blender.IpoCurve.InterpTypes.LINEAR
# select extrapolation
b_curve.extend = self.get_extend_from_flags(morphCtrl.flags)
# set up the curve's control points
# first find the keys
# older versions store keys in the morphData
morphkeys = morphData.morphs[idxMorph].keys
# newer versions store keys in the controller
if (not morphkeys) and morphCtrl.interpolators:
morphkeys = morphCtrl.interpolators[idxMorph].data.data.keys
for key in morphkeys:
x = key.value
frame = 1+int(key.time * self.fps + 0.5)
b_curve.addBezier( ( frame, x ) )
# finally: return to base position
for bv, b_v_index in zip(baseverts, v_map):
base = mathutils.Vector(bv.x, bv.y, bv.z)
if applytransform:
base *= transform
b_mesh.vertices[b_v_index].co[0] = base.x
b_mesh.vertices[b_v_index].co[1] = base.y
b_mesh.vertices[b_v_index].co[2] = base.z
# import facegen morphs
if self.egmdata:
# XXX if there is an egm, the assumption is that there is only one
# XXX mesh in the nif
sym_morphs = [list(morph.get_relative_vertices())
for morph in self.egmdata.sym_morphs]
asym_morphs = [list(morph.get_relative_vertices())
for morph in self.egmdata.asym_morphs]
# insert base key at frame 1, using relative keys
b_mesh.insertKey(1, 'relative')
if self.IMPORT_EGMANIM:
# if morphs are animated: create key ipo for mesh
b_ipo = Blender.Ipo.New('Key' , 'KeyIpo')
b_mesh.key.ipo = b_ipo
morphs = ([(morph, "EGM SYM %i" % i)
for i, morph in enumerate(sym_morphs)]
+
[(morph, "EGM ASYM %i" % i)
for i, morph in enumerate(asym_morphs)])
for morphverts, keyname in morphs:
# length check disabled
# as sometimes, oddly, the morph has more vertices...
#assert(len(verts) == len(morphverts) == len(v_map))
# for each vertex calculate the key position from base
# pos + delta offset
                for bv, mv, b_v_index in zip(n_verts, morphverts, v_map):
base = mathutils.Vector(bv.x, bv.y, bv.z)
delta = mathutils.Vector(mv[0], mv[1], mv[2])
v = base + delta
if applytransform:
v *= transform
b_mesh.vertices[b_v_index].co[0] = v.x
b_mesh.vertices[b_v_index].co[1] = v.y
b_mesh.vertices[b_v_index].co[2] = v.z
# update the mesh and insert key
b_mesh.insertKey(1, 'relative')
# set name for key
b_mesh.key.blocks[-1].name = keyname
if self.IMPORT_EGMANIM:
# set up the ipo key curve
b_curve = b_ipo.addCurve(keyname)
# linear interpolation
b_curve.interpolation = Blender.IpoCurve.InterpTypes.LINEAR
# constant extrapolation
b_curve.extend = Blender.IpoCurve.ExtendTypes.CONST
# set up the curve's control points
framestart = 1 + len(b_mesh.key.blocks) * 10
for frame, value in ((framestart, 0),
(framestart + 5, self.IMPORT_EGMANIMSCALE),
(framestart + 10, 0)):
b_curve.addBezier( ( frame, value ) )
if self.IMPORT_EGMANIM:
# set begin and end frame
self.context.scene.getRenderingContext().startFrame(1)
self.context.scene.getRenderingContext().endFrame(
11 + len(b_mesh.key.blocks) * 10)
# finally: return to base position
            for bv, b_v_index in zip(n_verts, v_map):
base = mathutils.Vector(bv.x, bv.y, bv.z)
if applytransform:
base *= transform
b_mesh.vertices[b_v_index].co[0] = base.x
b_mesh.vertices[b_v_index].co[1] = base.y
b_mesh.vertices[b_v_index].co[2] = base.z
# import priority if existing
if niBlock.name in self.bone_priorities:
constr = b_obj.constraints.append(
bpy.types.Constraint.NULL)
constr.name = "priority:%i" % self.bone_priorities[niBlock.name]
# recalculate mesh to render correctly
# implementation note: update() without validate() can cause crash
b_mesh.calc_normals()
b_mesh.validate()
b_mesh.update()
return b_obj
# import animation groups
def import_text_keys(self, niBlock):
"""Stores the text keys that define animation start and end in a text
buffer, so that they can be re-exported. Since the text buffer is
cleared on each import only the last import will be exported
correctly."""
if isinstance(niBlock, NifFormat.NiControllerSequence):
txk = niBlock.text_keys
else:
txk = niBlock.find(block_type=NifFormat.NiTextKeyExtraData)
if txk:
# get animation text buffer, and clear it if it already exists
            # TODO get rid of try-except block here
try:
animtxt = [txt for txt in bpy.data.texts if txt.name == "Anim"][0]
animtxt.clear()
except:
animtxt = bpy.data.texts.new("Anim")
frame = 1
for key in txk.text_keys:
newkey = str(key.value).replace('\r\n', '/').rstrip('/')
frame = 1 + int(key.time * self.fps + 0.5) # time 0.0 is frame 1
animtxt.write('%i/%s\n'%(frame, newkey))
# set start and end frames
self.context.scene.getRenderingContext().startFrame(1)
self.context.scene.getRenderingContext().endFrame(frame)
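    # Editor's note: illustrative sketch only, not part of the original
    # importer. The "Anim" text buffer written above holds one "frame/name"
    # pair per line, e.g. for hypothetical text keys at 0.0 s and 1.5 s with
    # fps == 30:
    #   1/Idle: Start
    #   46/Idle: Stop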
def get_frames_per_second(self, roots):
"""Scan all blocks and return a reasonable number for FPS."""
# find all key times
key_times = []
for root in roots:
for kfd in root.tree(block_type=NifFormat.NiKeyframeData):
key_times.extend(key.time for key in kfd.translations.keys)
key_times.extend(key.time for key in kfd.scales.keys)
key_times.extend(key.time for key in kfd.quaternion_keys)
key_times.extend(key.time for key in kfd.xyz_rotations[0].keys)
key_times.extend(key.time for key in kfd.xyz_rotations[1].keys)
key_times.extend(key.time for key in kfd.xyz_rotations[2].keys)
for kfi in root.tree(block_type=NifFormat.NiBSplineInterpolator):
if not kfi.basis_data:
# skip bsplines without basis data (eg bowidle.kf in
# Oblivion)
continue
key_times.extend(
point * (kfi.stop_time - kfi.start_time)
/ (kfi.basis_data.num_control_points - 2)
for point in range(kfi.basis_data.num_control_points - 2))
for uvdata in root.tree(block_type=NifFormat.NiUVData):
for uvgroup in uvdata.uv_groups:
key_times.extend(key.time for key in uvgroup.keys)
# not animated, return a reasonable default
if not key_times:
return 30
# calculate FPS
fps = 30
lowest_diff = sum(abs(int(time * fps + 0.5) - (time * fps))
for time in key_times)
# for fps in range(1,120): #disabled, used for testing
for test_fps in [20, 25, 35]:
diff = sum(abs(int(time * test_fps + 0.5)-(time * test_fps))
for time in key_times)
if diff < lowest_diff:
lowest_diff = diff
fps = test_fps
self.info("Animation estimated at %i frames per second." % fps)
return fps
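    # Editor's note: a worked example of the heuristic above, not part of the
    # original importer. For key times authored at 25 fps the rounding error
    # essentially vanishes only at the matching rate, so 25 beats the default
    # of 30:
    #   >>> key_times = [0.0, 0.04, 0.08, 0.12]
    #   >>> sum(abs(int(t * 25 + 0.5) - t * 25) for t in key_times)  # ~0.0
    #   >>> sum(abs(int(t * 30 + 0.5) - t * 30) for t in key_times)  # ~1.0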
def store_animation_data(self, rootBlock):
return
# very slow, implement later
"""
niBlockList = [block for block in rootBlock.tree() if isinstance(block, NifFormat.NiAVObject)]
for niBlock in niBlockList:
kfc = self.find_controller(niBlock, NifFormat.NiKeyframeController)
if not kfc: continue
kfd = kfc.data
if not kfd: continue
_ANIMATION_DATA.extend([{'data': key, 'block': niBlock, 'frame': None} for key in kfd.translations.keys])
_ANIMATION_DATA.extend([{'data': key, 'block': niBlock, 'frame': None} for key in kfd.scales.keys])
if kfd.rotation_type == 4:
_ANIMATION_DATA.extend([{'data': key, 'block': niBlock, 'frame': None} for key in kfd.xyz_rotations.keys])
else:
_ANIMATION_DATA.extend([{'data': key, 'block': niBlock, 'frame': None} for key in kfd.quaternion_keys])
# set the frames in the _ANIMATION_DATA list
for key in _ANIMATION_DATA:
# time 0 is frame 1
key['frame'] = 1 + int(key['data'].time * self.fps + 0.5)
# sort by frame, I need this later
_ANIMATION_DATA.sort(lambda key1, key2: cmp(key1['frame'], key2['frame']))
"""
def set_parents(self, niBlock):
"""Set the parent block recursively through the tree, to allow
crawling back as needed."""
if isinstance(niBlock, NifFormat.NiNode):
# list of non-null children
children = [ child for child in niBlock.children if child ]
for child in children:
child._parent = niBlock
self.set_parents(child)
def is_grouping_node(self, niBlock):
"""Determine whether node is grouping node.
Returns the children which are grouped, or empty list if it is not a
grouping node.
"""
# combining shapes: disable grouping
if not self.properties.combine_shapes:
return []
# check that it is a ninode
if not isinstance(niBlock, NifFormat.NiNode):
return []
# NiLODNodes are never grouping nodes
# (this ensures that they are imported as empties, with LODs
# as child meshes)
if isinstance(niBlock, NifFormat.NiLODNode):
return []
# root collision node: join everything
if isinstance(niBlock, NifFormat.RootCollisionNode):
return [ child for child in niBlock.children if
isinstance(child, NifFormat.NiTriBasedGeom) ]
# check that node has name
node_name = niBlock.name
if not node_name:
return []
# strip "NonAccum" trailer, if present
if node_name[-9:].lower() == " nonaccum":
node_name = node_name[:-9]
# get all geometry children
return [ child for child in niBlock.children
if (isinstance(child, NifFormat.NiTriBasedGeom)
and child.name.find(node_name) != -1) ]
def set_animation(self, niBlock, b_obj):
"""Load basic animation info for this object."""
kfc = self.find_controller(niBlock, NifFormat.NiKeyframeController)
if not kfc:
# no animation data: do nothing
return
if kfc.interpolator:
if isinstance(kfc.interpolator, NifFormat.NiBSplineInterpolator):
kfd = None # not supported yet so avoids fatal error - should be kfc.interpolator.spline_data when spline data is figured out.
else:
kfd = kfc.interpolator.data
else:
kfd = kfc.data
if not kfd:
# no animation data: do nothing
return
# denote progress
self.info("Animation")
self.info("Importing animation data for %s" % b_obj.name)
assert(isinstance(kfd, NifFormat.NiKeyframeData))
# create an Ipo for this object
b_ipo = self.get_object_ipo(b_obj)
# get the animation keys
translations = kfd.translations
scales = kfd.scales
# add the keys
self.debug('Scale keys...')
for key in scales.keys:
frame = 1+int(key.time * self.fps + 0.5) # time 0.0 is frame 1
Blender.Set('curframe', frame)
b_obj.SizeX = key.value
b_obj.SizeY = key.value
b_obj.SizeZ = key.value
b_obj.insertIpoKey(Blender.Object.SIZE)
# detect the type of rotation keys
rotation_type = kfd.rotation_type
if rotation_type == 4:
# uses xyz rotation
xkeys = kfd.xyz_rotations[0].keys
ykeys = kfd.xyz_rotations[1].keys
zkeys = kfd.xyz_rotations[2].keys
self.debug('Rotation keys...(euler)')
for (xkey, ykey, zkey) in zip(xkeys, ykeys, zkeys):
frame = 1+int(xkey.time * self.fps + 0.5) # time 0.0 is frame 1
# XXX we assume xkey.time == ykey.time == zkey.time
Blender.Set('curframe', frame)
# both in radians, no conversion needed
b_obj.RotX = xkey.value
b_obj.RotY = ykey.value
b_obj.RotZ = zkey.value
b_obj.insertIpoKey(Blender.Object.ROT)
else:
# uses quaternions
if kfd.quaternion_keys:
self.debug('Rotation keys...(quaternions)')
for key in kfd.quaternion_keys:
frame = 1+int(key.time * self.fps + 0.5) # time 0.0 is frame 1
Blender.Set('curframe', frame)
rot = mathutils.Quaternion(key.value.w, key.value.x, key.value.y, key.value.z).toEuler()
# Blender euler is in degrees, object RotXYZ is in radians
b_obj.RotX = rot.x * self.D2R
b_obj.RotY = rot.y * self.D2R
b_obj.RotZ = rot.z * self.D2R
b_obj.insertIpoKey(Blender.Object.ROT)
if translations.keys:
self.debug('Translation keys...')
for key in translations.keys:
frame = 1+int(key.time * self.fps + 0.5) # time 0.0 is frame 1
Blender.Set('curframe', frame)
b_obj.LocX = key.value.x
b_obj.LocY = key.value.y
b_obj.LocZ = key.value.z
b_obj.insertIpoKey(Blender.Object.LOC)
Blender.Set('curframe', 1)
def import_bhk_constraints(self, hkbody):
"""Imports a bone havok constraint as Blender object constraint."""
assert(isinstance(hkbody, NifFormat.bhkRigidBody))
# check for constraints
if not hkbody.constraints:
return
# find objects
if len(self.havok_objects[hkbody]) != 1:
self.warning(
"Rigid body with no or multiple shapes, constraints skipped")
return
b_hkobj = self.havok_objects[hkbody][0]
self.info("Importing constraints for %s" % b_hkobj.name)
# now import all constraints
for hkconstraint in hkbody.constraints:
# check constraint entities
if not hkconstraint.num_entities == 2:
self.warning(
"Constraint with more than 2 entities, skipped")
continue
if not hkconstraint.entities[0] is hkbody:
self.warning(
"First constraint entity not self, skipped")
continue
if not hkconstraint.entities[1] in self.havok_objects:
self.warning(
"Second constraint entity not imported, skipped")
continue
# get constraint descriptor
if isinstance(hkconstraint, NifFormat.bhkRagdollConstraint):
hkdescriptor = hkconstraint.ragdoll
elif isinstance(hkconstraint, NifFormat.bhkLimitedHingeConstraint):
hkdescriptor = hkconstraint.limited_hinge
elif isinstance(hkconstraint, NifFormat.bhkHingeConstraint):
hkdescriptor = hkconstraint.hinge
elif isinstance(hkconstraint, NifFormat.bhkMalleableConstraint):
if hkconstraint.type == 7:
hkdescriptor = hkconstraint.ragdoll
elif hkconstraint.type == 2:
hkdescriptor = hkconstraint.limited_hinge
else:
self.warning("Unknown malleable type (%i), skipped"
% hkconstraint.type)
# extra malleable constraint settings
### damping parameters not yet in Blender Python API
### tau (force between bodies) not supported by Blender
else:
self.warning("Unknown constraint type (%s), skipped"
% hkconstraint.__class__.__name__)
continue
# add the constraint as a rigid body joint
b_constr = b_hkobj.constraints.append(bpy.types.Constraint.RIGIDBODYJOINT)
# note: rigidbodyjoint parameters (from Constraint.c)
# CONSTR_RB_AXX 0.0
# CONSTR_RB_AXY 0.0
# CONSTR_RB_AXZ 0.0
# CONSTR_RB_EXTRAFZ 0.0
# CONSTR_RB_MAXLIMIT0 0.0
# CONSTR_RB_MAXLIMIT1 0.0
# CONSTR_RB_MAXLIMIT2 0.0
# CONSTR_RB_MAXLIMIT3 0.0
# CONSTR_RB_MAXLIMIT4 0.0
# CONSTR_RB_MAXLIMIT5 0.0
# CONSTR_RB_MINLIMIT0 0.0
# CONSTR_RB_MINLIMIT1 0.0
# CONSTR_RB_MINLIMIT2 0.0
# CONSTR_RB_MINLIMIT3 0.0
# CONSTR_RB_MINLIMIT4 0.0
# CONSTR_RB_MINLIMIT5 0.0
# CONSTR_RB_PIVX 0.0
# CONSTR_RB_PIVY 0.0
# CONSTR_RB_PIVZ 0.0
# CONSTR_RB_TYPE 12
# LIMIT 63
# PARSIZEY 63
# TARGET [Object "capsule.002"]
# limit 3, 4, 5 correspond to angular limits along x, y and z
# and are measured in degrees
# pivx/y/z is the pivot point
# set constraint target
b_constr[Blender.Constraint.Settings.TARGET] = \
self.havok_objects[hkconstraint.entities[1]][0]
# set rigid body type (generic)
b_constr[Blender.Constraint.Settings.CONSTR_RB_TYPE] = 12
# limiting parameters (limit everything)
b_constr[Blender.Constraint.Settings.LIMIT] = 63
# get pivot point
pivot = mathutils.Vector(
hkdescriptor.pivot_a.x * self.HAVOK_SCALE,
hkdescriptor.pivot_a.y * self.HAVOK_SCALE,
hkdescriptor.pivot_a.z * self.HAVOK_SCALE)
# get z- and x-axes of the constraint
# (also see export_nif.py NifImport.export_constraints)
if isinstance(hkdescriptor, NifFormat.RagdollDescriptor):
# for ragdoll, take z to be the twist axis (central axis of the
# cone, that is)
axis_z = mathutils.Vector(
hkdescriptor.twist_a.x,
hkdescriptor.twist_a.y,
hkdescriptor.twist_a.z)
# for ragdoll, let x be the plane vector
axis_x = mathutils.Vector(
hkdescriptor.plane_a.x,
hkdescriptor.plane_a.y,
hkdescriptor.plane_a.z)
# set the angle limits
# (see http://niftools.sourceforge.net/wiki/Oblivion/Bhk_Objects/Ragdoll_Constraint
# for a nice picture explaining this)
b_constr[Blender.Constraint.Settings.CONSTR_RB_MINLIMIT5] = \
hkdescriptor.twist_min_angle
b_constr[Blender.Constraint.Settings.CONSTR_RB_MAXLIMIT5] = \
hkdescriptor.twist_max_angle
b_constr[Blender.Constraint.Settings.CONSTR_RB_MINLIMIT3] = \
-hkdescriptor.cone_max_angle
b_constr[Blender.Constraint.Settings.CONSTR_RB_MAXLIMIT3] = \
hkdescriptor.cone_max_angle
b_constr[Blender.Constraint.Settings.CONSTR_RB_MINLIMIT4] = \
hkdescriptor.plane_min_angle
b_constr[Blender.Constraint.Settings.CONSTR_RB_MAXLIMIT4] = \
hkdescriptor.plane_max_angle
elif isinstance(hkdescriptor, NifFormat.LimitedHingeDescriptor):
# for hinge, y is the vector on the plane of rotation defining
# the zero angle
axis_y = mathutils.Vector(
hkdescriptor.perp_2_axle_in_a_1.x,
hkdescriptor.perp_2_axle_in_a_1.y,
hkdescriptor.perp_2_axle_in_a_1.z)
                # for hinge, take x to be the axis of rotation
# (this corresponds with Blender's convention for hinges)
axis_x = mathutils.Vector(
hkdescriptor.axle_a.x,
hkdescriptor.axle_a.y,
hkdescriptor.axle_a.z)
# for hinge, z is the vector on the plane of rotation defining
# the positive direction of rotation
axis_z = mathutils.Vector(
hkdescriptor.perp_2_axle_in_a_2.x,
hkdescriptor.perp_2_axle_in_a_2.y,
hkdescriptor.perp_2_axle_in_a_2.z)
                # they should form an orthogonal basis
if (mathutils.CrossVecs(axis_x, axis_y)
- axis_z).length > 0.01:
# either not orthogonal, or negative orientation
if (mathutils.CrossVecs(-axis_x, axis_y)
- axis_z).length > 0.01:
self.warning(
"Axes are not orthogonal in %s;"
" arbitrary orientation has been chosen"
% hkdescriptor.__class__.__name__)
axis_z = mathutils.CrossVecs(axis_x, axis_y)
else:
# fix orientation
self.warning(
"X axis flipped in %s to fix orientation"
% hkdescriptor.__class__.__name__)
axis_x = -axis_x
                # properties with no Blender constraint equivalent are
                # stored as custom object properties instead
b_hkobj.addProperty("LimitedHinge_MaxAngle",
hkdescriptor.max_angle)
b_hkobj.addProperty("LimitedHinge_MinAngle",
hkdescriptor.min_angle)
b_hkobj.addProperty("LimitedHinge_MaxFriction",
hkdescriptor.max_friction)
elif isinstance(hkdescriptor, NifFormat.HingeDescriptor):
# for hinge, y is the vector on the plane of rotation defining
# the zero angle
axis_y = mathutils.Vector(
hkdescriptor.perp_2_axle_in_a_1.x,
hkdescriptor.perp_2_axle_in_a_1.y,
hkdescriptor.perp_2_axle_in_a_1.z)
# for hinge, z is the vector on the plane of rotation defining
# the positive direction of rotation
axis_z = mathutils.Vector(
hkdescriptor.perp_2_axle_in_a_2.x,
hkdescriptor.perp_2_axle_in_a_2.y,
hkdescriptor.perp_2_axle_in_a_2.z)
                # take x to be the axis of rotation
# (this corresponds with Blender's convention for hinges)
axis_x = mathutils.CrossVecs(axis_y, axis_z)
else:
raise ValueError("unknown descriptor %s"
% hkdescriptor.__class__.__name__)
# transform pivot point and constraint matrix into object
# coordinates
# (also see export_nif.py NifImport.export_constraints)
# the pivot point v is in hkbody coordinates
# however blender expects it in object coordinates, v'
# v * R * B = v' * O * T * B'
# with R = rigid body transform (usually unit tf)
# B = nif bone matrix
# O = blender object transform
# T = bone tail matrix (translation in Y direction)
# B' = blender bone matrix
# so we need to cancel out the object transformation by
# v' = v * R * B * B'^{-1} * T^{-1} * O^{-1}
# the local rotation L at the pivot point must be such that
# (axis_z + v) * R * B = ([0 0 1] * L + v') * O * T * B'
# so (taking the rotation parts of all matrices!!!)
# [0 0 1] * L = axis_z * R * B * B'^{-1} * T^{-1} * O^{-1}
# and similarly
# [1 0 0] * L = axis_x * R * B * B'^{-1} * T^{-1} * O^{-1}
# hence these give us the first and last row of L
# which is exactly enough to provide the euler angles
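            # editor's note (illustrative, not part of the original code):
            # concretely, pivot and axes are pushed through the inverse chain
            #   pivot' = pivot * R * B * B'^{-1} * T^{-1} * O^{-1}
            # with only the 3x3 rotation parts applied to axis_x and axis_z;
            # this is what the successive "pivot = pivot * transform" and
            # "axis_? = axis_? * transform" steps below implement.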
# multiply with rigid body transform
if isinstance(hkbody, NifFormat.bhkRigidBodyT):
# set rotation
transform = mathutils.Quaternion(
hkbody.rotation.w, hkbody.rotation.x,
hkbody.rotation.y, hkbody.rotation.z).toMatrix()
transform.resize4x4()
# set translation
transform[3][0] = hkbody.translation.x * 7
transform[3][1] = hkbody.translation.y * 7
transform[3][2] = hkbody.translation.z * 7
# apply transform
pivot = pivot * transform
transform = transform.rotationPart()
axis_z = axis_z * transform
axis_x = axis_x * transform
# next, cancel out bone matrix correction
# note that B' = X * B with X = self.bones_extra_matrix[B]
# so multiply with the inverse of X
for niBone in self.bones_extra_matrix:
if niBone.collision_object \
and niBone.collision_object.body is hkbody:
transform = mathutils.Matrix(
self.bones_extra_matrix[niBone])
transform.invert()
pivot = pivot * transform
transform = transform.rotationPart()
axis_z = axis_z * transform
axis_x = axis_x * transform
break
# cancel out bone tail translation
if b_hkobj.parentbonename:
pivot[1] -= b_hkobj.parent.data.bones[
b_hkobj.parentbonename].length
# cancel out object transform
transform = mathutils.Matrix(
b_hkobj.getMatrix('localspace'))
transform.invert()
pivot = pivot * transform
transform = transform.rotationPart()
axis_z = axis_z * transform
axis_x = axis_x * transform
# set pivot point
b_constr[Blender.Constraint.Settings.CONSTR_RB_PIVX] = pivot[0]
b_constr[Blender.Constraint.Settings.CONSTR_RB_PIVY] = pivot[1]
b_constr[Blender.Constraint.Settings.CONSTR_RB_PIVZ] = pivot[2]
# set euler angles
constr_matrix = mathutils.Matrix(
axis_x,
mathutils.CrossVecs(axis_z, axis_x),
axis_z)
constr_euler = constr_matrix.toEuler()
b_constr[Blender.Constraint.Settings.CONSTR_RB_AXX] = constr_euler.x
b_constr[Blender.Constraint.Settings.CONSTR_RB_AXY] = constr_euler.y
b_constr[Blender.Constraint.Settings.CONSTR_RB_AXZ] = constr_euler.z
# DEBUG
assert((axis_x - mathutils.Vector(1,0,0) * constr_matrix).length < 0.0001)
assert((axis_z - mathutils.Vector(0,0,1) * constr_matrix).length < 0.0001)
# the generic rigid body type is very buggy... so for simulation
# purposes let's transform it into ball and hinge
if isinstance(hkdescriptor, NifFormat.RagdollDescriptor):
# ball
b_constr[Blender.Constraint.Settings.CONSTR_RB_TYPE] = 1
elif isinstance(hkdescriptor, (NifFormat.LimitedHingeDescriptor,
NifFormat.HingeDescriptor)):
# (limited) hinge
b_constr[Blender.Constraint.Settings.CONSTR_RB_TYPE] = 2
else:
raise ValueError("unknown descriptor %s"
% hkdescriptor.__class__.__name__)
def import_kf_root(self, kf_root, root):
"""Merge kf into nif.
*** Note: this function will eventually move to PyFFI. ***
"""
self.info("Merging kf tree into nif tree")
# check that this is an Oblivion style kf file
if not isinstance(kf_root, NifFormat.NiControllerSequence):
raise NifImportError("non-Oblivion .kf import not supported")
# import text keys
self.import_text_keys(kf_root)
# go over all controlled blocks
for controlledblock in kf_root.controlled_blocks:
# get the name
nodename = controlledblock.get_node_name()
# match from nif tree?
node = root.find(block_name = nodename)
if not node:
self.info(
"Animation for %s but no such node found in nif tree"
% nodename)
continue
# node found, now find the controller
controllertype = controlledblock.get_controller_type()
if not controllertype:
self.info(
"Animation for %s without controller type, so skipping"
% nodename)
continue
controller = self.find_controller(node, getattr(NifFormat, controllertype))
if not controller:
self.info(
"Animation for %s with %s controller,"
" but no such controller type found"
" in corresponding node, so creating one"
% (nodename, controllertype))
controller = getattr(NifFormat, controllertype)()
# TODO set all the fields of this controller
node.add_controller(controller)
# yes! attach interpolator
controller.interpolator = controlledblock.interpolator
# in case of a NiTransformInterpolator without a data block
# we still must re-export the interpolator for Oblivion to
# accept the file
# so simply add dummy keyframe data for this one with just a single
# key to flag the exporter to export the keyframe as interpolator
# (i.e. length 1 keyframes are simply interpolators)
if isinstance(controller.interpolator,
NifFormat.NiTransformInterpolator) \
and controller.interpolator.data is None:
# create data block
kfi = controller.interpolator
kfi.data = NifFormat.NiTransformData()
# fill with info from interpolator
kfd = controller.interpolator.data
# copy rotation
kfd.num_rotation_keys = 1
kfd.rotation_type = NifFormat.KeyType.LINEAR_KEY
kfd.quaternion_keys.update_size()
kfd.quaternion_keys[0].time = 0.0
kfd.quaternion_keys[0].value.x = kfi.rotation.x
kfd.quaternion_keys[0].value.y = kfi.rotation.y
kfd.quaternion_keys[0].value.z = kfi.rotation.z
kfd.quaternion_keys[0].value.w = kfi.rotation.w
# copy translation
if kfi.translation.x < -1000000:
# invalid, happens in fallout 3, e.g. h2haim.kf
self.warning("ignored NaN in interpolator translation")
else:
kfd.translations.num_keys = 1
kfd.translations.keys.update_size()
kfd.translations.keys[0].time = 0.0
kfd.translations.keys[0].value.x = kfi.translation.x
kfd.translations.keys[0].value.y = kfi.translation.y
kfd.translations.keys[0].value.z = kfi.translation.z
# ignore scale, usually contains invalid data in interpolator
# save priority for future reference
# (priorities will be stored into the name of a NULL constraint on
# bones, see import_armature function)
self.bone_priorities[nodename] = controlledblock.priority
# DEBUG: save the file for manual inspection
#niffile = open("C:\\test.nif", "wb")
#NifFormat.write(niffile,
# version = 0x14000005, user_version = 11, roots = [root])
def menu_func(self, context):
"""Import operator for the menu."""
# TODO get default path from config registry
# default_path = bpy.data.filename.replace(".blend", ".nif")
default_path = "import.nif"
self.layout.operator(
NifImport.bl_idname,
text="NetImmerse/Gamebryo (.nif & .kf & .egm)"
).filepath = default_path
def register():
"""Register nif import operator."""
bpy.types.register(NifImport)
bpy.types.INFO_MT_file_import.append(menu_func)
def unregister():
"""Unregister nif import operator."""
bpy.types.unregister(NifImport)
bpy.types.INFO_MT_file_import.remove(menu_func)
if __name__ == '__main__':
"""Register nif import, when starting Blender."""
register()
| mit |
manishpatell/erpcustomizationssaiimpex123qwe | addons/crm/report/crm_phonecall_report.py | 309 | 3982 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.addons.crm import crm
from openerp.osv import fields, osv
AVAILABLE_STATES = [
('draft', 'Draft'),
('open', 'Todo'),
('cancel', 'Cancelled'),
('done', 'Held'),
('pending', 'Pending')
]
class crm_phonecall_report(osv.osv):
""" Phone calls by user and section """
_name = "crm.phonecall.report"
_description = "Phone calls by user and section"
_auto = False
_columns = {
'user_id':fields.many2one('res.users', 'User', readonly=True),
'section_id':fields.many2one('crm.case.section', 'Section', readonly=True),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'nbr': fields.integer('# of Cases', readonly=True), # TDE FIXME master: rename into nbr_cases
'state': fields.selection(AVAILABLE_STATES, 'Status', readonly=True),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'delay_close': fields.float('Delay to close', digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
'duration': fields.float('Duration', digits=(16,2),readonly=True, group_operator="avg"),
'delay_open': fields.float('Delay to open',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to open the case"),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="[('section_id','=',section_id),\
('object_id.model', '=', 'crm.phonecall')]"),
'partner_id': fields.many2one('res.partner', 'Partner' , readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'opening_date': fields.datetime('Opening Date', readonly=True, select=True),
'date_closed': fields.datetime('Close Date', readonly=True, select=True),
}
def init(self, cr):
""" Phone Calls By User And Section
        @param cr: the database cursor
"""
tools.drop_view_if_exists(cr, 'crm_phonecall_report')
cr.execute("""
create or replace view crm_phonecall_report as (
select
id,
c.date_open as opening_date,
c.date_closed as date_closed,
c.state,
c.user_id,
c.section_id,
c.categ_id,
c.partner_id,
c.duration,
c.company_id,
c.priority,
1 as nbr,
c.create_date as create_date,
extract('epoch' from (c.date_closed-c.create_date))/(3600*24) as delay_close,
extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open
from
crm_phonecall c
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cboothe/libkml | third_party/googletest-r108/test/gtest_break_on_failure_unittest.py | 15 | 6081 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
import os
import signal
import sys
import unittest
# Constants.
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = os.path.join(gtest_test_utils.GetBuildDir(),
'gtest_break_on_failure_unittest_');
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise.
"""
exit_code = os.system(command)
return os.WIFSIGNALED(exit_code)
# The unit test.
class GTestBreakOnFailureUnitTest(unittest.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = ' --%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = ' --%s' % BREAK_ON_FAILURE_FLAG
command = EXE_PATH + flag
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, command, should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
decvalts/iris | lib/iris/tests/unit/fileformats/grib/load_rules/test_convert.py | 2 | 7421 | # (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for :func:`iris.fileformats.grib.load_rules.convert`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import gribapi
import iris
from iris.fileformats.rules import Reference
from iris.tests import mock
from iris.tests.test_grib_load import TestGribSimple
from iris.tests.unit.fileformats import TestField
import iris.unit
from iris.fileformats.grib import GribWrapper
from iris.fileformats.grib.load_rules import convert
class Test_GribLevels_Mock(TestGribSimple):
# Unit test levels with mocking.
def test_grib2_height(self):
grib = self.mock_grib()
grib.edition = 2
grib.typeOfFirstFixedSurface = 103
grib.scaledValueOfFirstFixedSurface = 12345
grib.scaleFactorOfFirstFixedSurface = 0
grib.typeOfSecondFixedSurface = 255
cube = self.cube_from_message(grib)
self.assertEqual(
cube.coord('height'),
iris.coords.DimCoord(12345, standard_name="height", units="m"))
def test_grib2_bounded_height(self):
grib = self.mock_grib()
grib.edition = 2
grib.typeOfFirstFixedSurface = 103
grib.scaledValueOfFirstFixedSurface = 12345
grib.scaleFactorOfFirstFixedSurface = 0
grib.typeOfSecondFixedSurface = 103
grib.scaledValueOfSecondFixedSurface = 54321
grib.scaleFactorOfSecondFixedSurface = 0
cube = self.cube_from_message(grib)
self.assertEqual(
cube.coord('height'),
iris.coords.DimCoord(33333, standard_name="height", units="m",
bounds=[[12345, 54321]]))
def test_grib2_diff_bound_types(self):
grib = self.mock_grib()
grib.edition = 2
grib.typeOfFirstFixedSurface = 103
grib.scaledValueOfFirstFixedSurface = 12345
grib.scaleFactorOfFirstFixedSurface = 0
grib.typeOfSecondFixedSurface = 102
grib.scaledValueOfSecondFixedSurface = 54321
grib.scaleFactorOfSecondFixedSurface = 0
with mock.patch('warnings.warn') as warn:
cube = self.cube_from_message(grib)
warn.assert_called_with(
"Different vertical bound types not yet handled.")
class TestBoundedTime(TestField):
@staticmethod
def is_forecast_period(coord):
return (coord.standard_name == 'forecast_period' and
coord.units == 'hours')
@staticmethod
def is_time(coord):
return (coord.standard_name == 'time' and
coord.units == 'hours since epoch')
def assert_bounded_message(self, **kwargs):
attributes = {'productDefinitionTemplateNumber': 0,
'edition': 1, '_forecastTime': 15,
'_forecastTimeUnit': 'hours',
'phenomenon_bounds': lambda u: (80, 120),
'_phenomenonDateTime': -1,
'table2Version': 9999}
attributes.update(kwargs)
message = mock.Mock(**attributes)
self._test_for_coord(message, convert, self.is_forecast_period,
expected_points=[35],
expected_bounds=[[15, 55]])
self._test_for_coord(message, convert, self.is_time,
expected_points=[100],
expected_bounds=[[80, 120]])
def test_time_range_indicator_2(self):
self.assert_bounded_message(timeRangeIndicator=2)
def test_time_range_indicator_3(self):
self.assert_bounded_message(timeRangeIndicator=3)
def test_time_range_indicator_4(self):
self.assert_bounded_message(timeRangeIndicator=4)
def test_time_range_indicator_5(self):
self.assert_bounded_message(timeRangeIndicator=5)
def test_time_range_indicator_51(self):
self.assert_bounded_message(timeRangeIndicator=51)
def test_time_range_indicator_113(self):
self.assert_bounded_message(timeRangeIndicator=113)
def test_time_range_indicator_114(self):
self.assert_bounded_message(timeRangeIndicator=114)
def test_time_range_indicator_115(self):
self.assert_bounded_message(timeRangeIndicator=115)
def test_time_range_indicator_116(self):
self.assert_bounded_message(timeRangeIndicator=116)
def test_time_range_indicator_117(self):
self.assert_bounded_message(timeRangeIndicator=117)
def test_time_range_indicator_118(self):
self.assert_bounded_message(timeRangeIndicator=118)
def test_time_range_indicator_123(self):
self.assert_bounded_message(timeRangeIndicator=123)
def test_time_range_indicator_124(self):
self.assert_bounded_message(timeRangeIndicator=124)
def test_time_range_indicator_125(self):
self.assert_bounded_message(timeRangeIndicator=125)
def test_product_template_8(self):
self.assert_bounded_message(edition=2,
productDefinitionTemplateNumber=8)
def test_product_template_9(self):
self.assert_bounded_message(edition=2,
productDefinitionTemplateNumber=9)
class Test_GribLevels(tests.IrisTest):
def test_grib1_hybrid_height(self):
gm = gribapi.grib_new_from_samples('regular_gg_ml_grib1')
gw = GribWrapper(gm)
results = convert(gw)
factory, = results[0]
self.assertEqual(factory.factory_class,
iris.aux_factory.HybridPressureFactory)
delta, sigma, ref = factory.args
self.assertEqual(delta, {'long_name': 'level_pressure'})
self.assertEqual(sigma, {'long_name': 'sigma'})
self.assertEqual(ref, Reference(name='surface_pressure'))
ml_ref = iris.coords.CoordDefn('model_level_number', None, None,
iris.unit.Unit('1'),
{'positive': 'up'}, None)
lp_ref = iris.coords.CoordDefn(None, 'level_pressure', None,
iris.unit.Unit('Pa'),
{}, None)
s_ref = iris.coords.CoordDefn(None, 'sigma', None,
iris.unit.Unit('1'),
{}, None)
aux_coord_defns = [coord._as_defn() for coord, dim in results[8]]
self.assertIn(ml_ref, aux_coord_defns)
self.assertIn(lp_ref, aux_coord_defns)
self.assertIn(s_ref, aux_coord_defns)
if __name__ == "__main__":
tests.main()
| gpl-3.0 |
thanhacun/odoo | addons/membership/wizard/__init__.py | 432 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import membership_invoice
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tomfotherby/tower-cli | tests/test_utils_command.py | 4 | 1663 | # Copyright 2015, Ansible, Inc.
# Luke Sneeringer <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from click.testing import CliRunner
from tower_cli.utils.command import Command
from tests.compat import unittest
class CommandTests(unittest.TestCase):
"""A set of tests to ensure that the tower_cli Command class works
in the way we expect.
"""
def setUp(self):
self.runner = CliRunner()
def test_dash_dash_help(self):
"""Establish that no_args_is_help causes the help to be printed,
and an exit.
"""
# Create a command with which to test.
@click.command(no_args_is_help=True, cls=Command)
@click.argument('parrot')
def foo(parrot):
click.echo(parrot)
# Establish that this command echos if called with echo.
self.assertEqual(self.runner.invoke(foo, ['bar']).output, 'bar\n')
# Establish that this command sends help if called with nothing.
result = self.runner.invoke(foo)
self.assertIn('--help', result.output)
self.assertIn('Show this message and exit.\n', result.output)
| apache-2.0 |
barbour-em/osf.io | scripts/analytics/addons.py | 15 | 2140 | # -*- coding: utf-8 -*-
import os
import re
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from website.app import init_app
from .utils import plot_dates, oid_to_datetime, mkdirp
log_collection = database['nodelog']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'addons')
mkdirp(FIG_PATH)
ADDONS = [
'github',
's3',
'figshare',
'dropbox',
'dataverse',
]
def get_collection_datetimes(collection, _id='_id', query=None):
query = query or {}
return [
oid_to_datetime(record[_id])
for record in collection.find({}, {_id: True})
]
def analyze_model(model):
dates = get_collection_datetimes(model._storage[0].store)
return {
'dates': dates,
'count': len(dates),
}
def analyze_addon_installs(name):
config = settings.ADDONS_AVAILABLE_DICT[name]
results = {
key: analyze_model(model)
for key, model in config.settings_models.iteritems()
}
return results
def analyze_addon_logs(name):
pattern = re.compile('^{0}'.format(name), re.I)
logs = log_collection.find({'action': {'$regex': pattern}}, {'date': True})
return [
record['date']
for record in logs
]
def analyze_addon(name):
installs = analyze_addon_installs(name)
for model, result in installs.iteritems():
if not result['dates']:
continue
fig = plot_dates(result['dates'])
plt.title('{} configurations: {} ({} total)'.format(name, model, len(result['dates'])))
plt.savefig(os.path.join(FIG_PATH, '{}-installs-{}.png'.format(name, model)))
plt.close()
log_dates = analyze_addon_logs(name)
if not log_dates:
return
fig = plot_dates(log_dates)
plt.title('{} actions ({} total)'.format(name, len(log_dates)))
plt.savefig(os.path.join(FIG_PATH, '{}-actions.png'.format(name)))
plt.close()
def main():
init_app(routes=False)
for addon in ADDONS:
if addon in settings.ADDONS_AVAILABLE_DICT:
analyze_addon(addon)
if __name__ == '__main__':
main()
| apache-2.0 |
saurabhjn76/sympy | sympy/parsing/maxima.py | 105 | 1740 | from __future__ import print_function, division
import re
from sympy import sympify, Sum, product, sin, cos
class MaximaHelpers:
def maxima_expand(expr):
return expr.expand()
def maxima_float(expr):
return expr.evalf()
def maxima_trigexpand(expr):
return expr.expand(trig=True)
def maxima_sum(a1, a2, a3, a4):
return Sum(a1, (a2, a3, a4)).doit()
def maxima_product(a1, a2, a3, a4):
return product(a1, (a2, a3, a4))
def maxima_csc(expr):
return 1/sin(expr)
def maxima_sec(expr):
return 1/cos(expr)
sub_dict = {
'pi': re.compile('%pi'),
'E': re.compile('%e'),
'I': re.compile('%i'),
'**': re.compile('\^'),
'oo': re.compile(r'\binf\b'),
'-oo': re.compile(r'\bminf\b'),
"'-'": re.compile(r'\bminus\b'),
'maxima_expand': re.compile(r'\bexpand\b'),
'maxima_float': re.compile(r'\bfloat\b'),
'maxima_trigexpand': re.compile(r'\btrigexpand'),
'maxima_sum': re.compile(r'\bsum\b'),
'maxima_product': re.compile(r'\bproduct\b'),
'cancel': re.compile(r'\bratsimp\b'),
'maxima_csc': re.compile(r'\bcsc\b'),
'maxima_sec': re.compile(r'\bsec\b')
}
var_name = re.compile('^\s*(\w+)\s*:')
def parse_maxima(str, globals=None, name_dict={}):
str = str.strip()
str = str.rstrip('; ')
for k, v in sub_dict.items():
str = v.sub(k, str)
assign_var = None
var_match = var_name.search(str)
if var_match:
assign_var = var_match.group(1)
str = str[var_match.end():].strip()
dct = MaximaHelpers.__dict__.copy()
dct.update(name_dict)
obj = sympify(str, locals=dct)
if assign_var and globals:
globals[assign_var] = obj
return obj
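if __name__ == '__main__':
    # A small illustrative demo of parse_maxima (a sketch; the expressions below
    # exercise the substitutions in sub_dict and the MaximaHelpers functions).
    print(parse_maxima('sum(k, k, 1, 10)'))         # -> 55
    print(parse_maxima('float(%pi)'))               # -> 3.14159265358979
    print(parse_maxima('csc(%pi/3) + sec(%pi/3)'))  # -> 2 + 2*sqrt(3)/3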
| bsd-3-clause |
hamiltont/CouchPotatoServer | libs/git/repository.py | 109 | 20401 | # Copyright (c) 2009, Rotem Yaari <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Rotem Yaari ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Rotem Yaari BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from collections import Sequence
import re
import os
import subprocess
import sys
from . import branch
from . import tag
from . import commit
from . import config
from .files import ModifiedFile
from . import ref
from . import ref_container
from . import remotes
from .utils import quote_for_shell
from .utils import CommandString as CMD
#exceptions
from .exceptions import CannotFindRepository
from .exceptions import GitException
from .exceptions import GitCommandFailedException
from .exceptions import MergeConflict
from .exceptions import NonexistentRefException
BRANCH_ALIAS_MARKER = ' -> '
class Repository(ref_container.RefContainer):
_git_command = None
def setCommand(self, command):
self._git_command = command
############################# internal methods #############################
_loggingEnabled = False
def _getWorkingDirectory(self):
return '.'
def _logGitCommand(self, command, cwd):
if self._loggingEnabled:
print >> sys.stderr, ">>", command
def enableLogging(self):
self._loggingEnabled = True
def disableLogging(self):
self._loggingEnabled = False
def _executeGitCommand(self, command, cwd = None):
if cwd is None:
cwd = self._getWorkingDirectory()
command = '%s %s' % (self._git_command, str(command))
self._logGitCommand(command, cwd)
returned = subprocess.Popen(command,
shell = True,
cwd = cwd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
returned.wait()
return returned
def _executeGitCommandAssertSuccess(self, command, **kwargs):
returned = self._executeGitCommand(command, **kwargs)
assert returned.returncode is not None
if returned.returncode != 0:
raise GitCommandFailedException(kwargs.get('cwd', self._getWorkingDirectory()), command, returned)
return returned
def _getOutputAssertSuccess(self, command, **kwargs):
return self._executeGitCommandAssertSuccess(command, **kwargs).stdout.read()
def _getMergeBase(self, a, b):
raise NotImplementedError()
def getMergeBase(self, a, b):
repo = self
if isinstance(b, commit.Commit) and isinstance(b.repo, LocalRepository):
repo = b.repo
elif isinstance(a, commit.Commit) and isinstance(a.repo, LocalRepository):
repo = a.repo
return repo._getMergeBase(a, b)
############################## remote repositories #############################
class RemoteRepository(Repository):
def __init__(self, url, command = 'git'):
self.setCommand(command)
super(RemoteRepository, self).__init__()
self.url = url
def _getRefs(self, prefix = ''):
output = self._executeGitCommandAssertSuccess("ls-remote %s" % (self.url,))
for output_line in output.stdout:
commit, refname = output_line.split()
if refname.startswith(prefix):
yield refname[len(prefix):], commit.strip()
def _getRefsAsClass(self, prefix, cls):
return [cls(self, ref) for ref, _ in self._getRefs(prefix)]
def _getCommitByRefName(self, refname):
sha_by_ref = dict(self._getRefs())
for prefix in 'refs/tags/', 'refs/heads/':
sha = sha_by_ref.get(prefix + refname, None)
if sha is not None:
return commit.Commit(self, sha)
raise NonexistentRefException("Cannot find ref name %r in %s" % (refname, self))
def getBranches(self):
return self._getRefsAsClass('refs/heads/', branch.RemoteBranch)
def getTags(self):
return self._getRefsAsClass('refs/tags/', tag.RemoteTag)
############################## local repositories ##############################
class LocalRepository(Repository):
def __init__(self, path, command = 'git'):
self.setCommand(command)
super(LocalRepository, self).__init__()
self.path = path
self.config = config.GitConfiguration(self)
self._version = None
def __repr__(self):
return "<Git Repository at %s>" % (self.path,)
def _getWorkingDirectory(self):
return self.path
def _getCommitByHash(self, sha):
return commit.Commit(self, sha)
def _getCommitByRefName(self, name):
return commit.Commit(self, self._getOutputAssertSuccess("rev-parse %s" % name).strip())
def _getCommitByPartialHash(self, sha):
return self._getCommitByRefName(sha)
def getGitVersion(self):
if self._version is None:
version_output = self._getOutputAssertSuccess("version")
version_match = re.match(r"git\s+version\s+(\S+)[\s\(]?", version_output, re.I)
if version_match is None:
raise GitException("Cannot extract git version (unfamiliar output format %r?)" % version_output)
self._version = version_match.group(1)
return self._version
########################### Initializing a repository ##########################
def init(self, bare = False):
if not os.path.exists(self.path):
os.mkdir(self.path)
if not os.path.isdir(self.path):
raise GitException("Cannot create repository in %s - "
"not a directory" % self.path)
self._executeGitCommandAssertSuccess("init %s" % ("--bare" if bare else ""))
def _asURL(self, repo):
if isinstance(repo, LocalRepository):
repo = repo.path
elif isinstance(repo, RemoteRepository):
repo = repo.url
elif not isinstance(repo, basestring):
raise TypeError("Cannot clone from %r" % (repo,))
return repo
def clone(self, repo):
self._executeGitCommandAssertSuccess("clone %s %s" % (self._asURL(repo), self.path), cwd = ".")
########################### Querying repository refs ###########################
def getBranches(self):
returned = []
for git_branch_line in self._executeGitCommandAssertSuccess("branch").stdout:
if git_branch_line.startswith("*"):
git_branch_line = git_branch_line[1:]
git_branch_line = git_branch_line.strip()
if BRANCH_ALIAS_MARKER in git_branch_line:
alias_name, aliased = git_branch_line.split(BRANCH_ALIAS_MARKER)
returned.append(branch.LocalBranchAlias(self, alias_name, aliased))
else:
returned.append(branch.LocalBranch(self, git_branch_line))
return returned
def getTags(self):
returned = []
for git_tag_line in self._executeGitCommandAssertSuccess("tag").stdout:
returned.append(tag.LocalTag(self, git_tag_line.strip()))
return returned
def _getCommits(self, specs, includeMerges):
command = "log --pretty=format:%%H %s" % specs
if not includeMerges:
command += " --no-merges"
for c in self._executeGitCommandAssertSuccess(command).stdout:
yield commit.Commit(self, c.strip())
def getCommits(self, start = None, end = "HEAD", includeMerges = True):
spec = self._normalizeRefName(start or "")
spec += ".."
spec += self._normalizeRefName(end)
return list(self._getCommits(spec, includeMerges = includeMerges))
def getCurrentBranch(self):
#todo: improve this method of obtaining current branch
for branch_name in self._executeGitCommandAssertSuccess("branch").stdout:
branch_name = branch_name.strip()
if not branch_name.startswith("*"):
continue
branch_name = branch_name[1:].strip()
if branch_name == '(no branch)':
return None
return self.getBranchByName(branch_name)
def getRemotes(self):
config_dict = self.config.getDict()
returned = []
for line in self._getOutputAssertSuccess("remote show -n").splitlines():
line = line.strip()
returned.append(remotes.Remote(self, line, config_dict.get('remote.%s.url' % line.strip())))
return returned
def getRemoteByName(self, name):
return self._getByName(self.getRemotes, name)
def _getMergeBase(self, a, b):
if isinstance(a, ref.Ref):
a = a.getHead()
if isinstance(b, ref.Ref):
b = b.getHead()
returned = self._executeGitCommand("merge-base %s %s" % (a, b))
if returned.returncode == 0:
return commit.Commit(self, returned.stdout.read().strip())
# make sure this is not a misc. error with git
unused = self.getHead()
return None
################################ Querying Status ###############################
def containsCommit(self, commit):
try:
self._executeGitCommandAssertSuccess("log -1 %s" % (commit,))
except GitException:
return False
return True
def getHead(self):
return self._getCommitByRefName("HEAD")
def _getFiles(self, *flags):
flags = ["--exclude-standard"] + list(flags)
return [f.strip()
for f in self._getOutputAssertSuccess("ls-files %s" % (" ".join(flags))).splitlines()]
def _getRawDiff(self, *flags, **options):
match_statuses = options.pop('fileStatuses', None)
if match_statuses is not None and not isinstance(match_statuses, Sequence):
raise ValueError("matchedStatuses must be a sequence")
if options:
raise TypeError("Unknown arguments specified: %s" % ", ".join(options))
flags = " ".join(str(f) for f in flags)
modified_files = []
for line in self._getOutputAssertSuccess("diff --raw %s" % flags).splitlines():
file_status = line.split()[-2]
file_name = line.split()[-1]
if match_statuses is None or file_status in match_statuses:
modified_files.append(ModifiedFile(file_name))
return modified_files
def getStagedFiles(self):
if self.isInitialized():
return self._getRawDiff('--cached')
return self._getFiles()
def getUnchangedFiles(self):
return self._getFiles()
def getChangedFiles(self):
return self._getRawDiff()
def getDeletedFiles(self):
return self._getRawDiff(fileStatuses = ['D'])
def getUntrackedFiles(self):
return self._getFiles("--others")
def isInitialized(self):
try:
self.getHead()
return True
except GitException:
return False
def isValid(self):
return os.path.isdir(os.path.join(self.path, ".git")) or \
(os.path.isfile(os.path.join(self.path, "HEAD")) and os.path.isdir(os.path.join(self.path, "objects")))
def isWorkingDirectoryClean(self):
return not (self.getUntrackedFiles() or self.getChangedFiles() or self.getStagedFiles())
def __contains__(self, thing):
if isinstance(thing, basestring) or isinstance(thing, commit.Commit):
return self.containsCommit(thing)
raise NotImplementedError()
################################ Staging content ###############################
def add(self, path):
self._executeGitCommandAssertSuccess("add %s" % quote_for_shell(path))
def delete(self, path, recursive = False, force = False):
flags = ""
if recursive:
flags += "-r "
if force:
flags += "-f "
self._executeGitCommandAssertSuccess("rm %s%s" % (flags, quote_for_shell(path)))
def addAll(self):
return self.add('.')
################################## Committing ##################################
def _normalizeRefName(self, thing):
if isinstance(thing, ref.Ref):
thing = thing.getNormalizedName()
return str(thing)
def _deduceNewCommitFromCommitOutput(self, output):
for pattern in [
# new-style commit pattern
r"^\[\S+\s+(?:\(root-commit\)\s+)?(\S+)\]",
]:
match = re.search(pattern, output)
if match:
return commit.Commit(self, match.group(1))
return None
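        # For reference, a typical commit output line matched by the pattern above
        # looks like "[master 7b9c0a1] Commit message" (or, for the first commit,
        # "[master (root-commit) 7b9c0a1] ..."), and the short hash "7b9c0a1" is
        # the captured group used to build the Commit object.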
def commit(self, message, allowEmpty = False, commitAll = False):
args = ''
if commitAll:
args = args + '--all'
command = "commit %s -m %s" % (args, quote_for_shell(message))
if allowEmpty:
command += " --allow-empty"
output = self._getOutputAssertSuccess(command)
return self._deduceNewCommitFromCommitOutput(output)
################################ Changing state ################################
def _createBranchOrTag(self, objname, name, startingPoint, returned_class):
command = "%s %s " % (objname, name)
if startingPoint is not None:
command += self._normalizeRefName(startingPoint)
self._executeGitCommandAssertSuccess(command)
return returned_class(self, name)
def createBranch(self, name, startingPoint = None):
return self._createBranchOrTag('branch', name, startingPoint, branch.LocalBranch)
def createTag(self, name, startingPoint = None):
return self._createBranchOrTag('tag', name, startingPoint, tag.LocalTag)
def checkout(self, thing = None, targetBranch = None, files = ()):
if thing is None:
thing = ""
command = "checkout %s" % (self._normalizeRefName(thing),)
if targetBranch is not None:
command += " -b %s" % (targetBranch,)
if files:
command += " -- %s" % " ".join(files)
self._executeGitCommandAssertSuccess(command)
def mergeMultiple(self, srcs, allowFastForward = True, log = False, message = None):
try:
self._executeGitCommandAssertSuccess(CMD("merge",
" ".join(self._normalizeRefName(src) for src in srcs),
"--no-ff" if not allowFastForward else None,
"--log" if log else None,
("-m \"%s\"" % message) if message is not None else None))
except GitCommandFailedException, e:
# git-merge tends to ignore the stderr rule...
output = e.stdout + e.stderr
if 'conflict' in output.lower():
raise MergeConflict()
raise
def merge(self, src, *args, **kwargs):
return self.mergeMultiple([src], *args, **kwargs)
def _reset(self, flag, thing):
command = "reset %s %s" % (
flag,
self._normalizeRefName(thing))
self._executeGitCommandAssertSuccess(command)
def resetSoft(self, thing = "HEAD"):
return self._reset("--soft", thing)
def resetHard(self, thing = "HEAD"):
return self._reset("--hard", thing)
def resetMixed(self, thing = "HEAD"):
return self._reset("--mixed", thing)
def _clean(self, flags):
self._executeGitCommandAssertSuccess("clean -q " + flags)
def cleanIgnoredFiles(self):
"""Cleans files that match the patterns in .gitignore"""
return self._clean("-f -X")
def cleanUntrackedFiles(self):
return self._clean("-f -d")
################################# collaboration ################################
def addRemote(self, name, url):
self._executeGitCommandAssertSuccess("remote add %s %s" % (name, url))
return remotes.Remote(self, name, url)
def fetch(self, repo = None):
command = "fetch"
if repo is not None:
command += " "
command += self._asURL(repo)
self._executeGitCommandAssertSuccess(command)
def pull(self, repo = None):
command = "pull"
if repo is not None:
command += " "
command += self._asURL(repo)
self._executeGitCommandAssertSuccess(command)
def _getRefspec(self, fromBranch = None, toBranch = None, force = False):
returned = ""
if fromBranch is not None:
returned += self._normalizeRefName(fromBranch)
if returned or toBranch is not None:
returned += ":"
if toBranch is not None:
if isinstance(toBranch, branch.RegisteredRemoteBranch):
toBranch = toBranch.name
returned += self._normalizeRefName(toBranch)
if returned and force:
returned = "+%s" % returned
return returned
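        # Illustrative results of the refspec construction above:
        #   _getRefspec(fromBranch="dev", toBranch="master")             -> "dev:master"
        #   _getRefspec(fromBranch="dev", toBranch="master", force=True) -> "+dev:master"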
def push(self, remote = None, fromBranch = None, toBranch = None, force = False):
command = "push"
#build push arguments
refspec = self._getRefspec(toBranch = toBranch, fromBranch = fromBranch, force = force)
if refspec and not remote:
remote = "origin"
if isinstance(remote, remotes.Remote):
remote = remote.name
elif isinstance(remote, RemoteRepository):
remote = remote.url
elif isinstance(remote, LocalRepository):
remote = remote.path
if remote is not None and not isinstance(remote, basestring):
raise TypeError("Invalid type for 'remote' parameter: %s" % (type(remote),))
command = "push %s %s" % (remote if remote is not None else "", refspec)
self._executeGitCommandAssertSuccess(command)
def rebase(self, src):
self._executeGitCommandAssertSuccess("rebase %s" % self._normalizeRefName(src))
#################################### Stashes ###################################
def saveStash(self, name = None):
command = "stash save"
if name is not None:
command += " %s" % name
self._executeGitCommandAssertSuccess(command)
def popStash(self, arg = None):
command = "stash pop"
if arg is not None:
command += " %s" % arg
self._executeGitCommandAssertSuccess(command)
################################# Configuration ################################
################################### Shortcuts ##################################
def clone(source, location):
returned = LocalRepository(location)
returned.clone(source)
return returned
def find_repository():
orig_path = path = os.path.realpath('.')
drive, path = os.path.splitdrive(path)
while path:
current_path = os.path.join(drive, path)
current_repo = LocalRepository(current_path)
if current_repo.isValid():
return current_repo
path, path_tail = os.path.split(current_path)
if not path_tail:
raise CannotFindRepository("Cannot find repository for %s" % (orig_path,))
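# Illustrative usage sketch (assumes a git binary on PATH; the paths and URL
# below are placeholders, not part of this module):
#
#     repo = find_repository()            # locate the repository containing the cwd
#     repo.add("README.txt")
#     repo.commit("Add readme")
#     print(repo.getCurrentBranch())
#
#     other = clone("git://example.com/project.git", "/tmp/project")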
| gpl-3.0 |
JCBarahona/edX | lms/djangoapps/instructor_task/tasks_helper.py | 1 | 69010 | """
This file contains tasks that are designed to perform background operations on the
running state of a course.
"""
import json
import re
from collections import OrderedDict
from datetime import datetime
from django.conf import settings
from eventtracking import tracker
from itertools import chain
from time import time
import unicodecsv
import logging
from celery import Task, current_task
from celery.states import SUCCESS, FAILURE
from django.contrib.auth.models import User
from django.core.files.storage import DefaultStorage
from django.db import transaction, reset_queries
from django.db.models import Q
import dogstats_wrapper as dog_stats_api
from pytz import UTC
from StringIO import StringIO
from edxmako.shortcuts import render_to_string
from instructor.paidcourse_enrollment_report import PaidCourseEnrollmentReportProvider
from shoppingcart.models import (
PaidCourseRegistration, CourseRegCodeItem, InvoiceTransaction,
Invoice, CouponRedemption, RegistrationCodeRedemption, CourseRegistrationCode
)
from survey.models import SurveyAnswer
from track.views import task_track
from util.file import course_filename_prefix_generator, UniversalNewlineIterator
from xblock.runtime import KvsFieldData
from xmodule.modulestore.django import modulestore
from xmodule.split_test_module import get_split_user_partitions
from django.utils.translation import ugettext as _
from certificates.models import (
CertificateWhitelist,
certificate_info_for_user,
CertificateStatuses
)
from certificates.api import generate_user_certificates
from courseware.courses import get_course_by_id, get_problems_in_section
from courseware.grades import iterate_grades_for
from courseware.models import StudentModule
from courseware.model_data import DjangoKeyValueStore, FieldDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_analytics.basic import (
enrolled_students_features,
get_proctored_exam_results,
list_may_enroll,
list_problem_responses
)
from instructor_analytics.csvs import format_dictlist
from instructor_task.models import ReportStore, InstructorTask, PROGRESS
from lms.djangoapps.lms_xblock.runtime import LmsPartitionService
from openedx.core.djangoapps.course_groups.cohorts import get_cohort
from openedx.core.djangoapps.course_groups.models import CourseUserGroup
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from opaque_keys.edx.keys import UsageKey
from openedx.core.djangoapps.course_groups.cohorts import add_user_to_cohort, is_course_cohorted
from student.models import CourseEnrollment, CourseAccessRole
from teams.models import CourseTeamMembership
from verify_student.models import SoftwareSecurePhotoVerification
# define different loggers for use within tasks and on client side
TASK_LOG = logging.getLogger('edx.celery.task')
# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'
FILTERED_OUT_ROLES = ['staff', 'instructor', 'finance_admin', 'sales_admin']
# define values for update functions to use to return status to perform_module_state_update
UPDATE_STATUS_SUCCEEDED = 'succeeded'
UPDATE_STATUS_FAILED = 'failed'
UPDATE_STATUS_SKIPPED = 'skipped'
# The setting name used for events when "settings" (account settings, preferences, profile information) change.
REPORT_REQUESTED_EVENT_NAME = u'edx.instructor.report.requested'
class BaseInstructorTask(Task):
"""
Base task class for use with InstructorTask models.
Permits updating information about task in corresponding InstructorTask for monitoring purposes.
Assumes that the entry_id of the InstructorTask model is the first argument to the task.
The `entry_id` is the primary key for the InstructorTask entry representing the task. This class
    updates the entry on success and failure of the task it wraps.  It sets the entry's
    task_state to the value Celery would assign once the task returns to Celery:
    FAILURE if an exception is encountered, and SUCCESS if it returns normally.
    Other arguments are pass-throughs to perform_module_state_update, and are documented there.
"""
abstract = True
def on_success(self, task_progress, task_id, args, kwargs):
"""
Update InstructorTask object corresponding to this task with info about success.
Updates task_output and task_state. But it shouldn't actually do anything
if the task is only creating subtasks to actually do the work.
Assumes `task_progress` is a dict containing the task's result, with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
This is JSON-serialized and stored in the task_output column of the InstructorTask entry.
"""
TASK_LOG.debug('Task %s: success returned with progress: %s', task_id, task_progress)
# We should be able to find the InstructorTask object to update
# based on the task_id here, without having to dig into the
# original args to the task. On the other hand, the entry_id
# is the first value passed to all such args, so we'll use that.
# And we assume that it exists, else we would already have had a failure.
entry_id = args[0]
entry = InstructorTask.objects.get(pk=entry_id)
# Check to see if any subtasks had been defined as part of this task.
# If not, then we know that we're done. (If so, let the subtasks
# handle updating task_state themselves.)
if len(entry.subtasks) == 0:
entry.task_output = InstructorTask.create_output_for_success(task_progress)
entry.task_state = SUCCESS
entry.save_now()
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
Update InstructorTask object corresponding to this task with info about failure.
Fetches and updates exception and traceback information on failure.
If an exception is raised internal to the task, it is caught by celery and provided here.
The information is recorded in the InstructorTask object as a JSON-serialized dict
stored in the task_output column. It contains the following keys:
'exception': type of exception object
'message': error message from exception object
'traceback': traceback information (truncated if necessary)
Note that there is no way to record progress made within the task (e.g. attempted,
succeeded, etc.) when such failures occur.
"""
TASK_LOG.debug(u'Task %s: failure returned', task_id)
entry_id = args[0]
try:
entry = InstructorTask.objects.get(pk=entry_id)
except InstructorTask.DoesNotExist:
# if the InstructorTask object does not exist, then there's no point
# trying to update it.
TASK_LOG.error(u"Task (%s) has no InstructorTask object for id %s", task_id, entry_id)
else:
TASK_LOG.warning(u"Task (%s) failed", task_id, exc_info=True)
entry.task_output = InstructorTask.create_output_for_failure(einfo.exception, einfo.traceback)
entry.task_state = FAILURE
entry.save_now()
class UpdateProblemModuleStateError(Exception):
"""
Error signaling a fatal condition while updating problem modules.
Used when the current module cannot be processed and no more
modules should be attempted.
"""
pass
def _get_current_task():
"""
Stub to make it easier to test without actually running Celery.
This is a wrapper around celery.current_task, which provides access
to the top of the stack of Celery's tasks. When running tests, however,
it doesn't seem to work to mock current_task directly, so this wrapper
is used to provide a hook to mock in tests, while providing the real
`current_task` in production.
"""
return current_task
class TaskProgress(object):
"""
Encapsulates the current task's progress by keeping track of
'attempted', 'succeeded', 'skipped', 'failed', 'total',
'action_name', and 'duration_ms' values.
"""
def __init__(self, action_name, total, start_time):
self.action_name = action_name
self.total = total
self.start_time = start_time
self.attempted = 0
self.succeeded = 0
self.skipped = 0
self.failed = 0
def update_task_state(self, extra_meta=None):
"""
Update the current celery task's state to the progress state
specified by the current object. Returns the progress
dictionary for use by `run_main_task` and
`BaseInstructorTask.on_success`.
Arguments:
extra_meta (dict): Extra metadata to pass to `update_state`
Returns:
dict: The current task's progress dict
"""
progress_dict = {
'action_name': self.action_name,
'attempted': self.attempted,
'succeeded': self.succeeded,
'skipped': self.skipped,
'failed': self.failed,
'total': self.total,
'duration_ms': int((time() - self.start_time) * 1000),
}
if extra_meta is not None:
progress_dict.update(extra_meta)
_get_current_task().update_state(state=PROGRESS, meta=progress_dict)
return progress_dict
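# Illustrative use of TaskProgress (a sketch, not drawn from a specific task):
#
#     progress = TaskProgress('graded', total=100, start_time=time())
#     progress.attempted += 1
#     progress.succeeded += 1
#     progress.update_task_state(extra_meta={'step': 'Grading'})
#
# which reports a dict such as
#     {'action_name': 'graded', 'attempted': 1, 'succeeded': 1, 'skipped': 0,
#      'failed': 0, 'total': 100, 'duration_ms': ..., 'step': 'Grading'}
# to Celery via update_state(state=PROGRESS, meta=...).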
def run_main_task(entry_id, task_fcn, action_name):
"""
Applies the `task_fcn` to the arguments defined in `entry_id` InstructorTask.
Arguments passed to `task_fcn` are:
`entry_id` : the primary key for the InstructorTask entry representing the task.
`course_id` : the id for the course.
`task_input` : dict containing task-specific arguments, JSON-decoded from InstructorTask's task_input.
`action_name` : past-tense verb to use for constructing status messages.
If no exceptions are raised, the `task_fcn` should return a dict containing
the task's result with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible subtasks to attempt
'action_name': user-visible verb to use in status messages.
Should be past-tense. Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
"""
# Get the InstructorTask to be updated. If this fails then let the exception return to Celery.
# There's no point in catching it here.
entry = InstructorTask.objects.get(pk=entry_id)
entry.task_state = PROGRESS
entry.save_now()
# Get inputs to use in this task from the entry
task_id = entry.task_id
course_id = entry.course_id
task_input = json.loads(entry.task_input)
# Construct log message
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(task_id=task_id, entry_id=entry_id, course_id=course_id, task_input=task_input)
TASK_LOG.info(u'%s, Starting update (nothing %s yet)', task_info_string, action_name)
# Check that the task_id submitted in the InstructorTask matches the current task
# that is running.
request_task_id = _get_current_task().request.id
if task_id != request_task_id:
fmt = u'{task_info}, Requested task did not match actual task "{actual_id}"'
message = fmt.format(task_info=task_info_string, actual_id=request_task_id)
TASK_LOG.error(message)
raise ValueError(message)
# Now do the work
with dog_stats_api.timer('instructor_tasks.time.overall', tags=[u'action:{name}'.format(name=action_name)]):
task_progress = task_fcn(entry_id, course_id, task_input, action_name)
# Release any queries that the connection has been hanging onto
reset_queries()
# Log and exit, returning task_progress info as task result
TASK_LOG.info(u'%s, Task type: %s, Finishing task: %s', task_info_string, action_name, task_progress)
return task_progress
def perform_module_state_update(update_fcn, filter_fcn, _entry_id, course_id, task_input, action_name):
"""
Performs generic update by visiting StudentModule instances with the update_fcn provided.
StudentModule instances are those that match the specified `course_id` and `module_state_key`.
If `student_identifier` is not None, it is used as an additional filter to limit the modules to those belonging
to that student. If `student_identifier` is None, performs update on modules for all students on the specified problem.
If a `filter_fcn` is not None, it is applied to the query that has been constructed. It takes one
argument, which is the query being filtered, and returns the filtered version of the query.
The `update_fcn` is called on each StudentModule that passes the resulting filtering.
    It is passed the module_descriptor for the module pointed to by the module_state_key and the
    particular StudentModule to update; any xmodule_instance_args are expected to have been bound
    into `update_fcn` in advance (e.g. via functools.partial).  The update function should return
    one of UPDATE_STATUS_SUCCEEDED, UPDATE_STATUS_FAILED, or UPDATE_STATUS_SKIPPED to indicate the
    outcome for that student module.
A raised exception indicates a fatal condition -- that no other student modules should be considered.
The return value is a dict containing the task's results, with the following keys:
'attempted': number of attempts made
'succeeded': number of attempts that "succeeded"
'skipped': number of attempts that "skipped"
'failed': number of attempts that "failed"
'total': number of possible updates to attempt
'action_name': user-visible verb to use in status messages. Should be past-tense.
Pass-through of input `action_name`.
'duration_ms': how long the task has (or had) been running.
Because this is run internal to a task, it does not catch exceptions. These are allowed to pass up to the
next level, so that it can set the failure modes and capture the error trace in the InstructorTask and the
result object.
"""
start_time = time()
usage_keys = []
problem_url = task_input.get('problem_url')
entrance_exam_url = task_input.get('entrance_exam_url')
student_identifier = task_input.get('student')
problems = {}
# if problem_url is present make a usage key from it
if problem_url:
usage_key = course_id.make_usage_key_from_deprecated_string(problem_url)
usage_keys.append(usage_key)
# find the problem descriptor:
problem_descriptor = modulestore().get_item(usage_key)
problems[unicode(usage_key)] = problem_descriptor
# if entrance_exam is present grab all problems in it
if entrance_exam_url:
problems = get_problems_in_section(entrance_exam_url)
usage_keys = [UsageKey.from_string(location) for location in problems.keys()]
# find the modules in question
modules_to_update = StudentModule.objects.filter(course_id=course_id, module_state_key__in=usage_keys)
# give the option of updating an individual student. If not specified,
# then updates all students who have responded to a problem so far
student = None
if student_identifier is not None:
# if an identifier is supplied, then look for the student,
# and let it throw an exception if none is found.
if "@" in student_identifier:
student = User.objects.get(email=student_identifier)
elif student_identifier is not None:
student = User.objects.get(username=student_identifier)
if student is not None:
modules_to_update = modules_to_update.filter(student_id=student.id)
if filter_fcn is not None:
modules_to_update = filter_fcn(modules_to_update)
task_progress = TaskProgress(action_name, modules_to_update.count(), start_time)
task_progress.update_task_state()
for module_to_update in modules_to_update:
task_progress.attempted += 1
module_descriptor = problems[unicode(module_to_update.module_state_key)]
# There is no try here: if there's an error, we let it throw, and the task will
# be marked as FAILED, with a stack trace.
with dog_stats_api.timer('instructor_tasks.module.time.step', tags=[u'action:{name}'.format(name=action_name)]):
update_status = update_fcn(module_descriptor, module_to_update)
if update_status == UPDATE_STATUS_SUCCEEDED:
# If the update_fcn returns true, then it performed some kind of work.
# Logging of failures is left to the update_fcn itself.
task_progress.succeeded += 1
elif update_status == UPDATE_STATUS_FAILED:
task_progress.failed += 1
elif update_status == UPDATE_STATUS_SKIPPED:
task_progress.skipped += 1
else:
raise UpdateProblemModuleStateError("Unexpected update_status returned: {}".format(update_status))
return task_progress.update_task_state()
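# A sketch of the update_fcn contract described above (hypothetical example; the
# real update functions appear later in this module and normally have
# xmodule_instance_args bound in advance, e.g. with functools.partial):
#
#     def my_update_fcn(module_descriptor, student_module):
#         ...  # inspect or mutate student_module
#         return UPDATE_STATUS_SUCCEEDED  # or UPDATE_STATUS_FAILED / UPDATE_STATUS_SKIPPED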
def _get_task_id_from_xmodule_args(xmodule_instance_args):
"""Gets task_id from `xmodule_instance_args` dict, or returns default value if missing."""
return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID) if xmodule_instance_args is not None else UNKNOWN_TASK_ID
def _get_xqueue_callback_url_prefix(xmodule_instance_args):
"""Gets prefix to use when constructing xqueue_callback_url."""
return xmodule_instance_args.get('xqueue_callback_url_prefix', '') if xmodule_instance_args is not None else ''
def _get_track_function_for_task(student, xmodule_instance_args=None, source_page='x_module_task'):
"""
Make a tracking function that logs what happened.
For insertion into ModuleSystem, and used by CapaModule, which will
provide the event_type (as string) and event (as dict) as arguments.
The request_info and task_info (and page) are provided here.
"""
# get request-related tracking information from args passthrough, and supplement with task-specific
# information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {'student': student.username, 'task_id': _get_task_id_from_xmodule_args(xmodule_instance_args)}
return lambda event_type, event: task_track(request_info, task_info, event_type, event, page=source_page)
def _get_module_instance_for_task(course_id, student, module_descriptor, xmodule_instance_args=None,
grade_bucket_type=None, course=None):
"""
Fetches a StudentModule instance for a given `course_id`, `student` object, and `module_descriptor`.
`xmodule_instance_args` is used to provide information for creating a track function and an XQueue callback.
These are passed, along with `grade_bucket_type`, to get_module_for_descriptor_internal, which sidesteps
the need for a Request object when instantiating an xmodule instance.
"""
# reconstitute the problem's corresponding XModule:
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(course_id, student, module_descriptor)
student_data = KvsFieldData(DjangoKeyValueStore(field_data_cache))
# get request-related tracking information from args passthrough, and supplement with task-specific
# information:
request_info = xmodule_instance_args.get('request_info', {}) if xmodule_instance_args is not None else {}
task_info = {"student": student.username, "task_id": _get_task_id_from_xmodule_args(xmodule_instance_args)}
def make_track_function():
'''
Make a tracking function that logs what happened.
For insertion into ModuleSystem, and used by CapaModule, which will
provide the event_type (as string) and event (as dict) as arguments.
The request_info and task_info (and page) are provided here.
'''
return lambda event_type, event: task_track(request_info, task_info, event_type, event, page='x_module_task')
xqueue_callback_url_prefix = xmodule_instance_args.get('xqueue_callback_url_prefix', '') \
if xmodule_instance_args is not None else ''
return get_module_for_descriptor_internal(
user=student,
descriptor=module_descriptor,
student_data=student_data,
course_id=course_id,
track_function=make_track_function(),
xqueue_callback_url_prefix=xqueue_callback_url_prefix,
grade_bucket_type=grade_bucket_type,
# This module isn't being used for front-end rendering
request_token=None,
# pass in a loaded course for override enabling
course=course
)
@transaction.autocommit
def rescore_problem_module_state(xmodule_instance_args, module_descriptor, student_module):
'''
Takes an XModule descriptor and a corresponding StudentModule object, and
performs rescoring on the student's problem submission.
Throws exceptions if the rescoring is fatal and should be aborted if in a loop.
In particular, raises UpdateProblemModuleStateError if module fails to instantiate,
or if the module doesn't support rescoring.
    Returns UPDATE_STATUS_SUCCEEDED if the problem was successfully rescored for the given
    student, and UPDATE_STATUS_FAILED if the rescoring attempt encountered some kind of error.
'''
# unpack the StudentModule:
course_id = student_module.course_id
student = student_module.student
usage_key = student_module.module_state_key
with modulestore().bulk_operations(course_id):
course = get_course_by_id(course_id)
# TODO: Here is a call site where we could pass in a loaded course. I
# think we certainly need it since grading is happening here, and field
# overrides would be important in handling that correctly
instance = _get_module_instance_for_task(
course_id,
student,
module_descriptor,
xmodule_instance_args,
grade_bucket_type='rescore',
course=course
)
if instance is None:
# Either permissions just changed, or someone is trying to be clever
# and load something they shouldn't have access to.
msg = "No module {loc} for student {student}--access denied?".format(
loc=usage_key,
student=student
)
TASK_LOG.debug(msg)
raise UpdateProblemModuleStateError(msg)
if not hasattr(instance, 'rescore_problem'):
# This should also not happen, since it should be already checked in the caller,
# but check here to be sure.
msg = "Specified problem does not support rescoring."
raise UpdateProblemModuleStateError(msg)
result = instance.rescore_problem()
instance.save()
if 'success' not in result:
# don't consider these fatal, but false means that the individual call didn't complete:
TASK_LOG.warning(
u"error processing rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: unexpected response %(msg)s",
dict(
msg=result,
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_FAILED
elif result['success'] not in ['correct', 'incorrect']:
TASK_LOG.warning(
u"error processing rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: %(msg)s",
dict(
msg=result['success'],
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_FAILED
else:
TASK_LOG.debug(
u"successfully processed rescore call for course %(course)s, problem %(loc)s "
u"and student %(student)s: %(msg)s",
dict(
msg=result['success'],
course=course_id,
loc=usage_key,
student=student
)
)
return UPDATE_STATUS_SUCCEEDED
@transaction.autocommit
def reset_attempts_module_state(xmodule_instance_args, _module_descriptor, student_module):
"""
Resets problem attempts to zero for specified `student_module`.
Returns a status of UPDATE_STATUS_SUCCEEDED if a problem has non-zero attempts
that are being reset, and UPDATE_STATUS_SKIPPED otherwise.
"""
update_status = UPDATE_STATUS_SKIPPED
problem_state = json.loads(student_module.state) if student_module.state else {}
if 'attempts' in problem_state:
old_number_of_attempts = problem_state["attempts"]
if old_number_of_attempts > 0:
problem_state["attempts"] = 0
# convert back to json and save
student_module.state = json.dumps(problem_state)
student_module.save()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
event_info = {"old_attempts": old_number_of_attempts, "new_attempts": 0}
track_function('problem_reset_attempts', event_info)
update_status = UPDATE_STATUS_SUCCEEDED
return update_status
@transaction.autocommit
def delete_problem_module_state(xmodule_instance_args, _module_descriptor, student_module):
"""
Delete the StudentModule entry.
Always returns UPDATE_STATUS_SUCCEEDED, indicating success, if it doesn't raise an exception due to database error.
"""
student_module.delete()
# get request-related tracking information from args passthrough,
# and supplement with task-specific information:
track_function = _get_track_function_for_task(student_module.student, xmodule_instance_args)
track_function('problem_delete_state', {})
return UPDATE_STATUS_SUCCEEDED
def upload_csv_to_report_store(rows, csv_name, course_id, timestamp, config_name='GRADES_DOWNLOAD'):
"""
Upload data as a CSV using ReportStore.
Arguments:
        rows: CSV data in the following format (first row may be a
            header):
            [
                [row1_column1, row1_column2, ...],
                ...
            ]
        csv_name: Name of the resulting CSV
        course_id: ID of the course
        timestamp: datetime used to timestamp the resulting filename
        config_name: ReportStore configuration to use (defaults to 'GRADES_DOWNLOAD')
"""
report_store = ReportStore.from_config(config_name)
report_store.store_rows(
course_id,
u"{course_prefix}_{csv_name}_{timestamp_str}.csv".format(
course_prefix=course_filename_prefix_generator(course_id),
csv_name=csv_name,
timestamp_str=timestamp.strftime("%Y-%m-%d-%H%M")
),
rows
)
tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": csv_name, })
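# For example (illustrative; `course_id` stands in for a real course key), a
# small grade report with a header row could be uploaded as:
#
#     upload_csv_to_report_store(
#         [['id', 'username', 'grade'], [42, 'student1', 0.95]],
#         'grade_report', course_id, datetime.now(UTC))
#
# which stores a file named like "<course_prefix>_grade_report_<YYYY-MM-DD-HHMM>.csv".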
def upload_exec_summary_to_store(data_dict, report_name, course_id, generated_at, config_name='FINANCIAL_REPORTS'):
"""
    Upload the Executive Summary HTML file using ReportStore.
    Arguments:
        data_dict: dict containing the executive summary report data
        report_name: Name of the resulting HTML file
        course_id: ID of the course
        generated_at: datetime used to timestamp the resulting filename
        config_name: ReportStore configuration to use (defaults to 'FINANCIAL_REPORTS')
"""
report_store = ReportStore.from_config(config_name)
# Use the data dict and html template to generate the output buffer
output_buffer = StringIO(render_to_string("instructor/instructor_dashboard_2/executive_summary.html", data_dict))
report_store.store(
course_id,
u"{course_prefix}_{report_name}_{timestamp_str}.html".format(
course_prefix=course_filename_prefix_generator(course_id),
report_name=report_name,
timestamp_str=generated_at.strftime("%Y-%m-%d-%H%M")
),
output_buffer,
config={
'content_type': 'text/html',
'content_encoding': None,
}
)
tracker.emit(REPORT_REQUESTED_EVENT_NAME, {"report_type": report_name})
def upload_grades_csv(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=too-many-statements
"""
For a given `course_id`, generate a grades CSV file for all students that
are enrolled, and store using a `ReportStore`. Once created, the files can
be accessed by instantiating another `ReportStore` (via
`ReportStore.from_config()`) and calling `link_for()` on it. Writes are
buffered, so we'll never write part of a CSV file to S3 -- i.e. any files
that are visible in ReportStore will be complete ones.
As we start to add more CSV downloads, it will probably be worthwhile to
make a more general CSVDoc class instead of building out the rows like we
do here.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
course = get_course_by_id(course_id)
course_is_cohorted = is_course_cohorted(course.id)
teams_enabled = course.teams_enabled
cohorts_header = ['Cohort Name'] if course_is_cohorted else []
teams_header = ['Team Name'] if teams_enabled else []
experiment_partitions = get_split_user_partitions(course.user_partitions)
group_configs_header = [u'Experiment Group ({})'.format(partition.name) for partition in experiment_partitions]
certificate_info_header = ['Certificate Eligible', 'Certificate Delivered', 'Certificate Type']
certificate_whitelist = CertificateWhitelist.objects.filter(course_id=course_id, whitelist=True)
whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]
# Loop over all our students and build our CSV lists in memory
header = None
rows = []
err_rows = [["id", "username", "error_msg"]]
current_step = {'step': 'Calculating Grades'}
total_enrolled_students = enrolled_students.count()
student_counter = 0
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Starting grade calculation for total students: %s',
task_info_string,
action_name,
current_step,
total_enrolled_students
)
for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students):
# Periodically update task status (this is a cache write)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# Now add a log entry after each student is graded to get a sense
# of the task's progress
student_counter += 1
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Grade calculation in-progress for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_enrolled_students
)
if gradeset:
# We were able to successfully grade this student for this course.
task_progress.succeeded += 1
if not header:
header = [section['label'] for section in gradeset[u'section_breakdown']]
rows.append(
["id", "email", "username", "grade"] + header + cohorts_header +
group_configs_header + teams_header +
['Enrollment Track', 'Verification Status'] + certificate_info_header
)
percents = {
section['label']: section.get('percent', 0.0)
for section in gradeset[u'section_breakdown']
if 'label' in section
}
cohorts_group_name = []
if course_is_cohorted:
group = get_cohort(student, course_id, assign=False)
cohorts_group_name.append(group.name if group else '')
group_configs_group_names = []
for partition in experiment_partitions:
group = LmsPartitionService(student, course_id).get_group(partition, assign=False)
group_configs_group_names.append(group.name if group else '')
team_name = []
if teams_enabled:
try:
membership = CourseTeamMembership.objects.get(user=student, team__course_id=course_id)
team_name.append(membership.team.name)
except CourseTeamMembership.DoesNotExist:
team_name.append('')
enrollment_mode = CourseEnrollment.enrollment_mode_for_user(student, course_id)[0]
verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
student,
course_id,
enrollment_mode
)
certificate_info = certificate_info_for_user(
student,
course_id,
gradeset['grade'],
student.id in whitelisted_user_ids
)
# Not everybody has the same gradable items. If the item is not
# found in the user's gradeset, just assume it's a 0. The aggregated
# grades for their sections and overall course will be calculated
# without regard for the item they didn't have access to, so it's
# possible for a student to have a 0.0 show up in their row but
# still have 100% for the course.
row_percents = [percents.get(label, 0.0) for label in header]
rows.append(
[student.id, student.email, student.username, gradeset['percent']] +
row_percents + cohorts_group_name + group_configs_group_names + team_name +
[enrollment_mode] + [verification_status] + certificate_info
)
else:
# An empty gradeset means we failed to grade a student.
task_progress.failed += 1
err_rows.append([student.id, student.username, err_msg])
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Grade calculation completed for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_enrolled_students
)
# By this point, we've got the rows we're going to stuff into our CSV files.
current_step = {'step': 'Uploading CSVs'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_csv_to_report_store(rows, 'grade_report', course_id, start_date)
# If there are any error rows (don't count the header), write them out as well
if len(err_rows) > 1:
upload_csv_to_report_store(err_rows, 'grade_report_err', course_id, start_date)
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing grade task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def _order_problems(blocks):
"""
Sort the problems by the assignment type and the assignment they belong to.
Args:
blocks (OrderedDict) - A course structure containing blocks that have been ordered
(i.e. when we iterate over them, we will see them in the order
that they appear in the course).
Returns:
an OrderedDict that maps a problem id to its headers in the final report.
"""
problems = OrderedDict()
assignments = dict()
# First, sort out all the blocks into their correct assignments and all the
# assignments into their correct types.
for block in blocks:
# Put the assignments in order into the assignments list.
if blocks[block]['block_type'] == 'sequential':
block_format = blocks[block]['format']
if block_format not in assignments:
assignments[block_format] = OrderedDict()
assignments[block_format][block] = list()
# Put the problems into the correct order within their assignment.
if blocks[block]['block_type'] == 'problem' and blocks[block]['graded'] is True:
current = blocks[block]['parent']
# crawl up the tree for the sequential block
while blocks[current]['block_type'] != 'sequential':
current = blocks[current]['parent']
current_format = blocks[current]['format']
assignments[current_format][current].append(block)
# Now that we have a sorting and an order for the assignments and problems,
# iterate through them in order to generate the header row.
for assignment_type in assignments:
for assignment_index, assignment in enumerate(assignments[assignment_type].keys(), start=1):
for problem in assignments[assignment_type][assignment]:
header_name = u"{assignment_type} {assignment_index}: {assignment_name} - {block}".format(
block=blocks[problem]['display_name'],
assignment_type=assignment_type,
assignment_index=assignment_index,
assignment_name=blocks[assignment]['display_name']
)
problems[problem] = [header_name + " (Earned)", header_name + " (Possible)"]
return problems
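# Illustrative sketch (not part of the original task module): for a hypothetical
# course with a single graded "Homework" sequential named "Week 1" containing one
# problem "P1", _order_problems would map that problem's block id to a pair of
# column headers such as:
#
#     problems['<p1-block-id>'] == ['Homework 1: Week 1 - P1 (Earned)',
#                                   'Homework 1: Week 1 - P1 (Possible)']
#
# These per-problem headers are what upload_problem_grade_report appends after
# the static student columns and 'Final Grade'.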
def upload_problem_responses_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing
all student answers to a given problem, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Calculating students answers to problem'}
task_progress.update_task_state(extra_meta=current_step)
# Compute result table and format it
problem_location = task_input.get('problem_location')
student_data = list_problem_responses(course_id, problem_location)
features = ['username', 'state']
header, rows = format_dictlist(student_data, features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
problem_location = re.sub(r'[:/]', '_', problem_location)
csv_name = 'student_state_from_{}'.format(problem_location)
upload_csv_to_report_store(rows, csv_name, course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def upload_problem_grade_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
Generate a CSV containing all students' problem grades within a given
`course_id`.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
# This OrderedDict uses the django User field names of the static header items
# as keys and their display names in the header row as values; keeping both in
# one structure keeps each field name paired with its label.
header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])
try:
course_structure = CourseStructure.objects.get(course_id=course_id)
blocks = course_structure.ordered_blocks
problems = _order_problems(blocks)
except CourseStructure.DoesNotExist:
return task_progress.update_task_state(
extra_meta={'step': 'Generating course structure. Please refresh and try again.'}
)
# Just generate the static fields for now.
rows = [list(header_row.values()) + ['Final Grade'] + list(chain.from_iterable(problems.values()))]
error_rows = [list(header_row.values()) + ['error_msg']]
current_step = {'step': 'Calculating Grades'}
for student, gradeset, err_msg in iterate_grades_for(course_id, enrolled_students, keep_raw_scores=True):
student_fields = [getattr(student, field_name) for field_name in header_row]
task_progress.attempted += 1
if 'percent' not in gradeset or 'raw_scores' not in gradeset:
# There was an error grading this student.
# Generally there will be a non-empty err_msg, but that is not always the case.
if not err_msg:
err_msg = u"Unknown error"
error_rows.append(student_fields + [err_msg])
task_progress.failed += 1
continue
final_grade = gradeset['percent']
# Only consider graded problems
problem_scores = {unicode(score.module_id): score for score in gradeset['raw_scores'] if score.graded}
earned_possible_values = list()
for problem_id in problems:
try:
problem_score = problem_scores[problem_id]
earned_possible_values.append([problem_score.earned, problem_score.possible])
except KeyError:
# The student has not been graded on this problem. For example,
# iterate_grades_for skips problems that students have never
# seen in order to speed up report generation. It could also be
# the case that the student does not have access to it (e.g. A/B
# test or cohorted courseware).
earned_possible_values.append(['N/A', 'N/A'])
rows.append(student_fields + [final_grade] + list(chain.from_iterable(earned_possible_values)))
task_progress.succeeded += 1
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload if any students have been successfully graded
if len(rows) > 1:
upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
# If there are any error rows, write them out as well
if len(error_rows) > 1:
upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)
return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
def upload_students_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing profile
information for all students that are enrolled, and store using a
`ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
current_step = {'step': 'Calculating Profile Info'}
task_progress.update_task_state(extra_meta=current_step)
# compute the student features table and format it
query_features = task_input.get('features')
student_data = enrolled_students_features(course_id, query_features)
header, rows = format_dictlist(student_data, query_features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(rows, 'student_profile_info', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def upload_enrollment_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
For a given `course_id`, generate a detailed enrollment report CSV (profile,
enrollment, and payment information) for all students who enrolled or later
dropped out, and store it using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
students_in_course = CourseEnrollment.objects.enrolled_and_dropped_out_users(course_id)
task_progress = TaskProgress(action_name, students_in_course.count(), start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
# Loop over all our students and build our CSV lists in memory
rows = []
header = None
current_step = {'step': 'Gathering Profile Information'}
enrollment_report_provider = PaidCourseEnrollmentReportProvider()
total_students = students_in_course.count()
student_counter = 0
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, generating detailed enrollment report for total students: %s',
task_info_string,
action_name,
current_step,
total_students
)
for student in students_in_course:
# Periodically update task status (this is a cache write)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# Now add a log entry after certain intervals to get a hint that the task is in progress
student_counter += 1
if student_counter % 100 == 0:
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, gathering enrollment profile for students in progress: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_students
)
user_data = enrollment_report_provider.get_user_profile(student.id)
course_enrollment_data = enrollment_report_provider.get_enrollment_info(student, course_id)
payment_data = enrollment_report_provider.get_payment_info(student, course_id)
# display name map for the column headers
enrollment_report_headers = {
'User ID': _('User ID'),
'Username': _('Username'),
'Full Name': _('Full Name'),
'First Name': _('First Name'),
'Last Name': _('Last Name'),
'Company Name': _('Company Name'),
'Title': _('Title'),
'Language': _('Language'),
'Year of Birth': _('Year of Birth'),
'Gender': _('Gender'),
'Level of Education': _('Level of Education'),
'Mailing Address': _('Mailing Address'),
'Goals': _('Goals'),
'City': _('City'),
'Country': _('Country'),
'Enrollment Date': _('Enrollment Date'),
'Currently Enrolled': _('Currently Enrolled'),
'Enrollment Source': _('Enrollment Source'),
'Enrollment Role': _('Enrollment Role'),
'List Price': _('List Price'),
'Payment Amount': _('Payment Amount'),
'Coupon Codes Used': _('Coupon Codes Used'),
'Registration Code Used': _('Registration Code Used'),
'Payment Status': _('Payment Status'),
'Transaction Reference Number': _('Transaction Reference Number')
}
if not header:
header = user_data.keys() + course_enrollment_data.keys() + payment_data.keys()
display_headers = []
for header_element in header:
# translate header into a localizable display string
display_headers.append(enrollment_report_headers.get(header_element, header_element))
rows.append(display_headers)
rows.append(user_data.values() + course_enrollment_data.values() + payment_data.values())
task_progress.succeeded += 1
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, Detailed enrollment report generated for students: %s/%s',
task_info_string,
action_name,
current_step,
student_counter,
total_students
)
# By this point, we've got the rows we're going to stuff into our CSV files.
current_step = {'step': 'Uploading CSVs'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_csv_to_report_store(rows, 'enrollment_report', course_id, start_date, config_name='FINANCIAL_REPORTS')
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing detailed enrollment task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def upload_may_enroll_csv(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing
information about students who may enroll but have not done so
yet, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Calculating info about students who may enroll'}
task_progress.update_task_state(extra_meta=current_step)
# Compute result table and format it
query_features = task_input.get('features')
student_data = list_may_enroll(course_id, query_features)
header, rows = format_dictlist(student_data, query_features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(rows, 'may_enroll_info', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def get_executive_report(course_id):
"""
Returns dict containing information about the course executive summary.
"""
single_purchase_total = PaidCourseRegistration.get_total_amount_of_purchased_item(course_id)
bulk_purchase_total = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id)
paid_invoices_total = InvoiceTransaction.get_total_amount_of_paid_course_invoices(course_id)
gross_paid_revenue = single_purchase_total + bulk_purchase_total + paid_invoices_total
all_invoices_total = Invoice.get_invoice_total_amount_for_course(course_id)
gross_pending_revenue = all_invoices_total - float(paid_invoices_total)
gross_revenue = float(gross_paid_revenue) + float(gross_pending_revenue)
refunded_self_purchased_seats = PaidCourseRegistration.get_self_purchased_seat_count(
course_id, status='refunded'
)
refunded_bulk_purchased_seats = CourseRegCodeItem.get_bulk_purchased_seat_count(
course_id, status='refunded'
)
total_seats_refunded = refunded_self_purchased_seats + refunded_bulk_purchased_seats
self_purchased_refunds = PaidCourseRegistration.get_total_amount_of_purchased_item(
course_id,
status='refunded'
)
bulk_purchase_refunds = CourseRegCodeItem.get_total_amount_of_purchased_item(course_id, status='refunded')
total_amount_refunded = self_purchased_refunds + bulk_purchase_refunds
top_discounted_codes = CouponRedemption.get_top_discount_codes_used(course_id)
total_coupon_codes_purchases = CouponRedemption.get_total_coupon_code_purchases(course_id)
bulk_purchased_codes = CourseRegistrationCode.order_generated_registration_codes(course_id)
unused_registration_codes = 0
for registration_code in bulk_purchased_codes:
if not RegistrationCodeRedemption.is_registration_code_redeemed(registration_code.code):
unused_registration_codes += 1
self_purchased_seat_count = PaidCourseRegistration.get_self_purchased_seat_count(course_id)
bulk_purchased_seat_count = CourseRegCodeItem.get_bulk_purchased_seat_count(course_id)
total_invoiced_seats = CourseRegistrationCode.invoice_generated_registration_codes(course_id).count()
total_seats = self_purchased_seat_count + bulk_purchased_seat_count + total_invoiced_seats
self_purchases_percentage = 0.0
bulk_purchases_percentage = 0.0
invoice_purchases_percentage = 0.0
avg_price_paid = 0.0
if total_seats != 0:
self_purchases_percentage = (float(self_purchased_seat_count) / float(total_seats)) * 100
bulk_purchases_percentage = (float(bulk_purchased_seat_count) / float(total_seats)) * 100
invoice_purchases_percentage = (float(total_invoiced_seats) / float(total_seats)) * 100
avg_price_paid = gross_revenue / total_seats
course = get_course_by_id(course_id, depth=0)
currency = settings.PAID_COURSE_REGISTRATION_CURRENCY[1]
return {
'display_name': course.display_name,
'start_date': course.start.strftime("%Y-%m-%d") if course.start is not None else 'N/A',
'end_date': course.end.strftime("%Y-%m-%d") if course.end is not None else 'N/A',
'total_seats': total_seats,
'currency': currency,
'gross_revenue': float(gross_revenue),
'gross_paid_revenue': float(gross_paid_revenue),
'gross_pending_revenue': gross_pending_revenue,
'total_seats_refunded': total_seats_refunded,
'total_amount_refunded': float(total_amount_refunded),
'average_paid_price': float(avg_price_paid),
'discount_codes_data': top_discounted_codes,
'total_seats_using_discount_codes': total_coupon_codes_purchases,
'total_self_purchase_seats': self_purchased_seat_count,
'total_bulk_purchase_seats': bulk_purchased_seat_count,
'total_invoiced_seats': total_invoiced_seats,
'unused_bulk_purchase_code_count': unused_registration_codes,
'self_purchases_percentage': self_purchases_percentage,
'bulk_purchases_percentage': bulk_purchases_percentage,
'invoice_purchases_percentage': invoice_purchases_percentage,
}
def upload_exec_summary_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=invalid-name
"""
For a given `course_id`, generate an HTML report that provides a snapshot of how the course is doing.
"""
start_time = time()
report_generation_date = datetime.now(UTC)
status_interval = 100
enrolled_users = CourseEnrollment.objects.users_enrolled_in(course_id)
true_enrollment_count = 0
for user in enrolled_users:
if not user.is_staff and not CourseAccessRole.objects.filter(
user=user, course_id=course_id, role__in=FILTERED_OUT_ROLES
).exists():
true_enrollment_count += 1
task_progress = TaskProgress(action_name, true_enrollment_count, start_time)
fmt = u'Task: {task_id}, InstructorTask ID: {entry_id}, Course: {course_id}, Input: {task_input}'
task_info_string = fmt.format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input
)
TASK_LOG.info(u'%s, Task type: %s, Starting task execution', task_info_string, action_name)
current_step = {'step': 'Gathering executive summary report information'}
TASK_LOG.info(
u'%s, Task type: %s, Current step: %s, generating executive summary report',
task_info_string,
action_name,
current_step
)
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
task_progress.attempted += 1
# get the course executive summary report information.
data_dict = get_executive_report(course_id)
data_dict.update(
{
'total_enrollments': true_enrollment_count,
'report_generation_date': report_generation_date.strftime("%Y-%m-%d"),
}
)
# By this point, we've got the data that we need to generate html report.
current_step = {'step': 'Uploading executive summary report HTML file'}
task_progress.update_task_state(extra_meta=current_step)
TASK_LOG.info(u'%s, Task type: %s, Current step: %s', task_info_string, action_name, current_step)
# Perform the actual upload
upload_exec_summary_to_store(data_dict, 'executive_report', course_id, report_generation_date)
task_progress.succeeded += 1
# One last update before we close out...
TASK_LOG.info(u'%s, Task type: %s, Finalizing executive summary report task', task_info_string, action_name)
return task_progress.update_task_state(extra_meta=current_step)
def upload_course_survey_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=invalid-name
"""
For a given `course_id`, generate a CSV report containing the survey results for the course.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Gathering course survey report information'}
task_progress.update_task_state(extra_meta=current_step)
distinct_survey_fields_queryset = SurveyAnswer.objects.filter(course_key=course_id).values('field_name').distinct()
survey_fields = []
for unique_field_row in distinct_survey_fields_queryset:
survey_fields.append(unique_field_row['field_name'])
survey_fields.sort()
user_survey_answers = OrderedDict()
survey_answers_for_course = SurveyAnswer.objects.filter(course_key=course_id).select_related('user')
for survey_field_record in survey_answers_for_course:
user_id = survey_field_record.user.id
if user_id not in user_survey_answers.keys():
user_survey_answers[user_id] = {
'username': survey_field_record.user.username,
'email': survey_field_record.user.email
}
user_survey_answers[user_id][survey_field_record.field_name] = survey_field_record.field_value
header = ["User ID", "User Name", "Email"]
header.extend(survey_fields)
csv_rows = []
for user_id in user_survey_answers.keys():
row = []
row.append(user_id)
row.append(user_survey_answers[user_id].get('username', ''))
row.append(user_survey_answers[user_id].get('email', ''))
for survey_field in survey_fields:
row.append(user_survey_answers[user_id].get(survey_field, ''))
csv_rows.append(row)
task_progress.attempted = task_progress.succeeded = len(csv_rows)
task_progress.skipped = task_progress.total - task_progress.attempted
csv_rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(csv_rows, 'course_survey_results', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def upload_proctored_exam_results_report(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name): # pylint: disable=invalid-name
"""
For a given `course_id`, generate a CSV file containing
information about proctored exam results, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Calculating info about proctored exam results in a course'}
task_progress.update_task_state(extra_meta=current_step)
# Compute result table and format it
query_features = _task_input.get('features')
student_data = get_proctored_exam_results(course_id, query_features)
header, rows = format_dictlist(student_data, query_features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
upload_csv_to_report_store(rows, 'proctored_exam_results_report', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
def generate_students_certificates(
_xmodule_instance_args, _entry_id, course_id, task_input, action_name): # pylint: disable=unused-argument
"""
For a given `course_id`, generate certificates only for the students listed under the 'students'
key of the task_input JSON column; otherwise generate certificates for all enrolled students.
"""
start_time = time()
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id)
students = task_input.get('students', None)
if students is not None:
enrolled_students = enrolled_students.filter(id__in=students)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
current_step = {'step': 'Calculating students already have certificates'}
task_progress.update_task_state(extra_meta=current_step)
statuses_to_regenerate = task_input.get('statuses_to_regenerate', [])
students_require_certs = students_require_certificate(course_id, enrolled_students, statuses_to_regenerate)
task_progress.skipped = task_progress.total - len(students_require_certs)
current_step = {'step': 'Generating Certificates'}
task_progress.update_task_state(extra_meta=current_step)
course = modulestore().get_course(course_id, depth=0)
# Generate certificate for each student
for student in students_require_certs:
task_progress.attempted += 1
status = generate_user_certificates(
student,
course_id,
course=course
)
if status in [CertificateStatuses.generating, CertificateStatuses.downloadable]:
task_progress.succeeded += 1
else:
task_progress.failed += 1
return task_progress.update_task_state(extra_meta=current_step)
def cohort_students_and_upload(_xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
Within a given course, cohort students in bulk, then upload the results
using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
# Iterate through rows to get total assignments for task progress
with DefaultStorage().open(task_input['file_name']) as f:
total_assignments = 0
for _line in unicodecsv.DictReader(UniversalNewlineIterator(f)):
total_assignments += 1
task_progress = TaskProgress(action_name, total_assignments, start_time)
current_step = {'step': 'Cohorting Students'}
task_progress.update_task_state(extra_meta=current_step)
# cohorts_status is a mapping from cohort_name to metadata about
# that cohort. The metadata will include information about users
# successfully added to the cohort, users not found, and a cached
# reference to the corresponding cohort object to prevent
# redundant cohort queries.
cohorts_status = {}
with DefaultStorage().open(task_input['file_name']) as f:
for row in unicodecsv.DictReader(UniversalNewlineIterator(f), encoding='utf-8'):
# Try to use the 'email' field to identify the user. If it's not present, use 'username'.
username_or_email = row.get('email') or row.get('username')
cohort_name = row.get('cohort') or ''
task_progress.attempted += 1
if not cohorts_status.get(cohort_name):
cohorts_status[cohort_name] = {
'Cohort Name': cohort_name,
'Students Added': 0,
'Students Not Found': set()
}
try:
cohorts_status[cohort_name]['cohort'] = CourseUserGroup.objects.get(
course_id=course_id,
group_type=CourseUserGroup.COHORT,
name=cohort_name
)
cohorts_status[cohort_name]["Exists"] = True
except CourseUserGroup.DoesNotExist:
cohorts_status[cohort_name]["Exists"] = False
if not cohorts_status[cohort_name]['Exists']:
task_progress.failed += 1
continue
try:
add_user_to_cohort(cohorts_status[cohort_name]['cohort'], username_or_email)
cohorts_status[cohort_name]['Students Added'] += 1
task_progress.succeeded += 1
except User.DoesNotExist:
cohorts_status[cohort_name]['Students Not Found'].add(username_or_email)
task_progress.failed += 1
except ValueError:
# Raised when the user is already in the given cohort
task_progress.skipped += 1
task_progress.update_task_state(extra_meta=current_step)
current_step['step'] = 'Uploading CSV'
task_progress.update_task_state(extra_meta=current_step)
# Filter the output of `add_users_to_cohorts` in order to upload the result.
output_header = ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
output_rows = [
[
','.join(status_dict.get(column_name, '')) if column_name == 'Students Not Found'
else status_dict[column_name]
for column_name in output_header
]
for _cohort_name, status_dict in cohorts_status.iteritems()
]
output_rows.insert(0, output_header)
upload_csv_to_report_store(output_rows, 'cohort_results', course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
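# Illustrative sketch (assumption, not in the original module): for a CSV that
# assigns two known users and one unknown email to an existing cohort named
# "alpha", the uploaded 'cohort_results' rows would look roughly like:
#
#     ['Cohort Name', 'Exists', 'Students Added', 'Students Not Found']
#     ['alpha',       True,     2,                'ghost@example.com']
#
# because 'Students Not Found' is the ','-joined set built above, while the
# other columns are taken from cohorts_status verbatim.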
def students_require_certificate(course_id, enrolled_students, statuses_to_regenerate=None):
"""
Returns the list of students for whom certificates need to be generated.
If 'statuses_to_regenerate' is given, return students that already have generated certificates
whose status lies in 'statuses_to_regenerate'.
If 'statuses_to_regenerate' is not given, return all enrolled students, skipping the ones
whose certificates have already been generated.
:param course_id:
:param enrolled_students:
:param statuses_to_regenerate:
"""
if statuses_to_regenerate:
# Return Students that have Generated Certificates and the generated certificate status
# lies in 'statuses_to_regenerate'
return User.objects.filter(
generatedcertificate__course_id=course_id,
generatedcertificate__status__in=statuses_to_regenerate
)
else:
# compute those students whose certificates are already generated
students_already_have_certs = User.objects.filter(
~Q(generatedcertificate__status=CertificateStatuses.unavailable),
generatedcertificate__course_id=course_id)
# Return all the enrolled students, skipping the ones whose certificates have already been generated
return list(set(enrolled_students) - set(students_already_have_certs))
| agpl-3.0 |
ioanpocol/superdesk-core | superdesk/publish/formatters/__init__.py | 2 | 6138 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import logging
from lxml import etree
from superdesk.metadata.item import ITEM_TYPE, CONTENT_TYPE, FORMATS, FORMAT
from superdesk.etree import parse_html
from superdesk.text_utils import get_text
from superdesk.publish import registered_transmitters
formatters = []
logger = logging.getLogger(__name__)
class FormatterRegistry(type):
"""Registry metaclass for formatters."""
def __init__(cls, name, bases, attrs):
"""Register sub-classes of Formatter class when defined."""
super(FormatterRegistry, cls).__init__(name, bases, attrs)
if name != 'Formatter':
formatters.append(cls)
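# Illustrative sketch (assumption, not part of the original module): because the
# metaclass appends every non-base subclass to ``formatters`` at class-creation
# time, defining a formatter is enough to register it, e.g.
#
#     class DummyFormatter(Formatter):
#         def can_format(self, format_type, article):
#             return format_type == 'dummy'
#
# After the definition above, get_formatter('dummy', article) can return a
# DummyFormatter instance without any explicit registration call.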
class Formatter(metaclass=FormatterRegistry):
"""Base Formatter class for all types of Formatters like News ML 1.2, News ML G2, NITF, etc."""
def __init__(self):
self.can_preview = False
self.can_export = False
self.destination = None
self.subscriber = None
def format(self, article, subscriber, codes=None):
"""Formats the article and returns the transformed string"""
raise NotImplementedError()
def export(self, article, subscriber, codes=None):
"""Formats the article and returns the output string for export"""
raise NotImplementedError()
def can_format(self, format_type, article):
"""Test if formatter can format for given article."""
raise NotImplementedError()
def append_body_footer(self, article):
"""
Checks if the article has any Public Service Announcements and, if available, appends each of them to the body.
:return: body with public service announcements.
"""
try:
article['body_html'] = article['body_html'].replace('<br>', '<br/>')
except KeyError:
pass
body = ''
if article[ITEM_TYPE] in [CONTENT_TYPE.TEXT, CONTENT_TYPE.PREFORMATTED]:
body = article.get('body_html', '')
elif article[ITEM_TYPE] in [CONTENT_TYPE.AUDIO, CONTENT_TYPE.PICTURE, CONTENT_TYPE.VIDEO]:
body = article.get('description', '')
if body and article.get(FORMAT, '') == FORMATS.PRESERVED:
body = body.replace('\n', '\r\n').replace('\r\r', '\r')
parsed = parse_html(body, content='html')
for br in parsed.xpath('//br'):
br.tail = '\r\n' + br.tail if br.tail else '\r\n'
etree.strip_elements(parsed, 'br', with_tail=False)
body = etree.tostring(parsed, encoding="unicode")
if body and article.get('body_footer'):
footer = article.get('body_footer')
if article.get(FORMAT, '') == FORMATS.PRESERVED:
body = '{}\r\n{}'.format(body, get_text(footer))
else:
body = '{}{}'.format(body, footer)
return body
def append_legal(self, article, truncate=False):
"""
Checks if the article has the legal flag on and adds 'Legal:' to the slugline
:param article: article having the slugline
:param truncate: truncates the slugline to 24 characters
:return: updated slugline
"""
slugline = article.get('slugline', '') or ''
if article.get('flags', {}).get('marked_for_legal', False):
slugline = '{}: {}'.format('Legal', slugline)
if truncate:
slugline = slugline[:24]
return slugline
def map_html_to_xml(self, element, html):
"""
Map the html text tags to xml
:param etree.Element element: The xml element to populate
:param str html: the html to parse the text from
:return:
"""
root = parse_html(html, content='html')
# if there are no p tags, only br tags
if not len(root.xpath('//p')) and len(root.xpath('//br')):
para = etree.SubElement(element, 'p')
for br in root.xpath('//br'):
etree.SubElement(para, 'br').text = br.text
for p in root.xpath('//p'):
para = etree.SubElement(element, 'p')
if len(p.xpath('.//br')) > 0:
for br in p.xpath('.//br'):
etree.SubElement(para, 'br').text = br.text
para.text = etree.tostring(p, encoding="unicode", method="text")
# there are neither p tags nor br tags
if len(list(element)) == 0:
etree.SubElement(element, 'p').text = etree.tostring(root, encoding="unicode", method="text")
def set_destination(self, destination=None, subscriber=None):
self.destination = destination
self.subscriber = subscriber
def _publish_media(self, media):
if self.destination:
try:
transmitter = registered_transmitters[self.destination['delivery_type']]
except KeyError:
logger.warning('Missing transmitter for destination %s', self.destination)
else:
return transmitter.transmit_media(media, self.subscriber, self.destination)
def get_formatter(format_type, article):
"""Get parser for given xml.
:param etree: parsed xml
"""
for formatter_cls in formatters:
formatter_instance = formatter_cls()
if formatter_instance.can_format(format_type, article):
return formatter_instance
def get_all_formatters():
"""Return all formatters registered."""
return [formatter_cls() for formatter_cls in formatters]
from .nitf_formatter import NITFFormatter # NOQA
from .ninjs_formatter import NINJSFormatter # NOQA
from .newsml_1_2_formatter import NewsML12Formatter # NOQA
from .newsml_g2_formatter import NewsMLG2Formatter # NOQA
from .email_formatter import EmailFormatter # NOQA
from .ninjs_newsroom_formatter import NewsroomNinjsFormatter # NOQA
from .idml_formatter import IDMLFormatter # NOQA
| agpl-3.0 |
pamoakoy/invenio | modules/bibformat/lib/elements/bfe_record_stats.py | 35 | 2649 | ## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints record statistics
"""
__revision__ = "$Id$"
from invenio.dbquery import run_sql
def format_element(bfo, display='day_distinct_ip_nb_views'):
'''
Prints record statistics
@param display: the type of statistics displayed. Can be 'total_nb_views', 'day_nb_views', 'total_distinct_ip_nb_views', 'day_distinct_ip_nb_views', 'total_distinct_ip_per_day_nb_views'
'''
if display == 'total_nb_views':
return run_sql("""SELECT COUNT(client_host) FROM rnkPAGEVIEWS
WHERE id_bibrec=%s""",
(bfo.recID,))[0][0]
elif display == 'day_nb_views':
return run_sql("""SELECT COUNT(client_host) FROM rnkPAGEVIEWS
WHERE id_bibrec=%s AND DATE(view_time)=CURDATE()""",
(bfo.recID,))[0][0]
elif display == 'total_distinct_ip_nb_views':
return run_sql("""SELECT COUNT(DISTINCT client_host) FROM rnkPAGEVIEWS
WHERE id_bibrec=%s""",
(bfo.recID,))[0][0]
elif display == 'day_distinct_ip_nb_views':
return run_sql("""SELECT COUNT(DISTINCT client_host) FROM rnkPAGEVIEWS
WHERE id_bibrec=%s AND DATE(view_time)=CURDATE()""",
(bfo.recID,))[0][0]
elif display == 'total_distinct_ip_per_day_nb_views':
# Count the number of distinct IP addresses for every day, then
# sum up. Similar to total_distinct_users_nb_views but assume
# that several different users can be behind a single IP
# (which could change every day)
res = run_sql("""SELECT COUNT(DISTINCT client_host)
FROM rnkPAGEVIEWS
WHERE id_bibrec=%s GROUP BY DATE(view_time)""",
(bfo.recID,))
return sum([row[0] for row in res])
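# Illustrative usage sketch (assumption about the surrounding BibFormat setup):
# inside a format template this element would typically be invoked with the
# desired counter passed as a parameter, along the lines of
#
#     <BFE_RECORD_STATS display="day_distinct_ip_nb_views" />
#
# which maps onto the `display` argument of format_element() above.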
| gpl-2.0 |
luv/impulse-cube | deps/pyudev-0.16.1/tests/plugins/privileged.py | 3 | 2224 | # -*- coding: utf-8 -*-
# Copyright (C) 2012 Sebastian Wiesner <[email protected]>
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 2.1 of the License, or (at your
# option) any later version.
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
plugins.privileged
==================
Support privileged operations to trigger real udev events.
This plugin adds :func:`load_dummy` and :func:`unload_dummy` to the
:mod:`pytest` namespace.
.. moduleauthor:: Sebastian Wiesner <[email protected]>
"""
from __future__ import (print_function, division, unicode_literals,
absolute_import)
from subprocess import call
import pytest
def pytest_addoption(parser):
group = parser.getgroup('privileged', 'tests with privileged operations')
group.addoption('--enable-privileged', action='store_true',
help='Enable tests that require privileged operations',
default=False)
def check_privileges_or_skip():
if not pytest.config.option.enable_privileged:
pytest.skip('privileged tests disabled')
def load_dummy():
"""
Load the ``dummy`` module.
If privileged tests are disabled, the current test is skipped.
"""
check_privileges_or_skip()
call(['sudo', 'modprobe', 'dummy'])
def unload_dummy():
"""
Unload the ``dummy`` module.
If privileged tests are disabled, the current test is skipped.
"""
check_privileges_or_skip()
call(['sudo', 'modprobe', '-r', 'dummy'])
EXPOSED_FUNCTIONS = [load_dummy, unload_dummy]
def pytest_namespace():
return dict((f.__name__, f) for f in EXPOSED_FUNCTIONS)
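# Illustrative usage sketch (assumption, not part of the plugin): because
# pytest_namespace() exposes the helpers on the ``pytest`` module, a test that
# needs a real kernel module could be written as
#
#     def test_observe_dummy():
#         pytest.load_dummy()      # skipped unless --enable-privileged is given
#         try:
#             ...                  # exercise pyudev against the dummy interface
#         finally:
#             pytest.unload_dummy()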
| gpl-3.0 |
vicky2135/lucious | oscar/lib/python2.7/site-packages/pygments/formatters/terminal256.py | 27 | 10776 | # -*- coding: utf-8 -*-
"""
pygments.formatters.terminal256
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for 256-color terminal output with ANSI sequences.
RGB-to-XTERM color conversion routines adapted from xterm256-conv
tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
by Wolfgang Frisch.
Formatter version 1.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# TODO:
# - Options to map style's bold/underline/italic/border attributes
# to some ANSI attributes (something like 'italic=underline')
# - An option to output "style RGB to xterm RGB/index" conversion table
# - An option to indicate that we are running in "reverse background"
# xterm. This means that default colors are white-on-black, not
# black-on-while, so colors like "white background" need to be converted
# to "white background, black foreground", etc...
import sys
from pygments.formatter import Formatter
from pygments.console import codes
from pygments.style import ansicolors
__all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter']
class EscapeSequence:
def __init__(self, fg=None, bg=None, bold=False, underline=False):
self.fg = fg
self.bg = bg
self.bold = bold
self.underline = underline
def escape(self, attrs):
if len(attrs):
return "\x1b[" + ";".join(attrs) + "m"
return ""
def color_string(self):
attrs = []
if self.fg is not None:
if self.fg in ansicolors:
esc = codes[self.fg[5:]]
if ';01m' in esc:
self.bold = True
# extract fg color code.
attrs.append(esc[2:4])
else:
attrs.extend(("38", "5", "%i" % self.fg))
if self.bg is not None:
if self.bg in ansicolors:
esc = codes[self.bg[5:]]
# extract fg color code, add 10 for bg.
attrs.append(str(int(esc[2:4])+10))
else:
attrs.extend(("48", "5", "%i" % self.bg))
if self.bold:
attrs.append("01")
if self.underline:
attrs.append("04")
return self.escape(attrs)
def true_color_string(self):
attrs = []
if self.fg:
attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2])))
if self.bg:
attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2])))
if self.bold:
attrs.append("01")
if self.underline:
attrs.append("04")
return self.escape(attrs)
def reset_string(self):
attrs = []
if self.fg is not None:
attrs.append("39")
if self.bg is not None:
attrs.append("49")
if self.bold or self.underline:
attrs.append("00")
return self.escape(attrs)
class Terminal256Formatter(Formatter):
"""
Format tokens with ANSI color sequences, for output in a 256-color
terminal or console. Like in `TerminalFormatter` color sequences
are terminated at newlines, so that paging the output works correctly.
The formatter takes colors from a style defined by the `style` option
and converts them to nearest ANSI 256-color escape sequences. Bold and
underline attributes from the style are preserved (and displayed).
.. versionadded:: 0.9
.. versionchanged:: 2.2
If the used style defines foreground colors in the form ``#ansi*``, then
`Terminal256Formatter` will map these to non extended foreground color.
See :ref:`AnsiTerminalStyle` for more information.
Options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
"""
name = 'Terminal256'
aliases = ['terminal256', 'console256', '256']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self.xterm_colors = []
self.best_match = {}
self.style_string = {}
self.usebold = 'nobold' not in options
self.useunderline = 'nounderline' not in options
self._build_color_table() # build an RGB-to-256 color conversion table
self._setup_styles() # convert selected style's colors to term. colors
def _build_color_table(self):
# colors 0..15: 16 basic colors
self.xterm_colors.append((0x00, 0x00, 0x00)) # 0
self.xterm_colors.append((0xcd, 0x00, 0x00)) # 1
self.xterm_colors.append((0x00, 0xcd, 0x00)) # 2
self.xterm_colors.append((0xcd, 0xcd, 0x00)) # 3
self.xterm_colors.append((0x00, 0x00, 0xee)) # 4
self.xterm_colors.append((0xcd, 0x00, 0xcd)) # 5
self.xterm_colors.append((0x00, 0xcd, 0xcd)) # 6
self.xterm_colors.append((0xe5, 0xe5, 0xe5)) # 7
self.xterm_colors.append((0x7f, 0x7f, 0x7f)) # 8
self.xterm_colors.append((0xff, 0x00, 0x00)) # 9
self.xterm_colors.append((0x00, 0xff, 0x00)) # 10
self.xterm_colors.append((0xff, 0xff, 0x00)) # 11
self.xterm_colors.append((0x5c, 0x5c, 0xff)) # 12
self.xterm_colors.append((0xff, 0x00, 0xff)) # 13
self.xterm_colors.append((0x00, 0xff, 0xff)) # 14
self.xterm_colors.append((0xff, 0xff, 0xff)) # 15
# colors 16..232: the 6x6x6 color cube
valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
for i in range(217):
r = valuerange[(i // 36) % 6]
g = valuerange[(i // 6) % 6]
b = valuerange[i % 6]
self.xterm_colors.append((r, g, b))
# colors 233..253: grayscale
for i in range(1, 22):
v = 8 + i * 10
self.xterm_colors.append((v, v, v))
def _closest_color(self, r, g, b):
distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff)
match = 0
for i in range(0, 254):
values = self.xterm_colors[i]
rd = r - values[0]
gd = g - values[1]
bd = b - values[2]
d = rd*rd + gd*gd + bd*bd
if d < distance:
match = i
distance = d
return match
def _color_index(self, color):
index = self.best_match.get(color, None)
if color in ansicolors:
# strip the `#ansi` part and look up code
index = color
self.best_match[color] = index
if index is None:
try:
rgb = int(str(color), 16)
except ValueError:
rgb = 0
r = (rgb >> 16) & 0xff
g = (rgb >> 8) & 0xff
b = rgb & 0xff
index = self._closest_color(r, g, b)
self.best_match[color] = index
return index
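# Illustrative note (not part of the original formatter): for a style color such
# as 'ff0000', _color_index() parses r=0xff, g=0x00, b=0x00 and _closest_color()
# returns 9, because self.xterm_colors[9] == (0xff, 0x00, 0x00) gives a squared
# distance of exactly 0; non-exact colors simply pick the table entry with the
# smallest squared RGB distance.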
def _setup_styles(self):
for ttype, ndef in self.style:
escape = EscapeSequence()
# get foreground from ansicolor if set
if ndef['ansicolor']:
escape.fg = self._color_index(ndef['ansicolor'])
elif ndef['color']:
escape.fg = self._color_index(ndef['color'])
if ndef['bgansicolor']:
escape.bg = self._color_index(ndef['bgansicolor'])
elif ndef['bgcolor']:
escape.bg = self._color_index(ndef['bgcolor'])
if self.usebold and ndef['bold']:
escape.bold = True
if self.useunderline and ndef['underline']:
escape.underline = True
self.style_string[str(ttype)] = (escape.color_string(),
escape.reset_string())
def format(self, tokensource, outfile):
# hack: if the output is a terminal and has an encoding set,
# use that to avoid unicode encode problems
if not self.encoding and hasattr(outfile, "encoding") and \
hasattr(outfile, "isatty") and outfile.isatty() and \
sys.version_info < (3,):
self.encoding = outfile.encoding
return Formatter.format(self, tokensource, outfile)
def format_unencoded(self, tokensource, outfile):
for ttype, value in tokensource:
not_found = True
while ttype and not_found:
try:
# outfile.write( "<" + str(ttype) + ">" )
on, off = self.style_string[str(ttype)]
# Like TerminalFormatter, add "reset colors" escape sequence
# on newline.
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write(on + line + off)
outfile.write('\n')
if spl[-1]:
outfile.write(on + spl[-1] + off)
not_found = False
# outfile.write( '#' + str(ttype) + '#' )
except KeyError:
# ottype = ttype
ttype = ttype[:-1]
# outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' )
if not_found:
outfile.write(value)
class TerminalTrueColorFormatter(Terminal256Formatter):
r"""
Format tokens with ANSI color sequences, for output in a true-color
terminal or console. Like in `TerminalFormatter` color sequences
are terminated at newlines, so that paging the output works correctly.
.. versionadded:: 2.1
Options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
"""
name = 'TerminalTrueColor'
aliases = ['terminal16m', 'console16m', '16m']
filenames = []
def _build_color_table(self):
pass
def _color_tuple(self, color):
try:
rgb = int(str(color), 16)
except ValueError:
return None
r = (rgb >> 16) & 0xff
g = (rgb >> 8) & 0xff
b = rgb & 0xff
return (r, g, b)
def _setup_styles(self):
for ttype, ndef in self.style:
escape = EscapeSequence()
if ndef['color']:
escape.fg = self._color_tuple(ndef['color'])
if ndef['bgcolor']:
escape.bg = self._color_tuple(ndef['bgcolor'])
if self.usebold and ndef['bold']:
escape.bold = True
if self.useunderline and ndef['underline']:
escape.underline = True
self.style_string[str(ttype)] = (escape.true_color_string(),
escape.reset_string())
| bsd-3-clause |
axcheron/pydbg | pydbg/windows_h.py | 10 | 15273 | # generated by 'xml2py'
#
# $Id: windows_h.py 194 2007-04-05 15:31:53Z cameron $
#
# flags 'windows.xml -s DEBUG_EVENT -s CONTEXT -s MEMORY_BASIC_INFORMATION -s LDT_ENTRY -s PROCESS_INFORMATION -s STARTUPINFO -s SYSTEM_INFO -s TOKEN_PRIVILEGES -s LUID -s HANDLE -o windows_h.py'
# PEDRAM - line swap ... have to patch in our own __reduce__ definition to each ctype.
#from ctypes import *
from my_ctypes import *
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 4188
class _TOKEN_PRIVILEGES(Structure):
pass
TOKEN_PRIVILEGES = _TOKEN_PRIVILEGES
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 3774
class _STARTUPINFOA(Structure):
pass
STARTUPINFOA = _STARTUPINFOA
STARTUPINFO = STARTUPINFOA
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 1661
class _LDT_ENTRY(Structure):
pass
LDT_ENTRY = _LDT_ENTRY
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 4534
class _MEMORY_BASIC_INFORMATION(Structure):
pass
MEMORY_BASIC_INFORMATION = _MEMORY_BASIC_INFORMATION
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 697
class _DEBUG_EVENT(Structure):
pass
DEBUG_EVENT = _DEBUG_EVENT
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 1563
class _CONTEXT(Structure):
pass
CONTEXT = _CONTEXT
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 497
class _SYSTEM_INFO(Structure):
pass
SYSTEM_INFO = _SYSTEM_INFO
HANDLE = c_void_p
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 229
class _PROCESS_INFORMATION(Structure):
pass
PROCESS_INFORMATION = _PROCESS_INFORMATION
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 394
class _LUID(Structure):
pass
LUID = _LUID
WORD = c_ushort
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 1664
class N10_LDT_ENTRY3DOLLAR_4E(Union):
pass
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 1665
class N10_LDT_ENTRY3DOLLAR_43DOLLAR_5E(Structure):
pass
BYTE = c_ubyte
N10_LDT_ENTRY3DOLLAR_43DOLLAR_5E._fields_ = [
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 1665
('BaseMid', BYTE),
('Flags1', BYTE),
('Flags2', BYTE),
('BaseHi', BYTE),
]
assert sizeof(N10_LDT_ENTRY3DOLLAR_43DOLLAR_5E) == 4, sizeof(N10_LDT_ENTRY3DOLLAR_43DOLLAR_5E)
assert alignment(N10_LDT_ENTRY3DOLLAR_43DOLLAR_5E) == 1, alignment(N10_LDT_ENTRY3DOLLAR_43DOLLAR_5E)
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 1671
class N10_LDT_ENTRY3DOLLAR_43DOLLAR_6E(Structure):
pass
DWORD = c_ulong
N10_LDT_ENTRY3DOLLAR_43DOLLAR_6E._fields_ = [
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 1671
('BaseMid', DWORD, 8),
('Type', DWORD, 5),
('Dpl', DWORD, 2),
('Pres', DWORD, 1),
('LimitHi', DWORD, 4),
('Sys', DWORD, 1),
('Reserved_0', DWORD, 1),
('Default_Big', DWORD, 1),
('Granularity', DWORD, 1),
('BaseHi', DWORD, 8),
]
assert sizeof(N10_LDT_ENTRY3DOLLAR_43DOLLAR_6E) == 4, sizeof(N10_LDT_ENTRY3DOLLAR_43DOLLAR_6E)
assert alignment(N10_LDT_ENTRY3DOLLAR_43DOLLAR_6E) == 4, alignment(N10_LDT_ENTRY3DOLLAR_43DOLLAR_6E)
N10_LDT_ENTRY3DOLLAR_4E._fields_ = [
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 1664
('Bytes', N10_LDT_ENTRY3DOLLAR_43DOLLAR_5E),
('Bits', N10_LDT_ENTRY3DOLLAR_43DOLLAR_6E),
]
assert sizeof(N10_LDT_ENTRY3DOLLAR_4E) == 4, sizeof(N10_LDT_ENTRY3DOLLAR_4E)
assert alignment(N10_LDT_ENTRY3DOLLAR_4E) == 4, alignment(N10_LDT_ENTRY3DOLLAR_4E)
_LDT_ENTRY._fields_ = [
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 1661
('LimitLow', WORD),
('BaseLow', WORD),
('HighWord', N10_LDT_ENTRY3DOLLAR_4E),
]
assert sizeof(_LDT_ENTRY) == 8, sizeof(_LDT_ENTRY)
assert alignment(_LDT_ENTRY) == 4, alignment(_LDT_ENTRY)
PVOID = c_void_p
UINT_PTR = c_ulong
SIZE_T = UINT_PTR
_MEMORY_BASIC_INFORMATION._fields_ = [
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 4534
('BaseAddress', PVOID),
('AllocationBase', PVOID),
('AllocationProtect', DWORD),
('RegionSize', SIZE_T),
('State', DWORD),
('Protect', DWORD),
('Type', DWORD),
]
assert sizeof(_MEMORY_BASIC_INFORMATION) == 28, sizeof(_MEMORY_BASIC_INFORMATION)
assert alignment(_MEMORY_BASIC_INFORMATION) == 4, alignment(_MEMORY_BASIC_INFORMATION)
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 1539
class _FLOATING_SAVE_AREA(Structure):
pass
_FLOATING_SAVE_AREA._fields_ = [
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 1539
('ControlWord', DWORD),
('StatusWord', DWORD),
('TagWord', DWORD),
('ErrorOffset', DWORD),
('ErrorSelector', DWORD),
('DataOffset', DWORD),
('DataSelector', DWORD),
('RegisterArea', BYTE * 80),
('Cr0NpxState', DWORD),
]
assert sizeof(_FLOATING_SAVE_AREA) == 112, sizeof(_FLOATING_SAVE_AREA)
assert alignment(_FLOATING_SAVE_AREA) == 4, alignment(_FLOATING_SAVE_AREA)
FLOATING_SAVE_AREA = _FLOATING_SAVE_AREA
_CONTEXT._fields_ = [
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 1563
('ContextFlags', DWORD),
('Dr0', DWORD),
('Dr1', DWORD),
('Dr2', DWORD),
('Dr3', DWORD),
('Dr6', DWORD),
('Dr7', DWORD),
('FloatSave', FLOATING_SAVE_AREA),
('SegGs', DWORD),
('SegFs', DWORD),
('SegEs', DWORD),
('SegDs', DWORD),
('Edi', DWORD),
('Esi', DWORD),
('Ebx', DWORD),
('Edx', DWORD),
('Ecx', DWORD),
('Eax', DWORD),
('Ebp', DWORD),
('Eip', DWORD),
('SegCs', DWORD),
('EFlags', DWORD),
('Esp', DWORD),
('SegSs', DWORD),
('ExtendedRegisters', BYTE * 512),
]
assert sizeof(_CONTEXT) == 716, sizeof(_CONTEXT)
assert alignment(_CONTEXT) == 4, alignment(_CONTEXT)
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 498
class N12_SYSTEM_INFO4DOLLAR_37E(Union):
pass
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 500
class N12_SYSTEM_INFO4DOLLAR_374DOLLAR_38E(Structure):
pass
N12_SYSTEM_INFO4DOLLAR_374DOLLAR_38E._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 500
('wProcessorArchitecture', WORD),
('wReserved', WORD),
]
assert sizeof(N12_SYSTEM_INFO4DOLLAR_374DOLLAR_38E) == 4, sizeof(N12_SYSTEM_INFO4DOLLAR_374DOLLAR_38E)
assert alignment(N12_SYSTEM_INFO4DOLLAR_374DOLLAR_38E) == 2, alignment(N12_SYSTEM_INFO4DOLLAR_374DOLLAR_38E)
N12_SYSTEM_INFO4DOLLAR_37E._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 498
('dwOemId', DWORD),
# Unnamed field renamed to '_'
('_', N12_SYSTEM_INFO4DOLLAR_374DOLLAR_38E),
]
assert sizeof(N12_SYSTEM_INFO4DOLLAR_37E) == 4, sizeof(N12_SYSTEM_INFO4DOLLAR_37E)
assert alignment(N12_SYSTEM_INFO4DOLLAR_37E) == 4, alignment(N12_SYSTEM_INFO4DOLLAR_37E)
LPVOID = c_void_p
_SYSTEM_INFO._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 497
# Unnamed field renamed to '_'
('_', N12_SYSTEM_INFO4DOLLAR_37E),
('dwPageSize', DWORD),
('lpMinimumApplicationAddress', LPVOID),
('lpMaximumApplicationAddress', LPVOID),
('dwActiveProcessorMask', DWORD),
('dwNumberOfProcessors', DWORD),
('dwProcessorType', DWORD),
('dwAllocationGranularity', DWORD),
('wProcessorLevel', WORD),
('wProcessorRevision', WORD),
]
assert sizeof(_SYSTEM_INFO) == 36, sizeof(_SYSTEM_INFO)
assert alignment(_SYSTEM_INFO) == 4, alignment(_SYSTEM_INFO)
CHAR = c_char
LPSTR = POINTER(CHAR)
LPBYTE = POINTER(BYTE)
_STARTUPINFOA._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 3774
('cb', DWORD),
('lpReserved', LPSTR),
('lpDesktop', LPSTR),
('lpTitle', LPSTR),
('dwX', DWORD),
('dwY', DWORD),
('dwXSize', DWORD),
('dwYSize', DWORD),
('dwXCountChars', DWORD),
('dwYCountChars', DWORD),
('dwFillAttribute', DWORD),
('dwFlags', DWORD),
('wShowWindow', WORD),
('cbReserved2', WORD),
('lpReserved2', LPBYTE),
('hStdInput', HANDLE),
('hStdOutput', HANDLE),
('hStdError', HANDLE),
]
assert sizeof(_STARTUPINFOA) == 68, sizeof(_STARTUPINFOA)
assert alignment(_STARTUPINFOA) == 4, alignment(_STARTUPINFOA)
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 701
class N12_DEBUG_EVENT4DOLLAR_39E(Union):
pass
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 640
class _EXCEPTION_DEBUG_INFO(Structure):
pass
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 3101
class _EXCEPTION_RECORD(Structure):
pass
_EXCEPTION_RECORD._fields_ = [
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 3101
('ExceptionCode', DWORD),
('ExceptionFlags', DWORD),
('ExceptionRecord', POINTER(_EXCEPTION_RECORD)),
('ExceptionAddress', PVOID),
('NumberParameters', DWORD),
('ExceptionInformation', UINT_PTR * 15),
]
assert sizeof(_EXCEPTION_RECORD) == 80, sizeof(_EXCEPTION_RECORD)
assert alignment(_EXCEPTION_RECORD) == 4, alignment(_EXCEPTION_RECORD)
EXCEPTION_RECORD = _EXCEPTION_RECORD
_EXCEPTION_DEBUG_INFO._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 640
('ExceptionRecord', EXCEPTION_RECORD),
('dwFirstChance', DWORD),
]
assert sizeof(_EXCEPTION_DEBUG_INFO) == 84, sizeof(_EXCEPTION_DEBUG_INFO)
assert alignment(_EXCEPTION_DEBUG_INFO) == 4, alignment(_EXCEPTION_DEBUG_INFO)
EXCEPTION_DEBUG_INFO = _EXCEPTION_DEBUG_INFO
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 645
class _CREATE_THREAD_DEBUG_INFO(Structure):
pass
# macOS compatibility.
try:
PTHREAD_START_ROUTINE = WINFUNCTYPE(DWORD, c_void_p)
except:
PTHREAD_START_ROUTINE = CFUNCTYPE(DWORD, c_void_p)
LPTHREAD_START_ROUTINE = PTHREAD_START_ROUTINE
_CREATE_THREAD_DEBUG_INFO._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 645
('hThread', HANDLE),
('lpThreadLocalBase', LPVOID),
('lpStartAddress', LPTHREAD_START_ROUTINE),
]
assert sizeof(_CREATE_THREAD_DEBUG_INFO) == 12, sizeof(_CREATE_THREAD_DEBUG_INFO)
assert alignment(_CREATE_THREAD_DEBUG_INFO) == 4, alignment(_CREATE_THREAD_DEBUG_INFO)
CREATE_THREAD_DEBUG_INFO = _CREATE_THREAD_DEBUG_INFO
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 651
class _CREATE_PROCESS_DEBUG_INFO(Structure):
pass
_CREATE_PROCESS_DEBUG_INFO._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 651
('hFile', HANDLE),
('hProcess', HANDLE),
('hThread', HANDLE),
('lpBaseOfImage', LPVOID),
('dwDebugInfoFileOffset', DWORD),
('nDebugInfoSize', DWORD),
('lpThreadLocalBase', LPVOID),
('lpStartAddress', LPTHREAD_START_ROUTINE),
('lpImageName', LPVOID),
('fUnicode', WORD),
]
assert sizeof(_CREATE_PROCESS_DEBUG_INFO) == 40, sizeof(_CREATE_PROCESS_DEBUG_INFO)
assert alignment(_CREATE_PROCESS_DEBUG_INFO) == 4, alignment(_CREATE_PROCESS_DEBUG_INFO)
CREATE_PROCESS_DEBUG_INFO = _CREATE_PROCESS_DEBUG_INFO
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 664
class _EXIT_THREAD_DEBUG_INFO(Structure):
pass
_EXIT_THREAD_DEBUG_INFO._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 664
('dwExitCode', DWORD),
]
assert sizeof(_EXIT_THREAD_DEBUG_INFO) == 4, sizeof(_EXIT_THREAD_DEBUG_INFO)
assert alignment(_EXIT_THREAD_DEBUG_INFO) == 4, alignment(_EXIT_THREAD_DEBUG_INFO)
EXIT_THREAD_DEBUG_INFO = _EXIT_THREAD_DEBUG_INFO
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 668
class _EXIT_PROCESS_DEBUG_INFO(Structure):
pass
_EXIT_PROCESS_DEBUG_INFO._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 668
('dwExitCode', DWORD),
]
assert sizeof(_EXIT_PROCESS_DEBUG_INFO) == 4, sizeof(_EXIT_PROCESS_DEBUG_INFO)
assert alignment(_EXIT_PROCESS_DEBUG_INFO) == 4, alignment(_EXIT_PROCESS_DEBUG_INFO)
EXIT_PROCESS_DEBUG_INFO = _EXIT_PROCESS_DEBUG_INFO
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 672
class _LOAD_DLL_DEBUG_INFO(Structure):
pass
_LOAD_DLL_DEBUG_INFO._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 672
('hFile', HANDLE),
('lpBaseOfDll', LPVOID),
('dwDebugInfoFileOffset', DWORD),
('nDebugInfoSize', DWORD),
('lpImageName', LPVOID),
('fUnicode', WORD),
]
assert sizeof(_LOAD_DLL_DEBUG_INFO) == 24, sizeof(_LOAD_DLL_DEBUG_INFO)
assert alignment(_LOAD_DLL_DEBUG_INFO) == 4, alignment(_LOAD_DLL_DEBUG_INFO)
LOAD_DLL_DEBUG_INFO = _LOAD_DLL_DEBUG_INFO
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 681
class _UNLOAD_DLL_DEBUG_INFO(Structure):
pass
_UNLOAD_DLL_DEBUG_INFO._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 681
('lpBaseOfDll', LPVOID),
]
assert sizeof(_UNLOAD_DLL_DEBUG_INFO) == 4, sizeof(_UNLOAD_DLL_DEBUG_INFO)
assert alignment(_UNLOAD_DLL_DEBUG_INFO) == 4, alignment(_UNLOAD_DLL_DEBUG_INFO)
UNLOAD_DLL_DEBUG_INFO = _UNLOAD_DLL_DEBUG_INFO
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 685
class _OUTPUT_DEBUG_STRING_INFO(Structure):
pass
_OUTPUT_DEBUG_STRING_INFO._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 685
('lpDebugStringData', LPSTR),
('fUnicode', WORD),
('nDebugStringLength', WORD),
]
assert sizeof(_OUTPUT_DEBUG_STRING_INFO) == 8, sizeof(_OUTPUT_DEBUG_STRING_INFO)
assert alignment(_OUTPUT_DEBUG_STRING_INFO) == 4, alignment(_OUTPUT_DEBUG_STRING_INFO)
OUTPUT_DEBUG_STRING_INFO = _OUTPUT_DEBUG_STRING_INFO
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 691
class _RIP_INFO(Structure):
pass
_RIP_INFO._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 691
('dwError', DWORD),
('dwType', DWORD),
]
assert sizeof(_RIP_INFO) == 8, sizeof(_RIP_INFO)
assert alignment(_RIP_INFO) == 4, alignment(_RIP_INFO)
RIP_INFO = _RIP_INFO
N12_DEBUG_EVENT4DOLLAR_39E._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 701
('Exception', EXCEPTION_DEBUG_INFO),
('CreateThread', CREATE_THREAD_DEBUG_INFO),
('CreateProcessInfo', CREATE_PROCESS_DEBUG_INFO),
('ExitThread', EXIT_THREAD_DEBUG_INFO),
('ExitProcess', EXIT_PROCESS_DEBUG_INFO),
('LoadDll', LOAD_DLL_DEBUG_INFO),
('UnloadDll', UNLOAD_DLL_DEBUG_INFO),
('DebugString', OUTPUT_DEBUG_STRING_INFO),
('RipInfo', RIP_INFO),
]
assert sizeof(N12_DEBUG_EVENT4DOLLAR_39E) == 84, sizeof(N12_DEBUG_EVENT4DOLLAR_39E)
assert alignment(N12_DEBUG_EVENT4DOLLAR_39E) == 4, alignment(N12_DEBUG_EVENT4DOLLAR_39E)
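# N12_DEBUG_EVENT4DOLLAR_39E is gccxml's name for the anonymous union embedded in
# DEBUG_EVENT as the 'u' member below; which member is valid depends on
# dwDebugEventCode (e.g. EXCEPTION_DEBUG_EVENT selects 'Exception', LOAD_DLL_DEBUG_EVENT selects 'LoadDll').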
_DEBUG_EVENT._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 697
('dwDebugEventCode', DWORD),
('dwProcessId', DWORD),
('dwThreadId', DWORD),
('u', N12_DEBUG_EVENT4DOLLAR_39E),
]
assert sizeof(_DEBUG_EVENT) == 96, sizeof(_DEBUG_EVENT)
assert alignment(_DEBUG_EVENT) == 4, alignment(_DEBUG_EVENT)
LONG = c_long
_LUID._fields_ = [
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 394
('LowPart', DWORD),
('HighPart', LONG),
]
assert sizeof(_LUID) == 8, sizeof(_LUID)
assert alignment(_LUID) == 4, alignment(_LUID)
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 3241
class _LUID_AND_ATTRIBUTES(Structure):
pass
_LUID_AND_ATTRIBUTES._fields_ = [
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 3241
('Luid', LUID),
('Attributes', DWORD),
]
assert sizeof(_LUID_AND_ATTRIBUTES) == 12, sizeof(_LUID_AND_ATTRIBUTES)
assert alignment(_LUID_AND_ATTRIBUTES) == 4, alignment(_LUID_AND_ATTRIBUTES)
LUID_AND_ATTRIBUTES = _LUID_AND_ATTRIBUTES
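# _TOKEN_PRIVILEGES follows the Windows ANYSIZE_ARRAY idiom: 'Privileges' is declared
# with a single element, but callers size the buffer for PrivilegeCount entries in practice.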
_TOKEN_PRIVILEGES._fields_ = [
# C:/PROGRA~1/gccxml/bin/Vc6/Include/winnt.h 4188
('PrivilegeCount', DWORD),
('Privileges', LUID_AND_ATTRIBUTES * 1),
]
assert sizeof(_TOKEN_PRIVILEGES) == 16, sizeof(_TOKEN_PRIVILEGES)
assert alignment(_TOKEN_PRIVILEGES) == 4, alignment(_TOKEN_PRIVILEGES)
_PROCESS_INFORMATION._fields_ = [
# C:/PROGRA~1/MICROS~2/VC98/Include/winbase.h 229
('hProcess', HANDLE),
('hThread', HANDLE),
('dwProcessId', DWORD),
('dwThreadId', DWORD),
]
assert sizeof(_PROCESS_INFORMATION) == 16, sizeof(_PROCESS_INFORMATION)
assert alignment(_PROCESS_INFORMATION) == 4, alignment(_PROCESS_INFORMATION)
| gpl-2.0 |
S-YOU/grumpy | compiler/block_test.py | 1 | 10738 | # coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests Package, Block, BlockVisitor and related classes."""
from __future__ import unicode_literals
import textwrap
import unittest
import pythonparser
from grumpy.compiler import block
from grumpy.compiler import imputil
from grumpy.compiler import imputil_test
from grumpy.compiler import util
class PackageTest(unittest.TestCase):
def testCreate(self):
package = block.Package('foo/bar/baz')
self.assertEqual(package.name, 'foo/bar/baz')
self.assertEqual(package.alias, 'π_fooΓbarΓbaz')
def testCreateGrump(self):
package = block.Package('foo/bar/baz', 'myalias')
self.assertEqual(package.name, 'foo/bar/baz')
self.assertEqual(package.alias, 'myalias')
class BlockTest(unittest.TestCase):
def testAddImport(self):
module_block = _MakeModuleBlock()
func1_block = block.FunctionBlock(module_block, 'func1', {}, False)
func2_block = block.FunctionBlock(func1_block, 'func2', {}, False)
package = func2_block.root.add_import('foo/bar')
self.assertEqual(package.name, '__python__/foo/bar')
self.assertEqual(package.alias, 'π___python__ΓfooΓbar')
self.assertEqual(module_block.imports, {'__python__/foo/bar': package})
def testAddImportRepeated(self):
b = _MakeModuleBlock()
package = b.root.add_import('foo')
self.assertEqual(package.name, '__python__/foo')
self.assertEqual(package.alias, 'π___python__Γfoo')
self.assertEqual(b.imports, {'__python__/foo': package})
package2 = b.root.add_import('foo')
self.assertIs(package, package2)
self.assertEqual(b.imports, {'__python__/foo': package})
def testLoop(self):
b = _MakeModuleBlock()
loop = b.push_loop()
self.assertEqual(loop, b.top_loop())
inner_loop = b.push_loop()
self.assertEqual(inner_loop, b.top_loop())
b.pop_loop()
self.assertEqual(loop, b.top_loop())
def testResolveName(self):
module_block = _MakeModuleBlock()
block_vars = {'foo': block.Var('foo', block.Var.TYPE_LOCAL)}
func1_block = block.FunctionBlock(module_block, 'func1', block_vars, False)
block_vars = {'bar': block.Var('bar', block.Var.TYPE_LOCAL)}
func2_block = block.FunctionBlock(func1_block, 'func2', block_vars, False)
block_vars = {'case': block.Var('case', block.Var.TYPE_LOCAL)}
keyword_block = block.FunctionBlock(
module_block, 'keyword_func', block_vars, False)
class1_block = block.ClassBlock(module_block, 'Class1', set())
class2_block = block.ClassBlock(func1_block, 'Class2', set())
self.assertRegexpMatches(self._ResolveName(module_block, 'foo'),
r'ResolveGlobal\b.*foo')
self.assertRegexpMatches(self._ResolveName(module_block, 'bar'),
r'ResolveGlobal\b.*bar')
self.assertRegexpMatches(self._ResolveName(module_block, 'baz'),
r'ResolveGlobal\b.*baz')
self.assertRegexpMatches(self._ResolveName(func1_block, 'foo'),
r'CheckLocal\b.*foo')
self.assertRegexpMatches(self._ResolveName(func1_block, 'bar'),
r'ResolveGlobal\b.*bar')
self.assertRegexpMatches(self._ResolveName(func1_block, 'baz'),
r'ResolveGlobal\b.*baz')
self.assertRegexpMatches(self._ResolveName(func2_block, 'foo'),
r'CheckLocal\b.*foo')
self.assertRegexpMatches(self._ResolveName(func2_block, 'bar'),
r'CheckLocal\b.*bar')
self.assertRegexpMatches(self._ResolveName(func2_block, 'baz'),
r'ResolveGlobal\b.*baz')
self.assertRegexpMatches(self._ResolveName(class1_block, 'foo'),
r'ResolveClass\(.*, nil, .*foo')
self.assertRegexpMatches(self._ResolveName(class2_block, 'foo'),
r'ResolveClass\(.*, µfoo, .*foo')
self.assertRegexpMatches(self._ResolveName(keyword_block, 'case'),
r'CheckLocal\b.*µcase, "case"')
def _ResolveName(self, b, name):
writer = util.Writer()
b.resolve_name(writer, name)
return writer.getvalue()
class BlockVisitorTest(unittest.TestCase):
def testAssignSingle(self):
visitor = block.BlockVisitor()
visitor.visit(_ParseStmt('foo = 3'))
self.assertEqual(visitor.vars.keys(), ['foo'])
self.assertRegexpMatches(visitor.vars['foo'].init_expr, r'UnboundLocal')
def testAssignMultiple(self):
visitor = block.BlockVisitor()
visitor.visit(_ParseStmt('foo = bar = 123'))
self.assertEqual(sorted(visitor.vars.keys()), ['bar', 'foo'])
self.assertRegexpMatches(visitor.vars['foo'].init_expr, r'UnboundLocal')
self.assertRegexpMatches(visitor.vars['bar'].init_expr, r'UnboundLocal')
def testAssignTuple(self):
visitor = block.BlockVisitor()
visitor.visit(_ParseStmt('foo, bar = "a", "b"'))
self.assertEqual(sorted(visitor.vars.keys()), ['bar', 'foo'])
self.assertRegexpMatches(visitor.vars['foo'].init_expr, r'UnboundLocal')
self.assertRegexpMatches(visitor.vars['bar'].init_expr, r'UnboundLocal')
def testAssignNested(self):
visitor = block.BlockVisitor()
visitor.visit(_ParseStmt('foo, (bar, baz) = "a", ("b", "c")'))
self.assertEqual(sorted(visitor.vars.keys()), ['bar', 'baz', 'foo'])
self.assertRegexpMatches(visitor.vars['foo'].init_expr, r'UnboundLocal')
self.assertRegexpMatches(visitor.vars['bar'].init_expr, r'UnboundLocal')
self.assertRegexpMatches(visitor.vars['baz'].init_expr, r'UnboundLocal')
def testAugAssignSingle(self):
visitor = block.BlockVisitor()
visitor.visit(_ParseStmt('foo += 3'))
self.assertEqual(visitor.vars.keys(), ['foo'])
self.assertRegexpMatches(visitor.vars['foo'].init_expr, r'UnboundLocal')
def testVisitClassDef(self):
visitor = block.BlockVisitor()
visitor.visit(_ParseStmt('class Foo(object): pass'))
self.assertEqual(visitor.vars.keys(), ['Foo'])
self.assertRegexpMatches(visitor.vars['Foo'].init_expr, r'UnboundLocal')
def testExceptHandler(self):
visitor = block.BlockVisitor()
visitor.visit(_ParseStmt(textwrap.dedent("""\
try:
pass
except Exception as foo:
pass
except TypeError as bar:
pass""")))
self.assertEqual(sorted(visitor.vars.keys()), ['bar', 'foo'])
self.assertRegexpMatches(visitor.vars['foo'].init_expr, r'UnboundLocal')
self.assertRegexpMatches(visitor.vars['bar'].init_expr, r'UnboundLocal')
def testFor(self):
visitor = block.BlockVisitor()
visitor.visit(_ParseStmt('for i in foo: pass'))
self.assertEqual(visitor.vars.keys(), ['i'])
self.assertRegexpMatches(visitor.vars['i'].init_expr, r'UnboundLocal')
def testFunctionDef(self):
visitor = block.BlockVisitor()
visitor.visit(_ParseStmt('def foo(): pass'))
self.assertEqual(visitor.vars.keys(), ['foo'])
self.assertRegexpMatches(visitor.vars['foo'].init_expr, r'UnboundLocal')
def testImport(self):
visitor = block.BlockVisitor()
visitor.visit(_ParseStmt('import foo.bar, baz'))
self.assertEqual(sorted(visitor.vars.keys()), ['baz', 'foo'])
self.assertRegexpMatches(visitor.vars['foo'].init_expr, r'UnboundLocal')
self.assertRegexpMatches(visitor.vars['baz'].init_expr, r'UnboundLocal')
def testImportFrom(self):
visitor = block.BlockVisitor()
visitor.visit(_ParseStmt('from foo.bar import baz, qux'))
self.assertEqual(sorted(visitor.vars.keys()), ['baz', 'qux'])
self.assertRegexpMatches(visitor.vars['baz'].init_expr, r'UnboundLocal')
self.assertRegexpMatches(visitor.vars['qux'].init_expr, r'UnboundLocal')
def testGlobal(self):
visitor = block.BlockVisitor()
visitor.visit(_ParseStmt('global foo, bar'))
self.assertEqual(sorted(visitor.vars.keys()), ['bar', 'foo'])
self.assertIsNone(visitor.vars['foo'].init_expr)
self.assertIsNone(visitor.vars['bar'].init_expr)
def testGlobalIsParam(self):
visitor = block.BlockVisitor()
visitor.vars['foo'] = block.Var('foo', block.Var.TYPE_PARAM, arg_index=0)
self.assertRaisesRegexp(util.ParseError, 'is parameter and global',
visitor.visit, _ParseStmt('global foo'))
def testGlobalUsedPriorToDeclaration(self):
node = pythonparser.parse('foo = 42\nglobal foo')
visitor = block.BlockVisitor()
self.assertRaisesRegexp(util.ParseError, 'used prior to global declaration',
visitor.generic_visit, node)
class FunctionBlockVisitorTest(unittest.TestCase):
def testArgs(self):
func = _ParseStmt('def foo(bar, baz, *args, **kwargs): pass')
visitor = block.FunctionBlockVisitor(func)
self.assertIn('bar', visitor.vars)
self.assertIn('baz', visitor.vars)
self.assertIn('args', visitor.vars)
self.assertIn('kwargs', visitor.vars)
self.assertRegexpMatches(visitor.vars['bar'].init_expr, r'Args\[0\]')
self.assertRegexpMatches(visitor.vars['baz'].init_expr, r'Args\[1\]')
self.assertRegexpMatches(visitor.vars['args'].init_expr, r'Args\[2\]')
self.assertRegexpMatches(visitor.vars['kwargs'].init_expr, r'Args\[3\]')
def testArgsDuplicate(self):
func = _ParseStmt('def foo(bar, baz, bar=None): pass')
self.assertRaisesRegexp(util.ParseError, 'duplicate argument',
block.FunctionBlockVisitor, func)
def testYield(self):
visitor = block.FunctionBlockVisitor(_ParseStmt('def foo(): pass'))
visitor.visit(_ParseStmt('yield "foo"'))
self.assertTrue(visitor.is_generator)
def testYieldExpr(self):
visitor = block.FunctionBlockVisitor(_ParseStmt('def foo(): pass'))
visitor.visit(_ParseStmt('foo = (yield)'))
self.assertTrue(visitor.is_generator)
self.assertEqual(sorted(visitor.vars.keys()), ['foo'])
self.assertRegexpMatches(visitor.vars['foo'].init_expr, r'UnboundLocal')
def _MakeModuleBlock():
return block.ModuleBlock(imputil_test.MockPath(), '__main__',
'<test>', '', imputil.FutureFeatures())
def _ParseStmt(stmt_str):
return pythonparser.parse(stmt_str).body[0]
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
EvanK/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway_facts.py | 44 | 4573 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: ec2_vpc_nat_gateway_facts
short_description: Retrieves AWS VPC Managed Nat Gateway details using AWS methods.
description:
- Gets various details related to AWS VPC Managed Nat Gateways
version_added: "2.3"
requirements: [ boto3 ]
options:
nat_gateway_ids:
description:
- Get details of specific nat gateway IDs
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNatGateways.html)
for possible filters.
author: Karen Cheng (@Etherdaemon)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple example of listing all nat gateways
- name: List all managed nat gateways in ap-southeast-2
ec2_vpc_nat_gateway_facts:
region: ap-southeast-2
register: all_ngws
- name: Debugging the result
debug:
msg: "{{ all_ngws.result }}"
- name: Get details on specific nat gateways
ec2_vpc_nat_gateway_facts:
nat_gateway_ids:
- nat-1234567891234567
- nat-7654321987654321
region: ap-southeast-2
register: specific_ngws
- name: Get all nat gateways with specific filters
ec2_vpc_nat_gateway_facts:
region: ap-southeast-2
filters:
state: ['pending']
register: pending_ngws
- name: Get nat gateways with specific filter
ec2_vpc_nat_gateway_facts:
region: ap-southeast-2
filters:
subnet-id: subnet-12345678
state: ['available']
register: existing_nat_gateways
'''
RETURN = '''
result:
description: The result of the describe, converted to ansible snake case style.
See http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.Client.describe_nat_gateways for the response.
returned: success
type: list
'''
import json
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (ec2_argument_spec, get_aws_connection_info, boto3_conn,
camel_dict_to_snake_dict, ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, HAS_BOTO3)
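# boto3 responses contain datetime objects that json.dumps() cannot serialize directly;
# date_handler converts them to ISO 8601 strings during the JSON round-trip below.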
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
def get_nat_gateways(client, module, nat_gateway_id=None):
params = dict()
nat_gateways = list()
params['Filter'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
params['NatGatewayIds'] = module.params.get('nat_gateway_ids')
try:
result = json.loads(json.dumps(client.describe_nat_gateways(**params), default=date_handler))
except Exception as e:
        module.fail_json(msg=str(e))
for gateway in result['NatGateways']:
# Turn the boto3 result into ansible_friendly_snaked_names
converted_gateway = camel_dict_to_snake_dict(gateway)
if 'tags' in converted_gateway:
# Turn the boto3 result into ansible friendly tag dictionary
converted_gateway['tags'] = boto3_tag_list_to_ansible_dict(converted_gateway['tags'])
nat_gateways.append(converted_gateway)
return nat_gateways
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
filters=dict(default={}, type='dict'),
nat_gateway_ids=dict(default=[], type='list'),
)
)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
# Validate Requirements
if not HAS_BOTO3:
module.fail_json(msg='botocore/boto3 is required.')
try:
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
except botocore.exceptions.NoCredentialsError as e:
module.fail_json(msg=str(e))
results = get_nat_gateways(connection, module)
module.exit_json(result=results)
if __name__ == '__main__':
main()
| gpl-3.0 |
rixrix/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/trie/datrie.py | 785 | 1166 | from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
chars = set()
for key in data.keys():
if not isinstance(key, text_type):
raise TypeError("All keys must be strings")
for char in key:
chars.add(char)
self._data = DATrie("".join(chars))
for key, value in data.items():
self._data[key] = value
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
raise NotImplementedError()
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
return self._data.keys(prefix)
def has_keys_with_prefix(self, prefix):
return self._data.has_keys_with_prefix(prefix)
def longest_prefix(self, prefix):
return self._data.longest_prefix(prefix)
def longest_prefix_item(self, prefix):
return self._data.longest_prefix_item(prefix)
| mpl-2.0 |
liangazhou/django-rdp | packages/Django-1.8.6/tests/inspectdb/models.py | 17 | 2720 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class People(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey('self')
class Message(models.Model):
from_field = models.ForeignKey(People, db_column='from_id')
class PeopleData(models.Model):
people_pk = models.ForeignKey(People, primary_key=True)
ssn = models.CharField(max_length=11)
class PeopleMoreData(models.Model):
people_unique = models.ForeignKey(People, unique=True)
license = models.CharField(max_length=255)
class DigitsInColumnName(models.Model):
all_digits = models.CharField(max_length=11, db_column='123')
leading_digit = models.CharField(max_length=11, db_column='4extra')
leading_digits = models.CharField(max_length=11, db_column='45extra')
class SpecialName(models.Model):
field = models.IntegerField(db_column='field')
# Underscores
field_field_0 = models.IntegerField(db_column='Field_')
field_field_1 = models.IntegerField(db_column='Field__')
field_field_2 = models.IntegerField(db_column='__field')
# Other chars
prc_x = models.IntegerField(db_column='prc(%) x')
non_ascii = models.IntegerField(db_column='tamaño')
class Meta:
db_table = "inspectdb_special.table name"
class ColumnTypes(models.Model):
id = models.AutoField(primary_key=True)
big_int_field = models.BigIntegerField()
bool_field = models.BooleanField(default=False)
null_bool_field = models.NullBooleanField()
char_field = models.CharField(max_length=10)
null_char_field = models.CharField(max_length=10, blank=True, null=True)
comma_separated_int_field = models.CommaSeparatedIntegerField(max_length=99)
date_field = models.DateField()
date_time_field = models.DateTimeField()
decimal_field = models.DecimalField(max_digits=6, decimal_places=1)
email_field = models.EmailField()
file_field = models.FileField(upload_to="unused")
file_path_field = models.FilePathField()
float_field = models.FloatField()
int_field = models.IntegerField()
ip_address_field = models.IPAddressField()
gen_ip_adress_field = models.GenericIPAddressField(protocol="ipv4")
pos_int_field = models.PositiveIntegerField()
pos_small_int_field = models.PositiveSmallIntegerField()
slug_field = models.SlugField()
small_int_field = models.SmallIntegerField()
text_field = models.TextField()
time_field = models.TimeField()
url_field = models.URLField()
class UniqueTogether(models.Model):
field1 = models.IntegerField()
field2 = models.CharField(max_length=10)
class Meta:
unique_together = ('field1', 'field2')
| apache-2.0 |
MQQiang/kbengine | kbe/res/scripts/common/Lib/encodings/rot_13.py | 155 | 2428 | #!/usr/bin/env python
""" Python Character Mapping Codec for ROT13.
This codec de/encodes from str to str.
Written by Marc-Andre Lemburg ([email protected]).
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return (input.translate(rot13_map), len(input))
def decode(self, input, errors='strict'):
return (input.translate(rot13_map), len(input))
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return input.translate(rot13_map)
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return input.translate(rot13_map)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='rot-13',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
_is_text_encoding=False,
)
### Map
rot13_map = codecs.make_identity_dict(range(256))
rot13_map.update({
0x0041: 0x004e,
0x0042: 0x004f,
0x0043: 0x0050,
0x0044: 0x0051,
0x0045: 0x0052,
0x0046: 0x0053,
0x0047: 0x0054,
0x0048: 0x0055,
0x0049: 0x0056,
0x004a: 0x0057,
0x004b: 0x0058,
0x004c: 0x0059,
0x004d: 0x005a,
0x004e: 0x0041,
0x004f: 0x0042,
0x0050: 0x0043,
0x0051: 0x0044,
0x0052: 0x0045,
0x0053: 0x0046,
0x0054: 0x0047,
0x0055: 0x0048,
0x0056: 0x0049,
0x0057: 0x004a,
0x0058: 0x004b,
0x0059: 0x004c,
0x005a: 0x004d,
0x0061: 0x006e,
0x0062: 0x006f,
0x0063: 0x0070,
0x0064: 0x0071,
0x0065: 0x0072,
0x0066: 0x0073,
0x0067: 0x0074,
0x0068: 0x0075,
0x0069: 0x0076,
0x006a: 0x0077,
0x006b: 0x0078,
0x006c: 0x0079,
0x006d: 0x007a,
0x006e: 0x0061,
0x006f: 0x0062,
0x0070: 0x0063,
0x0071: 0x0064,
0x0072: 0x0065,
0x0073: 0x0066,
0x0074: 0x0067,
0x0075: 0x0068,
0x0076: 0x0069,
0x0077: 0x006a,
0x0078: 0x006b,
0x0079: 0x006c,
0x007a: 0x006d,
})
### Filter API
def rot13(infile, outfile):
outfile.write(codecs.encode(infile.read(), 'rot-13'))
if __name__ == '__main__':
import sys
rot13(sys.stdin, sys.stdout)
| lgpl-3.0 |
bacaldwell/ironic | ironic/api/controllers/v1/__init__.py | 2 | 6897 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Version 1 of the Ironic API
Specification can be found at doc/source/webapi/v1.rst
"""
import pecan
from pecan import rest
from webob import exc
from wsme import types as wtypes
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import chassis
from ironic.api.controllers.v1 import driver
from ironic.api.controllers.v1 import node
from ironic.api.controllers.v1 import port
from ironic.api.controllers.v1 import versions
from ironic.api import expose
from ironic.common.i18n import _
BASE_VERSION = versions.BASE_VERSION
MIN_VER = base.Version(
{base.Version.string: versions.MIN_VERSION_STRING},
versions.MIN_VERSION_STRING, versions.MAX_VERSION_STRING)
MAX_VER = base.Version(
{base.Version.string: versions.MAX_VERSION_STRING},
versions.MIN_VERSION_STRING, versions.MAX_VERSION_STRING)
class MediaType(base.APIBase):
"""A media type representation."""
base = wtypes.text
type = wtypes.text
def __init__(self, base, type):
self.base = base
self.type = type
class V1(base.APIBase):
"""The representation of the version 1 of the API."""
id = wtypes.text
"""The ID of the version, also acts as the release number"""
media_types = [MediaType]
"""An array of supported media types for this version"""
links = [link.Link]
"""Links that point to a specific URL for this version and documentation"""
chassis = [link.Link]
"""Links to the chassis resource"""
nodes = [link.Link]
"""Links to the nodes resource"""
ports = [link.Link]
"""Links to the ports resource"""
drivers = [link.Link]
"""Links to the drivers resource"""
@staticmethod
def convert():
v1 = V1()
v1.id = "v1"
v1.links = [link.Link.make_link('self', pecan.request.public_url,
'v1', '', bookmark=True),
link.Link.make_link('describedby',
'http://docs.openstack.org',
'developer/ironic/dev',
'api-spec-v1.html',
bookmark=True, type='text/html')
]
v1.media_types = [MediaType('application/json',
'application/vnd.openstack.ironic.v1+json')]
v1.chassis = [link.Link.make_link('self', pecan.request.public_url,
'chassis', ''),
link.Link.make_link('bookmark',
pecan.request.public_url,
'chassis', '',
bookmark=True)
]
v1.nodes = [link.Link.make_link('self', pecan.request.public_url,
'nodes', ''),
link.Link.make_link('bookmark',
pecan.request.public_url,
'nodes', '',
bookmark=True)
]
v1.ports = [link.Link.make_link('self', pecan.request.public_url,
'ports', ''),
link.Link.make_link('bookmark',
pecan.request.public_url,
'ports', '',
bookmark=True)
]
v1.drivers = [link.Link.make_link('self', pecan.request.public_url,
'drivers', ''),
link.Link.make_link('bookmark',
pecan.request.public_url,
'drivers', '',
bookmark=True)
]
return v1
class Controller(rest.RestController):
"""Version 1 API controller root."""
nodes = node.NodesController()
ports = port.PortsController()
chassis = chassis.ChassisController()
drivers = driver.DriversController()
@expose.expose(V1)
def get(self):
        # NOTE: The reason why convert() is being called for every
# request is because we need to get the host url from
# the request object to make the links.
return V1.convert()
def _check_version(self, version, headers=None):
if headers is None:
headers = {}
# ensure that major version in the URL matches the header
if version.major != BASE_VERSION:
raise exc.HTTPNotAcceptable(_(
"Mutually exclusive versions requested. Version %(ver)s "
"requested but not supported by this service. The supported "
"version range is: [%(min)s, %(max)s].") %
{'ver': version, 'min': versions.MIN_VERSION_STRING,
'max': versions.MAX_VERSION_STRING},
headers=headers)
# ensure the minor version is within the supported range
if version < MIN_VER or version > MAX_VER:
raise exc.HTTPNotAcceptable(_(
"Version %(ver)s was requested but the minor version is not "
"supported by this service. The supported version range is: "
"[%(min)s, %(max)s].") %
{'ver': version, 'min': versions.MIN_VERSION_STRING,
'max': versions.MAX_VERSION_STRING},
headers=headers)
@pecan.expose()
def _route(self, args):
v = base.Version(pecan.request.headers, versions.MIN_VERSION_STRING,
versions.MAX_VERSION_STRING)
# Always set the min and max headers
pecan.response.headers[base.Version.min_string] = (
versions.MIN_VERSION_STRING)
pecan.response.headers[base.Version.max_string] = (
versions.MAX_VERSION_STRING)
# assert that requested version is supported
self._check_version(v, pecan.response.headers)
pecan.response.headers[base.Version.string] = str(v)
pecan.request.version = v
return super(Controller, self)._route(args)
__all__ = ('Controller',)
| apache-2.0 |
porduna/weblabdeusto | server/launch/sample_balanced2_concurrent_experiments/main_machine/lab_and_experiment1/experiment48/server_config.py | 968 | 1526 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
weblab_xilinx_experiment_xilinx_device = 'FPGA'
weblab_xilinx_experiment_port_number = 1
# This should be something like this:
# import os as _os
# xilinx_home = _os.getenv('XILINX_HOME')
# if xilinx_home == None:
# if _os.name == 'nt':
# xilinx_home = r'C:\Program Files\Xilinx'
# elif _os.name == 'posix':
# xilinx_home = r"/home/nctrun/Xilinx"
#
# if _os.name == 'nt':
# xilinx_impact_full_path = [xilinx_home + r'\bin\nt\impact']
# elif _os.name == 'posix':
# xilinx_impact_full_path = [xilinx_home + r'/bin/lin/impact']
# But for testing we are going to fake it:
xilinx_home = "."
xilinx_impact_full_path = ["python","./tests/unit/weblab/experiment/devices/xilinx_impact/fake_impact.py" ]
xilinx_device_to_program = 'XilinxImpact' # 'JTagBlazer', 'DigilentAdept'
xilinx_device_to_send_commands = 'SerialPort' # 'HttpDevice'
digilent_adept_full_path = ["python","./test/unit/weblab/experiment/devices/digilent_adept/fake_digilent_adept.py" ]
digilent_adept_batch_content = """something with the variable $FILE"""
xilinx_http_device_ip_FPGA = "192.168.50.138"
xilinx_http_device_port_FPGA = 80
xilinx_http_device_app_FPGA = ""
xilinx_batch_content_FPGA = """setMode -bs
setCable -port auto
addDevice -position 1 -file $FILE
Program -p 1
exit
"""
# Though it is not really an FPGA, the webcam url var name depends on the device,
# specified above.
fpga_webcam_url = '''https://www.weblab.deusto.es/webcam/fpga0/image.jpg'''
| bsd-2-clause |
jonshern/raspberrypi-indoorhealthmonitor | lib/awsiot.py | 1 | 2251 | import boto3
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient
import sys
import logging
import time
import getopt
class AWSIOT(object):
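    """Thin helper around AWSIoTMQTTClient that authenticates over websockets
    with temporary Cognito credentials.

    Illustrative usage sketch -- the endpoint, CA path and identity pool id are
    placeholders, not values taken from this project:

        iot = AWSIOT('a1b2c3d4e5f6g7.iot.us-east-1.amazonaws.com',
                     '/home/pi/certs/root-CA.crt',
                     'us-east-1:00000000-0000-0000-0000-000000000000')
        iot.sendmessage('healthmonitor/readings', '{"temperature": 21.5}')
    """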
host = ""
rootCAPath = ""
cognitoIdentityPoolID = ""
accesskeyid = ""
secretkeyid = ""
sessiontoken = ""
awsiotmqttclient = ""
def __init__(self, host, rootCAPath, cognitoIdentityPoolID):
self.host = host
self.rootCAPath = rootCAPath
self.cognitoIdentityPoolID = cognitoIdentityPoolID
# Cognito auth
identityPoolID = cognitoIdentityPoolID
region = host.split('.')[2]
cognitoIdentityClient = boto3.client('cognito-identity', region_name=region)
# identityPoolInfo = cognitoIdentityClient.describe_identity_pool(IdentityPoolId=identityPoolID)
# print identityPoolInfo
temporaryIdentityId = cognitoIdentityClient.get_id(IdentityPoolId=identityPoolID)
identityID = temporaryIdentityId["IdentityId"]
temporaryCredentials = cognitoIdentityClient.get_credentials_for_identity(IdentityId=identityID)
self.accesskeyid = temporaryCredentials["Credentials"]["AccessKeyId"]
self.secretkeyid = temporaryCredentials["Credentials"]["SecretKey"]
self.sessiontoken = temporaryCredentials["Credentials"]["SessionToken"]
# Init AWSIoTMQTTClient
myAWSIoTMQTTClient = AWSIoTMQTTClient("healthmonitoriot", useWebsocket=True)
# AWSIoTMQTTClient configuration
myAWSIoTMQTTClient.configureEndpoint(host, 443)
myAWSIoTMQTTClient.configureCredentials(rootCAPath)
myAWSIoTMQTTClient.configureIAMCredentials(AccessKeyId, SecretKey, SessionToken)
myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20)
myAWSIoTMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing
myAWSIoTMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz
myAWSIoTMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec
myAWSIoTMQTTClient.configureMQTTOperationTimeout(5) # 5 sec
def sendmessage(self, topic, message):
# Connect and subscribe to AWS IoT
myAWSIoTMQTTClient.connect()
myAWSIoTMQTTClient.publish(topic, message, 1)
| unlicense |