# Code-dataset dump: one Python source file per record. Schema from the
# original viewer header: repo_name, path, copies, size, content, license,
# plus derived statistics columns (var_hash, doc_hash, line_mean, line_max,
# alpha_frac, autogenerated) that are dropped below; repo, path, copies, size
# and license are kept as boundary comments between records.

# ==== insomnia-lab/calibre :: src/calibre/ebooks/mobi/writer2/serializer.py (copies: 9, size: 14926) ====
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import re, unicodedata
from calibre.ebooks.oeb.base import (OEB_DOCS, XHTML, XHTML_NS, XML_NS,
namespace, prefixname, urlnormalize)
from calibre.ebooks.mobi.mobiml import MBP_NS
from calibre.ebooks.mobi.utils import is_guide_ref_start
from collections import defaultdict
from urlparse import urldefrag
from cStringIO import StringIO
class Serializer(object):
NSRMAP = {'': None, XML_NS: 'xml', XHTML_NS: '', MBP_NS: 'mbp'}
def __init__(self, oeb, images, is_periodical, write_page_breaks_after_item=True):
'''
        Write all the HTML markup in oeb into a single in-memory buffer
        containing a single html document, with links replaced by offsets into
        the buffer.
:param oeb: OEBBook object that encapsulates the document to be
processed.
:param images: Mapping of image hrefs (urlnormalized) to image record
indices.
:param write_page_breaks_after_item: If True a MOBIpocket pagebreak tag
is written after every element of the spine in ``oeb``.
'''
self.oeb = oeb
# Map of image hrefs to image index in the MOBI file
self.images = images
self.used_images = set()
self.logger = oeb.logger
self.is_periodical = is_periodical
self.write_page_breaks_after_item = write_page_breaks_after_item
# If not None, this is a number pointing to the location at which to
# open the MOBI file on the Kindle
self.start_offset = None
# Mapping of hrefs (urlnormalized) to the offset in the buffer where
# the resource pointed to by the href lives. Used at the end to fill in
# the correct values into all filepos="..." links.
self.id_offsets = {}
# Mapping of hrefs (urlnormalized) to a list of offsets into the buffer
# where filepos="..." elements are written corresponding to links that
# point to the href. This is used at the end to fill in the correct values.
self.href_offsets = defaultdict(list)
# List of offsets in the buffer of non linear items in the spine. These
# become uncrossable breaks in the MOBI
self.breaks = []
self.find_blocks()
def find_blocks(self):
'''
Mark every item in the spine if it is the start/end of a
section/article, so that it can be wrapped in divs appropriately.
'''
for item in self.oeb.spine:
item.is_section_start = item.is_section_end = False
item.is_article_start = item.is_article_end = False
def spine_item(tocitem):
href = urldefrag(tocitem.href)[0]
for item in self.oeb.spine:
if item.href == href:
return item
for item in self.oeb.toc.iterdescendants():
if item.klass == 'section':
articles = list(item)
if not articles: continue
spine_item(item).is_section_start = True
for i, article in enumerate(articles):
si = spine_item(article)
if si is not None:
si.is_article_start = True
items = list(self.oeb.spine)
in_sec = in_art = False
for i, item in enumerate(items):
try:
prev_item = items[i-1]
except:
prev_item = None
if in_art and item.is_article_start == True:
prev_item.is_article_end = True
in_art = False
if in_sec and item.is_section_start == True:
prev_item.is_section_end = True
in_sec = False
if item.is_section_start: in_sec = True
if item.is_article_start: in_art = True
item.is_section_end = item.is_article_end = True
def __call__(self):
'''
Return the document serialized as a single UTF-8 encoded bytestring.
'''
buf = self.buf = StringIO()
buf.write(b'<html>')
self.serialize_head()
self.serialize_body()
buf.write(b'</html>')
self.end_offset = buf.tell()
self.fixup_links()
if self.start_offset is None and not self.is_periodical:
# If we don't set a start offset, the stupid Kindle will
# open the book at the location of the first IndexEntry, which
# could be anywhere. So ensure the book is always opened at the
# beginning, instead.
self.start_offset = self.body_start_offset
return buf.getvalue()
def serialize_head(self):
buf = self.buf
buf.write(b'<head>')
if len(self.oeb.guide) > 0:
self.serialize_guide()
buf.write(b'</head>')
def serialize_guide(self):
'''
The Kindle decides where to open a book based on the presence of
an item in the guide that looks like
<reference type="text" title="Start" href="chapter-one.xhtml"/>
Similarly an item with type="toc" controls where the Goto Table of
Contents operation on the kindle goes.
'''
buf = self.buf
hrefs = self.oeb.manifest.hrefs
buf.write(b'<guide>')
for ref in self.oeb.guide.values():
path = urldefrag(ref.href)[0]
if path not in hrefs or hrefs[path].media_type not in OEB_DOCS:
continue
buf.write(b'<reference type="')
if ref.type.startswith('other.') :
self.serialize_text(ref.type.replace('other.',''), quot=True)
else:
self.serialize_text(ref.type, quot=True)
buf.write(b'" ')
if ref.title is not None:
buf.write(b'title="')
self.serialize_text(ref.title, quot=True)
buf.write(b'" ')
if is_guide_ref_start(ref):
self._start_href = ref.href
self.serialize_href(ref.href)
# Space required or won't work, I kid you not
buf.write(b' />')
buf.write(b'</guide>')
def serialize_href(self, href, base=None):
'''
Serialize the href attribute of an <a> or <reference> tag. It is
        serialized as filepos="0000000000" and a pointer to its location is
stored in self.href_offsets so that the correct value can be filled in
at the end.
'''
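        # Illustrative flow (not in the original source): for a link to
        # 'chapter2.html#s1', ten zero digits are written as a placeholder and
        # the placeholder's buffer position is recorded in
        # self.href_offsets['chapter2.html#s1']; fixup_links() later seeks
        # back and overwrites the zeros with the real target offset,
        # zero-padded to ten digits.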
hrefs = self.oeb.manifest.hrefs
try:
path, frag = urldefrag(urlnormalize(href))
except ValueError:
# Unparseable URL
return False
if path and base:
path = base.abshref(path)
if path and path not in hrefs:
return False
buf = self.buf
item = hrefs[path] if path else None
if item and item.spine_position is None:
return False
path = item.href if item else base.href
href = '#'.join((path, frag)) if frag else path
buf.write(b'filepos=')
self.href_offsets[href].append(buf.tell())
buf.write(b'0000000000')
return True
def serialize_body(self):
'''
Serialize all items in the spine of the document. Non linear items are
moved to the end.
'''
buf = self.buf
def serialize_toc_level(tocref, href=None):
# add the provided toc level to the output stream
# if href is provided add a link ref to the toc level output (e.g. feed_0/index.html)
if href is not None:
# resolve the section url in id_offsets
buf.write('<mbp:pagebreak />')
self.id_offsets[urlnormalize(href)] = buf.tell()
if tocref.klass == "periodical":
buf.write('<div> <div height="1em"></div>')
else:
t = tocref.title
if isinstance(t, unicode):
t = t.encode('utf-8')
buf.write('<div></div> <div> <h2 height="1em"><font size="+2"><b>'
+t+'</b></font></h2> <div height="1em"></div>')
buf.write('<ul>')
for tocitem in tocref.nodes:
buf.write('<li><a filepos=')
itemhref = tocitem.href
if tocref.klass == 'periodical':
# This is a section node.
# For periodical tocs, the section urls are like r'feed_\d+/index.html'
                    # We don't want to point to the start of the first article
# so we change the href.
itemhref = re.sub(r'article_\d+/', '', itemhref)
self.href_offsets[itemhref].append(buf.tell())
buf.write('0000000000')
buf.write(' ><font size="+1"><b><u>')
t = tocitem.title
if isinstance(t, unicode):
t = t.encode('utf-8')
buf.write(t)
buf.write('</u></b></font></a></li>')
buf.write('</ul><div height="1em"></div></div><mbp:pagebreak />')
self.anchor_offset = buf.tell()
buf.write(b'<body>')
self.body_start_offset = buf.tell()
if self.is_periodical:
top_toc = self.oeb.toc.nodes[0]
serialize_toc_level(top_toc)
spine = [item for item in self.oeb.spine if item.linear]
spine.extend([item for item in self.oeb.spine if not item.linear])
for item in spine:
if self.is_periodical and item.is_section_start:
for section_toc in top_toc.nodes:
if urlnormalize(item.href) == section_toc.href:
# create section url of the form r'feed_\d+/index.html'
section_url = re.sub(r'article_\d+/', '', section_toc.href)
serialize_toc_level(section_toc, section_url)
section_toc.href = section_url
break
self.serialize_item(item)
self.body_end_offset = buf.tell()
buf.write(b'</body>')
def serialize_item(self, item):
'''
Serialize an individual item from the spine of the input document.
A reference to this item is stored in self.href_offsets
'''
buf = self.buf
if not item.linear:
self.breaks.append(buf.tell() - 1)
self.id_offsets[urlnormalize(item.href)] = buf.tell()
if item.is_section_start:
buf.write(b'<a ></a> ')
if item.is_article_start:
buf.write(b'<a ></a> <a ></a>')
for elem in item.data.find(XHTML('body')):
self.serialize_elem(elem, item)
if self.write_page_breaks_after_item:
buf.write(b'<mbp:pagebreak/>')
if item.is_article_end:
# Kindle periodical article end marker
buf.write(b'<a ></a> <a ></a>')
if item.is_section_end:
buf.write(b' <a ></a>')
self.anchor_offset = None
def serialize_elem(self, elem, item, nsrmap=NSRMAP):
buf = self.buf
if not isinstance(elem.tag, basestring) \
or namespace(elem.tag) not in nsrmap:
return
tag = prefixname(elem.tag, nsrmap)
# Previous layers take care of @name
id_ = elem.attrib.pop('id', None)
if id_:
href = '#'.join((item.href, id_))
offset = self.anchor_offset or buf.tell()
key = urlnormalize(href)
# Only set this id_offset if it wasn't previously seen
self.id_offsets[key] = self.id_offsets.get(key, offset)
if self.anchor_offset is not None and \
tag == 'a' and not elem.attrib and \
not len(elem) and not elem.text:
return
self.anchor_offset = buf.tell()
buf.write(b'<')
buf.write(tag.encode('utf-8'))
if elem.attrib:
for attr, val in elem.attrib.items():
if namespace(attr) not in nsrmap:
continue
attr = prefixname(attr, nsrmap)
buf.write(b' ')
if attr == 'href':
if self.serialize_href(val, item):
continue
elif attr == 'src':
href = urlnormalize(item.abshref(val))
if href in self.images:
index = self.images[href]
self.used_images.add(href)
buf.write(b'recindex="%05d"' % index)
continue
buf.write(attr.encode('utf-8'))
buf.write(b'="')
self.serialize_text(val, quot=True)
buf.write(b'"')
buf.write(b'>')
if elem.text or len(elem) > 0:
if elem.text:
self.anchor_offset = None
self.serialize_text(elem.text)
for child in elem:
self.serialize_elem(child, item)
if child.tail:
self.anchor_offset = None
self.serialize_text(child.tail)
buf.write(b'</%s>' % tag.encode('utf-8'))
def serialize_text(self, text, quot=False):
        text = text.replace('&', '&amp;')
        text = text.replace('<', '&lt;')
        text = text.replace('>', '&gt;')
        text = text.replace(u'\u00AD', '') # Soft-hyphen
        if quot:
            text = text.replace('"', '&quot;')
if isinstance(text, unicode):
text = unicodedata.normalize('NFC', text)
self.buf.write(text.encode('utf-8'))
def fixup_links(self):
'''
Fill in the correct values for all filepos="..." links with the offsets
of the linked to content (as stored in id_offsets).
'''
buf = self.buf
id_offsets = self.id_offsets
start_href = getattr(self, '_start_href', None)
for href, hoffs in self.href_offsets.items():
is_start = (href and href == start_href)
# Iterate over all filepos items
if href not in id_offsets:
self.logger.warn('Hyperlink target %r not found' % href)
# Link to the top of the document, better than just ignoring
href, _ = urldefrag(href)
if href in self.id_offsets:
ioff = self.id_offsets[href]
if is_start:
self.start_offset = ioff
for hoff in hoffs:
buf.seek(hoff)
buf.write(b'%010d' % ioff)
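# Minimal usage sketch (hypothetical; the oeb book and images mapping come
# from the surrounding MOBI writer pipeline, not from this module):
#   s = Serializer(oeb, images, is_periodical=False)
#   raw = s()                # single UTF-8 bytestring, filepos links fixed up
#   start = s.start_offset   # offset at which the Kindle opens the book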
# ---- end of record (license: gpl-3.0) ----

# ==== GbalsaC/bitnamiP :: lms/djangoapps/instructor/tests/test_registration_codes.py (copies: 42, size: 12260) ====
"""
Test for the registration code status information.
"""
from courseware.tests.factories import InstructorFactory
from xmodule.modulestore.tests.factories import CourseFactory
from django.utils.translation import ugettext as _
from shoppingcart.models import (
Invoice, CourseRegistrationCodeInvoiceItem, CourseRegistrationCode,
CourseRegCodeItem, Order, RegistrationCodeRedemption
)
from student.models import CourseEnrollment
from student.roles import CourseSalesAdminRole
from nose.plugins.attrib import attr
import json
from student.tests.factories import UserFactory, CourseModeFactory
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
@attr('shard_1')
@override_settings(REGISTRATION_CODE_LENGTH=8)
class TestCourseRegistrationCodeStatus(ModuleStoreTestCase):
"""
Test registration code status.
"""
def setUp(self):
super(TestCourseRegistrationCodeStatus, self).setUp()
self.course = CourseFactory.create()
CourseModeFactory.create(course_id=self.course.id, min_price=50)
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
CourseSalesAdminRole(self.course.id).add_users(self.instructor)
# create testing invoice
self.sale_invoice = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName',
company_contact_email='[email protected]', recipient_name='Testw', recipient_email='[email protected]',
customer_reference_number='2Fwe23S', internal_reference="A", course_id=self.course.id, is_valid=True
)
self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=self.sale_invoice,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
self.lookup_code_url = reverse('look_up_registration_code',
kwargs={'course_id': unicode(self.course.id)})
self.registration_code_detail_url = reverse('registration_code_details',
kwargs={'course_id': unicode(self.course.id)})
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 12,
'company_name': 'Test Group',
'company_contact_name': '[email protected]',
'company_contact_email': '[email protected]',
'unit_price': 122.45,
'recipient_name': 'Test123',
'recipient_email': '[email protected]',
'address_line_1': 'Portland Street',
'address_line_2': '',
'address_line_3': '',
'city': '',
'state': '',
'zip': '',
'country': '',
'customer_reference_number': '123A23F',
'internal_reference': '',
'invoice': ''
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
def test_look_up_invalid_registration_code(self):
"""
Verify the view returns HTTP status 400 if an invalid registration code is passed.
Also, verify the data returned includes a message indicating the error,
        and that is_registration_code_valid is set to False.
"""
data = {
'registration_code': 'invalid_reg_code'
}
response = self.client.get(self.lookup_code_url, data)
self.assertEqual(response.status_code, 400)
json_dict = json.loads(response.content)
message = _('The enrollment code ({code}) was not found for the {course_name} course.').format(
course_name=self.course.display_name, code=data['registration_code']
)
self.assertEqual(message, json_dict['message'])
self.assertFalse(json_dict['is_registration_code_valid'])
self.assertFalse(json_dict['is_registration_code_redeemed'])
def test_look_up_valid_registration_code(self):
"""
        test lookup of a valid registration code
        that has been redeemed by a user,
        then mark the registration code as invalid.
        Marking it invalid also looks up the
        registration redemption entry, deletes
        that redemption entry and unenrolls the student
        who used that registration code for their enrollment.
"""
for i in range(2):
CourseRegistrationCode.objects.create(
code='reg_code{}'.format(i),
course_id=unicode(self.course.id),
created_by=self.instructor,
invoice=self.sale_invoice,
invoice_item=self.invoice_item,
mode_slug='honor'
)
reg_code = CourseRegistrationCode.objects.all()[0]
student = UserFactory()
enrollment = CourseEnrollment.enroll(student, self.course.id)
RegistrationCodeRedemption.objects.create(
registration_code=reg_code,
redeemed_by=student,
course_enrollment=enrollment
)
data = {
'registration_code': reg_code.code
}
response = self.client.get(self.lookup_code_url, data)
self.assertEqual(response.status_code, 200)
json_dict = json.loads(response.content)
self.assertTrue(json_dict['is_registration_code_valid'])
self.assertTrue(json_dict['is_registration_code_redeemed'])
# now mark that registration code as invalid
data = {
'registration_code': reg_code.code,
'action_type': 'invalidate_registration_code'
}
response = self.client.post(self.registration_code_detail_url, data)
self.assertEqual(response.status_code, 200)
json_dict = json.loads(response.content)
message = _('This enrollment code has been canceled. It can no longer be used.')
self.assertEqual(message, json_dict['message'])
# now check that the registration code should be marked as invalid in the db.
reg_code = CourseRegistrationCode.objects.get(code=reg_code.code)
self.assertEqual(reg_code.is_valid, False)
redemption = RegistrationCodeRedemption.get_registration_code_redemption(reg_code.code, self.course.id)
self.assertIsNone(redemption)
# now the student course enrollment should be false.
enrollment = CourseEnrollment.get_enrollment(student, self.course.id)
self.assertEqual(enrollment.is_active, False)
def test_lookup_valid_redeemed_registration_code(self):
"""
        test lookup of a valid, redeemed registration code,
        then mark that registration code as unredeemed,
        which will unenroll the user and delete the redemption
        entry from the database.
"""
student = UserFactory()
self.client.login(username=student.username, password='test')
cart = Order.get_cart_for_user(student)
cart.order_type = 'business'
cart.save()
CourseRegCodeItem.add_to_order(cart, self.course.id, 2)
cart.purchase()
reg_code = CourseRegistrationCode.objects.filter(order=cart)[0]
enrollment = CourseEnrollment.enroll(student, self.course.id)
RegistrationCodeRedemption.objects.create(
registration_code=reg_code,
redeemed_by=student,
course_enrollment=enrollment
)
self.client.login(username=self.instructor.username, password='test')
data = {
'registration_code': reg_code.code
}
response = self.client.get(self.lookup_code_url, data)
self.assertEqual(response.status_code, 200)
json_dict = json.loads(response.content)
self.assertTrue(json_dict['is_registration_code_valid'])
self.assertTrue(json_dict['is_registration_code_redeemed'])
# now mark the registration code as unredeemed
# this will unenroll the user and removed the redemption entry from
# the database.
data = {
'registration_code': reg_code.code,
'action_type': 'unredeem_registration_code'
}
response = self.client.post(self.registration_code_detail_url, data)
self.assertEqual(response.status_code, 200)
json_dict = json.loads(response.content)
message = _('This enrollment code has been marked as unused.')
self.assertEqual(message, json_dict['message'])
redemption = RegistrationCodeRedemption.get_registration_code_redemption(reg_code.code, self.course.id)
self.assertIsNone(redemption)
# now the student course enrollment should be false.
enrollment = CourseEnrollment.get_enrollment(student, self.course.id)
self.assertEqual(enrollment.is_active, False)
def test_apply_invalid_reg_code_when_updating_code_information(self):
"""
test to apply an invalid registration code
when updating the registration code information.
"""
data = {
'registration_code': 'invalid_registration_code',
'action_type': 'unredeem_registration_code'
}
response = self.client.post(self.registration_code_detail_url, data)
self.assertEqual(response.status_code, 400)
json_dict = json.loads(response.content)
message = _('The enrollment code ({code}) was not found for the {course_name} course.').format(
course_name=self.course.display_name, code=data['registration_code']
)
self.assertEqual(message, json_dict['message'])
def test_mark_registration_code_as_valid(self):
"""
test to mark the invalid registration code
as valid
"""
for i in range(2):
CourseRegistrationCode.objects.create(
code='reg_code{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice,
invoice_item=self.invoice_item,
mode_slug='honor',
is_valid=False
)
reg_code = CourseRegistrationCode.objects.all()[0]
data = {
'registration_code': reg_code.code,
'action_type': 'validate_registration_code'
}
response = self.client.post(self.registration_code_detail_url, data)
self.assertEqual(response.status_code, 200)
json_dict = json.loads(response.content)
message = _('The enrollment code has been restored.')
self.assertEqual(message, json_dict['message'])
# now check that the registration code should be marked as valid in the db.
reg_code = CourseRegistrationCode.objects.get(code=reg_code.code)
self.assertEqual(reg_code.is_valid, True)
def test_returns_error_when_unredeeming_already_unredeemed_registration_code_redemption(self):
"""
test to mark the already unredeemed registration code as unredeemed.
"""
for i in range(2):
CourseRegistrationCode.objects.create(
code='reg_code{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice,
invoice_item=self.invoice_item,
mode_slug='honor'
)
reg_code = CourseRegistrationCode.objects.all()[0]
data = {
'registration_code': reg_code.code,
'action_type': 'unredeem_registration_code'
}
response = self.client.post(self.registration_code_detail_url, data)
self.assertEqual(response.status_code, 400)
json_dict = json.loads(response.content)
message = _('The redemption does not exist against enrollment code ({code}).').format(code=reg_code.code)
self.assertEqual(message, json_dict['message'])
# ---- end of record (license: agpl-3.0) ----

# ==== hanlind/nova :: nova/conf/service_token.py (copies: 1, size: 2573) ====
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneauth1 import loading as ks_loading
from oslo_config import cfg
SERVICE_USER_GROUP = 'service_user'
service_user = cfg.OptGroup(
SERVICE_USER_GROUP,
title = 'Service token authentication type options',
help = """
Configuration options for service to service authentication using a service
token. These options allow to send a service token along with the
user's token when contacting external REST APIs.
"""
)
service_user_opts = [
cfg.BoolOpt('send_service_user_token',
default=False,
help="""
When True, if sending a user token to a REST API, also send a service token.
Nova often reuses the user token provided to the nova-api to talk to other
REST APIs, such as Cinder. It is possible that while the
user token was valid when the request was made to Nova, the token may expire
before it reaches the other service. To avoid any failures, and to
make it clear it is Nova calling the service on the user's behalf, we include
a service token along with the user token. Should the user's token have
expired, a valid service token ensures the REST API request will still be
accepted by the keystone middleware.
This feature is currently experimental, and as such is turned off by default
while full testing and performance tuning of this feature is completed.
"""),
]
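# Illustrative nova.conf snippet (the keystoneauth option names below are
# assumptions; they come from the password auth plugin registered dynamically
# via ks_loading in register_opts(), not from this file):
#
#   [service_user]
#   send_service_user_token = true
#   auth_type = password
#   auth_url = http://keystone.example.com/v3
#   username = nova
#   password = secret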
def register_opts(conf):
conf.register_group(service_user)
conf.register_opts(service_user_opts, group=service_user)
ks_loading.register_session_conf_options(conf, SERVICE_USER_GROUP)
ks_loading.register_auth_conf_options(conf, SERVICE_USER_GROUP)
def list_opts():
return {
service_user: (
service_user_opts +
ks_loading.get_session_conf_options() +
ks_loading.get_auth_common_conf_options() +
ks_loading.get_auth_plugin_conf_options('password') +
ks_loading.get_auth_plugin_conf_options('v2password') +
ks_loading.get_auth_plugin_conf_options('v3password'))
}
# ---- end of record (license: apache-2.0) ----

# ==== marrow/schema :: test/transform/test_boolean.py (copies: 1, size: 1445) ====
from marrow.schema.testing import TransformTest
from marrow.schema.transform.type import Boolean, boolean, WebBoolean, web_boolean
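# Editorial note: each TransformTest subclass below is assumed to be driven by
# the marrow.schema.testing harness, which iterates `valid` as
# (input, expected-output) pairs for the given transform callable and checks
# that every entry in `invalid` is rejected.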
class TestBooleanNative(TransformTest):
transform = boolean.native
invalid = ('x', )
@property
def valid(self):
yield None, None
if boolean.none:
yield '', None
for i in boolean.truthy + ('Y', 'True', True, 1, ['foo']):
yield i, True
for i in boolean.falsy + ('n', 'False', False, 0, []):
yield i, False
class TestBooleanForeign(TransformTest):
transform = boolean.foreign
@property
def valid(self):
if boolean.none:
yield None, ''
for i in (0, 1, False, True, [], [0]):
yield i, boolean.truthy[boolean.use] if bool(i) else boolean.falsy[boolean.use]
for i in boolean.truthy:
yield i, boolean.truthy[boolean.use]
for i in boolean.falsy:
yield i, boolean.falsy[boolean.use]
class TestBooleanNoNoneNative(TransformTest):
transform = Boolean(none=False).native
valid = ((None, False), )
invalid = ('', 'bob')
class TestBooleanNoNoneForeign(TransformTest):
transform = Boolean(none=False).foreign
valid = ((None, 'false'), ('foo', 'true'), ('', 'false'))
class TestWebBooleanNative(TransformTest):
transform = web_boolean.native
valid = (
(['', 'true'], True),
([''], False),
('', False),
)
class TestWebBooleanForeign(TransformTest):
transform = web_boolean.foreign
valid = [(i, bool(i)) for i in (0, 1, False, True)]
# ---- end of record (license: mit) ----

# ==== messi2050/android_kernel_huawei_msm8610 :: scripts/tracing/draw_functrace.py (copies: 14676, size: 3560) ====
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack, drawn as a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
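# Minimal sketch of how the tree is built (using only the API defined above):
#   root = CallTree("Root (Nowhere)", None, None)
#   a = root.calls("sys_open", "100.0")
#   a.calls("do_sys_open", "100.1")
# repr(root) then draws do_sys_open nested one level under sys_open.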
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
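# Illustrative input line (format approximated from function-tracer output):
#   "          <idle>-0     [001]   260.031136: native_read_tsc <-sched_clock"
# parseLine() returns ("260.031136", "native_read_tsc", "sched_clock"):
# the call time, the callee and its caller.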
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
# ---- end of record (license: gpl-2.0) ----

# ==== r-owen/TUI :: TUI/Models/PermsModel.py (copies: 1, size: 1598) ====
#!/usr/bin/env python
"""A model of the state of the perms actor.
It contains instance variables that are KeyVariables
or sets of KeyVariables. All are directly associated
with status keywords.
History:
2003-12-10 ROwen
2003-12-17 ROwen Moved KeyVarFactory to RO.KeyVariable.
2004-05-18 ROwen Eliminated unused testMode argument.
2004-07-22 ROwen Stopped importing three unused modules.
"""
import RO.KeyVariable
import TUI.TUIModel
_theModel = None
def getModel():
global _theModel
    if _theModel is None:
_theModel = _Model()
return _theModel
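# Usage sketch: the model is a process-wide singleton, e.g.
#   import TUI.Models.PermsModel
#   permsModel = TUI.Models.PermsModel.getModel()
# repeated calls return the same _Model instance.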
class _Model(object):
def __init__(self):
self.dispatcher = TUI.TUIModel.getModel().dispatcher
keyVarFact = RO.KeyVariable.KeyVarFactory(
actor = "perms",
dispatcher = self.dispatcher,
refreshCmd = "status",
nval = (0, None),
converters = str,
)
self.actors = keyVarFact(
keyword = "actors",
description = "Actors controlled by perms",
)
self.authList = keyVarFact(
keyword = "authList",
nval = (1,None),
description = "Program and 0 or more authorized actors",
refreshCmd = None, # no authLists if no programs yet registered
)
self.lockedActors = keyVarFact(
keyword = "lockedActors",
description = "Actors locked out by APO",
)
self.programs = keyVarFact(
keyword = "programs",
description = "Programs registered with perms",
)
# ---- end of record (license: bsd-3-clause) ----

# ==== tony810430/flink :: flink-python/pyflink/fn_execution/beam/beam_operations_slow.py (copies: 2, size: 4637) ====
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import abstractmethod
from apache_beam.runners.worker.operations import Operation
from apache_beam.utils.windowed_value import WindowedValue
from pyflink.fn_execution.operations import BundleOperation
class FunctionOperation(Operation):
"""
Base class of function operation that will execute StatelessFunction or StatefulFunction for
each input element.
"""
def __init__(self, name, spec, counter_factory, sampler, consumers, operation_cls):
super(FunctionOperation, self).__init__(name, spec, counter_factory, sampler)
self.consumer = consumers['output'][0]
self._value_coder_impl = self.consumer.windowed_coder.wrapped_value_coder.get_impl()
self.operation_cls = operation_cls
self.operation = self.generate_operation()
self.process_element = self.operation.process_element
self.operation.open()
def setup(self):
super(FunctionOperation, self).setup()
def start(self):
with self.scoped_start_state:
super(FunctionOperation, self).start()
def finish(self):
with self.scoped_finish_state:
super(FunctionOperation, self).finish()
self.operation.finish()
def needs_finalization(self):
return False
def reset(self):
super(FunctionOperation, self).reset()
def teardown(self):
with self.scoped_finish_state:
self.operation.close()
def progress_metrics(self):
metrics = super(FunctionOperation, self).progress_metrics()
metrics.processed_elements.measured.output_element_counts.clear()
tag = None
receiver = self.receivers[0]
metrics.processed_elements.measured.output_element_counts[
str(tag)] = receiver.opcounter.element_counter.value()
return metrics
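    # process() receives one WindowedValue whose .value is the input bundle:
    # a BundleOperation consumes the whole bundle and emits a single
    # finish_bundle() result, while a plain operation emits one encoded
    # result per input element (see the two branches below).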
def process(self, o: WindowedValue):
with self.scoped_process_state:
output_stream = self.consumer.output_stream
if isinstance(self.operation, BundleOperation):
for value in o.value:
self.process_element(value)
self._value_coder_impl.encode_to_stream(
self.operation.finish_bundle(), output_stream, True)
output_stream.maybe_flush()
else:
for value in o.value:
self._value_coder_impl.encode_to_stream(
self.process_element(value), output_stream, True)
output_stream.maybe_flush()
def monitoring_infos(self, transform_id, tag_to_pcollection_id):
"""
Only pass user metric to Java
:param tag_to_pcollection_id: useless for user metric
"""
return super().user_monitoring_infos(transform_id)
@abstractmethod
def generate_operation(self):
pass
class StatelessFunctionOperation(FunctionOperation):
def __init__(self, name, spec, counter_factory, sampler, consumers, operation_cls):
super(StatelessFunctionOperation, self).__init__(
name, spec, counter_factory, sampler, consumers, operation_cls)
def generate_operation(self):
return self.operation_cls(self.spec)
class StatefulFunctionOperation(FunctionOperation):
def __init__(self, name, spec, counter_factory, sampler, consumers, operation_cls,
keyed_state_backend):
self.keyed_state_backend = keyed_state_backend
super(StatefulFunctionOperation, self).__init__(
name, spec, counter_factory, sampler, consumers, operation_cls)
def generate_operation(self):
return self.operation_cls(self.spec, self.keyed_state_backend)
# ---- end of record (license: apache-2.0) ----

# ==== viacoin/viacoin :: test/functional/p2p_segwit.py (copies: 1, size: 92836) ====
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
import time
import random
from binascii import hexlify
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 8000
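# Illustrative: a block that signals SegWit sets
# nVersion = VB_TOP_BITS | (1 << VB_WITNESS_BIT) == 0x20000002, as done by
# the build_next_block(nVersion=...) calls later in this file.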
# Calculate the virtual size of a witness block:
# (base + witness/4)
def get_virtual_size(witness_block):
base_size = len(witness_block.serialize(with_witness=False))
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3*base_size + total_size + 3)/4)
return vsize
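# Worked example (illustrative numbers): with base_size == 1000 and
# total_size == 1400 (i.e. 400 witness bytes), vsize is
# int((3*1000 + 1400 + 3) / 4) == 1100; the "+3" makes the integer
# division round up when 3*base + total is not a multiple of 4.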
def test_transaction_acceptance(rpc, p2p, tx, with_witness, accepted, reason=None):
"""Send a transaction to the node and check that it's accepted to the mempool
- Submit the transaction over the p2p interface
- use the getrawmempool rpc to check for acceptance."""
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
p2p.send_message(tx_message)
p2p.sync_with_ping()
assert_equal(tx.hash in rpc.getrawmempool(), accepted)
if (reason != None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(p2p.last_message["reject"].reason, reason)
def test_witness_block(rpc, p2p, block, accepted, with_witness=True):
"""Send a block to the node and check that it's accepted
- Submit the block over the p2p interface
- use the getbestblockhash rpc to check for acceptance."""
if with_witness:
p2p.send_message(msg_witness_block(block))
else:
p2p.send_message(msg_block(block))
p2p.sync_with_ping()
assert_equal(rpc.getbestblockhash() == block.hash, accepted)
class TestNode(P2PInterface):
def __init__(self):
super().__init__()
self.getdataset = set()
def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
self.wait_for_getdata(timeout)
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO():
def __init__(self, sha256, n, nValue):
self.sha256 = sha256
self.n = n
self.nValue = nValue
# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
txTo.rehash()
class SegWitTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
# This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
self.extra_args = [["-whitelist=127.0.0.1", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0", "-vbparams=segwit:0:999999999999"], ["-whitelist=127.0.0.1", "-vbparams=segwit:0:0"]]
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
''' Helpers '''
# Build a block on top of node0's tip.
def build_next_block(self, nVersion=VB_TOP_BITS):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = nVersion
block.rehash()
return block
# Adds list of transactions to block, adds witness commitment, then solves.
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
return
''' Individual tests '''
def test_witness_services(self):
self.log.info("Verifying NODE_WITNESS service bit")
assert((self.test_node.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
# to use in later tests.
def test_non_witness_transaction(self):
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
self.log.info("Testing non-witness transaction")
block = self.build_next_block(nVersion=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
self.nodes[0].generate(1)
# Verify that blocks with witnesses are rejected before activation.
def test_unnecessary_witness_before_segwit_activation(self):
self.log.info("Testing behavior of unnecessary witnesses")
# For now, rely on earlier tests to have created at least one utxo for
# us to use
assert(len(self.utxo) > 0)
assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, bitcoind delays sending reject messages for blocks
# until the future, making synchronization here difficult.
#assert_equal(self.test_node.last_message["reject"].reason, "unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
sync_blocks(self.nodes)
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness before
# segwit activation doesn't blind a node to a transaction. Transactions
# rejected for having a witness before segwit activation shouldn't be added
# to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx4.rehash()
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, False, True)
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
# Mine enough blocks for segwit's vb state to be 'started'.
def advance_to_segwit_started(self):
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Genesis block is 'defined'.
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD-height-1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Mine enough blocks to lock in segwit, but don't activate.
# TODO: we could verify that lockin only happens at the right threshold of
# signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_lockin(self):
height = self.nodes[0].getblockcount()
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD-1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
# Mine enough blocks to activate segwit.
# TODO: we could verify that activation only happens at the right threshold
# of signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_active(self):
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
# This test can only be run after segwit has activated
def test_witness_commitments(self):
self.log.info("Testing witness commitments")
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
test_witness_block(self.nodes[0].rpc, self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
assert (len(self.utxo) > 0)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
test_witness_block(self.nodes[0].rpc, self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_block_malleability(self):
self.log.info("Testing witness block malleability")
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness nonce doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Changing the witness nonce doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
def test_witness_block_size(self):
self.log.info("Testing witness block size limit")
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 10 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value/NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes+1, 55)
block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
# Viacoin: scale to blocksize proportion
assert(len(block.serialize(True)) > 2*1024*1024 // 16.66)
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
# submitblock will try to add the nonce automatically, so that mining
# software doesn't need to worry about doing so itself.
def test_submit_block(self):
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
# Consensus tests of extra witness data in a transaction.
def test_extra_witness_data(self):
self.log.info("Testing extra witness data in tx")
assert(len(self.utxo) > 0)
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-200000, scriptPubKey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_push_length(self):
''' Should only allow up to 520-byte pushes in the witness stack '''
self.log.info("Testing maximum witness push size")
MAX_SCRIPT_ELEMENT_SIZE = 520
assert(len(self.utxo))
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
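# --- Editor's illustrative sketch; not part of the original test. ---
# The consensus rule exercised above: every element on the witness stack,
# including the serialized witness script itself, may be at most
# MAX_SCRIPT_ELEMENT_SIZE (520) bytes. A minimal checker:
def sketch_witness_stack_ok(stack, max_element_size=520):
    return all(len(element) <= max_element_size for element in stack)

assert sketch_witness_stack_ok([b'a' * 520])
assert not sketch_witness_stack_ok([b'a' * 521])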
def test_max_witness_program_length(self):
# Witness outputs can commit to arbitrarily long programs, but programs
# greater than 10k bytes cannot be successfully spent
self.log.info("Testing maximum witness program length")
assert(len(self.utxo))
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
long_witness_hash = sha256(long_witness_program)
long_scriptPubKey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, long_scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
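# --- Editor's illustrative sketch; not part of the original test. ---
# Why the long program above is exactly MAX_PROGRAM_LENGTH + 1 bytes: each
# 520-byte push serializes as OP_PUSHDATA2 (1 byte) + a 2-byte length + the
# data, i.e. 523 bytes, while OP_DROP and OP_TRUE are single opcode bytes.
pushes = 19 * (3 + 520)          # 9937 bytes of pushes
assert pushes + 63 + 1 == 10001  # 63 x OP_DROP + OP_TRUE: one byte too long
assert pushes + 62 + 1 == 10000  # 62 x OP_DROP + OP_TRUE: exactly at limit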
def test_witness_input_length(self):
''' Ensure that vin length must match vtxinwit length '''
self.log.info("Testing witness input length")
assert(len(self.utxo))
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
nValue = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
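# --- Editor's illustrative sketch; not part of the original test. ---
# Why BrokenCTransaction above can even express the mismatch: BIP144
# serialization carries no explicit witness count -- one witness is implied
# per input, in order. A compliant serializer therefore normalizes the
# witness list to the length of vin before writing, roughly:
def sketch_normalize_witness(vin, vtxinwit, make_empty_witness):
    """Pad or truncate vtxinwit so len(vtxinwit) == len(vin)."""
    vtxinwit = vtxinwit[:len(vin)]
    while len(vtxinwit) < len(vin):
        vtxinwit.append(make_empty_witness())
    return vtxinwit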
def test_witness_tx_relay_before_segwit_activation(self):
self.log.info("Testing relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_message["getdata"].inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness-capable peer ought not result in a getdata.
try:
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
self.log.error("Error: duplicate tx getdata!")
assert(False)
except AssertionError:
pass
# Delivering this transaction with witness should fail (no matter who
# it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0].rpc, self.old_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
# After segwit activates, verify that mempool:
# - rejects transactions with unnecessary/extra witnesses
# - accepts transactions with valid witnesses
# and that witness transactions are relayed to non-upgraded peers.
def test_tx_relay_after_segwit_activation(self):
self.log.info("Testing relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptPubKey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add a witness that is too large for IsStandard and check that it does not enter the reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a'*400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# The node is not blinded to the transaction: it was not added to the
# reject filter, so re-announcing triggers another getdata
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) // 4
assert_equal(raw_tx["vsize"], vsize)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
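# --- Editor's illustrative sketch; not part of the original test. ---
# The vsize computed just above is the per-transaction form of the same
# BIP141 arithmetic: weight = 3 * stripped_size + total_size, and
# vsize = ceil(weight / 4), done with integer math:
def sketch_tx_vsize(stripped_size, total_size):
    return (3 * stripped_size + total_size + 3) // 4

# A witness-carrying tx always has vsize < size, which is what the
# assert(vsize != raw_tx["size"]) check above relies on.
assert sketch_tx_vsize(200, 300) < 300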
# Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
# This is true regardless of segwit activation.
# Also test that we don't ask for blocks from unupgraded peers
def test_block_relay(self, segwit_activated):
self.log.info("Testing block relay")
blocktype = 2|MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test that announcing a block via inv results in a getdata, and that
# announcing a versionbits block with a header also results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0].rpc, self.test_node, block1, True)
# Viacoin: Blocks with nVersion < VB_TOP_BITS are rejected
# self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
# assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
# self.test_node.test_witness_block(block2, True)
block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
test_witness_block(self.nodes[0].rpc, self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if not segwit_activated:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height+1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded peers
# Viacoin: Blocks with nVersion < VB_TOP_BITS are rejected
block4 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement),
# so to test that a block is NOT requested we could guess a sleep period and then check.
# We avoid the sleep() by exploiting the fact that transaction getdata's are processed
# after block getdata's: announce a transaction as well, and then check whether that
# particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [ CBlockHeader(block4) ]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
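# --- Editor's illustrative sketch; not part of the original test. ---
# The inv/getdata types above combine a base message type with the witness
# flag bit from BIP144 (MSG_WITNESS_FLAG is assumed to be 1 << 30, as in
# the Bitcoin Core protocol constants):
SKETCH_MSG_TX, SKETCH_MSG_BLOCK = 1, 2
SKETCH_MSG_WITNESS_FLAG = 1 << 30
sketch_blocktype = SKETCH_MSG_BLOCK | SKETCH_MSG_WITNESS_FLAG
assert sketch_blocktype & ~SKETCH_MSG_WITNESS_FLAG == SKETCH_MSG_BLOCK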
# V0 segwit outputs should be standard after activation, but not before.
def test_standardness_v0(self, segwit_activated):
self.log.info("Testing standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-100000, p2sh_scriptPubKey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000000, scriptPubKey)]
tx.vout.append(CTxOut(800000, scriptPubKey)) # Might burn this later
tx.rehash()
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx, with_witness=True, accepted=segwit_activated)
# Now create something that looks like a P2PKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
if segwit_activated:
# if tx was accepted, then we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(700000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
else:
# if tx wasn't accepted, we just re-spend the p2sh output we started with.
tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-100000, scriptPubKey)]
tx2.rehash()
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=segwit_activated)
# Now update self.utxo for later tests.
tx3 = CTransaction()
if segwit_activated:
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
else:
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue - 100000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
tx3.rehash()
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
def test_segwit_versions(self):
self.log.info("Testing standardness/consensus for segwit versions (0-16)")
assert(len(self.utxo))
NUM_TESTS = 17 # will test OP_0, OP_1, ..., OP_16
if (len(self.utxo) < NUM_TESTS):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 400000) // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_TESTS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
count = 0
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16+1)) + [OP_0]:
count += 1
# First try to spend to a future version segwit scriptPubKey.
scriptPubKey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue-100000, scriptPubKey)]
tx.rehash()
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx, with_witness=True, accepted=False)
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue-100000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked with fRequireStandard
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 100000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
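# --- Editor's illustrative sketch; not part of the original test. ---
# The scriptPubKeys generated above follow the BIP141 pattern: a version
# opcode (OP_0 for version 0, OP_1..OP_16 for versions 1-16) followed by a
# single push of the witness program. Mapping opcode byte -> version,
# assuming the standard encodings OP_0 = 0x00 and OP_1..OP_16 = 0x51..0x60:
def sketch_witness_version(version_opcode):
    if version_opcode == 0x00:
        return 0
    assert 0x51 <= version_opcode <= 0x60
    return version_opcode - 0x50

assert sketch_witness_version(0x00) == 0
assert sketch_witness_version(0x51) == 1
assert sketch_witness_version(0x60) == 16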
def test_premature_coinbase_witness_spend(self):
self.log.info("Testing premature coinbase witness spend")
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = scriptPubKey
# The next call rehashes the coinbase, updates the merkle root, and
# re-solves the block.
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block2, accepted=True)
sync_blocks(self.nodes)
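# --- Editor's illustrative sketch; not part of the original test. ---
# The spend above is gated by coinbase maturity (assumed to be the usual
# 100 blocks): a coinbase created at height h is spendable only at height
# h + 100 or later. That is why generate(98) still leaves the spend one
# block premature and a single extra block makes it valid.
def sketch_coinbase_spendable(coinbase_height, spend_height, maturity=100):
    return spend_height - coinbase_height >= maturity

assert not sketch_coinbase_spendable(100, 199)
assert sketch_coinbase_spendable(100, 200)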
def test_signature_version_1(self):
self.log.info("Testing segwit signature hash version 1")
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 100000, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Too-small input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now try correct value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index >= number of outputs.
NUM_TESTS = 300
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) >= MAX_BLOCK_BASE_SIZE - len(tx.serialize_with_witness())):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
block = self.build_next_block()
block.vtx.append(tx)
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
temp_utxos.pop(0)
# Update self.utxos for later tests by creating two outputs
# that consolidate all the coins in temp_utxos.
output_value = sum(i.nValue for i in temp_utxos) // 2
tx = CTransaction()
index = 0
# Just spend to our usual anyone-can-spend output
tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
for i in temp_utxos:
# Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_ALL|SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
assert_greater_than_or_equal(MAX_BLOCK_BASE_SIZE, get_virtual_size(block))
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
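# --- Editor's illustrative sketch; not part of the original test. ---
# SegwitVersion1SignatureHash() is assumed to implement BIP143, which
# commits to the fields below in this order and double-SHA256s the
# concatenation. The sketch only illustrates the field order; each field is
# taken as already serialized bytes.
import hashlib  # for this sketch only

BIP143_FIELD_ORDER = [
    'nVersion', 'hashPrevouts', 'hashSequence', 'outpoint', 'scriptCode',
    'amount', 'nSequence', 'hashOutputs', 'nLockTime', 'sighash_type',
]

def sketch_bip143_digest(fields):
    """fields: dict mapping the names above to serialized byte strings."""
    preimage = b''.join(fields[name] for name in BIP143_FIELD_ORDER)
    return hashlib.sha256(hashlib.sha256(preimage).digest()).digest()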
# Test P2SH wrapped witness programs.
def test_p2sh_witness(self, segwit_activated):
self.log.info("Testing P2SH witness transactions")
assert(len(self.utxo))
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-100000, scriptPubKey))
tx.rehash()
# Verify mempool acceptance and block validity
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue-100000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind versions that are not
# segwit-aware would also reject this for failing CLEANSTACK.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = scriptSig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're before activation, then sending this without witnesses
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
else:
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
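# --- Editor's illustrative sketch; not part of the original test. ---
# How the P2SH-wrapped witness output above nests its scripts. hash160 is
# RIPEMD160(SHA256(x)); hashlib is used here for the sketch only.
import hashlib

def sketch_hash160(data):
    return hashlib.new('ripemd160', hashlib.sha256(data).digest()).digest()

def sketch_p2sh_p2wsh(witness_script):
    # redeemScript: OP_0 (0x00) + 32-byte push (0x20) of sha256(witnessScript)
    redeem_script = b'\x00\x20' + hashlib.sha256(witness_script).digest()
    # scriptPubKey: OP_HASH160 (0xa9) + 20-byte push (0x14) + OP_EQUAL (0x87)
    script_pubkey = b'\xa9\x14' + sketch_hash160(redeem_script) + b'\x87'
    # scriptSig: a single direct push of the 34-byte redeemScript
    script_sig = bytes([len(redeem_script)]) + redeem_script
    return script_pubkey, script_sig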
# Test the behavior of starting up a segwit-aware node after the softfork
# has activated. As segwit requires different block data than pre-segwit
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to
# the test.
def test_upgrade_after_activation(self, node_id):
self.log.info("Testing software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
# Make sure the nodes are all up
sync_blocks(self.nodes)
# Restart with the new binary
self.stop_node(node_id)
self.start_node(node_id, extra_args=["-vbparams=segwit:0:999999999999"])
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(self.nodes[node_id], 'segwit')['status'] == "active")
# Make sure this peer's blocks match those of node0.
height = self.nodes[node_id].getblockcount()
while height >= 0:
block_hash = self.nodes[node_id].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[node_id].getblock(block_hash))
height -= 1
def test_witness_sigops(self):
'''Ensure sigop counting is correct inside witnesses.'''
self.log.info("Testing sigops limit")
assert(len(self.utxo))
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
sigops_per_script = 20*5 + 193*1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.vout[-2].scriptPubKey = scriptPubKey_toomany
tx.vout[-1].scriptPubKey = scriptPubKey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs-1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
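# --- Editor's illustrative sketch; not part of the original test. ---
# The BIP141 sigop accounting behind the constants above: legacy sigops are
# scaled by the witness factor of 4, witness sigops count singly, and the
# block-wide budget is MAX_SIGOP_COST (assumed 80000, as in Bitcoin Core).
# Inside a witness script each OP_CHECKSIG costs 1 and each
# OP_CHECKMULTISIG costs 20, hence for the program used above:
sketch_sigops_per_script = 20 * 5 + 193 * 1   # = 293
SKETCH_MAX_SIGOP_COST = 80000
sketch_full_scripts = SKETCH_MAX_SIGOP_COST // sketch_sigops_per_script
sketch_leftover = SKETCH_MAX_SIGOP_COST % sketch_sigops_per_script
assert (sketch_full_scripts * sketch_sigops_per_script + sketch_leftover
        == SKETCH_MAX_SIGOP_COST)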
def test_getblocktemplate_before_lockin(self):
self.log.info("Testing getblocktemplate setting of segwit versionbit (before lockin)")
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# Even though this getblocktemplate call doesn't indicate segwit
# support, a segwit-aware node will still signal for segwit activation.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
self.nodes[2].setmocktime(int(time.time())+10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment matches the expected script.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, bytes_to_hex_str(script))
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
def test_uncompressed_pubkey(self):
self.log.info("Testing uncompressed pubkeys")
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
assert(len(self.utxo) > 0)
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue-100000, scriptPKH))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptWSH = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-100000, scriptWSH))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
tx2.rehash()
# Should fail policy test.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(scriptWSH)
scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([scriptWSH])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-100000, scriptP2SH))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
scriptPubKey = GetP2PKHScript(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-100000, scriptPubKey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue-100000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
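# --- Editor's illustrative sketch; not part of the original test. ---
# The 65-byte length asserted above is the uncompressed SEC encoding
# 0x04 || X || Y; compressed keys are 33 bytes, 0x02/0x03 || X, with the
# prefix byte encoding the parity of Y. A minimal classifier:
def sketch_pubkey_is_compressed(pubkey):
    if len(pubkey) == 33 and pubkey[0] in (2, 3):
        return True
    if len(pubkey) == 65 and pubkey[0] == 4:
        return False
    raise ValueError("not a valid SEC-encoded public key")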
def test_non_standard_witness(self):
self.log.info("Testing detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
p2wsh_scripts = []
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH outputs.
outputvalue = (self.utxo[0].nValue - 100000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 500000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[0], True, False, b'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[1], True, False, b'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[2], True, True)
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[3], True, False, b'bad-witness-nonstandard')
test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
def run_test(self):
# Setup the p2p connections and start up the network thread.
# self.test_node sets NODE_WITNESS|NODE_NETWORK
self.test_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
self.old_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS)
network_thread_start()
# Keep a place to store utxo's that can be used in later tests
self.utxo = []
# Test logic begins here
self.test_node.wait_for_verack()
self.log.info("Starting tests before segwit lock in:")
self.test_witness_services() # Verifies NODE_WITNESS
self.test_non_witness_transaction() # non-witness tx's are accepted
self.test_unnecessary_witness_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
# Advance to segwit being 'started'
self.advance_to_segwit_started()
sync_blocks(self.nodes)
self.test_getblocktemplate_before_lockin()
sync_blocks(self.nodes)
# At lockin, nothing should change.
self.log.info("Testing behavior post lockin, pre-activation")
self.advance_to_segwit_lockin()
# Retest unnecessary witnesses
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
self.test_p2sh_witness(segwit_activated=False)
self.test_standardness_v0(segwit_activated=False)
sync_blocks(self.nodes)
# Now activate segwit
self.log.info("Testing behavior after segwit activation")
self.advance_to_segwit_active()
sync_blocks(self.nodes)
# Test P2SH witness handling again
self.test_p2sh_witness(segwit_activated=True)
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay(segwit_activated=True)
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0(segwit_activated=True)
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
# Viacoin: Disable test due to occasional travis issue
#self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(node_id=2)
self.test_witness_sigops()
if __name__ == '__main__':
SegWitTest().main()
| mit | 5,502,786,182,529,609,000 | 6,175,885,246,126,872,000 | 46.486445 | 220 | 0.635939 | false |
mihail911/nupic | nupic/regions/PictureSensorExplorers/block1DOF.py | 8 | 2863 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file defines Block1DOFPictureExplorer, an explorer for
PictureSensor.
"""
from nupic.regions.PictureSensor import PictureSensor
class Block1DOFPictureExplorer(PictureSensor.PictureExplorer):
"""
Presents each category at an Nx1 "block" of shifted positions
centered upon the centroid of the canvas, where N is 2R+1
(where R is the radialLength); each such presentation is
  spaced radialStep pixels apart along the X dimension (Y is held fixed).
"""
@classmethod
def queryRelevantParams(klass):
"""
Returns a sequence of parameter names that are relevant to
the operation of the explorer.
May be extended or overridden by sub-classes as appropriate.
"""
return ( 'radialLength', 'radialStep', )
def initSequence(self, state, params):
self._presentNextBlockPosn(state, params)
def updateSequence(self, state, params):
self._presentNextBlockPosn(state, params)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Internal helper method(s)
def _presentNextBlockPosn(self, state, params):
"""
Compute the appropriate category and block position
deterministically based on the current iteration count.
"""
# Compute iteration indices
edgeLen = 2 * params['radialLength'] + 1
numBlocksPerCat = edgeLen
numCats = self._getNumCategories()
numBlocks = numBlocksPerCat * numCats
blockCounter = self._getIterCount() % numBlocks
catIndex = blockCounter // numBlocksPerCat
blockCatIndex = blockCounter % numBlocksPerCat
    # Compute position within the block
posnX = ((blockCatIndex % edgeLen) - params['radialLength']) * params['radialStep']
# Override default state
state['posnX'] = posnX
state['posnY'] = 0
state['velocityX'] = 0
state['velocityY'] = 0
state['angularPosn'] = 0
state['angularVelocity'] = 0
state['catIndex'] = catIndex
| gpl-3.0 | -6,985,990,589,387,293,000 | -8,968,662,222,897,997,000 | 33.914634 | 87 | 0.676912 | false |
SlimRemix/android_external_chromium_org | tools/telemetry/telemetry/core/browser_options.py | 26 | 12934 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import logging
import optparse
import os
import shlex
import sys
from telemetry.core import browser_finder
from telemetry.core import profile_types
from telemetry.core import util
from telemetry.core import wpr_modes
from telemetry.core.platform.profiler import profiler_finder
util.AddDirToPythonPath(
util.GetChromiumSrcDir(), 'third_party', 'webpagereplay')
import net_configs # pylint: disable=F0401
class BrowserFinderOptions(optparse.Values):
"""Options to be used for discovering a browser."""
def __init__(self, browser_type=None):
optparse.Values.__init__(self)
self.browser_type = browser_type
self.browser_executable = None
self.chrome_root = None
self.android_device = None
self.cros_ssh_identity = None
self.extensions_to_load = []
# If set, copy the generated profile to this path on exit.
self.output_profile_path = None
self.cros_remote = None
self.profiler = None
self.verbosity = 0
self.browser_options = BrowserOptions()
self.output_file = None
self.android_rndis = False
self.no_performance_mode = False
def __repr__(self):
return str(sorted(self.__dict__.items()))
def Copy(self):
return copy.deepcopy(self)
def CreateParser(self, *args, **kwargs):
parser = optparse.OptionParser(*args, **kwargs)
# Selection group
group = optparse.OptionGroup(parser, 'Which browser to use')
group.add_option('--browser',
dest='browser_type',
default=None,
help='Browser type to run, '
'in order of priority. Supported values: list,%s' %
','.join(browser_finder.FindAllBrowserTypes(self)))
group.add_option('--browser-executable',
dest='browser_executable',
help='The exact browser to run.')
group.add_option('--chrome-root',
dest='chrome_root',
        help='Where to look for chrome builds. '
        'Defaults to searching parent dirs.')
group.add_option('--device',
dest='android_device',
        help='The android device ID to use. '
        'If not specified, only 0 or 1 connected devices are supported.')
group.add_option('--target-arch',
dest='target_arch',
help='The target architecture of the browser. Options available are: '
'x64, x86_64, arm, arm64 and mips. '
'Defaults to the default architecture of the platform if omitted.')
group.add_option(
'--remote',
dest='cros_remote',
help='The IP address of a remote ChromeOS device to use.')
identity = None
testing_rsa = os.path.join(
util.GetChromiumSrcDir(),
'third_party', 'chromite', 'ssh_keys', 'testing_rsa')
if os.path.exists(testing_rsa):
identity = testing_rsa
group.add_option('--identity',
dest='cros_ssh_identity',
default=identity,
help='The identity file to use when ssh\'ing into the ChromeOS device')
parser.add_option_group(group)
# Debugging options
group = optparse.OptionGroup(parser, 'When things go wrong')
profiler_choices = profiler_finder.GetAllAvailableProfilers()
group.add_option(
'--profiler', default=None, type='choice',
choices=profiler_choices,
help='Record profiling data using this tool. Supported values: ' +
', '.join(profiler_choices))
group.add_option(
'--interactive', dest='interactive', action='store_true',
help='Let the user interact with the page; the actions specified for '
'the page are not run.')
group.add_option(
'-v', '--verbose', action='count', dest='verbosity',
help='Increase verbosity level (repeat as needed)')
group.add_option('--print-bootstrap-deps',
action='store_true',
help='Output bootstrap deps list.')
parser.add_option_group(group)
# Platform options
group = optparse.OptionGroup(parser, 'Platform options')
group.add_option('--no-performance-mode', action='store_true',
help='Some platforms run on "full performance mode" where the '
'test is executed at maximum CPU speed in order to minimize noise '
'(specially important for dashboards / continuous builds). '
'This option prevents Telemetry from tweaking such platform settings.')
group.add_option('--android-rndis', dest='android_rndis', default=False,
action='store_true', help='Use RNDIS forwarding on Android.')
group.add_option('--no-android-rndis', dest='android_rndis',
action='store_false', help='Do not use RNDIS forwarding on Android.'
' [default]')
parser.add_option_group(group)
# Browser options.
self.browser_options.AddCommandLineArgs(parser)
real_parse = parser.parse_args
def ParseArgs(args=None):
defaults = parser.get_default_values()
for k, v in defaults.__dict__.items():
if k in self.__dict__ and self.__dict__[k] != None:
continue
self.__dict__[k] = v
ret = real_parse(args, self) # pylint: disable=E1121
if self.verbosity >= 2:
logging.getLogger().setLevel(logging.DEBUG)
elif self.verbosity:
logging.getLogger().setLevel(logging.INFO)
else:
logging.getLogger().setLevel(logging.WARNING)
if self.browser_executable and not self.browser_type:
self.browser_type = 'exact'
if self.browser_type == 'list':
try:
types = browser_finder.GetAllAvailableBrowserTypes(self)
except browser_finder.BrowserFinderException, ex:
sys.stderr.write('ERROR: ' + str(ex))
sys.exit(1)
sys.stdout.write('Available browsers:\n')
sys.stdout.write(' %s\n' % '\n '.join(types))
sys.exit(0)
# Parse browser options.
self.browser_options.UpdateFromParseResults(self)
return ret
parser.parse_args = ParseArgs
return parser
def AppendExtraBrowserArgs(self, args):
self.browser_options.AppendExtraBrowserArgs(args)
def MergeDefaultValues(self, defaults):
for k, v in defaults.__dict__.items():
self.ensure_value(k, v)
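# Minimal usage sketch (hypothetical invocation; assumes a telemetry checkout
# on sys.path):
#   options = BrowserFinderOptions()
#   parser = options.CreateParser(usage='%prog [options]')
#   parser.parse_args(['--browser', 'system', '-v'])
#   # options.browser_type == 'system', options.verbosity == 1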
class BrowserOptions(object):
"""Options to be used for launching a browser."""
def __init__(self):
self.browser_type = None
self.show_stdout = False
# When set to True, the browser will use the default profile. Telemetry
# will not provide an alternate profile directory.
self.dont_override_profile = False
self.profile_dir = None
self.profile_type = None
self._extra_browser_args = set()
self.extra_wpr_args = []
self.wpr_mode = wpr_modes.WPR_OFF
self.netsim = None
self.disable_background_networking = True
self.no_proxy_server = False
self.browser_user_agent_type = None
self.clear_sytem_cache_for_browser_and_profile_on_start = False
self.startup_url = 'about:blank'
# Background pages of built-in component extensions can interfere with
# performance measurements.
self.disable_component_extensions_with_background_pages = True
# Whether to use the new code path for choosing an ephemeral port for
# DevTools. The bots set this to true. When Chrome 37 reaches stable,
# remove this setting and the old code path. http://crbug.com/379980
self.use_devtools_active_port = False
def __repr__(self):
return str(sorted(self.__dict__.items()))
@classmethod
def AddCommandLineArgs(cls, parser):
############################################################################
# Please do not add any more options here without first discussing with #
# a telemetry owner. This is not the right place for platform-specific #
# options. #
############################################################################
group = optparse.OptionGroup(parser, 'Browser options')
profile_choices = profile_types.GetProfileTypes()
group.add_option('--profile-type',
dest='profile_type',
type='choice',
default='clean',
choices=profile_choices,
help=('The user profile to use. A clean profile is used by default. '
'Supported values: ' + ', '.join(profile_choices)))
group.add_option('--profile-dir',
dest='profile_dir',
help='Profile directory to launch the browser with. '
'A clean profile is used by default')
group.add_option('--extra-browser-args',
dest='extra_browser_args_as_string',
help='Additional arguments to pass to the browser when it starts')
group.add_option('--extra-wpr-args',
dest='extra_wpr_args_as_string',
help=('Additional arguments to pass to Web Page Replay. '
'See third_party/webpagereplay/replay.py for usage.'))
group.add_option('--netsim', default=None, type='choice',
choices=net_configs.NET_CONFIG_NAMES,
help=('Run benchmark under simulated network conditions. '
'Will prompt for sudo. Supported values: ' +
', '.join(net_configs.NET_CONFIG_NAMES)))
group.add_option('--show-stdout',
action='store_true',
help='When possible, will display the stdout of the process')
# This hidden option is to be removed, and the older code path deleted,
# once Chrome 37 reaches Stable. http://crbug.com/379980
group.add_option('--use-devtools-active-port',
action='store_true',
help=optparse.SUPPRESS_HELP)
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Compatibility options')
group.add_option('--gtest_output',
help='Ignored argument for compatibility with runtest.py harness')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Synthetic gesture options')
synthetic_gesture_source_type_choices = [ 'default', 'mouse', 'touch' ]
group.add_option('--synthetic-gesture-source-type',
dest='synthetic_gesture_source_type',
default='default', type='choice',
choices=synthetic_gesture_source_type_choices,
help='Specify the source type for synthetic gestures. Note that some ' +
'actions only support a specific source type. ' +
'Supported values: ' +
', '.join(synthetic_gesture_source_type_choices))
parser.add_option_group(group)
def UpdateFromParseResults(self, finder_options):
"""Copies our options from finder_options"""
browser_options_list = [
'extra_browser_args_as_string',
'extra_wpr_args_as_string',
'netsim',
'profile_dir',
'profile_type',
'show_stdout',
'synthetic_gesture_source_type',
'use_devtools_active_port',
]
for o in browser_options_list:
a = getattr(finder_options, o, None)
if a is not None:
setattr(self, o, a)
delattr(finder_options, o)
self.browser_type = finder_options.browser_type
if hasattr(self, 'extra_browser_args_as_string'): # pylint: disable=E1101
tmp = shlex.split(
self.extra_browser_args_as_string) # pylint: disable=E1101
self.AppendExtraBrowserArgs(tmp)
delattr(self, 'extra_browser_args_as_string')
if hasattr(self, 'extra_wpr_args_as_string'): # pylint: disable=E1101
tmp = shlex.split(
self.extra_wpr_args_as_string) # pylint: disable=E1101
self.extra_wpr_args.extend(tmp)
delattr(self, 'extra_wpr_args_as_string')
if self.profile_type == 'default':
self.dont_override_profile = True
if self.profile_dir and self.profile_type != 'clean':
logging.critical(
"It's illegal to specify both --profile-type and --profile-dir.\n"
"For more information see: http://goo.gl/ngdGD5")
sys.exit(1)
if self.profile_dir and not os.path.isdir(self.profile_dir):
logging.critical(
"Directory specified by --profile-dir (%s) doesn't exist "
"or isn't a directory.\n"
"For more information see: http://goo.gl/ngdGD5" % self.profile_dir)
sys.exit(1)
if not self.profile_dir:
self.profile_dir = profile_types.GetProfileDir(self.profile_type)
# This deferred import is necessary because browser_options is imported in
# telemetry/telemetry/__init__.py.
from telemetry.core.backends.chrome import chrome_browser_options
finder_options.browser_options = (
chrome_browser_options.CreateChromeBrowserOptions(self))
@property
def extra_browser_args(self):
return self._extra_browser_args
def AppendExtraBrowserArgs(self, args):
if isinstance(args, list):
self._extra_browser_args.update(args)
else:
self._extra_browser_args.add(args)
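  # Usage sketch (hypothetical): both forms below land in the same set that
  # is handed to the browser command line at launch:
  #   opts = BrowserOptions()
  #   opts.AppendExtraBrowserArgs('--disable-gpu')
  #   opts.AppendExtraBrowserArgs(['--no-sandbox', '--disable-gpu'])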
| bsd-3-clause | -3,664,424,122,302,116,000 | -5,756,857,333,726,055,000 | 37.153392 | 80 | 0.643652 | false |
j-be/wien-geodatenviewer-exporter | convert_coordinates.py | 1 | 1134 | #!/usr/bin/python2.7
# -*- coding: utf-8 -*-
from get_size import getSize
import os
# Fetch parameters from environment
start_major = int(os.environ['__VJ_START_MAJOR__'])
start_minor = int(os.environ['__VJ_START_MINOR__'])
# Specific to internet data
LINE_SHIFT=10
def getNext((major, minor) = (None, None), n = 0):
if (major is None):
return (start_major, start_minor), 0
# End of Line
if not (n < getSize()[0] - 1):
# Next line is low Minors
if minor > 2:
if (start_minor < 3):
return (major + LINE_SHIFT - n / 2, start_minor), 0
else:
return (major + LINE_SHIFT - n / 2, (start_minor % 3) + 1), 0
# Next line is high Minors
else:
if (start_minor < 3):
return (major - n/2, start_minor + 2), 0
else:
return (major - n/2, start_minor), 0
# Normal case
n += 1
# Odd Minors
if (minor % 2 == 1):
return (major, minor + 1), n
# Even Minors
if (minor % 2 == 0):
return (major + 1, minor - 1), n
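# Worked example (hypothetical environment: __VJ_START_MAJOR__=100,
# __VJ_START_MINOR__=1, and getSize() returning a map 4 tiles wide):
#   getNext()            -> (100, 1), 0   # start tile
#   getNext((100, 1), 0) -> (100, 2), 1   # odd minor: minor + 1
#   getNext((100, 2), 1) -> (101, 1), 2   # even minor: major + 1, minor - 1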
if __name__ == "__main__":
size=getSize()
x, n = getNext()
for i in range(size[0] * size[1]):
if (n == 0):
print
print str(x[0]) + "_" + str(x[1]),
x, n = getNext(x, n)
| mit | 316,433,139,350,779,700 | 6,414,899,273,396,195,000 | 21.68 | 66 | 0.577601 | false |
wakatime/komodo-wakatime | components/wakatime/packages/pygments/lexers/dotnet.py | 7 | 27664 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dotnet
~~~~~~~~~~~~~~~~~~~~~~
Lexers for .net languages.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, DelegatingLexer, bygroups, include, \
using, this, default, words
from pygments.token import Punctuation, \
Text, Comment, Operator, Keyword, Name, String, Number, Literal, Other
from pygments.util import get_choice_opt, iteritems
from pygments import unistring as uni
from pygments.lexers.html import XmlLexer
__all__ = ['CSharpLexer', 'NemerleLexer', 'BooLexer', 'VbNetLexer',
'CSharpAspxLexer', 'VbNetAspxLexer', 'FSharpLexer']
class CSharpLexer(RegexLexer):
"""
For `C# <http://msdn2.microsoft.com/en-us/vcsharp/default.aspx>`_
source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
.. versionadded:: 0.8
"""
name = 'C#'
aliases = ['csharp', 'c#']
filenames = ['*.cs']
mimetypes = ['text/x-csharp'] # inferred
flags = re.MULTILINE | re.DOTALL | re.UNICODE
# for the range of allowed unicode characters in identifiers, see
# http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
'none': '@?[_a-zA-Z]\w*',
'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'),
'full': ('@?(?:_|[^' +
uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
+ '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
}
tokens = {}
token_variants = True
for levelname, cs_ident in iteritems(levels):
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Punctuation)),
(r'^\s*\[.*?\]', Name.Attribute),
(r'[^\S\n]+', Text),
(r'\\\n', Text), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Text),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\"|[^"\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?"
r"[flFLdD]?|0[xX][0-9a-fA-F]+[Ll]?", Number),
(r'#[ \t]*(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b.*?\n',
Comment.Preproc),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
Keyword)),
(r'(abstract|as|async|await|base|break|by|case|catch|'
r'checked|const|continue|default|delegate|'
r'do|else|enum|event|explicit|extern|false|finally|'
r'fixed|for|foreach|goto|if|implicit|in|interface|'
r'internal|is|let|lock|new|null|on|operator|'
r'out|override|params|private|protected|public|readonly|'
r'ref|return|sealed|sizeof|stackalloc|static|'
r'switch|this|throw|true|try|typeof|'
r'unchecked|unsafe|virtual|void|while|'
r'get|set|new|partial|yield|add|remove|value|alias|ascending|'
r'descending|from|group|into|orderby|select|thenby|where|'
r'join|equals)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|dynamic|float|int|long|object|'
r'sbyte|short|string|uint|ulong|ushort|var)\b\??', Keyword.Type),
(r'(class|struct)(\s+)', bygroups(Keyword, Text), 'class'),
(r'(namespace|using)(\s+)', bygroups(Keyword, Text), 'namespace'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop'),
default('#pop'),
],
'namespace': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop'),
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', list(self.tokens), 'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
class NemerleLexer(RegexLexer):
"""
For `Nemerle <http://nemerle.org>`_ source code.
Additional options accepted:
`unicodelevel`
Determines which Unicode characters this lexer allows for identifiers.
The possible values are:
* ``none`` -- only the ASCII letters and numbers are allowed. This
is the fastest selection.
* ``basic`` -- all Unicode characters from the specification except
category ``Lo`` are allowed.
* ``full`` -- all Unicode characters as specified in the C# specs
are allowed. Note that this means a considerable slowdown since the
``Lo`` category has more than 40,000 characters in it!
The default value is ``basic``.
.. versionadded:: 1.5
"""
name = 'Nemerle'
aliases = ['nemerle']
filenames = ['*.n']
mimetypes = ['text/x-nemerle'] # inferred
flags = re.MULTILINE | re.DOTALL | re.UNICODE
# for the range of allowed unicode characters in identifiers, see
# http://www.ecma-international.org/publications/files/ECMA-ST/Ecma-334.pdf
levels = {
'none': '@?[_a-zA-Z]\w*',
'basic': ('@?[_' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl') + ']' +
'[' + uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'),
'full': ('@?(?:_|[^' +
uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl') + '])'
+ '[^' + uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl',
'Nd', 'Pc', 'Cf', 'Mn', 'Mc') + ']*'),
}
tokens = {}
token_variants = True
for levelname, cs_ident in iteritems(levels):
tokens[levelname] = {
'root': [
# method names
(r'^([ \t]*(?:' + cs_ident + r'(?:\[\])?\s+)+?)' # return type
r'(' + cs_ident + ')' # method name
r'(\s*)(\()', # signature start
bygroups(using(this), Name.Function, Text, Punctuation)),
(r'^\s*\[.*?\]', Name.Attribute),
(r'[^\S\n]+', Text),
(r'\\\n', Text), # line continuation
(r'//.*?\n', Comment.Single),
(r'/[*].*?[*]/', Comment.Multiline),
(r'\n', Text),
(r'\$\s*"', String, 'splice-string'),
(r'\$\s*<#', String, 'splice-string2'),
(r'<#', String, 'recursive-string'),
(r'(<\[)\s*(' + cs_ident + ':)?', Keyword),
(r'\]\>', Keyword),
# quasiquotation only
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'[~!%^&*()+=|\[\]:;,.<>/?-]', Punctuation),
(r'[{}]', Punctuation),
(r'@"(""|[^"])*"', String),
(r'"(\\\\|\\"|[^"\n])*["\n]', String),
(r"'\\.'|'[^\\]'", String.Char),
(r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r"[0-9](\.[0-9]*)?([eE][+-][0-9]+)?[flFLdD]?", Number),
(r'#[ \t]*(if|endif|else|elif|define|undef|'
r'line|error|warning|region|endregion|pragma)\b.*?\n',
Comment.Preproc),
(r'\b(extern)(\s+)(alias)\b', bygroups(Keyword, Text,
Keyword)),
(r'(abstract|and|as|base|catch|def|delegate|'
r'enum|event|extern|false|finally|'
r'fun|implements|interface|internal|'
r'is|macro|match|matches|module|mutable|new|'
r'null|out|override|params|partial|private|'
r'protected|public|ref|sealed|static|'
r'syntax|this|throw|true|try|type|typeof|'
r'virtual|volatile|when|where|with|'
r'assert|assert2|async|break|checked|continue|do|else|'
r'ensures|for|foreach|if|late|lock|new|nolate|'
r'otherwise|regexp|repeat|requires|return|surroundwith|'
r'unchecked|unless|using|while|yield)\b', Keyword),
(r'(global)(::)', bygroups(Keyword, Punctuation)),
(r'(bool|byte|char|decimal|double|float|int|long|object|sbyte|'
r'short|string|uint|ulong|ushort|void|array|list)\b\??',
Keyword.Type),
(r'(:>?)\s*(' + cs_ident + r'\??)',
bygroups(Punctuation, Keyword.Type)),
(r'(class|struct|variant|module)(\s+)',
bygroups(Keyword, Text), 'class'),
(r'(namespace|using)(\s+)', bygroups(Keyword, Text),
'namespace'),
(cs_ident, Name),
],
'class': [
(cs_ident, Name.Class, '#pop')
],
'namespace': [
(r'(?=\()', Text, '#pop'), # using (resource)
('(' + cs_ident + r'|\.)+', Name.Namespace, '#pop')
],
'splice-string': [
(r'[^"$]', String),
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'\\"', String),
(r'"', String, '#pop')
],
'splice-string2': [
(r'[^#<>$]', String),
(r'\$' + cs_ident, Name),
(r'(\$)(\()', bygroups(Name, Punctuation),
'splice-string-content'),
(r'<#', String, '#push'),
(r'#>', String, '#pop')
],
'recursive-string': [
(r'[^#<>]', String),
(r'<#', String, '#push'),
(r'#>', String, '#pop')
],
'splice-string-content': [
(r'if|match', Keyword),
(r'[~!%^&*+=|\[\]:;,.<>/?-\\"$ ]', Punctuation),
(cs_ident, Name),
(r'\d+', Number),
(r'\(', Punctuation, '#push'),
(r'\)', Punctuation, '#pop')
]
}
def __init__(self, **options):
level = get_choice_opt(options, 'unicodelevel', list(self.tokens),
'basic')
if level not in self._all_tokens:
# compile the regexes now
self._tokens = self.__class__.process_tokendef(level)
else:
self._tokens = self._all_tokens[level]
RegexLexer.__init__(self, **options)
class BooLexer(RegexLexer):
"""
For `Boo <http://boo.codehaus.org/>`_ source code.
"""
name = 'Boo'
aliases = ['boo']
filenames = ['*.boo']
mimetypes = ['text/x-boo']
tokens = {
'root': [
(r'\s+', Text),
(r'(#|//).*$', Comment.Single),
(r'/[*]', Comment.Multiline, 'comment'),
(r'[]{}:(),.;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'/(\\\\|\\/|[^/\s])/', String.Regex),
(r'@/(\\\\|\\/|[^/])*/', String.Regex),
(r'=~|!=|==|<<|>>|[-+/*%=<>&^|]', Operator),
(r'(as|abstract|callable|constructor|destructor|do|import|'
r'enum|event|final|get|interface|internal|of|override|'
r'partial|private|protected|public|return|set|static|'
r'struct|transient|virtual|yield|super|and|break|cast|'
r'continue|elif|else|ensure|except|for|given|goto|if|in|'
r'is|isa|not|or|otherwise|pass|raise|ref|try|unless|when|'
r'while|from|as)\b', Keyword),
(r'def(?=\s+\(.*?\))', Keyword),
(r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(namespace)(\s+)', bygroups(Keyword, Text), 'namespace'),
(r'(?<!\.)(true|false|null|self|__eval__|__switch__|array|'
r'assert|checked|enumerate|filter|getter|len|lock|map|'
r'matrix|max|min|normalArrayIndexing|print|property|range|'
r'rawArrayIndexing|required|typeof|unchecked|using|'
r'yieldAll|zip)\b', Name.Builtin),
(r'"""(\\\\|\\"|.*?)"""', String.Double),
(r'"(\\\\|\\"|[^"]*?)"', String.Double),
(r"'(\\\\|\\'|[^']*?)'", String.Single),
(r'[a-zA-Z_]\w*', Name),
(r'(\d+\.\d*|\d*\.\d+)([fF][+-]?[0-9]+)?', Number.Float),
(r'[0-9][0-9.]*(ms?|d|h|s)', Number),
(r'0\d+', Number.Oct),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer),
],
'comment': [
('/[*]', Comment.Multiline, '#push'),
('[*]/', Comment.Multiline, '#pop'),
('[^/*]', Comment.Multiline),
('[*/]', Comment.Multiline)
],
'funcname': [
('[a-zA-Z_]\w*', Name.Function, '#pop')
],
'classname': [
('[a-zA-Z_]\w*', Name.Class, '#pop')
],
'namespace': [
('[a-zA-Z_][\w.]*', Name.Namespace, '#pop')
]
}
class VbNetLexer(RegexLexer):
"""
For
`Visual Basic.NET <http://msdn2.microsoft.com/en-us/vbasic/default.aspx>`_
source code.
"""
name = 'VB.net'
aliases = ['vb.net', 'vbnet']
filenames = ['*.vb', '*.bas']
mimetypes = ['text/x-vbnet', 'text/x-vba'] # (?)
uni_name = '[_' + uni.combine('Ll', 'Lt', 'Lm', 'Nl') + ']' + \
'[' + uni.combine('Ll', 'Lt', 'Lm', 'Nl', 'Nd', 'Pc',
'Cf', 'Mn', 'Mc') + ']*'
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^\s*<.*?>', Name.Attribute),
(r'\s+', Text),
(r'\n', Text),
(r'rem\b.*?\n', Comment),
(r"'.*?\n", Comment),
(r'#If\s.*?\sThen|#ElseIf\s.*?\sThen|#Else|#End\s+If|#Const|'
r'#ExternalSource.*?\n|#End\s+ExternalSource|'
r'#Region.*?\n|#End\s+Region|#ExternalChecksum',
Comment.Preproc),
(r'[(){}!#,.:]', Punctuation),
(r'Option\s+(Strict|Explicit|Compare)\s+'
r'(On|Off|Binary|Text)', Keyword.Declaration),
(words((
'AddHandler', 'Alias', 'ByRef', 'ByVal', 'Call', 'Case',
'Catch', 'CBool', 'CByte', 'CChar', 'CDate', 'CDec', 'CDbl',
'CInt', 'CLng', 'CObj', 'Continue', 'CSByte', 'CShort', 'CSng',
'CStr', 'CType', 'CUInt', 'CULng', 'CUShort', 'Declare',
'Default', 'Delegate', 'DirectCast', 'Do', 'Each', 'Else',
'ElseIf', 'EndIf', 'Erase', 'Error', 'Event', 'Exit', 'False',
'Finally', 'For', 'Friend', 'Get', 'Global', 'GoSub', 'GoTo',
'Handles', 'If', 'Implements', 'Inherits', 'Interface', 'Let',
'Lib', 'Loop', 'Me', 'MustInherit', 'MustOverride', 'MyBase',
'MyClass', 'Narrowing', 'New', 'Next', 'Not', 'Nothing',
'NotInheritable', 'NotOverridable', 'Of', 'On', 'Operator',
'Option', 'Optional', 'Overloads', 'Overridable', 'Overrides',
'ParamArray', 'Partial', 'Private', 'Protected', 'Public',
'RaiseEvent', 'ReadOnly', 'ReDim', 'RemoveHandler', 'Resume',
'Return', 'Select', 'Set', 'Shadows', 'Shared', 'Single',
'Static', 'Step', 'Stop', 'SyncLock', 'Then', 'Throw', 'To',
'True', 'Try', 'TryCast', 'Wend', 'Using', 'When', 'While',
'Widening', 'With', 'WithEvents', 'WriteOnly'),
prefix='(?<!\.)', suffix=r'\b'), Keyword),
(r'(?<!\.)End\b', Keyword, 'end'),
(r'(?<!\.)(Dim|Const)\b', Keyword, 'dim'),
(r'(?<!\.)(Function|Sub|Property)(\s+)',
bygroups(Keyword, Text), 'funcname'),
(r'(?<!\.)(Class|Structure|Enum)(\s+)',
bygroups(Keyword, Text), 'classname'),
(r'(?<!\.)(Module|Namespace|Imports)(\s+)',
bygroups(Keyword, Text), 'namespace'),
(r'(?<!\.)(Boolean|Byte|Char|Date|Decimal|Double|Integer|Long|'
r'Object|SByte|Short|Single|String|Variant|UInteger|ULong|'
r'UShort)\b', Keyword.Type),
(r'(?<!\.)(AddressOf|And|AndAlso|As|GetType|In|Is|IsNot|Like|Mod|'
r'Or|OrElse|TypeOf|Xor)\b', Operator.Word),
(r'&=|[*]=|/=|\\=|\^=|\+=|-=|<<=|>>=|<<|>>|:=|'
r'<=|>=|<>|[-&*/\\^+=<>\[\]]',
Operator),
('"', String, 'string'),
(r'_\n', Text), # Line continuation (must be before Name)
(uni_name + '[%&@!#$]?', Name),
('#.*?#', Literal.Date),
(r'(\d+\.\d*|\d*\.\d+)(F[+-]?[0-9]+)?', Number.Float),
(r'\d+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&H[0-9a-f]+([SILDFR]|US|UI|UL)?', Number.Integer),
(r'&O[0-7]+([SILDFR]|US|UI|UL)?', Number.Integer),
],
'string': [
(r'""', String),
(r'"C?', String, '#pop'),
(r'[^"]+', String),
],
'dim': [
(uni_name, Name.Variable, '#pop'),
default('#pop'), # any other syntax
],
'funcname': [
(uni_name, Name.Function, '#pop'),
],
'classname': [
(uni_name, Name.Class, '#pop'),
],
'namespace': [
(uni_name, Name.Namespace),
(r'\.', Name.Namespace),
default('#pop'),
],
'end': [
(r'\s+', Text),
(r'(Function|Sub|Property|Class|Structure|Enum|Module|Namespace)\b',
Keyword, '#pop'),
default('#pop'),
]
}
def analyse_text(text):
if re.search(r'^\s*(#If|Module|Namespace)', text, re.MULTILINE):
return 0.5
class GenericAspxLexer(RegexLexer):
"""
Lexer for ASP.NET pages.
"""
name = 'aspx-gen'
filenames = []
mimetypes = []
flags = re.DOTALL
tokens = {
'root': [
(r'(<%[@=#]?)(.*?)(%>)', bygroups(Name.Tag, Other, Name.Tag)),
(r'(<script.*?>)(.*?)(</script>)', bygroups(using(XmlLexer),
Other,
using(XmlLexer))),
(r'(.+?)(?=<)', using(XmlLexer)),
(r'.+', using(XmlLexer)),
],
}
# TODO support multiple languages within the same source file
class CSharpAspxLexer(DelegatingLexer):
"""
Lexer for highlighting C# within ASP.NET pages.
"""
name = 'aspx-cs'
aliases = ['aspx-cs']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super(CSharpAspxLexer, self).__init__(CSharpLexer, GenericAspxLexer,
**options)
def analyse_text(text):
if re.search(r'Page\s*Language="C#"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']C#', text, re.I) is not None:
return 0.15
class VbNetAspxLexer(DelegatingLexer):
"""
Lexer for highlighting Visual Basic.net within ASP.NET pages.
"""
name = 'aspx-vb'
aliases = ['aspx-vb']
filenames = ['*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd']
mimetypes = []
def __init__(self, **options):
super(VbNetAspxLexer, self).__init__(VbNetLexer, GenericAspxLexer,
**options)
def analyse_text(text):
if re.search(r'Page\s*Language="Vb"', text, re.I) is not None:
return 0.2
elif re.search(r'script[^>]+language=["\']vb', text, re.I) is not None:
return 0.15
# Very close to functional.OcamlLexer
class FSharpLexer(RegexLexer):
"""
For the F# language (version 3.0).
AAAAACK Strings
http://research.microsoft.com/en-us/um/cambridge/projects/fsharp/manual/spec.html#_Toc335818775
.. versionadded:: 1.5
"""
name = 'F#'
aliases = ['fsharp']
filenames = ['*.fs', '*.fsi']
mimetypes = ['text/x-fsharp']
keywords = [
'abstract', 'as', 'assert', 'base', 'begin', 'class', 'default',
'delegate', 'do!', 'do', 'done', 'downcast', 'downto', 'elif', 'else',
'end', 'exception', 'extern', 'false', 'finally', 'for', 'function',
'fun', 'global', 'if', 'inherit', 'inline', 'interface', 'internal',
'in', 'lazy', 'let!', 'let', 'match', 'member', 'module', 'mutable',
'namespace', 'new', 'null', 'of', 'open', 'override', 'private', 'public',
'rec', 'return!', 'return', 'select', 'static', 'struct', 'then', 'to',
'true', 'try', 'type', 'upcast', 'use!', 'use', 'val', 'void', 'when',
'while', 'with', 'yield!', 'yield',
]
# Reserved words; cannot hurt to color them as keywords too.
keywords += [
'atomic', 'break', 'checked', 'component', 'const', 'constraint',
'constructor', 'continue', 'eager', 'event', 'external', 'fixed',
'functor', 'include', 'method', 'mixin', 'object', 'parallel',
'process', 'protected', 'pure', 'sealed', 'tailcall', 'trait',
'virtual', 'volatile',
]
keyopts = [
'!=', '#', '&&', '&', '\(', '\)', '\*', '\+', ',', '-\.',
'->', '-', '\.\.', '\.', '::', ':=', ':>', ':', ';;', ';', '<-',
'<\]', '<', '>\]', '>', '\?\?', '\?', '\[<', '\[\|', '\[', '\]',
'_', '`', '\{', '\|\]', '\|', '\}', '~', '<@@', '<@', '=', '@>', '@@>',
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ['and', 'or', 'not']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = [
'sbyte', 'byte', 'char', 'nativeint', 'unativeint', 'float32', 'single',
'float', 'double', 'int8', 'uint8', 'int16', 'uint16', 'int32',
'uint32', 'int64', 'uint64', 'decimal', 'unit', 'bool', 'string',
'list', 'exn', 'obj', 'enum',
]
# See http://msdn.microsoft.com/en-us/library/dd233181.aspx and/or
# http://fsharp.org/about/files/spec.pdf for reference. Good luck.
tokens = {
'escape-sequence': [
(r'\\[\\"\'ntbrafv]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\U[0-9a-fA-F]{8}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b(?<!\.)([A-Z][\w\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
(r'\b([A-Z][\w\']*)', Name),
(r'///.*?\n', String.Doc),
(r'//.*?\n', Comment.Single),
(r'\(\*(?!\))', Comment, 'comment'),
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'\b(open|module)(\s+)([\w.]+)',
bygroups(Keyword, Text, Name.Namespace)),
(r'\b(let!?)(\s+)(\w+)',
bygroups(Keyword, Text, Name.Variable)),
(r'\b(type)(\s+)(\w+)',
bygroups(Keyword, Text, Name.Class)),
(r'\b(member|override)(\s+)(\w+)(\.)(\w+)',
bygroups(Keyword, Text, Name, Punctuation, Name.Function)),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'``([^`\n\r\t]|`[^`\n\r\t])+``', Name),
(r'(%s)' % '|'.join(keyopts), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r'#[ \t]*(if|endif|else|line|nowarn|light|\d+)\b.*?\n',
Comment.Preproc),
(r"[^\W\d][\w']*", Name),
(r'\d[\d_]*[uU]?[yslLnQRZINGmM]?', Number.Integer),
(r'0[xX][\da-fA-F][\da-fA-F_]*[uU]?[yslLn]?[fF]?', Number.Hex),
(r'0[oO][0-7][0-7_]*[uU]?[yslLn]?', Number.Oct),
(r'0[bB][01][01_]*[uU]?[yslLn]?', Number.Bin),
(r'-?\d[\d_]*(.[\d_]*)?([eE][+\-]?\d[\d_]*)[fFmM]?',
Number.Float),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'B?",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'@?"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][\w\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][\w\']*', Name, '#pop'),
(r'[a-z_][\w\']*', Name, '#pop'),
# e.g. dictionary index access
default('#pop'),
],
'comment': [
(r'[^(*)@"]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
# comments cannot be closed within strings in comments
(r'@"', String, 'lstring'),
(r'"""', String, 'tqs'),
(r'"', String, 'string'),
(r'[(*)@]', Comment),
],
'string': [
(r'[^\\"]+', String),
include('escape-sequence'),
(r'\\\n', String),
(r'\n', String), # newlines are allowed in any string
(r'"B?', String, '#pop'),
],
'lstring': [
(r'[^"]+', String),
(r'\n', String),
(r'""', String),
(r'"B?', String, '#pop'),
],
'tqs': [
(r'[^"]+', String),
(r'\n', String),
(r'"""B?', String, '#pop'),
(r'"', String),
],
}
| bsd-3-clause | -4,172,034,566,410,009,600 | -3,633,174,433,605,470,000 | 39.034732 | 99 | 0.4405 | false |
ludmilamarian/invenio | invenio/modules/upgrader/upgrades/invenio_2013_03_18_aidPERSONIDDATA_last_updated.py | 15 | 1685 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import warnings
from invenio.legacy.dbquery import run_sql
from invenio.utils.text import wait_for_user
depends_on = ['invenio_release_1_1_0']
def info():
return "Introduces aidPERSONIDDATA last_updated column and new table indexes"
def do_upgrade():
column_exists = run_sql("SHOW COLUMNS FROM `aidPERSONIDDATA` LIKE 'last_updated'")
if not column_exists:
run_sql("""
ALTER TABLE aidPERSONIDDATA
ADD COLUMN last_updated TIMESTAMP ON UPDATE CURRENT_TIMESTAMP NOT NULL
DEFAULT CURRENT_TIMESTAMP AFTER opt3,
ADD INDEX `timestamp-b` (`last_updated`)
""")
indexes = [i[2] for i in run_sql('SHOW INDEX FROM aidPERSONIDPAPERS')]
if 'personid-flag-b' not in indexes:
run_sql("""
ALTER TABLE aidPERSONIDPAPERS
ADD INDEX `personid-flag-b` (`personid`, `flag`)
""")
def estimate():
return 1
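# Rough execution flow, as a sketch of the upgrader contract used above:
# recipes listed in `depends_on` are applied first, then do_upgrade() runs
# the DDL, and estimate() reports the expected cost of the migration.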
| gpl-2.0 | 8,558,343,289,799,387,000 | 7,856,050,986,836,695,000 | 34.104167 | 86 | 0.693769 | false |
John-Boik/Principled-Societies-Project | leddaApp/static/brython/src/Lib/test/test_shlex.py | 113 | 5912 | import io
import shlex
import string
import unittest
from test import support
# The original test data set was from shellwords, by Hartmut Goebel.
data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|\|x|bar|
\ x bar|\|x|bar|
\ bar|\|bar|
foo \x bar|foo|\|x|bar|
foo \ x bar|foo|\|x|bar|
foo \ bar|foo|\|bar|
foo "bar" bla|foo|"bar"|bla|
"foo" "bar" "bla"|"foo"|"bar"|"bla"|
"foo" bar "bla"|"foo"|bar|"bla"|
"foo" bar bla|"foo"|bar|bla|
foo 'bar' bla|foo|'bar'|bla|
'foo' 'bar' 'bla'|'foo'|'bar'|'bla'|
'foo' bar 'bla'|'foo'|bar|'bla'|
'foo' bar bla|'foo'|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foo"bar"bar"fasel"|baz|
blurb foo'bar'bar'fasel' baz|blurb|foo'bar'bar'fasel'|baz|
""|""|
''|''|
foo "" bar|foo|""|bar|
foo '' bar|foo|''|bar|
foo "" "" "" bar|foo|""|""|""|bar|
foo '' '' '' bar|foo|''|''|''|bar|
\""|\|""|
"\"|"\"|
"foo\ bar"|"foo\ bar"|
"foo\\ bar"|"foo\\ bar"|
"foo\\ bar\"|"foo\\ bar\"|
"foo\\" bar\""|"foo\\"|bar|\|""|
"foo\\ bar\" dfadf"|"foo\\ bar\"|dfadf"|
"foo\\\ bar\" dfadf"|"foo\\\ bar\"|dfadf"|
"foo\\\x bar\" dfadf"|"foo\\\x bar\"|dfadf"|
"foo\x bar\" dfadf"|"foo\x bar\"|dfadf"|
\''|\|''|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
"foo\\\x bar\" df'a\ 'df'|"foo\\\x bar\"|df'a|\|'df'|
\"foo"|\|"foo"|
\"foo"\x|\|"foo"|\|x|
"foo\x"|"foo\x"|
"foo\ "|"foo\ "|
foo\ xx|foo|\|xx|
foo\ x\x|foo|\|x|\|x|
foo\ x\x\""|foo|\|x|\|x|\|""|
"foo\ x\x"|"foo\ x\x"|
"foo\ x\x\\"|"foo\ x\x\\"|
"foo\ x\x\\""foobar"|"foo\ x\x\\"|"foobar"|
"foo\ x\x\\"\''"foobar"|"foo\ x\x\\"|\|''|"foobar"|
"foo\ x\x\\"\'"fo'obar"|"foo\ x\x\\"|\|'"fo'|obar"|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|"foo\ x\x\\"|\|'"fo'|obar"|'don'|\|''|t'|
'foo\ bar'|'foo\ bar'|
'foo\\ bar'|'foo\\ bar'|
foo\ bar|foo|\|bar|
foo#bar\nbaz|foobaz|
:-) ;-)|:|-|)|;|-|)|
áéíóú|á|é|í|ó|ú|
"""
posix_data = r"""x|x|
foo bar|foo|bar|
foo bar|foo|bar|
foo bar |foo|bar|
foo bar bla fasel|foo|bar|bla|fasel|
x y z xxxx|x|y|z|xxxx|
\x bar|x|bar|
\ x bar| x|bar|
\ bar| bar|
foo \x bar|foo|x|bar|
foo \ x bar|foo| x|bar|
foo \ bar|foo| bar|
foo "bar" bla|foo|bar|bla|
"foo" "bar" "bla"|foo|bar|bla|
"foo" bar "bla"|foo|bar|bla|
"foo" bar bla|foo|bar|bla|
foo 'bar' bla|foo|bar|bla|
'foo' 'bar' 'bla'|foo|bar|bla|
'foo' bar 'bla'|foo|bar|bla|
'foo' bar bla|foo|bar|bla|
blurb foo"bar"bar"fasel" baz|blurb|foobarbarfasel|baz|
blurb foo'bar'bar'fasel' baz|blurb|foobarbarfasel|baz|
""||
''||
foo "" bar|foo||bar|
foo '' bar|foo||bar|
foo "" "" "" bar|foo||||bar|
foo '' '' '' bar|foo||||bar|
\"|"|
"\""|"|
"foo\ bar"|foo\ bar|
"foo\\ bar"|foo\ bar|
"foo\\ bar\""|foo\ bar"|
"foo\\" bar\"|foo\|bar"|
"foo\\ bar\" dfadf"|foo\ bar" dfadf|
"foo\\\ bar\" dfadf"|foo\\ bar" dfadf|
"foo\\\x bar\" dfadf"|foo\\x bar" dfadf|
"foo\x bar\" dfadf"|foo\x bar" dfadf|
\'|'|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
"foo\\\x bar\" df'a\ 'df"|foo\\x bar" df'a\ 'df|
\"foo|"foo|
\"foo\x|"foox|
"foo\x"|foo\x|
"foo\ "|foo\ |
foo\ xx|foo xx|
foo\ x\x|foo xx|
foo\ x\x\"|foo xx"|
"foo\ x\x"|foo\ x\x|
"foo\ x\x\\"|foo\ x\x\|
"foo\ x\x\\""foobar"|foo\ x\x\foobar|
"foo\ x\x\\"\'"foobar"|foo\ x\x\'foobar|
"foo\ x\x\\"\'"fo'obar"|foo\ x\x\'fo'obar|
"foo\ x\x\\"\'"fo'obar" 'don'\''t'|foo\ x\x\'fo'obar|don't|
"foo\ x\x\\"\'"fo'obar" 'don'\''t' \\|foo\ x\x\'fo'obar|don't|\|
'foo\ bar'|foo\ bar|
'foo\\ bar'|foo\\ bar|
foo\ bar|foo bar|
foo#bar\nbaz|foo|baz|
:-) ;-)|:-)|;-)|
áéíóú|áéíóú|
"""
class ShlexTest(unittest.TestCase):
def setUp(self):
self.data = [x.split("|")[:-1]
for x in data.splitlines()]
self.posix_data = [x.split("|")[:-1]
for x in posix_data.splitlines()]
for item in self.data:
item[0] = item[0].replace(r"\n", "\n")
for item in self.posix_data:
item[0] = item[0].replace(r"\n", "\n")
def splitTest(self, data, comments):
for i in range(len(data)):
l = shlex.split(data[i][0], comments=comments)
self.assertEqual(l, data[i][1:],
"%s: %s != %s" %
(data[i][0], l, data[i][1:]))
def oldSplit(self, s):
ret = []
lex = shlex.shlex(io.StringIO(s))
tok = lex.get_token()
while tok:
ret.append(tok)
tok = lex.get_token()
return ret
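    # For example (assuming stdlib shlex semantics, as exercised below):
    #   shlex.split("a 'b c'")   -> ['a', 'b c']    (POSIX mode strips quotes)
    #   self.oldSplit("a 'b c'") -> ['a', "'b c'"]  (old interface keeps them)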
def testSplitPosix(self):
"""Test data splitting with posix parser"""
self.splitTest(self.posix_data, comments=True)
def testCompat(self):
"""Test compatibility interface"""
for i in range(len(self.data)):
l = self.oldSplit(self.data[i][0])
self.assertEqual(l, self.data[i][1:],
"%s: %s != %s" %
(self.data[i][0], l, self.data[i][1:]))
def testQuote(self):
safeunquoted = string.ascii_letters + string.digits + '@%_-+=:,./'
unicode_sample = '\xe9\xe0\xdf' # e + acute accent, a + grave, sharp s
unsafe = '"`$\\!' + unicode_sample
self.assertEqual(shlex.quote(''), "''")
self.assertEqual(shlex.quote(safeunquoted), safeunquoted)
self.assertEqual(shlex.quote('test file name'), "'test file name'")
for u in unsafe:
self.assertEqual(shlex.quote('test%sname' % u),
"'test%sname'" % u)
for u in unsafe:
self.assertEqual(shlex.quote("test%s'name'" % u),
"'test%s'\"'\"'name'\"'\"''" % u)
# Allow this test to be used with old shlex.py
if not getattr(shlex, "split", None):
for methname in dir(ShlexTest):
if methname.startswith("test") and methname != "testCompat":
delattr(ShlexTest, methname)
def test_main():
support.run_unittest(ShlexTest)
if __name__ == "__main__":
test_main()
| gpl-3.0 | 4,732,219,623,694,019,000 | -7,758,979,406,513,415,000 | 28.168317 | 79 | 0.521894 | false |
DNFcode/edx-platform | lms/djangoapps/courseware/tests/test_about.py | 4 | 19162 | """
Test the about xblock
"""
import datetime
import pytz
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from mock import patch
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from course_modes.models import CourseMode
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MOCK_MODULESTORE, TEST_DATA_MIXED_CLOSED_MODULESTORE
)
from student.models import CourseEnrollment
from student.tests.factories import UserFactory, CourseEnrollmentAllowedFactory
from shoppingcart.models import Order, PaidCourseRegistration
from xmodule.course_module import CATALOG_VISIBILITY_ABOUT, CATALOG_VISIBILITY_NONE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from .helpers import LoginEnrollmentTestCase
# HTML for registration button
REG_STR = "<form id=\"class_enroll_form\" method=\"post\" data-remote=\"true\" action=\"/change_enrollment\">"
SHIB_ERROR_STR = "The currently logged-in user account does not have permission to enroll in this course."
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class AboutTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests about xblock.
"""
def setUp(self):
self.course = CourseFactory.create()
self.about = ItemFactory.create(
category="about", parent_location=self.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
self.course_without_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_NONE)
self.about = ItemFactory.create(
category="about", parent_location=self.course_without_about.location,
data="WITHOUT ABOUT", display_name="overview"
)
self.course_with_about = CourseFactory.create(catalog_visibility=CATALOG_VISIBILITY_ABOUT)
self.about = ItemFactory.create(
category="about", parent_location=self.course_with_about.location,
data="WITH ABOUT", display_name="overview"
)
self.purchase_course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy')
self.course_mode = CourseMode(course_id=self.purchase_course.id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=10)
self.course_mode.save()
def test_anonymous_user(self):
"""
This test asserts that a non-logged in user can visit the course about page
"""
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
# Check that registration button is present
self.assertIn(REG_STR, resp.content)
def test_logged_in(self):
"""
This test asserts that a logged-in user can visit the course about page
"""
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
def test_already_enrolled(self):
"""
Asserts that the end user sees the appropriate messaging
when he/she visits the course about page, but is already enrolled
"""
self.setup_user()
self.enroll(self.course, True)
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("You are registered for this course", resp.content)
self.assertIn("View Courseware", resp.content)
@override_settings(COURSE_ABOUT_VISIBILITY_PERMISSION="see_about_page")
def test_visible_about_page_settings(self):
"""
Verify that the About Page honors the permission settings in the course module
"""
url = reverse('about_course', args=[self.course_with_about.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("WITH ABOUT", resp.content)
url = reverse('about_course', args=[self.course_without_about.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 404)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_logged_in_marketing(self):
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
# should be redirected
self.assertEqual(resp.status_code, 302)
# follow this time, and check we're redirected to the course info page
resp = self.client.get(url, follow=True)
target_url = resp.redirect_chain[-1][0]
info_url = reverse('info', args=[self.course.id.to_deprecated_string()])
self.assertTrue(target_url.endswith(info_url))
@override_settings(MODULESTORE=TEST_DATA_MIXED_CLOSED_MODULESTORE)
class AboutTestCaseXML(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests for the course about page
"""
# The following XML test course (which lives at common/test/data/2014)
# is closed; we're testing that an about page still appears when
# the course is already closed
xml_course_id = SlashSeparatedCourseKey('edX', 'detached_pages', '2014')
# this text appears in that course's about page
# common/test/data/2014/about/overview.html
xml_data = "about page 463139"
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_logged_in_xml(self):
self.setup_user()
url = reverse('about_course', args=[self.xml_course_id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
@patch.dict('django.conf.settings.FEATURES', {'DISABLE_START_DATES': False})
def test_anonymous_user_xml(self):
url = reverse('about_course', args=[self.xml_course_id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(self.xml_data, resp.content)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class AboutWithCappedEnrollmentsTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
This test case will check the About page when a course has a capped enrollment
"""
def setUp(self):
"""
Set up the tests
"""
self.course = CourseFactory.create(metadata={"max_student_enrollments_allowed": 1})
self.about = ItemFactory.create(
category="about", parent_location=self.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
def test_enrollment_cap(self):
"""
This test will make sure that enrollment caps are enforced
"""
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn('<a href="#" class="register">', resp.content)
self.enroll(self.course, verify=True)
# create a new account since the first account is already registered for the course
self.email = '[email protected]'
self.password = 'bar'
self.username = 'test_second'
self.create_account(self.username,
self.email, self.password)
self.activate_user(self.email)
self.login(self.email, self.password)
# Get the about page again and make sure that the page says that the course is full
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Course is full", resp.content)
# Try to enroll as well
result = self.enroll(self.course)
self.assertFalse(result)
# Check that registration button is not present
self.assertNotIn(REG_STR, resp.content)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class AboutWithInvitationOnly(ModuleStoreTestCase):
"""
This test case will check the About page when a course is invitation only.
"""
def setUp(self):
self.course = CourseFactory.create(metadata={"invitation_only": True})
self.about = ItemFactory.create(
category="about", parent_location=self.course.location,
display_name="overview"
)
def test_invitation_only(self):
"""
Test for user not logged in, invitation only course.
"""
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment in this course is by invitation only", resp.content)
# Check that registration button is not present
self.assertNotIn(REG_STR, resp.content)
def test_invitation_only_but_allowed(self):
"""
Test for user logged in and allowed to enroll in invitation only course.
"""
# Course is invitation only, student is allowed to enroll and logged in
user = UserFactory.create(username='allowed_student', password='test', email='[email protected]')
CourseEnrollmentAllowedFactory(email=user.email, course_id=self.course.id)
self.client.login(username=user.username, password='test')
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn(u"Register for {}".format(self.course.id.course), resp.content)
# Check that registration button is present
self.assertIn(REG_STR, resp.content)
@patch.dict(settings.FEATURES, {'RESTRICT_ENROLL_BY_REG_METHOD': True})
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class AboutTestCaseShibCourse(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Test cases covering about page behavior for courses that use shib enrollment domain ("shib courses")
"""
def setUp(self):
self.course = CourseFactory.create(enrollment_domain="shib:https://idp.stanford.edu/")
self.about = ItemFactory.create(
category="about", parent_location=self.course.location,
data="OOGIE BLOOGIE", display_name="overview"
)
def test_logged_in_shib_course(self):
"""
For shib courses, logged in users will see the register button, but get rejected once they click there
"""
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
self.assertIn(u"Register for {}".format(self.course.id.course), resp.content)
self.assertIn(SHIB_ERROR_STR, resp.content)
self.assertIn(REG_STR, resp.content)
def test_anonymous_user_shib_course(self):
"""
For shib courses, anonymous users will also see the register button
"""
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("OOGIE BLOOGIE", resp.content)
self.assertIn(u"Register for {}".format(self.course.id.course), resp.content)
self.assertIn(SHIB_ERROR_STR, resp.content)
self.assertIn(REG_STR, resp.content)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
class AboutWithClosedEnrollment(ModuleStoreTestCase):
"""
This test case will check the About page for a course that has enrollment start/end
set but it is currently outside of that period.
"""
def setUp(self):
super(AboutWithClosedEnrollment, self).setUp()
self.course = CourseFactory.create(metadata={"invitation_only": False})
# Setup enrollment period to be in future
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
nextday = tomorrow + datetime.timedelta(days=1)
self.course.enrollment_start = tomorrow
self.course.enrollment_end = nextday
self.course = self.update_course(self.course, self.user.id)
self.about = ItemFactory.create(
category="about", parent_location=self.course.location,
display_name="overview"
)
    def test_closed_enrollment(self):
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment is Closed", resp.content)
# Check that registration button is not present
self.assertNotIn(REG_STR, resp.content)
@override_settings(MODULESTORE=TEST_DATA_MOCK_MODULESTORE)
@patch.dict(settings.FEATURES, {'ENABLE_SHOPPING_CART': True})
@patch.dict(settings.FEATURES, {'ENABLE_PAID_COURSE_REGISTRATION': True})
class AboutPurchaseCourseTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
This test class runs through a suite of verifications regarding
purchaseable courses
"""
def setUp(self):
super(AboutPurchaseCourseTestCase, self).setUp()
self.course = CourseFactory.create(org='MITx', number='buyme', display_name='Course To Buy')
self._set_ecomm(self.course)
def _set_ecomm(self, course):
"""
Helper method to turn on ecommerce on the course
"""
course_mode = CourseMode(
course_id=course.id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=10,
)
course_mode.save()
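    # Note: the CourseMode row created above (min_price=10) is what drives the
    # "Add buyme to Cart ($10)" button asserted in the tests below (a sketch
    # of the relationship, not the full set of conditions).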
def test_anonymous_user(self):
"""
Make sure an anonymous user sees the purchase button
"""
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Add buyme to Cart ($10)", resp.content)
def test_logged_in(self):
"""
Make sure a logged in user sees the purchase button
"""
self.setup_user()
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Add buyme to Cart ($10)", resp.content)
def test_already_in_cart(self):
"""
This makes sure if a user has this course in the cart, that the expected message
appears
"""
self.setup_user()
cart = Order.get_cart_for_user(self.user)
PaidCourseRegistration.add_to_order(cart, self.course.id)
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("This course is in your", resp.content)
self.assertNotIn("Add buyme to Cart ($10)", resp.content)
def test_already_enrolled(self):
"""
This makes sure that the already enrolled message appears for paywalled courses
"""
self.setup_user()
# note that we can't call self.enroll here since that goes through
# the Django student views, which don't allow enrollments
# in paywalled courses
CourseEnrollment.enroll(self.user, self.course.id)
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("You are registered for this course", resp.content)
self.assertIn("View Courseware", resp.content)
self.assertNotIn("Add buyme to Cart ($10)", resp.content)
def test_closed_enrollment(self):
"""
This makes sure that paywalled courses also honor the registration
window
"""
self.setup_user()
now = datetime.datetime.now(pytz.UTC)
tomorrow = now + datetime.timedelta(days=1)
nextday = tomorrow + datetime.timedelta(days=1)
self.course.enrollment_start = tomorrow
self.course.enrollment_end = nextday
self.course = self.update_course(self.course, self.user.id)
url = reverse('about_course', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment is Closed", resp.content)
self.assertNotIn("Add buyme to Cart ($10)", resp.content)
def test_invitation_only(self):
"""
This makes sure that the invitation-only restriction takes precedence over
any purchase enablements
"""
course = CourseFactory.create(metadata={"invitation_only": True})
self._set_ecomm(course)
self.setup_user()
url = reverse('about_course', args=[course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enrollment in this course is by invitation only", resp.content)
def test_enrollment_cap(self):
"""
Make sure that capped enrollments work even with
paywalled courses
"""
course = CourseFactory.create(
metadata={
"max_student_enrollments_allowed": 1,
"display_coursenumber": "buyme",
}
)
self._set_ecomm(course)
self.setup_user()
url = reverse('about_course', args=[course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Add buyme to Cart ($10)", resp.content)
# note that we can't call self.enroll here since that goes through
# the Django student views, which don't allow enrollments
# in paywalled courses
CourseEnrollment.enroll(self.user, course.id)
# create a new account since the first account is already registered for the course
email = '[email protected]'
password = 'bar'
username = 'test_second'
self.create_account(username, email, password)
self.activate_user(email)
self.login(email, password)
# Get the about page again and make sure that the page says that the course is full
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Course is full", resp.content)
self.assertNotIn("Add buyme to Cart ($10)", resp.content)
| agpl-3.0 | 3,819,342,254,327,059,000 | 468,601,341,615,095,200 | 39.341053 | 112 | 0.65442 | false |
UrLab/incubator | events/tests/test_forms.py | 1 | 1547 | from events.forms import EventForm
from datetime import datetime
import pytest
from users.models import User
@pytest.fixture(scope='function')
def user():
user = User.objects.create(username="test", email="[email protected]", first_name="Test", last_name="Test")
return user.id
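# Note: the fixture hands back the user's primary key rather than the User
# instance, since the tests below feed it straight into the form's
# 'organizer' field as posted form data.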
@pytest.mark.django_db
def test_only_title_and_state_required(user):
form_data = {
'title': 'wtf',
'status': 'i',
'organizer': user,
}
form = EventForm(data=form_data)
assert form.is_valid(), form.errors
@pytest.mark.django_db
def test_no_stop_but_start(user):
form_data = {
'title': 'wtf',
'status': 'i',
'start': datetime(2000, 1, 1),
'organizer': user,
}
form = EventForm(data=form_data)
assert form.is_valid(), form.errors
assert form.cleaned_data['start'] == form.cleaned_data['stop']
assert form.cleaned_data['start'].year == 2000
def test_ready_must_have_date():
form_data = {
'title': 'wtf',
'status': 'r',
}
form = EventForm(data=form_data)
assert not form.is_valid(), form.errors
assert 'Un événement prêt doit avoir une date de début' in form.errors['__all__']
def test_stop_must_be_after_start():
form_data = {
'title': 'wtf',
'status': 'i',
'start': datetime(2100, 1, 1),
'stop': datetime(2000, 1, 1)
}
form = EventForm(data=form_data)
assert not form.is_valid()
assert 'La date de fin ne peut être avant la date de début' in form.errors['__all__']
| agpl-3.0 | 2,127,922,589,430,512,000 | 1,157,447,247,107,435,500 | 24.262295 | 106 | 0.606749 | false |
gerrive/horizon | openstack_dashboard/test/integration_tests/tests/test_host_aggregates.py | 14 | 2211 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestHostAggregates(helpers.AdminTestCase):
HOST_AGGREGATE_NAME = helpers.gen_random_resource_name("host_aggregate")
HOST_AGGREGATE_AVAILABILITY_ZONE = "nova"
def test_host_aggregate_create(self):
"""tests the host aggregate creation and deletion functionalities:
* creates a new host aggregate
* verifies the host aggregate appears in the host aggregates table
* deletes the newly created host aggregate
* verifies the host aggregate does not appear in the table
after deletion
"""
hostaggregates_page = self.home_pg.go_to_system_hostaggregatespage()
hostaggregates_page.create_host_aggregate(
name=self.HOST_AGGREGATE_NAME,
availability_zone=self.HOST_AGGREGATE_AVAILABILITY_ZONE)
self.assertTrue(
hostaggregates_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(hostaggregates_page.find_message_and_dismiss(
messages.ERROR))
self.assertTrue(hostaggregates_page.is_host_aggregate_present(
self.HOST_AGGREGATE_NAME))
hostaggregates_page.delete_host_aggregate(self.HOST_AGGREGATE_NAME)
self.assertTrue(
hostaggregates_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(hostaggregates_page.find_message_and_dismiss(
messages.ERROR))
self.assertFalse(hostaggregates_page.is_host_aggregate_present(
self.HOST_AGGREGATE_NAME))
| apache-2.0 | 5,508,639,350,379,004,000 | 2,235,628,946,386,269,700 | 45.0625 | 78 | 0.711443 | false |
zacoxicompton/damnvid | ui/dMainFrame/dMainFrame.py | 12 | 32421 | # -*- coding: utf-8 -*-
from ..dUI import *
from dConverter import *
from dUpdater import *
from ..dPrefEditor import *
from ..dDoneDialog import *
from ..dAddURLDialog import *
from ..dAboutDialog import *
from ..dReportBug import *
from ..dBrowser import *
from ..dVideoHistory import *
from dMenubar import *
from dMainListPanel import *
from dMainSidePanel import *
from dMainGaugePanel import *
from dMainGoPanel import *
class DamnMainFrame(DamnFrame): # The main window
def __init__(self, parent, id, title):
Damnlog('DamnMainFrame GUI building starting.')
DamnFrame.__init__(self, parent, wx.ID_ANY, title, pos=wx.Display().GetGeometry()[:2], size=(780, 580), style=wx.DEFAULT_FRAME_STYLE)
self.CreateStatusBar()
self.SetMenuBar(DamnMainMenubar(self))
Damnlog('DamnMainFrame menu bar is up.')
vbox = wx.BoxSizer(wx.VERTICAL)
self.SetSizer(vbox)
#vbox.Add((0,DV.border_padding)) #Actually, do NOT add a padding there, it looks better when stuck on the edge
panel = wx.Panel(self, -1)
vbox.Add(panel, 1, wx.EXPAND)
self.setupGrid(panel)
self.Bind(wx.EVT_CLOSE, self.onClose, self)
self.Bind(wx.EVT_SIZE, self.onResize, self)
self.Bind(wx.EVT_ICONIZE, self.onMinimize)
self.Bind(DV.evt_prog, self.onProgress)
self.Bind(DV.evt_load, self.onLoading)
Damnlog('DamnMainFrame: All GUI is up.')
self.clipboardtimer = wx.Timer(self, -1)
self.clipboardtimer.Start(1000)
self.Bind(wx.EVT_TIMER, self.onClipboardTimer, self.clipboardtimer)
Damnlog('DamnMainFrame: Clipboard timer started.')
DV.icon = wx.Icon(DV.images_path + 'icon.ico', wx.BITMAP_TYPE_ICO)
#DV.icon2 = wx.Icon(DV.images_path + 'icon-alt.ico', wx.BITMAP_TYPE_ICO)
DV.icon16 = wx.Icon(DV.images_path + 'icon16.ico', wx.BITMAP_TYPE_ICO)
self.SetIcon(DV.icon)
Damnlog('DamnMainFrame: init stage 1 done.')
def setupGrid(self, panel):
grid = wx.FlexGridSizer(2, 2, 8, 8)
grid.Add(DamnMainListPanel(self), 1, wx.EXPAND)
grid.Add(DamnMainSidePanel(self), 0, wx.EXPAND)
grid.Add(DamnMainGaugePanel(self), 0, wx.EXPAND)
grid.Add(DamnMainGoPanel(self), 0, wx.EXPAND)
panel.SetSizer(grid)
grid.AddGrowableRow(0, 1)
grid.AddGrowableCol(0, 1)
def getNiceDimensions(self):
return self.niceDimensions
def setNiceDimensions(self, dimensions):
self.niceDimensions = dimensions
def init2(self):
Damnlog('Starting DamnMainFrame init stage 2.')
if os.path.exists(DV.conf_file_directory + u'lastversion.' + DV.safeProduct):
lastversion = DamnOpenFile(DV.conf_file_directory + u'lastversion.' + DV.safeProduct, 'r')
dvversion = lastversion.readline().strip()
lastversion.close()
del lastversion
Damnlog('Version file found; version number read:',dvversion)
else:
dvversion = 'old' # This is not just an arbitrary erroneous value, it's actually handy in the concatenation on the wx.FileDialog line below
Damnlog('No version file found.')
Damnlog('Read version:',dvversion,';running version:',DV.version)
if dvversion != DV.version: # Just updated to new version, ask what to do about the preferences
#dlg = wx.MessageDialog(self, DV.l('DamnVid was updated to ') + DV.version + '.\n' + DV.l('locale:damnvid-updated-export-prefs'), DV.l('DamnVid was successfully updated'), wx.YES | wx.NO | wx.ICON_QUESTION)
tmpprefs = DamnPrefs()
try:
checkupdates = tmpprefs.get('CheckForUpdates')
locale = tmpprefs.get('locale')
except:
pass
Damnlog('Check for updates preference is',checkupdates)
if False: #dlg.ShowModal() == wx.ID_YES:
dlg.Destroy()
dlg = wx.FileDialog(self, DV.l('Where do you want to export DamnVid\'s configuration?'), tmpprefs.get('lastprefdir'), 'DamnVid-' + dvversion + '-configuration.ini', DV.l('locale:browse-ini-files'), wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
f = DamnOpenFile(path, 'w')
tmpprefs.ini.write(f)
f.close()
dlg.Destroy()
else:
pass
# Now, overwrite the preferences!
del tmpprefs
os.remove(DV.conf_file)
shutil.copyfile(DV.curdir + 'conf' + DV.sep + 'conf.ini', DV.conf_file)
lastversion = DamnOpenFile(DV.conf_file_directory + u'lastversion.' + DV.safeProduct, 'w')
lastversion.write(DV.version.encode('utf8'))
lastversion.close()
del lastversion
tmpprefs = DamnPrefs()
try:
tmpprefs.set('CheckForUpdates', checkupdates)
tmpprefs.set('locale', locale)
except:
pass
tmpprefs.save()
del tmpprefs
Damnlog('Local version check done, initializing DamnMainFrame properties.')
self.videos = []
self.clippedvideos = []
self.resultlist = []
self.thisbatch = 0
self.thisvideo = []
self.meta = {}
DV.prefs = DamnPrefs()
self.converting = -1
self.isclosing = False
self.searchopen = False
self.addurl = None
self.loadingvisible = 0
self.trayicon = None
self.historyDialog = None
self.onListSelect()
Damnlog('DamnMainFrame properties OK, first run?',DV.first_run)
if DV.first_run:
if DV.os == 'mac':
DV.prefs.set('CheckForUpdates', 'True')
Damnlog('Skipping asking user for updates because this is a Mac and Mac users don\'t want to configure stuff.')
else:
dlg = wx.MessageDialog(self, DV.l('Welcome to DamnVid ') + DV.version + '!\n' + DV.l('Would you like DamnVid to check for updates every time it starts?'), DV.l('Welcome to DamnVid ') + DV.version + '!', wx.YES | wx.NO | wx.ICON_QUESTION)
if dlg.ShowModal() == wx.ID_YES:
DV.prefs.set('CheckForUpdates', 'True')
else:
DV.prefs.set('CheckForUpdates', 'False')
if DV.prefs.get('CheckForUpdates') == 'True':
Damnlog('DamnMainFrame checking for updates.')
self.onCheckUpdates(None)
self.SetStatusText(DV.l('DamnVid ready.'))
windowpolicy = DV.prefs.get('windowpolicy')
if not len(windowpolicy) or windowpolicy=='center':
Damnlog('Window policy is centering.')
self.Center()
elif windowpolicy=='remember':
Damnlog('Window policy is remember; trying to load saved window geometry.')
allstuff=(
DV.prefs.gets('damnvid-mainwindow','lastx'), DV.prefs.gets('damnvid-mainwindow','lasty'),
DV.prefs.gets('damnvid-mainwindow','lastw'), DV.prefs.gets('damnvid-mainwindow','lasth'),
DV.prefs.gets('damnvid-mainwindow','lastresw'), DV.prefs.gets('damnvid-mainwindow','lastresh')
)
Damnlog('Old window geometry information:',allstuff)
allstuff2=[]
for i in allstuff:
try:
allstuff2.append(int(i))
except:
allstuff2.append(-1)
if -1 in allstuff2:
Damnlog('Invalid information in old window geometry information; giving up on restoring window geometry.')
else:
try:
screen = wx.Display().GetGeometry()[2:]
if allstuff2[4] != screen[0] or allstuff2[5] != screen[1]:
Damnlog('Resolution information is different:',allstuff2[4:6],'vs',screen,'(current); giving up on restoring window geometry.')
elif allstuff2[0] < 0 or allstuff2[0] + allstuff2[2] >= allstuff2[4] or allstuff2[1] < 0 or allstuff2[1] + allstuff2[3] >= allstuff2[5]:
Damnlog('Window position is out of bounds; giving up.')
else:
Damnlog('All window geometry tests passed, attempting to restore window geometry.')
try:
self.SetSizeWH(allstuff2[2],allstuff2[3])
self.MoveXY(allstuff2[0],allstuff2[1])
Damnlog('Window geometry restored successfully.')
except:
Damnlog('Window manager refused to change window geometry.')
except:
Damnlog('Could not get screen resolution; giving up on restoring window geometry.')
else:
Damnlog('Window policy is',windowpolicy,'; doing nothing.')
self.updateHistory()
Damnlog('DamnMainFrame: Main window all ready.')
def onMinimize(self, event):
if DV.os == u'posix':
return # Do not do anything on Linux, let the window manager handle it
Damnlog('DamnMainFrame iconize event fired. Is being minimized?', event.Iconized())
if self.isclosing:
Damnlog('DamnMainFrame being closed, not interfering.')
return
if not event.Iconized():
Damnlog('DamnMainFrame being restored, doing nothing.')
return
if DV.prefs.get('minimizetotray')=='True':
Damnlog('Minimize to tray preference is True, creating tray icon.')
self.trayicon = DamnTrayIcon(self)
else:
Damnlog('Minimize to tray preference is False, doing nothing.')
def onExit(self, event):
self.Close(True)
def onListSelect(self, event=None):
sel = self.list.getAllSelectedItems()
gotstuff = bool(len(sel))
count = self.list.GetItemCount()
self.btnRename.Enable(len(sel) == 1)
self.profiledropdown.Enable(bool(count))
if not count:
self.profiledropdown.SetItems([DV.l('(None)')])
videosAffected = range(count)
if gotstuff:
videosAffected = sel
self.deletebutton.SetLabel(DV.l('Remove'))
self.deletebutton.Enable(self.converting not in sel)
self.btnMoveUp.Enable(sel[0])
self.btnMoveDown.Enable(sel[-1] != self.list.GetItemCount() - 1)
else:
self.deletebutton.SetLabel(DV.l('Remove all'))
self.deletebutton.Enable(self.converting == -1)
self.btnMoveUp.Disable()
self.btnMoveDown.Disable()
if len(videosAffected):
choices = []
uniprofile = int(self.meta[self.videos[videosAffected[0]]]['profile'])
for i in videosAffected:
if int(self.meta[self.videos[i]]['profile']) != uniprofile:
uniprofile = -2
for p in range(-1, DV.prefs.profiles):
choices.append(DV.l(DV.prefs.getp(p, 'name'), warn=False))
if uniprofile == -2:
choices.insert(0, DV.l('(Multiple)'))
self.profiledropdown.SetItems(choices)
if uniprofile == -2:
self.profiledropdown.SetSelection(0)
else:
self.profiledropdown.SetSelection(uniprofile + 1)
def onListKeyDown(self, event):
keycode = event.GetKeyCode()
if (keycode == wx.WXK_BACK or keycode == wx.WXK_DELETE) and self.list.GetSelectedItemCount():
self.onDelSelection(event)
elif (keycode == wx.WXK_F2 or keycode == wx.WXK_NUMPAD_F2) and self.list.GetSelectedItemCount() == 1:
self.onRename(event)
def onAddFile(self, event):
d = os.getcwd()
if os.path.exists(DV.prefs.get('LastFileDir')):
if os.path.isdir(DV.prefs.get('LastFileDir')):
d = DV.prefs.get('LastFileDir')
elif os.path.exists(DV.prefs.expandPath('?DAMNVID_MY_VIDEOS?')):
if os.path.isdir(DV.prefs.expandPath('?DAMNVID_MY_VIDEOS?')):
d = DV.prefs.expandPath('?DAMNVID_MY_VIDEOS?')
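# Fallback order for the initial browse directory: the last-used folder if it
# still exists, then the platform's "My Videos" location, then the current
# working directory picked up above.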
dlg = wx.FileDialog(self, DV.l('Choose a damn video.'), d, '', DV.l('locale:browse-video-files'), wx.OPEN | wx.FD_MULTIPLE)
dlg.SetIcon(DV.icon)
if dlg.ShowModal() == wx.ID_OK:
vids = dlg.GetPaths()
DV.prefs.set('LastFileDir', os.path.dirname(vids[0]))
DV.prefs.save()
self.addVid(vids)
dlg.Destroy()
def onOpenHistory(self, event=None):
Damnlog('onOpenHistory event fired:', event)
if self.historyDialog is None:
self.historyDialog = DamnHistoryViewer(self)
self.historyDialog.Show()
def onCloseHistory(self, event=None):
Damnlog('onCloseHistory event fired:', event)
self.historyDialog = None
def onAddURL(self, event=None):
Damnlog('onAddURL event fired:',event)
default = ''
try:
if not wx.TheClipboard.IsOpened():
if wx.TheClipboard.Open():
dataobject = wx.TextDataObject()
wx.TheClipboard.GetData(dataobject)
default = dataobject.GetText()
wx.TheClipboard.Close()
Damnlog('Text scavenged from clipboard:',default)
if not self.validURI(default):
default = '' # Only set that as default text if the clipboard's text content is not a URL
except:
default = ''
try:
wx.TheClipboard.Close() # In case there's been an error before the clipboard could be closed, try to close it now
except:
pass # There's probably wasn't any error, just pass
self.addurl = DamnAddURLDialog(self, default)
self.addurl.SetIcon(DV.icon)
self.addurl.ShowModal()
try:
self.addurl.Destroy()
except:
pass # The addurl destroys itself, supposedly, and doing it again sometimes (sometimes!) generates errors.
self.addurl = None
def validURI(self, uri):
if REGEX_HTTP_GENERIC.match(uri):
for i in DamnIterModules(False):
if i['class'](uri).validURI():
return 'Video site'
return 'Online video' # Not necessarily true, but ffmpeg will tell
elif os.path.exists(uri):
return 'Local file'
return None
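# Rough contract of validURI (illustrative inputs, not from a real session):
# an http(s) URL claimed by a site module -> 'Video site'; any other http(s)
# URL -> 'Online video' (ffmpeg gets the final say); an existing local path
# -> 'Local file'; anything else -> None.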
def getVidName(self, uri):
try:
html = DamnURLOpen(uri[3:])
for i in html:
res = REGEX_HTTP_GENERIC_TITLE_EXTRACT.search(i)
if res:
return DamnHtmlEntities(res.group(1)).strip()
except:
pass # Can't grab this? Return Unknown title
return DV.l('Unknown title')
def onDropTargetClick(self, event):
dlg = wx.MessageDialog(self, DV.l('This is a droptarget: You may drop video files and folders here (or in the big list as well).'), DV.l('DamnVid Droptarget'), wx.ICON_INFORMATION)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
def toggleLoading(self, show):
isvisible = self.loadingvisible > 0
self.loadingvisible = max((0, self.loadingvisible + int(show) * 2 - 1))
if (isvisible and not self.loadingvisible) or (not isvisible and self.loadingvisible):
DV.postEvent(self, DamnLoadingEvent(DV.evt_loading, -1, {'show':bool(self.loadingvisible)}))
def onLoading(self, event):
info = event.GetInfo()
if info.has_key('show'):
if info['show']:
self.droptarget.LoadFile(DV.images_path + 'droptargetloading.gif')
self.droptarget.Play()
else:
self.droptarget.Stop()
self.droptarget.LoadFile(DV.images_path + 'droptarget.gif')
if info.has_key('status'):
self.SetStatusText(info['status'])
if info.has_key('dialog'):
dlg = wx.MessageDialog(self, info['dialog'][1], info['dialog'][0], info['dialog'][2])
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
if info.has_key('meta'):
self.addValid(info['meta'])
if info.has_key('go') and self.converting == -1:
if info['go']:
self.onGo()
if info.has_key('updateinfo'):
if info['updateinfo'].has_key('verbose'):
verbose = info['updateinfo']['verbose']
else:
verbose = True
if info['updateinfo'].has_key('main'):
if info['updateinfo']['main'] is not None:
msg = None
if DamnVersionCompare(info['updateinfo']['main'], DV.version) == 1 and type(info['updateinfo']['main']) is type(''):
if DV.os != 'posix':
dlg = wx.MessageDialog(self, DV.l('A new version (') + info['updateinfo']['main'] + DV.l(') is available! You are running DamnVid ') + DV.version + '.\n' + DV.l('Want to go to the download page and download the update?'), DV.l('Update available!'), wx.YES | wx.NO | wx.YES_DEFAULT | wx.ICON_INFORMATION)
dlg.SetIcon(DV.icon)
if dlg.ShowModal() == wx.ID_YES:
webbrowser.open(DV.url_download, 2)
dlg.Destroy()
elif verbose and type(info['updateinfo']['main']) is type(''):
if DV.version != info['updateinfo']['main']:
versionwarning = DV.l(' However, your version (') + DV.version + DV.l(') seems different than the latest version available online. Where would you get that?')
else:
versionwarning = ''
msg = (DV.l('DamnVid is up-to-date.'), DV.l('DamnVid is up-to-date! The latest version is ') + info['updateinfo']['main'] + '.' + versionwarning, wx.ICON_INFORMATION)
elif verbose:
msg = (DV.l('Error!'), DV.l('There was a problem while checking for updates. You are running DamnVid ') + DV.version + '.\n' + DV.l('Make sure you are connected to the Internet, and that no firewall is blocking DamnVid.'), wx.ICON_INFORMATION)
if msg is not None:
dlg = wx.MessageDialog(self, msg[1], msg[0], msg[2])
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
if info['updateinfo'].has_key('modules'):
msg = []
for i in info['updateinfo']['modules'].iterkeys():
if type(info['updateinfo']['modules'][i]) is type(()):
msg.append((True, DV.modules[i]['title'] + DV.l(' was updated to version ') + info['updateinfo']['modules'][i][0] + '.'))
elif type(info['updateinfo']['modules'][i]) is type('') and verbose:
if info['updateinfo']['modules'][i] == 'error':
msg.append((False, DV.modules[i]['title'] + DV.l(' is up-to-date (version ') + DV.modules[i]['version'] + ').'))
if len(msg):
msgs = []
for i in msg:
if i[0]:
msgs.append(i[1])
if not len(msg) and verbose:
msgs = msg
if len(msgs):
msg = DV.l('DamnVid also checked for updates to its modules.') + '\n'
for i in msgs:
msg += '\n' + i
dlg = wx.MessageDialog(self, msg, DV.l('Module updates'), wx.ICON_INFORMATION)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
def updateHistory(self):
if self.historyDialog is not None:
self.historyDialog.update()
def onAddHistoryVideo(self, uri, event=None):
Damnlog('History video add event fired:', event,'with URI', uri)
self.addVid([uri], DV.prefs.get('autoconvert') == 'True')
def addVid(self, uris, thengo=False):
if thengo is None:
thengo = DV.prefs.get('autoconvert') == 'True'
if type(uris) in (type(''), type(u'')):
uris = [DamnUnicode(uris)]
DV.videoLoader(self, uris, thengo).start()
def addTohistory(self, uri, title, icon=None):
uri = DamnUnicode(uri)
title = DamnUnicode(title)
icon = DamnUnicode(icon)
Damnlog('Adding video to history:', uri, 'with title', title, 'and icon', icon)
history = DV.prefs.geta('damnvid-videohistory', 'videos')
histsize = int(DV.prefs.get('videohistorysize'))
if not histsize:
Damnlog('Histsize is zero, not touching anything.')
return
for i in history:
tempvideo = i.split(DV.history_split)
if len(tempvideo) != 3:
Damnlog('Invalid entry in history:', i)
continue
if tempvideo[0].strip().lower() == uri.strip().lower():
Damnlog('URI',uri,'is already in history, not adding it to history again.')
return
history.reverse()
while len(history) >= histsize:
history = history[1:]
history.append(DV.history_split.join([uri,title,icon]))
history.reverse()
DV.prefs.seta('damnvid-videohistory','videos',history)
Damnlog('Video added successfully, rebuilding history menu.')
self.updateHistory()
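# Storage format note: each history entry is one string of the form
# uri + DV.history_split + title + DV.history_split + icon, kept newest-first
# and trimmed to the 'videohistorysize' preference.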
def addValid(self, meta):
Damnlog('Adding video to DamnList with meta:',meta)
if type(meta['icon']) in (type(''), type(u'')):
meta['icon'] = DamnGetListIcon(meta['icon'])
self.addTohistory(meta['original'], meta['name'], DV.listicons.getHandle(meta['icon']))
curvid = len(self.videos)
self.list.InsertStringItem(curvid, meta['name'])
self.list.SetStringItem(curvid, ID_COL_VIDPROFILE, DV.l(DV.prefs.getp(meta['profile'], 'name')))
self.list.SetStringItem(curvid, ID_COL_VIDPATH, meta['dirname'])
self.list.SetStringItem(curvid, ID_COL_VIDSTAT, meta['status'])
self.list.SetItemImage(curvid, meta['icon'], meta['icon'])
self.videos.append(meta['uri'])
self.meta[meta['uri']] = meta
self.SetStatusText(DV.l('Added ') + meta['name'] + '.')
if self.addurl is not None:
self.addurl.update(meta['original'], meta['name'], meta['icon'])
self.onListSelect()
def onProgress(self, event):
info = event.GetInfo()
if info.has_key('progress'):
self.gauge1.SetValue(info['progress'])
if info.has_key('statustext'):
self.SetStatusText(info['statustext'])
if info.has_key('status'):
self.list.SetStringItem(self.converting, ID_COL_VIDSTAT, info['status'])
if self.trayicon is not None:
self.trayicon.setTooltip(DamnUnicode(self.meta[self.videos[self.converting]]['name'])+u': '+info['status'])
if info.has_key('dialog'):
dlg = wx.MessageDialog(self, info['dialog'][0], info['dialog'][1], info['dialog'][2])
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
if info.has_key('go'):
self.go(info['go'])
def go(self, aborted=False):
Damnlog('Time to go. Aborted?', aborted)
self.converting = -1
for i in range(len(self.videos)):
if self.videos[i] not in self.thisvideo and self.meta[self.videos[i]]['status'] != DV.l('Success!'):
self.converting = i
break
if self.converting != -1 and not aborted: # Let's go for the actual conversion...
self.meta[self.videos[self.converting]]['status'] = DV.l('In progress...')
self.list.SetStringItem(self.converting, ID_COL_VIDSTAT, DV.l('In progress...'))
self.thisbatch = self.thisbatch + 1
self.thread = DamnConverter(parent=self)
if self.trayicon is not None:
self.trayicon.startAlternate()
self.thread.start()
else:
if self.trayicon is not None:
self.trayicon.stopAlternate()
if not self.isclosing:
self.SetStatusText(DV.l('DamnVid, waiting for instructions.'))
dlg = DamnDoneDialog(content=self.resultlist, aborted=aborted, main=self)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
self.converting = -1
self.stopbutton.Disable()
self.gobutton1.Enable()
self.gauge1.SetValue(0.0)
self.onListSelect()
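# Scheduling note: go() picks the first video that is neither part of the
# current batch nor already marked 'Success!', converts it on a DamnConverter
# thread, and is re-entered through the onProgress 'go' event until nothing
# is left or the user aborts.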
def onGo(self, event=None):
if not len(self.videos):
dlg = wx.MessageDialog(self, DV.l('Put some videos in the list first!'), DV.l('No videos!'), wx.ICON_EXCLAMATION | wx.OK)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
elif self.converting != -1:
dlg = wx.MessageDialog(self, DV.l('DamnVid is already converting!'), DV.l('Already converting!'), wx.ICON_EXCLAMATION | wx.OK)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
else:
success = 0
for i in self.videos:
if self.meta[i]['status'] == DV.l('Success!'):
success = success + 1
if success == len(self.videos):
dlg = wx.MessageDialog(self, DV.l('All videos in the list have already been processed!'), DV.l('Already done'), wx.OK | wx.ICON_INFORMATION)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
else:
self.thisbatch = 0
self.thisvideo = []
self.resultlist = []
self.stopbutton.Enable()
self.gobutton1.Disable()
self.go()
self.onListSelect()
def onStop(self, event):
self.thread.abortProcess()
def onRename(self, event):
item = self.list.getAllSelectedItems()
if len(item) > 1:
dlg = wx.MessageDialog(self, DV.l('You can only rename one video at a time.'), DV.l('Multiple videos selected.'), wx.ICON_EXCLAMATION | wx.OK)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
elif not len(item):
dlg = wx.MessageDialog(self, DV.l('Select a video in order to rename it.'), DV.l('No videos selected'), wx.ICON_EXCLAMATION | wx.OK)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
else:
item = item[0]
dlg = wx.TextEntryDialog(self, DV.l('Enter the new name for "') + self.meta[self.videos[item]]['name'] + '".', DV.l('Rename'), self.meta[self.videos[item]]['name'])
dlg.SetIcon(DV.icon)
if dlg.ShowModal() != wx.ID_OK:
dlg.Destroy()
return # user cancelled the rename; keep the old name
newname = dlg.GetValue()
self.meta[self.videos[item]]['name'] = newname
self.list.SetStringItem(item, ID_COL_VIDNAME, newname)
dlg.Destroy()
history = DV.prefs.geta('damnvid-videohistory','videos')
for i in range(len(history)):
video = history[i].split(DV.history_split)
if len(video) != 3:
continue
if video[0] == self.meta[self.videos[item]]['original']:
video[1] = newname
history[i] = DV.history_split.join(video)
DV.prefs.seta('damnvid-videohistory','videos',history)
DV.prefs.save()
self.updateHistory()
def onSearch(self, event):
if not self.searchopen:
self.searchopen = True
self.searchdialog = DamnVidBrowser(self)
self.searchdialog.Show()
else:
self.searchdialog.Raise()
def invertVids(self, i1, i2):
tmp = self.videos[i1]
self.videos[i1] = self.videos[i2]
self.videos[i2] = tmp
tmp = self.list.IsSelected(i2)
self.list.Select(i2, on=self.list.IsSelected(i1))
self.list.Select(i1, on=tmp)
self.list.invertItems(i1, i2)
if i1 == self.converting:
self.converting = i2
elif i2 == self.converting:
self.converting = i1
self.onListSelect()
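# Note: besides swapping the two entries in self.videos, the list rows and
# their selection states, this also keeps self.converting pointing at the
# same video when it is one of the two items being moved.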
def onMoveUp(self, event):
items = self.list.getAllSelectedItems()
if len(items):
if items[0]:
for i in items:
self.invertVids(i, i - 1)
else:
dlg = wx.MessageDialog(self, DV.l('You\'ve selected the first item in the list, which cannot be moved further up!'), DV.l('Invalid selection'), wx.OK | wx.ICON_EXCLAMATION)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
else:
dlg = wx.MessageDialog(self, DV.l('Select some videos in the list first.'), DV.l('No videos selected!'), wx.OK | wx.ICON_EXCLAMATION)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
self.onListSelect()
def onMoveDown(self, event):
items = self.list.getAllSelectedItems()
if len(items):
if items[-1] < self.list.GetItemCount() - 1:
for i in reversed(self.list.getAllSelectedItems()):
self.invertVids(i, i + 1)
else:
dlg = wx.MessageDialog(self, DV.l('You\'ve selected the last item in the list, which cannot be moved further down!'), DV.l('Invalid selection'), wx.OK | wx.ICON_EXCLAMATION)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
else:
dlg = wx.MessageDialog(self, DV.l('Select some videos in the list first.'), DV.l('No videos selected!'), wx.OK | wx.ICON_EXCLAMATION)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
self.onListSelect()
def onChangeProfileDropdown(self, event):
sel = self.profiledropdown.GetCurrentSelection()
if self.profiledropdown.GetItems()[0] == DV.l('(Multiple)'): # compare the localized label set in onListSelect
sel -= 1
if sel != -1:
self.onChangeProfile(sel - 1, event)
def onChangeProfile(self, profile, event):
items = self.list.getAllSelectedItems()
if not len(items):
items = range(self.list.GetItemCount())
for i in items:
if self.meta[self.videos[i]]['profile'] != profile:
self.meta[self.videos[i]]['profile'] = profile
self.meta[self.videos[i]]['profilemodified'] = True
self.list.SetStringItem(i, ID_COL_VIDPROFILE, DV.l(DV.prefs.getp(profile, 'name')))
self.onListSelect()
def onResetStatus(self, event=None):
items = self.list.getAllSelectedItems()
for i in items:
self.meta[self.videos[i]]['status'] = DV.l('Pending.')
self.list.SetStringItem(i, ID_COL_VIDSTAT, DV.l('Pending.'))
def onPrefs(self, event):
self.reopenprefs = False
prefs = DamnPrefEditor(self, -1, DV.l('DamnVid preferences'), main=self)
prefs.ShowModal()
prefs.Destroy()
if self.reopenprefs:
self.onPrefs(event)
else:
for i in range(len(self.videos)):
if self.meta[self.videos[i]]['profile'] >= DV.prefs.profiles or not self.meta[self.videos[i]]['profilemodified']:
# Yes, using icons as source identifiers, why not? Lol
if self.meta[self.videos[i]].has_key('module'):
self.meta[self.videos[i]]['profile'] = self.meta[self.videos[i]]['module'].getProfile()
elif self.meta[self.videos[i]]['icon'] == DamnGetListIcon('damnvid'):
self.meta[self.videos[i]]['profile'] = DV.prefs.get('defaultprofile')
elif self.meta[self.videos[i]]['icon'] == DamnGetListIcon('generic'):
self.meta[self.videos[i]]['profile'] = DV.prefs.get('defaultwebprofile')
self.list.SetStringItem(i, ID_COL_VIDPROFILE, DV.l(DV.prefs.getp(self.meta[self.videos[i]]['profile'], 'name')))
try:
del self.reopenprefs
except:
pass
self.updateHistory() # In case history size changed
self.onListSelect()
def onOpenOutDir(self, event):
if DV.os == 'nt':
os.system('explorer.exe "' + DV.prefs.get('outdir') + '"')
else:
pass # Halp here?
def onHalp(self, event):
webbrowser.open(DV.url_halp, 2)
def onReportBug(self, event):
dlg = DamnReportBug(self, -1, main=self)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
def onCheckUpdates(self, event=None):
updater = DamnVidUpdater(self, verbose=event is not None)
updater.start()
def onAboutDV(self, event):
dlg = DamnAboutDamnVid(self, -1, main=self)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
def delVid(self, i):
self.list.DeleteItem(i)
for vid in range(len(self.thisvideo)):
if self.thisvideo[vid] == self.videos[i]:
self.thisvideo.pop(vid)
self.thisbatch = self.thisbatch - 1
del self.meta[self.videos[i]]
self.videos.pop(i)
if self.converting > i:
self.converting -= 1
def onDelete(self, event):
Damnlog('onDelete event fired:', event)
if len(self.list.getAllSelectedItems()):
self.onDelSelection(event)
else:
self.onDelAll(event)
def confirmDeletion(self):
if DV.prefs.get('warnremove')!='True':
return True
dlg = wx.MessageDialog(self, DV.l('Are you sure? (This will not delete any files, it will just remove them from the list.)'), DV.l('Confirmation'), wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
dlg.SetIcon(DV.icon)
return dlg.ShowModal() == wx.ID_YES
def onDelSelection(self, event):
items = self.list.getAllSelectedItems()
if len(items):
if self.converting in items:
dlg = wx.MessageDialog(self, DV.l('Stop the video conversion before deleting the video being converted.'), DV.l('Cannot delete this video'), wx.ICON_EXCLAMATION | wx.OK)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
elif self.confirmDeletion():
for i in reversed(items): # Sequence MUST be reversed, otherwise the first items get deleted first, which changes the indexes of the following items
self.delVid(i)
self.onListSelect()
else:
dlg = wx.MessageDialog(self, DV.l('You must select some videos from the list first!'), DV.l('Select some videos!'), wx.ICON_EXCLAMATION | wx.OK)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
def onDelAll(self, event):
if len(self.videos):
if self.confirmDeletion():
if self.converting != -1:
self.onStop(None) # Stop conversion if it's in progress
self.list.DeleteAllItems()
self.videos = []
self.thisvideo = []
self.thisbatch = 0
self.meta = {}
else:
dlg = wx.MessageDialog(self, DV.l('Add some videos in the list first.'), DV.l('No videos!'), wx.OK | wx.ICON_EXCLAMATION)
dlg.SetIcon(DV.icon)
dlg.ShowModal()
dlg.Destroy()
def onResize(self, event):
self.Layout()
def onClipboardTimer(self, event):
self.clipboardtimer.Stop()
try:
if DV.gui_ok and DV.prefs.get('clipboard') == 'True' and not wx.TheClipboard.IsOpened():
if wx.TheClipboard.Open():
dataobject = wx.TextDataObject()
wx.TheClipboard.GetData(dataobject)
clip = dataobject.GetText()
wx.TheClipboard.Close()
clip = DamnUnicode(clip)
if DV.oldclipboard != clip:
DV.oldclipboard = clip
Damnlog('Text scavenged from clipboard (in loop):', clip)
if self.validURI(clip) == 'Video site' and clip not in self.clippedvideos:
self.clippedvideos.append(clip)
if self.addurl is not None:
self.addurl.onAdd(val=clip)
else:
self.addVid([clip], DV.prefs.get('autoconvert') == 'True')
except:
Damnlog('Failed to open clipboard.') # The clipboard might not get opened properly, or the prefs object might not exist yet. Just silently pass, gonna catch up at next timer event.
try:
wx.TheClipboard.Close() # Try to close it, just in case it's left open.
except:
pass
try:
self.clipboardtimer.Start(1000)
except:
pass # Sometimes the timer can still live while DamnMainFrame is closed, and if EVT_TIMER is then raised, error!
def onClose(self, event):
Damnlog('Main window onClose event fired. Converting?', self.converting, '; Is already closing?', self.isclosing)
if self.converting != -1:
dlg = wx.MessageDialog(self, DV.l('DamnVid is currently converting a video! Closing DamnVid will cause it to abort the conversion.') + '\r\n' + DV.l('Continue?'), DV.l('Conversion in progress'), wx.YES_NO | wx.NO_DEFAULT | wx.ICON_EXCLAMATION)
dlg.SetIcon(DV.icon)
if dlg.ShowModal() == wx.ID_YES:
Damnlog('User forced shutdown!')
self.shutdown()
else:
self.shutdown()
def shutdown(self):
Damnlog('Main window got shutdown() call')
if self.historyDialog is not None:
self.historyDialog.onClose()
try:
Damnlog('Attempting to get window position/size information.')
position = self.GetPositionTuple()
size = self.GetSize()
screen = wx.Display().GetGeometry()[2:]
Damnlog('Position is',position,'; size is',size,'; resolution is',screen)
DV.prefs.sets('damnvid-mainwindow','lastx',position[0])
DV.prefs.sets('damnvid-mainwindow','lasty',position[1])
DV.prefs.sets('damnvid-mainwindow','lastw',size[0])
DV.prefs.sets('damnvid-mainwindow','lasth',size[1])
DV.prefs.sets('damnvid-mainwindow','lastresw',screen[0])
DV.prefs.sets('damnvid-mainwindow','lastresh',screen[1])
except:
Damnlog('Error while trying to grab position/size information.')
self.isclosing = True
self.clipboardtimer.Stop()
self.Destroy()
| gpl-3.0 | -8,523,453,966,718,319,000 | 472,520,401,676,790,600 | 40.248092 | 310 | 0.681318 | false |
daviddupont69/CouchPotatoServer | libs/subliminal/api.py | 106 | 5646 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <[email protected]>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from .core import (SERVICES, LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE,
MATCHING_CONFIDENCE, create_list_tasks, consume_task, create_download_tasks,
group_by_video, key_subtitles)
from .language import language_set, language_list, LANGUAGES
import logging
__all__ = ['list_subtitles', 'download_subtitles']
logger = logging.getLogger(__name__)
def list_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None):
"""List subtitles in given paths according to the criteria
:param paths: path(s) to video file or folder
:type paths: string or list
:param languages: languages to search for, in preferred order
:type languages: list of :class:`~subliminal.language.Language` or string
:param list services: services to use for the search, in preferred order
:param bool force: force searching for subtitles even if some are detected
:param bool multi: search multiple languages for the same video
:param string cache_dir: path to the cache directory to use
:param int max_depth: maximum depth for scanning entries
:param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``)
:return: found subtitles
:rtype: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.ResultSubtitle`]
"""
services = services or SERVICES
languages = language_set(languages) if languages is not None else language_set(LANGUAGES)
if isinstance(paths, basestring):
paths = [paths]
if any([not isinstance(p, unicode) for p in paths]):
logger.warning(u'Not all entries are unicode')
results = []
service_instances = {}
tasks = create_list_tasks(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter)
for task in tasks:
try:
result = consume_task(task, service_instances)
results.append((task.video, result))
except:
logger.error(u'Error consuming task %r' % task, exc_info=True)
for service_instance in service_instances.itervalues():
service_instance.terminate()
return group_by_video(results)
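# Minimal usage sketch (hypothetical path and languages; assumes the package
# re-exports this helper from its __init__):
# from subliminal import list_subtitles
# found = list_subtitles([u'/videos/show.s01e01.mkv'], languages=['en', 'fr'])
# for video, subtitles in found.iteritems():
#     print video.path, [s.language for s in subtitles]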
def download_subtitles(paths, languages=None, services=None, force=True, multi=False, cache_dir=None, max_depth=3, scan_filter=None, order=None):
"""Download subtitles in given paths according to the criteria
:param paths: path(s) to video file or folder
:type paths: string or list
:param languages: languages to search for, in preferred order
:type languages: list of :class:`~subliminal.language.Language` or string
:param list services: services to use for the search, in preferred order
:param bool force: force searching for subtitles even if some are detected
:param bool multi: search multiple languages for the same video
:param string cache_dir: path to the cache directory to use
:param int max_depth: maximum depth for scanning entries
:param function scan_filter: filter function that takes a path as argument and returns a boolean indicating whether it has to be filtered out (``True``) or not (``False``)
:param order: preferred order for subtitles sorting
:type order: list of :data:`~subliminal.core.LANGUAGE_INDEX`, :data:`~subliminal.core.SERVICE_INDEX`, :data:`~subliminal.core.SERVICE_CONFIDENCE`, :data:`~subliminal.core.MATCHING_CONFIDENCE`
:return: downloaded subtitles
:rtype: dict of :class:`~subliminal.videos.Video` => [:class:`~subliminal.subtitles.ResultSubtitle`]
.. note::
If you use ``multi=True``, :data:`~subliminal.core.LANGUAGE_INDEX` has to be the first item of the ``order`` list
or you might get unexpected results.
"""
services = services or SERVICES
languages = language_list(languages) if languages is not None else language_list(LANGUAGES)
if isinstance(paths, basestring):
paths = [paths]
order = order or [LANGUAGE_INDEX, SERVICE_INDEX, SERVICE_CONFIDENCE, MATCHING_CONFIDENCE]
subtitles_by_video = list_subtitles(paths, languages, services, force, multi, cache_dir, max_depth, scan_filter)
for video, subtitles in subtitles_by_video.iteritems():
subtitles.sort(key=lambda s: key_subtitles(s, video, languages, services, order), reverse=True)
results = []
service_instances = {}
tasks = create_download_tasks(subtitles_by_video, languages, multi)
for task in tasks:
try:
result = consume_task(task, service_instances)
results.append((task.video, result))
except:
logger.error(u'Error consuming task %r' % task, exc_info=True)
for service_instance in service_instances.itervalues():
service_instance.terminate()
return group_by_video(results)
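# Usage sketch (hypothetical values; the order list below reuses the sorting
# constants imported at the top of this module):
# downloaded = download_subtitles([u'/videos/film.avi'], languages=['en'],
#     multi=False, order=[LANGUAGE_INDEX, SERVICE_INDEX,
#                         SERVICE_CONFIDENCE, MATCHING_CONFIDENCE])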
| gpl-3.0 | 527,739,789,959,561,100 | -2,564,569,289,926,909,400 | 50.798165 | 194 | 0.715905 | false |
mpetyx/palmdrop | venv/lib/python2.7/site-packages/html5lib/constants.py | 963 | 87346 | from __future__ import absolute_import, division, unicode_literals
import string
import gettext
_ = gettext.gettext
EOF = None
E = {
"null-character":
_("Null character in input stream, replaced with U+FFFD."),
"invalid-codepoint":
_("Invalid codepoint in stream."),
"incorrectly-placed-solidus":
_("Solidus (/) incorrectly placed in tag."),
"incorrect-cr-newline-entity":
_("Incorrect CR newline entity, replaced with LF."),
"illegal-windows-1252-entity":
_("Entity used with illegal number (windows-1252 reference)."),
"cant-convert-numeric-entity":
_("Numeric entity couldn't be converted to character "
"(codepoint U+%(charAsInt)08x)."),
"illegal-codepoint-for-numeric-entity":
_("Numeric entity represents an illegal codepoint: "
"U+%(charAsInt)08x."),
"numeric-entity-without-semicolon":
_("Numeric entity didn't end with ';'."),
"expected-numeric-entity-but-got-eof":
_("Numeric entity expected. Got end of file instead."),
"expected-numeric-entity":
_("Numeric entity expected but none found."),
"named-entity-without-semicolon":
_("Named entity didn't end with ';'."),
"expected-named-entity":
_("Named entity expected. Got none."),
"attributes-in-end-tag":
_("End tag contains unexpected attributes."),
'self-closing-flag-on-end-tag':
_("End tag contains unexpected self-closing flag."),
"expected-tag-name-but-got-right-bracket":
_("Expected tag name. Got '>' instead."),
"expected-tag-name-but-got-question-mark":
_("Expected tag name. Got '?' instead. (HTML doesn't "
"support processing instructions.)"),
"expected-tag-name":
_("Expected tag name. Got something else instead"),
"expected-closing-tag-but-got-right-bracket":
_("Expected closing tag. Got '>' instead. Ignoring '</>'."),
"expected-closing-tag-but-got-eof":
_("Expected closing tag. Unexpected end of file."),
"expected-closing-tag-but-got-char":
_("Expected closing tag. Unexpected character '%(data)s' found."),
"eof-in-tag-name":
_("Unexpected end of file in the tag name."),
"expected-attribute-name-but-got-eof":
_("Unexpected end of file. Expected attribute name instead."),
"eof-in-attribute-name":
_("Unexpected end of file in attribute name."),
"invalid-character-in-attribute-name":
_("Invalid character in attribute name"),
"duplicate-attribute":
_("Dropped duplicate attribute on tag."),
"expected-end-of-tag-name-but-got-eof":
_("Unexpected end of file. Expected = or end of tag."),
"expected-attribute-value-but-got-eof":
_("Unexpected end of file. Expected attribute value."),
"expected-attribute-value-but-got-right-bracket":
_("Expected attribute value. Got '>' instead."),
'equals-in-unquoted-attribute-value':
_("Unexpected = in unquoted attribute"),
'unexpected-character-in-unquoted-attribute-value':
_("Unexpected character in unquoted attribute"),
"invalid-character-after-attribute-name":
_("Unexpected character after attribute name."),
"unexpected-character-after-attribute-value":
_("Unexpected character after attribute value."),
"eof-in-attribute-value-double-quote":
_("Unexpected end of file in attribute value (\")."),
"eof-in-attribute-value-single-quote":
_("Unexpected end of file in attribute value (')."),
"eof-in-attribute-value-no-quotes":
_("Unexpected end of file in attribute value."),
"unexpected-EOF-after-solidus-in-tag":
_("Unexpected end of file in tag. Expected >"),
"unexpected-character-after-solidus-in-tag":
_("Unexpected character after / in tag. Expected >"),
"expected-dashes-or-doctype":
_("Expected '--' or 'DOCTYPE'. Not found."),
"unexpected-bang-after-double-dash-in-comment":
_("Unexpected ! after -- in comment"),
"unexpected-space-after-double-dash-in-comment":
_("Unexpected space after -- in comment"),
"incorrect-comment":
_("Incorrect comment."),
"eof-in-comment":
_("Unexpected end of file in comment."),
"eof-in-comment-end-dash":
_("Unexpected end of file in comment (-)"),
"unexpected-dash-after-double-dash-in-comment":
_("Unexpected '-' after '--' found in comment."),
"eof-in-comment-double-dash":
_("Unexpected end of file in comment (--)."),
"eof-in-comment-end-space-state":
_("Unexpected end of file in comment."),
"eof-in-comment-end-bang-state":
_("Unexpected end of file in comment."),
"unexpected-char-in-comment":
_("Unexpected character in comment found."),
"need-space-after-doctype":
_("No space after literal string 'DOCTYPE'."),
"expected-doctype-name-but-got-right-bracket":
_("Unexpected > character. Expected DOCTYPE name."),
"expected-doctype-name-but-got-eof":
_("Unexpected end of file. Expected DOCTYPE name."),
"eof-in-doctype-name":
_("Unexpected end of file in DOCTYPE name."),
"eof-in-doctype":
_("Unexpected end of file in DOCTYPE."),
"expected-space-or-right-bracket-in-doctype":
_("Expected space or '>'. Got '%(data)s'"),
"unexpected-end-of-doctype":
_("Unexpected end of DOCTYPE."),
"unexpected-char-in-doctype":
_("Unexpected character in DOCTYPE."),
"eof-in-innerhtml":
_("XXX innerHTML EOF"),
"unexpected-doctype":
_("Unexpected DOCTYPE. Ignored."),
"non-html-root":
_("html needs to be the first start tag."),
"expected-doctype-but-got-eof":
_("Unexpected End of file. Expected DOCTYPE."),
"unknown-doctype":
_("Erroneous DOCTYPE."),
"expected-doctype-but-got-chars":
_("Unexpected non-space characters. Expected DOCTYPE."),
"expected-doctype-but-got-start-tag":
_("Unexpected start tag (%(name)s). Expected DOCTYPE."),
"expected-doctype-but-got-end-tag":
_("Unexpected end tag (%(name)s). Expected DOCTYPE."),
"end-tag-after-implied-root":
_("Unexpected end tag (%(name)s) after the (implied) root element."),
"expected-named-closing-tag-but-got-eof":
_("Unexpected end of file. Expected end tag (%(name)s)."),
"two-heads-are-not-better-than-one":
_("Unexpected start tag head in existing head. Ignored."),
"unexpected-end-tag":
_("Unexpected end tag (%(name)s). Ignored."),
"unexpected-start-tag-out-of-my-head":
_("Unexpected start tag (%(name)s) that can be in head. Moved."),
"unexpected-start-tag":
_("Unexpected start tag (%(name)s)."),
"missing-end-tag":
_("Missing end tag (%(name)s)."),
"missing-end-tags":
_("Missing end tags (%(name)s)."),
"unexpected-start-tag-implies-end-tag":
_("Unexpected start tag (%(startName)s) "
"implies end tag (%(endName)s)."),
"unexpected-start-tag-treated-as":
_("Unexpected start tag (%(originalName)s). Treated as %(newName)s."),
"deprecated-tag":
_("Unexpected start tag %(name)s. Don't use it!"),
"unexpected-start-tag-ignored":
_("Unexpected start tag %(name)s. Ignored."),
"expected-one-end-tag-but-got-another":
_("Unexpected end tag (%(gotName)s). "
"Missing end tag (%(expectedName)s)."),
"end-tag-too-early":
_("End tag (%(name)s) seen too early. Expected other end tag."),
"end-tag-too-early-named":
_("Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."),
"end-tag-too-early-ignored":
_("End tag (%(name)s) seen too early. Ignored."),
"adoption-agency-1.1":
_("End tag (%(name)s) violates step 1, "
"paragraph 1 of the adoption agency algorithm."),
"adoption-agency-1.2":
_("End tag (%(name)s) violates step 1, "
"paragraph 2 of the adoption agency algorithm."),
"adoption-agency-1.3":
_("End tag (%(name)s) violates step 1, "
"paragraph 3 of the adoption agency algorithm."),
"adoption-agency-4.4":
_("End tag (%(name)s) violates step 4, "
"paragraph 4 of the adoption agency algorithm."),
"unexpected-end-tag-treated-as":
_("Unexpected end tag (%(originalName)s). Treated as %(newName)s."),
"no-end-tag":
_("This element (%(name)s) has no end tag."),
"unexpected-implied-end-tag-in-table":
_("Unexpected implied end tag (%(name)s) in the table phase."),
"unexpected-implied-end-tag-in-table-body":
_("Unexpected implied end tag (%(name)s) in the table body phase."),
"unexpected-char-implies-table-voodoo":
_("Unexpected non-space characters in "
"table context caused voodoo mode."),
"unexpected-hidden-input-in-table":
_("Unexpected input with type hidden in table context."),
"unexpected-form-in-table":
_("Unexpected form in table context."),
"unexpected-start-tag-implies-table-voodoo":
_("Unexpected start tag (%(name)s) in "
"table context caused voodoo mode."),
"unexpected-end-tag-implies-table-voodoo":
_("Unexpected end tag (%(name)s) in "
"table context caused voodoo mode."),
"unexpected-cell-in-table-body":
_("Unexpected table cell start tag (%(name)s) "
"in the table body phase."),
"unexpected-cell-end-tag":
_("Got table cell end tag (%(name)s) "
"while required end tags are missing."),
"unexpected-end-tag-in-table-body":
_("Unexpected end tag (%(name)s) in the table body phase. Ignored."),
"unexpected-implied-end-tag-in-table-row":
_("Unexpected implied end tag (%(name)s) in the table row phase."),
"unexpected-end-tag-in-table-row":
_("Unexpected end tag (%(name)s) in the table row phase. Ignored."),
"unexpected-select-in-select":
_("Unexpected select start tag in the select phase "
"treated as select end tag."),
"unexpected-input-in-select":
_("Unexpected input start tag in the select phase."),
"unexpected-start-tag-in-select":
_("Unexpected start tag token (%(name)s in the select phase. "
"Ignored."),
"unexpected-end-tag-in-select":
_("Unexpected end tag (%(name)s) in the select phase. Ignored."),
"unexpected-table-element-start-tag-in-select-in-table":
_("Unexpected table element start tag (%(name)s) in the select in table phase."),
"unexpected-table-element-end-tag-in-select-in-table":
_("Unexpected table element end tag (%(name)s) in the select in table phase."),
"unexpected-char-after-body":
_("Unexpected non-space characters in the after body phase."),
"unexpected-start-tag-after-body":
_("Unexpected start tag token (%(name)s)"
" in the after body phase."),
"unexpected-end-tag-after-body":
_("Unexpected end tag token (%(name)s)"
" in the after body phase."),
"unexpected-char-in-frameset":
_("Unexpected characters in the frameset phase. Characters ignored."),
"unexpected-start-tag-in-frameset":
_("Unexpected start tag token (%(name)s)"
" in the frameset phase. Ignored."),
"unexpected-frameset-in-frameset-innerhtml":
_("Unexpected end tag token (frameset) "
"in the frameset phase (innerHTML)."),
"unexpected-end-tag-in-frameset":
_("Unexpected end tag token (%(name)s)"
" in the frameset phase. Ignored."),
"unexpected-char-after-frameset":
_("Unexpected non-space characters in the "
"after frameset phase. Ignored."),
"unexpected-start-tag-after-frameset":
_("Unexpected start tag (%(name)s)"
" in the after frameset phase. Ignored."),
"unexpected-end-tag-after-frameset":
_("Unexpected end tag (%(name)s)"
" in the after frameset phase. Ignored."),
"unexpected-end-tag-after-body-innerhtml":
_("Unexpected end tag after body(innerHtml)"),
"expected-eof-but-got-char":
_("Unexpected non-space characters. Expected end of file."),
"expected-eof-but-got-start-tag":
_("Unexpected start tag (%(name)s)"
". Expected end of file."),
"expected-eof-but-got-end-tag":
_("Unexpected end tag (%(name)s)"
". Expected end of file."),
"eof-in-table":
_("Unexpected end of file. Expected table content."),
"eof-in-select":
_("Unexpected end of file. Expected select content."),
"eof-in-frameset":
_("Unexpected end of file. Expected frameset content."),
"eof-in-script-in-script":
_("Unexpected end of file. Expected script content."),
"eof-in-foreign-lands":
_("Unexpected end of file. Expected foreign content"),
"non-void-element-with-trailing-solidus":
_("Trailing solidus not allowed on element %(name)s"),
"unexpected-html-element-in-foreign-content":
_("Element %(name)s not allowed in a non-html context"),
"unexpected-end-tag-before-html":
_("Unexpected end tag (%(name)s) before html."),
"XXX-undefined-error":
_("Undefined error (this sucks and should be fixed)"),
}
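# Note: E maps tokenizer/parser error codes to gettext-wrapped message
# templates; the %(name)s style placeholders are filled from the data
# attached to each parse error when it is reported.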
namespaces = {
"html": "http://www.w3.org/1999/xhtml",
"mathml": "http://www.w3.org/1998/Math/MathML",
"svg": "http://www.w3.org/2000/svg",
"xlink": "http://www.w3.org/1999/xlink",
"xml": "http://www.w3.org/XML/1998/namespace",
"xmlns": "http://www.w3.org/2000/xmlns/"
}
scopingElements = frozenset((
(namespaces["html"], "applet"),
(namespaces["html"], "caption"),
(namespaces["html"], "html"),
(namespaces["html"], "marquee"),
(namespaces["html"], "object"),
(namespaces["html"], "table"),
(namespaces["html"], "td"),
(namespaces["html"], "th"),
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext"),
(namespaces["mathml"], "annotation-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title"),
))
formattingElements = frozenset((
(namespaces["html"], "a"),
(namespaces["html"], "b"),
(namespaces["html"], "big"),
(namespaces["html"], "code"),
(namespaces["html"], "em"),
(namespaces["html"], "font"),
(namespaces["html"], "i"),
(namespaces["html"], "nobr"),
(namespaces["html"], "s"),
(namespaces["html"], "small"),
(namespaces["html"], "strike"),
(namespaces["html"], "strong"),
(namespaces["html"], "tt"),
(namespaces["html"], "u")
))
specialElements = frozenset((
(namespaces["html"], "address"),
(namespaces["html"], "applet"),
(namespaces["html"], "area"),
(namespaces["html"], "article"),
(namespaces["html"], "aside"),
(namespaces["html"], "base"),
(namespaces["html"], "basefont"),
(namespaces["html"], "bgsound"),
(namespaces["html"], "blockquote"),
(namespaces["html"], "body"),
(namespaces["html"], "br"),
(namespaces["html"], "button"),
(namespaces["html"], "caption"),
(namespaces["html"], "center"),
(namespaces["html"], "col"),
(namespaces["html"], "colgroup"),
(namespaces["html"], "command"),
(namespaces["html"], "dd"),
(namespaces["html"], "details"),
(namespaces["html"], "dir"),
(namespaces["html"], "div"),
(namespaces["html"], "dl"),
(namespaces["html"], "dt"),
(namespaces["html"], "embed"),
(namespaces["html"], "fieldset"),
(namespaces["html"], "figure"),
(namespaces["html"], "footer"),
(namespaces["html"], "form"),
(namespaces["html"], "frame"),
(namespaces["html"], "frameset"),
(namespaces["html"], "h1"),
(namespaces["html"], "h2"),
(namespaces["html"], "h3"),
(namespaces["html"], "h4"),
(namespaces["html"], "h5"),
(namespaces["html"], "h6"),
(namespaces["html"], "head"),
(namespaces["html"], "header"),
(namespaces["html"], "hr"),
(namespaces["html"], "html"),
(namespaces["html"], "iframe"),
# Note that image is commented out in the spec as "this isn't an
# element that can end up on the stack, so it doesn't matter,"
(namespaces["html"], "image"),
(namespaces["html"], "img"),
(namespaces["html"], "input"),
(namespaces["html"], "isindex"),
(namespaces["html"], "li"),
(namespaces["html"], "link"),
(namespaces["html"], "listing"),
(namespaces["html"], "marquee"),
(namespaces["html"], "menu"),
(namespaces["html"], "meta"),
(namespaces["html"], "nav"),
(namespaces["html"], "noembed"),
(namespaces["html"], "noframes"),
(namespaces["html"], "noscript"),
(namespaces["html"], "object"),
(namespaces["html"], "ol"),
(namespaces["html"], "p"),
(namespaces["html"], "param"),
(namespaces["html"], "plaintext"),
(namespaces["html"], "pre"),
(namespaces["html"], "script"),
(namespaces["html"], "section"),
(namespaces["html"], "select"),
(namespaces["html"], "style"),
(namespaces["html"], "table"),
(namespaces["html"], "tbody"),
(namespaces["html"], "td"),
(namespaces["html"], "textarea"),
(namespaces["html"], "tfoot"),
(namespaces["html"], "th"),
(namespaces["html"], "thead"),
(namespaces["html"], "title"),
(namespaces["html"], "tr"),
(namespaces["html"], "ul"),
(namespaces["html"], "wbr"),
(namespaces["html"], "xmp"),
(namespaces["svg"], "foreignObject")
))
htmlIntegrationPointElements = frozenset((
(namespaces["mathml"], "annotaion-xml"),
(namespaces["svg"], "foreignObject"),
(namespaces["svg"], "desc"),
(namespaces["svg"], "title")
))
mathmlTextIntegrationPointElements = frozenset((
(namespaces["mathml"], "mi"),
(namespaces["mathml"], "mo"),
(namespaces["mathml"], "mn"),
(namespaces["mathml"], "ms"),
(namespaces["mathml"], "mtext")
))
adjustForeignAttributes = {
"xlink:actuate": ("xlink", "actuate", namespaces["xlink"]),
"xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]),
"xlink:href": ("xlink", "href", namespaces["xlink"]),
"xlink:role": ("xlink", "role", namespaces["xlink"]),
"xlink:show": ("xlink", "show", namespaces["xlink"]),
"xlink:title": ("xlink", "title", namespaces["xlink"]),
"xlink:type": ("xlink", "type", namespaces["xlink"]),
"xml:base": ("xml", "base", namespaces["xml"]),
"xml:lang": ("xml", "lang", namespaces["xml"]),
"xml:space": ("xml", "space", namespaces["xml"]),
"xmlns": (None, "xmlns", namespaces["xmlns"]),
"xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"])
}
unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in
adjustForeignAttributes.items()])
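# The reverse map lets a serializer or tree walker recover the source
# qname from a (namespace, localname) pair, e.g.
#     unadjustForeignAttributes[(namespaces["xlink"], "href")]  # "xlink:href"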
spaceCharacters = frozenset((
"\t",
"\n",
"\u000C",
" ",
"\r"
))
tableInsertModeElements = frozenset((
"table",
"tbody",
"tfoot",
"thead",
"tr"
))
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)
asciiUpper2Lower = dict([(ord(c), ord(c.lower()))
for c in string.ascii_uppercase])
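# asciiUpper2Lower maps ordinal to ordinal, so it plugs directly into
# str.translate for ASCII-only case folding of tag and attribute names:
#     "DIV".translate(asciiUpper2Lower)  # -> "div"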
# Heading elements need to be ordered
headingElements = (
"h1",
"h2",
"h3",
"h4",
"h5",
"h6"
)
voidElements = frozenset((
"base",
"command",
"event-source",
"link",
"meta",
"hr",
"br",
"img",
"embed",
"param",
"area",
"col",
"input",
"source",
"track"
))
cdataElements = frozenset(('title', 'textarea'))
rcdataElements = frozenset((
'style',
'script',
'xmp',
'iframe',
'noembed',
'noframes',
'noscript'
))
booleanAttributes = {
"": frozenset(("irrelevant",)),
"style": frozenset(("scoped",)),
"img": frozenset(("ismap",)),
"audio": frozenset(("autoplay", "controls")),
"video": frozenset(("autoplay", "controls")),
"script": frozenset(("defer", "async")),
"details": frozenset(("open",)),
"datagrid": frozenset(("multiple", "disabled")),
"command": frozenset(("hidden", "disabled", "checked", "default")),
"hr": frozenset(("noshade")),
"menu": frozenset(("autosubmit",)),
"fieldset": frozenset(("disabled", "readonly")),
"option": frozenset(("disabled", "readonly", "selected")),
"optgroup": frozenset(("disabled", "readonly")),
"button": frozenset(("disabled", "autofocus")),
"input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")),
"select": frozenset(("disabled", "readonly", "autofocus", "multiple")),
"output": frozenset(("disabled", "readonly")),
}
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
entitiesWindows1252 = (
8364, # 0x80 0x20AC EURO SIGN
65533, # 0x81 UNDEFINED
8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK
402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK
8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK
8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS
8224, # 0x86 0x2020 DAGGER
8225, # 0x87 0x2021 DOUBLE DAGGER
710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
8240, # 0x89 0x2030 PER MILLE SIGN
352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON
8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE
65533, # 0x8D UNDEFINED
381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON
65533, # 0x8F UNDEFINED
65533, # 0x90 UNDEFINED
8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK
8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK
8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK
8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK
8226, # 0x95 0x2022 BULLET
8211, # 0x96 0x2013 EN DASH
8212, # 0x97 0x2014 EM DASH
732, # 0x98 0x02DC SMALL TILDE
8482, # 0x99 0x2122 TRADE MARK SIGN
353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON
8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE
65533, # 0x9D UNDEFINED
382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON
376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
)
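# Index i corresponds to code point 0x80 + i, so a windows-1252 fixup for
# a numeric reference in that range looks like (illustrative sketch only):
#     if 0x80 <= code <= 0x9F:
#         char = chr(entitiesWindows1252[code - 0x80])
# e.g. chr(entitiesWindows1252[0x93 - 0x80]) == "\u201c" (left double quote).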
xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;'))
entities = {
"AElig": "\xc6",
"AElig;": "\xc6",
"AMP": "&",
"AMP;": "&",
"Aacute": "\xc1",
"Aacute;": "\xc1",
"Abreve;": "\u0102",
"Acirc": "\xc2",
"Acirc;": "\xc2",
"Acy;": "\u0410",
"Afr;": "\U0001d504",
"Agrave": "\xc0",
"Agrave;": "\xc0",
"Alpha;": "\u0391",
"Amacr;": "\u0100",
"And;": "\u2a53",
"Aogon;": "\u0104",
"Aopf;": "\U0001d538",
"ApplyFunction;": "\u2061",
"Aring": "\xc5",
"Aring;": "\xc5",
"Ascr;": "\U0001d49c",
"Assign;": "\u2254",
"Atilde": "\xc3",
"Atilde;": "\xc3",
"Auml": "\xc4",
"Auml;": "\xc4",
"Backslash;": "\u2216",
"Barv;": "\u2ae7",
"Barwed;": "\u2306",
"Bcy;": "\u0411",
"Because;": "\u2235",
"Bernoullis;": "\u212c",
"Beta;": "\u0392",
"Bfr;": "\U0001d505",
"Bopf;": "\U0001d539",
"Breve;": "\u02d8",
"Bscr;": "\u212c",
"Bumpeq;": "\u224e",
"CHcy;": "\u0427",
"COPY": "\xa9",
"COPY;": "\xa9",
"Cacute;": "\u0106",
"Cap;": "\u22d2",
"CapitalDifferentialD;": "\u2145",
"Cayleys;": "\u212d",
"Ccaron;": "\u010c",
"Ccedil": "\xc7",
"Ccedil;": "\xc7",
"Ccirc;": "\u0108",
"Cconint;": "\u2230",
"Cdot;": "\u010a",
"Cedilla;": "\xb8",
"CenterDot;": "\xb7",
"Cfr;": "\u212d",
"Chi;": "\u03a7",
"CircleDot;": "\u2299",
"CircleMinus;": "\u2296",
"CirclePlus;": "\u2295",
"CircleTimes;": "\u2297",
"ClockwiseContourIntegral;": "\u2232",
"CloseCurlyDoubleQuote;": "\u201d",
"CloseCurlyQuote;": "\u2019",
"Colon;": "\u2237",
"Colone;": "\u2a74",
"Congruent;": "\u2261",
"Conint;": "\u222f",
"ContourIntegral;": "\u222e",
"Copf;": "\u2102",
"Coproduct;": "\u2210",
"CounterClockwiseContourIntegral;": "\u2233",
"Cross;": "\u2a2f",
"Cscr;": "\U0001d49e",
"Cup;": "\u22d3",
"CupCap;": "\u224d",
"DD;": "\u2145",
"DDotrahd;": "\u2911",
"DJcy;": "\u0402",
"DScy;": "\u0405",
"DZcy;": "\u040f",
"Dagger;": "\u2021",
"Darr;": "\u21a1",
"Dashv;": "\u2ae4",
"Dcaron;": "\u010e",
"Dcy;": "\u0414",
"Del;": "\u2207",
"Delta;": "\u0394",
"Dfr;": "\U0001d507",
"DiacriticalAcute;": "\xb4",
"DiacriticalDot;": "\u02d9",
"DiacriticalDoubleAcute;": "\u02dd",
"DiacriticalGrave;": "`",
"DiacriticalTilde;": "\u02dc",
"Diamond;": "\u22c4",
"DifferentialD;": "\u2146",
"Dopf;": "\U0001d53b",
"Dot;": "\xa8",
"DotDot;": "\u20dc",
"DotEqual;": "\u2250",
"DoubleContourIntegral;": "\u222f",
"DoubleDot;": "\xa8",
"DoubleDownArrow;": "\u21d3",
"DoubleLeftArrow;": "\u21d0",
"DoubleLeftRightArrow;": "\u21d4",
"DoubleLeftTee;": "\u2ae4",
"DoubleLongLeftArrow;": "\u27f8",
"DoubleLongLeftRightArrow;": "\u27fa",
"DoubleLongRightArrow;": "\u27f9",
"DoubleRightArrow;": "\u21d2",
"DoubleRightTee;": "\u22a8",
"DoubleUpArrow;": "\u21d1",
"DoubleUpDownArrow;": "\u21d5",
"DoubleVerticalBar;": "\u2225",
"DownArrow;": "\u2193",
"DownArrowBar;": "\u2913",
"DownArrowUpArrow;": "\u21f5",
"DownBreve;": "\u0311",
"DownLeftRightVector;": "\u2950",
"DownLeftTeeVector;": "\u295e",
"DownLeftVector;": "\u21bd",
"DownLeftVectorBar;": "\u2956",
"DownRightTeeVector;": "\u295f",
"DownRightVector;": "\u21c1",
"DownRightVectorBar;": "\u2957",
"DownTee;": "\u22a4",
"DownTeeArrow;": "\u21a7",
"Downarrow;": "\u21d3",
"Dscr;": "\U0001d49f",
"Dstrok;": "\u0110",
"ENG;": "\u014a",
"ETH": "\xd0",
"ETH;": "\xd0",
"Eacute": "\xc9",
"Eacute;": "\xc9",
"Ecaron;": "\u011a",
"Ecirc": "\xca",
"Ecirc;": "\xca",
"Ecy;": "\u042d",
"Edot;": "\u0116",
"Efr;": "\U0001d508",
"Egrave": "\xc8",
"Egrave;": "\xc8",
"Element;": "\u2208",
"Emacr;": "\u0112",
"EmptySmallSquare;": "\u25fb",
"EmptyVerySmallSquare;": "\u25ab",
"Eogon;": "\u0118",
"Eopf;": "\U0001d53c",
"Epsilon;": "\u0395",
"Equal;": "\u2a75",
"EqualTilde;": "\u2242",
"Equilibrium;": "\u21cc",
"Escr;": "\u2130",
"Esim;": "\u2a73",
"Eta;": "\u0397",
"Euml": "\xcb",
"Euml;": "\xcb",
"Exists;": "\u2203",
"ExponentialE;": "\u2147",
"Fcy;": "\u0424",
"Ffr;": "\U0001d509",
"FilledSmallSquare;": "\u25fc",
"FilledVerySmallSquare;": "\u25aa",
"Fopf;": "\U0001d53d",
"ForAll;": "\u2200",
"Fouriertrf;": "\u2131",
"Fscr;": "\u2131",
"GJcy;": "\u0403",
"GT": ">",
"GT;": ">",
"Gamma;": "\u0393",
"Gammad;": "\u03dc",
"Gbreve;": "\u011e",
"Gcedil;": "\u0122",
"Gcirc;": "\u011c",
"Gcy;": "\u0413",
"Gdot;": "\u0120",
"Gfr;": "\U0001d50a",
"Gg;": "\u22d9",
"Gopf;": "\U0001d53e",
"GreaterEqual;": "\u2265",
"GreaterEqualLess;": "\u22db",
"GreaterFullEqual;": "\u2267",
"GreaterGreater;": "\u2aa2",
"GreaterLess;": "\u2277",
"GreaterSlantEqual;": "\u2a7e",
"GreaterTilde;": "\u2273",
"Gscr;": "\U0001d4a2",
"Gt;": "\u226b",
"HARDcy;": "\u042a",
"Hacek;": "\u02c7",
"Hat;": "^",
"Hcirc;": "\u0124",
"Hfr;": "\u210c",
"HilbertSpace;": "\u210b",
"Hopf;": "\u210d",
"HorizontalLine;": "\u2500",
"Hscr;": "\u210b",
"Hstrok;": "\u0126",
"HumpDownHump;": "\u224e",
"HumpEqual;": "\u224f",
"IEcy;": "\u0415",
"IJlig;": "\u0132",
"IOcy;": "\u0401",
"Iacute": "\xcd",
"Iacute;": "\xcd",
"Icirc": "\xce",
"Icirc;": "\xce",
"Icy;": "\u0418",
"Idot;": "\u0130",
"Ifr;": "\u2111",
"Igrave": "\xcc",
"Igrave;": "\xcc",
"Im;": "\u2111",
"Imacr;": "\u012a",
"ImaginaryI;": "\u2148",
"Implies;": "\u21d2",
"Int;": "\u222c",
"Integral;": "\u222b",
"Intersection;": "\u22c2",
"InvisibleComma;": "\u2063",
"InvisibleTimes;": "\u2062",
"Iogon;": "\u012e",
"Iopf;": "\U0001d540",
"Iota;": "\u0399",
"Iscr;": "\u2110",
"Itilde;": "\u0128",
"Iukcy;": "\u0406",
"Iuml": "\xcf",
"Iuml;": "\xcf",
"Jcirc;": "\u0134",
"Jcy;": "\u0419",
"Jfr;": "\U0001d50d",
"Jopf;": "\U0001d541",
"Jscr;": "\U0001d4a5",
"Jsercy;": "\u0408",
"Jukcy;": "\u0404",
"KHcy;": "\u0425",
"KJcy;": "\u040c",
"Kappa;": "\u039a",
"Kcedil;": "\u0136",
"Kcy;": "\u041a",
"Kfr;": "\U0001d50e",
"Kopf;": "\U0001d542",
"Kscr;": "\U0001d4a6",
"LJcy;": "\u0409",
"LT": "<",
"LT;": "<",
"Lacute;": "\u0139",
"Lambda;": "\u039b",
"Lang;": "\u27ea",
"Laplacetrf;": "\u2112",
"Larr;": "\u219e",
"Lcaron;": "\u013d",
"Lcedil;": "\u013b",
"Lcy;": "\u041b",
"LeftAngleBracket;": "\u27e8",
"LeftArrow;": "\u2190",
"LeftArrowBar;": "\u21e4",
"LeftArrowRightArrow;": "\u21c6",
"LeftCeiling;": "\u2308",
"LeftDoubleBracket;": "\u27e6",
"LeftDownTeeVector;": "\u2961",
"LeftDownVector;": "\u21c3",
"LeftDownVectorBar;": "\u2959",
"LeftFloor;": "\u230a",
"LeftRightArrow;": "\u2194",
"LeftRightVector;": "\u294e",
"LeftTee;": "\u22a3",
"LeftTeeArrow;": "\u21a4",
"LeftTeeVector;": "\u295a",
"LeftTriangle;": "\u22b2",
"LeftTriangleBar;": "\u29cf",
"LeftTriangleEqual;": "\u22b4",
"LeftUpDownVector;": "\u2951",
"LeftUpTeeVector;": "\u2960",
"LeftUpVector;": "\u21bf",
"LeftUpVectorBar;": "\u2958",
"LeftVector;": "\u21bc",
"LeftVectorBar;": "\u2952",
"Leftarrow;": "\u21d0",
"Leftrightarrow;": "\u21d4",
"LessEqualGreater;": "\u22da",
"LessFullEqual;": "\u2266",
"LessGreater;": "\u2276",
"LessLess;": "\u2aa1",
"LessSlantEqual;": "\u2a7d",
"LessTilde;": "\u2272",
"Lfr;": "\U0001d50f",
"Ll;": "\u22d8",
"Lleftarrow;": "\u21da",
"Lmidot;": "\u013f",
"LongLeftArrow;": "\u27f5",
"LongLeftRightArrow;": "\u27f7",
"LongRightArrow;": "\u27f6",
"Longleftarrow;": "\u27f8",
"Longleftrightarrow;": "\u27fa",
"Longrightarrow;": "\u27f9",
"Lopf;": "\U0001d543",
"LowerLeftArrow;": "\u2199",
"LowerRightArrow;": "\u2198",
"Lscr;": "\u2112",
"Lsh;": "\u21b0",
"Lstrok;": "\u0141",
"Lt;": "\u226a",
"Map;": "\u2905",
"Mcy;": "\u041c",
"MediumSpace;": "\u205f",
"Mellintrf;": "\u2133",
"Mfr;": "\U0001d510",
"MinusPlus;": "\u2213",
"Mopf;": "\U0001d544",
"Mscr;": "\u2133",
"Mu;": "\u039c",
"NJcy;": "\u040a",
"Nacute;": "\u0143",
"Ncaron;": "\u0147",
"Ncedil;": "\u0145",
"Ncy;": "\u041d",
"NegativeMediumSpace;": "\u200b",
"NegativeThickSpace;": "\u200b",
"NegativeThinSpace;": "\u200b",
"NegativeVeryThinSpace;": "\u200b",
"NestedGreaterGreater;": "\u226b",
"NestedLessLess;": "\u226a",
"NewLine;": "\n",
"Nfr;": "\U0001d511",
"NoBreak;": "\u2060",
"NonBreakingSpace;": "\xa0",
"Nopf;": "\u2115",
"Not;": "\u2aec",
"NotCongruent;": "\u2262",
"NotCupCap;": "\u226d",
"NotDoubleVerticalBar;": "\u2226",
"NotElement;": "\u2209",
"NotEqual;": "\u2260",
"NotEqualTilde;": "\u2242\u0338",
"NotExists;": "\u2204",
"NotGreater;": "\u226f",
"NotGreaterEqual;": "\u2271",
"NotGreaterFullEqual;": "\u2267\u0338",
"NotGreaterGreater;": "\u226b\u0338",
"NotGreaterLess;": "\u2279",
"NotGreaterSlantEqual;": "\u2a7e\u0338",
"NotGreaterTilde;": "\u2275",
"NotHumpDownHump;": "\u224e\u0338",
"NotHumpEqual;": "\u224f\u0338",
"NotLeftTriangle;": "\u22ea",
"NotLeftTriangleBar;": "\u29cf\u0338",
"NotLeftTriangleEqual;": "\u22ec",
"NotLess;": "\u226e",
"NotLessEqual;": "\u2270",
"NotLessGreater;": "\u2278",
"NotLessLess;": "\u226a\u0338",
"NotLessSlantEqual;": "\u2a7d\u0338",
"NotLessTilde;": "\u2274",
"NotNestedGreaterGreater;": "\u2aa2\u0338",
"NotNestedLessLess;": "\u2aa1\u0338",
"NotPrecedes;": "\u2280",
"NotPrecedesEqual;": "\u2aaf\u0338",
"NotPrecedesSlantEqual;": "\u22e0",
"NotReverseElement;": "\u220c",
"NotRightTriangle;": "\u22eb",
"NotRightTriangleBar;": "\u29d0\u0338",
"NotRightTriangleEqual;": "\u22ed",
"NotSquareSubset;": "\u228f\u0338",
"NotSquareSubsetEqual;": "\u22e2",
"NotSquareSuperset;": "\u2290\u0338",
"NotSquareSupersetEqual;": "\u22e3",
"NotSubset;": "\u2282\u20d2",
"NotSubsetEqual;": "\u2288",
"NotSucceeds;": "\u2281",
"NotSucceedsEqual;": "\u2ab0\u0338",
"NotSucceedsSlantEqual;": "\u22e1",
"NotSucceedsTilde;": "\u227f\u0338",
"NotSuperset;": "\u2283\u20d2",
"NotSupersetEqual;": "\u2289",
"NotTilde;": "\u2241",
"NotTildeEqual;": "\u2244",
"NotTildeFullEqual;": "\u2247",
"NotTildeTilde;": "\u2249",
"NotVerticalBar;": "\u2224",
"Nscr;": "\U0001d4a9",
"Ntilde": "\xd1",
"Ntilde;": "\xd1",
"Nu;": "\u039d",
"OElig;": "\u0152",
"Oacute": "\xd3",
"Oacute;": "\xd3",
"Ocirc": "\xd4",
"Ocirc;": "\xd4",
"Ocy;": "\u041e",
"Odblac;": "\u0150",
"Ofr;": "\U0001d512",
"Ograve": "\xd2",
"Ograve;": "\xd2",
"Omacr;": "\u014c",
"Omega;": "\u03a9",
"Omicron;": "\u039f",
"Oopf;": "\U0001d546",
"OpenCurlyDoubleQuote;": "\u201c",
"OpenCurlyQuote;": "\u2018",
"Or;": "\u2a54",
"Oscr;": "\U0001d4aa",
"Oslash": "\xd8",
"Oslash;": "\xd8",
"Otilde": "\xd5",
"Otilde;": "\xd5",
"Otimes;": "\u2a37",
"Ouml": "\xd6",
"Ouml;": "\xd6",
"OverBar;": "\u203e",
"OverBrace;": "\u23de",
"OverBracket;": "\u23b4",
"OverParenthesis;": "\u23dc",
"PartialD;": "\u2202",
"Pcy;": "\u041f",
"Pfr;": "\U0001d513",
"Phi;": "\u03a6",
"Pi;": "\u03a0",
"PlusMinus;": "\xb1",
"Poincareplane;": "\u210c",
"Popf;": "\u2119",
"Pr;": "\u2abb",
"Precedes;": "\u227a",
"PrecedesEqual;": "\u2aaf",
"PrecedesSlantEqual;": "\u227c",
"PrecedesTilde;": "\u227e",
"Prime;": "\u2033",
"Product;": "\u220f",
"Proportion;": "\u2237",
"Proportional;": "\u221d",
"Pscr;": "\U0001d4ab",
"Psi;": "\u03a8",
"QUOT": "\"",
"QUOT;": "\"",
"Qfr;": "\U0001d514",
"Qopf;": "\u211a",
"Qscr;": "\U0001d4ac",
"RBarr;": "\u2910",
"REG": "\xae",
"REG;": "\xae",
"Racute;": "\u0154",
"Rang;": "\u27eb",
"Rarr;": "\u21a0",
"Rarrtl;": "\u2916",
"Rcaron;": "\u0158",
"Rcedil;": "\u0156",
"Rcy;": "\u0420",
"Re;": "\u211c",
"ReverseElement;": "\u220b",
"ReverseEquilibrium;": "\u21cb",
"ReverseUpEquilibrium;": "\u296f",
"Rfr;": "\u211c",
"Rho;": "\u03a1",
"RightAngleBracket;": "\u27e9",
"RightArrow;": "\u2192",
"RightArrowBar;": "\u21e5",
"RightArrowLeftArrow;": "\u21c4",
"RightCeiling;": "\u2309",
"RightDoubleBracket;": "\u27e7",
"RightDownTeeVector;": "\u295d",
"RightDownVector;": "\u21c2",
"RightDownVectorBar;": "\u2955",
"RightFloor;": "\u230b",
"RightTee;": "\u22a2",
"RightTeeArrow;": "\u21a6",
"RightTeeVector;": "\u295b",
"RightTriangle;": "\u22b3",
"RightTriangleBar;": "\u29d0",
"RightTriangleEqual;": "\u22b5",
"RightUpDownVector;": "\u294f",
"RightUpTeeVector;": "\u295c",
"RightUpVector;": "\u21be",
"RightUpVectorBar;": "\u2954",
"RightVector;": "\u21c0",
"RightVectorBar;": "\u2953",
"Rightarrow;": "\u21d2",
"Ropf;": "\u211d",
"RoundImplies;": "\u2970",
"Rrightarrow;": "\u21db",
"Rscr;": "\u211b",
"Rsh;": "\u21b1",
"RuleDelayed;": "\u29f4",
"SHCHcy;": "\u0429",
"SHcy;": "\u0428",
"SOFTcy;": "\u042c",
"Sacute;": "\u015a",
"Sc;": "\u2abc",
"Scaron;": "\u0160",
"Scedil;": "\u015e",
"Scirc;": "\u015c",
"Scy;": "\u0421",
"Sfr;": "\U0001d516",
"ShortDownArrow;": "\u2193",
"ShortLeftArrow;": "\u2190",
"ShortRightArrow;": "\u2192",
"ShortUpArrow;": "\u2191",
"Sigma;": "\u03a3",
"SmallCircle;": "\u2218",
"Sopf;": "\U0001d54a",
"Sqrt;": "\u221a",
"Square;": "\u25a1",
"SquareIntersection;": "\u2293",
"SquareSubset;": "\u228f",
"SquareSubsetEqual;": "\u2291",
"SquareSuperset;": "\u2290",
"SquareSupersetEqual;": "\u2292",
"SquareUnion;": "\u2294",
"Sscr;": "\U0001d4ae",
"Star;": "\u22c6",
"Sub;": "\u22d0",
"Subset;": "\u22d0",
"SubsetEqual;": "\u2286",
"Succeeds;": "\u227b",
"SucceedsEqual;": "\u2ab0",
"SucceedsSlantEqual;": "\u227d",
"SucceedsTilde;": "\u227f",
"SuchThat;": "\u220b",
"Sum;": "\u2211",
"Sup;": "\u22d1",
"Superset;": "\u2283",
"SupersetEqual;": "\u2287",
"Supset;": "\u22d1",
"THORN": "\xde",
"THORN;": "\xde",
"TRADE;": "\u2122",
"TSHcy;": "\u040b",
"TScy;": "\u0426",
"Tab;": "\t",
"Tau;": "\u03a4",
"Tcaron;": "\u0164",
"Tcedil;": "\u0162",
"Tcy;": "\u0422",
"Tfr;": "\U0001d517",
"Therefore;": "\u2234",
"Theta;": "\u0398",
"ThickSpace;": "\u205f\u200a",
"ThinSpace;": "\u2009",
"Tilde;": "\u223c",
"TildeEqual;": "\u2243",
"TildeFullEqual;": "\u2245",
"TildeTilde;": "\u2248",
"Topf;": "\U0001d54b",
"TripleDot;": "\u20db",
"Tscr;": "\U0001d4af",
"Tstrok;": "\u0166",
"Uacute": "\xda",
"Uacute;": "\xda",
"Uarr;": "\u219f",
"Uarrocir;": "\u2949",
"Ubrcy;": "\u040e",
"Ubreve;": "\u016c",
"Ucirc": "\xdb",
"Ucirc;": "\xdb",
"Ucy;": "\u0423",
"Udblac;": "\u0170",
"Ufr;": "\U0001d518",
"Ugrave": "\xd9",
"Ugrave;": "\xd9",
"Umacr;": "\u016a",
"UnderBar;": "_",
"UnderBrace;": "\u23df",
"UnderBracket;": "\u23b5",
"UnderParenthesis;": "\u23dd",
"Union;": "\u22c3",
"UnionPlus;": "\u228e",
"Uogon;": "\u0172",
"Uopf;": "\U0001d54c",
"UpArrow;": "\u2191",
"UpArrowBar;": "\u2912",
"UpArrowDownArrow;": "\u21c5",
"UpDownArrow;": "\u2195",
"UpEquilibrium;": "\u296e",
"UpTee;": "\u22a5",
"UpTeeArrow;": "\u21a5",
"Uparrow;": "\u21d1",
"Updownarrow;": "\u21d5",
"UpperLeftArrow;": "\u2196",
"UpperRightArrow;": "\u2197",
"Upsi;": "\u03d2",
"Upsilon;": "\u03a5",
"Uring;": "\u016e",
"Uscr;": "\U0001d4b0",
"Utilde;": "\u0168",
"Uuml": "\xdc",
"Uuml;": "\xdc",
"VDash;": "\u22ab",
"Vbar;": "\u2aeb",
"Vcy;": "\u0412",
"Vdash;": "\u22a9",
"Vdashl;": "\u2ae6",
"Vee;": "\u22c1",
"Verbar;": "\u2016",
"Vert;": "\u2016",
"VerticalBar;": "\u2223",
"VerticalLine;": "|",
"VerticalSeparator;": "\u2758",
"VerticalTilde;": "\u2240",
"VeryThinSpace;": "\u200a",
"Vfr;": "\U0001d519",
"Vopf;": "\U0001d54d",
"Vscr;": "\U0001d4b1",
"Vvdash;": "\u22aa",
"Wcirc;": "\u0174",
"Wedge;": "\u22c0",
"Wfr;": "\U0001d51a",
"Wopf;": "\U0001d54e",
"Wscr;": "\U0001d4b2",
"Xfr;": "\U0001d51b",
"Xi;": "\u039e",
"Xopf;": "\U0001d54f",
"Xscr;": "\U0001d4b3",
"YAcy;": "\u042f",
"YIcy;": "\u0407",
"YUcy;": "\u042e",
"Yacute": "\xdd",
"Yacute;": "\xdd",
"Ycirc;": "\u0176",
"Ycy;": "\u042b",
"Yfr;": "\U0001d51c",
"Yopf;": "\U0001d550",
"Yscr;": "\U0001d4b4",
"Yuml;": "\u0178",
"ZHcy;": "\u0416",
"Zacute;": "\u0179",
"Zcaron;": "\u017d",
"Zcy;": "\u0417",
"Zdot;": "\u017b",
"ZeroWidthSpace;": "\u200b",
"Zeta;": "\u0396",
"Zfr;": "\u2128",
"Zopf;": "\u2124",
"Zscr;": "\U0001d4b5",
"aacute": "\xe1",
"aacute;": "\xe1",
"abreve;": "\u0103",
"ac;": "\u223e",
"acE;": "\u223e\u0333",
"acd;": "\u223f",
"acirc": "\xe2",
"acirc;": "\xe2",
"acute": "\xb4",
"acute;": "\xb4",
"acy;": "\u0430",
"aelig": "\xe6",
"aelig;": "\xe6",
"af;": "\u2061",
"afr;": "\U0001d51e",
"agrave": "\xe0",
"agrave;": "\xe0",
"alefsym;": "\u2135",
"aleph;": "\u2135",
"alpha;": "\u03b1",
"amacr;": "\u0101",
"amalg;": "\u2a3f",
"amp": "&",
"amp;": "&",
"and;": "\u2227",
"andand;": "\u2a55",
"andd;": "\u2a5c",
"andslope;": "\u2a58",
"andv;": "\u2a5a",
"ang;": "\u2220",
"ange;": "\u29a4",
"angle;": "\u2220",
"angmsd;": "\u2221",
"angmsdaa;": "\u29a8",
"angmsdab;": "\u29a9",
"angmsdac;": "\u29aa",
"angmsdad;": "\u29ab",
"angmsdae;": "\u29ac",
"angmsdaf;": "\u29ad",
"angmsdag;": "\u29ae",
"angmsdah;": "\u29af",
"angrt;": "\u221f",
"angrtvb;": "\u22be",
"angrtvbd;": "\u299d",
"angsph;": "\u2222",
"angst;": "\xc5",
"angzarr;": "\u237c",
"aogon;": "\u0105",
"aopf;": "\U0001d552",
"ap;": "\u2248",
"apE;": "\u2a70",
"apacir;": "\u2a6f",
"ape;": "\u224a",
"apid;": "\u224b",
"apos;": "'",
"approx;": "\u2248",
"approxeq;": "\u224a",
"aring": "\xe5",
"aring;": "\xe5",
"ascr;": "\U0001d4b6",
"ast;": "*",
"asymp;": "\u2248",
"asympeq;": "\u224d",
"atilde": "\xe3",
"atilde;": "\xe3",
"auml": "\xe4",
"auml;": "\xe4",
"awconint;": "\u2233",
"awint;": "\u2a11",
"bNot;": "\u2aed",
"backcong;": "\u224c",
"backepsilon;": "\u03f6",
"backprime;": "\u2035",
"backsim;": "\u223d",
"backsimeq;": "\u22cd",
"barvee;": "\u22bd",
"barwed;": "\u2305",
"barwedge;": "\u2305",
"bbrk;": "\u23b5",
"bbrktbrk;": "\u23b6",
"bcong;": "\u224c",
"bcy;": "\u0431",
"bdquo;": "\u201e",
"becaus;": "\u2235",
"because;": "\u2235",
"bemptyv;": "\u29b0",
"bepsi;": "\u03f6",
"bernou;": "\u212c",
"beta;": "\u03b2",
"beth;": "\u2136",
"between;": "\u226c",
"bfr;": "\U0001d51f",
"bigcap;": "\u22c2",
"bigcirc;": "\u25ef",
"bigcup;": "\u22c3",
"bigodot;": "\u2a00",
"bigoplus;": "\u2a01",
"bigotimes;": "\u2a02",
"bigsqcup;": "\u2a06",
"bigstar;": "\u2605",
"bigtriangledown;": "\u25bd",
"bigtriangleup;": "\u25b3",
"biguplus;": "\u2a04",
"bigvee;": "\u22c1",
"bigwedge;": "\u22c0",
"bkarow;": "\u290d",
"blacklozenge;": "\u29eb",
"blacksquare;": "\u25aa",
"blacktriangle;": "\u25b4",
"blacktriangledown;": "\u25be",
"blacktriangleleft;": "\u25c2",
"blacktriangleright;": "\u25b8",
"blank;": "\u2423",
"blk12;": "\u2592",
"blk14;": "\u2591",
"blk34;": "\u2593",
"block;": "\u2588",
"bne;": "=\u20e5",
"bnequiv;": "\u2261\u20e5",
"bnot;": "\u2310",
"bopf;": "\U0001d553",
"bot;": "\u22a5",
"bottom;": "\u22a5",
"bowtie;": "\u22c8",
"boxDL;": "\u2557",
"boxDR;": "\u2554",
"boxDl;": "\u2556",
"boxDr;": "\u2553",
"boxH;": "\u2550",
"boxHD;": "\u2566",
"boxHU;": "\u2569",
"boxHd;": "\u2564",
"boxHu;": "\u2567",
"boxUL;": "\u255d",
"boxUR;": "\u255a",
"boxUl;": "\u255c",
"boxUr;": "\u2559",
"boxV;": "\u2551",
"boxVH;": "\u256c",
"boxVL;": "\u2563",
"boxVR;": "\u2560",
"boxVh;": "\u256b",
"boxVl;": "\u2562",
"boxVr;": "\u255f",
"boxbox;": "\u29c9",
"boxdL;": "\u2555",
"boxdR;": "\u2552",
"boxdl;": "\u2510",
"boxdr;": "\u250c",
"boxh;": "\u2500",
"boxhD;": "\u2565",
"boxhU;": "\u2568",
"boxhd;": "\u252c",
"boxhu;": "\u2534",
"boxminus;": "\u229f",
"boxplus;": "\u229e",
"boxtimes;": "\u22a0",
"boxuL;": "\u255b",
"boxuR;": "\u2558",
"boxul;": "\u2518",
"boxur;": "\u2514",
"boxv;": "\u2502",
"boxvH;": "\u256a",
"boxvL;": "\u2561",
"boxvR;": "\u255e",
"boxvh;": "\u253c",
"boxvl;": "\u2524",
"boxvr;": "\u251c",
"bprime;": "\u2035",
"breve;": "\u02d8",
"brvbar": "\xa6",
"brvbar;": "\xa6",
"bscr;": "\U0001d4b7",
"bsemi;": "\u204f",
"bsim;": "\u223d",
"bsime;": "\u22cd",
"bsol;": "\\",
"bsolb;": "\u29c5",
"bsolhsub;": "\u27c8",
"bull;": "\u2022",
"bullet;": "\u2022",
"bump;": "\u224e",
"bumpE;": "\u2aae",
"bumpe;": "\u224f",
"bumpeq;": "\u224f",
"cacute;": "\u0107",
"cap;": "\u2229",
"capand;": "\u2a44",
"capbrcup;": "\u2a49",
"capcap;": "\u2a4b",
"capcup;": "\u2a47",
"capdot;": "\u2a40",
"caps;": "\u2229\ufe00",
"caret;": "\u2041",
"caron;": "\u02c7",
"ccaps;": "\u2a4d",
"ccaron;": "\u010d",
"ccedil": "\xe7",
"ccedil;": "\xe7",
"ccirc;": "\u0109",
"ccups;": "\u2a4c",
"ccupssm;": "\u2a50",
"cdot;": "\u010b",
"cedil": "\xb8",
"cedil;": "\xb8",
"cemptyv;": "\u29b2",
"cent": "\xa2",
"cent;": "\xa2",
"centerdot;": "\xb7",
"cfr;": "\U0001d520",
"chcy;": "\u0447",
"check;": "\u2713",
"checkmark;": "\u2713",
"chi;": "\u03c7",
"cir;": "\u25cb",
"cirE;": "\u29c3",
"circ;": "\u02c6",
"circeq;": "\u2257",
"circlearrowleft;": "\u21ba",
"circlearrowright;": "\u21bb",
"circledR;": "\xae",
"circledS;": "\u24c8",
"circledast;": "\u229b",
"circledcirc;": "\u229a",
"circleddash;": "\u229d",
"cire;": "\u2257",
"cirfnint;": "\u2a10",
"cirmid;": "\u2aef",
"cirscir;": "\u29c2",
"clubs;": "\u2663",
"clubsuit;": "\u2663",
"colon;": ":",
"colone;": "\u2254",
"coloneq;": "\u2254",
"comma;": ",",
"commat;": "@",
"comp;": "\u2201",
"compfn;": "\u2218",
"complement;": "\u2201",
"complexes;": "\u2102",
"cong;": "\u2245",
"congdot;": "\u2a6d",
"conint;": "\u222e",
"copf;": "\U0001d554",
"coprod;": "\u2210",
"copy": "\xa9",
"copy;": "\xa9",
"copysr;": "\u2117",
"crarr;": "\u21b5",
"cross;": "\u2717",
"cscr;": "\U0001d4b8",
"csub;": "\u2acf",
"csube;": "\u2ad1",
"csup;": "\u2ad0",
"csupe;": "\u2ad2",
"ctdot;": "\u22ef",
"cudarrl;": "\u2938",
"cudarrr;": "\u2935",
"cuepr;": "\u22de",
"cuesc;": "\u22df",
"cularr;": "\u21b6",
"cularrp;": "\u293d",
"cup;": "\u222a",
"cupbrcap;": "\u2a48",
"cupcap;": "\u2a46",
"cupcup;": "\u2a4a",
"cupdot;": "\u228d",
"cupor;": "\u2a45",
"cups;": "\u222a\ufe00",
"curarr;": "\u21b7",
"curarrm;": "\u293c",
"curlyeqprec;": "\u22de",
"curlyeqsucc;": "\u22df",
"curlyvee;": "\u22ce",
"curlywedge;": "\u22cf",
"curren": "\xa4",
"curren;": "\xa4",
"curvearrowleft;": "\u21b6",
"curvearrowright;": "\u21b7",
"cuvee;": "\u22ce",
"cuwed;": "\u22cf",
"cwconint;": "\u2232",
"cwint;": "\u2231",
"cylcty;": "\u232d",
"dArr;": "\u21d3",
"dHar;": "\u2965",
"dagger;": "\u2020",
"daleth;": "\u2138",
"darr;": "\u2193",
"dash;": "\u2010",
"dashv;": "\u22a3",
"dbkarow;": "\u290f",
"dblac;": "\u02dd",
"dcaron;": "\u010f",
"dcy;": "\u0434",
"dd;": "\u2146",
"ddagger;": "\u2021",
"ddarr;": "\u21ca",
"ddotseq;": "\u2a77",
"deg": "\xb0",
"deg;": "\xb0",
"delta;": "\u03b4",
"demptyv;": "\u29b1",
"dfisht;": "\u297f",
"dfr;": "\U0001d521",
"dharl;": "\u21c3",
"dharr;": "\u21c2",
"diam;": "\u22c4",
"diamond;": "\u22c4",
"diamondsuit;": "\u2666",
"diams;": "\u2666",
"die;": "\xa8",
"digamma;": "\u03dd",
"disin;": "\u22f2",
"div;": "\xf7",
"divide": "\xf7",
"divide;": "\xf7",
"divideontimes;": "\u22c7",
"divonx;": "\u22c7",
"djcy;": "\u0452",
"dlcorn;": "\u231e",
"dlcrop;": "\u230d",
"dollar;": "$",
"dopf;": "\U0001d555",
"dot;": "\u02d9",
"doteq;": "\u2250",
"doteqdot;": "\u2251",
"dotminus;": "\u2238",
"dotplus;": "\u2214",
"dotsquare;": "\u22a1",
"doublebarwedge;": "\u2306",
"downarrow;": "\u2193",
"downdownarrows;": "\u21ca",
"downharpoonleft;": "\u21c3",
"downharpoonright;": "\u21c2",
"drbkarow;": "\u2910",
"drcorn;": "\u231f",
"drcrop;": "\u230c",
"dscr;": "\U0001d4b9",
"dscy;": "\u0455",
"dsol;": "\u29f6",
"dstrok;": "\u0111",
"dtdot;": "\u22f1",
"dtri;": "\u25bf",
"dtrif;": "\u25be",
"duarr;": "\u21f5",
"duhar;": "\u296f",
"dwangle;": "\u29a6",
"dzcy;": "\u045f",
"dzigrarr;": "\u27ff",
"eDDot;": "\u2a77",
"eDot;": "\u2251",
"eacute": "\xe9",
"eacute;": "\xe9",
"easter;": "\u2a6e",
"ecaron;": "\u011b",
"ecir;": "\u2256",
"ecirc": "\xea",
"ecirc;": "\xea",
"ecolon;": "\u2255",
"ecy;": "\u044d",
"edot;": "\u0117",
"ee;": "\u2147",
"efDot;": "\u2252",
"efr;": "\U0001d522",
"eg;": "\u2a9a",
"egrave": "\xe8",
"egrave;": "\xe8",
"egs;": "\u2a96",
"egsdot;": "\u2a98",
"el;": "\u2a99",
"elinters;": "\u23e7",
"ell;": "\u2113",
"els;": "\u2a95",
"elsdot;": "\u2a97",
"emacr;": "\u0113",
"empty;": "\u2205",
"emptyset;": "\u2205",
"emptyv;": "\u2205",
"emsp13;": "\u2004",
"emsp14;": "\u2005",
"emsp;": "\u2003",
"eng;": "\u014b",
"ensp;": "\u2002",
"eogon;": "\u0119",
"eopf;": "\U0001d556",
"epar;": "\u22d5",
"eparsl;": "\u29e3",
"eplus;": "\u2a71",
"epsi;": "\u03b5",
"epsilon;": "\u03b5",
"epsiv;": "\u03f5",
"eqcirc;": "\u2256",
"eqcolon;": "\u2255",
"eqsim;": "\u2242",
"eqslantgtr;": "\u2a96",
"eqslantless;": "\u2a95",
"equals;": "=",
"equest;": "\u225f",
"equiv;": "\u2261",
"equivDD;": "\u2a78",
"eqvparsl;": "\u29e5",
"erDot;": "\u2253",
"erarr;": "\u2971",
"escr;": "\u212f",
"esdot;": "\u2250",
"esim;": "\u2242",
"eta;": "\u03b7",
"eth": "\xf0",
"eth;": "\xf0",
"euml": "\xeb",
"euml;": "\xeb",
"euro;": "\u20ac",
"excl;": "!",
"exist;": "\u2203",
"expectation;": "\u2130",
"exponentiale;": "\u2147",
"fallingdotseq;": "\u2252",
"fcy;": "\u0444",
"female;": "\u2640",
"ffilig;": "\ufb03",
"fflig;": "\ufb00",
"ffllig;": "\ufb04",
"ffr;": "\U0001d523",
"filig;": "\ufb01",
"fjlig;": "fj",
"flat;": "\u266d",
"fllig;": "\ufb02",
"fltns;": "\u25b1",
"fnof;": "\u0192",
"fopf;": "\U0001d557",
"forall;": "\u2200",
"fork;": "\u22d4",
"forkv;": "\u2ad9",
"fpartint;": "\u2a0d",
"frac12": "\xbd",
"frac12;": "\xbd",
"frac13;": "\u2153",
"frac14": "\xbc",
"frac14;": "\xbc",
"frac15;": "\u2155",
"frac16;": "\u2159",
"frac18;": "\u215b",
"frac23;": "\u2154",
"frac25;": "\u2156",
"frac34": "\xbe",
"frac34;": "\xbe",
"frac35;": "\u2157",
"frac38;": "\u215c",
"frac45;": "\u2158",
"frac56;": "\u215a",
"frac58;": "\u215d",
"frac78;": "\u215e",
"frasl;": "\u2044",
"frown;": "\u2322",
"fscr;": "\U0001d4bb",
"gE;": "\u2267",
"gEl;": "\u2a8c",
"gacute;": "\u01f5",
"gamma;": "\u03b3",
"gammad;": "\u03dd",
"gap;": "\u2a86",
"gbreve;": "\u011f",
"gcirc;": "\u011d",
"gcy;": "\u0433",
"gdot;": "\u0121",
"ge;": "\u2265",
"gel;": "\u22db",
"geq;": "\u2265",
"geqq;": "\u2267",
"geqslant;": "\u2a7e",
"ges;": "\u2a7e",
"gescc;": "\u2aa9",
"gesdot;": "\u2a80",
"gesdoto;": "\u2a82",
"gesdotol;": "\u2a84",
"gesl;": "\u22db\ufe00",
"gesles;": "\u2a94",
"gfr;": "\U0001d524",
"gg;": "\u226b",
"ggg;": "\u22d9",
"gimel;": "\u2137",
"gjcy;": "\u0453",
"gl;": "\u2277",
"glE;": "\u2a92",
"gla;": "\u2aa5",
"glj;": "\u2aa4",
"gnE;": "\u2269",
"gnap;": "\u2a8a",
"gnapprox;": "\u2a8a",
"gne;": "\u2a88",
"gneq;": "\u2a88",
"gneqq;": "\u2269",
"gnsim;": "\u22e7",
"gopf;": "\U0001d558",
"grave;": "`",
"gscr;": "\u210a",
"gsim;": "\u2273",
"gsime;": "\u2a8e",
"gsiml;": "\u2a90",
"gt": ">",
"gt;": ">",
"gtcc;": "\u2aa7",
"gtcir;": "\u2a7a",
"gtdot;": "\u22d7",
"gtlPar;": "\u2995",
"gtquest;": "\u2a7c",
"gtrapprox;": "\u2a86",
"gtrarr;": "\u2978",
"gtrdot;": "\u22d7",
"gtreqless;": "\u22db",
"gtreqqless;": "\u2a8c",
"gtrless;": "\u2277",
"gtrsim;": "\u2273",
"gvertneqq;": "\u2269\ufe00",
"gvnE;": "\u2269\ufe00",
"hArr;": "\u21d4",
"hairsp;": "\u200a",
"half;": "\xbd",
"hamilt;": "\u210b",
"hardcy;": "\u044a",
"harr;": "\u2194",
"harrcir;": "\u2948",
"harrw;": "\u21ad",
"hbar;": "\u210f",
"hcirc;": "\u0125",
"hearts;": "\u2665",
"heartsuit;": "\u2665",
"hellip;": "\u2026",
"hercon;": "\u22b9",
"hfr;": "\U0001d525",
"hksearow;": "\u2925",
"hkswarow;": "\u2926",
"hoarr;": "\u21ff",
"homtht;": "\u223b",
"hookleftarrow;": "\u21a9",
"hookrightarrow;": "\u21aa",
"hopf;": "\U0001d559",
"horbar;": "\u2015",
"hscr;": "\U0001d4bd",
"hslash;": "\u210f",
"hstrok;": "\u0127",
"hybull;": "\u2043",
"hyphen;": "\u2010",
"iacute": "\xed",
"iacute;": "\xed",
"ic;": "\u2063",
"icirc": "\xee",
"icirc;": "\xee",
"icy;": "\u0438",
"iecy;": "\u0435",
"iexcl": "\xa1",
"iexcl;": "\xa1",
"iff;": "\u21d4",
"ifr;": "\U0001d526",
"igrave": "\xec",
"igrave;": "\xec",
"ii;": "\u2148",
"iiiint;": "\u2a0c",
"iiint;": "\u222d",
"iinfin;": "\u29dc",
"iiota;": "\u2129",
"ijlig;": "\u0133",
"imacr;": "\u012b",
"image;": "\u2111",
"imagline;": "\u2110",
"imagpart;": "\u2111",
"imath;": "\u0131",
"imof;": "\u22b7",
"imped;": "\u01b5",
"in;": "\u2208",
"incare;": "\u2105",
"infin;": "\u221e",
"infintie;": "\u29dd",
"inodot;": "\u0131",
"int;": "\u222b",
"intcal;": "\u22ba",
"integers;": "\u2124",
"intercal;": "\u22ba",
"intlarhk;": "\u2a17",
"intprod;": "\u2a3c",
"iocy;": "\u0451",
"iogon;": "\u012f",
"iopf;": "\U0001d55a",
"iota;": "\u03b9",
"iprod;": "\u2a3c",
"iquest": "\xbf",
"iquest;": "\xbf",
"iscr;": "\U0001d4be",
"isin;": "\u2208",
"isinE;": "\u22f9",
"isindot;": "\u22f5",
"isins;": "\u22f4",
"isinsv;": "\u22f3",
"isinv;": "\u2208",
"it;": "\u2062",
"itilde;": "\u0129",
"iukcy;": "\u0456",
"iuml": "\xef",
"iuml;": "\xef",
"jcirc;": "\u0135",
"jcy;": "\u0439",
"jfr;": "\U0001d527",
"jmath;": "\u0237",
"jopf;": "\U0001d55b",
"jscr;": "\U0001d4bf",
"jsercy;": "\u0458",
"jukcy;": "\u0454",
"kappa;": "\u03ba",
"kappav;": "\u03f0",
"kcedil;": "\u0137",
"kcy;": "\u043a",
"kfr;": "\U0001d528",
"kgreen;": "\u0138",
"khcy;": "\u0445",
"kjcy;": "\u045c",
"kopf;": "\U0001d55c",
"kscr;": "\U0001d4c0",
"lAarr;": "\u21da",
"lArr;": "\u21d0",
"lAtail;": "\u291b",
"lBarr;": "\u290e",
"lE;": "\u2266",
"lEg;": "\u2a8b",
"lHar;": "\u2962",
"lacute;": "\u013a",
"laemptyv;": "\u29b4",
"lagran;": "\u2112",
"lambda;": "\u03bb",
"lang;": "\u27e8",
"langd;": "\u2991",
"langle;": "\u27e8",
"lap;": "\u2a85",
"laquo": "\xab",
"laquo;": "\xab",
"larr;": "\u2190",
"larrb;": "\u21e4",
"larrbfs;": "\u291f",
"larrfs;": "\u291d",
"larrhk;": "\u21a9",
"larrlp;": "\u21ab",
"larrpl;": "\u2939",
"larrsim;": "\u2973",
"larrtl;": "\u21a2",
"lat;": "\u2aab",
"latail;": "\u2919",
"late;": "\u2aad",
"lates;": "\u2aad\ufe00",
"lbarr;": "\u290c",
"lbbrk;": "\u2772",
"lbrace;": "{",
"lbrack;": "[",
"lbrke;": "\u298b",
"lbrksld;": "\u298f",
"lbrkslu;": "\u298d",
"lcaron;": "\u013e",
"lcedil;": "\u013c",
"lceil;": "\u2308",
"lcub;": "{",
"lcy;": "\u043b",
"ldca;": "\u2936",
"ldquo;": "\u201c",
"ldquor;": "\u201e",
"ldrdhar;": "\u2967",
"ldrushar;": "\u294b",
"ldsh;": "\u21b2",
"le;": "\u2264",
"leftarrow;": "\u2190",
"leftarrowtail;": "\u21a2",
"leftharpoondown;": "\u21bd",
"leftharpoonup;": "\u21bc",
"leftleftarrows;": "\u21c7",
"leftrightarrow;": "\u2194",
"leftrightarrows;": "\u21c6",
"leftrightharpoons;": "\u21cb",
"leftrightsquigarrow;": "\u21ad",
"leftthreetimes;": "\u22cb",
"leg;": "\u22da",
"leq;": "\u2264",
"leqq;": "\u2266",
"leqslant;": "\u2a7d",
"les;": "\u2a7d",
"lescc;": "\u2aa8",
"lesdot;": "\u2a7f",
"lesdoto;": "\u2a81",
"lesdotor;": "\u2a83",
"lesg;": "\u22da\ufe00",
"lesges;": "\u2a93",
"lessapprox;": "\u2a85",
"lessdot;": "\u22d6",
"lesseqgtr;": "\u22da",
"lesseqqgtr;": "\u2a8b",
"lessgtr;": "\u2276",
"lesssim;": "\u2272",
"lfisht;": "\u297c",
"lfloor;": "\u230a",
"lfr;": "\U0001d529",
"lg;": "\u2276",
"lgE;": "\u2a91",
"lhard;": "\u21bd",
"lharu;": "\u21bc",
"lharul;": "\u296a",
"lhblk;": "\u2584",
"ljcy;": "\u0459",
"ll;": "\u226a",
"llarr;": "\u21c7",
"llcorner;": "\u231e",
"llhard;": "\u296b",
"lltri;": "\u25fa",
"lmidot;": "\u0140",
"lmoust;": "\u23b0",
"lmoustache;": "\u23b0",
"lnE;": "\u2268",
"lnap;": "\u2a89",
"lnapprox;": "\u2a89",
"lne;": "\u2a87",
"lneq;": "\u2a87",
"lneqq;": "\u2268",
"lnsim;": "\u22e6",
"loang;": "\u27ec",
"loarr;": "\u21fd",
"lobrk;": "\u27e6",
"longleftarrow;": "\u27f5",
"longleftrightarrow;": "\u27f7",
"longmapsto;": "\u27fc",
"longrightarrow;": "\u27f6",
"looparrowleft;": "\u21ab",
"looparrowright;": "\u21ac",
"lopar;": "\u2985",
"lopf;": "\U0001d55d",
"loplus;": "\u2a2d",
"lotimes;": "\u2a34",
"lowast;": "\u2217",
"lowbar;": "_",
"loz;": "\u25ca",
"lozenge;": "\u25ca",
"lozf;": "\u29eb",
"lpar;": "(",
"lparlt;": "\u2993",
"lrarr;": "\u21c6",
"lrcorner;": "\u231f",
"lrhar;": "\u21cb",
"lrhard;": "\u296d",
"lrm;": "\u200e",
"lrtri;": "\u22bf",
"lsaquo;": "\u2039",
"lscr;": "\U0001d4c1",
"lsh;": "\u21b0",
"lsim;": "\u2272",
"lsime;": "\u2a8d",
"lsimg;": "\u2a8f",
"lsqb;": "[",
"lsquo;": "\u2018",
"lsquor;": "\u201a",
"lstrok;": "\u0142",
"lt": "<",
"lt;": "<",
"ltcc;": "\u2aa6",
"ltcir;": "\u2a79",
"ltdot;": "\u22d6",
"lthree;": "\u22cb",
"ltimes;": "\u22c9",
"ltlarr;": "\u2976",
"ltquest;": "\u2a7b",
"ltrPar;": "\u2996",
"ltri;": "\u25c3",
"ltrie;": "\u22b4",
"ltrif;": "\u25c2",
"lurdshar;": "\u294a",
"luruhar;": "\u2966",
"lvertneqq;": "\u2268\ufe00",
"lvnE;": "\u2268\ufe00",
"mDDot;": "\u223a",
"macr": "\xaf",
"macr;": "\xaf",
"male;": "\u2642",
"malt;": "\u2720",
"maltese;": "\u2720",
"map;": "\u21a6",
"mapsto;": "\u21a6",
"mapstodown;": "\u21a7",
"mapstoleft;": "\u21a4",
"mapstoup;": "\u21a5",
"marker;": "\u25ae",
"mcomma;": "\u2a29",
"mcy;": "\u043c",
"mdash;": "\u2014",
"measuredangle;": "\u2221",
"mfr;": "\U0001d52a",
"mho;": "\u2127",
"micro": "\xb5",
"micro;": "\xb5",
"mid;": "\u2223",
"midast;": "*",
"midcir;": "\u2af0",
"middot": "\xb7",
"middot;": "\xb7",
"minus;": "\u2212",
"minusb;": "\u229f",
"minusd;": "\u2238",
"minusdu;": "\u2a2a",
"mlcp;": "\u2adb",
"mldr;": "\u2026",
"mnplus;": "\u2213",
"models;": "\u22a7",
"mopf;": "\U0001d55e",
"mp;": "\u2213",
"mscr;": "\U0001d4c2",
"mstpos;": "\u223e",
"mu;": "\u03bc",
"multimap;": "\u22b8",
"mumap;": "\u22b8",
"nGg;": "\u22d9\u0338",
"nGt;": "\u226b\u20d2",
"nGtv;": "\u226b\u0338",
"nLeftarrow;": "\u21cd",
"nLeftrightarrow;": "\u21ce",
"nLl;": "\u22d8\u0338",
"nLt;": "\u226a\u20d2",
"nLtv;": "\u226a\u0338",
"nRightarrow;": "\u21cf",
"nVDash;": "\u22af",
"nVdash;": "\u22ae",
"nabla;": "\u2207",
"nacute;": "\u0144",
"nang;": "\u2220\u20d2",
"nap;": "\u2249",
"napE;": "\u2a70\u0338",
"napid;": "\u224b\u0338",
"napos;": "\u0149",
"napprox;": "\u2249",
"natur;": "\u266e",
"natural;": "\u266e",
"naturals;": "\u2115",
"nbsp": "\xa0",
"nbsp;": "\xa0",
"nbump;": "\u224e\u0338",
"nbumpe;": "\u224f\u0338",
"ncap;": "\u2a43",
"ncaron;": "\u0148",
"ncedil;": "\u0146",
"ncong;": "\u2247",
"ncongdot;": "\u2a6d\u0338",
"ncup;": "\u2a42",
"ncy;": "\u043d",
"ndash;": "\u2013",
"ne;": "\u2260",
"neArr;": "\u21d7",
"nearhk;": "\u2924",
"nearr;": "\u2197",
"nearrow;": "\u2197",
"nedot;": "\u2250\u0338",
"nequiv;": "\u2262",
"nesear;": "\u2928",
"nesim;": "\u2242\u0338",
"nexist;": "\u2204",
"nexists;": "\u2204",
"nfr;": "\U0001d52b",
"ngE;": "\u2267\u0338",
"nge;": "\u2271",
"ngeq;": "\u2271",
"ngeqq;": "\u2267\u0338",
"ngeqslant;": "\u2a7e\u0338",
"nges;": "\u2a7e\u0338",
"ngsim;": "\u2275",
"ngt;": "\u226f",
"ngtr;": "\u226f",
"nhArr;": "\u21ce",
"nharr;": "\u21ae",
"nhpar;": "\u2af2",
"ni;": "\u220b",
"nis;": "\u22fc",
"nisd;": "\u22fa",
"niv;": "\u220b",
"njcy;": "\u045a",
"nlArr;": "\u21cd",
"nlE;": "\u2266\u0338",
"nlarr;": "\u219a",
"nldr;": "\u2025",
"nle;": "\u2270",
"nleftarrow;": "\u219a",
"nleftrightarrow;": "\u21ae",
"nleq;": "\u2270",
"nleqq;": "\u2266\u0338",
"nleqslant;": "\u2a7d\u0338",
"nles;": "\u2a7d\u0338",
"nless;": "\u226e",
"nlsim;": "\u2274",
"nlt;": "\u226e",
"nltri;": "\u22ea",
"nltrie;": "\u22ec",
"nmid;": "\u2224",
"nopf;": "\U0001d55f",
"not": "\xac",
"not;": "\xac",
"notin;": "\u2209",
"notinE;": "\u22f9\u0338",
"notindot;": "\u22f5\u0338",
"notinva;": "\u2209",
"notinvb;": "\u22f7",
"notinvc;": "\u22f6",
"notni;": "\u220c",
"notniva;": "\u220c",
"notnivb;": "\u22fe",
"notnivc;": "\u22fd",
"npar;": "\u2226",
"nparallel;": "\u2226",
"nparsl;": "\u2afd\u20e5",
"npart;": "\u2202\u0338",
"npolint;": "\u2a14",
"npr;": "\u2280",
"nprcue;": "\u22e0",
"npre;": "\u2aaf\u0338",
"nprec;": "\u2280",
"npreceq;": "\u2aaf\u0338",
"nrArr;": "\u21cf",
"nrarr;": "\u219b",
"nrarrc;": "\u2933\u0338",
"nrarrw;": "\u219d\u0338",
"nrightarrow;": "\u219b",
"nrtri;": "\u22eb",
"nrtrie;": "\u22ed",
"nsc;": "\u2281",
"nsccue;": "\u22e1",
"nsce;": "\u2ab0\u0338",
"nscr;": "\U0001d4c3",
"nshortmid;": "\u2224",
"nshortparallel;": "\u2226",
"nsim;": "\u2241",
"nsime;": "\u2244",
"nsimeq;": "\u2244",
"nsmid;": "\u2224",
"nspar;": "\u2226",
"nsqsube;": "\u22e2",
"nsqsupe;": "\u22e3",
"nsub;": "\u2284",
"nsubE;": "\u2ac5\u0338",
"nsube;": "\u2288",
"nsubset;": "\u2282\u20d2",
"nsubseteq;": "\u2288",
"nsubseteqq;": "\u2ac5\u0338",
"nsucc;": "\u2281",
"nsucceq;": "\u2ab0\u0338",
"nsup;": "\u2285",
"nsupE;": "\u2ac6\u0338",
"nsupe;": "\u2289",
"nsupset;": "\u2283\u20d2",
"nsupseteq;": "\u2289",
"nsupseteqq;": "\u2ac6\u0338",
"ntgl;": "\u2279",
"ntilde": "\xf1",
"ntilde;": "\xf1",
"ntlg;": "\u2278",
"ntriangleleft;": "\u22ea",
"ntrianglelefteq;": "\u22ec",
"ntriangleright;": "\u22eb",
"ntrianglerighteq;": "\u22ed",
"nu;": "\u03bd",
"num;": "#",
"numero;": "\u2116",
"numsp;": "\u2007",
"nvDash;": "\u22ad",
"nvHarr;": "\u2904",
"nvap;": "\u224d\u20d2",
"nvdash;": "\u22ac",
"nvge;": "\u2265\u20d2",
"nvgt;": ">\u20d2",
"nvinfin;": "\u29de",
"nvlArr;": "\u2902",
"nvle;": "\u2264\u20d2",
"nvlt;": "<\u20d2",
"nvltrie;": "\u22b4\u20d2",
"nvrArr;": "\u2903",
"nvrtrie;": "\u22b5\u20d2",
"nvsim;": "\u223c\u20d2",
"nwArr;": "\u21d6",
"nwarhk;": "\u2923",
"nwarr;": "\u2196",
"nwarrow;": "\u2196",
"nwnear;": "\u2927",
"oS;": "\u24c8",
"oacute": "\xf3",
"oacute;": "\xf3",
"oast;": "\u229b",
"ocir;": "\u229a",
"ocirc": "\xf4",
"ocirc;": "\xf4",
"ocy;": "\u043e",
"odash;": "\u229d",
"odblac;": "\u0151",
"odiv;": "\u2a38",
"odot;": "\u2299",
"odsold;": "\u29bc",
"oelig;": "\u0153",
"ofcir;": "\u29bf",
"ofr;": "\U0001d52c",
"ogon;": "\u02db",
"ograve": "\xf2",
"ograve;": "\xf2",
"ogt;": "\u29c1",
"ohbar;": "\u29b5",
"ohm;": "\u03a9",
"oint;": "\u222e",
"olarr;": "\u21ba",
"olcir;": "\u29be",
"olcross;": "\u29bb",
"oline;": "\u203e",
"olt;": "\u29c0",
"omacr;": "\u014d",
"omega;": "\u03c9",
"omicron;": "\u03bf",
"omid;": "\u29b6",
"ominus;": "\u2296",
"oopf;": "\U0001d560",
"opar;": "\u29b7",
"operp;": "\u29b9",
"oplus;": "\u2295",
"or;": "\u2228",
"orarr;": "\u21bb",
"ord;": "\u2a5d",
"order;": "\u2134",
"orderof;": "\u2134",
"ordf": "\xaa",
"ordf;": "\xaa",
"ordm": "\xba",
"ordm;": "\xba",
"origof;": "\u22b6",
"oror;": "\u2a56",
"orslope;": "\u2a57",
"orv;": "\u2a5b",
"oscr;": "\u2134",
"oslash": "\xf8",
"oslash;": "\xf8",
"osol;": "\u2298",
"otilde": "\xf5",
"otilde;": "\xf5",
"otimes;": "\u2297",
"otimesas;": "\u2a36",
"ouml": "\xf6",
"ouml;": "\xf6",
"ovbar;": "\u233d",
"par;": "\u2225",
"para": "\xb6",
"para;": "\xb6",
"parallel;": "\u2225",
"parsim;": "\u2af3",
"parsl;": "\u2afd",
"part;": "\u2202",
"pcy;": "\u043f",
"percnt;": "%",
"period;": ".",
"permil;": "\u2030",
"perp;": "\u22a5",
"pertenk;": "\u2031",
"pfr;": "\U0001d52d",
"phi;": "\u03c6",
"phiv;": "\u03d5",
"phmmat;": "\u2133",
"phone;": "\u260e",
"pi;": "\u03c0",
"pitchfork;": "\u22d4",
"piv;": "\u03d6",
"planck;": "\u210f",
"planckh;": "\u210e",
"plankv;": "\u210f",
"plus;": "+",
"plusacir;": "\u2a23",
"plusb;": "\u229e",
"pluscir;": "\u2a22",
"plusdo;": "\u2214",
"plusdu;": "\u2a25",
"pluse;": "\u2a72",
"plusmn": "\xb1",
"plusmn;": "\xb1",
"plussim;": "\u2a26",
"plustwo;": "\u2a27",
"pm;": "\xb1",
"pointint;": "\u2a15",
"popf;": "\U0001d561",
"pound": "\xa3",
"pound;": "\xa3",
"pr;": "\u227a",
"prE;": "\u2ab3",
"prap;": "\u2ab7",
"prcue;": "\u227c",
"pre;": "\u2aaf",
"prec;": "\u227a",
"precapprox;": "\u2ab7",
"preccurlyeq;": "\u227c",
"preceq;": "\u2aaf",
"precnapprox;": "\u2ab9",
"precneqq;": "\u2ab5",
"precnsim;": "\u22e8",
"precsim;": "\u227e",
"prime;": "\u2032",
"primes;": "\u2119",
"prnE;": "\u2ab5",
"prnap;": "\u2ab9",
"prnsim;": "\u22e8",
"prod;": "\u220f",
"profalar;": "\u232e",
"profline;": "\u2312",
"profsurf;": "\u2313",
"prop;": "\u221d",
"propto;": "\u221d",
"prsim;": "\u227e",
"prurel;": "\u22b0",
"pscr;": "\U0001d4c5",
"psi;": "\u03c8",
"puncsp;": "\u2008",
"qfr;": "\U0001d52e",
"qint;": "\u2a0c",
"qopf;": "\U0001d562",
"qprime;": "\u2057",
"qscr;": "\U0001d4c6",
"quaternions;": "\u210d",
"quatint;": "\u2a16",
"quest;": "?",
"questeq;": "\u225f",
"quot": "\"",
"quot;": "\"",
"rAarr;": "\u21db",
"rArr;": "\u21d2",
"rAtail;": "\u291c",
"rBarr;": "\u290f",
"rHar;": "\u2964",
"race;": "\u223d\u0331",
"racute;": "\u0155",
"radic;": "\u221a",
"raemptyv;": "\u29b3",
"rang;": "\u27e9",
"rangd;": "\u2992",
"range;": "\u29a5",
"rangle;": "\u27e9",
"raquo": "\xbb",
"raquo;": "\xbb",
"rarr;": "\u2192",
"rarrap;": "\u2975",
"rarrb;": "\u21e5",
"rarrbfs;": "\u2920",
"rarrc;": "\u2933",
"rarrfs;": "\u291e",
"rarrhk;": "\u21aa",
"rarrlp;": "\u21ac",
"rarrpl;": "\u2945",
"rarrsim;": "\u2974",
"rarrtl;": "\u21a3",
"rarrw;": "\u219d",
"ratail;": "\u291a",
"ratio;": "\u2236",
"rationals;": "\u211a",
"rbarr;": "\u290d",
"rbbrk;": "\u2773",
"rbrace;": "}",
"rbrack;": "]",
"rbrke;": "\u298c",
"rbrksld;": "\u298e",
"rbrkslu;": "\u2990",
"rcaron;": "\u0159",
"rcedil;": "\u0157",
"rceil;": "\u2309",
"rcub;": "}",
"rcy;": "\u0440",
"rdca;": "\u2937",
"rdldhar;": "\u2969",
"rdquo;": "\u201d",
"rdquor;": "\u201d",
"rdsh;": "\u21b3",
"real;": "\u211c",
"realine;": "\u211b",
"realpart;": "\u211c",
"reals;": "\u211d",
"rect;": "\u25ad",
"reg": "\xae",
"reg;": "\xae",
"rfisht;": "\u297d",
"rfloor;": "\u230b",
"rfr;": "\U0001d52f",
"rhard;": "\u21c1",
"rharu;": "\u21c0",
"rharul;": "\u296c",
"rho;": "\u03c1",
"rhov;": "\u03f1",
"rightarrow;": "\u2192",
"rightarrowtail;": "\u21a3",
"rightharpoondown;": "\u21c1",
"rightharpoonup;": "\u21c0",
"rightleftarrows;": "\u21c4",
"rightleftharpoons;": "\u21cc",
"rightrightarrows;": "\u21c9",
"rightsquigarrow;": "\u219d",
"rightthreetimes;": "\u22cc",
"ring;": "\u02da",
"risingdotseq;": "\u2253",
"rlarr;": "\u21c4",
"rlhar;": "\u21cc",
"rlm;": "\u200f",
"rmoust;": "\u23b1",
"rmoustache;": "\u23b1",
"rnmid;": "\u2aee",
"roang;": "\u27ed",
"roarr;": "\u21fe",
"robrk;": "\u27e7",
"ropar;": "\u2986",
"ropf;": "\U0001d563",
"roplus;": "\u2a2e",
"rotimes;": "\u2a35",
"rpar;": ")",
"rpargt;": "\u2994",
"rppolint;": "\u2a12",
"rrarr;": "\u21c9",
"rsaquo;": "\u203a",
"rscr;": "\U0001d4c7",
"rsh;": "\u21b1",
"rsqb;": "]",
"rsquo;": "\u2019",
"rsquor;": "\u2019",
"rthree;": "\u22cc",
"rtimes;": "\u22ca",
"rtri;": "\u25b9",
"rtrie;": "\u22b5",
"rtrif;": "\u25b8",
"rtriltri;": "\u29ce",
"ruluhar;": "\u2968",
"rx;": "\u211e",
"sacute;": "\u015b",
"sbquo;": "\u201a",
"sc;": "\u227b",
"scE;": "\u2ab4",
"scap;": "\u2ab8",
"scaron;": "\u0161",
"sccue;": "\u227d",
"sce;": "\u2ab0",
"scedil;": "\u015f",
"scirc;": "\u015d",
"scnE;": "\u2ab6",
"scnap;": "\u2aba",
"scnsim;": "\u22e9",
"scpolint;": "\u2a13",
"scsim;": "\u227f",
"scy;": "\u0441",
"sdot;": "\u22c5",
"sdotb;": "\u22a1",
"sdote;": "\u2a66",
"seArr;": "\u21d8",
"searhk;": "\u2925",
"searr;": "\u2198",
"searrow;": "\u2198",
"sect": "\xa7",
"sect;": "\xa7",
"semi;": ";",
"seswar;": "\u2929",
"setminus;": "\u2216",
"setmn;": "\u2216",
"sext;": "\u2736",
"sfr;": "\U0001d530",
"sfrown;": "\u2322",
"sharp;": "\u266f",
"shchcy;": "\u0449",
"shcy;": "\u0448",
"shortmid;": "\u2223",
"shortparallel;": "\u2225",
"shy": "\xad",
"shy;": "\xad",
"sigma;": "\u03c3",
"sigmaf;": "\u03c2",
"sigmav;": "\u03c2",
"sim;": "\u223c",
"simdot;": "\u2a6a",
"sime;": "\u2243",
"simeq;": "\u2243",
"simg;": "\u2a9e",
"simgE;": "\u2aa0",
"siml;": "\u2a9d",
"simlE;": "\u2a9f",
"simne;": "\u2246",
"simplus;": "\u2a24",
"simrarr;": "\u2972",
"slarr;": "\u2190",
"smallsetminus;": "\u2216",
"smashp;": "\u2a33",
"smeparsl;": "\u29e4",
"smid;": "\u2223",
"smile;": "\u2323",
"smt;": "\u2aaa",
"smte;": "\u2aac",
"smtes;": "\u2aac\ufe00",
"softcy;": "\u044c",
"sol;": "/",
"solb;": "\u29c4",
"solbar;": "\u233f",
"sopf;": "\U0001d564",
"spades;": "\u2660",
"spadesuit;": "\u2660",
"spar;": "\u2225",
"sqcap;": "\u2293",
"sqcaps;": "\u2293\ufe00",
"sqcup;": "\u2294",
"sqcups;": "\u2294\ufe00",
"sqsub;": "\u228f",
"sqsube;": "\u2291",
"sqsubset;": "\u228f",
"sqsubseteq;": "\u2291",
"sqsup;": "\u2290",
"sqsupe;": "\u2292",
"sqsupset;": "\u2290",
"sqsupseteq;": "\u2292",
"squ;": "\u25a1",
"square;": "\u25a1",
"squarf;": "\u25aa",
"squf;": "\u25aa",
"srarr;": "\u2192",
"sscr;": "\U0001d4c8",
"ssetmn;": "\u2216",
"ssmile;": "\u2323",
"sstarf;": "\u22c6",
"star;": "\u2606",
"starf;": "\u2605",
"straightepsilon;": "\u03f5",
"straightphi;": "\u03d5",
"strns;": "\xaf",
"sub;": "\u2282",
"subE;": "\u2ac5",
"subdot;": "\u2abd",
"sube;": "\u2286",
"subedot;": "\u2ac3",
"submult;": "\u2ac1",
"subnE;": "\u2acb",
"subne;": "\u228a",
"subplus;": "\u2abf",
"subrarr;": "\u2979",
"subset;": "\u2282",
"subseteq;": "\u2286",
"subseteqq;": "\u2ac5",
"subsetneq;": "\u228a",
"subsetneqq;": "\u2acb",
"subsim;": "\u2ac7",
"subsub;": "\u2ad5",
"subsup;": "\u2ad3",
"succ;": "\u227b",
"succapprox;": "\u2ab8",
"succcurlyeq;": "\u227d",
"succeq;": "\u2ab0",
"succnapprox;": "\u2aba",
"succneqq;": "\u2ab6",
"succnsim;": "\u22e9",
"succsim;": "\u227f",
"sum;": "\u2211",
"sung;": "\u266a",
"sup1": "\xb9",
"sup1;": "\xb9",
"sup2": "\xb2",
"sup2;": "\xb2",
"sup3": "\xb3",
"sup3;": "\xb3",
"sup;": "\u2283",
"supE;": "\u2ac6",
"supdot;": "\u2abe",
"supdsub;": "\u2ad8",
"supe;": "\u2287",
"supedot;": "\u2ac4",
"suphsol;": "\u27c9",
"suphsub;": "\u2ad7",
"suplarr;": "\u297b",
"supmult;": "\u2ac2",
"supnE;": "\u2acc",
"supne;": "\u228b",
"supplus;": "\u2ac0",
"supset;": "\u2283",
"supseteq;": "\u2287",
"supseteqq;": "\u2ac6",
"supsetneq;": "\u228b",
"supsetneqq;": "\u2acc",
"supsim;": "\u2ac8",
"supsub;": "\u2ad4",
"supsup;": "\u2ad6",
"swArr;": "\u21d9",
"swarhk;": "\u2926",
"swarr;": "\u2199",
"swarrow;": "\u2199",
"swnwar;": "\u292a",
"szlig": "\xdf",
"szlig;": "\xdf",
"target;": "\u2316",
"tau;": "\u03c4",
"tbrk;": "\u23b4",
"tcaron;": "\u0165",
"tcedil;": "\u0163",
"tcy;": "\u0442",
"tdot;": "\u20db",
"telrec;": "\u2315",
"tfr;": "\U0001d531",
"there4;": "\u2234",
"therefore;": "\u2234",
"theta;": "\u03b8",
"thetasym;": "\u03d1",
"thetav;": "\u03d1",
"thickapprox;": "\u2248",
"thicksim;": "\u223c",
"thinsp;": "\u2009",
"thkap;": "\u2248",
"thksim;": "\u223c",
"thorn": "\xfe",
"thorn;": "\xfe",
"tilde;": "\u02dc",
"times": "\xd7",
"times;": "\xd7",
"timesb;": "\u22a0",
"timesbar;": "\u2a31",
"timesd;": "\u2a30",
"tint;": "\u222d",
"toea;": "\u2928",
"top;": "\u22a4",
"topbot;": "\u2336",
"topcir;": "\u2af1",
"topf;": "\U0001d565",
"topfork;": "\u2ada",
"tosa;": "\u2929",
"tprime;": "\u2034",
"trade;": "\u2122",
"triangle;": "\u25b5",
"triangledown;": "\u25bf",
"triangleleft;": "\u25c3",
"trianglelefteq;": "\u22b4",
"triangleq;": "\u225c",
"triangleright;": "\u25b9",
"trianglerighteq;": "\u22b5",
"tridot;": "\u25ec",
"trie;": "\u225c",
"triminus;": "\u2a3a",
"triplus;": "\u2a39",
"trisb;": "\u29cd",
"tritime;": "\u2a3b",
"trpezium;": "\u23e2",
"tscr;": "\U0001d4c9",
"tscy;": "\u0446",
"tshcy;": "\u045b",
"tstrok;": "\u0167",
"twixt;": "\u226c",
"twoheadleftarrow;": "\u219e",
"twoheadrightarrow;": "\u21a0",
"uArr;": "\u21d1",
"uHar;": "\u2963",
"uacute": "\xfa",
"uacute;": "\xfa",
"uarr;": "\u2191",
"ubrcy;": "\u045e",
"ubreve;": "\u016d",
"ucirc": "\xfb",
"ucirc;": "\xfb",
"ucy;": "\u0443",
"udarr;": "\u21c5",
"udblac;": "\u0171",
"udhar;": "\u296e",
"ufisht;": "\u297e",
"ufr;": "\U0001d532",
"ugrave": "\xf9",
"ugrave;": "\xf9",
"uharl;": "\u21bf",
"uharr;": "\u21be",
"uhblk;": "\u2580",
"ulcorn;": "\u231c",
"ulcorner;": "\u231c",
"ulcrop;": "\u230f",
"ultri;": "\u25f8",
"umacr;": "\u016b",
"uml": "\xa8",
"uml;": "\xa8",
"uogon;": "\u0173",
"uopf;": "\U0001d566",
"uparrow;": "\u2191",
"updownarrow;": "\u2195",
"upharpoonleft;": "\u21bf",
"upharpoonright;": "\u21be",
"uplus;": "\u228e",
"upsi;": "\u03c5",
"upsih;": "\u03d2",
"upsilon;": "\u03c5",
"upuparrows;": "\u21c8",
"urcorn;": "\u231d",
"urcorner;": "\u231d",
"urcrop;": "\u230e",
"uring;": "\u016f",
"urtri;": "\u25f9",
"uscr;": "\U0001d4ca",
"utdot;": "\u22f0",
"utilde;": "\u0169",
"utri;": "\u25b5",
"utrif;": "\u25b4",
"uuarr;": "\u21c8",
"uuml": "\xfc",
"uuml;": "\xfc",
"uwangle;": "\u29a7",
"vArr;": "\u21d5",
"vBar;": "\u2ae8",
"vBarv;": "\u2ae9",
"vDash;": "\u22a8",
"vangrt;": "\u299c",
"varepsilon;": "\u03f5",
"varkappa;": "\u03f0",
"varnothing;": "\u2205",
"varphi;": "\u03d5",
"varpi;": "\u03d6",
"varpropto;": "\u221d",
"varr;": "\u2195",
"varrho;": "\u03f1",
"varsigma;": "\u03c2",
"varsubsetneq;": "\u228a\ufe00",
"varsubsetneqq;": "\u2acb\ufe00",
"varsupsetneq;": "\u228b\ufe00",
"varsupsetneqq;": "\u2acc\ufe00",
"vartheta;": "\u03d1",
"vartriangleleft;": "\u22b2",
"vartriangleright;": "\u22b3",
"vcy;": "\u0432",
"vdash;": "\u22a2",
"vee;": "\u2228",
"veebar;": "\u22bb",
"veeeq;": "\u225a",
"vellip;": "\u22ee",
"verbar;": "|",
"vert;": "|",
"vfr;": "\U0001d533",
"vltri;": "\u22b2",
"vnsub;": "\u2282\u20d2",
"vnsup;": "\u2283\u20d2",
"vopf;": "\U0001d567",
"vprop;": "\u221d",
"vrtri;": "\u22b3",
"vscr;": "\U0001d4cb",
"vsubnE;": "\u2acb\ufe00",
"vsubne;": "\u228a\ufe00",
"vsupnE;": "\u2acc\ufe00",
"vsupne;": "\u228b\ufe00",
"vzigzag;": "\u299a",
"wcirc;": "\u0175",
"wedbar;": "\u2a5f",
"wedge;": "\u2227",
"wedgeq;": "\u2259",
"weierp;": "\u2118",
"wfr;": "\U0001d534",
"wopf;": "\U0001d568",
"wp;": "\u2118",
"wr;": "\u2240",
"wreath;": "\u2240",
"wscr;": "\U0001d4cc",
"xcap;": "\u22c2",
"xcirc;": "\u25ef",
"xcup;": "\u22c3",
"xdtri;": "\u25bd",
"xfr;": "\U0001d535",
"xhArr;": "\u27fa",
"xharr;": "\u27f7",
"xi;": "\u03be",
"xlArr;": "\u27f8",
"xlarr;": "\u27f5",
"xmap;": "\u27fc",
"xnis;": "\u22fb",
"xodot;": "\u2a00",
"xopf;": "\U0001d569",
"xoplus;": "\u2a01",
"xotime;": "\u2a02",
"xrArr;": "\u27f9",
"xrarr;": "\u27f6",
"xscr;": "\U0001d4cd",
"xsqcup;": "\u2a06",
"xuplus;": "\u2a04",
"xutri;": "\u25b3",
"xvee;": "\u22c1",
"xwedge;": "\u22c0",
"yacute": "\xfd",
"yacute;": "\xfd",
"yacy;": "\u044f",
"ycirc;": "\u0177",
"ycy;": "\u044b",
"yen": "\xa5",
"yen;": "\xa5",
"yfr;": "\U0001d536",
"yicy;": "\u0457",
"yopf;": "\U0001d56a",
"yscr;": "\U0001d4ce",
"yucy;": "\u044e",
"yuml": "\xff",
"yuml;": "\xff",
"zacute;": "\u017a",
"zcaron;": "\u017e",
"zcy;": "\u0437",
"zdot;": "\u017c",
"zeetrf;": "\u2128",
"zeta;": "\u03b6",
"zfr;": "\U0001d537",
"zhcy;": "\u0436",
"zigrarr;": "\u21dd",
"zopf;": "\U0001d56b",
"zscr;": "\U0001d4cf",
"zwj;": "\u200d",
"zwnj;": "\u200c",
}
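# Keys ending in ";" are the canonical named references; the bare variants
# ("amp", "lt", ...) exist only for legacy semicolon-less references, and a
# consumer is expected to match the longest key available, e.g.
#     entities["amp;"] == entities["amp"] == "&"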
replacementCharacters = {
0x0: "\uFFFD",
0x0d: "\u000D",
0x80: "\u20AC",
0x81: "\u0081",
0x82: "\u201A",
0x83: "\u0192",
0x84: "\u201E",
0x85: "\u2026",
0x86: "\u2020",
0x87: "\u2021",
0x88: "\u02C6",
0x89: "\u2030",
0x8A: "\u0160",
0x8B: "\u2039",
0x8C: "\u0152",
0x8D: "\u008D",
0x8E: "\u017D",
0x8F: "\u008F",
0x90: "\u0090",
0x91: "\u2018",
0x92: "\u2019",
0x93: "\u201C",
0x94: "\u201D",
0x95: "\u2022",
0x96: "\u2013",
0x97: "\u2014",
0x98: "\u02DC",
0x99: "\u2122",
0x9A: "\u0161",
0x9B: "\u203A",
0x9C: "\u0153",
0x9D: "\u009D",
0x9E: "\u017E",
0x9F: "\u0178",
}
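# Sketch of the numeric-character-reference fixup this table supports
# (stated as an assumption about the tokenizer, simplified):
#     code = int(digit_string, base)
#     char = replacementCharacters.get(code, chr(code))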
encodings = {
'437': 'cp437',
'850': 'cp850',
'852': 'cp852',
'855': 'cp855',
'857': 'cp857',
'860': 'cp860',
'861': 'cp861',
'862': 'cp862',
'863': 'cp863',
'865': 'cp865',
'866': 'cp866',
'869': 'cp869',
'ansix341968': 'ascii',
'ansix341986': 'ascii',
'arabic': 'iso8859-6',
'ascii': 'ascii',
'asmo708': 'iso8859-6',
'big5': 'big5',
'big5hkscs': 'big5hkscs',
'chinese': 'gbk',
'cp037': 'cp037',
'cp1026': 'cp1026',
'cp154': 'ptcp154',
'cp367': 'ascii',
'cp424': 'cp424',
'cp437': 'cp437',
'cp500': 'cp500',
'cp775': 'cp775',
'cp819': 'windows-1252',
'cp850': 'cp850',
'cp852': 'cp852',
'cp855': 'cp855',
'cp857': 'cp857',
'cp860': 'cp860',
'cp861': 'cp861',
'cp862': 'cp862',
'cp863': 'cp863',
'cp864': 'cp864',
'cp865': 'cp865',
'cp866': 'cp866',
'cp869': 'cp869',
'cp936': 'gbk',
'cpgr': 'cp869',
'cpis': 'cp861',
'csascii': 'ascii',
'csbig5': 'big5',
'cseuckr': 'cp949',
'cseucpkdfmtjapanese': 'euc_jp',
'csgb2312': 'gbk',
'cshproman8': 'hp-roman8',
'csibm037': 'cp037',
'csibm1026': 'cp1026',
'csibm424': 'cp424',
'csibm500': 'cp500',
'csibm855': 'cp855',
'csibm857': 'cp857',
'csibm860': 'cp860',
'csibm861': 'cp861',
'csibm863': 'cp863',
'csibm864': 'cp864',
'csibm865': 'cp865',
'csibm866': 'cp866',
'csibm869': 'cp869',
'csiso2022jp': 'iso2022_jp',
'csiso2022jp2': 'iso2022_jp_2',
'csiso2022kr': 'iso2022_kr',
'csiso58gb231280': 'gbk',
'csisolatin1': 'windows-1252',
'csisolatin2': 'iso8859-2',
'csisolatin3': 'iso8859-3',
'csisolatin4': 'iso8859-4',
'csisolatin5': 'windows-1254',
'csisolatin6': 'iso8859-10',
'csisolatinarabic': 'iso8859-6',
'csisolatincyrillic': 'iso8859-5',
'csisolatingreek': 'iso8859-7',
'csisolatinhebrew': 'iso8859-8',
'cskoi8r': 'koi8-r',
'csksc56011987': 'cp949',
'cspc775baltic': 'cp775',
'cspc850multilingual': 'cp850',
'cspc862latinhebrew': 'cp862',
'cspc8codepage437': 'cp437',
'cspcp852': 'cp852',
'csptcp154': 'ptcp154',
'csshiftjis': 'shift_jis',
'csunicode11utf7': 'utf-7',
'cyrillic': 'iso8859-5',
'cyrillicasian': 'ptcp154',
'ebcdiccpbe': 'cp500',
'ebcdiccpca': 'cp037',
'ebcdiccpch': 'cp500',
'ebcdiccphe': 'cp424',
'ebcdiccpnl': 'cp037',
'ebcdiccpus': 'cp037',
'ebcdiccpwt': 'cp037',
'ecma114': 'iso8859-6',
'ecma118': 'iso8859-7',
'elot928': 'iso8859-7',
'eucjp': 'euc_jp',
'euckr': 'cp949',
'extendedunixcodepackedformatforjapanese': 'euc_jp',
'gb18030': 'gb18030',
'gb2312': 'gbk',
'gb231280': 'gbk',
'gbk': 'gbk',
'greek': 'iso8859-7',
'greek8': 'iso8859-7',
'hebrew': 'iso8859-8',
'hproman8': 'hp-roman8',
'hzgb2312': 'hz',
'ibm037': 'cp037',
'ibm1026': 'cp1026',
'ibm367': 'ascii',
'ibm424': 'cp424',
'ibm437': 'cp437',
'ibm500': 'cp500',
'ibm775': 'cp775',
'ibm819': 'windows-1252',
'ibm850': 'cp850',
'ibm852': 'cp852',
'ibm855': 'cp855',
'ibm857': 'cp857',
'ibm860': 'cp860',
'ibm861': 'cp861',
'ibm862': 'cp862',
'ibm863': 'cp863',
'ibm864': 'cp864',
'ibm865': 'cp865',
'ibm866': 'cp866',
'ibm869': 'cp869',
'iso2022jp': 'iso2022_jp',
'iso2022jp2': 'iso2022_jp_2',
'iso2022kr': 'iso2022_kr',
'iso646irv1991': 'ascii',
'iso646us': 'ascii',
'iso88591': 'windows-1252',
'iso885910': 'iso8859-10',
'iso8859101992': 'iso8859-10',
'iso885911987': 'windows-1252',
'iso885913': 'iso8859-13',
'iso885914': 'iso8859-14',
'iso8859141998': 'iso8859-14',
'iso885915': 'iso8859-15',
'iso885916': 'iso8859-16',
'iso8859162001': 'iso8859-16',
'iso88592': 'iso8859-2',
'iso885921987': 'iso8859-2',
'iso88593': 'iso8859-3',
'iso885931988': 'iso8859-3',
'iso88594': 'iso8859-4',
'iso885941988': 'iso8859-4',
'iso88595': 'iso8859-5',
'iso885951988': 'iso8859-5',
'iso88596': 'iso8859-6',
'iso885961987': 'iso8859-6',
'iso88597': 'iso8859-7',
'iso885971987': 'iso8859-7',
'iso88598': 'iso8859-8',
'iso885981988': 'iso8859-8',
'iso88599': 'windows-1254',
'iso885991989': 'windows-1254',
'isoceltic': 'iso8859-14',
'isoir100': 'windows-1252',
'isoir101': 'iso8859-2',
'isoir109': 'iso8859-3',
'isoir110': 'iso8859-4',
'isoir126': 'iso8859-7',
'isoir127': 'iso8859-6',
'isoir138': 'iso8859-8',
'isoir144': 'iso8859-5',
'isoir148': 'windows-1254',
'isoir149': 'cp949',
'isoir157': 'iso8859-10',
'isoir199': 'iso8859-14',
'isoir226': 'iso8859-16',
'isoir58': 'gbk',
'isoir6': 'ascii',
'koi8r': 'koi8-r',
'koi8u': 'koi8-u',
'korean': 'cp949',
'ksc5601': 'cp949',
'ksc56011987': 'cp949',
'ksc56011989': 'cp949',
'l1': 'windows-1252',
'l10': 'iso8859-16',
'l2': 'iso8859-2',
'l3': 'iso8859-3',
'l4': 'iso8859-4',
'l5': 'windows-1254',
'l6': 'iso8859-10',
'l8': 'iso8859-14',
'latin1': 'windows-1252',
'latin10': 'iso8859-16',
'latin2': 'iso8859-2',
'latin3': 'iso8859-3',
'latin4': 'iso8859-4',
'latin5': 'windows-1254',
'latin6': 'iso8859-10',
'latin8': 'iso8859-14',
'latin9': 'iso8859-15',
'ms936': 'gbk',
'mskanji': 'shift_jis',
'pt154': 'ptcp154',
'ptcp154': 'ptcp154',
'r8': 'hp-roman8',
'roman8': 'hp-roman8',
'shiftjis': 'shift_jis',
'tis620': 'cp874',
'unicode11utf7': 'utf-7',
'us': 'ascii',
'usascii': 'ascii',
'utf16': 'utf-16',
'utf16be': 'utf-16-be',
'utf16le': 'utf-16-le',
'utf8': 'utf-8',
'windows1250': 'cp1250',
'windows1251': 'cp1251',
'windows1252': 'cp1252',
'windows1253': 'cp1253',
'windows1254': 'cp1254',
'windows1255': 'cp1255',
'windows1256': 'cp1256',
'windows1257': 'cp1257',
'windows1258': 'cp1258',
'windows936': 'gbk',
'x-x-big5': 'big5'}
tokenTypes = {
"Doctype": 0,
"Characters": 1,
"SpaceCharacters": 2,
"StartTag": 3,
"EndTag": 4,
"EmptyTag": 5,
"Comment": 6,
"ParseError": 7
}
tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"],
tokenTypes["EmptyTag"]))
prefixes = dict([(v, k) for k, v in namespaces.items()])
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
pass
class ReparseException(Exception):
pass
| apache-2.0 | -6,417,520,440,085,506,000 | -1,324,894,430,979,093,000 | 27.13982 | 94 | 0.498603 | false |
DVM-BITS-Pilani/BITS-BOSM-2015 | bosm2015/events/migrations/0001_initial.py | 2 | 1132 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import ckeditor.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='EventNew',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=100)),
('content', ckeditor.fields.RichTextField()),
('description', models.CharField(max_length=140, blank=True)),
('icon', models.ImageField(upload_to=b'icons', blank=True)),
('date', models.CharField(default=b'TBA', max_length=100)),
('time', models.CharField(default=b'TBA', max_length=100)),
('venue', models.CharField(default=b'TBA', max_length=100)),
('endtime', models.CharField(default=b'TBA', max_length=100)),
],
options={
'verbose_name_plural': 'events',
},
),
]
| gpl-2.0 | 3,606,316,723,701,133,000 | 2,975,911,735,693,816,300 | 35.516129 | 114 | 0.555654 | false |
ibinti/intellij-community | python/helpers/pycharm/django_manage_commands_provider/_xml.py | 78 | 6083 | # coding=utf-8
"""
This module exports information about manage commands and their options from Django to PyCharm.
Information is provided in XML (to prevent encoding troubles and to simplify deserialization on the Java side).
Right after the XML declaration, before the root tag, it contains the following comment:
<!--jb pycharm data start-->
Use it to make sure you found the correct XML.
It does not have a schema (yet!), but here is the XML format it uses.
<commandInfo-array> -- root
<commandInfo args="args description" help="human readable text" name="command name"> -- info about command
<option help="option help" numberOfArgs="number of values (nargs)" type="option_type (see below)"> -- one entry for each option
<longNames>--each-for-one-long-opt-name</longNames>
<shortNames>-each-for-one-short-name</shortNames>
<choices>--each-for-one-available-value</choices>
</option>
</commandInfo>
</commandInfo-array>
"option_type" is only set if "numberOfArgs" > 0, and it can be: "int" (means integer),
"choices" (means opt can have one of the values, provided in choices) or "str" that means "string" (option may have any value)
Classes like DjangoCommandsInfo is used on Java side.
TODO: Since Django 1.8 we can fetch much more info from argparse like positional argument names, nargs etc. Use it!
"""
from xml.dom import minidom
from xml.dom.minidom import Element
from _jb_utils import VersionAgnosticUtils
__author__ = 'Ilya.Kazakevich'
class XmlDumper(object):
""""
Creates an API to generate XML provided in this package.
How to use:
* dumper.start_command(..)
* dumper.add_command_option(..) # optional
* dumper.close_command()
* print(dumper.xml)
"""
__command_info_tag = "commandInfo" # Name of main tag
def __init__(self):
self.__document = minidom.Document()
self.__root = self.__document.createElement("{0}-array".format(XmlDumper.__command_info_tag))
self.__document.appendChild(self.__document.createComment("jb pycharm data start"))
self.__document.appendChild(self.__root)
self.__command_element = None
def __create_text_array(self, parent, tag_name, values):
"""
        Creates an array of text elements and adds them to the parent.
        :type parent: Element
        :type tag_name: str
        :type values: list of str
        :param parent: destination to add the new elements to
        :param tag_name: name of the tag created to hold the text
        :param values: list of values to add
"""
for value in values:
tag = self.__document.createElement(tag_name)
text = self.__document.createTextNode(str(value))
tag.appendChild(text)
parent.appendChild(tag)
def start_command(self, command_name, command_help_text):
"""
        Starts a manage command.
:param command_name: command name
:param command_help_text: command help
"""
assert not bool(self.__command_element), "Already in command"
self.__command_element = self.__document.createElement(XmlDumper.__command_info_tag)
self.__command_element.setAttribute("name", command_name)
self.__command_element.setAttribute("help", command_help_text)
self.__root.appendChild(self.__command_element)
def set_arguments(self, command_args_text):
"""
Adds "arguments help" to command.
TODO: Use real list of arguments instead of this text when people migrate to argparse (Dj. 1.8)
:param command_args_text: command text for args
        :type command_args_text: str
"""
assert bool(self.__command_element), "Not in a a command"
self.__command_element.setAttribute("args", VersionAgnosticUtils().to_unicode(command_args_text))
def add_command_option(self, long_opt_names, short_opt_names, help_text, argument_info):
"""
        Adds a command option.
        :param argument_info: None if the option does not accept any arguments, or a tuple of (num_of_args, type_info) \
        where num_of_args is an int > 0 and type_info is a str representing the type (only "int" and "string" are supported) \
        or a list of the available values in the case of choices
:param long_opt_names: list of long opt names
:param short_opt_names: list of short opt names
:param help_text: help text
        :type long_opt_names: iterable of str
        :type short_opt_names: iterable of str
        :type help_text: str
        :type argument_info: tuple or None
"""
assert isinstance(self.__command_element, Element), "Add option in command only"
option = self.__document.createElement("option")
opt_type_to_report = None
num_of_args = 0
if argument_info:
(num_of_args, type_info) = argument_info
if isinstance(type_info, list):
self.__create_text_array(option, "choices", type_info)
opt_type_to_report = "choices"
else:
opt_type_to_report = "int" if str(type_info) == "int" else "str"
if long_opt_names:
self.__create_text_array(option, "longNames", long_opt_names)
if short_opt_names:
self.__create_text_array(option, "shortNames", short_opt_names)
if opt_type_to_report:
option.setAttribute("type", opt_type_to_report)
option.setAttribute("help", help_text)
if num_of_args:
option.setAttribute("numberOfArgs", str(num_of_args))
self.__command_element.appendChild(option)
    def close_command(self):
        """
        Closes the currently opened command.
        """
        assert bool(self.__command_element), "No command to close"
        self.__command_element = None
@property
def xml(self):
"""
        :return: current commands as XML, as described in the module docstring
        :rtype: str
"""
document = self.__document.toxml(encoding="utf-8")
return VersionAgnosticUtils().to_unicode(document.decode("utf-8") if isinstance(document, bytes) else document)
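if __name__ == "__main__":
    # Minimal usage sketch, for illustration only; the command name, option and
    # argument values below are hypothetical, and the real caller lives inside
    # PyCharm's manage.py integration rather than in a __main__ block.
    dumper = XmlDumper()
    dumper.start_command("example", "a hypothetical command used for illustration")
    dumper.set_arguments("[app_label ...]")
    dumper.add_command_option(
        long_opt_names=["--verbosity"],
        short_opt_names=["-v"],
        help_text="verbosity level",
        argument_info=(1, ["0", "1", "2", "3"]),  # one argument, chosen from four values
    )
    dumper.close_command()
    print(dumper.xml)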
| apache-2.0 | 8,675,116,974,259,739,000 | -2,309,499,542,646,660,600 | 36.091463 | 127 | 0.646063 | false |
redhat-openstack/django | django/contrib/formtools/tests/wizard/wizardtests/forms.py | 313 | 2203 | import os
import tempfile
from django import forms
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
from django.forms.formsets import formset_factory
from django.forms.models import modelformset_factory
from django.http import HttpResponse
from django.template import Template, Context
from django.contrib.formtools.wizard.views import WizardView
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)
class Page1(forms.Form):
name = forms.CharField(max_length=100)
user = forms.ModelChoiceField(queryset=User.objects.all())
thirsty = forms.NullBooleanField()
class Page2(forms.Form):
address1 = forms.CharField(max_length=100)
address2 = forms.CharField(max_length=100)
file1 = forms.FileField()
class Page3(forms.Form):
random_crap = forms.CharField(max_length=100)
Page4 = formset_factory(Page3, extra=2)
class ContactWizard(WizardView):
file_storage = temp_storage
def done(self, form_list, **kwargs):
c = Context({
'form_list': [x.cleaned_data for x in form_list],
'all_cleaned_data': self.get_all_cleaned_data(),
})
for form in self.form_list.keys():
c[form] = self.get_cleaned_data_for_step(form)
c['this_will_fail'] = self.get_cleaned_data_for_step('this_will_fail')
return HttpResponse(Template('').render(c))
def get_context_data(self, form, **kwargs):
context = super(ContactWizard, self).get_context_data(form, **kwargs)
if self.storage.current_step == 'form2':
context.update({'another_var': True})
return context
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ('username', 'email')
UserFormSet = modelformset_factory(User, form=UserForm)
class SessionContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
class CookieContactWizard(ContactWizard):
storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
| bsd-3-clause | -5,342,199,912,334,142,000 | 8,865,828,221,490,668,000 | 31.880597 | 84 | 0.71675 | false |
zaeleus/rust | src/grammar/testparser.py | 80 | 2564 | #!/usr/bin/env python
#
# Copyright 2015 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
# ignore-tidy-linelength
import sys
import os
import subprocess
import argparse
# usage: testparser.py [-h] [-p PARSER [PARSER ...]] -s SOURCE_DIR
# Parsers should read from stdin and return exit status 0 for a
# successful parse, and nonzero for an unsuccessful parse
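# Example invocation (paths are hypothetical, for illustration only):
#   ./testparser.py -p path/to/parser-a path/to/parser-b -s ../../src/test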
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--parser', nargs='+')
parser.add_argument('-s', '--source-dir', nargs=1, required=True)
args = parser.parse_args(sys.argv[1:])
total = 0
ok = {}
bad = {}
for parser in args.parser:
ok[parser] = 0
bad[parser] = []
devnull = open(os.devnull, 'w')
print("\n")
for base, dirs, files in os.walk(args.source_dir[0]):
for f in filter(lambda p: p.endswith('.rs'), files):
p = os.path.join(base, f)
parse_fail = 'parse-fail' in p
if sys.version_info.major == 3:
lines = open(p, encoding='utf-8').readlines()
else:
lines = open(p).readlines()
if any('ignore-test' in line or 'ignore-lexer-test' in line for line in lines):
continue
total += 1
for parser in args.parser:
if subprocess.call(parser, stdin=open(p), stderr=subprocess.STDOUT, stdout=devnull) == 0:
if parse_fail:
bad[parser].append(p)
else:
ok[parser] += 1
else:
if parse_fail:
ok[parser] += 1
else:
bad[parser].append(p)
parser_stats = ', '.join(['{}: {}'.format(parser, ok[parser]) for parser in args.parser])
sys.stdout.write("\033[K\r total: {}, {}, scanned {}"
.format(total, os.path.relpath(parser_stats), os.path.relpath(p)))
devnull.close()
print("\n")
for parser in args.parser:
filename = os.path.basename(parser) + '.bad'
print("writing {} files that did not yield the correct result with {} to {}".format(len(bad[parser]), parser, filename))
with open(filename, "w") as f:
for p in bad[parser]:
f.write(p)
f.write("\n")
| apache-2.0 | 5,025,622,891,980,768,000 | 1,189,460,582,408,206,300 | 32.736842 | 124 | 0.602964 | false |
Yipit/pyeqs | tests/functional/test_connection.py | 1 | 1657 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from sure import scenario
from pyeqs import QuerySet
from tests.helpers import prepare_data, cleanup_data, add_document
@scenario(prepare_data, cleanup_data)
def test_simple_search_with_host_string(context):
"""
Connect with host string
"""
    # When I create a queryset
t = QuerySet("localhost", index="foo")
# And there are records
add_document("foo", {"bar": "baz"})
# And I do a search
results = t[0:1]
    # Then I get the expected results
len(results).should.equal(1)
results[0]['_source'].should.equal({"bar": "baz"})
@scenario(prepare_data, cleanup_data)
def test_simple_search_with_host_dict(context):
"""
Connect with host dict
"""
# When create a queryset
connection_info = {"host": "localhost", "port": 9200}
t = QuerySet(connection_info, index="foo")
# And there are records
add_document("foo", {"bar": "baz"})
# And I do a search
results = t[0:1]
    # Then I get the expected results
len(results).should.equal(1)
results[0]['_source'].should.equal({"bar": "baz"})
@scenario(prepare_data, cleanup_data)
def test_simple_search_with_host_list(context):
"""
Connect with host list
"""
# When create a queryset
connection_info = [{"host": "localhost", "port": 9200}]
t = QuerySet(connection_info, index="foo")
# And there are records
add_document("foo", {"bar": "baz"})
# And I do a search
results = t[0:1]
    # Then I get the expected results
len(results).should.equal(1)
results[0]['_source'].should.equal({"bar": "baz"})
| mit | -1,786,302,472,892,158,500 | -4,064,591,940,564,422,000 | 24.106061 | 66 | 0.630054 | false |
jawilson/home-assistant | homeassistant/components/camera/bloomsky.py | 28 | 2160 | """
Support for a camera of a BloomSky weather station.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/camera.bloomsky/
"""
import logging
import requests
from homeassistant.components.camera import Camera
from homeassistant.loader import get_component
DEPENDENCIES = ['bloomsky']
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup access to BloomSky cameras."""
bloomsky = get_component('bloomsky')
for device in bloomsky.BLOOMSKY.devices.values():
add_devices([BloomSkyCamera(bloomsky.BLOOMSKY, device)])
class BloomSkyCamera(Camera):
"""Representation of the images published from the BloomSky's camera."""
def __init__(self, bs, device):
"""Setup for access to the BloomSky camera images."""
super(BloomSkyCamera, self).__init__()
self._name = device['DeviceName']
self._id = device['DeviceID']
self._bloomsky = bs
self._url = ""
self._last_url = ""
# _last_image will store images as they are downloaded so that the
# frequent updates in home-assistant don't keep poking the server
# to download the same image over and over.
self._last_image = ""
self._logger = logging.getLogger(__name__)
def camera_image(self):
"""Update the camera's image if it has changed."""
try:
self._url = self._bloomsky.devices[self._id]['Data']['ImageURL']
self._bloomsky.refresh_devices()
# If the URL hasn't changed then the image hasn't changed.
if self._url != self._last_url:
response = requests.get(self._url, timeout=10)
self._last_url = self._url
self._last_image = response.content
except requests.exceptions.RequestException as error:
self._logger.error("Error getting bloomsky image: %s", error)
return None
return self._last_image
@property
def name(self):
"""Return the name of this BloomSky device."""
return self._name
| mit | -1,539,491,081,776,510,700 | 7,438,319,636,261,870,000 | 34.409836 | 76 | 0.641667 | false |
jsoref/django | tests/postgres_tests/test_aggregates.py | 307 | 11910 | from django.contrib.postgres.aggregates import (
ArrayAgg, BitAnd, BitOr, BoolAnd, BoolOr, Corr, CovarPop, RegrAvgX,
RegrAvgY, RegrCount, RegrIntercept, RegrR2, RegrSlope, RegrSXX, RegrSXY,
RegrSYY, StatAggregate, StringAgg,
)
from django.db.models.expressions import F, Value
from django.test.utils import Approximate
from . import PostgreSQLTestCase
from .models import AggregateTestModel, StatTestModel
class TestGeneralAggregate(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
AggregateTestModel.objects.create(boolean_field=True, char_field='Foo1', integer_field=0)
AggregateTestModel.objects.create(boolean_field=False, char_field='Foo2', integer_field=1)
AggregateTestModel.objects.create(boolean_field=False, char_field='Foo3', integer_field=2)
AggregateTestModel.objects.create(boolean_field=True, char_field='Foo4', integer_field=0)
def test_array_agg_charfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
self.assertEqual(values, {'arrayagg': ['Foo1', 'Foo2', 'Foo3', 'Foo4']})
def test_array_agg_integerfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
self.assertEqual(values, {'arrayagg': [0, 1, 2, 0]})
def test_array_agg_booleanfield(self):
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
self.assertEqual(values, {'arrayagg': [True, False, False, True]})
def test_array_agg_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('char_field'))
self.assertEqual(values, {'arrayagg': []})
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('integer_field'))
self.assertEqual(values, {'arrayagg': []})
values = AggregateTestModel.objects.aggregate(arrayagg=ArrayAgg('boolean_field'))
self.assertEqual(values, {'arrayagg': []})
def test_bit_and_general(self):
values = AggregateTestModel.objects.filter(
integer_field__in=[0, 1]).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 0})
def test_bit_and_on_only_true_values(self):
values = AggregateTestModel.objects.filter(
integer_field=1).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 1})
def test_bit_and_on_only_false_values(self):
values = AggregateTestModel.objects.filter(
integer_field=0).aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': 0})
def test_bit_and_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(bitand=BitAnd('integer_field'))
self.assertEqual(values, {'bitand': None})
def test_bit_or_general(self):
values = AggregateTestModel.objects.filter(
integer_field__in=[0, 1]).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 1})
def test_bit_or_on_only_true_values(self):
values = AggregateTestModel.objects.filter(
integer_field=1).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 1})
def test_bit_or_on_only_false_values(self):
values = AggregateTestModel.objects.filter(
integer_field=0).aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': 0})
def test_bit_or_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(bitor=BitOr('integer_field'))
self.assertEqual(values, {'bitor': None})
def test_bool_and_general(self):
values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
self.assertEqual(values, {'booland': False})
def test_bool_and_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(booland=BoolAnd('boolean_field'))
self.assertEqual(values, {'booland': None})
def test_bool_or_general(self):
values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
self.assertEqual(values, {'boolor': True})
def test_bool_or_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(boolor=BoolOr('boolean_field'))
self.assertEqual(values, {'boolor': None})
def test_string_agg_requires_delimiter(self):
with self.assertRaises(TypeError):
AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field'))
def test_string_agg_charfield(self):
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
self.assertEqual(values, {'stringagg': 'Foo1;Foo2;Foo3;Foo4'})
def test_string_agg_empty_result(self):
AggregateTestModel.objects.all().delete()
values = AggregateTestModel.objects.aggregate(stringagg=StringAgg('char_field', delimiter=';'))
self.assertEqual(values, {'stringagg': ''})
class TestStatisticsAggregate(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
StatTestModel.objects.create(
int1=1,
int2=3,
related_field=AggregateTestModel.objects.create(integer_field=0),
)
StatTestModel.objects.create(
int1=2,
int2=2,
related_field=AggregateTestModel.objects.create(integer_field=1),
)
StatTestModel.objects.create(
int1=3,
int2=1,
related_field=AggregateTestModel.objects.create(integer_field=2),
)
# Tests for base class (StatAggregate)
def test_missing_arguments_raises_exception(self):
with self.assertRaisesMessage(ValueError, 'Both y and x must be provided.'):
StatAggregate(x=None, y=None)
def test_correct_source_expressions(self):
func = StatAggregate(x='test', y=13)
self.assertIsInstance(func.source_expressions[0], Value)
self.assertIsInstance(func.source_expressions[1], F)
def test_alias_is_required(self):
class SomeFunc(StatAggregate):
function = 'TEST'
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
StatTestModel.objects.aggregate(SomeFunc(y='int2', x='int1'))
# Test aggregates
def test_corr_general(self):
values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
self.assertEqual(values, {'corr': -1.0})
def test_corr_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(corr=Corr(y='int2', x='int1'))
self.assertEqual(values, {'corr': None})
def test_covar_pop_general(self):
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
self.assertEqual(values, {'covarpop': Approximate(-0.66, places=1)})
def test_covar_pop_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1'))
self.assertEqual(values, {'covarpop': None})
def test_covar_pop_sample(self):
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
self.assertEqual(values, {'covarpop': -1.0})
def test_covar_pop_sample_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y='int2', x='int1', sample=True))
self.assertEqual(values, {'covarpop': None})
def test_regr_avgx_general(self):
values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
self.assertEqual(values, {'regravgx': 2.0})
def test_regr_avgx_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y='int2', x='int1'))
self.assertEqual(values, {'regravgx': None})
def test_regr_avgy_general(self):
values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
self.assertEqual(values, {'regravgy': 2.0})
def test_regr_avgy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y='int2', x='int1'))
self.assertEqual(values, {'regravgy': None})
def test_regr_count_general(self):
values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
self.assertEqual(values, {'regrcount': 3})
def test_regr_count_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrcount=RegrCount(y='int2', x='int1'))
self.assertEqual(values, {'regrcount': 0})
def test_regr_intercept_general(self):
values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
self.assertEqual(values, {'regrintercept': 4})
def test_regr_intercept_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrintercept=RegrIntercept(y='int2', x='int1'))
self.assertEqual(values, {'regrintercept': None})
def test_regr_r2_general(self):
values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
self.assertEqual(values, {'regrr2': 1})
def test_regr_r2_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrr2=RegrR2(y='int2', x='int1'))
self.assertEqual(values, {'regrr2': None})
def test_regr_slope_general(self):
values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
self.assertEqual(values, {'regrslope': -1})
def test_regr_slope_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrslope=RegrSlope(y='int2', x='int1'))
self.assertEqual(values, {'regrslope': None})
def test_regr_sxx_general(self):
values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
self.assertEqual(values, {'regrsxx': 2.0})
def test_regr_sxx_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y='int2', x='int1'))
self.assertEqual(values, {'regrsxx': None})
def test_regr_sxy_general(self):
values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
self.assertEqual(values, {'regrsxy': -2.0})
def test_regr_sxy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y='int2', x='int1'))
self.assertEqual(values, {'regrsxy': None})
def test_regr_syy_general(self):
values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
self.assertEqual(values, {'regrsyy': 2.0})
def test_regr_syy_empty_result(self):
StatTestModel.objects.all().delete()
values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y='int2', x='int1'))
self.assertEqual(values, {'regrsyy': None})
def test_regr_avgx_with_related_obj_and_number_as_argument(self):
"""
This is more complex test to check if JOIN on field and
number as argument works as expected.
"""
values = StatTestModel.objects.aggregate(complex_regravgx=RegrAvgX(y=5, x='related_field__integer_field'))
self.assertEqual(values, {'complex_regravgx': 1.0})
| bsd-3-clause | 3,852,928,585,429,600,000 | 1,465,383,994,044,729,600 | 43.774436 | 114 | 0.671872 | false |
xbot/alfred-pushbullet | lib/requests/packages/chardet/jisfreq.py | 3131 | 47315 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials, including literature and computer technology
#
# Japanese frequency table, applied to both S-JIS and EUC-JP
# They are sorted in order.
# 128 --> 0.77094
# 256 --> 0.85710
# 512 --> 0.92635
# 1024 --> 0.97130
# 2048 --> 0.99431
#
# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
#
# Typical Distribution Ratio, 25% of IDR
JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
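# Spelled out from the numbers above (a sanity check, not new data):
#   IDR = 0.92635 / (1 - 0.92635) ~= 12.58
#   25% of IDR ~= 3.15, which this table rounds down to 3.0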
# Char to FreqOrder table
JIS_TABLE_SIZE = 4368
JISCharToFreqOrder = (
40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
#Everything below is of no interest for detection purpose
2138,2122,3730,2888,1995,1820,1044,6190,6191,6192,6193,6194,6195,6196,6197,6198, # 4384
6199,6200,6201,6202,6203,6204,6205,4670,6206,6207,6208,6209,6210,6211,6212,6213, # 4400
6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,6224,6225,6226,6227,6228,6229, # 4416
6230,6231,6232,6233,6234,6235,6236,6237,3187,6238,6239,3969,6240,6241,6242,6243, # 4432
6244,4671,6245,6246,4672,6247,6248,4133,6249,6250,4364,6251,2923,2556,2613,4673, # 4448
4365,3970,6252,6253,6254,6255,4674,6256,6257,6258,2768,2353,4366,4675,4676,3188, # 4464
4367,3463,6259,4134,4677,4678,6260,2267,6261,3842,3332,4368,3543,6262,6263,6264, # 4480
3013,1954,1928,4135,4679,6265,6266,2478,3091,6267,4680,4369,6268,6269,1699,6270, # 4496
3544,4136,4681,6271,4137,6272,4370,2804,6273,6274,2593,3971,3972,4682,6275,2236, # 4512
4683,6276,6277,4684,6278,6279,4138,3973,4685,6280,6281,3258,6282,6283,6284,6285, # 4528
3974,4686,2841,3975,6286,6287,3545,6288,6289,4139,4687,4140,6290,4141,6291,4142, # 4544
6292,6293,3333,6294,6295,6296,4371,6297,3399,6298,6299,4372,3976,6300,6301,6302, # 4560
4373,6303,6304,3843,3731,6305,4688,4374,6306,6307,3259,2294,6308,3732,2530,4143, # 4576
6309,4689,6310,6311,6312,3048,6313,6314,4690,3733,2237,6315,6316,2282,3334,6317, # 4592
6318,3844,6319,6320,4691,6321,3400,4692,6322,4693,6323,3049,6324,4375,6325,3977, # 4608
6326,6327,6328,3546,6329,4694,3335,6330,4695,4696,6331,6332,6333,6334,4376,3978, # 4624
6335,4697,3979,4144,6336,3980,4698,6337,6338,6339,6340,6341,4699,4700,4701,6342, # 4640
6343,4702,6344,6345,4703,6346,6347,4704,6348,4705,4706,3135,6349,4707,6350,4708, # 4656
6351,4377,6352,4709,3734,4145,6353,2506,4710,3189,6354,3050,4711,3981,6355,3547, # 4672
3014,4146,4378,3735,2651,3845,3260,3136,2224,1986,6356,3401,6357,4712,2594,3627, # 4688
3137,2573,3736,3982,4713,3628,4714,4715,2682,3629,4716,6358,3630,4379,3631,6359, # 4704
6360,6361,3983,6362,6363,6364,6365,4147,3846,4717,6366,6367,3737,2842,6368,4718, # 4720
2628,6369,3261,6370,2386,6371,6372,3738,3984,4719,3464,4720,3402,6373,2924,3336, # 4736
4148,2866,6374,2805,3262,4380,2704,2069,2531,3138,2806,2984,6375,2769,6376,4721, # 4752
4722,3403,6377,6378,3548,6379,6380,2705,3092,1979,4149,2629,3337,2889,6381,3338, # 4768
4150,2557,3339,4381,6382,3190,3263,3739,6383,4151,4723,4152,2558,2574,3404,3191, # 4784
6384,6385,4153,6386,4724,4382,6387,6388,4383,6389,6390,4154,6391,4725,3985,6392, # 4800
3847,4155,6393,6394,6395,6396,6397,3465,6398,4384,6399,6400,6401,6402,6403,6404, # 4816
4156,6405,6406,6407,6408,2123,6409,6410,2326,3192,4726,6411,6412,6413,6414,4385, # 4832
4157,6415,6416,4158,6417,3093,3848,6418,3986,6419,6420,3849,6421,6422,6423,4159, # 4848
6424,6425,4160,6426,3740,6427,6428,6429,6430,3987,6431,4727,6432,2238,6433,6434, # 4864
4386,3988,6435,6436,3632,6437,6438,2843,6439,6440,6441,6442,3633,6443,2958,6444, # 4880
6445,3466,6446,2364,4387,3850,6447,4388,2959,3340,6448,3851,6449,4728,6450,6451, # 4896
3264,4729,6452,3193,6453,4389,4390,2706,3341,4730,6454,3139,6455,3194,6456,3051, # 4912
2124,3852,1602,4391,4161,3853,1158,3854,4162,3989,4392,3990,4731,4732,4393,2040, # 4928
4163,4394,3265,6457,2807,3467,3855,6458,6459,6460,3991,3468,4733,4734,6461,3140, # 4944
2960,6462,4735,6463,6464,6465,6466,4736,4737,4738,4739,6467,6468,4164,2403,3856, # 4960
6469,6470,2770,2844,6471,4740,6472,6473,6474,6475,6476,6477,6478,3195,6479,4741, # 4976
4395,6480,2867,6481,4742,2808,6482,2493,4165,6483,6484,6485,6486,2295,4743,6487, # 4992
6488,6489,3634,6490,6491,6492,6493,6494,6495,6496,2985,4744,6497,6498,4745,6499, # 5008
6500,2925,3141,4166,6501,6502,4746,6503,6504,4747,6505,6506,6507,2890,6508,6509, # 5024
6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,3469,4167,6520,6521,6522,4748, # 5040
4396,3741,4397,4749,4398,3342,2125,4750,6523,4751,4752,4753,3052,6524,2961,4168, # 5056
6525,4754,6526,4755,4399,2926,4169,6527,3857,6528,4400,4170,6529,4171,6530,6531, # 5072
2595,6532,6533,6534,6535,3635,6536,6537,6538,6539,6540,6541,6542,4756,6543,6544, # 5088
6545,6546,6547,6548,4401,6549,6550,6551,6552,4402,3405,4757,4403,6553,6554,6555, # 5104
4172,3742,6556,6557,6558,3992,3636,6559,6560,3053,2726,6561,3549,4173,3054,4404, # 5120
6562,6563,3993,4405,3266,3550,2809,4406,6564,6565,6566,4758,4759,6567,3743,6568, # 5136
4760,3744,4761,3470,6569,6570,6571,4407,6572,3745,4174,6573,4175,2810,4176,3196, # 5152
4762,6574,4177,6575,6576,2494,2891,3551,6577,6578,3471,6579,4408,6580,3015,3197, # 5168
6581,3343,2532,3994,3858,6582,3094,3406,4409,6583,2892,4178,4763,4410,3016,4411, # 5184
6584,3995,3142,3017,2683,6585,4179,6586,6587,4764,4412,6588,6589,4413,6590,2986, # 5200
6591,2962,3552,6592,2963,3472,6593,6594,4180,4765,6595,6596,2225,3267,4414,6597, # 5216
3407,3637,4766,6598,6599,3198,6600,4415,6601,3859,3199,6602,3473,4767,2811,4416, # 5232
1856,3268,3200,2575,3996,3997,3201,4417,6603,3095,2927,6604,3143,6605,2268,6606, # 5248
3998,3860,3096,2771,6607,6608,3638,2495,4768,6609,3861,6610,3269,2745,4769,4181, # 5264
3553,6611,2845,3270,6612,6613,6614,3862,6615,6616,4770,4771,6617,3474,3999,4418, # 5280
4419,6618,3639,3344,6619,4772,4182,6620,2126,6621,6622,6623,4420,4773,6624,3018, # 5296
6625,4774,3554,6626,4183,2025,3746,6627,4184,2707,6628,4421,4422,3097,1775,4185, # 5312
3555,6629,6630,2868,6631,6632,4423,6633,6634,4424,2414,2533,2928,6635,4186,2387, # 5328
6636,4775,6637,4187,6638,1891,4425,3202,3203,6639,6640,4776,6641,3345,6642,6643, # 5344
3640,6644,3475,3346,3641,4000,6645,3144,6646,3098,2812,4188,3642,3204,6647,3863, # 5360
3476,6648,3864,6649,4426,4001,6650,6651,6652,2576,6653,4189,4777,6654,6655,6656, # 5376
2846,6657,3477,3205,4002,6658,4003,6659,3347,2252,6660,6661,6662,4778,6663,6664, # 5392
6665,6666,6667,6668,6669,4779,4780,2048,6670,3478,3099,6671,3556,3747,4004,6672, # 5408
6673,6674,3145,4005,3748,6675,6676,6677,6678,6679,3408,6680,6681,6682,6683,3206, # 5424
3207,6684,6685,4781,4427,6686,4782,4783,4784,6687,6688,6689,4190,6690,6691,3479, # 5440
6692,2746,6693,4428,6694,6695,6696,6697,6698,6699,4785,6700,6701,3208,2727,6702, # 5456
3146,6703,6704,3409,2196,6705,4429,6706,6707,6708,2534,1996,6709,6710,6711,2747, # 5472
6712,6713,6714,4786,3643,6715,4430,4431,6716,3557,6717,4432,4433,6718,6719,6720, # 5488
6721,3749,6722,4006,4787,6723,6724,3644,4788,4434,6725,6726,4789,2772,6727,6728, # 5504
6729,6730,6731,2708,3865,2813,4435,6732,6733,4790,4791,3480,6734,6735,6736,6737, # 5520
4436,3348,6738,3410,4007,6739,6740,4008,6741,6742,4792,3411,4191,6743,6744,6745, # 5536
6746,6747,3866,6748,3750,6749,6750,6751,6752,6753,6754,6755,3867,6756,4009,6757, # 5552
4793,4794,6758,2814,2987,6759,6760,6761,4437,6762,6763,6764,6765,3645,6766,6767, # 5568
3481,4192,6768,3751,6769,6770,2174,6771,3868,3752,6772,6773,6774,4193,4795,4438, # 5584
3558,4796,4439,6775,4797,6776,6777,4798,6778,4799,3559,4800,6779,6780,6781,3482, # 5600
6782,2893,6783,6784,4194,4801,4010,6785,6786,4440,6787,4011,6788,6789,6790,6791, # 5616
6792,6793,4802,6794,6795,6796,4012,6797,6798,6799,6800,3349,4803,3483,6801,4804, # 5632
4195,6802,4013,6803,6804,4196,6805,4014,4015,6806,2847,3271,2848,6807,3484,6808, # 5648
6809,6810,4441,6811,4442,4197,4443,3272,4805,6812,3412,4016,1579,6813,6814,4017, # 5664
6815,3869,6816,2964,6817,4806,6818,6819,4018,3646,6820,6821,4807,4019,4020,6822, # 5680
6823,3560,6824,6825,4021,4444,6826,4198,6827,6828,4445,6829,6830,4199,4808,6831, # 5696
6832,6833,3870,3019,2458,6834,3753,3413,3350,6835,4809,3871,4810,3561,4446,6836, # 5712
6837,4447,4811,4812,6838,2459,4448,6839,4449,6840,6841,4022,3872,6842,4813,4814, # 5728
6843,6844,4815,4200,4201,4202,6845,4023,6846,6847,4450,3562,3873,6848,6849,4816, # 5744
4817,6850,4451,4818,2139,6851,3563,6852,6853,3351,6854,6855,3352,4024,2709,3414, # 5760
4203,4452,6856,4204,6857,6858,3874,3875,6859,6860,4819,6861,6862,6863,6864,4453, # 5776
3647,6865,6866,4820,6867,6868,6869,6870,4454,6871,2869,6872,6873,4821,6874,3754, # 5792
6875,4822,4205,6876,6877,6878,3648,4206,4455,6879,4823,6880,4824,3876,6881,3055, # 5808
4207,6882,3415,6883,6884,6885,4208,4209,6886,4210,3353,6887,3354,3564,3209,3485, # 5824
2652,6888,2728,6889,3210,3755,6890,4025,4456,6891,4825,6892,6893,6894,6895,4211, # 5840
6896,6897,6898,4826,6899,6900,4212,6901,4827,6902,2773,3565,6903,4828,6904,6905, # 5856
6906,6907,3649,3650,6908,2849,3566,6909,3567,3100,6910,6911,6912,6913,6914,6915, # 5872
4026,6916,3355,4829,3056,4457,3756,6917,3651,6918,4213,3652,2870,6919,4458,6920, # 5888
2438,6921,6922,3757,2774,4830,6923,3356,4831,4832,6924,4833,4459,3653,2507,6925, # 5904
4834,2535,6926,6927,3273,4027,3147,6928,3568,6929,6930,6931,4460,6932,3877,4461, # 5920
2729,3654,6933,6934,6935,6936,2175,4835,2630,4214,4028,4462,4836,4215,6937,3148, # 5936
4216,4463,4837,4838,4217,6938,6939,2850,4839,6940,4464,6941,6942,6943,4840,6944, # 5952
4218,3274,4465,6945,6946,2710,6947,4841,4466,6948,6949,2894,6950,6951,4842,6952, # 5968
4219,3057,2871,6953,6954,6955,6956,4467,6957,2711,6958,6959,6960,3275,3101,4843, # 5984
6961,3357,3569,6962,4844,6963,6964,4468,4845,3570,6965,3102,4846,3758,6966,4847, # 6000
3878,4848,4849,4029,6967,2929,3879,4850,4851,6968,6969,1733,6970,4220,6971,6972, # 6016
6973,6974,6975,6976,4852,6977,6978,6979,6980,6981,6982,3759,6983,6984,6985,3486, # 6032
3487,6986,3488,3416,6987,6988,6989,6990,6991,6992,6993,6994,6995,6996,6997,4853, # 6048
6998,6999,4030,7000,7001,3211,7002,7003,4221,7004,7005,3571,4031,7006,3572,7007, # 6064
2614,4854,2577,7008,7009,2965,3655,3656,4855,2775,3489,3880,4222,4856,3881,4032, # 6080
3882,3657,2730,3490,4857,7010,3149,7011,4469,4858,2496,3491,4859,2283,7012,7013, # 6096
7014,2365,4860,4470,7015,7016,3760,7017,7018,4223,1917,7019,7020,7021,4471,7022, # 6112
2776,4472,7023,7024,7025,7026,4033,7027,3573,4224,4861,4034,4862,7028,7029,1929, # 6128
3883,4035,7030,4473,3058,7031,2536,3761,3884,7032,4036,7033,2966,2895,1968,4474, # 6144
3276,4225,3417,3492,4226,2105,7034,7035,1754,2596,3762,4227,4863,4475,3763,4864, # 6160
3764,2615,2777,3103,3765,3658,3418,4865,2296,3766,2815,7036,7037,7038,3574,2872, # 6176
3277,4476,7039,4037,4477,7040,7041,4038,7042,7043,7044,7045,7046,7047,2537,7048, # 6192
7049,7050,7051,7052,7053,7054,4478,7055,7056,3767,3659,4228,3575,7057,7058,4229, # 6208
7059,7060,7061,3660,7062,3212,7063,3885,4039,2460,7064,7065,7066,7067,7068,7069, # 6224
7070,7071,7072,7073,7074,4866,3768,4867,7075,7076,7077,7078,4868,3358,3278,2653, # 6240
7079,7080,4479,3886,7081,7082,4869,7083,7084,7085,7086,7087,7088,2538,7089,7090, # 6256
7091,4040,3150,3769,4870,4041,2896,3359,4230,2930,7092,3279,7093,2967,4480,3213, # 6272
4481,3661,7094,7095,7096,7097,7098,7099,7100,7101,7102,2461,3770,7103,7104,4231, # 6288
3151,7105,7106,7107,4042,3662,7108,7109,4871,3663,4872,4043,3059,7110,7111,7112, # 6304
3493,2988,7113,4873,7114,7115,7116,3771,4874,7117,7118,4232,4875,7119,3576,2336, # 6320
4876,7120,4233,3419,4044,4877,4878,4482,4483,4879,4484,4234,7121,3772,4880,1045, # 6336
3280,3664,4881,4882,7122,7123,7124,7125,4883,7126,2778,7127,4485,4486,7128,4884, # 6352
3214,3887,7129,7130,3215,7131,4885,4045,7132,7133,4046,7134,7135,7136,7137,7138, # 6368
7139,7140,7141,7142,7143,4235,7144,4886,7145,7146,7147,4887,7148,7149,7150,4487, # 6384
4047,4488,7151,7152,4888,4048,2989,3888,7153,3665,7154,4049,7155,7156,7157,7158, # 6400
7159,7160,2931,4889,4890,4489,7161,2631,3889,4236,2779,7162,7163,4891,7164,3060, # 6416
7165,1672,4892,7166,4893,4237,3281,4894,7167,7168,3666,7169,3494,7170,7171,4050, # 6432
7172,7173,3104,3360,3420,4490,4051,2684,4052,7174,4053,7175,7176,7177,2253,4054, # 6448
7178,7179,4895,7180,3152,3890,3153,4491,3216,7181,7182,7183,2968,4238,4492,4055, # 6464
7184,2990,7185,2479,7186,7187,4493,7188,7189,7190,7191,7192,4896,7193,4897,2969, # 6480
4494,4898,7194,3495,7195,7196,4899,4495,7197,3105,2731,7198,4900,7199,7200,7201, # 6496
4056,7202,3361,7203,7204,4496,4901,4902,7205,4497,7206,7207,2315,4903,7208,4904, # 6512
7209,4905,2851,7210,7211,3577,7212,3578,4906,7213,4057,3667,4907,7214,4058,2354, # 6528
3891,2376,3217,3773,7215,7216,7217,7218,7219,4498,7220,4908,3282,2685,7221,3496, # 6544
4909,2632,3154,4910,7222,2337,7223,4911,7224,7225,7226,4912,4913,3283,4239,4499, # 6560
7227,2816,7228,7229,7230,7231,7232,7233,7234,4914,4500,4501,7235,7236,7237,2686, # 6576
7238,4915,7239,2897,4502,7240,4503,7241,2516,7242,4504,3362,3218,7243,7244,7245, # 6592
4916,7246,7247,4505,3363,7248,7249,7250,7251,3774,4506,7252,7253,4917,7254,7255, # 6608
3284,2991,4918,4919,3219,3892,4920,3106,3497,4921,7256,7257,7258,4922,7259,4923, # 6624
3364,4507,4508,4059,7260,4240,3498,7261,7262,4924,7263,2992,3893,4060,3220,7264, # 6640
7265,7266,7267,7268,7269,4509,3775,7270,2817,7271,4061,4925,4510,3776,7272,4241, # 6656
4511,3285,7273,7274,3499,7275,7276,7277,4062,4512,4926,7278,3107,3894,7279,7280, # 6672
4927,7281,4513,7282,7283,3668,7284,7285,4242,4514,4243,7286,2058,4515,4928,4929, # 6688
4516,7287,3286,4244,7288,4517,7289,7290,7291,3669,7292,7293,4930,4931,4932,2355, # 6704
4933,7294,2633,4518,7295,4245,7296,7297,4519,7298,7299,4520,4521,4934,7300,4246, # 6720
4522,7301,7302,7303,3579,7304,4247,4935,7305,4936,7306,7307,7308,7309,3777,7310, # 6736
4523,7311,7312,7313,4248,3580,7314,4524,3778,4249,7315,3581,7316,3287,7317,3221, # 6752
7318,4937,7319,7320,7321,7322,7323,7324,4938,4939,7325,4525,7326,7327,7328,4063, # 6768
7329,7330,4940,7331,7332,4941,7333,4526,7334,3500,2780,1741,4942,2026,1742,7335, # 6784
7336,3582,4527,2388,7337,7338,7339,4528,7340,4250,4943,7341,7342,7343,4944,7344, # 6800
7345,7346,3020,7347,4945,7348,7349,7350,7351,3895,7352,3896,4064,3897,7353,7354, # 6816
7355,4251,7356,7357,3898,7358,3779,7359,3780,3288,7360,7361,4529,7362,4946,4530, # 6832
2027,7363,3899,4531,4947,3222,3583,7364,4948,7365,7366,7367,7368,4949,3501,4950, # 6848
3781,4951,4532,7369,2517,4952,4252,4953,3155,7370,4954,4955,4253,2518,4533,7371, # 6864
7372,2712,4254,7373,7374,7375,3670,4956,3671,7376,2389,3502,4065,7377,2338,7378, # 6880
7379,7380,7381,3061,7382,4957,7383,7384,7385,7386,4958,4534,7387,7388,2993,7389, # 6896
3062,7390,4959,7391,7392,7393,4960,3108,4961,7394,4535,7395,4962,3421,4536,7396, # 6912
4963,7397,4964,1857,7398,4965,7399,7400,2176,3584,4966,7401,7402,3422,4537,3900, # 6928
3585,7403,3782,7404,2852,7405,7406,7407,4538,3783,2654,3423,4967,4539,7408,3784, # 6944
3586,2853,4540,4541,7409,3901,7410,3902,7411,7412,3785,3109,2327,3903,7413,7414, # 6960
2970,4066,2932,7415,7416,7417,3904,3672,3424,7418,4542,4543,4544,7419,4968,7420, # 6976
7421,4255,7422,7423,7424,7425,7426,4067,7427,3673,3365,4545,7428,3110,2559,3674, # 6992
7429,7430,3156,7431,7432,3503,7433,3425,4546,7434,3063,2873,7435,3223,4969,4547, # 7008
4548,2898,4256,4068,7436,4069,3587,3786,2933,3787,4257,4970,4971,3788,7437,4972, # 7024
3064,7438,4549,7439,7440,7441,7442,7443,4973,3905,7444,2874,7445,7446,7447,7448, # 7040
3021,7449,4550,3906,3588,4974,7450,7451,3789,3675,7452,2578,7453,4070,7454,7455, # 7056
7456,4258,3676,7457,4975,7458,4976,4259,3790,3504,2634,4977,3677,4551,4260,7459, # 7072
7460,7461,7462,3907,4261,4978,7463,7464,7465,7466,4979,4980,7467,7468,2213,4262, # 7088
7469,7470,7471,3678,4981,7472,2439,7473,4263,3224,3289,7474,3908,2415,4982,7475, # 7104
4264,7476,4983,2655,7477,7478,2732,4552,2854,2875,7479,7480,4265,7481,4553,4984, # 7120
7482,7483,4266,7484,3679,3366,3680,2818,2781,2782,3367,3589,4554,3065,7485,4071, # 7136
2899,7486,7487,3157,2462,4072,4555,4073,4985,4986,3111,4267,2687,3368,4556,4074, # 7152
3791,4268,7488,3909,2783,7489,2656,1962,3158,4557,4987,1963,3159,3160,7490,3112, # 7168
4988,4989,3022,4990,4991,3792,2855,7491,7492,2971,4558,7493,7494,4992,7495,7496, # 7184
7497,7498,4993,7499,3426,4559,4994,7500,3681,4560,4269,4270,3910,7501,4075,4995, # 7200
4271,7502,7503,4076,7504,4996,7505,3225,4997,4272,4077,2819,3023,7506,7507,2733, # 7216
4561,7508,4562,7509,3369,3793,7510,3590,2508,7511,7512,4273,3113,2994,2616,7513, # 7232
7514,7515,7516,7517,7518,2820,3911,4078,2748,7519,7520,4563,4998,7521,7522,7523, # 7248
7524,4999,4274,7525,4564,3682,2239,4079,4565,7526,7527,7528,7529,5000,7530,7531, # 7264
5001,4275,3794,7532,7533,7534,3066,5002,4566,3161,7535,7536,4080,7537,3162,7538, # 7280
7539,4567,7540,7541,7542,7543,7544,7545,5003,7546,4568,7547,7548,7549,7550,7551, # 7296
7552,7553,7554,7555,7556,5004,7557,7558,7559,5005,7560,3795,7561,4569,7562,7563, # 7312
7564,2821,3796,4276,4277,4081,7565,2876,7566,5006,7567,7568,2900,7569,3797,3912, # 7328
7570,7571,7572,4278,7573,7574,7575,5007,7576,7577,5008,7578,7579,4279,2934,7580, # 7344
7581,5009,7582,4570,7583,4280,7584,7585,7586,4571,4572,3913,7587,4573,3505,7588, # 7360
5010,7589,7590,7591,7592,3798,4574,7593,7594,5011,7595,4281,7596,7597,7598,4282, # 7376
5012,7599,7600,5013,3163,7601,5014,7602,3914,7603,7604,2734,4575,4576,4577,7605, # 7392
7606,7607,7608,7609,3506,5015,4578,7610,4082,7611,2822,2901,2579,3683,3024,4579, # 7408
3507,7612,4580,7613,3226,3799,5016,7614,7615,7616,7617,7618,7619,7620,2995,3290, # 7424
7621,4083,7622,5017,7623,7624,7625,7626,7627,4581,3915,7628,3291,7629,5018,7630, # 7440
7631,7632,7633,4084,7634,7635,3427,3800,7636,7637,4582,7638,5019,4583,5020,7639, # 7456
3916,7640,3801,5021,4584,4283,7641,7642,3428,3591,2269,7643,2617,7644,4585,3592, # 7472
7645,4586,2902,7646,7647,3227,5022,7648,4587,7649,4284,7650,7651,7652,4588,2284, # 7488
7653,5023,7654,7655,7656,4589,5024,3802,7657,7658,5025,3508,4590,7659,7660,7661, # 7504
1969,5026,7662,7663,3684,1821,2688,7664,2028,2509,4285,7665,2823,1841,7666,2689, # 7520
3114,7667,3917,4085,2160,5027,5028,2972,7668,5029,7669,7670,7671,3593,4086,7672, # 7536
4591,4087,5030,3803,7673,7674,7675,7676,7677,7678,7679,4286,2366,4592,4593,3067, # 7552
2328,7680,7681,4594,3594,3918,2029,4287,7682,5031,3919,3370,4288,4595,2856,7683, # 7568
3509,7684,7685,5032,5033,7686,7687,3804,2784,7688,7689,7690,7691,3371,7692,7693, # 7584
2877,5034,7694,7695,3920,4289,4088,7696,7697,7698,5035,7699,5036,4290,5037,5038, # 7600
5039,7700,7701,7702,5040,5041,3228,7703,1760,7704,5042,3229,4596,2106,4089,7705, # 7616
4597,2824,5043,2107,3372,7706,4291,4090,5044,7707,4091,7708,5045,3025,3805,4598, # 7632
4292,4293,4294,3373,7709,4599,7710,5046,7711,7712,5047,5048,3806,7713,7714,7715, # 7648
5049,7716,7717,7718,7719,4600,5050,7720,7721,7722,5051,7723,4295,3429,7724,7725, # 7664
7726,7727,3921,7728,3292,5052,4092,7729,7730,7731,7732,7733,7734,7735,5053,5054, # 7680
7736,7737,7738,7739,3922,3685,7740,7741,7742,7743,2635,5055,7744,5056,4601,7745, # 7696
7746,2560,7747,7748,7749,7750,3923,7751,7752,7753,7754,7755,4296,2903,7756,7757, # 7712
7758,7759,7760,3924,7761,5057,4297,7762,7763,5058,4298,7764,4093,7765,7766,5059, # 7728
3925,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,3595,7777,4299,5060,4094, # 7744
7778,3293,5061,7779,7780,4300,7781,7782,4602,7783,3596,7784,7785,3430,2367,7786, # 7760
3164,5062,5063,4301,7787,7788,4095,5064,5065,7789,3374,3115,7790,7791,7792,7793, # 7776
7794,7795,7796,3597,4603,7797,7798,3686,3116,3807,5066,7799,7800,5067,7801,7802, # 7792
4604,4302,5068,4303,4096,7803,7804,3294,7805,7806,5069,4605,2690,7807,3026,7808, # 7808
7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824, # 7824
7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7840
7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,7856, # 7856
7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,7872, # 7872
7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,7888, # 7888
7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,7904, # 7904
7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,7920, # 7920
7921,7922,7923,7924,3926,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935, # 7936
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951, # 7952
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967, # 7968
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983, # 7984
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999, # 8000
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015, # 8016
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031, # 8032
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047, # 8048
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063, # 8064
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079, # 8080
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095, # 8096
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111, # 8112
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127, # 8128
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143, # 8144
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159, # 8160
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175, # 8176
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191, # 8192
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207, # 8208
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223, # 8224
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239, # 8240
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255, # 8256
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271) # 8272
# flake8: noqa
| mit | -8,551,369,509,816,712,000 | -2,511,568,456,403,894,000 | 82.154657 | 98 | 0.745134 | false |
SmartPeople/zulip | zerver/webhooks/stash/view.py | 13 | 1862 | # Webhooks for external integrations.
from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.models import get_client
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, authenticated_rest_api_view
from zerver.models import UserProfile
import ujson
from typing import Any, Dict, Text
@authenticated_rest_api_view(is_webhook=True)
@has_request_variables
def api_stash_webhook(request, user_profile, payload=REQ(argument_type='body'),
stream=REQ(default='commits')):
# type: (HttpRequest, UserProfile, Dict[str, Any], Text) -> HttpResponse
# We don't get who did the push, or we'd try to report that.
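    # Rough shape of the payload this handler reads (inferred from the key
    # lookups below; values are illustrative):
    # {"repository": {"name": ..., "project": {"name": ...}},
    #  "refChanges": [{"refId": "refs/heads/master"}],
    #  "changesets": {"values": [{"toCommit": {"displayId": ..., "message": ...}}]}}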
try:
repo_name = payload["repository"]["name"]
project_name = payload["repository"]["project"]["name"]
branch_name = payload["refChanges"][0]["refId"].split("/")[-1]
commit_entries = payload["changesets"]["values"]
commits = [(entry["toCommit"]["displayId"],
entry["toCommit"]["message"].split("\n")[0]) for
entry in commit_entries]
head_ref = commit_entries[-1]["toCommit"]["displayId"]
except KeyError as e:
return json_error(_("Missing key %s in JSON") % (str(e),))
subject = "%s/%s: %s" % (project_name, repo_name, branch_name)
content = "`%s` was pushed to **%s** in **%s/%s** with:\n\n" % (
head_ref, branch_name, project_name, repo_name)
content += "\n".join("* `%s`: %s" % (
commit[0], commit[1]) for commit in commits)
check_send_message(user_profile, get_client("ZulipStashWebhook"), "stream",
[stream], subject, content)
return json_success()
| apache-2.0 | 7,740,619,395,839,768,000 | -8,912,848,508,269,560,000 | 41.318182 | 84 | 0.647691 | false |
ltsimps/metis_ros | vendor/googletest/googletest/scripts/common.py | 1180 | 2919 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Shared utilities for writing scripts for Google Test/Mock."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
# Matches the line from 'svn info .' output that describes what SVN
# path the current local directory corresponds to. For example, in
# a googletest SVN workspace's trunk/test directory, the output will be:
#
# URL: https://googletest.googlecode.com/svn/trunk/test
_SVN_INFO_URL_RE = re.compile(r'^URL: https://(\w+)\.googlecode\.com/svn(.*)')
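# For the example URL above, group(1) is 'googletest' and group(2) is '/trunk/test'.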
def GetCommandOutput(command):
"""Runs the shell command and returns its stdout as a list of lines."""
f = os.popen(command, 'r')
lines = [line.strip() for line in f.readlines()]
f.close()
return lines
def GetSvnInfo():
"""Returns the project name and the current SVN workspace's root path."""
for line in GetCommandOutput('svn info .'):
m = _SVN_INFO_URL_RE.match(line)
if m:
project = m.group(1) # googletest or googlemock
rel_path = m.group(2)
root = os.path.realpath(rel_path.count('/') * '../')
return project, root
return None, None
def GetSvnTrunk():
"""Returns the current SVN workspace's trunk root path."""
_, root = GetSvnInfo()
return root + '/trunk' if root else None
def IsInGTestSvn():
project, _ = GetSvnInfo()
return project == 'googletest'
def IsInGMockSvn():
project, _ = GetSvnInfo()
return project == 'googlemock'
| mit | -3,722,771,936,124,970,500 | -2,726,017,757,413,231,600 | 34.168675 | 78 | 0.730045 | false |
EarToEarOak/RTLSDR-Scanner | nsis/test_urls.py | 3 | 1992 | #
# rtlsdr_scan
#
# http://eartoearoak.com/software/rtlsdr-scanner
#
# Copyright 2012 - 2015 Al Brown
#
# A frequency scanning GUI for the OsmoSDR rtl-sdr library at
# http://sdr.osmocom.org/trac/wiki/rtl-sdr
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import re
from pip._vendor import requests
def __load_nsis():
files = [f for f in os.listdir('.') if os.path.isfile(f)]
    files = [f for f in files if os.path.splitext(f)[1] == '.nsi']
return files[0]
def __find_urls(nsis):
rePath = re.compile('StrCpy \$UriPath \"(http[s]{0,1}.*?)\".*?StrCpy \$UriFile \"(.*?)\"',
re.DOTALL | re.MULTILINE)
reInetc = re.compile('inetc::get \"(http[s]{0,1}.+?)\"', re.MULTILINE)
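    # Illustrative NSIS fragment these patterns are written against
    # (hypothetical values; the path and file from rePath are joined with '/'):
    #   StrCpy $UriPath "http://example.com/files"
    #   StrCpy $UriFile "setup.exe"
    #   inetc::get "http://example.com/other/download.zip" "$TEMP\download.zip"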
f = open(nsis, 'r')
data = f.read()
matchPath = rePath.findall(data)
urlsPath = [f + '/' + p for f, p in matchPath]
urlsInetc = reInetc.findall(data)
urlsPath.extend(urlsInetc)
return urlsPath
def __test_urls(urls):
ok = True
for url in urls:
request = requests.head(url)
ok &= request.ok
print '{} - {} - {}'.format(request.ok,
url,
request.status_code)
if ok:
print 'Passed'
else:
print 'Failed'
if __name__ == '__main__':
print 'Testing installer URLs\n'
nsis = __load_nsis()
urls = __find_urls(nsis)
__test_urls(urls)
| gpl-3.0 | 2,313,780,736,282,288,600 | -2,293,741,884,602,416,600 | 25.210526 | 94 | 0.614458 | false |
projectexpert/pmis | analytic_resource_plan/wizard/resource_plan_line_make_purchase.py | 1 | 6559 | # Copyright 2019 LUXIM, Slovenia (Matjaž Mozetič)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import api, fields, models
from odoo.tools.translate import _
from odoo.exceptions import ValidationError
class ResourcePlanLineMakePurchase(models.TransientModel):
_name = "resource.plan.line.make.purchase"
_description = "Analytic resource plan line make purchase"
@api.multi
def default_partner(self):
context = self.env.context
case_id = context and context.get('active_ids', []) or []
case_id = case_id and case_id[0] or False
crm_id = self.env['crm.lead'].browse(case_id)
return crm_id and crm_id.partner_id.id or ''
partner_id = fields.Many2one(
comodel_name='res.partner',
string='Customer',
default=default_partner
)
update_quotation = fields.Boolean(
string='Update existing quotation'
)
@api.multi
def make_order(self):
context = self.env.context
case_id = context and context.get('active_ids', []) or []
case_id = case_id and case_id[0] or False
crm_id = self.env['crm.lead'].browse(case_id)
if self.update_quotation and crm_id and crm_id.order_ids:
for order in crm_id.order_ids:
if order.order_line:
order.order_line.unlink()
if crm_id and crm_id.account_id:
partner = crm_id.partner_id
purchase_order = self.env['purchase.order']
# TODO: check vendor pricelist for purchases field name
pricelist = partner.property_product_pricelist.id
partner_address = partner.address_get(
[
'default',
'invoice',
'delivery',
'contact'
]
)
purchase_order_values = {
'partner_id': partner.id,
'opportunity_id': crm_id.id,
'partner_invoice_id': partner_address['invoice'],
'partner_shipping_id': partner_address['delivery'],
'date_order': fields.datetime.now(),
}
for resource in crm_id.account_id.resource_ids:
purchase_order_values.update({
'client_order_ref': (
resource.account_id.name),
'origin': resource.account_id.code,
'account_id': resource.account_id.id
})
if resource:
purchase_order_values.update({
'pricelist_id': pricelist
})
# if resource and crm_id.account_id.pricelist_id:
# purchase_order_values.update({
# 'pricelist_id': resource.pricelist_id.id
# })
# else:
# purchase_order_values.update({
# 'pricelist_id': pricelist
# })
order_id = purchase_order.create(purchase_order_values)
order_lines = self.prepare_purchase_order_line(case_id, order_id.id)
self.create_purchase_order_line(order_lines)
return {
'domain': str([('id', 'in', [order_id.id])]),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'purchase.order',
'view_id': False,
'type': 'ir.actions.act_window',
'name': _('Quotation'),
'res_id': order_id.id
}
if crm_id and crm_id.order_ids:
return {
'domain': str([('id', 'in', crm_id.order_ids.ids)]),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'purchase.order',
'view_id': False,
'type': 'ir.actions.act_window',
'name': _('Quotation'),
'res_ids': crm_id.order_ids.ids
}
def prepare_purchase_order_line(self, case_id, order_id):
lines = []
case = self.env['crm.lead'].browse(case_id)
order_id = self.env['purchase.order'].browse(order_id)
linked_resources = (
case.account_id and case.account_id.resource_ids or []
)
if not linked_resources:
raise ValidationError(
_("There is no available resource to "
"make purchase order!")
)
for resource in linked_resources:
if resource.state in 'draft':
continue
for resource_line in resource:
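                # Note: 'resource_id' in the values below receives the wizard
                # record itself (self), not the resource line.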
vals = {
'order_id': order_id and order_id.id,
'product_id': resource_line.product_id.id,
'name': resource_line.name,
'product_qty': resource_line.unit_amount,
'product_uom': resource_line.product_uom_id.id,
'price_unit': resource_line.price_unit,
'date_planned': resource_line.date,
'account_analytic_id': resource_line.account_id.id,
'resource_id': self
}
lines.append(vals)
return lines
def create_purchase_order_line(self, order_lines):
purchaseorder_line_obj = self.env['purchase.order.line']
for line in order_lines:
purchaseorder_line_obj.create(line)
# noinspection PyAttributeOutsideInit
class CrmLead(models.Model):
_inherit = "crm.lead"
# project_id = fields.Many2one(
# comodel_name='project.project',
# string='Project',
# ondelete='set null',
# )
planned_cost_total = fields.Float(
compute='_compute_planned_cost_total',
string='Total planned cost'
)
account_id = fields.Many2one(
comodel_name='account.analytic.account',
string='Project Account',
ondelete='set null',
)
@api.multi
def _compute_planned_cost_total(self):
self.ensure_one()
self.planned_cost_total = sum(
[resource.price_total for resource in
self.account_id and self.account_id.resource_ids
if resource.state not in 'draft'])
@api.multi
@api.onchange('account_id')
def account_id_change(self):
self.ensure_one()
if self.account_id:
self.partner_id = self.account_id.partner_id.id
| agpl-3.0 | 5,914,910,853,196,400,000 | 3,458,263,246,368,239,600 | 36.255682 | 80 | 0.522648 | false |
robbiet480/home-assistant | homeassistant/components/zestimate/sensor.py | 19 | 4377 | """Support for zestimate data from zillow.com."""
from datetime import timedelta
import logging
import requests
import voluptuous as vol
import xmltodict
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
_RESOURCE = "http://www.zillow.com/webservice/GetZestimate.htm"
ATTRIBUTION = "Data provided by Zillow.com"
CONF_ZPID = "zpid"
DEFAULT_NAME = "Zestimate"
NAME = "zestimate"
ZESTIMATE = f"{DEFAULT_NAME}:{NAME}"
ICON = "mdi:home-variant"
ATTR_AMOUNT = "amount"
ATTR_CHANGE = "amount_change_30_days"
ATTR_CURRENCY = "amount_currency"
ATTR_LAST_UPDATED = "amount_last_updated"
ATTR_VAL_HI = "valuation_range_high"
ATTR_VAL_LOW = "valuation_range_low"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_ZPID): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
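# A minimal configuration.yaml entry accepted by the schema above
# (placeholder values):
#   sensor:
#     - platform: zestimate
#       api_key: YOUR_ZWS_ID
#       zpid:
#         - "1234567"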
SCAN_INTERVAL = timedelta(minutes=30)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Zestimate sensor."""
name = config.get(CONF_NAME)
properties = config[CONF_ZPID]
sensors = []
for zpid in properties:
params = {"zws-id": config[CONF_API_KEY]}
params["zpid"] = zpid
sensors.append(ZestimateDataSensor(name, params))
add_entities(sensors, True)
class ZestimateDataSensor(Entity):
"""Implementation of a Zestimate sensor."""
def __init__(self, name, params):
"""Initialize the sensor."""
self._name = name
self.params = params
self.data = None
self.address = None
self._state = None
@property
def unique_id(self):
"""Return the ZPID."""
return self.params["zpid"]
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name} {self.address}"
@property
def state(self):
"""Return the state of the sensor."""
try:
return round(float(self._state), 1)
except ValueError:
return None
@property
def device_state_attributes(self):
"""Return the state attributes."""
attributes = {}
if self.data is not None:
attributes = self.data
attributes["address"] = self.address
attributes[ATTR_ATTRIBUTION] = ATTRIBUTION
return attributes
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data and update the states."""
try:
response = requests.get(_RESOURCE, params=self.params, timeout=5)
data = response.content.decode("utf-8")
data_dict = xmltodict.parse(data).get(ZESTIMATE)
error_code = int(data_dict["message"]["code"])
if error_code != 0:
_LOGGER.error("The API returned: %s", data_dict["message"]["text"])
return
except requests.exceptions.ConnectionError:
_LOGGER.error("Unable to retrieve data from %s", _RESOURCE)
return
data = data_dict["response"][NAME]
details = {}
if "amount" in data and data["amount"] is not None:
details[ATTR_AMOUNT] = data["amount"]["#text"]
details[ATTR_CURRENCY] = data["amount"]["@currency"]
if "last-updated" in data and data["last-updated"] is not None:
details[ATTR_LAST_UPDATED] = data["last-updated"]
if "valueChange" in data and data["valueChange"] is not None:
details[ATTR_CHANGE] = int(data["valueChange"]["#text"])
if "valuationRange" in data and data["valuationRange"] is not None:
details[ATTR_VAL_HI] = int(data["valuationRange"]["high"]["#text"])
details[ATTR_VAL_LOW] = int(data["valuationRange"]["low"]["#text"])
self.address = data_dict["response"]["address"]["street"]
self.data = details
if self.data is not None:
self._state = self.data[ATTR_AMOUNT]
else:
self._state = None
_LOGGER.error("Unable to parase Zestimate data from response")
| apache-2.0 | -528,499,752,323,929,340 | -5,124,825,968,541,692,000 | 31.422222 | 83 | 0.621202 | false |
batisteo/pasportaservo | blog/admin.py | 3 | 1434 | from django.contrib import admin
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from .models import Post
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'content', 'pub_date', 'published']
fields = (
('title', 'slug', 'author'),
'content',
'body',
'description',
'pub_date',
)
prepopulated_fields = {'slug': ('title',)}
readonly_fields = ('body', 'description',)
date_hierarchy = 'created'
def published(self, obj):
return obj.published
published.short_description = _("Published")
published.admin_order_field = 'pub_date'
published.boolean = True
def formfield_for_foreignkey(self, db_field, request, **kwargs):
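        # Limit the selectable 'author' choices to the user making the request.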
if db_field.name == 'author':
kwargs['queryset'] = get_user_model().objects.filter(username=request.user.username)
return super().formfield_for_foreignkey(db_field, request, **kwargs)
def get_readonly_fields(self, request, obj=None):
if obj is not None:
return self.readonly_fields + ('author',)
return self.readonly_fields
def add_view(self, request, form_url="", extra_context=None):
data = request.GET.copy()
data['author'] = request.user
request.GET = data
return super().add_view(request, form_url="", extra_context=extra_context)
| agpl-3.0 | -2,978,903,412,663,024,000 | -7,123,226,788,729,367,000 | 33.142857 | 96 | 0.635983 | false |
ridfrustum/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/modeladmin/tests.py | 49 | 41260 | from datetime import date
from django import forms
from django.conf import settings
from django.contrib.admin.options import ModelAdmin, TabularInline, \
HORIZONTAL, VERTICAL
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.validation import validate
from django.contrib.admin.widgets import AdminDateWidget, AdminRadioSelect
from django.core.exceptions import ImproperlyConfigured
from django.forms.models import BaseModelFormSet
from django.forms.widgets import Select
from django.test import TestCase
from django.utils import unittest
from models import Band, Concert, ValidationTestModel, \
ValidationTestInlineModel
# None of the following tests really depend on the content of the request,
# so we'll just pass in None.
request = None
class ModelAdminTests(TestCase):
def setUp(self):
self.band = Band.objects.create(
name='The Doors',
bio='',
sign_date=date(1965, 1, 1),
)
self.site = AdminSite()
# form/fields/fieldsets interaction ##############################
def test_default_fields(self):
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['name', 'bio', 'sign_date'])
def test_default_fieldsets(self):
# fieldsets_add and fieldsets_change should return a special data structure that
# is used in the templates. They should generate the "right thing" whether we
# have specified a custom form, the fields argument, or nothing at all.
#
# Here's the default case. There are no custom form_add/form_change methods,
# no fields argument, and no fieldsets argument.
ma = ModelAdmin(Band, self.site)
self.assertEqual(ma.get_fieldsets(request),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name', 'bio', 'sign_date']})])
def test_field_arguments(self):
        # If we specify the fields argument, fieldsets_add and fieldsets_change should
        # just stick the fields into a fieldsets structure and return it.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual( ma.get_fieldsets(request),
[(None, {'fields': ['name']})])
self.assertEqual(ma.get_fieldsets(request, self.band),
[(None, {'fields': ['name']})])
def test_field_arguments_restricted_on_form(self):
        # If we specify fields or fieldsets, it should restrict the Form class
        # to the fields specified. This may cause errors to be raised in the db layer if
        # required model fields aren't in fields/fieldsets, but that's preferable to
        # ghost errors where you have a field in your Form class that isn't being
        # displayed because you forgot to add it to fields/fieldsets.
# Using `fields`.
class BandAdmin(ModelAdmin):
fields = ['name']
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(), ['name'])
self.assertEqual(ma.get_form(request, self.band).base_fields.keys(),
['name'])
# Using `fieldsets`.
class BandAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name']})]
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(), ['name'])
self.assertEqual(ma.get_form(request, self.band).base_fields.keys(),
['name'])
# Using `exclude`.
class BandAdmin(ModelAdmin):
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['name', 'sign_date'])
# You can also pass a tuple to `exclude`.
class BandAdmin(ModelAdmin):
exclude = ('bio',)
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['name', 'sign_date'])
# Using `fields` and `exclude`.
class BandAdmin(ModelAdmin):
fields = ['name', 'bio']
exclude = ['bio']
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['name'])
def test_custom_form_validation(self):
        # If we specify a form, it should use it, allowing custom validation to work
        # properly. This won't, however, break any of the admin widgets or media.
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class Meta:
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
ma = BandAdmin(Band, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['name', 'bio', 'sign_date', 'delete'])
self.assertEqual(
type(ma.get_form(request).base_fields['sign_date'].widget),
AdminDateWidget)
def test_queryset_override(self):
# If we need to override the queryset of a ModelChoiceField in our custom form
# make sure that RelatedFieldWidgetWrapper doesn't mess that up.
band2 = Band(name='The Beatles', bio='', sign_date=date(1962, 1, 1))
band2.save()
class ConcertAdmin(ModelAdmin):
pass
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertEqual(str(form["main_band"]),
'<select name="main_band" id="id_main_band">\n'
'<option value="" selected="selected">---------</option>\n'
'<option value="%d">The Beatles</option>\n'
'<option value="%d">The Doors</option>\n'
'</select>' % (band2.id, self.band.id))
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
def __init__(self, *args, **kwargs):
super(AdminConcertForm, self).__init__(*args, **kwargs)
self.fields["main_band"].queryset = Band.objects.filter(name='The Doors')
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
form = ma.get_form(request)()
self.assertEqual(str(form["main_band"]),
'<select name="main_band" id="id_main_band">\n'
'<option value="" selected="selected">---------</option>\n'
'<option value="%d">The Doors</option>\n'
'</select>' % self.band.id)
# radio_fields behavior ###########################################
def test_default_foreign_key_widget(self):
# First, without any radio_fields specified, the widgets for ForeignKey
        # and fields with choices specified ought to be basic Select widgets.
# ForeignKey widgets in the admin are wrapped with RelatedFieldWidgetWrapper so
# they need to be handled properly when type checking. For Select fields, all of
# the choices lists have a first entry of dashes.
cma = ModelAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
Select)
self.assertEqual(
list(cmafa.base_fields['main_band'].widget.choices),
[(u'', u'---------'), (self.band.id, u'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget), Select)
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[(u'', u'---------'), (self.band.id, u'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget), Select)
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[('', '---------'), (1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
Select)
self.assertEqual(
list(cmafa.base_fields['transport'].widget.choices),
[('', '---------'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
def test_foreign_key_as_radio_field(self):
# Now specify all the fields as radio_fields. Widgets should now be
# RadioSelect, and the choices list should have a first entry of 'None' if
# blank=True for the model field. Finally, the widget should have the
# 'radiolist' attr, and 'inline' as well if the field is specified HORIZONTAL.
class ConcertAdmin(ModelAdmin):
radio_fields = {
'main_band': HORIZONTAL,
'opening_band': VERTICAL,
'day': VERTICAL,
'transport': HORIZONTAL,
}
cma = ConcertAdmin(Concert, self.site)
cmafa = cma.get_form(request)
self.assertEqual(type(cmafa.base_fields['main_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['main_band'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['main_band'].widget.choices),
[(self.band.id, u'The Doors')])
self.assertEqual(
type(cmafa.base_fields['opening_band'].widget.widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['opening_band'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(
list(cmafa.base_fields['opening_band'].widget.choices),
[(u'', u'None'), (self.band.id, u'The Doors')])
self.assertEqual(type(cmafa.base_fields['day'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['day'].widget.attrs,
{'class': 'radiolist'})
self.assertEqual(list(cmafa.base_fields['day'].widget.choices),
[(1, 'Fri'), (2, 'Sat')])
self.assertEqual(type(cmafa.base_fields['transport'].widget),
AdminRadioSelect)
self.assertEqual(cmafa.base_fields['transport'].widget.attrs,
{'class': 'radiolist inline'})
self.assertEqual(list(cmafa.base_fields['transport'].widget.choices),
[('', u'None'), (1, 'Plane'), (2, 'Train'), (3, 'Bus')])
class AdminConcertForm(forms.ModelForm):
class Meta:
model = Concert
exclude = ('transport',)
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['main_band', 'opening_band', 'day'])
class AdminConcertForm(forms.ModelForm):
extra = forms.CharField()
class Meta:
model = Concert
fields = ['extra', 'transport']
class ConcertAdmin(ModelAdmin):
form = AdminConcertForm
ma = ConcertAdmin(Concert, self.site)
self.assertEqual(ma.get_form(request).base_fields.keys(),
['extra', 'transport'])
class ConcertInline(TabularInline):
form = AdminConcertForm
model = Concert
fk_name = 'main_band'
can_delete = True
class BandAdmin(ModelAdmin):
inlines = [
ConcertInline
]
ma = BandAdmin(Band, self.site)
self.assertEqual(
list(ma.get_formsets(request))[0]().forms[0].fields.keys(),
['extra', 'transport', 'id', 'DELETE', 'main_band'])
class ValidationTests(unittest.TestCase):
def test_validation_only_runs_in_debug(self):
# Ensure validation only runs when DEBUG = True
try:
settings.DEBUG = True
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
site = AdminSite()
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.",
site.register,
ValidationTestModel,
ValidationTestModelAdmin,
)
finally:
settings.DEBUG = False
site = AdminSite()
site.register(ValidationTestModel, ValidationTestModelAdmin)
def test_raw_id_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('name',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.raw_id_fields\[0\]', 'name' must be either a ForeignKey or ManyToManyField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
raw_id_fields = ('users',)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_fieldsets_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ({},)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = ((),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]' does not have exactly two elements.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", ()),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]\[1\]' must be a dictionary.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {}),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'fields' key is required in ValidationTestModelAdmin.fieldsets\[0\]\[1\] field options dict.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("non_existent_field",)}),)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.fieldsets\[0\]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("name",)}),)
validate(ValidationTestModelAdmin, ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = (("General", {"fields": ("name",)}),)
fields = ["name",]
self.assertRaisesRegexp(
ImproperlyConfigured,
"Both fieldsets and fields are specified in ValidationTestModelAdmin.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name', 'name']})]
self.assertRaisesRegexp(
ImproperlyConfigured,
"There are duplicate field\(s\) in ValidationTestModelAdmin.fieldsets",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
fields = ["name", "name"]
self.assertRaisesRegexp(
ImproperlyConfigured,
"There are duplicate field\(s\) in ValidationTestModelAdmin.fields",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_form_validation(self):
class FakeForm(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
form = FakeForm
self.assertRaisesRegexp(
ImproperlyConfigured,
"ValidationTestModelAdmin.form does not inherit from BaseModelForm.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('non_existent_field',)
}),
)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'BandAdmin.fieldsets\[0\]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
validate,
BandAdmin,
Band,
)
class BandAdmin(ModelAdmin):
fieldsets = (
('Band', {
'fields': ('name',)
}),
)
validate(BandAdmin, Band)
class AdminBandForm(forms.ModelForm):
class Meta:
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('non_existent_field',)
}),
)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'BandAdmin.fieldsets\[0]\[1\]\['fields'\]' refers to field 'non_existent_field' that is missing from the form.",
validate,
BandAdmin,
Band,
)
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class Meta:
model = Band
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('name', 'bio', 'sign_date', 'delete')
}),
)
validate(BandAdmin, Band)
def test_filter_vertical_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("non_existent_field",)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("name",)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_vertical\[0\]' must be a ManyToManyField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_vertical = ("users",)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_filter_horizontal_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("non_existent_field",)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("name",)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.filter_horizontal\[0\]' must be a ManyToManyField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
filter_horizontal = ("users",)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_radio_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = ()
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields' must be a dictionary.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"non_existent_field": None}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"name": None}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields\['name'\]' is neither an instance of ForeignKey nor does have choices set.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": None}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.radio_fields\['state'\]' is neither admin.HORIZONTAL nor admin.VERTICAL.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
radio_fields = {"state": VERTICAL}
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_prepopulated_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = ()
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields' must be a dictionary.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"non_existent_field": None}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ("non_existent_field",)}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields\['slug'\]\[0\]' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"users": ("name",)}
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.prepopulated_fields\['users'\]' is either a DateTimeField, ForeignKey or ManyToManyField. This isn't allowed.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_display_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"ValidationTestModelAdmin.list_display\[0\], 'non_existent_field' is not a callable or an attribute of 'ValidationTestModelAdmin' or found in the model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display = ('users',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display\[0\]', 'users' is a ManyToManyField which is not supported.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_display_links_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'non_existent_field' which is not defined in 'list_display'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_display_links = ('name',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_display_links\[0\]' refers to 'name' which is not defined in 'list_display'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def a_callable(obj):
pass
class ValidationTestModelAdmin(ModelAdmin):
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_filter_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_filter = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_filter\[0\]' refers to 'non_existent_field' which does not refer to a Field.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_filter = ('is_active',)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_per_page_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 'hello'
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_per_page' should be a integer.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_per_page = 100
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_search_fields_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
search_fields = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.search_fields' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_date_hierarchy_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'non_existent_field'
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.date_hierarchy' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'name'
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.date_hierarchy is neither an instance of DateField nor DateTimeField.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
date_hierarchy = 'pub_date'
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_ordering_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
ordering = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('non_existent_field',)
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering\[0\]' refers to field 'non_existent_field' that is missing from model 'ValidationTestModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?', 'name')
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.ordering' has the random ordering marker '\?', but contains other fields as well. Please either remove '\?' or the other fields.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('?',)
validate(ValidationTestModelAdmin, ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('band__name',)
validate(ValidationTestModelAdmin, ValidationTestModel)
class ValidationTestModelAdmin(ModelAdmin):
ordering = ('name',)
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_list_select_related_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = 1
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.list_select_related' should be a boolean.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
list_select_related = False
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_save_as_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
save_as = 1
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.save_as' should be a boolean.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
save_as = True
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_save_on_top_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = 1
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.save_on_top' should be a boolean.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestModelAdmin(ModelAdmin):
save_on_top = True
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_inlines_validation(self):
class ValidationTestModelAdmin(ModelAdmin):
inlines = 10
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(object):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines\[0\]' does not inherit from BaseModelAdmin.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
pass
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'model' is a required attribute of 'ValidationTestModelAdmin.inlines\[0\]'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class SomethingBad(object):
pass
class ValidationTestInline(TabularInline):
model = SomethingBad
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestModelAdmin.inlines\[0\].model' does not inherit from models.Model.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_fields_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = 10
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.fields' must be a list or tuple.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = ("non_existent_field",)
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.fields' refers to field 'non_existent_field' that is missing from the form.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
def test_fk_name_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "non_existent_field"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.fk_name' refers to field 'non_existent_field' that is missing from model 'ValidationTestInlineModel'.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = "parent"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_extra_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.extra' should be a integer.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_max_num_validation(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = "hello"
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.max_num' should be an integer or None \(default\).",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 2
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
def test_formset_validation(self):
class FakeFormSet(object):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = FakeFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertRaisesRegexp(
ImproperlyConfigured,
"'ValidationTestInline.formset' does not inherit from BaseModelFormSet.",
validate,
ValidationTestModelAdmin,
ValidationTestModel,
)
class RealModelFormSet(BaseModelFormSet):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = RealModelFormSet
class ValidationTestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
validate(ValidationTestModelAdmin, ValidationTestModel)
| gpl-3.0 | -8,226,127,710,496,715,000 | -7,446,814,207,071,528,000 | 32.57201 | 188 | 0.604823 | false |
liberatetheweb/blockedinrussia.org | code.py | 1 | 9146 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import web, bcrypt, redis, json, requests, dns.resolver, re, M2Crypto, cgi
from urlparse import urlsplit
from zbase62 import zbase62
web.config.debug = False
r_server = redis.Redis('localhost')
urls = (
'/', 'index',
'/check', 'check',
'/logout', 'logout',
'/register', 'register'
)
app = web.application(urls, globals())
session = web.session.Session(app, web.session.DiskStore('sessions'))
render = web.template.render('templates/', base='layout',)
proxies = {
"http": "http://localhost:8118",
"https": "http://localhost:8118",
}
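# All probes are routed through a local HTTP proxy on port 8118 (Privoxy's
# default port, conventionally chained to a Tor client) so that requests can
# be steered through specific Tor exit relays below.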
checkstr = [
u'Запрашиваемая страница заблокирована на основании <br> Постановления Правительства от 26.10.2012 №1101 <br>',
u'<h1><strong>Уважаемые пользователи!<br/><br/>Мы приносим свои извинения, но доступ к запрашиваемому ресурсу ограничен.</strong></h1>',
u'<p>Уважаемый абонент,<br /> Вы хотите перейти по адресу, который внесён в единый реестр запрещённых сайтов или доступ к нему <span id="more">заблокирован</span> судебным решением.</p>',
u'<div style="font-size: 13px; margin-top: 25px; font-weight: normal;">Адрес сайта Единого реестра доменных имен, указателей, страниц сайтов в сети "Интернет" и сетевых адресов, позволяющих идентифицировать сайты</br> в сети "Интернет", содержащие информацию, распространение которой в Российской Федерации запрещено: <a href="http://zapret-info.gov.ru/" target="_blank">zapret-info.gov.ru</a></br>Адрес Реестра нарушителей авторских прав: <a href="http://nap.rkn.gov.ru/reestr/" target="_blank">http://nap.rkn.gov.ru/reestr/</a>',
u'<img src="http://212.1.224.79/stop.jpg" width="672" height="180" alt="Внимание!" />',
u'<p>Access control configuration prevents your request from being allowed at this time. Please contact your service provider if you feel this is incorrect.</p>',
u'<p>Сайт, который вы хотите посетить, внесен в <u><a href="http://eais.rkn.gov.ru" style="color:#FF6600">единый',
u'либо в <a href="http://eais.rkn.gov.ru/" target="_blank">едином реестре</a>',
u'<p>Доступ к запрашиваемому Вами Интернет-ресурсу ограничен в соответствии с требованиями законодательства. Дополнительную информацию можно получить на сайте <a href="http://www.zapret-info.gov.ru./">www.zapret-info.gov.ru</a>.</p>',
u'материалов</a>, доступ к нему закрыт на основании решения суда РФ.',
u'<p>В соответствии с требованиями законодательства доступ к запрашиваемому Интернет-ресурсу <br>закрыт.</p>',
u'<p><b>В соответствии с требованиями законодательства Российской Федерации доступ к запрашиваемому Интернет-ресурсу',
u'<h5>Ссылка заблокирована по решению суда</h5><br><br><a href=\'http://ttk.ru/\'>ЗАО "Компания ТрансТелеКом"</a>',
u'Причину блокировки можно посмотреть в <a href="http://eais.rkn.gov.ru/">Едином Реестре</a>',
u':80/f/accept/\' not found...',
u'<a href="http://zapret-info.gov.ru"/><b>Постановление Правительства Российской Федерации от 26 октября 2012 г. N 1101</b>',
u'<p>Your cache administrator is <a href="mailto:webmaster?subject=CacheErrorInfo%20-%20ERR_CONNECT_FAIL&body=CacheHost%3A%20atel76.ru%0D%0AErrPage%3A%20ERR_CONNECT_FAIL%0D%0AErr%3A%20(110)%20Connection%20timed%20out%0D%0A',
u'<h5><a href=\'http://eais.rkn.gov.ru/\'>Ссылка заблокирована <br>в соответствии с законодательством РФ</a></h5><br><br><a href=\'http://ttk.ru/\'>ЗАО "Компания ТрансТелеКом"</a>',
u'<div class="section">\n Доступ к запрашиваемому вами интернет-ресурсу ограничен в \n соответствии с требованиями Законодательства Российской Федерации.',
u'<p>Доступ к запрашиваемому Вами Интернет-ресурсу ограничен по требованию правоохранительных органов в соответствии с законодательством и/или на основании решения суда.</p>',
u'<p>Доступ к запрашиваемому Вами Интернет-ресурсу ограничен в соответствии с требованиями законодательства. Дополнительную информацию можно получить на сайте <a href="/PDATransfer.axd?next_url=http%3a%2f%2fwww.zapret-info.gov.ru.%2f">www.zapret-info.gov.ru</a>.</p>'
]
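# checkstr holds HTML fragments scraped from the block pages of various
# Russian ISPs; a fetched page containing any fragment is counted as blocked.
# They are literal match patterns, so they must stay in the original Russian.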
class index:
def GET(self):
if session.get('login', False):
return render.index()
else:
return render.login('')
def POST(self):
data = web.input()
if not all(x in data for x in ('username', 'password')):
raise web.seeother('/')
if 'login' not in session:
session.login = 0
hashed = r_server.get('user:' + data.username)
if hashed is None:
session.login = 0
return render.login('No such user')
if bcrypt.hashpw(data.password.encode('utf-8'), hashed) == hashed:
session.login = 1
return render.index()
else:
session.login = 0
return render.login('Wrong password')
class logout:
def GET(self):
session.login = 0
raise web.seeother('/')
class check:
def GET(self):
if not session.get('login', False):
raise web.seeother('/')
data = web.input()
if 'url' not in data:
return 'Wrong parameters'
results = {}
results['results'] = []
try:
url = urlsplit(data.url)
except:
return 'Not a valid URL'
s = requests.Session()
s.proxies=proxies
results['ip'] = []
try:
answers = dns.resolver.query(url.hostname, 'A')
except:
return 'Can\'t get A records of hostname'
results['registry'] = []
regnumbers = []
for rdata in answers:
results['ip'].append(rdata.address)
regcheck = r_server.smembers('registry:' + rdata.address)
if regcheck:
regnumbers = regnumbers + list(regcheck)
regcheck = r_server.smembers('registry:' + url.hostname)
if regcheck:
regnumbers = regnumbers + list(regcheck)
for value in list(set(regnumbers)):
info = r_server.hgetall('registry:' + value)
results['registry'].append({
'ip': json.loads(info['ip']),
'url': json.loads(info['url']),
'authority': info['authority'],
'base': info['base'],
'date': info['date']
})
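# Probe the target once per autonomous system: pick a random Tor relay
# fingerprint recorded for that AS and route the request through it using
# Tor's ".exit" notation, keeping the original hostname in the Host header.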
for value in r_server.keys('as:*'):
fingerprint = r_server.srandmember(value).replace('relay:','')
s.headers.update({'host': url.hostname})
as_id = value.replace('as:','')
try:
r = s.get(url.scheme + '://' + url.hostname + '.' + fingerprint + '.exit' + url.path + '?' + url.query, verify=False)
print as_id
status = r.status_code
if status == 200:
if any(x in r.text for x in checkstr):
blocked = 'yes'
else:
blocked = 'no'
else:
blocked = 'maybe'
except:
status = 'fail'
blocked = 'dunno'
info = r_server.hgetall('relay:' + fingerprint)
results['results'].append({
'blocked': blocked,
'status': status,
'fingerprint': fingerprint,
'as_name': info['as_name'],
'lat': info['latitude'],
'lon': info['longitude']
})
return json.dumps(results)
class register:
def GET(self):
data = web.input()
if 'email' not in data:
return 'Wrong parameters'
if not re.match(r'[^@]+@[^@]+\.[^@]+', data.email):
return 'This is not email'
if r_server.sismember('nonregistred',data.email):
password = zbase62.b2a(M2Crypto.m2.rand_bytes(20))
hashed = bcrypt.hashpw(password, bcrypt.gensalt())
r_server.set('user:' + data.email,hashed)
r_server.srem('nonregistred',data.email)
return render.register(password,cgi.escape(data.email))
else:
return 'No such email'
if __name__ == "__main__":
app.run()
| cc0-1.0 | 2,158,448,776,455,601,200 | -875,055,786,221,788,200 | 45.746914 | 533 | 0.654034 | false |
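# Python 2/3 compatibility shims for Whoosh: byte/text literal helpers,
# iterator-protocol aliases, pickle/StringIO imports and a metaclass helper
# that work under both major versions.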
heeraj123/oh-mainline | vendor/packages/whoosh/src/whoosh/compat.py | 17 | 1735 | import sys
if sys.version_info[0] < 3:
PY3 = False
def b(s):
return s
import cStringIO as StringIO
StringIO = BytesIO = StringIO.StringIO
callable = callable
integer_types = (int, long)
iteritems = lambda o: o.iteritems()
itervalues = lambda o: o.itervalues()
iterkeys = lambda o: o.iterkeys()
from itertools import izip
long_type = long
next = lambda o: o.next()
import cPickle as pickle
from cPickle import dumps, loads, dump, load
string_type = basestring
text_type = unicode
unichr = unichr
from urllib import urlretrieve
def u(s):
return unicode(s, "unicode_escape")
def with_metaclass(meta, base=object):
class _WhooshBase(base):
__metaclass__ = meta
return _WhooshBase
xrange = xrange
zip_ = zip
else:
PY3 = True
import collections
def b(s):
return s.encode("latin-1")
import io
BytesIO = io.BytesIO
callable = lambda o: isinstance(o, collections.Callable)
exec_ = eval("exec")
integer_types = (int,)
iteritems = lambda o: o.items()
itervalues = lambda o: o.values()
iterkeys = lambda o: iter(o.keys())
izip = zip
long_type = int
next = next
import pickle
from pickle import dumps, loads, dump, load
StringIO = io.StringIO
string_type = str
text_type = str
unichr = chr
from urllib.request import urlretrieve
def u(s):
return s
def with_metaclass(meta, base=object):
ns = dict(base=base, meta=meta)
exec_("""class _WhooshBase(base, metaclass=meta):
pass""", ns)
return ns["_WhooshBase"]
xrange = range
zip_ = lambda * args: list(zip(*args))
| agpl-3.0 | -8,578,114,968,110,168,000 | -187,767,235,973,678,240 | 23.097222 | 60 | 0.610375 | false |
fighterCui/L4ReFiascoOC | l4/pkg/python/contrib/Lib/multiprocessing/pool.py | 52 | 17699 | #
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2007-2008, R Oudkerk --- see COPYING.txt
#
__all__ = ['Pool']
#
# Imports
#
import threading
import Queue
import itertools
import collections
import time
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Miscellaneous
#
job_counter = itertools.count()
def mapstar(args):
return map(*args)
#
# Code run by worker processes
#
def worker(inqueue, outqueue, initializer=None, initargs=()):
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
initializer(*initargs)
while 1:
try:
task = get()
except (EOFError, IOError):
debug('worker got EOFError or IOError -- exiting')
break
if task is None:
debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
try:
result = (True, func(*args, **kwds))
except Exception, e:
result = (False, e)
put((job, i, result))
#
# Class representing a process pool
#
class Pool(object):
'''
Class which supports an async version of the `apply()` builtin
'''
Process = Process
def __init__(self, processes=None, initializer=None, initargs=()):
self._setup_queues()
self._taskqueue = Queue.Queue()
self._cache = {}
self._state = RUN
if processes is None:
try:
processes = cpu_count()
except NotImplementedError:
processes = 1
self._pool = []
for i in range(processes):
w = self.Process(
target=worker,
args=(self._inqueue, self._outqueue, initializer, initargs)
)
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
self._task_handler = threading.Thread(
target=Pool._handle_tasks,
args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
)
self._task_handler.daemon = True
self._task_handler._state = RUN
self._task_handler.start()
self._result_handler = threading.Thread(
target=Pool._handle_results,
args=(self._outqueue, self._quick_get, self._cache)
)
self._result_handler.daemon = True
self._result_handler._state = RUN
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
self._task_handler, self._result_handler, self._cache),
exitpriority=15
)
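# Plumbing summary: the _handle_tasks thread drains _taskqueue into the
# workers' inqueue, workers put (job, i, result) tuples on the outqueue, and
# the _handle_results thread routes those back to the result objects
# registered in _cache. Finalize tears all of this down on terminate() or at
# interpreter exit.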
def _setup_queues(self):
from .queues import SimpleQueue
self._inqueue = SimpleQueue()
self._outqueue = SimpleQueue()
self._quick_put = self._inqueue._writer.send
self._quick_get = self._outqueue._reader.recv
def apply(self, func, args=(), kwds={}):
'''
Equivalent of `apply()` builtin
'''
assert self._state == RUN
return self.apply_async(func, args, kwds).get()
def map(self, func, iterable, chunksize=None):
'''
Equivalent of `map()` builtin
'''
assert self._state == RUN
return self.map_async(func, iterable, chunksize).get()
def imap(self, func, iterable, chunksize=1):
'''
Equivalent of `itertools.imap()` -- can be MUCH slower than `Pool.map()`
'''
assert self._state == RUN
if chunksize == 1:
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def imap_unordered(self, func, iterable, chunksize=1):
'''
Like `imap()` method but ordering of results is arbitrary
'''
assert self._state == RUN
if chunksize == 1:
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, func, (x,), {})
for i, x in enumerate(iterable)), result._set_length))
return result
else:
assert chunksize > 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(self._cache)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), result._set_length))
return (item for chunk in result for item in chunk)
def apply_async(self, func, args=(), kwds={}, callback=None):
'''
Asynchronous equivalent of `apply()` builtin
'''
assert self._state == RUN
result = ApplyResult(self._cache, callback)
self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
return result
def map_async(self, func, iterable, chunksize=None, callback=None):
'''
Asynchronous equivalent of `map()` builtin
'''
assert self._state == RUN
if not hasattr(iterable, '__len__'):
iterable = list(iterable)
if chunksize is None:
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
if extra:
chunksize += 1
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = MapResult(self._cache, chunksize, len(iterable), callback)
self._taskqueue.put((((result._job, i, mapstar, (x,), {})
for i, x in enumerate(task_batches)), None))
return result
@staticmethod
def _handle_tasks(taskqueue, put, outqueue, pool):
thread = threading.current_thread()
for taskseq, set_length in iter(taskqueue.get, None):
i = -1
for i, task in enumerate(taskseq):
if thread._state:
debug('task handler found thread._state != RUN')
break
try:
put(task)
except IOError:
debug('could not put task on queue')
break
else:
if set_length:
debug('doing set_length()')
set_length(i+1)
continue
break
else:
debug('task handler got sentinel')
try:
# tell result handler to finish when cache is empty
debug('task handler sending sentinel to result handler')
outqueue.put(None)
# tell workers there is no more work
debug('task handler sending sentinel to workers')
for p in pool:
put(None)
except IOError:
debug('task handler got IOError when sending sentinels')
debug('task handler exiting')
@staticmethod
def _handle_results(outqueue, get, cache):
thread = threading.current_thread()
while 1:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if thread._state:
assert thread._state == TERMINATE
debug('result handler found thread._state=TERMINATE')
break
if task is None:
debug('result handler got sentinel')
break
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
while cache and thread._state != TERMINATE:
try:
task = get()
except (IOError, EOFError):
debug('result handler got EOFError/IOError -- exiting')
return
if task is None:
debug('result handler ignoring extra sentinel')
continue
job, i, obj = task
try:
cache[job]._set(i, obj)
except KeyError:
pass
if hasattr(outqueue, '_reader'):
debug('ensuring that outqueue is not full')
# If we don't make room available in outqueue then
# attempts to add the sentinel (None) to outqueue may
# block. There is guaranteed to be no more than 2 sentinels.
try:
for i in range(10):
if not outqueue._reader.poll():
break
get()
except (IOError, EOFError):
pass
debug('result handler exiting: len(cache)=%s, thread._state=%s',
len(cache), thread._state)
@staticmethod
def _get_tasks(func, it, size):
it = iter(it)
while 1:
x = tuple(itertools.islice(it, size))
if not x:
return
yield (func, x)
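# For example, _get_tasks(f, range(5), 2) yields (f, (0, 1)), (f, (2, 3)) and
# finally (f, (4,)): islice consumes the underlying iterator in fixed-size
# chunks until it is exhausted.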
def __reduce__(self):
raise NotImplementedError(
'pool objects cannot be passed between processes or pickled'
)
def close(self):
debug('closing pool')
if self._state == RUN:
self._state = CLOSE
self._taskqueue.put(None)
def terminate(self):
debug('terminating pool')
self._state = TERMINATE
self._terminate()
def join(self):
debug('joining pool')
assert self._state in (CLOSE, TERMINATE)
self._task_handler.join()
self._result_handler.join()
for p in self._pool:
p.join()
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# task_handler may be blocked trying to put items on inqueue
debug('removing tasks from inqueue until task handler finished')
inqueue._rlock.acquire()
while task_handler.is_alive() and inqueue._reader.poll():
inqueue._reader.recv()
time.sleep(0)
@classmethod
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
task_handler, result_handler, cache):
# this is guaranteed to only be called once
debug('finalizing pool')
task_handler._state = TERMINATE
taskqueue.put(None) # sentinel
debug('helping task handler/workers to finish')
cls._help_stuff_finish(inqueue, task_handler, len(pool))
assert result_handler.is_alive() or len(cache) == 0
result_handler._state = TERMINATE
outqueue.put(None) # sentinel
if pool and hasattr(pool[0], 'terminate'):
debug('terminating workers')
for p in pool:
p.terminate()
debug('joining task handler')
task_handler.join(1e100)
debug('joining result handler')
result_handler.join(1e100)
if pool and hasattr(pool[0], 'terminate'):
debug('joining pool workers')
for p in pool:
p.join()
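# Typical lifecycle (an illustrative sketch, not part of the original module):
# pool = Pool(4)
# results = pool.map(f, data) # blocks until done; preserves input order
# pool.close() # no more tasks will be submitted
# pool.join() # wait for workers and handler threads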
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
def __init__(self, cache, callback):
self._cond = threading.Condition(threading.Lock())
self._job = job_counter.next()
self._cache = cache
self._ready = False
self._callback = callback
cache[self._job] = self
def ready(self):
return self._ready
def successful(self):
assert self._ready
return self._success
def wait(self, timeout=None):
self._cond.acquire()
try:
if not self._ready:
self._cond.wait(timeout)
finally:
self._cond.release()
def get(self, timeout=None):
self.wait(timeout)
if not self._ready:
raise TimeoutError
if self._success:
return self._value
else:
raise self._value
def _set(self, i, obj):
self._success, self._value = obj
if self._callback and self._success:
self._callback(self._value)
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
del self._cache[self._job]
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
def __init__(self, cache, chunksize, length, callback):
ApplyResult.__init__(self, cache, callback)
self._success = True
self._value = [None] * length
self._chunksize = chunksize
if chunksize <= 0:
self._number_left = 0
self._ready = True
else:
self._number_left = length//chunksize + bool(length % chunksize)
def _set(self, i, success_result):
success, result = success_result
if success:
self._value[i*self._chunksize:(i+1)*self._chunksize] = result
self._number_left -= 1
if self._number_left == 0:
if self._callback:
self._callback(self._value)
del self._cache[self._job]
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
else:
self._success = False
self._value = result
del self._cache[self._job]
self._cond.acquire()
try:
self._ready = True
self._cond.notify()
finally:
self._cond.release()
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
def __init__(self, cache):
self._cond = threading.Condition(threading.Lock())
self._job = job_counter.next()
self._cache = cache
self._items = collections.deque()
self._index = 0
self._length = None
self._unsorted = {}
cache[self._job] = self
def __iter__(self):
return self
def next(self, timeout=None):
self._cond.acquire()
try:
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
self._cond.wait(timeout)
try:
item = self._items.popleft()
except IndexError:
if self._index == self._length:
raise StopIteration
raise TimeoutError
finally:
self._cond.release()
success, value = item
if success:
return value
raise value
__next__ = next # XXX
def _set(self, i, obj):
self._cond.acquire()
try:
if self._index == i:
self._items.append(obj)
self._index += 1
while self._index in self._unsorted:
obj = self._unsorted.pop(self._index)
self._items.append(obj)
self._index += 1
self._cond.notify()
else:
self._unsorted[i] = obj
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
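# _set buffers out-of-order chunks in _unsorted and releases them to _items
# only once every earlier index has arrived; this is what preserves input
# order for imap(), while IMapUnorderedIterator below skips the bookkeeping.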
def _set_length(self, length):
self._cond.acquire()
try:
self._length = length
if self._index == self._length:
self._cond.notify()
del self._cache[self._job]
finally:
self._cond.release()
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
def _set(self, i, obj):
self._cond.acquire()
try:
self._items.append(obj)
self._index += 1
self._cond.notify()
if self._index == self._length:
del self._cache[self._job]
finally:
self._cond.release()
#
#
#
class ThreadPool(Pool):
from .dummy import Process
def __init__(self, processes=None, initializer=None, initargs=()):
Pool.__init__(self, processes, initializer, initargs)
def _setup_queues(self):
self._inqueue = Queue.Queue()
self._outqueue = Queue.Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
@staticmethod
def _help_stuff_finish(inqueue, task_handler, size):
# put sentinels at head of inqueue to make workers finish
inqueue.not_empty.acquire()
try:
inqueue.queue.clear()
inqueue.queue.extend([None] * size)
inqueue.not_empty.notify_all()
finally:
inqueue.not_empty.release()
| gpl-2.0 | -381,087,472,164,867,800 | -3,974,093,815,428,389,400 | 28.696309 | 80 | 0.526301 | false |
JensTimmerman/pyopenssl | OpenSSL/test/util.py | 4 | 16826 | # Copyright (C) Jean-Paul Calderone
# Copyright (C) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Helpers for the OpenSSL test suite, largely copied from
U{Twisted<http://twistedmatrix.com/>}.
"""
import shutil
import traceback
import os, os.path
from tempfile import mktemp
from unittest import TestCase
import sys
from OpenSSL._util import exception_from_error_queue
from OpenSSL.crypto import Error
try:
import memdbg
except Exception:
class _memdbg(object): heap = None
memdbg = _memdbg()
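# memdbg is an optional heap tracker; when the import fails, the stub above
# leaves heap as None, which run() checks in order to skip leak detection.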
from OpenSSL._util import ffi, lib, byte_string as b
class TestCase(TestCase):
"""
:py:class:`TestCase` adds useful testing functionality beyond what is available
from the standard library :py:class:`unittest.TestCase`.
"""
def run(self, result):
run = super(TestCase, self).run
if memdbg.heap is None:
return run(result)
# Run the test as usual
before = set(memdbg.heap)
run(result)
# Clean up some long-lived allocations so they won't be reported as
# memory leaks.
lib.CRYPTO_cleanup_all_ex_data()
lib.ERR_remove_thread_state(ffi.NULL)
after = set(memdbg.heap)
if not after - before:
# No leaks, fast succeed
return
if result.wasSuccessful():
# If it passed, run it again with memory debugging
before = set(memdbg.heap)
run(result)
# Clean up some long-lived allocations so they won't be reported as
# memory leaks.
lib.CRYPTO_cleanup_all_ex_data()
lib.ERR_remove_thread_state(ffi.NULL)
after = set(memdbg.heap)
self._reportLeaks(after - before, result)
def _reportLeaks(self, leaks, result):
def format_leak(p):
stacks = memdbg.heap[p]
# Eventually look at multiple stacks for the realloc() case. For
# now just look at the original allocation location.
(size, python_stack, c_stack) = stacks[0]
stack = traceback.format_list(python_stack)[:-1]
# c_stack looks something like this (interesting parts indicated
# with inserted arrows not part of the data):
#
# /home/exarkun/Projects/pyOpenSSL/branches/use-opentls/__pycache__/_cffi__x89095113xb9185b9b.so(+0x12cf) [0x7fe2e20582cf]
# /home/exarkun/Projects/cpython/2.7/python(PyCFunction_Call+0x8b) [0x56265a]
# /home/exarkun/Projects/cpython/2.7/python() [0x4d5f52]
# /home/exarkun/Projects/cpython/2.7/python(PyEval_EvalFrameEx+0x753b) [0x4d0e1e]
# /home/exarkun/Projects/cpython/2.7/python() [0x4d6419]
# /home/exarkun/Projects/cpython/2.7/python() [0x4d6129]
# /home/exarkun/Projects/cpython/2.7/python(PyEval_EvalFrameEx+0x753b) [0x4d0e1e]
# /home/exarkun/Projects/cpython/2.7/python(PyEval_EvalCodeEx+0x1043) [0x4d3726]
# /home/exarkun/Projects/cpython/2.7/python() [0x55fd51]
# /home/exarkun/Projects/cpython/2.7/python(PyObject_Call+0x7e) [0x420ee6]
# /home/exarkun/Projects/cpython/2.7/python(PyEval_CallObjectWithKeywords+0x158) [0x4d56ec]
# /home/exarkun/.local/lib/python2.7/site-packages/cffi-0.5-py2.7-linux-x86_64.egg/_cffi_backend.so(+0xe96e) [0x7fe2e38be96e]
# /usr/lib/x86_64-linux-gnu/libffi.so.6(ffi_closure_unix64_inner+0x1b9) [0x7fe2e36ad819]
# /usr/lib/x86_64-linux-gnu/libffi.so.6(ffi_closure_unix64+0x46) [0x7fe2e36adb7c]
# /lib/x86_64-linux-gnu/libcrypto.so.1.0.0(CRYPTO_malloc+0x64) [0x7fe2e1cef784] <------ end interesting
# /lib/x86_64-linux-gnu/libcrypto.so.1.0.0(lh_insert+0x16b) [0x7fe2e1d6a24b] .
# /lib/x86_64-linux-gnu/libcrypto.so.1.0.0(+0x61c18) [0x7fe2e1cf0c18] .
# /lib/x86_64-linux-gnu/libcrypto.so.1.0.0(+0x625ec) [0x7fe2e1cf15ec] .
# /lib/x86_64-linux-gnu/libcrypto.so.1.0.0(DSA_new_method+0xe6) [0x7fe2e1d524d6] .
# /lib/x86_64-linux-gnu/libcrypto.so.1.0.0(DSA_generate_parameters+0x3a) [0x7fe2e1d5364a] <------ begin interesting
# /home/exarkun/Projects/opentls/trunk/tls/c/__pycache__/_cffi__x305d4698xb539baaa.so(+0x1f397) [0x7fe2df84d397]
# /home/exarkun/Projects/cpython/2.7/python(PyCFunction_Call+0x8b) [0x56265a]
# /home/exarkun/Projects/cpython/2.7/python() [0x4d5f52]
# /home/exarkun/Projects/cpython/2.7/python(PyEval_EvalFrameEx+0x753b) [0x4d0e1e]
# /home/exarkun/Projects/cpython/2.7/python() [0x4d6419]
# ...
#
# Notice the stack is upside down compared to a Python traceback.
# Identify the start and end of interesting bits and stuff it into the stack we report.
saved = list(c_stack)
# Figure the first interesting frame will be after the cffi-compiled module
while c_stack and '/__pycache__/_cffi__' not in c_stack[-1]:
c_stack.pop()
# Figure the last interesting frame will always be CRYPTO_malloc,
# since that's where we hooked in to things.
while c_stack and 'CRYPTO_malloc' not in c_stack[0] and 'CRYPTO_realloc' not in c_stack[0]:
c_stack.pop(0)
if c_stack:
c_stack.reverse()
else:
c_stack = saved[::-1]
stack.extend([frame + "\n" for frame in c_stack])
stack.insert(0, "Leaked (%s) at:\n")
return "".join(stack)
if leaks:
unique_leaks = {}
for p in leaks:
size = memdbg.heap[p][-1][0]
new_leak = format_leak(p)
if new_leak not in unique_leaks:
unique_leaks[new_leak] = [(size, p)]
else:
unique_leaks[new_leak].append((size, p))
memdbg.free(p)
for (stack, allocs) in unique_leaks.iteritems():
allocs_accum = []
for (size, pointer) in allocs:
addr = int(ffi.cast('uintptr_t', pointer))
allocs_accum.append("%d@0x%x" % (size, addr))
allocs_report = ", ".join(sorted(allocs_accum))
result.addError(
self,
(None, Exception(stack % (allocs_report,)), None))
def tearDown(self):
"""
Clean up any files or directories created using :py:meth:`TestCase.mktemp`.
Subclasses must invoke this method if they override it or the
cleanup will not occur.
"""
if False and self._temporaryFiles is not None:
for temp in self._temporaryFiles:
if os.path.isdir(temp):
shutil.rmtree(temp)
elif os.path.exists(temp):
os.unlink(temp)
try:
exception_from_error_queue(Error)
except Error:
e = sys.exc_info()[1]
if e.args != ([],):
self.fail("Left over errors in OpenSSL error queue: " + repr(e))
def assertIsInstance(self, instance, classOrTuple, message=None):
"""
Fail if C{instance} is not an instance of the given class or of
one of the given classes.
@param instance: the object to test the type (first argument of the
C{isinstance} call).
@type instance: any.
@param classOrTuple: the class or classes to test against (second
argument of the C{isinstance} call).
@type classOrTuple: class, type, or tuple.
@param message: Custom text to include in the exception text if the
assertion fails.
"""
if not isinstance(instance, classOrTuple):
if message is None:
suffix = ""
else:
suffix = ": " + message
self.fail("%r is not an instance of %s%s" % (
instance, classOrTuple, suffix))
def failUnlessIn(self, containee, container, msg=None):
"""
Fail the test if :py:data:`containee` is not found in :py:data:`container`.
:param containee: the value that should be in :py:class:`container`
:param container: a sequence type, or in the case of a mapping type,
will follow semantics of 'if key in dict.keys()'
:param msg: if msg is None, then the failure message will be
'%r not in %r' % (first, second)
"""
if containee not in container:
raise self.failureException(msg or "%r not in %r"
% (containee, container))
return containee
assertIn = failUnlessIn
def assertNotIn(self, containee, container, msg=None):
"""
Fail the test if C{containee} is found in C{container}.
@param containee: the value that should not be in C{container}
@param container: a sequence type, or in the case of a mapping type,
will follow semantics of 'if key in dict.keys()'
@param msg: if msg is None, then the failure message will be
'%r in %r' % (first, second)
"""
if containee in container:
raise self.failureException(msg or "%r in %r"
% (containee, container))
return containee
failIfIn = assertNotIn
def failUnlessIdentical(self, first, second, msg=None):
"""
Fail the test if :py:data:`first` is not :py:data:`second`. This is an
object-identity-equality test, not an object equality
(i.e. :py:func:`__eq__`) test.
:param msg: if msg is None, then the failure message will be
'%r is not %r' % (first, second)
"""
if first is not second:
raise self.failureException(msg or '%r is not %r' % (first, second))
return first
assertIdentical = failUnlessIdentical
def failIfIdentical(self, first, second, msg=None):
"""
Fail the test if :py:data:`first` is :py:data:`second`. This is an
object-identity-equality test, not an object equality
(i.e. :py:func:`__eq__`) test.
:param msg: if msg is None, then the failure message will be
'%r is %r' % (first, second)
"""
if first is second:
raise self.failureException(msg or '%r is %r' % (first, second))
return first
assertNotIdentical = failIfIdentical
def failUnlessRaises(self, exception, f, *args, **kwargs):
"""
Fail the test unless calling the function :py:data:`f` with the given
:py:data:`args` and :py:data:`kwargs` raises :py:data:`exception`. The
failure will report the traceback and call stack of the unexpected
exception.
:param exception: exception type that is to be expected
:param f: the function to call
:return: The raised exception instance, if it is of the given type.
:raise self.failureException: Raised if the function call does
not raise an exception or if it raises an exception of a
different type.
"""
try:
result = f(*args, **kwargs)
except exception:
inst = sys.exc_info()[1]
return inst
except:
raise self.failureException('%s raised instead of %s'
% (sys.exc_info()[0],
exception.__name__,
))
else:
raise self.failureException('%s not raised (%r returned)'
% (exception.__name__, result))
assertRaises = failUnlessRaises
_temporaryFiles = None
def mktemp(self):
"""
Pathetic substitute for twisted.trial.unittest.TestCase.mktemp.
"""
if self._temporaryFiles is None:
self._temporaryFiles = []
temp = b(mktemp(dir="."))
self._temporaryFiles.append(temp)
return temp
# Other stuff
def assertConsistentType(self, theType, name, *constructionArgs):
"""
Perform various assertions about :py:data:`theType` to ensure that it is a
well-defined type. This is useful for extension types, where it's
pretty easy to do something wacky. If something about the type is
unusual, an exception will be raised.
:param theType: The type object about which to make assertions.
:param name: A string giving the name of the type.
:param constructionArgs: Positional arguments to use with :py:data:`theType` to
create an instance of it.
"""
self.assertEqual(theType.__name__, name)
self.assertTrue(isinstance(theType, type))
instance = theType(*constructionArgs)
self.assertIdentical(type(instance), theType)
class EqualityTestsMixin(object):
"""
A mixin defining tests for the standard implementation of C{==} and C{!=}.
"""
def anInstance(self):
"""
Return an instance of the class under test. Each call to this method
must return a different object. All objects returned must be equal to
each other.
"""
raise NotImplementedError()
def anotherInstance(self):
"""
Return an instance of the class under test. Each call to this method
must return a different object. The objects must not be equal to the
objects returned by C{anInstance}. They may or may not be equal to
each other (they will not be compared against each other).
"""
raise NotImplementedError()
def test_identicalEq(self):
"""
An object compares equal to itself using the C{==} operator.
"""
o = self.anInstance()
self.assertTrue(o == o)
def test_identicalNe(self):
"""
An object doesn't compare not equal to itself using the C{!=} operator.
"""
o = self.anInstance()
self.assertFalse(o != o)
def test_sameEq(self):
"""
Two objects that are equal to each other compare equal to each other
using the C{==} operator.
"""
a = self.anInstance()
b = self.anInstance()
self.assertTrue(a == b)
def test_sameNe(self):
"""
Two objects that are equal to each other do not compare not equal to
each other using the C{!=} operator.
"""
a = self.anInstance()
b = self.anInstance()
self.assertFalse(a != b)
def test_differentEq(self):
"""
Two objects that are not equal to each other do not compare equal to
each other using the C{==} operator.
"""
a = self.anInstance()
b = self.anotherInstance()
self.assertFalse(a == b)
def test_differentNe(self):
"""
Two objects that are not equal to each other compare not equal to each
other using the C{!=} operator.
"""
a = self.anInstance()
b = self.anotherInstance()
self.assertTrue(a != b)
def test_anotherTypeEq(self):
"""
The object does not compare equal to an object of an unrelated type
(which does not implement the comparison) using the C{==} operator.
"""
a = self.anInstance()
b = object()
self.assertFalse(a == b)
def test_anotherTypeNe(self):
"""
The object compares not equal to an object of an unrelated type (which
does not implement the comparison) using the C{!=} operator.
"""
a = self.anInstance()
b = object()
self.assertTrue(a != b)
def test_delegatedEq(self):
"""
The result of comparison using C{==} is delegated to the right-hand
operand if it is of an unrelated type.
"""
class Delegate(object):
def __eq__(self, other):
# Do something crazy and obvious.
return [self]
a = self.anInstance()
b = Delegate()
self.assertEqual(a == b, [b])
def test_delegateNe(self):
"""
The result of comparison using C{!=} is delegated to the right-hand
operand if it is of an unrelated type.
"""
class Delegate(object):
def __ne__(self, other):
# Do something crazy and obvious.
return [self]
a = self.anInstance()
b = Delegate()
self.assertEqual(a != b, [b])
| apache-2.0 | -1,132,062,763,748,964,700 | -3,601,975,940,888,293,400 | 36.474388 | 137 | 0.574528 | false |
esikachev/sahara-backup | sahara/service/edp/oozie/workflow_creator/shell_workflow.py | 9 | 1685 | # Copyright (c) 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service.edp.oozie.workflow_creator import base_workflow
from sahara.utils import xmlutils as x
class ShellWorkflowCreator(base_workflow.OozieWorkflowCreator):
SHELL_XMLNS = {"xmlns": "uri:oozie:shell-action:0.1"}
def __init__(self):
super(ShellWorkflowCreator, self).__init__('shell')
def build_workflow_xml(self, script_name, prepare={},
job_xml=None, configuration=None, env_vars={},
arguments=[], files=[]):
x.add_attributes_to_element(self.doc, self.tag_name, self.SHELL_XMLNS)
for k in sorted(prepare):
self._add_to_prepare_element(k, prepare[k])
self._add_configuration_elements(configuration)
x.add_text_element_to_tag(self.doc, self.tag_name, 'exec', script_name)
for arg in arguments:
x.add_text_element_to_tag(self.doc, self.tag_name, 'argument', arg)
x.add_equal_separated_dict(self.doc, self.tag_name,
'env-var', env_vars)
self._add_files_and_archives(files + [script_name], [])
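# Illustrative use (a sketch, not part of the original module): build a shell
# action that runs cleanup.sh with one argument and one environment variable.
#
# creator = ShellWorkflowCreator()
# creator.build_workflow_xml('cleanup.sh', arguments=['fast'],
# env_vars={'MODE': 'batch'})
#
# The call populates creator.doc (created by the base workflow creator) with
# an Oozie <shell> action in the uri:oozie:shell-action:0.1 namespace.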
| apache-2.0 | 5,577,523,941,725,258,000 | 5,192,090,481,586,549,000 | 36.444444 | 79 | 0.661721 | false |
ahmetcemturan/SFACT | skeinforge_application/skeinforge_plugins/craft_plugins/chop.py | 8 | 10424 | """
This page is in the table of contents.
Chop is a script to chop a shape into svg slice layers.
==Settings==
===Add Layer Template to SVG===
Default is on.
When selected, the layer template will be added to the svg output, which adds javascript control boxes. So 'Add Layer Template to SVG' should be selected when the svg will be viewed in a browser.
When off, no controls will be added, the svg output will only include the fabrication paths. So 'Add Layer Template to SVG' should be deselected when the svg will be used by other software, like Inkscape.
===Add Extra Top Layer if Necessary===
Default is on.
When selected, chop will add an extra layer at the very top of the object if the top of the object is more than half the layer height above the first slice. This is so the cutting tool doesn't cut too deeply through the top of the object on its first pass.
===Extra Decimal Places===
Default is two.
Defines the number of extra decimal places export will output compared to the number of decimal places in the layer height. The higher the 'Extra Decimal Places', the more significant figures the output numbers will have.
===Import Coarseness===
Default is one.
When a triangle mesh has holes in it, the triangle mesh slicer switches over to a slow algorithm that spans gaps in the mesh. The higher the 'Import Coarseness' setting, the wider the gaps in the mesh it will span. An import coarseness of one means it will span gaps of the edge width.
===Layer Height===
Default is 0.4 mm.
Defines the height of the layer, this is the most important chop setting.
===Layers===
Chop slices from top to bottom. To get only the bottom layer, set the "Layers From" to minus one. The 'Layers From' until 'Layers To' range is a python slice.
====Layers From====
Default is zero.
Defines the index of the top layer that will be chopped. If the 'Layers From' is the default zero, the carving will start from the top layer. If the 'Layers From' index is negative, then the carving will start from the 'Layers From' index above the bottom layer.
====Layers To====
Default is a huge number, which will be limited to the highest index number.
Defines the index of the bottom layer that will be chopped. If the 'Layers To' index is a huge number like the default, the carving will go to the bottom of the model. If the 'Layers To' index is negative, then the carving will go to the 'Layers To' index above the bottom layer.
===Mesh Type===
Default is 'Correct Mesh'.
====Correct Mesh====
When selected, the mesh will be accurately chopped, and if a hole is found, chop will switch over to the algorithm that spans gaps.
====Unproven Mesh====
When selected, chop will use the gap spanning algorithm from the start. The problem with the gap spanning algorithm is that it will span gaps, even if there is not actually a gap in the model.
===Perimeter Width===
Default is 2 mm.
Defines the width of the edge.
===SVG Viewer===
Default is webbrowser.
If the 'SVG Viewer' is set to the default 'webbrowser', the scalable vector graphics file will be sent to the default browser to be opened. If the 'SVG Viewer' is set to a program name, the scalable vector graphics file will be sent to that program to be opened.
==Examples==
The following examples chop the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and chop.py.
> python chop.py
This brings up the chop dialog.
> python chop.py Screw Holder Bottom.stl
The chop tool is parsing the file:
Screw Holder Bottom.stl
..
The chop tool has created the file:
.. Screw Holder Bottom_chop.svg
"""
from __future__ import absolute_import
try:
import psyco
psyco.full()
except:
pass
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from fabmetheus_utilities import svg_writer
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import os
import sys
import time
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText( fileName, gcodeText = '', repository=None):
"Get chopped text."
if fileName.endswith('.svg'):
gcodeText = archive.getTextIfEmpty(fileName, gcodeText)
if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'chop'):
return gcodeText
carving = svg_writer.getCarving(fileName)
if carving == None:
return ''
if repository == None:
repository = ChopRepository()
settings.getReadRepository(repository)
return ChopSkein().getCarvedSVG( carving, fileName, repository )
def getNewRepository():
'Get new repository.'
return ChopRepository()
def writeOutput(fileName, shouldAnalyze=True):
"Chop a GNU Triangulated Surface file. If no fileName is specified, chop the first GNU Triangulated Surface file in this folder."
startTime = time.time()
print('File ' + archive.getSummarizedFileName(fileName) + ' is being chopped.')
repository = ChopRepository()
settings.getReadRepository(repository)
chopGcode = getCraftedText( fileName, '', repository )
if chopGcode == '':
return
suffixFileName = fileName[ : fileName.rfind('.') ] + '_chop.svg'
suffixDirectoryName = os.path.dirname(suffixFileName)
suffixReplacedBaseName = os.path.basename(suffixFileName).replace(' ', '_')
suffixFileName = os.path.join( suffixDirectoryName, suffixReplacedBaseName )
archive.writeFileText( suffixFileName, chopGcode )
print('The chopped file is saved as ' + archive.getSummarizedFileName(suffixFileName) )
print('It took %s to chop the file.' % euclidean.getDurationString( time.time() - startTime ) )
if shouldAnalyze:
settings.openSVGPage( suffixFileName, repository.svgViewer.value )
class ChopRepository:
"A class to handle the chop settings."
def __init__(self):
"Set the default settings, execute title & settings fileName."
skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.chop.html', self )
self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getTranslatorFileTypeTuples(), 'Open File to be Chopped', self, '')
self.addExtraTopLayerIfNecessary = settings.BooleanSetting().getFromValue('Add Extra Top Layer if Necessary', self, True )
self.addLayerTemplateToSVG = settings.BooleanSetting().getFromValue('Add Layer Template to SVG', self, True)
self.edgeWidth = settings.FloatSpin().getFromValue( 0.4, 'Edge Width (mm):', self, 4.0, 2.0 )
self.extraDecimalPlaces = settings.FloatSpin().getFromValue(0.0, 'Extra Decimal Places (float):', self, 3.0, 2.0)
self.importCoarseness = settings.FloatSpin().getFromValue( 0.5, 'Import Coarseness (ratio):', self, 2.0, 1.0 )
self.layerHeight = settings.FloatSpin().getFromValue( 0.1, 'Layer Height (mm):', self, 1.0, 0.4 )
self.layersFrom = settings.IntSpin().getFromValue( 0, 'Layers From (index):', self, 20, 0 )
self.layersTo = settings.IntSpin().getSingleIncrementFromValue( 0, 'Layers To (index):', self, 912345678, 912345678 )
self.meshTypeLabel = settings.LabelDisplay().getFromName('Mesh Type: ', self, )
importLatentStringVar = settings.LatentStringVar()
self.correctMesh = settings.Radio().getFromRadio( importLatentStringVar, 'Correct Mesh', self, True )
self.unprovenMesh = settings.Radio().getFromRadio( importLatentStringVar, 'Unproven Mesh', self, False )
self.svgViewer = settings.StringSetting().getFromValue('SVG Viewer:', self, 'webbrowser')
settings.LabelSeparator().getFromRepository(self)
self.executeTitle = 'Chop'
def execute(self):
"Chop button has been clicked."
fileNames = skeinforge_polyfile.getFileOrDirectoryTypes(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
for fileName in fileNames:
writeOutput(fileName)
class ChopSkein:
"A class to chop a carving."
def addExtraTopLayerIfNecessary( self, carving, layerHeight, loopLayers ):
"Add extra top layer if necessary."
topRotatedBoundaryLayer = loopLayers[-1]
cuttingSafeHeight = topRotatedBoundaryLayer.z + 0.5001 * layerHeight
if cuttingSafeHeight > carving.getCarveCornerMaximum().z:
return
extraTopRotatedBoundaryLayer = topRotatedBoundaryLayer.getCopyAtZ( topRotatedBoundaryLayer.z + layerHeight )
loopLayers.append( extraTopRotatedBoundaryLayer )
def getCarvedSVG( self, carving, fileName, repository ):
"Parse gnu triangulated surface text and store the chopped gcode."
layerHeight = repository.layerHeight.value
edgeWidth = repository.edgeWidth.value
carving.setCarveLayerHeight( layerHeight )
importRadius = 0.5 * repository.importCoarseness.value * abs(edgeWidth)
carving.setCarveImportRadius(max(importRadius, 0.001 * layerHeight))
carving.setCarveIsCorrectMesh( repository.correctMesh.value )
loopLayers = carving.getCarveBoundaryLayers()
if len( loopLayers ) < 1:
print('Warning, there are no slices for the model, this could be because the model is too small for the Layer Height.')
return ''
if repository.addExtraTopLayerIfNecessary.value:
self.addExtraTopLayerIfNecessary( carving, layerHeight, loopLayers )
loopLayers.reverse()
layerHeight = carving.getCarveLayerHeight()
decimalPlacesCarried = euclidean.getDecimalPlacesCarried(repository.extraDecimalPlaces.value, layerHeight)
svgWriter = svg_writer.SVGWriter(
repository.addLayerTemplateToSVG.value,
carving.getCarveCornerMaximum(),
carving.getCarveCornerMinimum(),
decimalPlacesCarried,
carving.getCarveLayerHeight(),
edgeWidth)
truncatedRotatedBoundaryLayers = svg_writer.getTruncatedRotatedBoundaryLayers(loopLayers, repository)
return svgWriter.getReplacedSVGTemplate( fileName, truncatedRotatedBoundaryLayers, 'chop', carving.getFabmetheusXML())
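# Worked example of the import-radius computation in getCarvedSVG above,
# assuming the last argument of settings.FloatSpin().getFromValue() is the
# setting's default value (importCoarseness=1.0, edgeWidth=2.0, layerHeight=0.4):
#   importRadius = 0.5 * 1.0 * abs(2.0) = 1.0
#   floor        = 0.001 * 0.4          = 0.0004
#   carving.setCarveImportRadius(max(1.0, 0.0004))  # -> 1.0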
def main():
"Display the chop dialog."
if len(sys.argv) > 1:
writeOutput(' '.join(sys.argv[1 :]))
else:
settings.startMainLoopFromConstructor(getNewRepository())
if __name__ == "__main__":
main()
| agpl-3.0 | -6,115,893,209,970,606,000 | 6,494,151,697,429,118,000 | 45.954955 | 287 | 0.767172 | false |
jiangzhuo/kbengine | kbe/src/lib/python/Lib/encodings/cp775.py | 272 | 34476 | """ Python Character Mapping Codec cp775 generated from 'VENDORS/MICSFT/PC/CP775.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp775',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
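# Round-trip sketch (assumes this codec is registered under the name 'cp775',
# as it is when this module ships inside the standard library's encodings
# package):
#   >>> '\u0100\xa3'.encode('cp775')   # A WITH MACRON, POUND SIGN
#   b'\xa0\x9c'
#   >>> b'\x91'.decode('cp775')
#   'æ'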
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0096: 0x00a2, # CENT SIGN
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x00a4, # CURRENCY SIGN
0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00a7: 0x00a6, # BROKEN BAR
0x00a8: 0x00a9, # COPYRIGHT SIGN
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u0106' # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\u0101' # 0x0083 -> LATIN SMALL LETTER A WITH MACRON
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u0123' # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA
'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
'\u0107' # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE
'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
'\u0113' # 0x0089 -> LATIN SMALL LETTER E WITH MACRON
'\u0156' # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA
'\u0157' # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA
'\u012b' # 0x008c -> LATIN SMALL LETTER I WITH MACRON
'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
'\u014d' # 0x0093 -> LATIN SMALL LETTER O WITH MACRON
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u0122' # 0x0095 -> LATIN CAPITAL LETTER G WITH CEDILLA
'\xa2' # 0x0096 -> CENT SIGN
'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
'\xa3' # 0x009c -> POUND SIGN
'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
'\xd7' # 0x009e -> MULTIPLICATION SIGN
'\xa4' # 0x009f -> CURRENCY SIGN
'\u0100' # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON
'\u012a' # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\u017b' # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c' # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u017a' # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE
'\u201d' # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK
'\xa6' # 0x00a7 -> BROKEN BAR
'\xa9' # 0x00a8 -> COPYRIGHT SIGN
'\xae' # 0x00a9 -> REGISTERED SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\u0141' # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u0104' # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u010c' # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON
'\u0118' # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0116' # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u012e' # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK
'\u0160' # 0x00be -> LATIN CAPITAL LETTER S WITH CARON
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u0172' # 0x00c6 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u016a' # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u017d' # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON
'\u0105' # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK
'\u010d' # 0x00d1 -> LATIN SMALL LETTER C WITH CARON
'\u0119' # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK
'\u0117' # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE
'\u012f' # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK
'\u0161' # 0x00d5 -> LATIN SMALL LETTER S WITH CARON
'\u0173' # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK
'\u016b' # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON
'\u017e' # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
'\u014c' # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON
'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xb5' # 0x00e6 -> MICRO SIGN
'\u0144' # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE
'\u0136' # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u0137' # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA
'\u013b' # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u013c' # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA
'\u0146' # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA
'\u0112' # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON
'\u0145' # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u2019' # 0x00ef -> RIGHT SINGLE QUOTATION MARK
'\xad' # 0x00f0 -> SOFT HYPHEN
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u201c' # 0x00f2 -> LEFT DOUBLE QUOTATION MARK
'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
'\xb6' # 0x00f4 -> PILCROW SIGN
'\xa7' # 0x00f5 -> SECTION SIGN
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u201e' # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\xb9' # 0x00fb -> SUPERSCRIPT ONE
'\xb3' # 0x00fc -> SUPERSCRIPT THREE
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a2: 0x0096, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x009f, # CURRENCY SIGN
0x00a6: 0x00a7, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a9: 0x00a8, # COPYRIGHT SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0100: 0x00a0, # LATIN CAPITAL LETTER A WITH MACRON
0x0101: 0x0083, # LATIN SMALL LETTER A WITH MACRON
0x0104: 0x00b5, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00d0, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x0080, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0087, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00b6, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x00d1, # LATIN SMALL LETTER C WITH CARON
0x0112: 0x00ed, # LATIN CAPITAL LETTER E WITH MACRON
0x0113: 0x0089, # LATIN SMALL LETTER E WITH MACRON
0x0116: 0x00b8, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x0117: 0x00d3, # LATIN SMALL LETTER E WITH DOT ABOVE
0x0118: 0x00b7, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00d2, # LATIN SMALL LETTER E WITH OGONEK
0x0122: 0x0095, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0123: 0x0085, # LATIN SMALL LETTER G WITH CEDILLA
0x012a: 0x00a1, # LATIN CAPITAL LETTER I WITH MACRON
0x012b: 0x008c, # LATIN SMALL LETTER I WITH MACRON
0x012e: 0x00bd, # LATIN CAPITAL LETTER I WITH OGONEK
0x012f: 0x00d4, # LATIN SMALL LETTER I WITH OGONEK
0x0136: 0x00e8, # LATIN CAPITAL LETTER K WITH CEDILLA
0x0137: 0x00e9, # LATIN SMALL LETTER K WITH CEDILLA
0x013b: 0x00ea, # LATIN CAPITAL LETTER L WITH CEDILLA
0x013c: 0x00eb, # LATIN SMALL LETTER L WITH CEDILLA
0x0141: 0x00ad, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e7, # LATIN SMALL LETTER N WITH ACUTE
0x0145: 0x00ee, # LATIN CAPITAL LETTER N WITH CEDILLA
0x0146: 0x00ec, # LATIN SMALL LETTER N WITH CEDILLA
0x014c: 0x00e2, # LATIN CAPITAL LETTER O WITH MACRON
0x014d: 0x0093, # LATIN SMALL LETTER O WITH MACRON
0x0156: 0x008a, # LATIN CAPITAL LETTER R WITH CEDILLA
0x0157: 0x008b, # LATIN SMALL LETTER R WITH CEDILLA
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x0160: 0x00be, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00d5, # LATIN SMALL LETTER S WITH CARON
0x016a: 0x00c7, # LATIN CAPITAL LETTER U WITH MACRON
0x016b: 0x00d7, # LATIN SMALL LETTER U WITH MACRON
0x0172: 0x00c6, # LATIN CAPITAL LETTER U WITH OGONEK
0x0173: 0x00d6, # LATIN SMALL LETTER U WITH OGONEK
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00a5, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00a3, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00a4, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00cf, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00d8, # LATIN SMALL LETTER Z WITH CARON
0x2019: 0x00ef, # RIGHT SINGLE QUOTATION MARK
0x201c: 0x00f2, # LEFT DOUBLE QUOTATION MARK
0x201d: 0x00a6, # RIGHT DOUBLE QUOTATION MARK
0x201e: 0x00f7, # DOUBLE LOW-9 QUOTATION MARK
0x2219: 0x00f9, # BULLET OPERATOR
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| lgpl-3.0 | 210,634,315,408,458,340 | -1,897,325,826,992,409,000 | 48.463415 | 103 | 0.602448 | false |
chriskmanx/qmole | QMOLEDEV/boost_1_49_0/libs/python/pyste/src/Pyste/pyste.py | 54 | 14022 | # Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
Pyste version %s
Usage:
pyste [options] interface-files
where options are:
--module=<name> The name of the module that will be generated;
defaults to the first interface filename, without
the extension.
-I <path> Add an include path
-D <symbol> Define symbol
--multiple Create various cpps, instead of only one
(useful during development)
--out=<name> Specify output filename (default: <module>.cpp)
in --multiple mode, this will be a directory
--no-using Do not declare "using namespace boost";
use explicit declarations instead
--pyste-ns=<name> Set the namespace where new types will be declared;
default is the empty namespace
--debug Writes the xml for each file parsed in the current
directory
--cache-dir=<dir> Directory for cache files (speeds up future runs)
--only-create-cache Recreates all caches (doesn't generate code).
--generate-main Generates the _main.cpp file (in multiple mode)
--file-list A file with one pyste file per line. Use as a
substitute for passing the files in the command
line.
--gccxml-path=<path> Path to gccxml executable (default: gccxml)
--no-default-include Do not use INCLUDE environment variable for include
files to pass along gccxml.
-h, --help Print this help and exit
-v, --version Print version information
"""
import sys
import os
import getopt
import exporters
import SingleCodeUnit
import MultipleCodeUnit
import infos
import exporterutils
import settings
import gc
import sys
from policies import *
from CppParser import CppParser, CppParserError
import time
import declarations
__version__ = '0.9.30'
def RecursiveIncludes(include):
    'Return a list containing the include dir and all its subdirectories'
dirs = [include]
def visit(arg, dir, names):
# ignore CVS dirs
if os.path.split(dir)[1] != 'CVS':
dirs.append(dir)
os.path.walk(include, visit, None)
return dirs
def GetDefaultIncludes():
if 'INCLUDE' in os.environ:
include = os.environ['INCLUDE']
return include.split(os.pathsep)
else:
return []
def ProcessIncludes(includes):
if sys.platform == 'win32':
index = 0
for include in includes:
includes[index] = include.replace('\\', '/')
index += 1
def ReadFileList(filename):
f = file(filename)
files = []
try:
for line in f:
line = line.strip()
if line:
files.append(line)
finally:
f.close()
return files
def ParseArguments():
def Usage():
print __doc__ % __version__
sys.exit(1)
try:
options, files = getopt.getopt(
sys.argv[1:],
'R:I:D:vh',
['module=', 'multiple', 'out=', 'no-using', 'pyste-ns=', 'debug', 'cache-dir=',
'only-create-cache', 'version', 'generate-main', 'file-list=', 'help',
'gccxml-path=', 'no-default-include'])
except getopt.GetoptError, e:
print
print 'ERROR:', e
Usage()
default_includes = GetDefaultIncludes()
includes = []
defines = []
module = None
out = None
multiple = False
cache_dir = None
create_cache = False
generate_main = False
gccxml_path = 'gccxml'
for opt, value in options:
if opt == '-I':
includes.append(value)
elif opt == '-D':
defines.append(value)
elif opt == '-R':
includes.extend(RecursiveIncludes(value))
elif opt == '--module':
module = value
elif opt == '--out':
out = value
elif opt == '--no-using':
settings.namespaces.python = 'boost::python::'
settings.USING_BOOST_NS = False
elif opt == '--pyste-ns':
settings.namespaces.pyste = value + '::'
elif opt == '--debug':
settings.DEBUG = True
elif opt == '--multiple':
multiple = True
elif opt == '--cache-dir':
cache_dir = value
elif opt == '--only-create-cache':
create_cache = True
elif opt == '--file-list':
files += ReadFileList(value)
elif opt in ['-h', '--help']:
Usage()
elif opt in ['-v', '--version']:
print 'Pyste version %s' % __version__
sys.exit(2)
elif opt == '--generate-main':
generate_main = True
elif opt == '--gccxml-path':
gccxml_path = value
elif opt == '--no-default-include':
default_includes = []
else:
print 'Unknown option:', opt
Usage()
includes[0:0] = default_includes
if not files:
Usage()
if not module:
module = os.path.splitext(os.path.basename(files[0]))[0]
if not out:
out = module
if not multiple:
out += '.cpp'
for file in files:
d = os.path.dirname(os.path.abspath(file))
if d not in sys.path:
sys.path.append(d)
if create_cache and not cache_dir:
print 'Error: Use --cache-dir to indicate where to create the cache files!'
Usage()
sys.exit(3)
if generate_main and not multiple:
print 'Error: --generate-main only valid in multiple mode.'
Usage()
sys.exit(3)
ProcessIncludes(includes)
return includes, defines, module, out, files, multiple, cache_dir, create_cache, \
generate_main, gccxml_path
def PCHInclude(*headers):
code = '\n'.join(['#include <%s>' % x for x in headers])
infos.CodeInfo(code, 'pchinclude')
def CreateContext():
'create the context where a interface file will be executed'
context = {}
context['Import'] = Import
# infos
context['Function'] = infos.FunctionInfo
context['Class'] = infos.ClassInfo
context['Include'] = lambda header: infos.CodeInfo('#include <%s>\n' % header, 'include')
context['PCHInclude'] = PCHInclude
context['Template'] = infos.ClassTemplateInfo
context['Enum'] = infos.EnumInfo
context['AllFromHeader'] = infos.HeaderInfo
context['Var'] = infos.VarInfo
# functions
context['rename'] = infos.rename
context['set_policy'] = infos.set_policy
context['exclude'] = infos.exclude
context['set_wrapper'] = infos.set_wrapper
context['use_shared_ptr'] = infos.use_shared_ptr
context['use_auto_ptr'] = infos.use_auto_ptr
context['holder'] = infos.holder
context['add_method'] = infos.add_method
context['final'] = infos.final
context['export_values'] = infos.export_values
# policies
context['return_internal_reference'] = return_internal_reference
context['with_custodian_and_ward'] = with_custodian_and_ward
context['return_value_policy'] = return_value_policy
context['reference_existing_object'] = reference_existing_object
context['copy_const_reference'] = copy_const_reference
context['copy_non_const_reference'] = copy_non_const_reference
context['return_opaque_pointer'] = return_opaque_pointer
context['manage_new_object'] = manage_new_object
context['return_by_value'] = return_by_value
context['return_self'] = return_self
# utils
context['Wrapper'] = exporterutils.FunctionWrapper
context['declaration_code'] = lambda code: infos.CodeInfo(code, 'declaration-outside')
context['module_code'] = lambda code: infos.CodeInfo(code, 'module')
context['class_code'] = infos.class_code
return context
def Begin():
# parse arguments
includes, defines, module, out, interfaces, multiple, cache_dir, create_cache, generate_main, gccxml_path = ParseArguments()
# run pyste scripts
for interface in interfaces:
ExecuteInterface(interface)
# create the parser
parser = CppParser(includes, defines, cache_dir, declarations.version, gccxml_path)
try:
if not create_cache:
if not generate_main:
return GenerateCode(parser, module, out, interfaces, multiple)
else:
return GenerateMain(module, out, OrderInterfaces(interfaces))
else:
return CreateCaches(parser)
finally:
parser.Close()
def CreateCaches(parser):
# There is one cache file per interface so we organize the headers
# by interfaces. For each interface collect the tails from the
# exporters sharing the same header.
tails = JoinTails(exporters.exporters)
# now for each interface file take each header, and using the tail
# get the declarations and cache them.
for interface, header in tails:
tail = tails[(interface, header)]
declarations = parser.ParseWithGCCXML(header, tail)
cachefile = parser.CreateCache(header, interface, tail, declarations)
print 'Cached', cachefile
return 0
_imported_count = {} # interface => count
def ExecuteInterface(interface):
old_interface = exporters.current_interface
if not os.path.exists(interface):
if old_interface and os.path.exists(old_interface):
d = os.path.dirname(old_interface)
interface = os.path.join(d, interface)
if not os.path.exists(interface):
raise IOError, "Cannot find interface file %s."%interface
_imported_count[interface] = _imported_count.get(interface, 0) + 1
exporters.current_interface = interface
context = CreateContext()
context['INTERFACE_FILE'] = os.path.abspath(interface)
execfile(interface, context)
exporters.current_interface = old_interface
def Import(interface):
exporters.importing = True
ExecuteInterface(interface)
exporters.importing = False
def JoinTails(exports):
    '''Return a dict of {(interface, header): tail}, where tail is the
    concatenation of the tails of all exports that share that header.
    '''
tails = {}
for export in exports:
interface = export.interface_file
header = export.Header()
tail = export.Tail() or ''
if (interface, header) in tails:
all_tails = tails[(interface,header)]
all_tails += '\n' + tail
tails[(interface, header)] = all_tails
else:
tails[(interface, header)] = tail
return tails
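# Shape sketch (hypothetical data): two exports declared in 'shapes.pyste'
# that share the header 'shapes.h', with tails 'A' and 'B', collapse to
# {('shapes.pyste', 'shapes.h'): 'A\nB'}.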
def OrderInterfaces(interfaces):
interfaces_order = [(_imported_count[x], x) for x in interfaces]
interfaces_order.sort()
interfaces_order.reverse()
return [x for _, x in interfaces_order]
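# Example: with _imported_count == {'a.pyste': 2, 'b.pyste': 1},
# OrderInterfaces(['b.pyste', 'a.pyste']) returns ['a.pyste', 'b.pyste'] --
# the most-imported interfaces come first.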
def GenerateMain(module, out, interfaces):
codeunit = MultipleCodeUnit.MultipleCodeUnit(module, out)
codeunit.GenerateMain(interfaces)
return 0
def GenerateCode(parser, module, out, interfaces, multiple):
# prepare to generate the wrapper code
if multiple:
codeunit = MultipleCodeUnit.MultipleCodeUnit(module, out)
else:
codeunit = SingleCodeUnit.SingleCodeUnit(module, out)
# stop referencing the exporters here
exports = exporters.exporters
exporters.exporters = None
exported_names = dict([(x.Name(), None) for x in exports])
# order the exports
order = {}
for export in exports:
if export.interface_file in order:
order[export.interface_file].append(export)
else:
order[export.interface_file] = [export]
exports = []
interfaces_order = OrderInterfaces(interfaces)
for interface in interfaces_order:
exports.extend(order[interface])
del order
del interfaces_order
# now generate the code in the correct order
#print exported_names
tails = JoinTails(exports)
for i in xrange(len(exports)):
export = exports[i]
interface = export.interface_file
header = export.Header()
if header:
tail = tails[(interface, header)]
declarations, parsed_header = parser.Parse(header, interface, tail)
else:
declarations = []
parsed_header = None
ExpandTypedefs(declarations, exported_names)
export.SetDeclarations(declarations)
export.SetParsedHeader(parsed_header)
if multiple:
codeunit.SetCurrent(export.interface_file, export.Name())
export.GenerateCode(codeunit, exported_names)
# force collect of cyclic references
exports[i] = None
del declarations
del export
gc.collect()
# finally save the code unit
codeunit.Save()
if not multiple:
print 'Module %s generated' % module
return 0
def ExpandTypedefs(decls, exported_names):
    '''Check if any of the names in exported_names is a typedef, and add the
    real class name to the dict.
    '''
    # Note: the loop body does not depend on ``name``; the typedef expansion
    # is idempotent, so repeating it once per exported name is redundant but
    # harmless.
    for name in exported_names.keys():
        for decl in decls:
            if isinstance(decl, declarations.Typedef):
                exported_names[decl.type.FullName()] = None
def UsePsyco():
'Tries to use psyco if possible'
try:
import psyco
psyco.profile()
except: pass
def main():
start = time.clock()
UsePsyco()
status = Begin()
print '%0.2f seconds' % (time.clock()-start)
sys.exit(status)
if __name__ == '__main__':
main()
| gpl-3.0 | -3,758,451,575,766,929,400 | -5,534,771,034,684,589,000 | 32.070755 | 128 | 0.601983 | false |
bearstech/ansible | test/units/modules/network/nxos/test_nxos_config.py | 47 | 4988 | #!/usr/bin/env python
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_config
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosConfigModule(TestNxosModule):
module = nxos_config
def setUp(self):
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_config.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_config', 'config.cfg')
self.load_config.return_value = None
def test_nxos_config_no_change(self):
args = dict(lines=['hostname localhost'])
set_module_args(args)
result = self.execute_module()
def test_nxos_config_src(self):
args = dict(src=load_fixture('nxos_config', 'candidate.cfg'))
set_module_args(args)
result = self.execute_module(changed=True)
config = ['hostname switch01', 'interface Ethernet1',
'description test interface', 'no shutdown', 'ip routing']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
def test_nxos_config_lines(self):
args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
def test_nxos_config_before(self):
args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'],
before=['before command'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['before command', 'hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
self.assertEqual('before command', result['commands'][0])
def test_nxos_config_after(self):
args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'],
after=['after command'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['after command', 'hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
self.assertEqual('after command', result['commands'][-1])
def test_nxos_config_parents(self):
args = dict(lines=['ip address 1.2.3.4/5', 'no shutdown'], parents=['interface Ethernet10'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['interface Ethernet10', 'ip address 1.2.3.4/5', 'no shutdown']
self.assertEqual(config, result['commands'], result['commands'])
def test_nxos_config_src_and_lines_fails(self):
args = dict(src='foo', lines='foo')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_match_exact_requires_lines(self):
args = dict(match='exact')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_match_strict_requires_lines(self):
args = dict(match='strict')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_replace_block_requires_lines(self):
args = dict(replace='block')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_replace_config_requires_src(self):
args = dict(replace='config')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_backup_returns__backup__(self):
args = dict(backup=True)
set_module_args(args)
result = self.execute_module()
self.assertIn('__backup__', result)
| gpl-3.0 | 6,688,736,418,475,944,000 | 4,970,579,814,489,620,000 | 35.676471 | 100 | 0.660585 | false |
sup95/zulip | zerver/views/webhooks/circleci.py | 11 | 1853 | # Webhooks for external integrations.
from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from six import text_type
from typing import Any, Dict
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile, Client
import ujson
CIRCLECI_SUBJECT_TEMPLATE = u'{repository_name}'
CIRCLECI_MESSAGE_TEMPLATE = u'[Build]({build_url}) triggered by {username} on {branch} branch {status}.'
FAILED_STATUS = 'failed'
@api_key_only_webhook_view('CircleCI')
@has_request_variables
def api_circleci_webhook(request, user_profile, client, payload=REQ(argument_type='body'),
stream=REQ(default='circleci')):
# type: (HttpRequest, UserProfile, Client, Dict[str, Any], text_type) -> HttpResponse
payload = payload['payload']
subject = get_subject(payload)
body = get_body(payload)
check_send_message(user_profile, client, 'stream', [stream], subject, body)
return json_success()
def get_subject(payload):
# type: (Dict[str, Any]) -> text_type
return CIRCLECI_SUBJECT_TEMPLATE.format(repository_name=payload['reponame'])
def get_body(payload):
# type: (Dict[str, Any]) -> text_type
data = {
'build_url': payload['build_url'],
'username': payload['username'],
'branch': payload['branch'],
'status': get_status(payload)
}
return CIRCLECI_MESSAGE_TEMPLATE.format(**data)
def get_status(payload):
# type: (Dict[str, Any]) -> text_type
status = payload['status']
if payload['previous']['status'] == FAILED_STATUS and status == FAILED_STATUS:
return u'is still failing'
if status == 'success':
return u'succeeded'
return status
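# Illustrative rendering for a hypothetical payload (field values invented):
#   payload = {'build_url': 'https://circleci.com/gh/org/repo/42',
#              'username': 'alice', 'branch': 'master', 'status': 'success',
#              'previous': {'status': 'failed'}}
#   get_body(payload)
#   # -> '[Build](https://circleci.com/gh/org/repo/42) triggered by alice
#   #     on master branch succeeded.'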
| apache-2.0 | -339,356,489,594,834,900 | -5,194,020,417,269,165,000 | 33.314815 | 104 | 0.690232 | false |
lispc/Paddle | v1_api_demo/quick_start/dataprovider_emb.py | 10 | 1953 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.trainer.PyDataProvider2 import *
UNK_IDX = 0
def initializer(settings, dictionary, **kwargs):
settings.word_dict = dictionary
settings.input_types = {
        # Define the type of the first input as a sequence of integers.
        # The values of the integers range from 0 to len(dictionary)-1.
'word': integer_value_sequence(len(dictionary)),
# Define the second input for label id
'label': integer_value(2)
}
@provider(init_hook=initializer, cache=CacheType.CACHE_PASS_IN_MEM)
def process(settings, file_name):
with open(file_name, 'r') as f:
for line in f:
label, comment = line.strip().split('\t')
words = comment.split()
word_slot = [settings.word_dict.get(w, UNK_IDX) for w in words]
yield {'word': word_slot, 'label': int(label)}
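# Sketch of the expected training-data line format (tab-separated label and
# comment; values invented for the example):
#   line = '1\tthis movie is great'
# With settings.word_dict == {'this': 1, 'movie': 2}, process() yields
#   {'word': [1, 2, 0, 0], 'label': 1}   # unknown words map to UNK_IDX == 0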
def predict_initializer(settings, dictionary, **kwargs):
settings.word_dict = dictionary
settings.input_types = {'word': integer_value_sequence(len(dictionary))}
@provider(init_hook=predict_initializer, should_shuffle=False)
def process_predict(settings, file_name):
with open(file_name, 'r') as f:
for line in f:
comment = line.strip().split()
word_slot = [settings.word_dict.get(w, UNK_IDX) for w in comment]
yield {'word': word_slot}
| apache-2.0 | 580,853,733,067,694,300 | -6,010,535,041,501,006,000 | 36.557692 | 77 | 0.680492 | false |
asadziach/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 92 | 4535 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
PANDAS_DTYPES = {
'int8': 'int',
'int16': 'int',
'int32': 'int',
'int64': 'int',
'uint8': 'int',
'uint16': 'int',
'uint32': 'int',
'uint64': 'int',
'float16': 'float',
'float32': 'float',
'float64': 'float',
'bool': 'i'
}
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=True,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""This input_fn diffs from the core version with default `shuffle`."""
return core_pandas_input_fn(x=x,
y=y,
batch_size=batch_size,
shuffle=shuffle,
num_epochs=num_epochs,
queue_capacity=queue_capacity,
num_threads=num_threads,
target_column=target_column)
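# Minimal usage sketch (assumes pandas is installed; column and target names
# are illustrative):
#   import pandas as pd
#   x = pd.DataFrame({'a': [1., 2., 3.], 'b': [4., 5., 6.]})
#   y = pd.Series([0, 1, 0], name='target')
#   input_fn = pandas_input_fn(x=x, y=y, batch_size=2, shuffle=False)
#   # pass input_fn to an Estimator's train/evaluate call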
def extract_pandas_data(data):
"""Extract data from pandas.DataFrame for predictors.
Given a DataFrame, will extract the values and cast them to float. The
DataFrame is expected to contain values of type int, float or bool.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values as floats.
Raises:
ValueError: if data contains types other than int, float or bool.
"""
if not isinstance(data, pd.DataFrame):
return data
bad_data = [column for column in data
if data[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return data.values.astype('float')
else:
error_report = [("'" + str(column) + "' type='" +
data[column].dtype.name + "'") for column in bad_data]
raise ValueError('Data types for extracting pandas data must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
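# Behaviour sketch (assumes pandas is available):
#   extract_pandas_data(pd.DataFrame({'a': [1, 2], 'b': [True, False]}))
#   # -> float ndarray [[1., 1.], [2., 0.]]
#   extract_pandas_data(pd.DataFrame({'s': ['x', 'y']}))
#   # -> ValueError, since dtype 'object' is not int, float or bool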
def extract_pandas_matrix(data):
"""Extracts numpy matrix from pandas DataFrame.
Args:
data: `pandas.DataFrame` containing the data to be extracted.
Returns:
A numpy `ndarray` of the DataFrame's values.
"""
if not isinstance(data, pd.DataFrame):
return data
return data.as_matrix()
def extract_pandas_labels(labels):
"""Extract data from pandas.DataFrame for labels.
Args:
labels: `pandas.DataFrame` or `pandas.Series` containing one column of
labels to be extracted.
Returns:
A numpy `ndarray` of labels from the DataFrame.
Raises:
ValueError: if more than one column is found or type is not int, float or
bool.
"""
if isinstance(labels,
pd.DataFrame): # pandas.Series also belongs to DataFrame
if len(labels.columns) > 1:
raise ValueError('Only one column for labels is allowed.')
bad_data = [column for column in labels
if labels[column].dtype.name not in PANDAS_DTYPES]
if not bad_data:
return labels.values
else:
error_report = ["'" + str(column) + "' type="
+ str(labels[column].dtype.name) for column in bad_data]
raise ValueError('Data types for extracting labels must be int, '
'float, or bool. Found: ' + ', '.join(error_report))
else:
return labels
| apache-2.0 | 7,890,072,147,310,529,000 | -1,154,659,204,666,234,000 | 30.713287 | 96 | 0.6086 | false |
tareqalayan/pytest | _pytest/monkeypatch.py | 1 | 9043 | """ monkeypatching and mocking functionality. """
from __future__ import absolute_import, division, print_function
import os
import sys
import re
import six
from _pytest.fixtures import fixture
RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$")
@fixture
def monkeypatch():
"""The returned ``monkeypatch`` fixture provides these
helper methods to modify objects, dictionaries or os.environ::
monkeypatch.setattr(obj, name, value, raising=True)
monkeypatch.delattr(obj, name, raising=True)
monkeypatch.setitem(mapping, name, value)
monkeypatch.delitem(obj, name, raising=True)
        monkeypatch.setenv(name, value, prepend=None)
        monkeypatch.delenv(name, raising=True)
monkeypatch.syspath_prepend(path)
monkeypatch.chdir(path)
All modifications will be undone after the requesting
test function or fixture has finished. The ``raising``
parameter determines if a KeyError or AttributeError
will be raised if the set/deletion operation has no target.
"""
mpatch = MonkeyPatch()
yield mpatch
mpatch.undo()
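# Minimal usage sketch (test and target names are illustrative):
#   import os
#   def test_fake_cwd(monkeypatch):
#       monkeypatch.setattr(os, 'getcwd', lambda: '/')
#       assert os.getcwd() == '/'
#   # the original os.getcwd is restored automatically at teardown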
def resolve(name):
# simplified from zope.dottedname
parts = name.split('.')
used = parts.pop(0)
found = __import__(used)
for part in parts:
used += '.' + part
try:
found = getattr(found, part)
except AttributeError:
pass
else:
continue
# we use explicit un-nesting of the handling block in order
# to avoid nested exceptions on python 3
try:
__import__(used)
except ImportError as ex:
# str is used for py2 vs py3
expected = str(ex).split()[-1]
if expected == used:
raise
else:
raise ImportError(
'import error in %s: %s' % (used, ex)
)
found = annotated_getattr(found, part, used)
return found
def annotated_getattr(obj, name, ann):
try:
obj = getattr(obj, name)
except AttributeError:
raise AttributeError(
'%r object at %s has no attribute %r' % (
type(obj).__name__, ann, name
)
)
return obj
def derive_importpath(import_path, raising):
if not isinstance(import_path, six.string_types) or "." not in import_path:
raise TypeError("must be absolute import path string, not %r" %
(import_path,))
module, attr = import_path.rsplit('.', 1)
target = resolve(module)
if raising:
annotated_getattr(target, attr, ann=module)
return attr, target
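# Illustrative result for a standard-library dotted path:
#   derive_importpath('os.path.abspath', raising=True)
#   # -> ('abspath', <the resolved os.path module>)
# i.e. the attribute name plus the object that holds it.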
class Notset(object):
def __repr__(self):
return "<notset>"
notset = Notset()
class MonkeyPatch(object):
""" Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes.
"""
def __init__(self):
self._setattr = []
self._setitem = []
self._cwd = None
self._savesyspath = None
def setattr(self, target, name, value=notset, raising=True):
""" Set attribute value on target, memorizing the old value.
By default raise AttributeError if the attribute did not exist.
For convenience you can specify a string as ``target`` which
will be interpreted as a dotted import path, with the last part
being the attribute name. Example:
``monkeypatch.setattr("os.getcwd", lambda: "/")``
would set the ``getcwd`` function of the ``os`` module.
The ``raising`` value determines if the setattr should fail
if the attribute is not already present (defaults to True
which means it will raise).
"""
__tracebackhide__ = True
import inspect
if value is notset:
if not isinstance(target, six.string_types):
raise TypeError("use setattr(target, name, value) or "
"setattr(target, value) with target being a dotted "
"import string")
value = name
name, target = derive_importpath(target, raising)
oldval = getattr(target, name, notset)
if raising and oldval is notset:
raise AttributeError("%r has no attribute %r" % (target, name))
# avoid class descriptors like staticmethod/classmethod
if inspect.isclass(target):
oldval = target.__dict__.get(name, notset)
self._setattr.append((target, name, oldval))
setattr(target, name, value)
def delattr(self, target, name=notset, raising=True):
""" Delete attribute ``name`` from ``target``, by default raise
AttributeError it the attribute did not previously exist.
If no ``name`` is specified and ``target`` is a string
it will be interpreted as a dotted import path with the
last part being the attribute name.
If ``raising`` is set to False, no exception will be raised if the
attribute is missing.
"""
__tracebackhide__ = True
if name is notset:
if not isinstance(target, six.string_types):
raise TypeError("use delattr(target, name) or "
"delattr(target) with target being a dotted "
"import string")
name, target = derive_importpath(target, raising)
if not hasattr(target, name):
if raising:
raise AttributeError(name)
else:
self._setattr.append((target, name, getattr(target, name, notset)))
delattr(target, name)
def setitem(self, dic, name, value):
""" Set dictionary entry ``name`` to value. """
self._setitem.append((dic, name, dic.get(name, notset)))
dic[name] = value
def delitem(self, dic, name, raising=True):
""" Delete ``name`` from dict. Raise KeyError if it doesn't exist.
If ``raising`` is set to False, no exception will be raised if the
key is missing.
"""
if name not in dic:
if raising:
raise KeyError(name)
else:
self._setitem.append((dic, name, dic.get(name, notset)))
del dic[name]
def setenv(self, name, value, prepend=None):
""" Set environment variable ``name`` to ``value``. If ``prepend``
is a character, read the current environment variable value
and prepend the ``value`` adjoined with the ``prepend`` character."""
value = str(value)
if prepend and name in os.environ:
value = value + prepend + os.environ[name]
self.setitem(os.environ, name, value)
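    # Sketch of the ``prepend`` behaviour (assuming PATH is already set):
    #     setenv("PATH", "/opt/bin", prepend=os.pathsep)
    # leaves os.environ["PATH"] == "/opt/bin" + os.pathsep + <previous PATH>.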
def delenv(self, name, raising=True):
""" Delete ``name`` from the environment. Raise KeyError it does not
exist.
If ``raising`` is set to False, no exception will be raised if the
environment variable is missing.
"""
self.delitem(os.environ, name, raising=raising)
def syspath_prepend(self, path):
""" Prepend ``path`` to ``sys.path`` list of import locations. """
if self._savesyspath is None:
self._savesyspath = sys.path[:]
sys.path.insert(0, str(path))
def chdir(self, path):
""" Change the current working directory to the specified path.
Path can be a string or a py.path.local object.
"""
if self._cwd is None:
self._cwd = os.getcwd()
if hasattr(path, "chdir"):
path.chdir()
else:
os.chdir(path)
def undo(self):
""" Undo previous changes. This call consumes the
undo stack. Calling it a second time has no effect unless
you do more monkeypatching after the undo call.
There is generally no need to call `undo()`, since it is
called automatically during tear-down.
Note that the same `monkeypatch` fixture is used across a
single test function invocation. If `monkeypatch` is used both by
the test function itself and one of the test fixtures,
calling `undo()` will undo all of the changes made in
both functions.
"""
for obj, name, value in reversed(self._setattr):
if value is not notset:
setattr(obj, name, value)
else:
delattr(obj, name)
self._setattr[:] = []
for dictionary, name, value in reversed(self._setitem):
if value is notset:
try:
del dictionary[name]
except KeyError:
pass # was already deleted, so we have the desired state
else:
dictionary[name] = value
self._setitem[:] = []
if self._savesyspath is not None:
sys.path[:] = self._savesyspath
self._savesyspath = None
if self._cwd is not None:
os.chdir(self._cwd)
self._cwd = None
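# Illustrative undo semantics (``cfg`` is a hypothetical dict):
#     mpatch = MonkeyPatch()
#     mpatch.setitem(cfg, "debug", True)
#     mpatch.undo()     # restores cfg to its previous state
#     mpatch.undo()     # no-op: the first call consumed the undo stack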
| mit | -8,825,435,079,213,210,000 | 7,456,519,932,654,123,000 | 34.050388 | 108 | 0.585204 | false |
mpobrien/see | see.py | 1 | 6339 | #!/usr/bin/env python
"""
see
A human alternative to dir().
>>> from see import see
>>> help(see)
Copyright (c) 2009 Liam Cooke
http://inky.github.com/see/
Licensed under the GNU General Public License v3. {{{
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
}}}
"""
import fnmatch
import inspect
import re
import sys
import textwrap
__all__ = ['see']
__author__ = 'Liam Cooke'
__contributors__ = [
'Bob Farrell',
'Gabriel Genellina',
'Baishampayan Ghose',
'Charlie Nolan',
'Ed Page',
'guff',
'jdunck',
]
__version__ = '0.5.4'
__copyright__ = 'Copyright (c) 2009 Liam Cooke'
__license__ = 'GNU General Public License v3'
def regex_filter(names, pat):
pat = re.compile(pat)
def match(name, fn=pat.search):
return fn(name) is not None
return tuple(filter(match, names))
def fn_filter(names, pat):
def match(name, fn=fnmatch.fnmatch, pat=pat):
return fn(name, pat)
return tuple(filter(match, names))
class _SeeOutput(tuple):
"""Tuple-like object with a pretty string representation."""
def __new__(self, actions=None):
return tuple.__new__(self, actions or [])
def __repr__(self):
return textwrap.fill(' '.join(self), 78,
initial_indent=' ',
subsequent_indent=' ')
class _SeeDefault(object):
def __repr__(self):
return 'anything'
_LOCALS = _SeeDefault()
def see(obj=_LOCALS, pattern=None, r=None):
"""
Inspect an object. Like the dir() builtin, but easier on the eyes.
Keyword arguments (all optional):
obj -- object to be inspected
pattern -- shell-style search pattern (e.g. '*len*')
r -- regular expression
If obj is omitted, objects in the current scope are listed instead.
Some unique symbols are used:
.* implements obj.anything
[] implements obj[key]
in implements membership tests (e.g. x in obj)
+obj unary positive operator (e.g. +2)
-obj unary negative operator (e.g. -2)
"""
use_locals = obj is _LOCALS
actions = []
dot = not use_locals and '.' or ''
func = lambda f: hasattr(f, '__call__') and '()' or ''
name = lambda a, f: ''.join((dot, a, func(f)))
if use_locals:
obj.__dict__ = inspect.currentframe().f_back.f_locals
attrs = dir(obj)
if not use_locals:
for var, symbol in SYMBOLS:
if var not in attrs or symbol in actions:
continue
elif var == '__doc__':
if not obj.__doc__ or not obj.__doc__.strip():
continue
actions.append(symbol)
for attr in filter(lambda a: not a.startswith('_'), attrs):
try:
prop = getattr(obj, attr)
except AttributeError:
continue
actions.append(name(attr, prop))
if pattern is not None:
actions = fn_filter(actions, pattern)
if r is not None:
actions = regex_filter(actions, r)
return _SeeOutput(actions)
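# Illustrative session (output abridged; the exact attribute list depends on
# the Python version):
#     >>> see([], pattern='*app*')
#         .append()
#     >>> see('', r='strip')
#         .lstrip()  .rstrip()  .strip()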
PY_300 = sys.version_info >= (3, 0)
PY_301 = sys.version_info >= (3, 0, 1)
SYMBOLS = tuple(filter(lambda x: x[0], (
# callable
('__call__', '()'),
# element/attribute access
('__getattr__', '.*'),
('__getitem__', '[]'),
('__setitem__', '[]'),
('__delitem__', '[]'),
# iteration
('__enter__', 'with'),
('__exit__', 'with'),
('__contains__', 'in'),
# operators
('__add__', '+'),
('__radd__', '+'),
('__iadd__', '+='),
('__sub__', '-'),
('__rsub__', '-'),
('__isub__', '-='),
('__mul__', '*'),
('__rmul__', '*'),
('__imul__', '*='),
(not PY_300 and '__div__', '/'),
(not PY_301 and '__rdiv__', '/'),
('__truediv__', '/'),
('__rtruediv__', '/'),
('__floordiv__', '//'),
('__rfloordiv__', '//'),
(not PY_300 and '__idiv__', '/='),
('__itruediv__', '/='),
('__ifloordiv__', '//='),
('__mod__', '%'),
('__rmod__', '%'),
('__divmod__', '%'),
('__imod__', '%='),
('__pow__', '**'),
('__rpow__', '**'),
('__ipow__', '**='),
('__lshift__', '<<'),
('__rlshift__', '<<'),
('__ilshift__', '<<='),
('__rshift__', '>>'),
('__rrshift__', '>>'),
('__irshift__', '>>='),
('__and__', '&'),
('__rand__', '&'),
('__iand__', '&='),
('__xor__', '^'),
('__rxor__', '^'),
('__ixor__', '^='),
('__or__', '|'),
('__ror__', '|'),
('__ior__', '|='),
('__pos__', '+obj'),
('__neg__', '-obj'),
('__invert__', '~'),
('__lt__', '<'),
(not PY_301 and '__cmp__', '<'),
('__le__', '<='),
(not PY_301 and '__cmp__', '<='),
('__eq__', '=='),
(not PY_301 and '__cmp__', '=='),
('__ne__', '!='),
(not PY_301 and '__cmp__', '!='),
('__gt__', '>'),
(not PY_301 and '__cmp__', '>'),
('__ge__', '>='),
(not PY_301 and '__cmp__', '>='),
# built-in functions
('__abs__', 'abs()'),
(PY_300 and '__bool__' or '__nonzero__', 'bool()'),
('__complex__', 'complex()'),
(PY_300 and '__dir__', 'dir()'),
('__divmod__', 'divmod()'),
('__rdivmod__', 'divmod()'),
('__float__', 'float()'),
('__hash__', 'hash()'),
('__doc__', 'help()'),
(PY_300 and '__index__' or '__hex__', 'hex()'),
('__int__', 'int()'),
('__iter__', 'iter()'),
('__len__', 'len()'),
(not PY_300 and '__long__', 'long()'),
(PY_300 and '__index__' or '__oct__', 'oct()'),
('__repr__', 'repr()'),
('__reversed__', 'reversed()'),
(PY_300 and '__round__', 'round()'),
('__str__', 'str()'),
    (not PY_300 and '__unicode__', 'unicode()'),
)))
if __name__ == '__main__':
help(see)
# vim: expandtab tabstop=4 shiftround shiftwidth=4 fdm=marker
| gpl-3.0 | -4,692,575,963,459,036,000 | -2,064,545,152,985,308,000 | 25.302905 | 71 | 0.485881 | false |
bkillenit/AbletonAPI | python-api-materials/code/Hack_LiveCurses/rpyc/utils/ssh.py | 3 | 10267 | import os
from subprocess import Popen, PIPE
from rpyc.lib import safe_import
from rpyc.lib.compat import BYTES_LITERAL
signal = safe_import("signal")
# modified from the stdlib pipes module for windows
_safechars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@%_-+=:,./'
_funnychars = '"`$\\'
def shquote(text):
if not text:
return "''"
for c in text:
if c not in _safechars:
break
else:
return text
if "'" not in text:
return "'" + text + "'"
def escaped(c):
if c in _funnychars:
return '\\' + c
else:
return c
res = "".join(escaped(c) for c in text)
return '"' + res + '"'
class ProcessExecutionError(Exception):
"""raised by :func:`SshContext.execute` should the executed process
terminate with an error"""
pass
import subprocess
def _get_startupinfo():
if subprocess.mswindows:
import _subprocess
sui = subprocess.STARTUPINFO()
sui.dwFlags |= _subprocess.STARTF_USESHOWWINDOW #@UndefinedVariable
sui.wShowWindow = _subprocess.SW_HIDE #@UndefinedVariable
return sui
else:
return None
class SshTunnel(object):
"""
Represents an active SSH tunnel (as created by ``ssh -L``).
.. note::
Do not instantiate this class yourself -- use the :func:`SshContext.tunnel`
function for that.
"""
PROGRAM = r"""import sys;sys.stdout.write("ready\n\n\n");sys.stdout.flush();sys.stdin.readline()"""
def __init__(self, sshctx, loc_host, loc_port, rem_host, rem_port):
self.loc_host = loc_host
self.loc_port = loc_port
self.rem_host = rem_host
self.rem_port = rem_port
self.sshctx = sshctx
self.proc = sshctx.popen("python", "-u", "-c", self.PROGRAM,
L = "[%s]:%s:[%s]:%s" % (loc_host, loc_port, rem_host, rem_port))
banner = self.proc.stdout.readline().strip()
if banner != BYTES_LITERAL("ready"):
raise ValueError("tunnel failed", banner)
def __del__(self):
try:
self.close()
except Exception:
pass
def __str__(self):
return "%s:%s --> (%s)%s:%s" % (self.loc_host, self.loc_port, self.sshctx.host,
self.rem_host, self.rem_port)
def is_open(self):
"""returns True if the ``ssh`` process is alive, False otherwise"""
return self.proc and self.proc.poll() is None
def close(self):
"""closes (terminates) the SSH tunnel"""
if not self.is_open():
return
self.proc.stdin.write(BYTES_LITERAL("foo\n\n\n"))
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.stderr.close()
try:
self.proc.kill()
except AttributeError:
if signal:
os.kill(self.proc.pid, signal.SIGTERM)
self.proc.wait()
self.proc = None
class SshContext(object):
"""
An *SSH context* encapsulates all the details required to establish an SSH
connection to other host. It includes the host name, user name, TCP port,
identity file, etc.
Once constructed, it can serve as a factory for SSH operations, such as
executing a remote program and getting its stdout, or uploading/downloading
files using ``scp``. It also serves for creating SSH tunnels.
Example::
>>> sshctx = SshContext("mymachine", username="borg", keyfile="/home/foo/.ssh/mymachine-id")
>>> sshctx.execute("ls")
(0, "...", "")
"""
def __init__(self, host, user = None, port = None, keyfile = None,
ssh_program = "ssh", ssh_env = None, ssh_cwd = None,
scp_program = "scp", scp_env = None, scp_cwd = None):
self.host = host
self.user = user
self.port = port
self.keyfile = keyfile
self.ssh_program = ssh_program
self.ssh_env = ssh_env
self.ssh_cwd = ssh_cwd
self.scp_program = scp_program
self.scp_env = scp_env
self.scp_cwd = scp_cwd
def __str__(self):
uri = "ssh://"
if self.user:
uri += "%s@%s" % (self.user, self.host)
else:
uri += self.host
if self.port:
uri += ":%d" % (self.port)
return uri
def _convert_kwargs_to_args(self, kwargs):
args = []
for k, v in kwargs.items():
if v is True:
args.append("-%s" % (k,))
elif v is False:
pass
else:
args.append("-%s" % (k,))
args.append(str(v))
return args
def _process_scp_cmdline(self, kwargs):
args = [self.scp_program]
if "r" not in kwargs:
kwargs["r"] = True
if self.keyfile and "i" not in kwargs:
kwargs["i"] = self.keyfile
if self.port and "P" not in kwargs:
kwargs["P"] = self.port
args.extend(self._convert_kwargs_to_args(kwargs))
if self.user:
host = "%s@%s" % (self.user, self.host)
else:
host = self.host
return args, host
def _process_ssh_cmdline(self, kwargs):
args = [self.ssh_program]
if self.keyfile and "i" not in kwargs:
kwargs["i"] = self.keyfile
if self.port and "p" not in kwargs:
kwargs["p"] = self.port
args.extend(self._convert_kwargs_to_args(kwargs))
if self.user:
args.append("%s@%s" % (self.user, self.host))
else:
args.append(self.host)
return args
def popen(self, *args, **kwargs):
"""Runs the given command line remotely (over SSH), returning the
``subprocess.Popen`` instance of the command
:param args: the command line arguments
:param kwargs: additional keyword arguments passed to ``ssh``
:returns: a ``Popen`` instance
Example::
proc = ctx.popen("ls", "-la")
proc.wait()
"""
cmdline = self._process_ssh_cmdline(kwargs)
cmdline.extend(shquote(a) for a in args)
return Popen(cmdline, stdin = PIPE, stdout = PIPE, stderr = PIPE,
cwd = self.ssh_cwd, env = self.ssh_env, shell = False,
startupinfo = _get_startupinfo())
def execute(self, *args, **kwargs):
"""Runs the given command line remotely (over SSH), waits for it to finish,
returning the return code, stdout, and stderr of the executed process.
:param args: the command line arguments
:param kwargs: additional keyword arguments passed to ``ssh``, except for
``retcode`` and ``input``.
:param retcode: *keyword only*, the expected return code (Defaults to 0
-- success). An exception is raised if the return code does
not match the expected one, unless it is ``None``, in
which case it will not be tested.
:param input: *keyword only*, an input string that will be passed to
``Popen.communicate``. Defaults to ``None``
:raises: :class:`ProcessExecutionError` if the expected return code
is not matched
:returns: a tuple of (return code, stdout, stderr)
Example::
rc, out, err = ctx.execute("ls", "-la")
"""
retcode = kwargs.pop("retcode", 0)
input = kwargs.pop("input", None)
proc = self.popen(*args, **kwargs)
stdout, stderr = proc.communicate(input)
if retcode is not None and proc.returncode != retcode:
raise ProcessExecutionError(proc.returncode, stdout, stderr)
return proc.returncode, stdout, stderr
def upload(self, src, dst, **kwargs):
"""
Uploads *src* from the local machine to *dst* on the other side. By default,
``-r`` (recursive copy) is given to ``scp``, so *src* can be either a file or
a directory. To override this behavior, pass ``r = False`` as a keyword argument.
:param src: the source path (on the local side)
:param dst: the destination path (on the remote side)
:param kwargs: any additional keyword arguments, passed to ``scp``.
"""
cmdline, host = self._process_scp_cmdline(kwargs)
cmdline.append(src)
cmdline.append("%s:%s" % (host, dst))
proc = Popen(cmdline, stdin = PIPE, stdout = PIPE, stderr = PIPE, shell = False,
cwd = self.scp_cwd, env = self.scp_env, startupinfo = _get_startupinfo())
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise ValueError("upload failed", stdout, stderr)
def download(self, src, dst, **kwargs):
"""
Downloads *src* from the other side to *dst* on the local side. By default,
``-r`` (recursive copy) is given to ``scp``, so *src* can be either a file or
a directory. To override this behavior, pass ``r = False`` as a keyword argument.
:param src: the source path (on the other side)
:param dst: the destination path (on the local side)
:param kwargs: any additional keyword arguments, passed to ``scp``.
"""
cmdline, host = self._process_scp_cmdline(kwargs)
cmdline.append("%s:%s" % (host, src))
cmdline.append(dst)
proc = Popen(cmdline, stdin = PIPE, stdout = PIPE, stderr = PIPE, shell = False,
cwd = self.scp_cwd, env = self.scp_env, startupinfo = _get_startupinfo())
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise ValueError("upload failed", stdout, stderr)
def tunnel(self, loc_port, rem_port, loc_host = "localhost", rem_host = "localhost"):
"""
Creates an SSH tunnel from the local port to the remote one. This is
translated to ``ssh -L loc_host:loc_port:rem_host:rem_port``.
:param loc_port: the local TCP port to forward
:param rem_port: the remote (server) TCP port, to which the local port
will be forwarded
:returns: an :class:`SshTunnel` instance
"""
return SshTunnel(self, loc_host, loc_port, rem_host, rem_port)
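# A hedged end-to-end sketch (host, user, key file and ports are invented):
#     ctx = SshContext("mymachine", user="borg", keyfile="~/.ssh/id_borg")
#     rc, out, err = ctx.execute("uname", "-a")
#     tun = ctx.tunnel(12345, 8080)        # local 12345 -> remote 8080
#     ...                                  # talk to localhost:12345
#     tun.close()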
| mit | 2,724,943,263,351,180,000 | -4,978,563,753,308,938,000 | 36.470803 | 103 | 0.574072 | false |
crepererum/invenio | invenio/legacy/bibdocfile/config.py | 12 | 2801 | # This file is part of Invenio.
# Copyright (C) 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import re
try:
from invenio.config import CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_MISC
except ImportError:
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_MISC = {
'can_revise_doctypes': ['*'],
'can_comment_doctypes': ['*'],
'can_describe_doctypes': ['*'],
'can_delete_doctypes': ['*'],
'can_keep_doctypes': ['*'],
'can_rename_doctypes': ['*'],
'can_add_format_to_doctypes': ['*'],
'can_restrict_doctypes': ['*']}
try:
from invenio.config import CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_DOCTYPES
except ImportError:
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_DOCTYPES = [
('Main', 'Main document'),
('LaTeX', 'LaTeX'),
('Source', 'Source'),
('Additional', 'Additional File'),
('Audio', 'Audio file'),
('Video', 'Video file'),
('Script', 'Script'),
('Data', 'Data'),
('Figure', 'Figure'),
('Schema', 'Schema'),
('Graph', 'Graph'),
('Image', 'Image'),
('Drawing', 'Drawing'),
('Slides', 'Slides')]
try:
from invenio.config import CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_RESTRICTIONS
except ImportError:
CFG_BIBDOCFILE_DOCUMENT_FILE_MANAGER_RESTRICTIONS = [
('', 'Public'),
('restricted', 'Restricted')]
# CFG_BIBDOCFILE_ICON_SUBFORMAT_RE -- a subformat is an Invenio concept to give
# file formats more semantic. For example "foo.gif;icon" has ".gif;icon"
# 'format', ".gif" 'superformat' and "icon" 'subformat'. That means that this
# particular format/instance of the "foo" document, not only is a ".gif" but
# is in the shape of an "icon", i.e. most probably it will be low-resolution.
# This configuration variable let the administrator to decide which implicit
# convention will be used to know which formats will be meant to be used
# as an icon.
CFG_BIBDOCFILE_ICON_SUBFORMAT_RE = re.compile(r"icon.*")
# CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT -- this is the default subformat used
# when creating new icons.
CFG_BIBDOCFILE_DEFAULT_ICON_SUBFORMAT = "icon"
| gpl-2.0 | -5,775,917,995,578,596,000 | 4,254,746,039,487,590,000 | 38.450704 | 80 | 0.665477 | false |
JASchilz/RoverMUD | simple_universe/attachments/ooc_commands.py | 1 | 1072 | from basics import BaseAttachment
class OOCComands(BaseAttachment):
character = False
def __init__(self, character):
self.character = character
self.action_matrix = [
["help", self.do_help,
"Open the help screen, or receive help on a specific command. \
\n\tExamples: 'help', 'help quit'"],
["quit", self.do_quit, "Quit the game."],
["health", self.do_health, "Assess your health.\n\tAliases: 'h'."]
]
def do_help(self, rest):
output = "Help Information\n\nCOMMAND\tDESCRIPTION"
for attachment in self.character.attachments:
output += "\n"
for action in attachment.action_matrix:
output += action[0] + "\t" + action[2] + "\n"
self.character.brain.to_client.append(output)
def do_quit(self, rest):
self.character.brain.client.active = False
def do_health(self, rest):
self.character.brain.to_client.append("You have " + str(self.character.current_hp) + " hit points.")
| apache-2.0 | -7,035,192,780,303,707,000 | 1,440,079,014,844,062,200 | 33.580645 | 108 | 0.583955 | false |
ylatuya/Flumotion | flumotion/test/test_component.py | 4 | 5295 | # -*- Mode: Python; test-case-name: flumotion.test.test_component -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import gobject
from twisted.trial import unittest
from flumotion.common import testsuite
from flumotion.common import errors
from flumotion.component.feedcomponent import ParseLaunchComponent
class PipelineTest(ParseLaunchComponent):
def __init__(self, eaters=None, feeders=None, pipeline='test-pipeline'):
self.__pipeline = pipeline
self._eater = eaters or {}
self._feed = feeders or []
config = {'name': 'fake',
'avatarId': '/default/fake',
'eater': self._eater,
'feed': self._feed,
'plugs': {},
'properties': {},
# clock master prevents the comp from being
# instantiated
'clock-master': '/some/component'}
ParseLaunchComponent.__init__(self, config)
def create_pipeline(self):
unparsed = self.__pipeline
self.pipeline_string = self.parse_pipeline(unparsed)
try:
# don't bother creating a gstreamer pipeline
# pipeline = gst.parse_launch(self.pipeline_string)
return None
except gobject.GError, e:
self.warning('Could not parse pipeline: %s' % e.message)
raise errors.PipelineParseError(e.message)
def connect_feeders(self, pipeline):
pass
def set_pipeline(self, pipeline):
self.pipeline = pipeline
class TestExpandElementNames(testsuite.TestCase):
def setUp(self):
self.p = PipelineTest([], [])
def tearDown(self):
return self.p.stop()
def testOddDelimeters(self):
self.assertRaises(TypeError, self.p.parse_pipeline,
'@ this:is:wrong @ ! because ! @')
class TestParser(testsuite.TestCase):
def parse(self, unparsed, correctresultproc, eaters=None, feeders=None):
comp = PipelineTest(eaters, feeders, unparsed)
result = comp.parse_pipeline(unparsed)
self.assertEquals(result, correctresultproc(comp))
comp.stop()
def testSimpleOneElement(self):
self.parse('foobar', lambda p: 'foobar')
def testSimpleTwoElements(self):
self.parse('foo ! bar', lambda p: 'foo ! bar')
def testOneSource(self):
self.parse('@eater:default@ ! bar',
lambda p: '%s ! bar' % (p.get_eater_template('default')),
{'qux': [('foo:bar', 'default')]})
def testOneSourceWithout(self):
self.parse('bar',
lambda p: '%s ! bar' % (p.get_eater_template('default')),
{'qux': [('foo:quoi', 'default')]})
def testOneFeed(self):
self.parse('foo ! @feeder:bar@',
lambda p: 'foo ! %s' % (p.get_feeder_template('bar')),
{}, ['bar'])
def testOneFeedWithout(self):
self.parse('foo',
lambda p: 'foo ! %s' % (p.get_feeder_template('bar')),
{}, ['bar'])
def testTwoSources(self):
self.parse('@eater:foo@ ! @eater:bar@ ! baz',
lambda p: '%s ! %s ! baz' % (p.get_eater_template('foo'),
p.get_eater_template('bar')),
{'qux': [('baz:default', 'foo')],
'zag': [('qux:default', 'bar')]})
def testTwoFeeds(self):
self.parse('foo ! @feeder:bar@ ! @feeder:baz@',
lambda p: 'foo ! %s ! %s' % (p.get_feeder_template('bar'),
p.get_feeder_template('baz')),
{}, ['bar', 'baz'])
def testTwoBoth(self):
self.parse(
'@eater:src1@ ! @eater:src2@ ! @feeder:feed1@ ! @feeder:feed2@',
lambda p: '%s ! %s ! %s ! %s' % (p.get_eater_template('src1'),
p.get_eater_template('src2'),
p.get_feeder_template('feed1'),
p.get_feeder_template('feed2')),
{'qux': [('comp1:default', 'src1')],
'zag': [('comp2:default', 'src2')]},
['feed1', 'feed2'])
def testErrors(self):
comp = PipelineTest(None, None, '')
d = self.assertFailure(comp.waitForHappy(), errors.ComponentStartError)
d.addCallback(lambda _: comp.stop())
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 1,940,381,758,145,141,200 | -1,392,595,344,419,523,000 | 36.027972 | 79 | 0.566572 | false |
appliedx/edx-platform | common/lib/capa/capa/safe_exec/tests/test_lazymod.py | 152 | 1667 | """Test lazymod.py"""
import sys
import unittest
from capa.safe_exec.lazymod import LazyModule
class ModuleIsolation(object):
"""
Manage changes to sys.modules so that we can roll back imported modules.
Create this object, it will snapshot the currently imported modules. When
you call `clean_up()`, it will delete any module imported since its creation.
"""
def __init__(self):
# Save all the names of all the imported modules.
self.mods = set(sys.modules)
def clean_up(self):
# Get a list of modules that didn't exist when we were created
new_mods = [m for m in sys.modules if m not in self.mods]
# and delete them all so another import will run code for real again.
for m in new_mods:
del sys.modules[m]
class TestLazyMod(unittest.TestCase):
def setUp(self):
super(TestLazyMod, self).setUp()
# Each test will remove modules that it imported.
self.addCleanup(ModuleIsolation().clean_up)
def test_simple(self):
# Import some stdlib module that has not been imported before
self.assertNotIn("colorsys", sys.modules)
colorsys = LazyModule("colorsys")
hsv = colorsys.rgb_to_hsv(.3, .4, .2)
self.assertEqual(hsv[0], 0.25)
def test_dotted(self):
# wsgiref is a module with submodules that is not already imported.
# Any similar module would do. This test demonstrates that the module
# is not already im
self.assertNotIn("wsgiref.util", sys.modules)
wsgiref_util = LazyModule("wsgiref.util")
self.assertEqual(wsgiref_util.guess_scheme({}), "http")
| agpl-3.0 | -7,231,777,169,214,764,000 | 8,895,766,421,400,617,000 | 33.729167 | 81 | 0.658068 | false |
nophead/Skeinforge50plus | fabmetheus_utilities/geometry/geometry_utilities/evaluate_fundamentals/_math.py | 13 | 2590 | """
Boolean geometry utilities.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities import euclidean
import math
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
globalNativeFunctions = 'acos asin atan atan2 ceil cos cosh degrees e exp fabs floor fmod frexp hypot'.split()
globalNativeFunctions += 'ldexp log log10 modf pi pow radians sin sinh sqrt tan tanh trunc'.split()
globalNativeFunctionSet = set(globalNativeFunctions)
#Constants from: http://www.physlink.com/reference/MathConstants.cfm
#Tau is from: http://tauday.com/
#If anyone wants to add stuff, more constants are at: http://en.wikipedia.org/wiki/Mathematical_constant
globalMathConstantDictionary = {
'euler' : 0.5772156649015328606065120,
'golden' : euclidean.globalGoldenRatio,
'goldenAngle' : euclidean.globalGoldenAngle,
'goldenRatio' : euclidean.globalGoldenRatio,
'tau' : euclidean.globalTau}
def _getAccessibleAttribute(attributeName):
'Get the accessible attribute.'
if attributeName in globalMathConstantDictionary:
return globalMathConstantDictionary[attributeName]
if attributeName in globalNativeFunctionSet:
return math.__dict__[attributeName]
if attributeName in globalAccessibleAttributeDictionary:
return globalAccessibleAttributeDictionary[attributeName]
return None
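# For example (assuming the expression evaluator calls this lookup):
#     _getAccessibleAttribute('tau')  -> euclidean.globalTau, from the constants
#     _getAccessibleAttribute('sqrt') -> math.sqrt, a native function
#     _getAccessibleAttribute('max')  -> getMax, a wrapper defined below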
def getAbs(value):
'Get the abs.'
return abs(value)
def getBoolean(value):
'Get the boolean.'
return bool(value)
def getDivmod(x, y):
'Get the divmod.'
return divmod(x, y)
def getFloat(value):
'Get the float.'
return float(value)
def getHex(value):
'Get the hex.'
return hex(value)
def getInt(value):
'Get the int.'
return int(value)
def getLong(value):
'Get the long.'
return long(value)
def getMax(first, second):
'Get the max.'
return max(first, second)
def getMin(first, second):
'Get the min.'
return min(first, second)
def getRound(value):
'Get the round.'
return round(value)
def getString(value):
'Get the string.'
return str(value)
globalAccessibleAttributeDictionary = {
'abs' : getAbs,
'boolean' : getBoolean,
'divmod' : getDivmod,
'float' : getFloat,
'hex' : getHex,
'int' : getInt,
'long' : getLong,
'max' : getMax,
'min' : getMin,
'round' : getRound,
'string' : getString}
| agpl-3.0 | 2,244,529,412,234,558,500 | -769,535,827,755,897,200 | 24.643564 | 157 | 0.743629 | false |
kushG/osf.io | website/addons/dataverse/views/crud.py | 1 | 13076 | # -*- coding: utf-8 -*-
import os
import httplib
import logging
import datetime
import requests
from bs4 import BeautifulSoup
from flask import request, make_response
from framework.flask import redirect
from framework.exceptions import HTTPError
from framework.utils import secure_filename
from framework.auth.utils import privacy_info_handle
from website.addons.dataverse.client import delete_file, upload_file, \
get_file, get_file_by_id, release_study, get_study, get_dataverse, \
connect_from_settings_or_403, get_files
from website.project.decorators import must_have_permission
from website.project.decorators import must_be_contributor_or_public
from website.project.decorators import must_not_be_registration
from website.project.decorators import must_have_addon
from website.project.views.node import _view_project
from website.project.views.file import get_cache_content
from website.project.model import has_anonymous_link
from website.util import rubeus
from website.addons.dataverse.model import DataverseFile
from website.addons.dataverse.settings import HOST
from website.addons.base.views import check_file_guid
logger = logging.getLogger(__name__)
session = requests.Session()
@must_have_permission('write')
@must_not_be_registration
@must_have_addon('dataverse', 'node')
def dataverse_release_study(node_addon, auth, **kwargs):
node = node_addon.owner
user_settings = node_addon.user_settings
now = datetime.datetime.utcnow()
try:
connection = connect_from_settings_or_403(user_settings)
except HTTPError as error:
if error.code == 403:
connection = None
else:
raise
dataverse = get_dataverse(connection, node_addon.dataverse_alias)
study = get_study(dataverse, node_addon.study_hdl)
if study.get_state() == 'RELEASED':
raise HTTPError(httplib.CONFLICT)
release_study(study)
# Add a log
node.add_log(
action='dataverse_study_released',
params={
'project': node.parent_id,
'node': node._primary_key,
'study': study.title,
},
auth=auth,
log_date=now,
)
return {'study': study.title}, httplib.OK
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_download_file(node_addon, auth, **kwargs):
file_id = kwargs.get('path')
fail_if_unauthorized(node_addon, auth, file_id)
fail_if_private(file_id)
url = 'http://{0}/dvn/FileDownload/?fileId={1}'.format(HOST, file_id)
return redirect(url)
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_download_file_proxy(node_addon, auth, **kwargs):
file_id = kwargs.get('path')
fail_if_unauthorized(node_addon, auth, file_id)
fail_if_private(file_id)
filename, content = scrape_dataverse(file_id)
# Build response
resp = make_response(content)
resp.headers['Content-Disposition'] = 'attachment; filename={0}'.format(
filename
)
resp.headers['Content-Type'] = 'application/octet-stream'
return resp
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_get_file_info(node_addon, auth, **kwargs):
"""API view that gets info for a file."""
node = node_addon.owner
file_id = kwargs.get('path')
fail_if_unauthorized(node_addon, auth, file_id)
fail_if_private(file_id)
anonymous = has_anonymous_link(node, auth)
download_url = node.web_url_for('dataverse_download_file', path=file_id)
dataverse_url = 'http://{0}/dvn/dv/'.format(HOST) + node_addon.dataverse_alias
study_url = 'http://dx.doi.org/' + node_addon.study_hdl
delete_url = node.api_url_for('dataverse_delete_file', path=file_id)
data = {
'node': {
'id': node._id,
'title': node.title
},
'filename': scrape_dataverse(file_id, name_only=True)[0],
'dataverse': privacy_info_handle(node_addon.dataverse, anonymous),
'study': privacy_info_handle(node_addon.study, anonymous),
'urls': {
'dataverse': privacy_info_handle(dataverse_url, anonymous),
'study': privacy_info_handle(study_url, anonymous),
'download': privacy_info_handle(download_url, anonymous),
'delete': privacy_info_handle(delete_url, anonymous),
'files': node.web_url_for('collect_file_trees'),
}
}
return {'data': data}, httplib.OK
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_view_file(node_addon, auth, **kwargs):
node = node_addon.owner
file_id = kwargs.get('path')
fail_if_unauthorized(node_addon, auth, file_id)
fail_if_private(file_id)
# lazily create a file GUID record
file_obj, created = DataverseFile.get_or_create(node=node, path=file_id)
redirect_url = check_file_guid(file_obj)
if redirect_url:
return redirect(redirect_url)
# Get or create rendered file
cache_file_name = '{0}.html'.format(file_id)
rendered = get_cache_content(node_addon, cache_file_name)
if rendered is None:
filename, content = scrape_dataverse(file_id)
_, ext = os.path.splitext(filename)
download_url = node.api_url_for(
'dataverse_download_file_proxy', path=file_id
)
rendered = get_cache_content(
node_addon,
cache_file_name,
start_render=True,
remote_path=file_obj.file_id + ext, # Include extension for MFR
file_content=content,
download_url=download_url,
)
else:
filename, _ = scrape_dataverse(file_id, name_only=True)
render_url = node.api_url_for('dataverse_get_rendered_file',
path=file_id)
ret = {
'file_name': filename,
'rendered': rendered,
'render_url': render_url,
'urls': {
'render': render_url,
'download': node.web_url_for('dataverse_download_file',
path=file_id),
'info': node.api_url_for('dataverse_get_file_info',
path=file_id),
}
}
ret.update(_view_project(node, auth))
return ret
@must_have_permission('write')
@must_not_be_registration
@must_have_addon('dataverse', 'node')
def dataverse_upload_file(node_addon, auth, **kwargs):
node = node_addon.owner
user_settings = node_addon.user_settings
try:
name = request.args['name']
except KeyError:
raise HTTPError(httplib.BAD_REQUEST)
now = datetime.datetime.utcnow()
can_edit = node.can_edit(auth) and not node.is_registration
can_view = node.can_view(auth)
try:
connection = connect_from_settings_or_403(user_settings)
except HTTPError as error:
if error.code == httplib.FORBIDDEN:
connection = None
else:
raise
dataverse = get_dataverse(connection, node_addon.dataverse_alias)
study = get_study(dataverse, node_addon.study_hdl)
filename = secure_filename(name)
status_code = httplib.CREATED
old_id = None
# Fail if file is too small (Dataverse issue)
content = request.data
if len(content) < 5:
raise HTTPError(httplib.UNSUPPORTED_MEDIA_TYPE)
# Replace file if old version exists
old_file = get_file(study, filename)
if old_file is not None:
status_code = httplib.OK
old_id = old_file.id
delete_file(old_file)
# Check if file was deleted
if get_file_by_id(study, old_id) is not None:
raise HTTPError(httplib.BAD_REQUEST)
upload_file(study, filename, content)
file = get_file(study, filename)
if file is None:
raise HTTPError(httplib.BAD_REQUEST)
node.add_log(
action='dataverse_file_added',
params={
'project': node.parent_id,
'node': node._primary_key,
'filename': filename,
'path': node.web_url_for('dataverse_view_file', path=file.id),
'study': study.title,
},
auth=auth,
log_date=now,
)
info = {
'addon': 'dataverse',
'file_id': file.id,
'old_id': old_id,
'name': filename,
'path': filename,
'size': [
len(content),
rubeus.format_filesize(len(content))
],
rubeus.KIND: rubeus.FILE,
'urls': {
'view': node.web_url_for('dataverse_view_file',
path=file.id),
'download': node.web_url_for('dataverse_download_file',
path=file.id),
'delete': node.api_url_for('dataverse_delete_file',
path=file.id),
},
'permissions': {
'view': can_view,
'edit': can_edit,
},
}
return info, status_code
@must_have_permission('write')
@must_not_be_registration
@must_have_addon('dataverse', 'node')
def dataverse_delete_file(node_addon, auth, **kwargs):
node = node_addon.owner
user_settings = node_addon.user_settings
now = datetime.datetime.utcnow()
file_id = kwargs.get('path')
if file_id is None:
raise HTTPError(httplib.NOT_FOUND)
try:
connection = connect_from_settings_or_403(user_settings)
except HTTPError as error:
if error.code == httplib.FORBIDDEN:
connection = None
else:
raise
dataverse = get_dataverse(connection, node_addon.dataverse_alias)
study = get_study(dataverse, node_addon.study_hdl)
file = get_file_by_id(study, file_id)
delete_file(file)
# Check if file was deleted
if get_file_by_id(study, file_id) is not None:
raise HTTPError(httplib.BAD_REQUEST)
node.add_log(
action='dataverse_file_removed',
params={
'project': node.parent_id,
'node': node._primary_key,
'filename': file.name,
'study': study.title,
},
auth=auth,
log_date=now,
)
return {}
@must_be_contributor_or_public
@must_have_addon('dataverse', 'node')
def dataverse_get_rendered_file(**kwargs):
"""
"""
node_settings = kwargs['node_addon']
file_id = kwargs['path']
cache_file = '{0}.html'.format(file_id)
return get_cache_content(node_settings, cache_file)
def scrape_dataverse(file_id, name_only=False):
# Go to file url
url = 'http://{0}/dvn/FileDownload/?fileId={1}'.format(HOST, file_id)
response = session.head(url, allow_redirects=True) if name_only else session.get(url)
# Agree to terms if a redirect has occurred
if response.history:
response = session.get(url) if name_only else response
parsed = BeautifulSoup(response.content)
view_state = parsed.find(id='javax.faces.ViewState').attrs.get('value')
data = {
'form1': 'form1',
'javax.faces.ViewState': view_state,
'form1:termsAccepted': 'on',
'form1:termsButton': 'Continue',
}
terms_url = 'http://{0}/dvn/faces/study/TermsOfUsePage.xhtml'.format(HOST)
session.post(terms_url, data=data)
response = session.head(url) if name_only else session.get(url)
if 'content-disposition' not in response.headers.keys():
raise HTTPError(httplib.NOT_FOUND)
filename = response.headers['content-disposition'].split('"')[1]
return filename, response.content
def fail_if_unauthorized(node_addon, auth, file_id):
node = node_addon.owner
user_settings = node_addon.user_settings
if file_id is None:
raise HTTPError(httplib.NOT_FOUND)
try:
connection = connect_from_settings_or_403(user_settings)
except HTTPError as error:
if error.code == 403:
connection = None
else:
raise
dataverse = get_dataverse(connection, node_addon.dataverse_alias)
study = get_study(dataverse, node_addon.study_hdl)
released_file_ids = [f.id for f in get_files(study, released=True)]
all_file_ids = [f.id for f in get_files(study)] + released_file_ids
if file_id not in all_file_ids:
raise HTTPError(httplib.FORBIDDEN)
elif not node.can_edit(auth) and file_id not in released_file_ids:
raise HTTPError(httplib.UNAUTHORIZED)
def fail_if_private(file_id):
url = 'http://{0}/dvn/FileDownload/?fileId={1}'.format(HOST, file_id)
resp = requests.head(url)
if resp.status_code == httplib.FORBIDDEN:
raise HTTPError(
httplib.FORBIDDEN,
data={
'message_short': 'Cannot access file contents',
'message_long':
'The dataverse does not allow users to download files on ' +
'private studies at this time. Please contact the owner ' +
'of this Dataverse study for access to this file.',
}
)
| apache-2.0 | 6,724,189,096,105,678,000 | -7,470,630,561,658,656,000 | 29.268519 | 89 | 0.619073 | false |
dsolimando/Hot | hot-jython-modules/src/main/resources/test/test_inspect.py | 9 | 19658 | import sys
import types
import unittest
import inspect
import datetime
from test.test_support import TESTFN, run_unittest, is_jython
from test import inspect_fodder as mod
from test import inspect_fodder2 as mod2
from test import test_support
# Functions tested in this suite:
# ismodule, isclass, ismethod, isfunction, istraceback, isframe, iscode,
# isbuiltin, isroutine, getmembers, getdoc, getfile, getmodule,
# getsourcefile, getcomments, getsource, getclasstree, getargspec,
# getargvalues, formatargspec, formatargvalues, currentframe, stack, trace
# isdatadescriptor
modfile = mod.__file__
if modfile.endswith(('c', 'o')):
modfile = modfile[:-1]
elif modfile.endswith('$py.class'):
modfile = modfile[:-9] + '.py'
import __builtin__
try:
1/0
except:
tb = sys.exc_traceback
git = mod.StupidGit()
def na_for_jython(fn):
if is_jython:
def do_nothing(*args, **kw):
pass
return do_nothing
else:
return fn
class IsTestBase(unittest.TestCase):
predicates = set([inspect.isbuiltin, inspect.isclass, inspect.iscode,
inspect.isframe, inspect.isfunction, inspect.ismethod,
inspect.ismodule, inspect.istraceback])
def istest(self, predicate, exp):
obj = eval(exp)
self.failUnless(predicate(obj), '%s(%s)' % (predicate.__name__, exp))
for other in self.predicates - set([predicate]):
self.failIf(other(obj), 'not %s(%s)' % (other.__name__, exp))
class TestPredicates(IsTestBase):
def test_thirteen(self):
count = len(filter(lambda x:x.startswith('is'), dir(inspect)))
# Doc/lib/libinspect.tex claims there are 13 such functions
expected = 13
err_msg = "There are %d (not %d) is* functions" % (count, expected)
self.assertEqual(count, expected, err_msg)
def test_excluding_predicates(self):
#XXX: Jython's PySystemState needs more work before this
#will be doable.
if not test_support.is_jython:
self.istest(inspect.isbuiltin, 'sys.exit')
self.istest(inspect.isbuiltin, '[].append')
self.istest(inspect.isclass, 'mod.StupidGit')
self.istest(inspect.iscode, 'mod.spam.func_code')
self.istest(inspect.isframe, 'tb.tb_frame')
self.istest(inspect.isfunction, 'mod.spam')
self.istest(inspect.ismethod, 'mod.StupidGit.abuse')
self.istest(inspect.ismethod, 'git.argue')
self.istest(inspect.ismodule, 'mod')
self.istest(inspect.istraceback, 'tb')
self.istest(inspect.isdatadescriptor, '__builtin__.file.closed')
self.istest(inspect.isdatadescriptor, '__builtin__.file.softspace')
if hasattr(types, 'GetSetDescriptorType'):
self.istest(inspect.isgetsetdescriptor,
'type(tb.tb_frame).f_locals')
#XXX: This detail of PyFrames is not yet supported in Jython
elif not test_support.is_jython:
self.failIf(inspect.isgetsetdescriptor(type(tb.tb_frame).f_locals))
if hasattr(types, 'MemberDescriptorType'):
self.istest(inspect.ismemberdescriptor, 'datetime.timedelta.days')
else:
self.failIf(inspect.ismemberdescriptor(datetime.timedelta.days))
def test_isroutine(self):
self.assert_(inspect.isroutine(mod.spam))
self.assert_(inspect.isroutine([].count))
class TestInterpreterStack(IsTestBase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
git.abuse(7, 8, 9)
def test_abuse_done(self):
self.istest(inspect.istraceback, 'git.ex[2]')
self.istest(inspect.isframe, 'mod.fr')
def test_stack(self):
self.assert_(len(mod.st) >= 5)
self.assertEqual(mod.st[0][1:],
(modfile, 16, 'eggs', [' st = inspect.stack()\n'], 0))
self.assertEqual(mod.st[1][1:],
(modfile, 9, 'spam', [' eggs(b + d, c + f)\n'], 0))
self.assertEqual(mod.st[2][1:],
(modfile, 43, 'argue', [' spam(a, b, c)\n'], 0))
self.assertEqual(mod.st[3][1:],
(modfile, 39, 'abuse', [' self.argue(a, b, c)\n'], 0))
def test_trace(self):
self.assertEqual(len(git.tr), 3)
self.assertEqual(git.tr[0][1:], (modfile, 43, 'argue',
[' spam(a, b, c)\n'], 0))
self.assertEqual(git.tr[1][1:], (modfile, 9, 'spam',
[' eggs(b + d, c + f)\n'], 0))
self.assertEqual(git.tr[2][1:], (modfile, 18, 'eggs',
[' q = y / 0\n'], 0))
def test_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr)
self.assertEqual(args, ['x', 'y'])
self.assertEqual(varargs, None)
self.assertEqual(varkw, None)
self.assertEqual(locals, {'x': 11, 'p': 11, 'y': 14})
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(x=11, y=14)')
# TODO - test_previous_frame could be rewritten such that we could
# introspect on the previous frame but without a dependency on
# tuple unpacking
@na_for_jython
def test_previous_frame(self):
args, varargs, varkw, locals = inspect.getargvalues(mod.fr.f_back)
self.assertEqual(args, ['a', 'b', 'c', 'd', ['e', ['f']]])
self.assertEqual(varargs, 'g')
self.assertEqual(varkw, 'h')
self.assertEqual(inspect.formatargvalues(args, varargs, varkw, locals),
'(a=7, b=8, c=9, d=3, (e=4, (f=5,)), *g=(), **h={})')
class GetSourceBase(unittest.TestCase):
# Subclasses must override.
fodderFile = None
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.source = file(inspect.getsourcefile(self.fodderFile)).read()
def sourcerange(self, top, bottom):
lines = self.source.split("\n")
return "\n".join(lines[top-1:bottom]) + "\n"
def assertSourceEqual(self, obj, top, bottom):
self.assertEqual(inspect.getsource(obj),
self.sourcerange(top, bottom))
class TestRetrievingSourceCode(GetSourceBase):
fodderFile = mod
def test_getclasses(self):
classes = inspect.getmembers(mod, inspect.isclass)
self.assertEqual(classes,
[('FesteringGob', mod.FesteringGob),
('MalodorousPervert', mod.MalodorousPervert),
('ParrotDroppings', mod.ParrotDroppings),
('StupidGit', mod.StupidGit)])
tree = inspect.getclasstree([cls[1] for cls in classes], 1)
self.assertEqual(tree,
[(mod.ParrotDroppings, ()),
(mod.StupidGit, ()),
[(mod.MalodorousPervert, (mod.StupidGit,)),
[(mod.FesteringGob, (mod.MalodorousPervert,
mod.ParrotDroppings))
]
]
])
def test_getfunctions(self):
functions = inspect.getmembers(mod, inspect.isfunction)
self.assertEqual(functions, [('eggs', mod.eggs),
('spam', mod.spam)])
def test_getdoc(self):
self.assertEqual(inspect.getdoc(mod), 'A module docstring.')
self.assertEqual(inspect.getdoc(mod.StupidGit),
'A longer,\n\nindented\n\ndocstring.')
self.assertEqual(inspect.getdoc(git.abuse),
'Another\n\ndocstring\n\ncontaining\n\ntabs')
def test_getcomments(self):
self.assertEqual(inspect.getcomments(mod), '# line 1\n')
self.assertEqual(inspect.getcomments(mod.StupidGit), '# line 20\n')
def test_getmodule(self):
# Check actual module
self.assertEqual(inspect.getmodule(mod), mod)
# Check class (uses __module__ attribute)
self.assertEqual(inspect.getmodule(mod.StupidGit), mod)
# Check a method (no __module__ attribute, falls back to filename)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Do it again (check the caching isn't broken)
self.assertEqual(inspect.getmodule(mod.StupidGit.abuse), mod)
# Check a builtin
self.assertEqual(inspect.getmodule(str), sys.modules["__builtin__"])
# Check filename override
self.assertEqual(inspect.getmodule(None, modfile), mod)
def test_getsource(self):
self.assertSourceEqual(git.abuse, 29, 39)
self.assertSourceEqual(mod.StupidGit, 21, 46)
def test_getsourcefile(self):
self.assertEqual(inspect.getsourcefile(mod.spam), modfile)
self.assertEqual(inspect.getsourcefile(git.abuse), modfile)
def test_getfile(self):
self.assertEqual(inspect.getfile(mod.StupidGit), mod.__file__)
def test_getmodule_recursion(self):
from new import module
name = '__inspect_dummy'
m = sys.modules[name] = module(name)
m.__file__ = "<string>" # hopefully not a real filename...
m.__loader__ = "dummy" # pretend the filename is understood by a loader
exec "def x(): pass" in m.__dict__
self.assertEqual(inspect.getsourcefile(m.x.func_code), '<string>')
del sys.modules[name]
inspect.getmodule(compile('a=10','','single'))
class TestDecorators(GetSourceBase):
fodderFile = mod2
def test_wrapped_decorator(self):
self.assertSourceEqual(mod2.wrapped, 14, 17)
def test_replacing_decorator(self):
self.assertSourceEqual(mod2.gone, 9, 10)
class TestOneliners(GetSourceBase):
fodderFile = mod2
def test_oneline_lambda(self):
# Test inspect.getsource with a one-line lambda function.
self.assertSourceEqual(mod2.oll, 25, 25)
def test_threeline_lambda(self):
# Test inspect.getsource with a three-line lambda function,
# where the second and third lines are _not_ indented.
self.assertSourceEqual(mod2.tll, 28, 30)
def test_twoline_indented_lambda(self):
# Test inspect.getsource with a two-line lambda function,
# where the second line _is_ indented.
self.assertSourceEqual(mod2.tlli, 33, 34)
def test_onelinefunc(self):
# Test inspect.getsource with a regular one-line function.
self.assertSourceEqual(mod2.onelinefunc, 37, 37)
def test_manyargs(self):
# Test inspect.getsource with a regular function where
# the arguments are on two lines and _not_ indented and
# the body on the second line with the last arguments.
self.assertSourceEqual(mod2.manyargs, 40, 41)
def test_twolinefunc(self):
# Test inspect.getsource with a regular function where
# the body is on two lines, following the argument list and
# continued on the next line by a \\.
self.assertSourceEqual(mod2.twolinefunc, 44, 45)
def test_lambda_in_list(self):
# Test inspect.getsource with a one-line lambda function
# defined in a list, indented.
self.assertSourceEqual(mod2.a[1], 49, 49)
def test_anonymous(self):
# Test inspect.getsource with a lambda function defined
# as argument to another function.
self.assertSourceEqual(mod2.anonymous, 55, 55)
class TestBuggyCases(GetSourceBase):
fodderFile = mod2
def test_with_comment(self):
self.assertSourceEqual(mod2.with_comment, 58, 59)
def test_multiline_sig(self):
self.assertSourceEqual(mod2.multiline_sig[0], 63, 64)
def test_nested_class(self):
self.assertSourceEqual(mod2.func69().func71, 71, 72)
def test_one_liner_followed_by_non_name(self):
self.assertSourceEqual(mod2.func77, 77, 77)
def test_one_liner_dedent_non_name(self):
self.assertSourceEqual(mod2.cls82.func83, 83, 83)
def test_with_comment_instead_of_docstring(self):
self.assertSourceEqual(mod2.func88, 88, 90)
def test_method_in_dynamic_class(self):
self.assertSourceEqual(mod2.method_in_dynamic_class, 95, 97)
# Helper for testing classify_class_attrs.
def attrs_wo_objs(cls):
return [t[:3] for t in inspect.classify_class_attrs(cls)]
class TestClassesAndFunctions(unittest.TestCase):
def test_classic_mro(self):
# Test classic-class method resolution order.
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, A, C)
got = inspect.getmro(D)
self.assertEqual(expected, got)
def test_newstyle_mro(self):
# The same w/ new-class MRO.
class A(object): pass
class B(A): pass
class C(A): pass
class D(B, C): pass
expected = (D, B, C, A, object)
got = inspect.getmro(D)
self.assertEqual(expected, got)
def assertArgSpecEquals(self, routine, args_e, varargs_e = None,
varkw_e = None, defaults_e = None,
formatted = None):
args, varargs, varkw, defaults = inspect.getargspec(routine)
self.assertEqual(args, args_e)
self.assertEqual(varargs, varargs_e)
self.assertEqual(varkw, varkw_e)
self.assertEqual(defaults, defaults_e)
if formatted is not None:
self.assertEqual(inspect.formatargspec(args, varargs, varkw, defaults),
formatted)
@na_for_jython
def test_getargspec(self):
self.assertArgSpecEquals(mod.eggs, ['x', 'y'], formatted = '(x, y)')
self.assertArgSpecEquals(mod.spam,
['a', 'b', 'c', 'd', ['e', ['f']]],
'g', 'h', (3, (4, (5,))),
'(a, b, c, d=3, (e, (f,))=(4, (5,)), *g, **h)')
@na_for_jython
def test_getargspec_method(self):
class A(object):
def m(self):
pass
self.assertArgSpecEquals(A.m, ['self'])
@na_for_jython
def test_getargspec_sublistofone(self):
def sublistOfOne((foo,)): return 1
self.assertArgSpecEquals(sublistOfOne, [['foo']])
def fakeSublistOfOne((foo)): return 1
self.assertArgSpecEquals(fakeSublistOfOne, ['foo'])
def test_classify_oldstyle(self):
class A:
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
attrs = attrs_wo_objs(A)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', A) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', B) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'method', C) in attrs, 'missing plain method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', C) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', B) in attrs, 'missing plain method')
self.assert_(('m1', 'method', D) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
# Repeat all that, but w/ new-style classes.
def test_classify_newstyle(self):
class A(object):
def s(): pass
s = staticmethod(s)
def c(cls): pass
c = classmethod(c)
def getp(self): pass
p = property(getp)
def m(self): pass
def m1(self): pass
datablob = '1'
attrs = attrs_wo_objs(A)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', A) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class B(A):
def m(self): pass
attrs = attrs_wo_objs(B)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'class method', A) in attrs, 'missing class method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', B) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class C(A):
def m(self): pass
def c(self): pass
attrs = attrs_wo_objs(C)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'method', C) in attrs, 'missing plain method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', C) in attrs, 'missing plain method')
self.assert_(('m1', 'method', A) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
class D(B, C):
def m1(self): pass
attrs = attrs_wo_objs(D)
self.assert_(('s', 'static method', A) in attrs, 'missing static method')
self.assert_(('c', 'method', C) in attrs, 'missing plain method')
self.assert_(('p', 'property', A) in attrs, 'missing property')
self.assert_(('m', 'method', B) in attrs, 'missing plain method')
self.assert_(('m1', 'method', D) in attrs, 'missing plain method')
self.assert_(('datablob', 'data', A) in attrs, 'missing data')
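        # Note: for new-style D(B, C) the MRO is D -> B -> C -> A, so the plain
        # method C.c shadows A's classmethod here -- unlike the old-style test
        # above, where depth-first lookup (D -> B -> A) found A's classmethod
        # for the same name.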
def test_main():
run_unittest(TestDecorators, TestRetrievingSourceCode, TestOneliners,
TestBuggyCases,
TestInterpreterStack, TestClassesAndFunctions, TestPredicates)
if __name__ == "__main__":
test_main()
| gpl-3.0 | -2,369,301,177,971,286,000 | 2,805,177,397,464,610,000 | 37.849802 | 83 | 0.588514 | false |
SublimeText/PackageDev | plugins/settings/known_settings.py | 1 | 29813 | import collections
import logging
import os
import re
import textwrap
import time
import weakref
import sublime
from sublime_lib import encodings, ResourcePath
from ..lib.weakmethod import WeakMethodProxy
from ..lib import get_setting
from .region_math import VALUE_SCOPE, get_value_region_at, get_last_key_name_from
logger = logging.getLogger(__name__)
PREF_FILE = "Preferences.sublime-settings"
PREF_FILE_ALIAS = "Base File.sublime-settings"
KIND_SETTING = (sublime.KIND_ID_VARIABLE, "S", "Setting")
def html_encode(string):
"""Encode some critical characters to html entities."""
    return string.replace("&", "&amp;") \
                 .replace("<", "&lt;") \
                 .replace(">", "&gt;") \
                 .replace("\t", "&nbsp;&nbsp;&nbsp;&nbsp;") \
                 .replace("  ", "&nbsp;&nbsp;") \
                 .replace("\n", "<br>") if string else ""
def format_completion_item(value, default=None, is_default=False, label=None, annotation=None):
"""Create a completion item with its type as description.
Arguments:
value (any):
The value which is added when completions are committed.
If `label` is none, the `value` is used as label, too.
default (any):
Sets is_default if equals `value`.
is_default (bool):
If `True` the completion item is marked '(default)'.
label (str):
An alternative label to use to present the `value`
in the completions panel.
annotation (str):
An optional annotation to display after the label.
"""
if isinstance(value, dict):
raise ValueError("Cannot format dictionary value", value)
if not is_default:
is_default = value in default if isinstance(default, list) else value == default
type_ = type(value).__name__
return sublime.CompletionItem(
trigger=sublime.encode_value(label or value).strip('"'),
annotation=("(default) {}" if is_default else "{}").format(annotation or ""),
completion=value,
kind=(sublime.KIND_ID_SNIPPET, type_[0], type_),
)
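# Illustrative usage (requires the Sublime Text API; values are made up):
#
#   item = format_completion_item("phantom", default="phantom")
#   # -> trigger "phantom", annotation "(default) ",
#   #    kind (sublime.KIND_ID_SNIPPET, "s", "str")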
def decode_value(string):
"""Decode string to python object with unrestrictive booleans."""
if string.lower() == "true":
return True
if string.lower() == "false":
return False
try:
return int(string)
except ValueError:
return float(string)
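# decode_value() is deliberately lax about boolean casing; a quick sketch:
#
#   decode_value("True")  # -> True  (any casing of true/false)
#   decode_value("42")    # -> 42
#   decode_value("4.2")   # -> 4.2   (anything else raises ValueError)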
class KnownSettings(object):
"""A class which provides all known settings with comments/defaults.
An object of this class initialized with a sublime-settings file loads all
basefiles from all packages including comments and default values to
provide all required information for tooltips and auto-completion.
"""
# cache for instances, keyed by the basename
# and using weakrefs for easy garbage collection
cache = weakref.WeakValueDictionary()
_is_initialized = False
_is_loaded = False
filename = None
on_loaded_callbacks = None
on_loaded_once_callbacks = None
defaults = None
comments = None
fallback_settings = None
def __new__(cls, filename, on_loaded=None, **kwargs):
# __init__ will be called on the return value
obj = cls.cache.get(filename)
if obj:
logger.debug("cache hit %r", filename)
return cls.cache[filename]
else:
obj = super().__new__(cls, **kwargs)
cls.cache[filename] = obj
return obj
def __init__(self, filename):
"""Initialize view event listener object.
Arguments:
filename (str):
Settings file name to index.
"""
# Because __init__ may be called multiple times
# and we only want to trigger a reload once,
# we need special handling here.
if not self._is_initialized:
# the associated settings file name all the settings belong to
self.filename = filename
# callback lists
self.on_loaded_callbacks = []
self.on_loaded_once_callbacks = []
self._is_initialized = True
# the dictionary with all defaults of a setting
self.defaults = collections.ChainMap()
# the dictionary with all comments of each setting
self.comments = collections.ChainMap()
self.trigger_settings_reload()
def add_on_loaded(self, on_loaded, once=False):
"""Add a callback to call once settings have been indexed (asynchronously).
Bound methods are stored as weak references.
Arguments:
on_loaded (callable):
The callback.
once (bool):
Whether the callback should be called only once.
"""
# Due to us archiving the callback, we use a weakref
# to avoid a circular reference to all SettingListeners affected,
# ensuring our __del__ is properly called when all relevant views are closed.
if self._is_loaded:
# Invoke callback 'immediately' since we're already loaded.
# Note that this is currently not thread-safe.
sublime.set_timeout_async(on_loaded, 0)
if not once:
self.on_loaded_callbacks.append(WeakMethodProxy(on_loaded))
elif not self._is_loaded:
self.on_loaded_once_callbacks.append(WeakMethodProxy(on_loaded))
def __del__(self):
logger.debug("deleting KnownSettings instance for %r", self.filename)
def __iter__(self):
"""Iterate over default keys."""
return iter(self.defaults)
def trigger_settings_reload(self):
# look for settings files asynchronously
sublime.set_timeout_async(self._load_settings, 0)
def _load_settings(self):
"""Load and merge settings and their comments from all base files.
The idea is each package which wants to add a valid entry to the
`Preferences.sublime-settings` file must provide such a file with all
keys it wants to add. These keys and the associated comments above it
are loaded into dictionaries and used to provide tooltips, completions
and linting.
"""
ignored_patterns = frozenset(("/User/", "/Preferences Editor/"))
# TODO project settings include "Preferences",
# but we don't have a syntax def for those yet
logger.debug("loading defaults and comments for %r", self.filename)
start_time = time.time()
resources = sublime.find_resources(self.filename)
resources += sublime.find_resources(self.filename + "-hints")
if self.filename == PREF_FILE:
resources += sublime.find_resources(PREF_FILE_ALIAS)
logger.debug("found %d %r files", len(resources), self.filename)
for resource in resources:
if any(ignored in resource for ignored in ignored_patterns):
logger.debug("ignoring %r", resource)
continue
try:
logger.debug("parsing %r", resource)
lines = sublime.load_resource(resource).splitlines()
for key, value in self._parse_settings(lines).items():
# merge settings without overwriting existing ones
self.defaults.setdefault(key, value)
except Exception as e:
logger.error("error parsing %r - %s%r",
resource, e.__class__.__name__, e.args)
duration = time.time() - start_time
logger.debug("loading took %.3fs", duration)
# include general settings if we're in a syntax-specific file
is_syntax_specific = self._is_syntax_specific()
if is_syntax_specific and not self.fallback_settings:
self.fallback_settings = KnownSettings(PREF_FILE)
# add fallbacks to the ChainMaps
self.defaults.maps.append(self.fallback_settings.defaults)
self.comments.maps.append(self.fallback_settings.comments)
# these may be loaded later, so delay calling our own callbacks
self.fallback_settings.add_on_loaded(self._has_loaded, once=True)
else:
if self.fallback_settings and not is_syntax_specific:
# file was renamed, probably
self.fallback_settings = None
self.defaults.maps.pop()
self.comments.maps.pop()
self._has_loaded()
def _has_loaded(self):
self._is_loaded = True
for callback in self.on_loaded_once_callbacks:
try:
callback()
except ReferenceError:
pass
self.on_loaded_once_callbacks.clear()
# copy callback list so we can clean up expired references
for callback in tuple(self.on_loaded_callbacks):
try:
callback()
except ReferenceError:
logger.debug("removing gone-away weak on_loaded_callback reference")
self.on_loaded_callbacks.remove(callback)
def _is_syntax_specific(self):
"""Check whether a syntax def with the same base file name exists.
Returns:
bool
"""
syntax_file_exts = (".sublime-syntax", ".tmLanguage")
name_no_ext = os.path.splitext(self.filename)[0]
for ext in syntax_file_exts:
syntax_file_name = name_no_ext + ext
resources = sublime.find_resources(syntax_file_name)
if resources:
logger.debug("syntax-specific settings file for %r", resources[0])
return True
return False
def _parse_settings(self, lines):
"""Parse the setting file and capture comments.
This is naive but gets the job done most of the time.
"""
content = []
comment = []
in_comment = False
for line in lines:
stripped = line.strip()
if in_comment:
if stripped.endswith("*/"):
in_comment = False
                    # strip the trailing "*/" and any surrounding whitespace
line = line.rstrip("*/ \t")
if line:
comment.append(line)
elif stripped.startswith("* "):
comment.append(stripped[2:])
else:
comment.append(line)
continue
# ignore empty lines if not in a comment
# empty line in comment may be used as visual separator
elif not stripped:
continue
if stripped.startswith("/*"):
in_comment = True
                # strip any further leading asterisks
stripped = stripped[2:].lstrip("*")
if stripped:
comment.append(stripped)
continue
if stripped.startswith("//"):
# skip comment lines ending with `//` (likely used as separators)
# a standalone `//` adds an empty line as visual separator
stripped = stripped[2:]
if not stripped or not stripped.endswith("//"):
comment.append(stripped)
continue
content.append(line)
if comment:
# the json key is used as key for the comments located above it
match = re.match(r'"((?:[^"]|\\.)*)":', stripped)
if not match:
continue
key = match.group(1)
if key not in self.comments:
self.comments[key] = textwrap.dedent('\n'.join(comment))
comment.clear()
# Return decoded json file from content with stripped comments
return sublime.decode_value('\n'.join(content))
def build_tooltip(self, view, key):
"""Return html encoded docstring for settings key.
Arguments:
view (sublime.View):
the view to provide completions for
key (string):
the key under the cursor
"""
if key in self.defaults:
# the comment for the setting
comment = html_encode(self.comments.get(key) or "No description.")
# the default value from base file
default = html_encode(
sublime.encode_value(self.defaults.get(key), pretty=True))
else:
comment, default = "No description.", "unknown setting"
# format tooltip html content
return (
"<h1>{key}</h1>"
"<h2>Default: {default}</h2>"
"<p>{comment}</p>"
).format(**locals())
def insert_snippet(self, view, key):
"""Insert a snippet for the settings key at the end of the view.
Arguments:
view (sublime.View):
The view to add the snippet to. Doesn't need to be the view
of this ViewEventHandler. It's more likely the view of the
user settings which is to be passed here.
key (string):
The settings key to insert a snippet for.
"""
# find last value in the view
value_regions = view.find_by_selector(VALUE_SCOPE)
if not value_regions:
# no value found use end of global dict
selector = "meta.mapping"
value_regions = view.find_by_selector(selector)
if not value_regions:
# no global dict found, insert one
point = view.size()
is_empty_line = not view.substr(view.line(point)).strip()
bol = "{\n\t" if is_empty_line else "\n{\n\t"
eol = ",$0\n}\n"
else:
# insert first value to user file
point = value_regions[-1].end() - 1
bol, eol = "\t", "\n"
else:
# find line with last non-whitespace characters
value_region = value_regions[-1]
value_str = view.substr(value_region)
value_str_trimmed = value_str.rstrip()
ws_length = len(value_str) - len(value_str_trimmed)
point = view.line(value_region.end() - ws_length).end()
if value_str_trimmed.endswith(","):
# already have a comma after last entry
bol, eol = "\n", ","
else:
# add a comma after last entry
bol, eol = ",\n", ""
# format and insert the snippet
snippet = self._key_snippet(key, self.defaults[key], bol, eol)
view.sel().clear()
view.sel().add(point)
view.run_command('insert_snippet', {'contents': snippet})
def key_completions(self, view, prefix, point):
"""Create a list with completions for all known settings.
Arguments:
view (sublime.View):
the view to provide completions for
prefix (string):
the line content before cursor
point (int):
the text positions of all characters in prefix
Returns:
tuple ([ (trigger, content), (trigger, content) ], flags):
the tuple with content ST needs to display completions
"""
if view.match_selector(point - 1, "string"):
# we are within quotations, return words only
completions = [
sublime.CompletionItem(
trigger=key,
completion=key,
kind=KIND_SETTING,
# TODO link to show full description
# details=,
)
for key in self.defaults
]
else:
line = view.substr(view.line(point)).strip()
# don't add newline after snippet if user starts on empty line
eol = "," if len(line) == len(prefix) else ",\n"
# no quotations -> return full snippet
completions = [
sublime.CompletionItem(
trigger=key,
completion=self._key_snippet(key, value, eol=eol),
completion_format=sublime.COMPLETION_FORMAT_SNIPPET,
kind=KIND_SETTING,
# TODO link to show full description
# details=,
)
for key, value in self.defaults.items()
]
return completions
@staticmethod
def _key_snippet(key, value, bol="", eol=",\n"):
"""Create snippet with default value depending on type.
Arguments:
key (string):
the settings key name
value (any):
the default value of the setting read from base file
bol (string):
the prefix to add to the beginning of line
eol (string):
the suffix to add to the end of line
Returns:
string: the contents field to insert into completions entry
"""
encoded = sublime.encode_value(value)
encoded = encoded.replace("\\", "\\\\") # escape snippet markers
encoded = encoded.replace("$", "\\$")
encoded = encoded.replace("}", "\\}")
if isinstance(value, str):
# create the snippet for json strings and exclude quotation marks
# from the input field {1:}
#
# "key": "value"
#
fmt = '{bol}"{key}": "${{1:{encoded}}}"{eol}'
encoded = encoded[1:-1] # strip quotation
elif isinstance(value, list):
# create the snippet for json lists and exclude brackets
# from the input field {1:}
#
# "key":
# [
# value
# ]
#
fmt = '{bol}"{key}":\n[\n\t${{1:{encoded}}}\n]{eol}'
encoded = encoded[1:-1] # strip brackets
elif isinstance(value, dict):
            # create the snippet for json dictionaries and exclude braces
            # from the input field {1:}
#
# "key":
# {
# value
# }
#
fmt = '{bol}"{key}":\n{{\n\t${{1:{encoded}}}\n}}{eol}'
encoded = encoded[1:-1] # strip braces
else:
fmt = '{bol}"{key}": ${{1:{encoded}}}{eol}'
return fmt.format(**locals())
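    # Sketch of the generated snippets (assuming the default bol/eol values):
    #
    #   _key_snippet("word_wrap", False)  # -> '"word_wrap": ${1:false},\n'
    #   _key_snippet("theme", "auto")     # -> '"theme": "${1:auto}",\n'
    #
    # List and dict defaults expand to the multi-line bodies shown above.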
def value_completions(self, view, prefix, point):
"""Create a list with completions for all known settings values.
Arguments:
view (sublime.View):
the view to provide completions for
prefix (string):
the line content before cursor.
point (int):
the text positions of all characters in prefix
Returns:
tuple ([ (trigger, content), (trigger, content) ], flags):
the tuple with content ST needs to display completions
"""
value_region = get_value_region_at(view, point)
if not value_region:
logger.debug("unable to find current key region")
return None
key = get_last_key_name_from(view, value_region.begin())
if not key:
logger.debug("unable to find current key")
return None
        # Deduplicate completions by trigger via a dict; later entries
        # override earlier ones
completions_map = {c.trigger: c for c in self._value_completions_for(key)}
completions = list(completions_map.values())
if not completions:
logger.debug("no completions to offer")
return None
is_str = any(
bool(isinstance(c.completion, str)
or (isinstance(c.completion, list)
and c.completion
and isinstance(c.completion[0], str)))
for c in completions
)
in_str = view.match_selector(point, "string")
logger.debug("completing a string (%s) within a string (%s)", is_str, in_str)
is_list = isinstance(self.defaults.get(key), list)
in_list = view.match_selector(point, "meta.sequence")
logger.debug("completing a list item (%s) within a list (%s)", is_list, in_list)
if in_str and not is_str:
# We're within a string but don't have a string value to complete.
# Complain about this in the status bar, I guess.
msg = "Cannot complete value set within a string"
view.window().status_message(msg)
logger.warning(msg)
return None
if in_str and is_str:
# Strip completions of non-strings. Don't need quotation marks.
completions = [
c for c in completions
if isinstance(c.completion, str)
]
else:
# JSON-ify completion values with special handling for floats.
#
# the value typed so far, which may differ from prefix for floats
typed_region = sublime.Region(value_region.begin(), point)
typed = view.substr(typed_region).lstrip()
for c in completions:
value = c.completion
# unroll dicts
if isinstance(value, frozenset):
value = dict(value)
if isinstance(value, float):
# strip already typed text from float completions
# because ST cannot complete past word boundaries
# (e.g. strip `1.` of `1.234`)
value_str = str(value)
if value_str.startswith(typed):
offset = len(typed) - len(prefix)
value_str = value_str[offset:]
elif typed:
# don't offer as completion if 'typed' didn't match
continue
else:
value_str = sublime.encode_value(value)
if is_list and not in_list:
# wrap each item in a brackets to insert a 'list'
value_str = "[{}]".format(value_str)
# escape snippet markers
value_str = value_str.replace("$", "\\$")
c.completion = value_str
# disable word completion to prevent stupid suggestions
return completions
def _value_completions_for(self, key):
"""Collect and return value completions from matching source.
Arguments:
key (string):
the settings key name to read comments from
Returns:
{(trigger, contents), ...}
A set of all completions.
"""
logger.debug("building completions for key %r", key)
default = self.defaults.get(key)
logger.debug("default value: %r", default)
if key in ('color_scheme', 'dark_color_scheme', 'light_color_scheme'):
yield from self._color_scheme_completions(key, default)
elif key in ('default_encoding', 'fallback_encoding'):
yield from self._encoding_completions(default)
elif key in ('theme', 'dark_theme', 'light_theme'):
yield from self._theme_completions(key, default)
else:
yield from self._completions_from_comment(key, default)
yield from self._completions_from_default(key, default)
def _completions_from_comment(self, key, default):
"""Parse settings comments and return all possible values.
Many settings are commented with a list of quoted words representing
the possible / allowed values. This method generates a list of these
quoted words which are suggested in auto-completions.
Arguments:
key (string):
the settings key name to read comments from
default (any):
the default value of the setting used to mark completion items
as "default".
Returns:
{(trigger, contents), ...}
A set of all completions.
"""
comment = self.comments.get(key)
if not comment:
return
for match in re.finditer(r"`([^`\n]+)`", comment):
# backticks should wrap the value in JSON representation,
# so we try to decode it
value = match.group(1)
try:
value = sublime.decode_value(value)
except ValueError:
pass
if isinstance(value, list):
# Suggest list items as completions instead of a string
# representation of the list.
# Unless it's a dict.
for v in value:
if not isinstance(v, dict):
yield format_completion_item(v, default)
elif isinstance(value, dict):
# TODO what should we do with dicts?
pass
else:
yield format_completion_item(value, default)
for match in re.finditer(r'"([\.\w]+)"', comment):
# quotation marks either wrap a string, a numeric or a boolean
# fall back to a str
value, = match.groups()
try:
value = decode_value(value)
except ValueError:
pass
yield format_completion_item(value, default)
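    # Sketch of the comment conventions this parser understands, for a
    # hypothetical settings comment such as:
    #
    #   // Controls drawing. Valid values: "none", "all" or `["selection"]`
    #
    # which yields the completions none, all and selection.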
@staticmethod
def _completions_from_default(key, default):
"""Built completions from default value.
Arguments:
key (string):
the settings key name to read comments from
Returns:
{(trigger, contents), ...}
A set of all completions.
"""
if default is None or default == "":
return
elif isinstance(default, bool):
for value in [True, False]:
yield format_completion_item(value, default=default)
elif isinstance(default, list):
for value in default:
yield format_completion_item(value, is_default=True)
elif isinstance(default, dict):
return # TODO can't complete these yet
else:
yield format_completion_item(default, is_default=True)
@staticmethod
def _color_scheme_completions(key, default):
"""Create completions of all visible color schemes.
The set will not include color schemes matching at least one entry of
`"settings.exclude_color_scheme_patterns": []`.
default (string):
The default `color_scheme` value.
Returns:
            {(trigger, contents), ...}
A set of all completions.
- trigger (string): base file name of the color scheme
- contents (string): the value to commit to the settings
"""
if int(sublime.version()) >= 4095 and key == 'color_scheme':
yield format_completion_item(value="auto", annotation="dark-/light switching")
hidden = get_setting('settings.exclude_color_scheme_patterns') or []
for scheme_path in sublime.find_resources("*.sublime-color-scheme"):
if not any(hide in scheme_path for hide in hidden):
try:
root, package, *_, name = scheme_path.split("/")
except ValueError:
continue
if root == 'Cache':
continue
yield format_completion_item(value=name, default=default, annotation=package)
for scheme_path in sublime.find_resources("*.tmTheme"):
if not any(hide in scheme_path for hide in hidden):
try:
root, package, *_, name = scheme_path.split("/")
except ValueError:
continue
if root == 'Cache':
continue
yield format_completion_item(
value=scheme_path, default=default, label=name, annotation=package
)
@staticmethod
def _encoding_completions(default):
"""Create completions of all available encoding values.
default (string):
The default `encoding` value.
Returns:
{(trigger, contents), ...}
A set of all completions.
- trigger (string): the encoding in sublime format
- contents (string): the encoding in sublime format
"""
for enc in encodings.SUBLIME_TO_STANDARD.keys():
yield format_completion_item(value=enc, default=default, annotation="encoding")
@staticmethod
def _theme_completions(key, default):
"""Create completions of all visible themes.
default (string):
The default `theme` value.
The set will not include color schemes matching at least one entry of
`"settings.exclude_theme_patterns": []` setting.
Returns:
{(trigger, contents), ...}
A set of all completions.
- trigger (string): base file name of the theme
- contents (string): the file name to commit to the settings
"""
hidden = get_setting('settings.exclude_theme_patterns') or []
if int(sublime.version()) >= 4095 and key == 'theme':
yield format_completion_item(value="auto", annotation="dark-/light switching")
for theme_path in ResourcePath.glob_resources("*.sublime-theme"):
if not any(hide in theme_path.name for hide in hidden):
yield format_completion_item(
value=theme_path.name, default=default, annotation="theme"
)
| mit | -1,029,786,108,033,377,500 | -3,932,471,854,748,284,000 | 37.567917 | 95 | 0.55127 | false |
mcgonagle/ansible_f5 | library_old/bigip_gtm_facts.py | 4 | 16069 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_gtm_facts
short_description: Collect facts from F5 BIG-IP GTM devices.
description:
- Collect facts from F5 BIG-IP GTM devices.
version_added: "2.3"
options:
include:
description:
- Fact category to collect
required: true
choices:
- pool
- wide_ip
- virtual_server
filter:
description:
- Perform regex filter of response. Filtering is done on the name of
the resource. Valid filters are anything that can be provided to
Python's C(re) module.
required: false
default: None
notes:
- Requires the f5-sdk Python package on the host. This is as easy as
pip install f5-sdk
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Get pool facts
bigip_gtm_facts:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
include: "pool"
filter: "my_pool"
delegate_to: localhost
'''
RETURN = '''
wide_ip:
description:
Contains the lb method for the wide ip and the pools
that are within the wide ip.
returned: changed
type: dict
sample:
wide_ip:
- enabled: "True"
failure_rcode: "noerror"
failure_rcode_response: "disabled"
failure_rcode_ttl: "0"
full_path: "/Common/foo.ok.com"
last_resort_pool: ""
minimal_response: "enabled"
name: "foo.ok.com"
partition: "Common"
persist_cidr_ipv4: "32"
persist_cidr_ipv6: "128"
persistence: "disabled"
pool_lb_mode: "round-robin"
pools:
- name: "d3qw"
order: "0"
partition: "Common"
ratio: "1"
ttl_persistence: "3600"
type: "naptr"
pool:
description: Contains the pool object status and enabled status.
returned: changed
type: dict
sample:
pool:
- alternate_mode: "round-robin"
dynamic_ratio: "disabled"
enabled: "True"
fallback_mode: "return-to-dns"
full_path: "/Common/d3qw"
load_balancing_mode: "round-robin"
manual_resume: "disabled"
max_answers_returned: "1"
members:
- disabled: "True"
flags: "a"
full_path: "ok3.com"
member_order: "0"
name: "ok3.com"
order: "10"
preference: "10"
ratio: "1"
service: "80"
name: "d3qw"
partition: "Common"
qos_hit_ratio: "5"
qos_hops: "0"
qos_kilobytes_second: "3"
qos_lcs: "30"
qos_packet_rate: "1"
qos_rtt: "50"
qos_topology: "0"
qos_vs_capacity: "0"
qos_vs_score: "0"
ttl: "30"
type: "naptr"
verify_member_availability: "disabled"
virtual_server:
description:
Contains the virtual server enabled and availability
status, and address
returned: changed
type: dict
sample:
virtual_server:
- addresses:
- device_name: "/Common/qweqwe"
name: "10.10.10.10"
translation: "none"
datacenter: "/Common/xfxgh"
enabled: "True"
expose_route_domains: "no"
full_path: "/Common/qweqwe"
iq_allow_path: "yes"
iq_allow_service_check: "yes"
iq_allow_snmp: "yes"
limit_cpu_usage: "0"
limit_cpu_usage_status: "disabled"
limit_max_bps: "0"
limit_max_bps_status: "disabled"
limit_max_connections: "0"
limit_max_connections_status: "disabled"
limit_max_pps: "0"
limit_max_pps_status: "disabled"
limit_mem_avail: "0"
limit_mem_avail_status: "disabled"
link_discovery: "disabled"
monitor: "/Common/bigip "
name: "qweqwe"
partition: "Common"
product: "single-bigip"
virtual_server_discovery: "disabled"
virtual_servers:
- destination: "10.10.10.10:0"
enabled: "True"
full_path: "jsdfhsd"
limit_max_bps: "0"
limit_max_bps_status: "disabled"
limit_max_connections: "0"
limit_max_connections_status: "disabled"
limit_max_pps: "0"
limit_max_pps_status: "disabled"
name: "jsdfhsd"
translation_address: "none"
translation_port: "0"
'''
try:
from distutils.version import LooseVersion
from f5.bigip.contexts import TransactionContextManager
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
import re
class BigIpGtmFactsCommon(object):
def __init__(self):
self.api = None
self.attributes_to_remove = [
'kind', 'generation', 'selfLink', '_meta_data',
'membersReference', 'datacenterReference',
'virtualServersReference', 'nameReference'
]
self.gtm_types = dict(
a_s='a',
aaaas='aaaa',
cnames='cname',
mxs='mx',
naptrs='naptr',
srvs='srv'
)
self.request_params = dict(
params='expandSubcollections=true'
)
def is_version_less_than_12(self):
version = self.api.tmos_version
if LooseVersion(version) < LooseVersion('12.0.0'):
return True
else:
return False
def format_string_facts(self, parameters):
result = dict()
for attribute in self.attributes_to_remove:
parameters.pop(attribute, None)
for key, val in parameters.iteritems():
result[key] = str(val)
return result
def filter_matches_name(self, name):
if not self.params['filter']:
return True
matches = re.match(self.params['filter'], str(name))
if matches:
return True
else:
return False
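    # The filter is a bare re.match() against resource names, so it anchors
    # at the start of the name; e.g. a filter of "web" matches "web_pool"
    # but not "my_web_pool".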
def get_facts_from_collection(self, collection, collection_type=None):
results = []
for item in collection:
if not self.filter_matches_name(item.name):
continue
facts = self.format_facts(item, collection_type)
results.append(facts)
return results
def connect_to_bigip(self, **kwargs):
return ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
class BigIpGtmFactsPools(BigIpGtmFactsCommon):
def __init__(self, *args, **kwargs):
super(BigIpGtmFactsPools, self).__init__()
self.params = kwargs
def get_facts(self):
self.api = self.connect_to_bigip(**self.params)
return self.get_facts_from_device()
def get_facts_from_device(self):
try:
if self.is_version_less_than_12():
return self.get_facts_without_types()
else:
return self.get_facts_with_types()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
def get_facts_with_types(self):
result = []
for key, type in self.gtm_types.iteritems():
facts = self.get_all_facts_by_type(key, type)
if facts:
result.append(facts)
return result
def get_facts_without_types(self):
pools = self.api.tm.gtm.pools.get_collection(**self.request_params)
return self.get_facts_from_collection(pools)
def get_all_facts_by_type(self, key, type):
collection = getattr(self.api.tm.gtm.pools, key)
pools = collection.get_collection(**self.request_params)
return self.get_facts_from_collection(pools, type)
def format_facts(self, pool, collection_type):
result = dict()
pool_dict = pool.to_dict()
result.update(self.format_string_facts(pool_dict))
result.update(self.format_member_facts(pool))
if collection_type:
result['type'] = collection_type
return camel_dict_to_snake_dict(result)
def format_member_facts(self, pool):
result = []
        if 'items' not in pool.membersReference:
return dict(members=[])
for member in pool.membersReference['items']:
member_facts = self.format_string_facts(member)
result.append(member_facts)
return dict(members=result)
class BigIpGtmFactsWideIps(BigIpGtmFactsCommon):
def __init__(self, *args, **kwargs):
super(BigIpGtmFactsWideIps, self).__init__()
self.params = kwargs
def get_facts(self):
self.api = self.connect_to_bigip(**self.params)
return self.get_facts_from_device()
def get_facts_from_device(self):
try:
if self.is_version_less_than_12():
return self.get_facts_without_types()
else:
return self.get_facts_with_types()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
def get_facts_with_types(self):
result = []
for key, type in self.gtm_types.iteritems():
facts = self.get_all_facts_by_type(key, type)
if facts:
result.append(facts)
return result
def get_facts_without_types(self):
wideips = self.api.tm.gtm.wideips.get_collection(
**self.request_params
)
return self.get_facts_from_collection(wideips)
def get_all_facts_by_type(self, key, type):
collection = getattr(self.api.tm.gtm.wideips, key)
wideips = collection.get_collection(**self.request_params)
return self.get_facts_from_collection(wideips, type)
def format_facts(self, wideip, collection_type):
result = dict()
wideip_dict = wideip.to_dict()
result.update(self.format_string_facts(wideip_dict))
result.update(self.format_pool_facts(wideip))
if collection_type:
result['type'] = collection_type
return camel_dict_to_snake_dict(result)
def format_pool_facts(self, wideip):
result = []
if not hasattr(wideip, 'pools'):
return dict(pools=[])
for pool in wideip.pools:
pool_facts = self.format_string_facts(pool)
result.append(pool_facts)
return dict(pools=result)
class BigIpGtmFactsVirtualServers(BigIpGtmFactsCommon):
def __init__(self, *args, **kwargs):
super(BigIpGtmFactsVirtualServers, self).__init__()
self.params = kwargs
def get_facts(self):
try:
self.api = self.connect_to_bigip(**self.params)
return self.get_facts_from_device()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
def get_facts_from_device(self):
servers = self.api.tm.gtm.servers.get_collection(
**self.request_params
)
return self.get_facts_from_collection(servers)
def format_facts(self, server, collection_type=None):
result = dict()
server_dict = server.to_dict()
result.update(self.format_string_facts(server_dict))
result.update(self.format_address_facts(server))
result.update(self.format_virtual_server_facts(server))
return camel_dict_to_snake_dict(result)
def format_address_facts(self, server):
result = []
if not hasattr(server, 'addresses'):
return dict(addresses=[])
for address in server.addresses:
address_facts = self.format_string_facts(address)
result.append(address_facts)
return dict(addresses=result)
def format_virtual_server_facts(self, server):
result = []
        if 'items' not in server.virtualServersReference:
return dict(virtual_servers=[])
for server in server.virtualServersReference['items']:
server_facts = self.format_string_facts(server)
result.append(server_facts)
return dict(virtual_servers=result)
class BigIpGtmFactsManager(object):
def __init__(self, *args, **kwargs):
self.params = kwargs
self.api = None
def get_facts(self):
result = dict()
facts = dict()
if 'pool' in self.params['include']:
facts['pool'] = self.get_pool_facts()
if 'wide_ip' in self.params['include']:
facts['wide_ip'] = self.get_wide_ip_facts()
if 'virtual_server' in self.params['include']:
facts['virtual_server'] = self.get_virtual_server_facts()
result.update(**facts)
result.update(dict(changed=True))
return result
def get_pool_facts(self):
pools = BigIpGtmFactsPools(**self.params)
return pools.get_facts()
def get_wide_ip_facts(self):
wide_ips = BigIpGtmFactsWideIps(**self.params)
return wide_ips.get_facts()
def get_virtual_server_facts(self):
wide_ips = BigIpGtmFactsVirtualServers(**self.params)
return wide_ips.get_facts()
class BigIpGtmFactsModuleConfig(object):
def __init__(self):
self.argument_spec = dict()
self.meta_args = dict()
self.supports_check_mode = False
self.valid_includes = ['pool', 'wide_ip', 'virtual_server']
self.initialize_meta_args()
self.initialize_argument_spec()
def initialize_meta_args(self):
args = dict(
include=dict(type='list', required=True),
filter=dict(type='str', required=False)
)
self.meta_args = args
def initialize_argument_spec(self):
self.argument_spec = f5_argument_spec()
self.argument_spec.update(self.meta_args)
def create(self):
return AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=self.supports_check_mode
)
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
config = BigIpGtmFactsModuleConfig()
module = config.create()
try:
obj = BigIpGtmFactsManager(
check_mode=module.check_mode, **module.params
)
result = obj.get_facts()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
| apache-2.0 | -9,198,551,750,271,885,000 | -2,015,735,330,246,167,800 | 31.331992 | 75 | 0.571597 | false |
ntt-sic/keystone | keystone/openstack/common/db/sqlalchemy/migration.py | 6 | 10075 | # coding: utf-8
#
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Base on code in migrate/changeset/databases/sqlite.py which is under
# the following license:
#
# The MIT License
#
# Copyright (c) 2009 Evan Rosson, Jan Dittberner, Domen Kožar
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import distutils.version as dist_version
import os
import re
import migrate
from migrate.changeset import ansisql
from migrate.changeset.databases import sqlite
from migrate.versioning import util as migrate_util
import sqlalchemy
from sqlalchemy.schema import UniqueConstraint
from keystone.openstack.common.db import exception
from keystone.openstack.common.db.sqlalchemy import session as db_session
from keystone.openstack.common.gettextutils import _ # noqa
@migrate_util.decorator
def patched_with_engine(f, *a, **kw):
url = a[0]
engine = migrate_util.construct_engine(url, **kw)
try:
kw['engine'] = engine
return f(*a, **kw)
finally:
if isinstance(engine, migrate_util.Engine) and engine is not url:
migrate_util.log.debug('Disposing SQLAlchemy engine %s', engine)
engine.dispose()
# TODO(jkoelker) When migrate 0.7.3 is released and nova depends
# on that version or higher, this can be removed
MIN_PKG_VERSION = dist_version.StrictVersion('0.7.3')
if (not hasattr(migrate, '__version__') or
dist_version.StrictVersion(migrate.__version__) < MIN_PKG_VERSION):
migrate_util.with_engine = patched_with_engine
# NOTE(jkoelker) Delay importing migrate until we are patched
from migrate import exceptions as versioning_exceptions
from migrate.versioning import api as versioning_api
from migrate.versioning.repository import Repository
_REPOSITORY = None
get_engine = db_session.get_engine
def _get_unique_constraints(self, table):
"""Retrieve information about existing unique constraints of the table
This feature is needed for _recreate_table() to work properly.
Unfortunately, it's not available in sqlalchemy 0.7.x/0.8.x.
"""
data = table.metadata.bind.execute(
"""SELECT sql
FROM sqlite_master
WHERE
type='table' AND
name=:table_name""",
table_name=table.name
).fetchone()[0]
UNIQUE_PATTERN = "CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)"
return [
UniqueConstraint(
*[getattr(table.columns, c.strip(' "')) for c in cols.split(",")],
name=name
)
for name, cols in re.findall(UNIQUE_PATTERN, data)
]
def _recreate_table(self, table, column=None, delta=None, omit_uniques=None):
"""Recreate the table properly
Unlike the corresponding original method of sqlalchemy-migrate this one
doesn't drop existing unique constraints when creating a new one.
"""
table_name = self.preparer.format_table(table)
# we remove all indexes so as not to have
# problems during copy and re-create
for index in table.indexes:
index.drop()
# reflect existing unique constraints
for uc in self._get_unique_constraints(table):
table.append_constraint(uc)
# omit given unique constraints when creating a new table if required
table.constraints = set([
cons for cons in table.constraints
if omit_uniques is None or cons.name not in omit_uniques
])
self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name)
self.execute()
insertion_string = self._modify_table(table, column, delta)
table.create(bind=self.connection)
self.append(insertion_string % {'table_name': table_name})
self.execute()
self.append('DROP TABLE migration_tmp')
self.execute()
def _visit_migrate_unique_constraint(self, *p, **k):
"""Drop the given unique constraint
The corresponding original method of sqlalchemy-migrate just
raises NotImplemented error
"""
self.recreate_table(p[0].table, omit_uniques=[p[0].name])
def patch_migrate():
"""A workaround for SQLite's inability to alter things
SQLite abilities to alter tables are very limited (please read
http://www.sqlite.org/lang_altertable.html for more details).
E. g. one can't drop a column or a constraint in SQLite. The
workaround for this is to recreate the original table omitting
the corresponding constraint (or column).
sqlalchemy-migrate library has recreate_table() method that
implements this workaround, but it does it wrong:
- information about unique constraints of a table
is not retrieved. So if you have a table with one
unique constraint and a migration adding another one
you will end up with a table that has only the
latter unique constraint, and the former will be lost
- dropping of unique constraints is not supported at all
The proper way to fix this is to provide a pull-request to
sqlalchemy-migrate, but the project seems to be dead. So we
can go on with monkey-patching of the lib at least for now.
"""
# this patch is needed to ensure that recreate_table() doesn't drop
# existing unique constraints of the table when creating a new one
helper_cls = sqlite.SQLiteHelper
helper_cls.recreate_table = _recreate_table
helper_cls._get_unique_constraints = _get_unique_constraints
# this patch is needed to be able to drop existing unique constraints
constraint_cls = sqlite.SQLiteConstraintDropper
constraint_cls.visit_migrate_unique_constraint = \
_visit_migrate_unique_constraint
constraint_cls.__bases__ = (ansisql.ANSIColumnDropper,
sqlite.SQLiteConstraintGenerator)
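# Typical call order (a sketch; the repository path is a placeholder): apply
# the monkey-patch once, before running any migration against SQLite:
#
#   patch_migrate()
#   db_sync('/path/to/migrate_repo')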
def db_sync(abs_path, version=None, init_version=0):
"""Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts.
:param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version.
If None - database will update to the latest
available version.
:param init_version: Initial database version
"""
if version is not None:
try:
version = int(version)
except ValueError:
raise exception.DbMigrationError(
message=_("version should be an integer"))
current_version = db_version(abs_path, init_version)
repository = _find_migrate_repo(abs_path)
if version is None or version > current_version:
return versioning_api.upgrade(get_engine(), repository, version)
else:
return versioning_api.downgrade(get_engine(), repository,
version)
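# Usage sketch (paths and versions are placeholders):
#
#   db_sync('/opt/project/db/migrate_repo')             # upgrade to latest
#   db_sync('/opt/project/db/migrate_repo', version=5)  # move to version 5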
def db_version(abs_path, init_version):
"""Show the current version of the repository.
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
try:
return versioning_api.db_version(get_engine(), repository)
except versioning_exceptions.DatabaseNotControlledError:
meta = sqlalchemy.MetaData()
engine = get_engine()
meta.reflect(bind=engine)
tables = meta.tables
if len(tables) == 0:
db_version_control(abs_path, init_version)
return versioning_api.db_version(get_engine(), repository)
else:
# Some pre-Essex DB's may not be version controlled.
# Require them to upgrade using Essex first.
raise exception.DbMigrationError(
message=_("Upgrade DB using Essex release first."))
def db_version_control(abs_path, version=None):
"""Mark a database as under this repository's version control.
Once a database is under version control, schema changes should
only be done via change scripts in this repository.
:param abs_path: Absolute path to migrate repository
:param version: Initial database version
"""
repository = _find_migrate_repo(abs_path)
versioning_api.version_control(get_engine(), repository, version)
return version
def _find_migrate_repo(abs_path):
"""Get the project's change script repository
:param abs_path: Absolute path to migrate repository
"""
global _REPOSITORY
if not os.path.exists(abs_path):
raise exception.DbMigrationError("Path %s not found" % abs_path)
if _REPOSITORY is None:
_REPOSITORY = Repository(abs_path)
return _REPOSITORY
| apache-2.0 | 851,249,632,725,815,400 | -2,593,534,382,134,082,000 | 35.23741 | 79 | 0.69595 | false |
ghickman/django | django/core/management/commands/shell.py | 6 | 4104 | import os
import warnings
from django.core.management.base import BaseCommand
from django.utils.deprecation import RemovedInDjango20Warning
class Command(BaseCommand):
help = "Runs a Python interactive interpreter. Tries to use IPython or bpython, if one of them is available."
requires_system_checks = False
shells = ['ipython', 'bpython', 'python']
def add_arguments(self, parser):
parser.add_argument('--plain', action='store_true', dest='plain',
help='Tells Django to use plain Python, not IPython or bpython. '
'Deprecated, use the `-i python` or `--interface python` option instead.')
parser.add_argument('--no-startup', action='store_true', dest='no_startup',
help='When using plain Python, ignore the PYTHONSTARTUP environment variable and ~/.pythonrc.py script.')
parser.add_argument('-i', '--interface', choices=self.shells, dest='interface',
help='Specify an interactive interpreter interface. Available options: "ipython", "bpython", and "python"')
def _ipython_pre_011(self):
"""Start IPython pre-0.11"""
from IPython.Shell import IPShell
shell = IPShell(argv=[])
shell.mainloop()
def _ipython_pre_100(self):
"""Start IPython pre-1.0.0"""
from IPython.frontend.terminal.ipapp import TerminalIPythonApp
app = TerminalIPythonApp.instance()
app.initialize(argv=[])
app.start()
def _ipython(self):
"""Start IPython >= 1.0"""
from IPython import start_ipython
start_ipython(argv=[])
def ipython(self, options):
"""Start any version of IPython"""
for ip in (self._ipython, self._ipython_pre_100, self._ipython_pre_011):
try:
ip()
except ImportError:
pass
else:
return
# no IPython, raise ImportError
raise ImportError("No IPython")
def bpython(self, options):
import bpython
bpython.embed()
def python(self, options):
import code
# Set up a dictionary to serve as the environment for the shell, so
# that tab completion works on objects that are imported at runtime.
imported_objects = {}
try: # Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.set_completer(rlcompleter.Completer(imported_objects).complete)
readline.parse_and_bind("tab:complete")
# We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
# conventions and get $PYTHONSTARTUP first then .pythonrc.py.
if not options['no_startup']:
for pythonrc in (os.environ.get("PYTHONSTARTUP"), '~/.pythonrc.py'):
if not pythonrc:
continue
pythonrc = os.path.expanduser(pythonrc)
if not os.path.isfile(pythonrc):
continue
try:
with open(pythonrc) as handle:
exec(compile(handle.read(), pythonrc, 'exec'), imported_objects)
except NameError:
pass
code.interact(local=imported_objects)
def handle(self, **options):
if options['plain']:
warnings.warn(
"The --plain option is deprecated in favor of the -i python or --interface python option.",
RemovedInDjango20Warning
)
options['interface'] = 'python'
available_shells = [options['interface']] if options['interface'] else self.shells
for shell in available_shells:
try:
return getattr(self, shell)(options)
except ImportError:
pass
raise ImportError("Couldn't load any of the specified interfaces.")
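# Command-line sketch of the interfaces wired up above (the project layout
# and manage.py invocation are assumptions):
#
#   python manage.py shell                        # first available interface
#   python manage.py shell -i bpython             # force a specific interface
#   python manage.py shell -i python --no-startup # plain Python, skip
#                                                 # $PYTHONSTARTUP/.pythonrc.py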
| bsd-3-clause | 3,697,713,412,925,209,000 | -6,484,713,277,257,062,000 | 38.84466 | 119 | 0.598684 | false |
cbertinato/pandas | pandas/io/excel/_openpyxl.py | 1 | 14098 | from pandas.io.excel._base import ExcelWriter
from pandas.io.excel._util import _validate_freeze_panes
class _OpenpyxlWriter(ExcelWriter):
engine = 'openpyxl'
supported_extensions = ('.xlsx', '.xlsm')
def __init__(self, path, engine=None, mode='w', **engine_kwargs):
# Use the openpyxl module as the Excel writer.
from openpyxl.workbook import Workbook
super().__init__(path, mode=mode, **engine_kwargs)
if self.mode == 'a': # Load from existing workbook
from openpyxl import load_workbook
book = load_workbook(self.path)
self.book = book
else:
# Create workbook object with default optimized_write=True.
self.book = Workbook()
if self.book.worksheets:
try:
self.book.remove(self.book.worksheets[0])
except AttributeError:
# compat - for openpyxl <= 2.4
self.book.remove_sheet(self.book.worksheets[0])
def save(self):
"""
Save workbook to disk.
"""
return self.book.save(self.path)
@classmethod
def _convert_to_style(cls, style_dict):
"""
converts a style_dict to an openpyxl style object
Parameters
----------
style_dict : style dictionary to convert
"""
from openpyxl.style import Style
xls_style = Style()
for key, value in style_dict.items():
for nk, nv in value.items():
if key == "borders":
(xls_style.borders.__getattribute__(nk)
.__setattr__('border_style', nv))
else:
xls_style.__getattribute__(key).__setattr__(nk, nv)
return xls_style
@classmethod
def _convert_to_style_kwargs(cls, style_dict):
"""
Convert a style_dict to a set of kwargs suitable for initializing
or updating-on-copy an openpyxl v2 style object
Parameters
----------
style_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'font'
'fill'
'border' ('borders')
'alignment'
'number_format'
'protection'
Returns
-------
style_kwargs : dict
A dict with the same, normalized keys as ``style_dict`` but each
value has been replaced with a native openpyxl style object of the
appropriate class.
"""
_style_key_map = {
'borders': 'border',
}
style_kwargs = {}
for k, v in style_dict.items():
if k in _style_key_map:
k = _style_key_map[k]
_conv_to_x = getattr(cls, '_convert_to_{k}'.format(k=k),
lambda x: None)
new_v = _conv_to_x(v)
if new_v:
style_kwargs[k] = new_v
return style_kwargs
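    # Sketch of the conversion (openpyxl object reprs abbreviated):
    #
    #   _OpenpyxlWriter._convert_to_style_kwargs(
    #       {"font": {"bold": True}, "borders": {"top": {"style": "thin"}}})
    #   # -> {"font": Font(bold=True),
    #   #     "border": Border(top=Side(style="thin"))}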
@classmethod
def _convert_to_color(cls, color_spec):
"""
Convert ``color_spec`` to an openpyxl v2 Color object
Parameters
----------
color_spec : str, dict
A 32-bit ARGB hex string, or a dict with zero or more of the
following keys.
'rgb'
'indexed'
'auto'
'theme'
'tint'
'index'
'type'
Returns
-------
color : openpyxl.styles.Color
"""
from openpyxl.styles import Color
if isinstance(color_spec, str):
return Color(color_spec)
else:
return Color(**color_spec)
@classmethod
def _convert_to_font(cls, font_dict):
"""
Convert ``font_dict`` to an openpyxl v2 Font object
Parameters
----------
font_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'name'
'size' ('sz')
'bold' ('b')
'italic' ('i')
'underline' ('u')
'strikethrough' ('strike')
'color'
'vertAlign' ('vertalign')
'charset'
'scheme'
'family'
'outline'
'shadow'
'condense'
Returns
-------
font : openpyxl.styles.Font
"""
from openpyxl.styles import Font
_font_key_map = {
'sz': 'size',
'b': 'bold',
'i': 'italic',
'u': 'underline',
'strike': 'strikethrough',
'vertalign': 'vertAlign',
}
font_kwargs = {}
for k, v in font_dict.items():
if k in _font_key_map:
k = _font_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
font_kwargs[k] = v
return Font(**font_kwargs)
@classmethod
def _convert_to_stop(cls, stop_seq):
"""
Convert ``stop_seq`` to a list of openpyxl v2 Color objects,
suitable for initializing the ``GradientFill`` ``stop`` parameter.
Parameters
----------
stop_seq : iterable
An iterable that yields objects suitable for consumption by
``_convert_to_color``.
Returns
-------
stop : list of openpyxl.styles.Color
"""
return map(cls._convert_to_color, stop_seq)
@classmethod
def _convert_to_fill(cls, fill_dict):
"""
Convert ``fill_dict`` to an openpyxl v2 Fill object
Parameters
----------
fill_dict : dict
A dict with one or more of the following keys (or their synonyms),
'fill_type' ('patternType', 'patterntype')
'start_color' ('fgColor', 'fgcolor')
'end_color' ('bgColor', 'bgcolor')
or one or more of the following keys (or their synonyms).
'type' ('fill_type')
'degree'
'left'
'right'
'top'
'bottom'
'stop'
Returns
-------
fill : openpyxl.styles.Fill
"""
from openpyxl.styles import PatternFill, GradientFill
_pattern_fill_key_map = {
'patternType': 'fill_type',
'patterntype': 'fill_type',
'fgColor': 'start_color',
'fgcolor': 'start_color',
'bgColor': 'end_color',
'bgcolor': 'end_color',
}
_gradient_fill_key_map = {
'fill_type': 'type',
}
pfill_kwargs = {}
gfill_kwargs = {}
for k, v in fill_dict.items():
pk = gk = None
if k in _pattern_fill_key_map:
pk = _pattern_fill_key_map[k]
if k in _gradient_fill_key_map:
gk = _gradient_fill_key_map[k]
if pk in ['start_color', 'end_color']:
v = cls._convert_to_color(v)
if gk == 'stop':
v = cls._convert_to_stop(v)
if pk:
pfill_kwargs[pk] = v
elif gk:
gfill_kwargs[gk] = v
else:
pfill_kwargs[k] = v
gfill_kwargs[k] = v
try:
return PatternFill(**pfill_kwargs)
except TypeError:
return GradientFill(**gfill_kwargs)
@classmethod
def _convert_to_side(cls, side_spec):
"""
Convert ``side_spec`` to an openpyxl v2 Side object
Parameters
----------
side_spec : str, dict
A string specifying the border style, or a dict with zero or more
of the following keys (or their synonyms).
'style' ('border_style')
'color'
Returns
-------
side : openpyxl.styles.Side
"""
from openpyxl.styles import Side
_side_key_map = {
'border_style': 'style',
}
if isinstance(side_spec, str):
return Side(style=side_spec)
side_kwargs = {}
for k, v in side_spec.items():
if k in _side_key_map:
k = _side_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
side_kwargs[k] = v
return Side(**side_kwargs)
@classmethod
def _convert_to_border(cls, border_dict):
"""
Convert ``border_dict`` to an openpyxl v2 Border object
Parameters
----------
border_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'left'
'right'
'top'
'bottom'
'diagonal'
'diagonal_direction'
'vertical'
'horizontal'
'diagonalUp' ('diagonalup')
'diagonalDown' ('diagonaldown')
'outline'
Returns
-------
border : openpyxl.styles.Border
"""
from openpyxl.styles import Border
_border_key_map = {
'diagonalup': 'diagonalUp',
'diagonaldown': 'diagonalDown',
}
border_kwargs = {}
for k, v in border_dict.items():
if k in _border_key_map:
k = _border_key_map[k]
if k == 'color':
v = cls._convert_to_color(v)
if k in ['left', 'right', 'top', 'bottom', 'diagonal']:
v = cls._convert_to_side(v)
border_kwargs[k] = v
return Border(**border_kwargs)
@classmethod
def _convert_to_alignment(cls, alignment_dict):
"""
Convert ``alignment_dict`` to an openpyxl v2 Alignment object
Parameters
----------
alignment_dict : dict
A dict with zero or more of the following keys (or their synonyms).
'horizontal'
'vertical'
'text_rotation'
'wrap_text'
'shrink_to_fit'
'indent'
Returns
-------
alignment : openpyxl.styles.Alignment
"""
from openpyxl.styles import Alignment
return Alignment(**alignment_dict)
@classmethod
def _convert_to_number_format(cls, number_format_dict):
"""
Convert ``number_format_dict`` to an openpyxl v2.1.0 number format
initializer.
Parameters
----------
number_format_dict : dict
A dict with zero or more of the following keys.
'format_code' : str
Returns
-------
number_format : str
"""
return number_format_dict['format_code']
@classmethod
def _convert_to_protection(cls, protection_dict):
"""
Convert ``protection_dict`` to an openpyxl v2 Protection object.
Parameters
----------
protection_dict : dict
A dict with zero or more of the following keys.
'locked'
'hidden'
        Returns
        -------
        protection : openpyxl.styles.Protection
        """
from openpyxl.styles import Protection
return Protection(**protection_dict)
def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0,
freeze_panes=None):
# Write the frame cells using openpyxl.
sheet_name = self._get_sheet_name(sheet_name)
_style_cache = {}
if sheet_name in self.sheets:
wks = self.sheets[sheet_name]
else:
wks = self.book.create_sheet()
wks.title = sheet_name
self.sheets[sheet_name] = wks
if _validate_freeze_panes(freeze_panes):
wks.freeze_panes = wks.cell(row=freeze_panes[0] + 1,
column=freeze_panes[1] + 1)
for cell in cells:
xcell = wks.cell(
row=startrow + cell.row + 1,
column=startcol + cell.col + 1
)
xcell.value, fmt = self._value_with_fmt(cell.val)
if fmt:
xcell.number_format = fmt
style_kwargs = {}
if cell.style:
key = str(cell.style)
style_kwargs = _style_cache.get(key)
if style_kwargs is None:
style_kwargs = self._convert_to_style_kwargs(cell.style)
_style_cache[key] = style_kwargs
if style_kwargs:
for k, v in style_kwargs.items():
setattr(xcell, k, v)
if cell.mergestart is not None and cell.mergeend is not None:
wks.merge_cells(
start_row=startrow + cell.row + 1,
start_column=startcol + cell.col + 1,
end_column=startcol + cell.mergeend + 1,
end_row=startrow + cell.mergestart + 1
)
# When cells are merged only the top-left cell is preserved
# The behaviour of the other cells in a merged range is
# undefined
if style_kwargs:
first_row = startrow + cell.row + 1
last_row = startrow + cell.mergestart + 1
first_col = startcol + cell.col + 1
last_col = startcol + cell.mergeend + 1
for row in range(first_row, last_row + 1):
for col in range(first_col, last_col + 1):
if row == first_row and col == first_col:
# Ignore first cell. It is already handled.
continue
xcell = wks.cell(column=col, row=row)
for k, v in style_kwargs.items():
setattr(xcell, k, v)
| bsd-3-clause | -8,527,623,390,936,942,000 | -1,010,495,220,627,952,000 | 30.121413 | 79 | 0.474394 | false |
saurabh6790/medsynaptic-lib | core/doctype/property_setter/property_setter.py | 34 | 2382 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def autoname(self):
self.doc.name = self.doc.doc_type + "-" \
+ (self.doc.field_name and (self.doc.field_name + "-") or "") \
+ self.doc.property
def validate(self):
"""delete other property setters on this, if this is new"""
if self.doc.fields['__islocal']:
webnotes.conn.sql("""delete from `tabProperty Setter` where
doctype_or_field = %(doctype_or_field)s
and doc_type = %(doc_type)s
and ifnull(field_name,'') = ifnull(%(field_name)s, '')
and property = %(property)s""", self.doc.fields)
# clear cache
webnotes.clear_cache(doctype = self.doc.doc_type)
def get_property_list(self, dt):
return webnotes.conn.sql("""select fieldname, label, fieldtype
from tabDocField
where parent=%s
and fieldtype not in ('Section Break', 'Column Break', 'HTML', 'Read Only', 'Table')
and ifnull(fieldname, '') != ''
order by label asc""", dt, as_dict=1)
def get_setup_data(self):
return {
'doctypes': [d[0] for d in webnotes.conn.sql("select name from tabDocType")],
'dt_properties': self.get_property_list('DocType'),
'df_properties': self.get_property_list('DocField')
}
def get_field_ids(self):
return webnotes.conn.sql("select name, fieldtype, label, fieldname from tabDocField where parent=%s", self.doc.doc_type, as_dict = 1)
def get_defaults(self):
if not self.doc.field_name:
return webnotes.conn.sql("select * from `tabDocType` where name=%s", self.doc.doc_type, as_dict = 1)[0]
else:
return webnotes.conn.sql("select * from `tabDocField` where fieldname=%s and parent=%s",
(self.doc.field_name, self.doc.doc_type), as_dict = 1)[0]
def on_update(self):
from core.doctype.doctype.doctype import validate_fields_for_doctype
validate_fields_for_doctype(self.doc.doc_type)
def make_property_setter(doctype, fieldname, property, value, property_type, for_doctype = False):
return webnotes.bean({
"doctype":"Property Setter",
"doctype_or_field": for_doctype and "DocType" or "DocField",
"doc_type": doctype,
"field_name": fieldname,
"property": property,
"value": value,
"property_type": property_type
}).insert()
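# Illustrative usage (the doctype/field names here are hypothetical):
# make_property_setter("Task", "priority", "hidden", "1", "Check")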
| mit | -6,067,365,197,643,643,000 | 1,503,744,445,484,358,100 | 34.567164 | 135 | 0.675903 | false |
PanDAWMS/panda-mon-qa | pandamonqa/qasuite/BSXPath.py | 3 | 80212 | # -*- coding: utf-8 -*-
"""
BSXPath.py: XPathEvaluator Extension for BeautifulSoup
"""
__version__ = '0.01e' # based on JavaScript-XPath 0.1.11 (c) 2007 Cybozu Labs, Inc. (http://coderepos.org/share/wiki/JavaScript-XPath)
__date__ = '2009-04-12'
__license__ = 'MIT-style license'
__author__ = 'furyu' # http://furyu.tea-nifty.com/annex/
# http://d.hatena.ne.jp/furyu-tei/
"""
Usage:
from BSXPath import BSXPathEvaluator,XPathResult
#*** PREPARATION (create object)
document = BSXPathEvaluator(<html>) # BSXPathEvaluator is sub-class of BeautifulSoup
# html: HTML (text string)
#*** BASIC OPERATIONS
result = document.evaluate(<expression>,<node>,None,<type>,None)
# expression: XPath expression
# node : base context-node(document is document-root)
# type : XPathResult.<name>
# name : ANY_TYPE, NUMBER_TYPE, STRING_TYPE, BOOLEAN_TYPE, UNORDERED_NODE_ITERATOR_TYPE, ORDERED_NODE_ITERATOR_TYPE
# UNORDERED_NODE_SNAPSHOT_TYPE, ORDERED_NODE_SNAPSHOT_TYPE, ANY_UNORDERED_NODE_TYPE, FIRST_ORDERED_NODE_TYPE
# (*) 3rd(resolver) and 5th(result) arguments are not implemented
length = result.snapshotLength
node = result.snapshotItem(<number>)
#*** USEFUL WRAPPER-FUNCTIONS
nodes = document.getItemList(<expression>[,<node>])
first = document.getFirstItem(<expression>[,<node>])
# expression: XPath expression
# node(optional): base context-node(default: document(document-root))
Examples:
from BSXPath import BSXPathEvaluator,XPathResult
html = '<html><head><title>Hello, DOM 3 XPath!</title></head><body><h1>Hello, DOM 3 XPath!</h1><p>This is XPathEvaluator Extension for BeautifulSoup.</p><p>This is based on JavaScript-XPath!</p></body>'
document = BSXPathEvaluator(html)
result = document.evaluate('//h1/text()[1]',document,None,XPathResult.STRING_TYPE,None)
print result.stringValue
# Hello, DOM 3 XPath!
result = document.evaluate('//h1',document,None,XPathResult.ORDERED_NODE_SNAPSHOT_TYPE,None)
print result.snapshotLength
# 1
print result.snapshotItem(0)
# <h1>Hello, DOM 3 XPath!</h1>
nodes = document.getItemList('//p')
print len(nodes)
# 2
print nodes
# [<p>This is XPathEvaluator Extension for BeautifulSoup.</p>, <p>This is based on JavaScript-XPath!</p>]
first = document.getFirstItem('//p')
print first
# <p>This is XPathEvaluator Extension for BeautifulSoup.</p>
Notice:
- This is based on JavaScript-XPath (c) 2007 Cybozu Labs, Inc. (http://coderepos.org/share/wiki/JavaScript-XPath)
- Required:
- Python 2.5+
- BeautifulSoup 3.0.7+(recommended) or 3.1.0+
"""
import re,types,math,datetime
#import logging
from BeautifulSoup import *
try:
if DEFAULT_OUTPUT_ENCODING:
pass
except:
DEFAULT_OUTPUT_ENCODING='utf-8'
#***** Optional Parameters
USE_NODE_CACHE=True
USE_NODE_INDEX=True
#***** General Functions
def throwError(str):
raise ValueError, str
def typeof(obj):
if isinstance(obj,bool):
return 'boolean'
if isinstance(obj,int) or isinstance(obj,float):
return 'number'
if isinstance(obj,basestring):
return 'string'
if isinstance(obj,types.FunctionType):
return 'function'
return 'object'
def isNaN(obj):
if isinstance(obj,int) or isinstance(obj,float):
return False
if not isinstance(obj,basestring):
return True
if obj.isdigit():
return False
try:
float(obj)
return False
except:
return True
def toNumber(obj):
if isinstance(obj,int) or isinstance(obj,float):
return obj
if isinstance(obj,basestring):
if obj.isdigit():
return int(obj)
try:
return float(obj)
except:
return obj
return obj
def toBoolean(obj):
return bool(obj)
def toString(obj):
if isinstance(obj,bool):
#return u'true' if obj else u'false'
if obj:
return u'true'
else:
return u'false'
if isinstance(obj,str) or isinstance(obj,int) or isinstance(obj,float):
return unicode(obj)
return obj
#***** General Classes
class ExtDict(dict):
def __getattr__(self,name):
try:
attr=super(ExtDict,self).__getattr__(name)
except:
if not self.has_key(name):
raise AttributeError,name
attr=self.get(name)
return attr
#***** Common Definitions
indent_space=' '
#{ // Regular Expressions
re_has_ualpha=re.compile(r'(?![0-9])[\w]')
re_seqspace=re.compile(r'\s+')
re_firstspace=re.compile(r'^\s')
re_lastspace=re.compile(r'\s$')
#} // end of Regular Expressions
#{ // NodeTypeDOM
NodeTypeDOM=ExtDict({
'ANY_NODE' :0
, 'ELEMENT_NODE' :1
, 'ATTRIBUTE_NODE' :2
, 'TEXT_NODE' :3
, 'CDATA_SECTION_NODE' :4
, 'ENTITY_REFERENCE_NODE' :5
, 'ENTITY_NODE' :6
, 'PROCESSING_INSTRUCTION_NODE':7
, 'COMMENT_NODE' :8
, 'DOCUMENT_NODE' :9
, 'DOCUMENT_TYPE_NODE' :10
, 'DOCUMENT_FRAGMENT_NODE' :11
, 'NOTATION_NODE' :12
})
NodeTypeBS=ExtDict({
'BSXPathEvaluator' :NodeTypeDOM.DOCUMENT_NODE
, 'NavigableString' :NodeTypeDOM.TEXT_NODE
, 'CData' :NodeTypeDOM.CDATA_SECTION_NODE
, 'ProcessingInstruction':NodeTypeDOM.PROCESSING_INSTRUCTION_NODE
, 'Comment' :NodeTypeDOM.COMMENT_NODE
, 'Declaration' :NodeTypeDOM.ANY_NODE
, 'Tag' :NodeTypeDOM.ELEMENT_NODE
})
#} // end of NodeTypeDOM
#{ // NodeUtil
def makeNodeUtils():
re_type_document_type=re.compile(r'^DOCTYPE\s')
re_type_entity =re.compile(r'^ENTITY\s')
re_type_notation =re.compile(r'^NOTATION\s')
#re_processing_instruction=re.compile(r'^(.*?)\s+(.*?)\?*$')
re_processing_instruction=re.compile(r'^(.*?)(\s+.*?)\?*$')
re_declaration_name=re.compile(r'^([^\s]+)\s+([\%]?)\s*([^\s]+)\s')
def makeNU_BS():
def _nodeType(node):
if getattr(node,'nodeType',None)==NodeTypeDOM.ATTRIBUTE_NODE:
return node.nodeType
nodeType=NodeTypeBS.get(node.__class__.__name__)
if nodeType==NodeTypeDOM.ANY_NODE:
str=NavigableString.encode(node,DEFAULT_OUTPUT_ENCODING)
if re_type_document_type.search(str):
nodeType=NodeTypeDOM.DOCUMENT_TYPE_NODE
elif re_type_entity.search(str):
nodeType=NodeTypeDOM.ENTITY_NODE
elif re_type_notation.search(str):
nodeType=NodeTypeDOM.NOTATION_NODE
return nodeType
def _nodeName(node):
if getattr(node,'nodeType',None)==NodeTypeDOM.ATTRIBUTE_NODE:
return node.nodeName.lower()
nodeType=_nodeType(node)
if nodeType==NodeTypeDOM.DOCUMENT_NODE:
return '#document'
elif nodeType==NodeTypeDOM.TEXT_NODE:
return '#text'
elif nodeType==NodeTypeDOM.CDATA_SECTION_NODE:
return '#cdata-section'
elif nodeType==NodeTypeDOM.PROCESSING_INSTRUCTION_NODE:
mrslt=re_processing_instruction.search(NavigableString.encode(node,DEFAULT_OUTPUT_ENCODING))
if mrslt:
return mrslt.group(1)
else:
return NavigableString.encode(node,DEFAULT_OUTPUT_ENCODING)
elif nodeType==NodeTypeDOM.COMMENT_NODE:
return '#comment'
elif nodeType==NodeTypeDOM.DOCUMENT_TYPE_NODE or nodeType==NodeTypeDOM.ENTITY_NODE or nodeType==NodeTypeDOM.NOTATION_NODE:
mrslt=re_declaration_name.search(NavigableString.encode(node,DEFAULT_OUTPUT_ENCODING))
if mrslt:
return mrslt.group(2)
else:
return NavigableString.encode(node,DEFAULT_OUTPUT_ENCODING)
else:
return node.name.lower()
def _nodeValue(node):
if getattr(node,'nodeType',None)==NodeTypeDOM.ATTRIBUTE_NODE:
return node.nodeValue
nodeType=_nodeType(node)
if nodeType==NodeTypeDOM.CDATA_SECTION_NODE or \
nodeType==NodeTypeDOM.COMMENT_NODE or \
nodeType==NodeTypeDOM.TEXT_NODE:
return NavigableString.encode(node, DEFAULT_OUTPUT_ENCODING)
if nodeType==NodeTypeDOM.PROCESSING_INSTRUCTION_NODE:
mrslt=re_processing_instruction.search(NavigableString.encode(node,DEFAULT_OUTPUT_ENCODING))
if mrslt:
return mrslt.group(2)
else:
return None
return None
def _nodeAttrValue(node,attrName):
if getattr(node,'nodeType',None)==NodeTypeDOM.ATTRIBUTE_NODE:
return None
nodeType=_nodeType(node)
if nodeType!=NodeTypeDOM.ELEMENT_NODE:
return None
return node.get(attrName)
def _parentNode(node):
if getattr(node,'nodeType',None)==NodeTypeDOM.ATTRIBUTE_NODE:
return node.parentNode
return node.parent
def _ownerDocument(node):
owner=getattr(node,'_owner',None)
if owner:
return owner
if getattr(node,'nodeType',None)==NodeTypeDOM.ATTRIBUTE_NODE:
owner=node.parentNode
else:
owner=node
while True:
parent=owner.parent
if not parent:
break
owner=parent
try:
node._owner=owner
except:
pass
return owner
def pairwise(iterable):
itnext = iter(iterable).next
while True:
yield itnext(), itnext()
def _attributes(node):
if _nodeType(node)==NodeTypeDOM.ELEMENT_NODE:
#return node._getAttrMap()
        if not getattr(node,'attrMap',None):
node.attrMap=dict(pairwise(node.attrs))
return node.attrMap
else:
return None
def _contains(node,cnode):
if _nodeType(node)==NodeTypeDOM.ATTRIBUTE_NODE: node=node.parentNode
if _nodeType(cnode)==NodeTypeDOM.ATTRIBUTE_NODE: cnode=cnode.parentNode
return node in cnode.findParents()
def _preceding(node,cnode):
if _nodeType(node)==NodeTypeDOM.ATTRIBUTE_NODE: node=node.parentNode
if _nodeType(cnode)==NodeTypeDOM.ATTRIBUTE_NODE: cnode=cnode.parentNode
#return cnode in node.findAllPrevious()
return cnode in node.findPreviousSiblings()
def _following(node,cnode):
if _nodeType(node)==NodeTypeDOM.ATTRIBUTE_NODE: node=node.parentNode
if _nodeType(cnode)==NodeTypeDOM.ATTRIBUTE_NODE: cnode=cnode.parentNode
#return cnode in node.findAllNext()
return cnode in node.findNextSiblings()
def d_getattr(self,name):
raise AttributeError,name
#{ // ExtPageElement
class ExtPageElement:
def __getattr__(self,name):
if name=='nodeType': return _nodeType(self)
if name=='nodeName': return _nodeName(self)
if name=='nodeValue': return _nodeValue(self)
if name=='parentNode': return _parentNode(self)
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return _attributes(self)
if name=='get': return self.get
if name=='contains': return self.contains
if name=='preceding': return self.preceding
if name=='following': return self.following
d_getattr(self,name)
def get(self,key,default=None):
return _nodeAttrValue(self,key)
def contains(self,cnode):
return _contains(self,cnode)
def preceding(self,cnode):
return _preceding(self,cnode)
def following(self,cnode):
return _following(self,cnode)
PageElement.__bases__+=(ExtPageElement,)
BeautifulSoup.__bases__+=(ExtPageElement,)
NavigableString.__bases__+=(ExtPageElement,)
CData.__bases__+=(ExtPageElement,)
ProcessingInstruction.__bases__+=(ExtPageElement,)
Comment.__bases__+=(ExtPageElement,)
Declaration.__bases__+=(ExtPageElement,)
Tag.__bases__+=(ExtPageElement,)
#} // ExtPageElement
#{ // _extBeautifulSoup
def _extBeautifulSoup():
o_getattr=getattr(BeautifulSoup,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType': return NodeTypeDOM.DOCUMENT_NODE
if name=='nodeName': return '#document'
if name=='nodeValue': return None
if name=='parentNode': return None
if name=='ownerDocument': return None
if name=='attributes': return None
return o_getattr(self,name)
BeautifulSoup.__getattr__=e_getattr
_extBeautifulSoup()
#} // _extBeautifulSoup
#{ // _extNavigableString
def _extNavigableString():
o_getattr=getattr(NavigableString,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType': return NodeTypeDOM.TEXT_NODE
if name=='nodeName': return '#text'
if name=='nodeValue': return NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if name=='parentNode': return self.parent
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return None
return o_getattr(self,name)
NavigableString.__getattr__=e_getattr
_extNavigableString()
#} // _extNavigableString
#{ // _extCData
def _extCData():
o_getattr=getattr(CData,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType': return NodeTypeDOM.CDATA_SECTION_NODE
if name=='nodeName': return '#cdata-section'
if name=='nodeValue': return NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if name=='parentNode': return self.parent
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return None
return o_getattr(self,name)
CData.__getattr__=e_getattr
_extCData()
#} // _extCData
#{ // _extProcessingInstruction
def _extProcessingInstruction():
o_getattr=getattr(ProcessingInstruction,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType': return NodeTypeDOM.PROCESSING_INSTRUCTION_NODE
if name=='nodeName':
mrslt=re_processing_instruction.search(NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING))
#return mrslt.group(1) if mrslt else NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if mrslt:
return mrslt.group(1)
else:
return NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if name=='nodeValue':
mrslt=re_processing_instruction.search(NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING))
#return mrslt.group(2) if mrslt else None
if mrslt:
return mrslt.group(2)
else:
return None
if name=='parentNode': return self.parent
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return None
return o_getattr(self,name)
ProcessingInstruction.__getattr__=e_getattr
_extProcessingInstruction()
#} // _extProcessingInstruction
#{ // _extComment
def _extComment():
o_getattr=getattr(Comment,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType': return NodeTypeDOM.COMMENT_NODE
if name=='nodeName': return '#comment'
if name=='nodeValue': return NavigableString.encode(self, DEFAULT_OUTPUT_ENCODING)
if name=='parentNode': return self.parent
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return None
return o_getattr(self,name)
Comment.__getattr__=e_getattr
_extComment()
#} // _extComment
#{ // _extDeclaration
def _extDeclaration():
o_getattr=getattr(Declaration,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType':
str=NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if re_type_document_type.search(str):
return NodeTypeDOM.DOCUMENT_TYPE_NODE
elif re_type_entity.search(str):
return NodeTypeDOM.ENTITY_NODE
elif re_type_notation.search(str):
return NodeTypeDOM.NOTATION_NODE
else:
return NodeTypeDOM.ANY_NODE
if name=='nodeName':
mrslt=re_declaration_name.search(NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING))
#return mrslt.group(2) if mrslt else NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if mrslt:
return mrslt.group(2)
else:
return NavigableString.encode(self,DEFAULT_OUTPUT_ENCODING)
if name=='nodeValue': return None
if name=='parentNode': return self.parent
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return None
return o_getattr(self,name)
Declaration.__getattr__=e_getattr
_extDeclaration()
#} // _extDeclaration
#{ // _extTag
def _extTag():
o_getattr=getattr(Tag,'__getattr__',d_getattr)
def e_getattr(self,name):
if name=='nodeType': return NodeTypeDOM.ELEMENT_NODE
if name=='nodeName': return self.name.lower()
if name=='nodeValue': return None
if name=='parentNode': return self.parent
if name=='ownerDocument': return _ownerDocument(self)
if name=='attributes': return self._getAttrMap()
return o_getattr(self,name)
Tag.__getattr__=e_getattr
_extTag()
#} // _extTag
def _it_deepNodes(node):
child_next=iter(getattr(node,'contents',[])).next
while True:
child=child_next()
yield child
for gchild in _it_deepNodes(child):
yield gchild
return ExtDict({
'nodeType' :_nodeType
, 'nodeName' :_nodeName
, 'nodeValue' :_nodeValue
, 'nodeAttrValue':_nodeAttrValue
, 'parentNode' :_parentNode
, 'ownerDocument':_ownerDocument
, 'attributes' :_attributes
, 'contains' :_contains
, 'preceding' :_preceding
, 'following' :_following
, 'it_deepNodes' :_it_deepNodes
})
return
def makeNU():
def _to(valueType,node):
if typeof(node)=='string':
result=node
else:
nodeType=node.nodeType
if nodeType==NodeTypeDOM.ATTRIBUTE_NODE:
result=node.nodeValue
else:
strings=[]
for _node in NodeUtilBS.it_deepNodes(node):
if _node.nodeType==NodeTypeDOM.TEXT_NODE:
strings.append(unicode(_node))
result=''.join(strings)
if valueType=='number':
return toNumber(result)
elif valueType=='boolean':
return toBoolean(result)
else:
return result
def _attrMatch(node,attrName,attrValue):
if not attrName or \
not attrValue and node.get(attrName) or \
(attrValue and node.get(attrName)==attrValue):
return True
else:
return False
def _getDescendantNodes(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
if prevNodeset:
prevNodeset.delDescendant(node,prevIndex)
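      # _cachemap is a per-node memo (descriptive note): 'attrib' maps
      # attrName -> attrValue -> matching nodes, 'all' lists every deep
      # descendant, and 'tag' maps a tag name to its element results.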
if USE_NODE_CACHE:
_cachemap=getattr(node,'_cachemap',None)
if not _cachemap:
_cachemap=node._cachemap=ExtDict({'attrib':ExtDict({}),'all':None,'tag':ExtDict({})})
if attrValue and attrName:
_cm=_cachemap.attrib
_anmap=_cm.get(attrName)
if not _anmap:
_anmap=_cm[attrName]=ExtDict({})
nodes=_anmap.get(attrValue)
if not nodes:
nodes=_anmap[attrValue]=[]
if getattr(node,'findAll',None):
nodes.extend(node.findAll(attrs={attrName:attrValue}))
for elm in nodes:
if test.match(elm):
nodeset.push(elm)
elif getattr(test,'notOnlyElement',None):
nodes=_cachemap.all
if not nodes:
nodes=_cachemap.all=[]
for elm in NodeUtilBS.it_deepNodes(node):
nodes.append(elm)
for elm in nodes:
if NodeUtil.attrMatch(elm,attrName,attrValue) and test.match(elm):
nodeset.push(elm)
else:
nodeType=node.nodeType
if nodeType==NodeTypeDOM.ELEMENT_NODE or nodeType==NodeTypeDOM.DOCUMENT_NODE:
_cm=_cachemap.tag
name=getattr(test,'name',None)
if not name or name=='*':
nodes=_cm.get('*')
if not nodes:
nodes=_cm['*']=node.findAll()
else:
nodes=_cm.get(name)
if not nodes:
nodes=_cm[name]=node.findAll([name])
for elm in nodes:
if NodeUtil.attrMatch(elm,attrName,attrValue):
nodeset.push(elm)
else: # USE_NODE_CACHE is False
if attrValue and attrName:
if getattr(node,'findAll',None):
for elm in node.findAll(attrs={attrName:attrValue}):
if test.match(elm):
nodeset.push(elm)
elif getattr(test,'notOnlyElement',None):
for elm in NodeUtilBS.it_deepNodes(node):
if NodeUtil.attrMatch(elm,attrName,attrValue) and test.match(elm):
nodeset.push(elm)
else:
nodeType=node.nodeType
if nodeType==NodeTypeDOM.ELEMENT_NODE or nodeType==NodeTypeDOM.DOCUMENT_NODE:
name=getattr(test,'name',None)
if not name or name=='*':
nodes=node.findAll()
else:
nodes=node.findAll([name])
for elm in nodes:
if NodeUtil.attrMatch(elm,attrName,attrValue):
nodeset.push(elm)
return nodeset
def _getChildNodes(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
contents=getattr(node,'contents',[])
for elm in contents:
if NodeUtil.attrMatch(elm,attrName,attrValue) and test.match(elm):
nodeset.push(elm)
return nodeset
return ExtDict({
'to' :_to
, 'attrMatch' :_attrMatch
, 'getDescendantNodes':_getDescendantNodes
, 'getChildNodes' :_getChildNodes
})
return (makeNU_BS(),makeNU())
(NodeUtilBS,NodeUtil)=makeNodeUtils()
#} // end of NodeUtil
#***** Application Classes
#{ // Lexer
class Lexer(object):
def __init__(self,source):
tokens=self.tokens=[]
def anlz_token(mrslt):
token=mrslt.group()
if not self.re_strip.search(token):
tokens.append(token)
return token
self.re_token.sub(anlz_token,source,count=0)
self.index=0
def peek(self,i=0):
#token=self.tokens[self.index+i] if self.index+i<len(self.tokens) else None
if self.index+i<len(self.tokens):
token=self.tokens[self.index+i]
else:
token=None
return token
def next(self):
    #token=self.tokens[self.index] if self.index<len(self.tokens) else None
if self.index<len(self.tokens):
token=self.tokens[self.index]
else:
token=None
self.index+=1
return token
  def back(self):
    self.index-=1
    #token=self.tokens[self.index] if self.index<len(self.tokens) else None
    if self.index<len(self.tokens):
      token=self.tokens[self.index]
    else:
      token=None
    return token
def empty(self):
return (len(self.tokens)<=self.index)
re_token=re.compile(r'\$?(?:(?![0-9-])[\w-]+:)?(?![0-9-])[\w-]+|\/\/|\.\.|::|\d+(?:\.\d*)?|\.\d+|"[^"]*"|\'[^\']*\'|[!<>]=|(?![0-9-])[\w-]+:\*|\s+|.')
re_strip=re.compile(r'^\s')
#} // end of Lexer
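# Illustrative sketch (not part of the original module): the Lexer splits an
# XPath expression into tokens, skipping whitespace, e.g.
#   Lexer('//div[@id="x"]/text()').tokens
#   -> ['//', 'div', '[', '@', 'id', '=', '"x"', ']', '/', 'text', '(', ')']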
#{ // Ctx
class Ctx(object):
def __init__(self,node,position=1,last=1):
self.node=node
self.position=position
self.last=last
#} // end of Ctx
#{ // AttributeWrapper
class AttributeWrapper(object):
def __init__(self,name,value,parent):
self.nodeType=NodeTypeDOM.ATTRIBUTE_NODE
self.nodeName=name
self.nodeValue=value
self.parentNode=parent
self.ownerElement=parent
def get(self,key,default=None):
return None
def contains(self,cnode):
return NodeUtilBS.contains(self,cnode)
def preceding(self,cnode):
return NodeUtilBS.preceding(self,cnode)
def following(self,cnode):
return NodeUtilBS.following(self,cnode)
def __str__(self,encoding=DEFAULT_OUTPUT_ENCODING):
if encoding:
return self.nodeValue.encode(encoding)
else:
return self.nodeValue
def __unicode__(self):
return str(self).decode(DEFAULT_OUTPUT_ENCODING)
@classmethod
def getAttributeWrapper(cls,name,value,parent):
_mapattr=getattr(parent,'_mapattr',None)
if not _mapattr:
_mapattr=parent._mapattr=ExtDict({})
if _mapattr.get(name):
return _mapattr[name]
_mapattr[name]=cls(name,value,parent)
return _mapattr[name]
#} // end of AttributeWrapper
#{ // BaseExpr
class BaseExpr(object):
def __init__(self):
pass
def number(self,ctx):
exrs=self.evaluate(ctx)
if getattr(exrs,'isNodeSet',None):
result=exrs.number()
else:
result=toNumber(exrs)
return result
def string(self,ctx):
exrs=self.evaluate(ctx)
if getattr(exrs,'isNodeSet',None):
result=exrs.string()
else:
result=toString(exrs)
return result
def bool(self,ctx):
exrs=self.evaluate(ctx)
if getattr(exrs,'isNodeSet',None):
result=exrs.bool()
else:
result=toBoolean(exrs)
return result
#} // end of BaseExpr
#{ // BaseExprHasPredicates
class BaseExprHasPredicates(BaseExpr):
def __init__(self):
pass
def evaluatePredicates(self,nodeset,start=0):
reverse=getattr(self,'reverse',False)
predicates=getattr(self,'predicates',[])
nodeset.sort()
l0=len(predicates)
for i in range(start,l0):
predicate=predicates[i]
deleteIndexes=[]
nodes=nodeset.list()
l1=len(nodes)
for j in range(0,l1):
#position=(l1-j) if reverse else (j+1)
if reverse:
position=(l1-j)
else:
position=(j+1)
exrs=predicate.evaluate(Ctx(nodes[j],position,l1))
if typeof(exrs)=='number':
exrs=(position==exrs)
elif typeof(exrs)=='string':
#exrs=False if exrs=='' else True
if exrs=='' :
exrs=False
else:
exrs=True
elif typeof(exrs)=='object':
exrs=exrs.bool()
if not exrs:
deleteIndexes.append(j)
r=range(0,len(deleteIndexes))
r.sort(reverse=True)
for j in r:
nodeset._del(deleteIndexes[j])
return nodeset
@classmethod
def parsePredicates(cls,lexer,expr):
while lexer.peek()=='[':
lexer.next()
if lexer.empty():
throwError(u'missing predicate expr')
predicate=BinaryExpr.parse(lexer)
expr.predicate(predicate)
if lexer.empty():
throwError(u'unclosed predicate expr')
if lexer.next() != ']':
lexer.back()
throwError(u'bad token: %s' % (lexer.next()))
#} // end of BaseExprHasPredicates
#{ // BinaryExpr
class BinaryExpr(BaseExpr):
def __init__(self,op,left,right):
self.op=op
self.left=left
self.right=right
self.dataType=BinaryExpr.ops[op][2]
(lneedContextPosition,rneedContextPosition)=(getattr(left,'needContextPosition',None),getattr(right,'needContextPosition',None))
(lneedContextNode,rneedContextNode)=(getattr(left,'needContextNode',None),getattr(right,'needContextNode',None))
self.needContextPosition=lneedContextPosition or rneedContextPosition
self.needContextNode=lneedContextNode or rneedContextNode
if op=='=':
(ldatatype,rdatatype)=(getattr(left,'datatype',None),getattr(right,'datatype',None))
(lqattr,rqattr)=(getattr(left,'quickAttr',None),getattr(right,'quickAttr',None))
if not rneedContextNode and not rneedContextPosition and rdatatype!='nodeset' and rdatatype!='void' and lqattr:
self.quickAttr=True
self.attrName=left.attrName
self.attrValueExpr=right
elif not lneedContextNode and not lneedContextPosition and ldatatype!='nodeset' and ldatatype!='void' and rqattr:
self.quickAttr=True
self.attrName=right.attrName
self.attrValueExpr=left
def evaluate(self,ctx):
result=BinaryExpr.ops[self.op][1](self.left,self.right,ctx)
return result
def show(self,indent=''):
t=''
t+=indent+'binary: '+self.op+'\n'
indent+=indent_space
t+=self.left.show(indent)
t+=self.right.show(indent)
return t
# --- Local Functions
@staticmethod
def _compare(op,comp,left,right,ctx):
left=left.evaluate(ctx)
right=right.evaluate(ctx)
if getattr(left,'isNodeSet',None) and getattr(right,'isNodeSet',None):
lnodes=left.list()
rnodes=right.list()
for lnode in lnodes:
for rnode in rnodes:
if comp(NodeUtil.to('string',lnode),NodeUtil.to('string',rnode)):
return True
return False
if getattr(left,'isNodeSet',None) or getattr(right,'isNodeSet',None):
if getattr(left,'isNodeSet',None):
(nodeset,primitive)=(left,right)
else:
(nodeset,primitive)=(right,left)
nodes=nodeset.list()
type=typeof(primitive)
for node in nodes:
if comp(NodeUtil.to(type,node),primitive):
return True
return False
if op=='=' or op=='!=':
if typeof(left)=='boolean' or typeof(right)=='boolean':
return comp(toBoolean(left),toBoolean(right))
if typeof(left)=='number' or typeof(right)=='number':
return comp(toNumber(left),toNumber(right))
return comp(left,right)
return comp(toNumber(left),toNumber(right))
def _div(left,right,ctx):
l=left.number(ctx)
r=right.number(ctx)
if typeof(l)!='number' or typeof(r)!='number': return 'NaN'
if r==0:
sign=int(getattr(left,'op','+')+'1')*int(getattr(right,'op','+')+'1')
if l==0: return 'NaN'
elif sign<0: return '-Infinity'
else: return 'Infinity'
n=float(l) / float(r)
n1=int(n)
#return n1 if n1==n else n
if n1==n:
return n1
else:
return n
def _mod(left,right,ctx):
l=left.number(ctx)
r=right.number(ctx)
if typeof(l)!='number' or typeof(r)!='number': return 'NaN'
if r==0:
if l==0: return 'NaN'
else: return 0
return l % r
def _mul(left,right,ctx):
l=left.number(ctx)
r=right.number(ctx)
if typeof(l)!='number' or typeof(r)!='number': return 'NaN'
n=l * r
n1=int(n)
#return n1 if n1==n else n
if n1==n:
return n1
else:
return n
def _add(left,right,ctx):
l=left.number(ctx)
r=right.number(ctx)
if typeof(l)!='number' or typeof(r)!='number': return 'NaN'
n=l + r
n1=int(n)
#return n1 if n1==n else n
if n1==n:
return n1
else:
return n
def _sub(left,right,ctx):
l=left.number(ctx)
r=right.number(ctx)
if typeof(l)!='number' or typeof(r)!='number': return 'NaN'
n=l - r
n1=int(n)
#return n1 if n1==n else n
if n1==n:
return n1
else:
return n
def _lt(left,right,ctx):
return BinaryExpr._compare('<',(lambda a,b:a<b),left,right,ctx)
def _gt(left,right,ctx):
return BinaryExpr._compare('>',(lambda a,b:a>b),left,right,ctx)
def _le(left,right,ctx):
return BinaryExpr._compare('<=',(lambda a,b:a<=b),left,right,ctx)
def _ge(left,right,ctx):
return BinaryExpr._compare('>=',(lambda a,b:a>=b),left,right,ctx)
def _eq(left,right,ctx):
return BinaryExpr._compare('=',(lambda a,b:a==b),left,right,ctx)
def _ne(left,right,ctx):
return BinaryExpr._compare('!=',(lambda a,b:a!=b),left,right,ctx)
def _and(left,right,ctx):
return left.bool(ctx) & right.bool(ctx)
def _or(left,right,ctx):
return left.bool(ctx) | right.bool(ctx)
ops=ExtDict({
'div':[6,_div,'number' ]
, 'mod':[6,_mod,'number' ]
, '*' :[6,_mul,'number' ]
, '+' :[5,_add,'number' ]
, '-' :[5,_sub,'number' ]
, '<' :[4,_lt ,'boolean']
, '>' :[4,_gt ,'boolean']
, '<=' :[4,_le ,'boolean']
, '>=' :[4,_ge ,'boolean']
, '=' :[3,_eq ,'boolean']
, '!=' :[3,_ne ,'boolean']
, 'and':[2,_and,'boolean']
, 'or' :[1,_or ,'boolean']
})
@classmethod
def parse(cls,lexer):
ops=cls.ops
stack=[]
index=lexer.index
while True:
if lexer.empty():
throwError(u'missing right expression')
expr=UnaryExpr.parse(lexer)
op=lexer.next()
if not op:
break
info=ops.get(op)
precedence=info and info[0]
if not precedence:
lexer.back()
break
while 0<len(stack) and precedence<=ops[stack[len(stack)-1]][0]:
expr=BinaryExpr(stack.pop(),stack.pop(),expr)
stack.extend([expr,op])
while 0<len(stack):
expr=BinaryExpr(stack.pop(),stack.pop(),expr)
return expr
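# Note on BinaryExpr.parse above (descriptive sketch, not from the original
# source): the stack holds alternating [expr, op] pairs and reduces whenever
# the incoming operator does not out-rank the one on top. For '1 + 2 * 3',
# '*' (precedence 6) out-ranks '+' (5), so '2 * 3' is reduced first and the
# result is BinaryExpr('+', Number(1), BinaryExpr('*', Number(2), Number(3))).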
#} // end of BinaryExpr
#{ // UnaryExpr
class UnaryExpr(BaseExpr):
def __init__(self,op,expr):
self.op=op
self.expr=expr
self.needContextPosition=getattr(expr,'needContextPosition',None)
self.needContextNode=getattr(expr,'needContextNode',None)
self.datatype='number'
def evaluate(self,ctx):
result=-self.expr.number(ctx)
return result
def show(self,indent=''):
t=''
t+=indent+'unary: '+self.op+'\n'
indent+=indent_space
t+=self.expr.show(indent)
return t
ops=ExtDict({
'-':1
})
@classmethod
def parse(cls,lexer):
ops=cls.ops
if ops.get(lexer.peek()):
return cls(lexer.next(),cls.parse(lexer))
else:
return UnionExpr.parse(lexer)
#} // end of UnaryExpr
#{ // UnionExpr
class UnionExpr(BaseExpr):
def __init__(self):
self.paths=[]
self.datatype='nodeset'
def evaluate(self,ctx):
paths=self.paths
nodeset=NodeSet()
for path in paths:
exrs=path.evaluate(ctx)
if not getattr(exrs,'isNodeSet',None):
throwError(u'PathExpr must be nodeset')
nodeset.merge(exrs)
return nodeset
def path(self,path):
self.paths.append(path)
if getattr(path,'needContextPosition',None):
self.needContextPosition=True
if getattr(path,'needContextNode',None):
self.needContextNode=True
def show(self,indent=''):
t=''
t+=indent+'union: '+'\n'
indent+=indent_space
for path in self.paths:
t+=path.show(indent)
return t
ops=ExtDict({
'|':1
})
@classmethod
def parse(cls,lexer):
ops=cls.ops
expr=PathExpr.parse(lexer)
if not ops.get(lexer.peek()):
return expr
union=UnionExpr()
union.path(expr)
while True:
if not ops.get(lexer.next()):
break
if lexer.empty():
throwError(u'missing next union location path')
union.path(PathExpr.parse(lexer))
lexer.back()
return union
#} // end of UnionExpr
#{ // PathExpr
class PathExpr(BaseExpr):
def __init__(self,filter):
self.filter=filter
self.steps=[]
self.datatype=filter.datatype
self.needContextPosition=filter.needContextPosition
self.needContextNode=filter.needContextNode
def evaluate(self,ctx):
nodeset=self.filter.evaluate(ctx)
if not getattr(nodeset,'isNodeSet',None):
      throwError(u'Filter nodeset must be nodeset type')
for _step in self.steps:
if nodeset.length<=0:
break
step=_step[1] # _step=[op,step]
reverse=step.reverse
iter=nodeset.iterator(reverse)
prevNodeset=nodeset
nodeset=None
needContextPosition=getattr(step,'needContextPosition',None)
axis=step.axis
if not needContextPosition and axis=='following':
node=iter()
while True:
next=iter()
if not next:
break
if not node.contains(next):
break
node=next
nodeset=step.evaluate(Ctx(node))
elif not needContextPosition and axis=='preceding':
node=iter()
nodeset=step.evaluate(Ctx(node))
else:
node=iter()
j=0
nodeset=step.evaluate(Ctx(node),False,prevNodeset,j)
while True:
node=iter()
if not node:
break
j+=1
nodeset.merge(step.evaluate(Ctx(node),False,prevNodeset,j))
return nodeset
def step(self,op,step):
step.op=op
self.steps.append([op,step])
self.quickAttr=False
if len(self.steps)==1:
if op=='/' and step.axis=='attribute':
test=step.test
if not getattr(test,'notOnlyElement',None) and test.name!='*':
self.quickAttr=True
self.attrName=test.name
def show(self,indent=''):
t=''
t+=indent+'path: '+'\n'
indent+=indent_space
t+=indent+'filter:'+'\n'
t+=self.filter.show(indent+indent_space)
if 0<len(self.steps):
t+=indent+'steps:'+'\n'
indent+=indent_space
for _step in self.steps:
        t+=indent+'operator: '+_step[0]+'\n'
t+=_step[1].show(indent) # _step=[op,step]
return t
ops=ExtDict({
'//':1
, '/': 1
})
@classmethod
def parse(cls,lexer):
ops=cls.ops
if ops.get(lexer.peek()):
op=lexer.next()
token=lexer.peek()
      if op=='/' and (lexer.empty() or (token!='.' and token!='..' and token!='@' and token!='*' and not re_has_ualpha.search(token))):
        return FilterExpr.root()
path=PathExpr(FilterExpr.root()) # RootExpr
if lexer.empty():
throwError(u'missing next location step')
expr=Step.parse(lexer)
path.step(op,expr)
else:
expr=FilterExpr.parse(lexer)
if not expr:
expr=Step.parse(lexer)
path=PathExpr(FilterExpr.context())
path.step('/',expr)
elif not ops.get(lexer.peek()):
return expr
else:
path=PathExpr(expr)
while True:
if not ops.get(lexer.peek()):
break
op=lexer.next()
if lexer.empty():
throwError(u'missing next location step')
path.step(op,Step.parse(lexer))
return path
#} // end of PathExpr
#{ // FilterExpr
class FilterExpr(BaseExprHasPredicates):
def __init__(self,primary):
self.primary=primary
self.predicates=[]
self.datatype=primary.datatype
self.needContextPosition=primary.needContextPosition
self.needContextNode=primary.needContextNode
def evaluate(self,ctx):
nodeset=self.primary.evaluate(ctx)
if not getattr(nodeset,'isNodeSet',None):
if 0<len(self.predicates):
throwError(u'Primary result must be nodeset type if filter have predicate expression')
return nodeset
return self.evaluatePredicates(nodeset)
def predicate(self,predicate):
self.predicates.append(predicate)
def show(self,indent=''):
t=''
t+=indent+'filter: '+'\n'
indent+=indent_space
t+=self.primary.show(indent+indent_space)
if 0<len(self.predicates):
t+=indent+'predicates:'+'\n'
indent+=indent_space
for predicate in self.predicates:
t+=predicate.show(indent)
return t
@classmethod
def root(cls):
return FunctionCall('root-node')
@classmethod
def context(cls):
return FunctionCall('context-node')
@classmethod
def parse(cls,lexer):
token=lexer.peek()
ch=token[0:1]
if ch=='$':
expr=VariableReference.parse(lexer)
elif ch=='(':
lexer.next()
expr=BinaryExpr.parse(lexer)
if lexer.empty():
throwError(u'unclosed "("')
if lexer.next()!=')':
lexer.back()
throwError(u'bad token: %s' % (lexer.next()))
elif ch=='"' or ch=="'":
expr=Literal.parse(lexer)
else:
if not isNaN(token):
expr=Number.parse(lexer)
elif NodeType.types.get(token):
return None
elif re_has_ualpha.search(ch) and lexer.peek(1)=='(':
expr=FunctionCall.parse(lexer)
else:
return None
if lexer.peek()!='[':
return expr
filter=FilterExpr(expr)
BaseExprHasPredicates.parsePredicates(lexer,filter)
return filter
#} // end of FilterExpr
#{ // Step
class Step(BaseExprHasPredicates):
def __init__(self,axis,test):
self.axis=axis
self.reverse=self.axises[axis][0]
self.func=self.axises[axis][1]
self.test=test
self.predicates=[]
self._quickAttr=self.axises[axis][2]
self.quickAttr=False
self.needContextPosition=False
def evaluate(self,ctx,special=False,prevNodeset=None,prevIndex=None):
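    # Sketch of the flow (descriptive note, not from the original source):
    # for an op of '//' without positional predicates on a child axis, the
    # 'descendant-or-self::node()/child::test' pattern collapses into a
    # single descendant scan; otherwise an explicit descendant-or-self step
    # is built and this step is evaluated against each of its results.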
node=ctx.node
reverse=False
if not special and getattr(self,'op',None)=='//':
if not self.needContextPosition and self.axis=='child':
if getattr(self,'quickAttr',None):
attrValueExpr=getattr(self,'attrValueExpr',None)
#attrValue=attrValueExpr.string(ctx) if attrValueExpr else None
if attrValueExpr:
attrValue=attrValueExpr.string(ctx)
else:
attrValue=None
nodeset=NodeUtil.getDescendantNodes(self.test,node,NodeSet(),self.attrName,attrValue,prevNodeset,prevIndex)
nodeset=self.evaluatePredicates(nodeset,1)
else:
nodeset=NodeUtil.getDescendantNodes(self.test,node,NodeSet(),None,None,prevNodeset,prevIndex)
nodeset=self.evaluatePredicates(nodeset)
else:
step=Step('descendant-or-self',NodeType('node'))
nodes=step.evaluate(ctx,False,prevNodeset,prevIndex).list()
nodeset=None
step.op='/'
for _node in nodes:
if not nodeset:
nodeset=self.evaluate(Ctx(_node),True,None,None)
else:
nodeset.merge(self.evaluate(Ctx(_node),True,None,None))
nodeset=nodeset or NodeSet()
else:
if getattr(self,'needContextPosition',None):
prevNodeset=None
prevIndex=None
if getattr(self,'quickAttr',None):
attrValueExpr=getattr(self,'attrValueExpr',None)
#attrValue=attrValueExpr.string(ctx) if attrValueExpr else None
if attrValueExpr:
attrValue=attrValueExpr.string(ctx)
else:
attrValue=None
nodeset=self.func(self.test,node,NodeSet(),self.attrName,attrValue,prevNodeset,prevIndex)
nodeset=self.evaluatePredicates(nodeset,1)
else:
nodeset=self.func(self.test,node,NodeSet(),None,None,prevNodeset,prevIndex)
nodeset=self.evaluatePredicates(nodeset)
if prevNodeset:
prevNodeset.doDel()
return nodeset
def predicate(self,predicate):
self.predicates.append(predicate)
datatype=getattr(predicate,'datatype',None)
if getattr(predicate,'needContextPosition',None) or datatype=='number' or datatype=='void':
self.needContextPosition=True
if getattr(self,'_quickAttr',None) and len(self.predicates)==1 and getattr(predicate,'quickAttr',None):
attrName=predicate.attrName
self.attrName=attrName
self.attrValueExpr=getattr(predicate,'attrValueExpr',None)
self.quickAttr=True
def show(self,indent=''):
t=''
t+=indent+'step: '+'\n'
indent+=indent_space
if self.axis:
t+=indent+'axis: '+self.axis+'\n'
t+=self.test.show(indent)
if 0<len(self.predicates):
t+=indent+'predicates:'+'\n'
indent+=indent_space
for predicate in self.predicates:
t+=predicate.show(indent)
return t
# --- Local Functions
def _ancestor(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
while True:
node=node.parentNode
if not node:
break
if prevNodeset and node.nodeType==NodeTypeDOM.ELEMENT_NODE:
prevNodeset.reserveDelByNode(node,prevIndex,True)
if test.match(node):
nodeset.unshift(node)
return nodeset
def _ancestorOrSelf(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
while True:
if prevNodeset and node.nodeType==NodeTypeDOM.ELEMENT_NODE:
prevNodeset.reserveDelByNode(node,prevIndex,True)
if test.match(node):
nodeset.unshift(node)
node=node.parentNode
if not node:
break
return nodeset
def _attribute(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
attrs=node.attributes
if attrs:
if getattr(test,'notOnlyElement',None) and test.type==NodeTypeDOM.ANY_NODE or test.name=='*':
for name in attrs.keys():
#nodeset.push(AttributeWrapper(name,attrs[name],node))
nodeset.push(AttributeWrapper.getAttributeWrapper(name,attrs[name],node))
else:
attr=attrs.get(test.name)
if attr!=None:
#nodeset.push(AttributeWrapper(test.name,attr,node))
nodeset.push(AttributeWrapper.getAttributeWrapper(test.name,attr,node))
return nodeset
def _child(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
return NodeUtil.getChildNodes(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex)
def _descendant(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
return NodeUtil.getDescendantNodes(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex)
def _descendantOrSelf(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
if NodeUtil.attrMatch(node,attrName,attrValue) and test.match(node):
nodeset.push(node)
return NodeUtil.getDescendantNodes(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex)
def _following(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
while True:
child=node
while True:
child=child.nextSibling
if not child:
break
if NodeUtil.attrMatch(child,attrName,attrValue) and test.match(child):
nodeset.push(child)
nodeset=NodeUtil.getDescendantNodes(test,child,nodeset,attrName,attrValue,None,None)
node=node.parentNode
if not node:
break
return nodeset
def _followingSibling(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
while True:
node=node.nextSibling
if not node:
break
if prevNodeset and node.nodeType==NodeTypeDOM.ELEMENT_NODE:
prevNodeset.reserveDelByNode(node,prevIndex)
if test.match(node):
nodeset.push(node)
    return nodeset
def _namespace(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
# not implemented
return nodeset
def _parent(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
nodeType=node.nodeType
if nodeType==NodeTypeDOM.DOCUMENT_NODE:
return nodeset
if nodeType==NodeTypeDOM.ATTRIBUTE_NODE:
nodeset.push(node.ownerElement)
return nodeset
node=node.parentNode
if test.match(node):
nodeset.push(node)
return nodeset
def _preceding(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
parents=[]
while True:
parents.insert(0,node)
node=node.parentNode
if not node:
break
for node in parents[1:]:
siblings=[]
while True:
node=node.previousSibling
if not node:
break
siblings.insert(0,node)
for node in siblings:
if NodeUtil.attrMatch(node,attrName,attrValue) and test.match(node):
nodeset.push(node)
nodeset=NodeUtil.getDescendantNodes(test,node,nodeset,attrName,attrValue,None,None)
return nodeset
def _precedingSibling(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
while True:
node=node.previousSibling
if not node:
break
if prevNodeset and node.nodeType==NodeTypeDOM.ELEMENT_NODE:
prevNodeset.reserveDelByNode(node,prevIndex,True)
if test.match(node):
nodeset.unshift(node)
return nodeset
def _self(test,node,nodeset,attrName,attrValue,prevNodeset,prevIndex):
if test.match(node):
nodeset.push(node)
return nodeset
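  # Each axises entry below is [reverse, handler, quickAttr] (descriptive
  # note): whether results are collected in reverse document order, the
  # axis traversal function, and whether the attrName/attrValue fast path
  # may be applied.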
axises=ExtDict({
'ancestor' :[True ,_ancestor ,False]
, 'ancestor-or-self' :[True ,_ancestorOrSelf ,False]
, 'attribute' :[False,_attribute ,False]
    , 'child'             :[False,_child           ,True ]
, 'descendant' :[False,_descendant ,True ]
, 'descendant-or-self':[False,_descendantOrSelf,True ]
, 'following' :[False,_following ,True ]
, 'following-sibling' :[False,_followingSibling,False]
, 'namespace' :[False,_namespace ,False]
, 'parent' :[False,_parent ,False]
, 'preceding' :[True ,_preceding ,True ]
, 'preceding-sibling' :[True ,_precedingSibling,False]
, 'self' :[False,_self ,False]
})
@classmethod
def _cself(cls):
return cls('self',NodeType('node'))
@classmethod
def parent(cls):
return cls('parent',NodeType('node'))
@classmethod
def parse(cls,lexer):
(parent,_cself,axises)=(cls.parent,cls._cself,cls.axises)
if lexer.peek()=='.':
step=_cself()
lexer.next()
elif lexer.peek()=='..':
step=parent()
lexer.next()
else:
if lexer.peek()=='@':
axis='attribute'
lexer.next()
if lexer.empty():
throwError(u'missing attribute name')
else:
if lexer.peek(1)=='::':
ch=lexer.peek()[0:1]
if not re_has_ualpha.search(ch):
throwError(u'bad token: %s' % (lexer.next()))
axis=lexer.next()
lexer.next()
if not axises.get(axis):
throwError(u'invalid axis: %s' % (axis))
if lexer.empty():
throwError(u'missing node name')
else:
axis='child'
token=lexer.peek()
ch=token[0:1]
if not re_has_ualpha.search(ch):
if token=='*':
test=NameTest.parse(lexer)
else:
throwError(u'bad token: %s' % (lexer.next()))
else:
if lexer.peek(1)=='(':
if not NodeType.types.get(token):
throwError(u'invalid node type: %s' % (token))
test=NodeType.parse(lexer)
else:
test=NameTest.parse(lexer)
step=Step(axis,test)
BaseExprHasPredicates.parsePredicates(lexer,step)
return step
#} // end of Step
#{ // NodeType
class NodeType(BaseExpr):
def __init__(self,name,literal=None):
self.name=name
self.literal=literal
self.type=NodeType.typeNums.get(name,NodeType.typeNums.node)
self.notOnlyElement=True
def match(self,node):
return self.type==NodeTypeDOM.ANY_NODE or self.type==node.nodeType
def show(self,indent=''):
t=''
t+=indent+'nodetype: '+toString(self.type)+'\n'
if self.literal:
indent+=indent_space
t+=self.literal.show(indent)
return t
types=ExtDict({
'comment' :1
, 'text' :1
, 'processing-instruction':1
, 'node' :1
})
typeNums=ExtDict({
'comment' :NodeTypeDOM.COMMENT_NODE
, 'text' :NodeTypeDOM.TEXT_NODE
, 'processing-instruction':NodeTypeDOM.PROCESSING_INSTRUCTION_NODE
, 'node' :NodeTypeDOM.ANY_NODE
})
@classmethod
def parse(cls,lexer):
type=lexer.next()
lexer.next()
if lexer.empty():
throwError(u'bad nodetype')
ch=lexer.peek()[0:1]
literal=None
if ch=='"' or ch=="'":
literal=Literal.parse(lexer)
if lexer.empty():
throwError(u'bad nodetype')
if lexer.next()!=')':
lexer.back()
throwError(u'bad token: %s' % (lexer.next()))
return cls(type,literal)
#} // end of NodeType
#{ // NameTest
class NameTest(BaseExpr):
def __init__(self,name):
self.name=name.lower()
def match(self,node):
type=node.nodeType
if type==NodeTypeDOM.ELEMENT_NODE or type==NodeTypeDOM.ATTRIBUTE_NODE:
if self.name=='*' or self.name==node.nodeName:
return True
return False
def show(self,indent=''):
t=''
t+=indent+'nametest: '+self.name+'\n'
return t
@classmethod
def parse(cls,lexer):
if lexer.peek()!= '*' and lexer.peek(1)==':' and lexer.peek(2)=='*':
return cls(lexer.next()+lexer.next()+lexer.next())
return cls(lexer.next())
#} // end of NameTest
#{ // VariableReference
class VariableReference(BaseExpr):
def __init__(self,name):
self.name=name[1:]
self.datatype='void'
def show(self,indent=''):
t=''
t+=indent+'variable: '+self.name+'\n'
return t
@classmethod
def parse(cls,lexer):
token=lexer.next()
if len(token)<2:
throwError(u'unnamed variable reference')
return cls(token)
#} // end of VariableReference
#{ // Literal
class Literal(BaseExpr):
def __init__(self,text):
self.text=text[1:-1]
self.datatype='string'
def evaluate(self,ctx):
result=self.text
return result
def show(self,indent=''):
t=''
t+=indent+'literal: '+self.text+'\n'
return t
@classmethod
def parse(cls,lexer):
token=lexer.next()
if len(token)<2:
throwError(u'unclosed literal string')
return cls(token)
#} // end of Literal
#{ // Number
class Number(BaseExpr):
def __init__(self,digit):
self.digit=toNumber(digit)
self.datatype='number'
def evaluate(self,ctx):
result=self.digit
return result
def show(self,indent=''):
t=''
t+=indent+'number: '+toString(self.digit)+'\n'
return t
@classmethod
def parse(cls,lexer):
return cls(lexer.next())
#} // end of Number
#{ // FunctionCall
class FunctionCall(BaseExpr):
def __init__(self,name):
info=self.funcs.get(name)
if not info:
throwError(u'%s is not a function' % (name))
self.name=name
self.func=info[0]
self.args=[]
self.datatype=info[1]
#self.needContextPosition=True if info[2] else False
if info[2]:
self.needContextPosition=True
else:
self.needContextPosition=False
self.needContextNodeInfo=info[3]
#self.needContextNode=self.needContextNodeInfo[0] if 0<len(self.needContextNodeInfo) else False
if 0<len(self.needContextNodeInfo):
self.needContextNode=self.needContextNodeInfo[0]
else:
self.needContextNode=False
def evaluate(self,ctx):
result=self.func(ctx,*self.args)
return result
def arg(self,arg):
self.args.append(arg)
if getattr(arg,'needContextPosition',None):
self.needContextPosition=True
args=self.args
if getattr(arg,'needContextNode',None):
#args.needContextNode=True
self.needContextNode=True
#self.needContextNode=args.needContextNode or self.needContextNodeInfo[len(args)]
if not getattr(self,'needContextNode',None) and len(args)<len(self.needContextNodeInfo):
self.needContextNode=self.needContextNodeInfo[len(args)]
def show(self,indent=''):
t=''
t+=indent+'function: '+self.name+'\n'
indent+=indent_space
if 0<len(self.args):
t+=indent+'arguments: '+'\n'
indent+=indent_space
for arg in self.args:
t+=arg.show(indent)
return t
# --- Local Functions
def _contextNode(self,*arguments):
if len(arguments)!=0:
throwError(u'Function context-node expects ()')
nodeset=NodeSet()
nodeset.push(self.node)
return nodeset
def _rootNode(self,*arguments):
if len(arguments)!=0:
throwError(u'Function root-node expects ()')
nodeset=NodeSet()
ctxn=self.node
if ctxn.nodeType==NodeTypeDOM.DOCUMENT_NODE:
nodeset.push(ctxn)
else:
nodeset.push(ctxn.ownerDocument)
return nodeset
def _last(self,*arguments):
if len(arguments)!=0:
throwError(u'Function last expects ()')
return self.last
def _position(self,*arguments):
if len(arguments)!=0:
throwError(u'Function position expects ()')
return self.position
def _count(self,*arguments):
if len(arguments)!=1:
throwError(u'Function count expects (nodeset)')
nodeset=arguments[0].evaluate(self)
if not nodeset.isNodeSet:
throwError(u'Function count expects (nodeset)')
return nodeset.length
def _id(self,*arguments):
if len(arguments)!=1:
throwError(u'Function id expects (object)')
s=arguments[0]
ctxn=self.node
if ctxn.nodeType==NodeTypeDOM.DOCUMENT_NODE:
doc=ctxn
else:
doc=ctxn.ownerDocument
s=s.string(self)
ids=re_seqspace.split(s)
nodeset=NodeSet()
for id in ids:
for elm in doc.findAll(id=id):
nodeset.push(elm)
nodeset.isSorted=False
return nodeset
def _localName(self,*arguments):
alen=len(arguments)
    if 1<alen:
throwError(u'Function local-name expects (nodeset?)')
if alen==0:
node=self.node
else:
nodeset=arguments[0]
nodeset=nodeset.evaluate(self)
if getattr(nodeset,'isNodeSet',None):
node=nodeset.first()
return ''+node.nodeName
def _name(self,*arguments):
# not implemented
return FunctionCall.funcs['local-name'][0](self,*arguments)
def _namespaceUri(self,*arguments):
# not implemented
return ''
def _string(self,*arguments):
alen=len(arguments)
if alen==0:
s=NodeUtil.to('string',self.node)
elif alen==1:
s=arguments[0]
s=s.string(self)
else:
throwError(u'Function string expects (object?)')
return s
def _concat(self,*arguments):
if len(arguments)<2:
throwError('Function concat expects (string, string[, ...])')
t=''
for argument in arguments:
t+=argument.string(self)
return t
def _startsWith(self,*arguments):
if len(arguments)!=2:
throwError('Function starts-with expects (string, string)')
(s1,s2)=(arguments[0],arguments[1])
s1=s1.string(self)
s2=s2.string(self)
#if s2 in s1 and s1.index(s2)==0:
# return True
#else:
# return False
if s1.find(s2)==0:
return True
else:
return False
def _contains(self,*arguments):
if len(arguments)!=2:
throwError('Function contains expects (string, string)')
(s1,s2)=(arguments[0],arguments[1])
s1=s1.string(self)
s2=s2.string(self)
#if s2 in s1:
# return True
#else:
# return False
n=s1.find(s2)
if n<0:
return False
else:
return True
def _substring(self,*arguments):
alen=len(arguments)
if alen<2 or 3<alen:
      throwError(u'Function substring expects (string, number, number?)')
(s,n1)=(arguments[0],arguments[1])
s=s.string(self)
n1=n1.number(self)
if alen==2:
n2=len(s)-n1+1
elif alen==3:
n2=arguments[2]
n2=n2.number(self)
if n1=='NaN' or n2=='NaN' or n1=='-Infinity' or n2=='-Infinity' or n1=='Infinity':
return u''
# n1,n2:origin=1 a1,a2:origin=0
n1=int(round(n1))
a1=n1-1
if a1<0: a1=0
if n2=='Infinity':
return s[a1:]
else:
n2=int(round(n2))
a2=n1+n2-1
return s[a1:a2]
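  # XPath semantics sketch: substring('12345',2,3) -> '234' and
  # substring('12345',2) -> '2345' (positions are 1-origin).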
def _substringBefore(self,*arguments):
if len(arguments)!=2:
throwError('Function substring-before expects (string, string)')
(s1,s2)=(arguments[0],arguments[1])
s1=s1.string(self)
s2=s2.string(self)
#if s2 in s1:
# n=s1.index(s2)
#else:
# return ''
n=s1.find(s2)
if n<0:
return ''
return s1[:n]
def _substringAfter(self,*arguments):
if len(arguments)!=2:
throwError('Function substring-after expects (string, string)')
(s1,s2)=(arguments[0],arguments[1])
s1=s1.string(self)
s2=s2.string(self)
#if s2 in s1:
# n=s1.index(s2)
#else:
# return ''
n=s1.find(s2)
if n<0:
return ''
return s1[n+len(s2):]
def _substringLength(self,*arguments):
alen=len(arguments)
if alen==0:
s=NodeUtil.to('string',self.node)
elif alen==1:
s=arguments[0]
s=s.string(self)
else:
throwError(u'Function string-length expects (string?)')
return len(s)
def _normalizeSpace(self,*arguments):
alen=len(arguments)
if alen==0:
s=NodeUtil.to('string',self.node)
elif alen==1:
s=arguments[0]
s=s.string(self)
else:
throwError(u'Function normalize-space expects (string?)')
return re_lastspace.sub('',re_firstspace.sub('',re_seqspace.sub(' ',s)))
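  # e.g. normalize-space('  a   b ') -> 'a b' (per the XPath spec).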
def _translate(self,*arguments):
if len(arguments)!=3:
throwError('Function translate expects (string, string, string)')
(s1,s2,s3)=(arguments[0],arguments[1],arguments[2])
s1=s1.string(self)
s2=s2.string(self)
s3=s3.string(self)
_map={}
for i in range(0,len(s2)):
ch=s2[i]
if not _map.get(ch):
#_map[ch]=s3[i] if i<len(s3) else ''
if i<len(s3):
_map[ch]=s3[i]
else:
_map[ch]=''
t=''
for ch in s1:
replace=_map.get(ch)
#t+=replace if replace!=None else ch
if replace!=None:
t+=replace
else:
        t+=ch
return t
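  # Per the XPath spec: translate('bar','abc','ABC') -> 'BAr' and
  # translate('--aaa--','abc-','ABC') -> 'AAA' (characters of s1 that map
  # to no s3 counterpart are dropped).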
def _boolean(self,*arguments):
if len(arguments)!=1:
      throwError(u'Function boolean expects (object)')
b=arguments[0]
b=b.bool(self)
return b
def _not(self,*arguments):
if len(arguments)!=1:
throwError(u'Function not expects (object)')
b=arguments[0]
b=b.bool(self)
return not b
def _true(self,*arguments):
if len(arguments)!=0:
      throwError(u'Function true expects ()')
return True
def _false(self,*arguments):
if len(arguments)!=0:
throwError(u'Function false expects ()')
return False
def _lang(self,*arguments):
# not implemented
return False
def _number(self,*arguments):
alen=len(arguments)
if alen==0:
n=NodeUtil.to('number',self.node)
elif alen==1:
n=arguments[0]
n=n.number(self)
else:
throwError(u'Function number expects (object?)')
if isinstance(n,int):
return n
elif isinstance(n,float):
n1=int(n)
#return n1 if n1==n else n
if n1==n:
return n1
else:
return n
else:
return 'NaN'
def _sum(self,*arguments):
if len(arguments)!=1:
throwError(u'Function sum expects (nodeset)')
nodeset=arguments[0]
nodeset=nodeset.evaluate(self)
if not getattr(nodeset,'isNodeSet',None):
throwError(u'Function sum expects (nodeset)')
nodes=nodeset.list()
n=0
for node in nodes:
n+=NodeUtil.to('number',node)
return n
def _floor(self,*arguments):
if len(arguments)!=1:
throwError(u'Function floor expects (number)')
n=arguments[0]
n=n.number(self)
return int(math.floor(n))
def _ceiling(self,*arguments):
if len(arguments)!=1:
throwError(u'Function ceiling expects (number)')
n=arguments[0]
n=n.number(self)
return int(math.ceil(n))
def _round(self,*arguments):
if len(arguments)!=1:
throwError(u'Function round expects (number)')
n=arguments[0]
n=n.number(self)
return int(round(n))
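  # funcs maps an XPath function name to [implementation, result datatype,
  # needs-context-position, needs-context-node flag per argument position]
  # (descriptive note; see how __init__ unpacks info[0..3] above).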
funcs=ExtDict({
'context-node' :[_contextNode ,'nodeset',False,[True]]
, 'root-node' :[_rootNode ,'nodeset',False,[]]
, 'last' :[_last ,'number' ,True ,[]]
, 'position' :[_position ,'number' ,True ,[]]
, 'count' :[_count ,'number' ,False,[]]
, 'id' :[_id ,'nodeset',False,[]]
, 'local-name' :[_localName ,'string' ,False,[True ,False]]
, 'name' :[_name ,'string' ,False,[True ,False]]
, 'namespace-uri' :[_namespaceUri ,'string' ,False,[True ,False]]
, 'string' :[_string ,'string' ,False,[True ,False]]
, 'concat' :[_concat ,'string' ,False,[]]
, 'starts-with' :[_startsWith ,'boolean',False,[]]
, 'contains' :[_contains ,'boolean',False,[]]
, 'substring' :[_substring ,'string' ,False,[]]
, 'substring-before':[_substringBefore,'string' ,False,[]]
, 'substring-after' :[_substringAfter ,'string' ,False,[]]
, 'string-length' :[_substringLength,'number' ,False,[True ,False]]
, 'normalize-space' :[_normalizeSpace ,'string' ,False,[True ,False]]
, 'translate' :[_translate ,'string' ,False,[]]
, 'boolean' :[_boolean ,'boolean',False,[]]
, 'not' :[_not ,'boolean',False,[]]
, 'true' :[_true ,'boolean',False,[]]
, 'false' :[_false ,'boolean',False,[]]
, 'lang' :[_lang ,'boolean',False,[]]
, 'number' :[_number ,'number' ,False,[True ,False]]
, 'sum' :[_sum ,'number' ,False,[]]
, 'floor' :[_floor ,'number' ,False,[]]
, 'ceiling' :[_ceiling ,'number' ,False,[]]
, 'round' :[_round ,'number' ,False,[]]
})
@classmethod
def parse(cls,lexer):
func=cls(lexer.next())
lexer.next()
while lexer.peek()!=')':
if lexer.empty():
throwError(u'missing function argument list')
expr=BinaryExpr.parse(lexer)
func.arg(expr)
if lexer.peek()!=',':
break
lexer.next()
if lexer.empty():
throwError(u'unclosed function argument list')
if lexer.next()!=')':
lexer.back()
throwError(u'bad token: %s' % (lexer.next()))
return func
#} // end of FunctionCall
#{ // NodeSet
class NodeSet(object):
def __init__(self):
self.length=0
self.nodes=[]
self.seen={}
self.idIndexMap=None
self.reserveDels=[]
self.isNodeSet=True
self.isSorted=True
self.sortOff=False
self.only=None
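    # 'only' is a single-node fast path (descriptive note): while the set
    # holds exactly one node it lives here and nodes/seen stay empty; push
    # and unshift spill it into nodes once a second node arrives.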
def merge(self,nodeset):
self.isSorted=False
if getattr(nodeset,'only',None):
return self.push(nodeset.only)
if getattr(self,'only',None):
only=self.only
self.only=None
self.push(only)
self.length-=1
map(self._add,nodeset.nodes)
def sort(self):
if getattr(self,'only',None):
return
if getattr(self,'sortOff',None):
return
if getattr(self,'isSorted',None):
return
self.isSorted=True
self.idIndexMap=None
nodes=self.nodes
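# _comp: document-order comparison; attribute nodes compare via their parent elements.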
def _comp(a,b):
if a.nodeType==NodeTypeDOM.ATTRIBUTE_NODE: a=a.parentNode
if b.nodeType==NodeTypeDOM.ATTRIBUTE_NODE: b=b.parentNode
if a==b:
return 0
(node1,node2)=(a,b)
(ancestor1,ancestor2)=(a,b)
(deep1,deep2)=(0,0)
while True:
ancestor1=ancestor1.parentNode
if not ancestor1:
break
deep1+=1
while True:
ancestor2=ancestor2.parentNode
if not ancestor2:
break
deep2+=1
if deep1>deep2:
while deep1!=deep2:
deep1-=1
node1=node1.parentNode
if node1==node2:
return 1
elif deep2>deep1:
while deep2!=deep1:
deep2-=1
node2=node2.parentNode
if node1==node2:
return -1
while True:
ancestor1=node1.parentNode
ancestor2=node2.parentNode
if ancestor1==ancestor2:
break
node1=ancestor1
node2=ancestor2
while True:
node1=node1.nextSibling
if not node1:
break
if node1==node2:
return -1
return 1
def index_comp(a,b):
if a.nodeType==NodeTypeDOM.ATTRIBUTE_NODE: a=a.parentNode
if b.nodeType==NodeTypeDOM.ATTRIBUTE_NODE: b=b.parentNode
return cmp(a._sortindex,b._sortindex)
if USE_NODE_INDEX:
nodes.sort(index_comp)
else:
nodes.sort(_comp)
def reserveDelByNodeID(self,id,offset,reverse):
_map=self.createIdIndexMap()
index=_map.get(id)
if index:
if reverse and index<(self.length-offset-1) or not reverse and offset<index:
self.reserveDels.append(index)
def reserveDelByNode(self,node,offset,reverse=False):
self.reserveDelByNodeID(self.NodeID.get(node),offset,reverse)
def doDel(self):
if len(self.reserveDels)<=0:
return
map(self._del,sorted(self.reserveDels,lambda x,y:cmp(y,x)))
self.reserveDels=[]
self.idIndexMap=None
def createIdIndexMap(self):
if getattr(self,'idIndexMap',None):
return self.idIndexMap
else:
_map=self.idIndexMap={}
nodes=self.nodes
for i in range(0,len(nodes)):
node=nodes[i]
id=self.NodeID.get(node)
_map[id]=i
return _map
def _del(self,index):
self.length-=1
if getattr(self,'only',None):
self.only=None
else:
node=self.nodes[index]
if getattr(self,'_first',None)==node:
self._first=None
self._firstSourceIndex=None
self._firstSubIndex=None
del(self.seen[self.NodeID.get(node)])
del(self.nodes[index])
def delDescendant(self,elm,offset):
if getattr(self,'only',None):
return
nodeType=elm.nodeType
if nodeType!=NodeTypeDOM.ELEMENT_NODE and nodeType!=NodeTypeDOM.DOCUMENT_NODE:
return
nodes=self.nodes
i=offset+1
while i<len(nodes):
if elm.contains(nodes[i]):
self._del(i)
i-=1
i+=1
def _add(self,node,reverse=False):
seen=self.seen
id=self.NodeID.get(node)
if seen.get(id):
return
seen[id]=True
self.length+=1
if reverse:
self.nodes.insert(0,node)
else:
self.nodes.append(node)
def unshift(self,node):
if self.length<=0:
self.length+=1
self.only=node
return
if getattr(self,'only',None):
only=self.only
self.only=None
self.unshift(only)
self.length-=1
return self._add(node,True)
def push(self,node):
if self.length<=0:
self.length+=1
self.only=node
return
if getattr(self,'only',None):
only=self.only
self.only=None
self.push(only)
self.length-=1
return self._add(node)
def first(self):
if getattr(self,'only',None):
return self.only
if 0<len(self.nodes):
self.sort()
return self.nodes[0]
else:
return None
def list(self):
if getattr(self,'only',None):
return [self.only]
self.sort()
return self.nodes
def string(self):
node=self.only or self.first()
#return NodeUtil.to('string',node) if node else ''
if node:
return NodeUtil.to('string',node)
else:
return ''
def bool(self):
return toBoolean(self.length or self.only)
def number(self):
return toNumber(self.string())
def iterator(self,reverse=False):
self.sort()
_info=ExtDict({
'nodeset':self
, 'count':0
})
if not reverse:
calcIndex=(lambda x,y:x)
else:
calcIndex=(lambda x,y:y.length-x-1)
def iter():
nodeset=_info.nodeset
index=calcIndex(_info.count,nodeset)
_info['count']+=1
if getattr(nodeset,'only',None) and index==0:
return nodeset.only
#return nodeset.nodes[index] if 0<=index and index<len(nodeset.nodes) else None
if 0<=index and index<len(nodeset.nodes):
return nodeset.nodes[index]
else:
return None
return iter
class nodeID(object):
def __init__(self):
self.uuid=1
def get(self,node):
id=getattr(node,'__bsxpath_id__',None)
if id:
return id
id=node.__bsxpath_id__=self.uuid
self.uuid+=1
return id
NodeID=nodeID()
#} // end of NodeSet
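# NodeSet usage sketch (node_a / node_b are placeholder DOM nodes, not part
# of this module):
#
#   ns = NodeSet()
#   ns.push(node_a); ns.push(node_a); ns.push(node_b)
#   assert ns.length == 2        # duplicate pushes are ignored
#   ordered = ns.list()          # list() returns nodes in document order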
#{ // XPathEvaluator
class XPathResult(object):
ANY_TYPE =0
NUMBER_TYPE =1
STRING_TYPE =2
BOOLEAN_TYPE =3
UNORDERED_NODE_ITERATOR_TYPE=4
ORDERED_NODE_ITERATOR_TYPE =5
UNORDERED_NODE_SNAPSHOT_TYPE=6
ORDERED_NODE_SNAPSHOT_TYPE =7
ANY_UNORDERED_NODE_TYPE =8
FIRST_ORDERED_NODE_TYPE =9
def __init__(self,value,type):
if type==XPathResult.ANY_TYPE:
tov=typeof(value)
if tov=='object' : type=self.UNORDERED_NODE_ITERATOR_TYPE
if tov=='boolean': type=self.BOOLEAN_TYPE
if tov=='string' : type=self.STRING_TYPE
if tov=='number' : type=self.NUMBER_TYPE
if type<self.NUMBER_TYPE or self.FIRST_ORDERED_NODE_TYPE<type:
throwError(u'unknown type: %d' %(type))
self.resultType=type
if type==self.NUMBER_TYPE:
#self.numberValue=value.number() if getattr(value,'isNodeSet',None) else toNumber(value)
if getattr(value,'isNodeSet',None):
self.numberValue=value.number()
else:
self.numberValue=toNumber(value)
elif type==self.STRING_TYPE:
#self.stringValue=value.string() if getattr(value,'isNodeSet',None) else toString(value)
if getattr(value,'isNodeSet',None):
self.stringValue=value.string()
else:
self.stringValue=toString(value)
elif type==self.BOOLEAN_TYPE:
#self.booleanValue=value.bool() if getattr(value,'isNodeSet',None) else toBoolean(value)
if getattr(value,'isNodeSet',None):
self.booleanValue=value.bool()
else:
self.booleanValue=toBoolean(value)
elif type==self.ANY_UNORDERED_NODE_TYPE or type==self.FIRST_ORDERED_NODE_TYPE:
self.singleNodeValue=value.first()
else:
self.nodes=value.list()
self.snapshotLength=value.length
self.index=0
self.invalidIteratorState=False
def iterateNext(self):
node=self.nodes[self.index]
self.index+=1
return node
def snapshotItem(self,i):
return self.nodes[i]
class XPathExpression(object):
def __init__(self,expr,resolver):
if len(expr)<=0:
throwError(u'no expression')
lexer=self.lexer=Lexer(expr)
if lexer.empty():
throwError(u'no expression')
self.expr=BinaryExpr.parse(lexer)
if not lexer.empty():
throwError(u'bad token: %s' % (lexer.next()))
def evaluate(self,node,type):
return XPathResult(self.expr.evaluate(Ctx(node)),type)
class BSXPathEvaluator(BeautifulSoup):
def __init__(self, *args, **kwargs):
BeautifulSoup.__init__(self, *args, **kwargs)
self._string=u'[object HTMLDocument]'
self._fix_table()
self._init_index()
SELF_CLOSING_TAGS=buildTagMap(None,['br','hr','input','img','meta','spacer','frame','base'])
# exclude 'link' for XML
def _init_index(self):
idx=self._sortindex=1
self._cachemap=None
for node in NodeUtilBS.it_deepNodes(self):
idx=node._sortindex=idx+1
for node in self.findAll():
node.attrMap=dict(node.attrs)
def _fix_table(self):
tables=self.findAll('table')
for table in tables:
parent=table.parent
contents=getattr(table,'contents',[])
if len(contents)<=0: continue
(tbody,tr)=(None,None)
node=table.contents[0]
while node:
_next=node.nextSibling
name=getattr(node,'name',None)
if name in ('thead','tbody','tfoot',):
(tbody,tr)=(None,None)
elif name in ('tr',):
tr=None
if not tbody:
tbody=Tag(self,'tbody')
table.insert(table.contents.index(node),tbody)
tbody.append(node)
elif name in ('th','td',):
if not tbody:
tbody=Tag(self,'tbody')
table.insert(table.contents.index(node),tbody)
if not tr:
tr=Tag(self,'tr')
tbody.append(tr)
tr.append(node)
else:
parent.insert(parent.contents.index(table),node)
node=_next
def __str__(self,encoding=DEFAULT_OUTPUT_ENCODING):
return self._string
def __unicode__(self):
return self._string
def decode(self):
return self._string
def createExpression(self,expr,resolver):
return XPathExpression(expr,resolver)
def createNSResolver(self,nodeResolver):
# not implemented
pass
def evaluate(self,expr,context,resolver,type,result):
if not context:
context=self
if isinstance(context,list):
context=context[0]
return self.createExpression(expr,resolver).evaluate(context,type)
def getItemList(self,expr,context=None):
elms=[]
result=self.evaluate(expr,context,None,XPathResult.ORDERED_NODE_SNAPSHOT_TYPE,None)
for i in range(0,result.snapshotLength):
elms.append(result.snapshotItem(i))
return elms
def getFirstItem(self,expr,context=None):
elm=self.evaluate(expr,context,None,XPathResult.FIRST_ORDERED_NODE_TYPE,None).singleNodeValue
return elm
def applyXPath(self,context,expr):
start_t=datetime.datetime.now()
expression=self.createExpression(expr,None)
result=expression.evaluate(context,XPathResult.ANY_TYPE)
time=datetime.datetime.now()-start_t
resultType=result.resultType
if XPathResult.BOOLEAN_TYPE<resultType:
result=expression.evaluate(context,XPathResult.ORDERED_NODE_SNAPSHOT_TYPE)
array=[]
for i in range(0,result.snapshotLength):
array.append(result.snapshotItem(i))
resultItems=array
else:
if resultType==XPathResult.NUMBER_TYPE:
resultItems=result.numberValue
elif resultType==XPathResult.STRING_TYPE:
resultItems=result.stringValue
elif resultType==XPathResult.BOOLEAN_TYPE:
resultItems=result.booleanValue
else:
resultItems=None
return (resultItems,time,resultType)
#} // end of XPathEvaluator
if __name__ == '__main__':
import sys
import optparse
import pdb
options=None
def prn(obj):
def prn_sub(obj,indent):
indent+=u' '
if isinstance(obj,list):
for i in range(0,len(obj)):
print u'[%d]' % (i)
prn_sub(obj[i],indent)
elif isinstance(obj,dict):
for mem in obj:
print u'[%s]' % (mem)
prn_sub(obj[mem],indent)
elif getattr(obj,'nodeType',None) or isinstance(obj,basestring):
str=indent+re.sub(r'(\r?\n)',r'\1'+indent,unicode(obj)) # \r?\n as a group, not a character class
print str
print obj
prn_sub(obj,u'')
def test():
global options
if options.expr:
if options.html:
document=BSXPathEvaluator(options.html)
elif options.file:
fp=open(options.file)
document=BSXPathEvaluator(fp.read())
fp.close()
else:
document=BSXPathEvaluator(sys.stdin.read())
(result,time,resultType)=document.applyXPath(document,options.expr)
prn(result)
else:
optparser.print_help()
optparser=optparse.OptionParser()
optparser.add_option(
'-e','--expr'
, action='store'
, metavar='<expression>'
, help=u'expression: XPATH expression'
, dest='expr'
)
optparser.add_option(
'-t','--html'
, action='store'
, metavar='<text>'
, help=u'text: HTML text'
, dest='html'
)
optparser.add_option(
'-f','--file'
, action='store'
, metavar='<filename>'
, help=u'filename: HTML filename'
, dest='file'
)
optparser.add_option(
'-d','--debug'
, action='store_true'
, help=u'use pdb'
, dest='debug'
)
(options,args)=optparser.parse_args()
if options.debug:
pdb.run('test()')
else:
test()
#[History]
#
# 0.01e: 2009-04-12
# - exclude 'link' tag from SELF_CLOSING_TAGS (for XML)
# - add __str__() and __unicode__() to AttributeWrapper class
#
# 0.01d: 2009-03-28
# - performance improvement: node searching(make attrMap in advance)
#
# 0.01c: 2009-03-28
# - performance improvement: node sorting(indexing) and node search(caching)
#
# 0.01b: 2009-03-27
# - fixed 'singleNodeValue' bug
# result = document.evaluate('//title[1]',document,None,XPathResult.FIRST_ORDERED_NODE_TYPE,None).singleNodeValue
# returned 'None', even though the first value exists
#
# 0.01a: 2009-03-27
# - fixed string() bug
# BSXPath.py -e "boolean(//p[contains(string(),\"br\")])" -t "<html><head></head><body><p>text before<br />text after</p></body></html>"
# # returned 'True', even though 'False' is right
# - cope with <table> problems on malformed HTML
# may convert '<table><th></th><td></td></table>' to '<table><tbody><tr><th></th><td></td></tr></tbody></table>' automatically
#
# 0.01 : 2009-03-25
# first release
#
#■ End of BSXPath.py
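# Usage sketch (mirrors the examples above; BSXPathEvaluator subclasses
# BeautifulSoup, so BeautifulSoup 3 must be importable):
#
#   document = BSXPathEvaluator('<html><body><h1>t</h1><p>a</p></body></html>')
#   node = document.evaluate('//h1[1]', document, None,
#                            XPathResult.FIRST_ORDERED_NODE_TYPE,
#                            None).singleNodeValue
#   (items, elapsed, rtype) = document.applyXPath(document, 'count(//p)')
#   # items == 1, rtype == XPathResult.NUMBER_TYPE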
| apache-2.0 | -1,387,810,285,272,834,800 | 7,773,747,423,111,944,000 | 27.738803 | 204 | 0.610547 | false |
RRZE-HPC/pycachesim | setup.py | 1 | 4792 | #!/usr/bin/env python
from __future__ import absolute_import
from setuptools.extension import Extension
from setuptools import setup, find_packages # Always prefer setuptools over distutils
from codecs import open # To use a consistent encoding
import os
import io
import re
#import numpy
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the relevant file
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Stolen from pip
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
# Stolen from pip
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='pycachesim',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=find_version('cachesim', '__init__.py'),
description='Python Cache Hierarchy Simulator',
long_description=long_description,
long_description_content_type='text/x-rst',
# The project's main homepage.
url='https://github.com/RRZE-HPC/pycachesim',
# Author details
author='Julian Hammer',
author_email='[email protected]',
# Choose your license
license='AGPLv3',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Software Development',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU Affero General Public License v3',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
# What does your project relate to?
keywords='hpc performance benchmark analysis',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
ext_modules=[
Extension(
'cachesim.backend',
sources=['cachesim/backend.c'],
extra_compile_args=['-std=c99'],
#include_dirs=[numpy.get_include()]
)
],
# List run-time dependencies here. These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# https://packaging.python.org/en/latest/requirements.html
install_requires=[
],
# List additional groups of dependencies here (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require={
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'cachesim': ['*.h']
},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages.
# see http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'cachesim=cachesim.frontend:main',
],
},
)
| agpl-3.0 | -980,146,394,073,415,600 | 1,476,661,814,015,069,200 | 33.228571 | 91 | 0.643573 | false |
martinwicke/tensorflow | tensorflow/python/client/device_lib.py | 149 | 1308 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Python interface for creating TensorFlow servers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import device_attributes_pb2
from tensorflow.python import pywrap_tensorflow
def list_local_devices():
"""List the available devices available in the local process.
Returns:
A list of `DeviceAttributes` protocol buffers.
"""
def _convert(pb_str):
m = device_attributes_pb2.DeviceAttributes()
m.ParseFromString(pb_str)
return m
return [_convert(s) for s in pywrap_tensorflow.list_devices()]
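# Example (a sketch; the resulting list depends on the local machine):
#
#   for device in list_local_devices():
#     print(device.name, device.device_type)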
| apache-2.0 | -4,870,816,598,941,193,000 | 5,828,043,104,823,611,000 | 35.333333 | 80 | 0.707187 | false |
wxthon/googletest | scripts/upload_gtest.py | 1963 | 2851 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
[email protected] to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = '[email protected]'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
| bsd-3-clause | -1,008,651,524,481,904,000 | -6,453,137,073,226,368,000 | 35.551282 | 72 | 0.727815 | false |
peterm-itr/edx-platform | lms/djangoapps/mobile_api/tests.py | 8 | 1648 | """
Tests for mobile API utilities
"""
import ddt
from rest_framework.test import APITestCase
from courseware.tests.factories import UserFactory
from student import auth
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from .utils import mobile_available_when_enrolled
ROLE_CASES = (
(auth.CourseBetaTesterRole, True),
(auth.CourseStaffRole, True),
(auth.CourseInstructorRole, True),
(None, False)
)
@ddt.ddt
class TestMobileApiUtils(ModuleStoreTestCase, APITestCase):
"""
Tests for mobile API utilities
"""
def setUp(self):
self.user = UserFactory.create()
@ddt.data(*ROLE_CASES)
@ddt.unpack
def test_mobile_role_access(self, role, should_have_access):
"""
Verifies that our mobile access function properly handles using roles to grant access
"""
course = CourseFactory.create(mobile_available=False)
if role:
role(course.id).add_users(self.user)
self.assertEqual(should_have_access, mobile_available_when_enrolled(course, self.user))
def test_mobile_explicit_access(self):
"""
Verifies that our mobile access function listens to the mobile_available flag as it should
"""
course = CourseFactory.create(mobile_available=True)
self.assertTrue(mobile_available_when_enrolled(course, self.user))
def test_missing_course(self):
"""
Verifies that we handle the case where a course doesn't exist
"""
self.assertFalse(mobile_available_when_enrolled(None, self.user))
| agpl-3.0 | 4,903,085,924,339,361,000 | -3,569,647,189,540,400,600 | 28.963636 | 98 | 0.696602 | false |
kbdick/RecycleTracker | recyclecollector/scrap/gdata-2.0.18/src/gdata/contentforshopping/client.py | 29 | 31884 | #!/usr/bin/python
#
# Copyright (C) 2010-2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extend the gdata client for the Content API for Shopping."""
__author__ = 'afshar (Ali Afshar), dhermes (Daniel Hermes)'
import urllib
import atom.data
import gdata.client
from gdata.contentforshopping.data import ClientAccount
from gdata.contentforshopping.data import ClientAccountFeed
from gdata.contentforshopping.data import DatafeedEntry
from gdata.contentforshopping.data import DatafeedFeed
from gdata.contentforshopping.data import DataQualityEntry
from gdata.contentforshopping.data import DataQualityFeed
from gdata.contentforshopping.data import InventoryFeed
from gdata.contentforshopping.data import ProductEntry
from gdata.contentforshopping.data import ProductFeed
from gdata.contentforshopping.data import UsersEntry
from gdata.contentforshopping.data import UsersFeed
CFS_VERSION = 'v1'
CFS_HOST = 'content.googleapis.com'
CFS_URI = 'https://%s/content' % CFS_HOST
CFS_PROJECTION = 'schema'
class ContentForShoppingClient(gdata.client.GDClient):
"""Client for Content for Shopping API.
:param account_id: Merchant account ID. This value will be used by default
for all requests, but may be overridden on a
request-by-request basis.
:param api_version: The version of the API to target. Default value: 'v1'.
:param **kwargs: Pass all addtional keywords to the GDClient constructor.
"""
api_version = '1.0'
def __init__(self, account_id=None, api_version=CFS_VERSION,
cfs_uri=CFS_URI, **kwargs):
self.cfs_account_id = account_id
self.cfs_api_version = api_version
self.cfs_uri = cfs_uri
gdata.client.GDClient.__init__(self, **kwargs)
def _create_uri(self, account_id, resource, path=(), use_projection=True,
dry_run=False, warnings=False, max_results=None,
start_token=None, start_index=None,
performance_start=None, performance_end=None):
"""Create a request uri from the given arguments.
If arguments are None, use the default client attributes.
"""
account_id = account_id or self.cfs_account_id
if account_id is None:
raise ValueError('No Account ID set. '
'Either set for the client, or per request')
segments = [self.cfs_uri, self.cfs_api_version, account_id, resource]
if use_projection:
segments.append(CFS_PROJECTION)
segments.extend(urllib.quote(value) for value in path)
result = '/'.join(segments)
request_params = []
if dry_run:
request_params.append('dry-run')
if warnings:
request_params.append('warnings')
if max_results is not None:
request_params.append('max-results=%s' % max_results)
if start_token is not None:
request_params.append('start-token=%s' % start_token)
if start_index is not None:
request_params.append('start-index=%s' % start_index)
if performance_start is not None:
request_params.append('performance.start=%s' % performance_start)
if performance_end is not None:
request_params.append('performance.end=%s' % performance_end)
request_params = '&'.join(request_params)
if request_params:
result = '%s?%s' % (result, request_params)
return result
def _create_product_id(self, id, country, language, channel='online'):
return '%s:%s:%s:%s' % (channel, language, country, id)
def _create_batch_feed(self, entries, operation, feed=None,
feed_class=ProductFeed):
if feed is None:
feed = feed_class()
for entry in entries:
entry.batch_operation = gdata.data.BatchOperation(type=operation)
feed.entry.append(entry)
return feed
# Operations on a single product
def get_product(self, id, country, language, account_id=None,
auth_token=None):
"""Get a product by id, country and language.
:param id: The product ID
:param country: The country (target_country)
:param language: The language (content_language)
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
pid = self._create_product_id(id, country, language)
uri = self._create_uri(account_id, 'items/products', path=[pid])
return self.get_entry(uri, desired_class=ProductEntry,
auth_token=auth_token)
GetProduct = get_product
def insert_product(self, product, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Create a new product, by posting the product entry feed.
:param product: A :class:`gdata.contentforshopping.data.ProductEntry` with
the required product data.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'items/products',
dry_run=dry_run, warnings=warnings)
return self.post(product, uri=uri, auth_token=auth_token)
InsertProduct = insert_product
def update_product(self, product, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Update a product, by putting the product entry feed.
:param product: A :class:`gdata.contentforshopping.data.ProductEntry` with
the required product data.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False
by default.
"""
pid = self._create_product_id(product.product_id.text,
product.target_country.text,
product.content_language.text)
uri = self._create_uri(account_id, 'items/products', path=[pid],
dry_run=dry_run, warnings=warnings)
return self.update(product, uri=uri, auth_token=auth_token)
UpdateProduct = update_product
def delete_product(self, product, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Delete a product
:param product: A :class:`gdata.contentforshopping.data.ProductEntry` with
the required product data.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
pid = self._create_product_id(product.product_id.text,
product.target_country.text,
product.content_language.text)
uri = self._create_uri(account_id, 'items/products', path=[pid],
dry_run=dry_run, warnings=warnings)
return self.delete(uri, auth_token=auth_token)
DeleteProduct = delete_product
# Operations on multiple products
def get_products(self, max_results=None, start_token=None, start_index=None,
performance_start=None, performance_end=None,
account_id=None, auth_token=None):
"""Get a feed of products for the account.
:param max_results: The maximum number of results to return (default 25,
maximum 250).
:param start_token: The start token of the feed provided by the API.
:param start_index: The starting index of the feed to return (default 1,
maximum 10000)
:param performance_start: The start date (inclusive) of click data returned.
Should be represented as YYYY-MM-DD; not appended
if left as None.
:param performance_end: The end date (inclusive) of click data returned.
Should be represented as YYYY-MM-DD; not appended
if left as None.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'items/products',
max_results=max_results,
start_token=start_token,
start_index=start_index,
performance_start=performance_start,
performance_end=performance_end)
return self.get_feed(uri, auth_token=auth_token,
desired_class=ProductFeed)
GetProducts = get_products
def batch(self, feed, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Send a batch request.
:param feed: The feed of batch entries to send.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'items/products', path=['batch'],
dry_run=dry_run, warnings=warnings)
return self.post(feed, uri=uri, auth_token=auth_token,
desired_class=ProductFeed)
Batch = batch
def insert_products(self, products, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Insert the products using a batch request
:param products: A list of product entries
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
feed = self._create_batch_feed(products, 'insert')
return self.batch(feed, account_id=account_id, auth_token=auth_token,
dry_run=dry_run, warnings=warnings)
InsertProducts = insert_products
def update_products(self, products, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Update the products using a batch request
:param products: A list of product entries
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
.. note:: Entries must have the atom:id element set.
"""
feed = self._create_batch_feed(products, 'update')
return self.batch(feed, account_id=account_id, auth_token=auth_token,
dry_run=dry_run, warnings=warnings)
UpdateProducts = update_products
def delete_products(self, products, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Delete the products using a batch request.
:param products: A list of product entries
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
.. note:: Entries must have the atom:id element set.
"""
feed = self._create_batch_feed(products, 'delete')
return self.batch(feed, account_id=account_id, auth_token=auth_token,
dry_run=dry_run, warnings=warnings)
DeleteProducts = delete_products
# Operations on datafeeds
def get_datafeeds(self, account_id=None):
"""Get the feed of datafeeds.
:param account_id: The Sub-Account ID. If omitted, the default
Account ID will be used for this client.
"""
uri = self._create_uri(account_id, 'datafeeds/products',
use_projection=False)
return self.get_feed(uri, desired_class=DatafeedFeed)
GetDatafeeds = get_datafeeds
# Operations on a single datafeed
def get_datafeed(self, feed_id, account_id=None, auth_token=None):
"""Get the feed of a single datafeed.
:param feed_id: The ID of the desired datafeed.
:param account_id: The Sub-Account ID. If omitted, the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'datafeeds/products', path=[feed_id],
use_projection=False)
return self.get_feed(uri, auth_token=auth_token,
desired_class=DatafeedEntry)
GetDatafeed = get_datafeed
def insert_datafeed(self, entry, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Insert a datafeed.
:param entry: XML Content of post request required for registering a
datafeed.
:param account_id: The Sub-Account ID. If omitted, the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'datafeeds/products',
use_projection=False, dry_run=dry_run,
warnings=warnings)
return self.post(entry, uri=uri, auth_token=auth_token)
InsertDatafeed = insert_datafeed
def update_datafeed(self, entry, feed_id, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Update the feed of a single datafeed.
:param entry: XML Content of put request required for updating a
datafeed.
:param feed_id: The ID of the desired datafeed.
:param account_id: The Sub-Account ID. If omitted, the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'datafeeds/products', path=[feed_id],
use_projection=False, dry_run=dry_run,
warnings=warnings)
return self.update(entry, auth_token=auth_token, uri=uri)
UpdateDatafeed = update_datafeed
def delete_datafeed(self, feed_id, account_id=None, auth_token=None):
"""Delete a single datafeed.
:param feed_id: The ID of the desired datafeed.
:param account_id: The Sub-Account ID. If omitted, the default
Account ID will be used for this client.
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'datafeeds/products', path=[feed_id],
use_projection=False)
return self.delete(uri, auth_token=auth_token)
DeleteDatafeed = delete_datafeed
# Operations on client accounts
def get_client_accounts(self, max_results=None, start_index=None,
account_id=None, auth_token=None):
"""Get the feed of managed accounts
:param max_results: The maximum number of results to return (default 25,
maximum 250).
:param start_index: The starting index of the feed to return (default 1,
maximum 10000)
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'managedaccounts',
max_results=max_results, start_index=start_index,
use_projection=False)
return self.get_feed(uri, desired_class=ClientAccountFeed,
auth_token=auth_token)
GetClientAccounts = get_client_accounts
def get_client_account(self, client_account_id,
account_id=None, auth_token=None):
"""Get a managed account.
:param client_account_id: The Account ID of the subaccount being retrieved.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'managedaccounts',
path=[client_account_id], use_projection=False)
return self.get_entry(uri, desired_class=ClientAccount,
auth_token=auth_token)
GetClientAccount = get_client_account
def insert_client_account(self, entry, account_id=None, auth_token=None,
dry_run=False, warnings=False):
"""Insert a client account entry
:param entry: An entry of type ClientAccount
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'managedaccounts',
use_projection=False, dry_run=dry_run,
warnings=warnings)
return self.post(entry, uri=uri, auth_token=auth_token)
InsertClientAccount = insert_client_account
def update_client_account(self, entry, client_account_id, account_id=None,
auth_token=None, dry_run=False, warnings=False):
"""Update a client account
:param entry: An entry of type ClientAccount to update to
:param client_account_id: The client account ID
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'managedaccounts',
path=[client_account_id], use_projection=False,
dry_run=dry_run, warnings=warnings)
return self.update(entry, uri=uri, auth_token=auth_token)
UpdateClientAccount = update_client_account
def delete_client_account(self, client_account_id, account_id=None,
auth_token=None, dry_run=False, warnings=False):
"""Delete a client account
:param client_account_id: The client account ID
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
:param dry_run: Flag to run all requests that modify persistent data in
dry-run mode. False by default.
:param warnings: Flag to include warnings in response. False by default.
"""
uri = self._create_uri(account_id, 'managedaccounts',
path=[client_account_id], use_projection=False,
dry_run=dry_run, warnings=warnings)
return self.delete(uri, auth_token=auth_token)
DeleteClientAccount = delete_client_account
def get_users_feed(self, account_id=None, auth_token=None):
"""Get the users feed for an account.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'users', use_projection=False)
return self.get_feed(uri, auth_token=auth_token, desired_class=UsersFeed)
GetUsersFeed = get_users_feed
def get_users_entry(self, user_email, account_id=None, auth_token=None):
"""Get a users feed entry for an account.
:param user_email: Email of the user entry to be retrieved.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(
account_id, 'users', path=[user_email], use_projection=False)
return self.get_entry(uri, auth_token=auth_token, desired_class=UsersEntry)
GetUsersEntry = get_users_entry
def insert_users_entry(self, entry, account_id=None, auth_token=None):
"""Insert a users feed entry for an account.
:param entry: A :class:`gdata.contentforshopping.data.UsersEntry` with
the required user data.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'users', use_projection=False)
return self.post(entry, uri=uri, auth_token=auth_token)
InsertUsersEntry = insert_users_entry
def update_users_entry(self, entry, account_id=None, auth_token=None):
"""Update a users feed entry for an account.
:param entry: A :class:`gdata.contentforshopping.data.UsersEntry` with
the required user data.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
# Could also use entry.find_edit_link() but that is inconsistent
# with the rest of the module
user_email = entry.title.text
uri = self._create_uri(
account_id, 'users', path=[user_email], use_projection=False)
return self.update(entry, uri=uri, auth_token=auth_token)
UpdateUsersEntry = update_users_entry
def delete_users_entry(self, entry, account_id=None, auth_token=None):
"""Delete a users feed entry for an account.
:param entry: A :class:`gdata.contentforshopping.data.UsersEntry` with
the required user data.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
# Could also use entry.find_edit_link() but that is inconsistent
# with the rest of the module
user_email = entry.title.text
uri = self._create_uri(
account_id, 'users', path=[user_email], use_projection=False)
return self.delete(uri, auth_token=auth_token)
DeleteUsersEntry = delete_users_entry
def get_data_quality_feed(self, account_id=None, auth_token=None,
max_results=None, start_index=None):
"""Get the data quality feed for an account.
:param max_results: The maximum number of results to return (default 25,
max 100).
:param start_index: The starting index of the feed to return.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
uri = self._create_uri(account_id, 'dataquality', use_projection=False,
max_results=max_results, start_index=start_index)
return self.get_feed(uri, auth_token=auth_token,
desired_class=DataQualityFeed)
GetDataQualityFeed = get_data_quality_feed
def get_data_quality_entry(self, secondary_account_id=None,
account_id=None, auth_token=None):
"""Get the data quality feed entry for an account.
:param secondary_account_id: The Account ID of the secondary account. If
ommitted the value of account_id is used.
:param account_id: The Merchant Center Account ID. If ommitted the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
if secondary_account_id is None:
secondary_account_id = account_id or self.cfs_account_id
uri = self._create_uri(account_id, 'dataquality',
path=[secondary_account_id],
use_projection=False)
return self.get_entry(uri, auth_token=auth_token,
desired_class=DataQualityEntry)
GetDataQualityEntry = get_data_quality_entry
def update_inventory_entry(self, product, id, country, language, store_code,
account_id=None, auth_token=None):
"""Make a local product update, by putting the inventory entry.
:param product: A :class:`gdata.contentforshopping.data.InventoryEntry`
with the required product data.
:param id: The product ID
:param country: The country (target_country)
:param language: The language (content_language)
:param store_code: The code for the store where this local product will
be updated.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
"""
pid = self._create_product_id(id, country, language, channel='local')
uri = self._create_uri(account_id, 'inventory',
path=[store_code, 'items', pid],
use_projection=False)
return self.update(product, uri=uri, auth_token=auth_token)
UpdateInventoryEntry = update_inventory_entry
def add_local_id(self, product, id, country, language,
store_code, account_id=None):
"""Add an atom id to a local product with a local store specific URI.
:param product: A :class:`gdata.contentforshopping.data.InventoryEntry`
with the required product data.
:param id: The product ID
:param country: The country (target_country)
:param language: The language (content_language)
:param store_code: The code for the store where this local product will
be updated.
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
"""
pid = self._create_product_id(id, country, language, channel='local')
uri = self._create_uri(account_id, 'inventory',
path=[store_code, 'items', pid],
use_projection=False)
product.id = atom.data.Id(uri)
return product
AddLocalId = add_local_id
def update_inventory_feed(self, products, account_id=None, auth_token=None):
"""Update a batch of local products, by putting the product entry feed.
:param products: A list containing entries of
:class:`gdata.contentforshopping.data.InventoryEntry`
with the required product data
:param account_id: The Merchant Center Account ID. If omitted, the default
Account ID will be used for this client
:param auth_token: An object which sets the Authorization HTTP header in its
modify_request method.
.. note:: Entries must have the atom:id element set. You can use
add_local_id to set this attribute using the store_code, product
id, country and language.
"""
feed = self._create_batch_feed(products, 'update',
feed_class=InventoryFeed)
uri = self._create_uri(account_id, 'inventory', path=['batch'],
use_projection=False)
return self.post(feed, uri=uri, auth_token=auth_token)
UpdateInventoryFeed = update_inventory_feed
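# Minimal usage sketch (the account ID and credentials below are
# placeholders; client_login comes from the gdata.client.GDClient base class):
#
#   client = ContentForShoppingClient(account_id='1234567')
#   client.client_login('merchant@example.com', 'password',
#                       'example-app', service='structuredcontent')
#   feed = client.get_products(max_results=25)
#   for product in feed.entry:
#     print product.product_id.text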
| gpl-3.0 | 7,100,893,692,773,919,000 | -3,115,393,009,330,745,300 | 44.225532 | 80 | 0.64076 | false |
kalahbrown/HueBigSQL | desktop/core/ext-py/django-openid-auth-0.5/django_openid_auth/tests/__init__.py | 44 | 1749 | # django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2009-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import unittest
from test_views import *
from test_store import *
from test_auth import *
from test_admin import *
def suite():
suite = unittest.TestSuite()
for name in ['test_auth', 'test_store', 'test_views', 'test_admin']:
mod = __import__('%s.%s' % (__name__, name), {}, {}, ['suite'])
suite.addTest(mod.suite())
return suite
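# The suite can be run with, for example:
#   python -m unittest django_openid_auth.tests.suite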
| apache-2.0 | -8,895,242,232,730,992,000 | -941,831,140,653,925,800 | 41.658537 | 72 | 0.748999 | false |
40223136/w11-2 | static/Brython3.1.0-20150301-090019/Lib/browser/object_storage.py | 627 | 1315 | import pickle
class _UnProvided():  # single leading underscore: '__UnProvided' would be name-mangled inside ObjectStorage
pass
class ObjectStorage():
def __init__(self, storage):
self.storage = storage
def __delitem__(self, key):
del self.storage[pickle.dumps(key)]
def __getitem__(self, key):
return pickle.loads(self.storage[pickle.dumps(key)])
def __setitem__(self, key, value):
self.storage[pickle.dumps(key)] = pickle.dumps(value)
def __contains__(self, key):
return pickle.dumps(key) in self.storage
def get(self, key, default=None):
if pickle.dumps(key) in self.storage:
return pickle.loads(self.storage[pickle.dumps(key)])  # unpickle, matching __getitem__
return default
def pop(self, key, default=_UnProvided()):
if type(default) is _UnProvided or pickle.dumps(key) in self.storage:
return pickle.loads(self.storage.pop(pickle.dumps(key)))
return default
def __iter__(self):
keys = self.keys()
return keys.__iter__()
def keys(self):
return [pickle.loads(key) for key in self.storage.keys()]
def values(self):
return [pickle.loads(val) for val in self.storage.values()]
def items(self):
return list(zip(self.keys(), self.values()))
def clear(self):
self.storage.clear()
def __len__(self):
return len(self.storage)
| gpl-3.0 | 1,996,923,692,838,046,700 | 6,407,969,009,051,403,000 | 24.784314 | 78 | 0.602281 | false |
andrewyoung1991/abjad | abjad/tools/layouttools/test/test_layouttools_set_line_breaks_by_line_duration_ge.py | 2 | 2235 | # -*- encoding: utf-8 -*-
from abjad import *
def test_layouttools_set_line_breaks_by_line_duration_ge_01():
r'''Iterate classes in expr and accumulate duration.
Add line break after every total le line duration.
'''
staff = Staff()
staff.append(Measure((2, 8), "c'8 d'8"))
staff.append(Measure((2, 8), "e'8 f'8"))
staff.append(Measure((2, 8), "g'8 a'8"))
staff.append(Measure((2, 8), "b'8 c''8"))
layouttools.set_line_breaks_by_line_duration_ge(
staff,
Duration(4, 8),
)
assert systemtools.TestManager.compare(
staff,
r'''
\new Staff {
{
\time 2/8
c'8
d'8
}
{
e'8
f'8
\break
}
{
g'8
a'8
}
{
b'8
c''8
\break
}
}
'''
)
assert inspect_(staff).is_well_formed()
def test_layouttools_set_line_breaks_by_line_duration_ge_02():
r'''Iterate classes in expr and accumulate duration.
Add line break after every total le line duration.
'''
staff = Staff()
staff.append(Measure((2, 8), "c'8 d'8"))
staff.append(Measure((2, 8), "e'8 f'8"))
staff.append(Measure((2, 8), "g'8 a'8"))
staff.append(Measure((2, 8), "b'8 c''8"))
layouttools.set_line_breaks_by_line_duration_ge(
staff,
Duration(1, 8),
line_break_class=scoretools.Leaf,
)
assert systemtools.TestManager.compare(
staff,
r'''
\new Staff {
{
\time 2/8
c'8
\break
d'8
\break
}
{
e'8
\break
f'8
\break
}
{
g'8
\break
a'8
\break
}
{
b'8
\break
c''8
\break
}
}
'''
)
assert inspect_(staff).is_well_formed() | gpl-3.0 | -6,911,960,644,850,030,000 | -5,558,694,503,453,996,000 | 21.585859 | 62 | 0.399553 | false |
Maspear/odoo | addons/hw_proxy/__openerp__.py | 313 | 1675 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Hardware Proxy',
'version': '1.0',
'category': 'Point Of Sale',
'sequence': 6,
'summary': 'Connect the Web Client to Hardware Peripherals',
'website': 'https://www.odoo.com/page/point-of-sale',
'description': """
Hardware Proxy
==============
This module allows you to remotely use peripherals connected to this server.
This module only contains the enabling framework. The actual device drivers
are found in other modules that must be installed separately.
""",
'author': 'OpenERP SA',
'depends': [],
'test': [
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 1,361,834,721,594,267,000 | 5,370,464,872,597,117,000 | 33.895833 | 78 | 0.619701 | false |
yorvic/.vim | bundle/python-mode/pylibs/pylama/checkers/pylint/logilab/common/modutils.py | 1 | 21802 | # -*- coding: utf-8 -*-
# copyright 2003-2012 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Python modules manipulation utility functions.
:type PY_SOURCE_EXTS: tuple(str)
:var PY_SOURCE_EXTS: list of possible python source file extension
:type STD_LIB_DIR: str
:var STD_LIB_DIR: directory where standard modules are located
:type BUILTIN_MODULES: dict
:var BUILTIN_MODULES: dictionary with builtin module names has key
"""
__docformat__ = "restructuredtext en"
import sys
import os
from os.path import splitext, join, abspath, isdir, dirname, exists, basename
from imp import find_module, load_module, C_BUILTIN, PY_COMPILED, PKG_DIRECTORY
from distutils.sysconfig import get_config_var, get_python_lib, get_python_version
from distutils.errors import DistutilsPlatformError
try:
import zipimport
except ImportError:
zipimport = None
ZIPFILE = object()
from . import STD_BLACKLIST, _handle_blacklist
# Notes about STD_LIB_DIR
# Consider arch-specific installation for STD_LIB_DIR definition
# :mod:`distutils.sysconfig` contains too many hardcoded values to rely on
#
# :see: `Problems with /usr/lib64 builds <http://bugs.python.org/issue1294959>`_
# :see: `FHS <http://www.pathname.com/fhs/pub/fhs-2.3.html#LIBLTQUALGTALTERNATEFORMATESSENTIAL>`_
if sys.platform.startswith('win'):
PY_SOURCE_EXTS = ('py', 'pyw')
PY_COMPILED_EXTS = ('dll', 'pyd')
else:
PY_SOURCE_EXTS = ('py',)
PY_COMPILED_EXTS = ('so',)
try:
STD_LIB_DIR = get_python_lib(standard_lib=1)
# get_python_lib(standard_lib=1) is not available on pypy, set STD_LIB_DIR to
# non-valid path, see https://bugs.pypy.org/issue1164
except DistutilsPlatformError:
STD_LIB_DIR = '//'
EXT_LIB_DIR = get_python_lib()
BUILTIN_MODULES = dict(zip(sys.builtin_module_names,
[1]*len(sys.builtin_module_names)))
class NoSourceFile(Exception):
"""exception raised when we are not able to get a python
source file for a precompiled file
"""
class LazyObject(object):
def __init__(self, module, obj):
self.module = module
self.obj = obj
self._imported = None
def _getobj(self):
if self._imported is None:
self._imported = getattr(load_module_from_name(self.module),
self.obj)
return self._imported
def __getattribute__(self, attr):
try:
return super(LazyObject, self).__getattribute__(attr)
except AttributeError, ex:
return getattr(self._getobj(), attr)
def __call__(self, *args, **kwargs):
return self._getobj()(*args, **kwargs)
def load_module_from_name(dotted_name, path=None, use_sys=1):
"""Load a Python module from its name.
:type dotted_name: str
:param dotted_name: python name of a module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
return load_module_from_modpath(dotted_name.split('.'), path, use_sys)
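# A minimal usage sketch for load_module_from_name (illustrative only; it
# simply wraps load_module_from_modpath, and 'os.path' is assumed importable):
#
#     mod = load_module_from_name('os.path')
#     assert mod is __import__('os').path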
def load_module_from_modpath(parts, path=None, use_sys=1):
"""Load a python module from its splitted name.
:type parts: list(str) or tuple(str)
:param parts:
python name of a module or package splitted on '.'
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
if use_sys:
try:
return sys.modules['.'.join(parts)]
except KeyError:
pass
modpath = []
prevmodule = None
for part in parts:
modpath.append(part)
curname = '.'.join(modpath)
module = None
if len(modpath) != len(parts):
# even with use_sys=False, should try to get outer packages from sys.modules
module = sys.modules.get(curname)
elif use_sys:
# because it may have been indirectly loaded through a parent
module = sys.modules.get(curname)
if module is None:
mp_file, mp_filename, mp_desc = find_module(part, path)
module = load_module(curname, mp_file, mp_filename, mp_desc)
if prevmodule:
setattr(prevmodule, part, module)
_file = getattr(module, '__file__', '')
if not _file and len(modpath) != len(parts):
            raise ImportError('no module in %s' % '.'.join(parts[len(modpath):]))
        path = [dirname(_file)]
prevmodule = module
return module
def load_module_from_file(filepath, path=None, use_sys=1, extrapath=None):
"""Load a Python module from it's path.
:type filepath: str
:param filepath: path to the python module or package
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type use_sys: bool
:param use_sys:
boolean indicating whether the sys.modules dictionary should be
used or not
:raise ImportError: if the module or package is not found
:rtype: module
:return: the loaded module
"""
modpath = modpath_from_file(filepath, extrapath)
return load_module_from_modpath(modpath, path, use_sys)
def _check_init(path, mod_path):
"""check there are some __init__.py all along the way"""
for part in mod_path:
path = join(path, part)
if not _has_init(path):
return False
return True
def modpath_from_file(filename, extrapath=None):
"""given a file path return the corresponding splitted module's name
(i.e name of a module or package splitted on '.')
:type filename: str
:param filename: file's path for which we want the module's name
:type extrapath: dict
:param extrapath:
optional extra search path, with path as key and package name for the path
      as value. This is usually useful to handle packages split across multiple
      directories using the __path__ trick.
:raise ImportError:
if the corresponding module's name has not been found
:rtype: list(str)
    :return: the corresponding split module's name
"""
base = splitext(abspath(filename))[0]
if extrapath is not None:
for path_ in extrapath:
path = abspath(path_)
if path and base[:len(path)] == path:
submodpath = [pkg for pkg in base[len(path):].split(os.sep)
if pkg]
if _check_init(path, submodpath[:-1]):
return extrapath[path_].split('.') + submodpath
for path in sys.path:
path = abspath(path)
if path and base.startswith(path):
modpath = [pkg for pkg in base[len(path):].split(os.sep) if pkg]
if _check_init(path, modpath[:-1]):
return modpath
raise ImportError('Unable to find module for %s in %s' % (
filename, ', \n'.join(sys.path)))
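# A usage sketch for modpath_from_file (illustrative; the path and the
# resulting module name are assumptions that depend on sys.path):
#
#     modpath_from_file('/usr/lib/python2.7/logilab/common/modutils.py')
#     # -> ['logilab', 'common', 'modutils'] if '/usr/lib/python2.7' is on sys.path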
def file_from_modpath(modpath, path=None, context_file=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file, giving priority to source file over precompiled
file if it exists
:type modpath: list or tuple
:param modpath:
      split module's name (i.e. name of a module or package split
on '.')
(this means explicit relative imports that start with dots have
empty strings in this list!)
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the path to the module's file or None if it's an integrated
builtin module such as 'sys'
"""
if context_file is not None:
context = dirname(context_file)
else:
context = context_file
if modpath[0] == 'xml':
# handle _xmlplus
try:
return _file_from_modpath(['_xmlplus'] + modpath[1:], path, context)
except ImportError:
return _file_from_modpath(modpath, path, context)
elif modpath == ['os', 'path']:
# FIXME: currently ignoring search_path...
return os.path.__file__
return _file_from_modpath(modpath, path, context)
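# A usage sketch for file_from_modpath (illustrative; actual paths vary):
#
#     file_from_modpath(['os', 'path'])  # -> the os.path module file
#     file_from_modpath(['sys'])         # -> None (integrated builtin module)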
def get_module_part(dotted_name, context_file=None):
"""given a dotted name return the module part of the name :
>>> get_module_part('logilab.common.modutils.get_module_part')
'logilab.common.modutils'
:type dotted_name: str
:param dotted_name: full name of the identifier we are interested in
:type context_file: str or None
:param context_file:
context file to consider, necessary if the identifier has been
introduced using a relative import unresolvable in the actual
context (i.e. modutils)
:raise ImportError: if there is no such module in the directory
:rtype: str or None
:return:
the module part of the name or None if we have not been able at
all to import the given name
XXX: deprecated, since it doesn't handle package precedence over module
(see #10066)
"""
# os.path trick
if dotted_name.startswith('os.path'):
return 'os.path'
parts = dotted_name.split('.')
if context_file is not None:
        # first check for builtin module which won't be considered later
# in that case (path != None)
if parts[0] in BUILTIN_MODULES:
if len(parts) > 2:
raise ImportError(dotted_name)
return parts[0]
# don't use += or insert, we want a new list to be created !
path = None
starti = 0
if parts[0] == '':
assert context_file is not None, \
'explicit relative import, but no context_file?'
path = [] # prevent resolving the import non-relatively
starti = 1
while parts[starti] == '': # for all further dots: change context
starti += 1
context_file = dirname(context_file)
for i in range(starti, len(parts)):
try:
file_from_modpath(parts[starti:i+1],
path=path, context_file=context_file)
except ImportError:
if not i >= max(1, len(parts) - 2):
raise
return '.'.join(parts[:i])
return dotted_name
def get_modules(package, src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
modules in the package and its subpackages
:type package: str
:param package: the python name for the package
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to
the value of `logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python modules in the package and its
subpackages
"""
modules = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
if not '__init__.py' in filenames:
dirnames[:] = ()
continue
if directory != src_directory:
dir_package = directory[len(src_directory):].replace(os.sep, '.')
modules.append(package + dir_package)
for filename in filenames:
if _is_python_file(filename) and filename != '__init__.py':
src = join(directory, filename)
module = package + src[len(src_directory):-3]
modules.append(module.replace(os.sep, '.'))
return modules
def get_module_files(src_directory, blacklist=STD_BLACKLIST):
"""given a package directory return a list of all available python
module's files in the package and its subpackages
:type src_directory: str
:param src_directory:
path of the directory corresponding to the package
:type blacklist: list or tuple
:param blacklist:
optional list of files or directory to ignore, default to the value of
`logilab.common.STD_BLACKLIST`
:rtype: list
:return:
the list of all available python module's files in the package and
its subpackages
"""
files = []
for directory, dirnames, filenames in os.walk(src_directory):
_handle_blacklist(blacklist, dirnames, filenames)
# check for __init__.py
if not '__init__.py' in filenames:
dirnames[:] = ()
continue
for filename in filenames:
if _is_python_file(filename):
src = join(directory, filename)
files.append(src)
return files
def get_source_file(filename, include_no_ext=False):
"""given a python module's file name return the matching source file
    name (the filename will be returned identically if it's already an
absolute path to a python source file...)
:type filename: str
:param filename: python module's file name
:raise NoSourceFile: if no source file exists on the file system
:rtype: str
:return: the absolute path of the source file if it exists
"""
base, orig_ext = splitext(abspath(filename))
for ext in PY_SOURCE_EXTS:
source_path = '%s.%s' % (base, ext)
if exists(source_path):
return source_path
if include_no_ext and not orig_ext and exists(base):
return base
raise NoSourceFile(filename)
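# A usage sketch for get_source_file (illustrative; '/some/pkg/mod.pyc' is an
# assumed path):
#
#     get_source_file('/some/pkg/mod.pyc')  # -> '/some/pkg/mod.py' if it exists
#     # raises NoSourceFile when no matching source file is found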
def cleanup_sys_modules(directories):
"""remove submodules of `directories` from `sys.modules`"""
for modname, module in sys.modules.items():
modfile = getattr(module, '__file__', None)
if modfile:
for directory in directories:
if modfile.startswith(directory):
del sys.modules[modname]
break
def is_python_source(filename):
"""
rtype: bool
return: True if the filename is a python source file
"""
return splitext(filename)[1][1:] in PY_SOURCE_EXTS
def is_standard_module(modname, std_path=(STD_LIB_DIR,)):
"""try to guess if a module is a standard python module (by default,
see `std_path` parameter's description)
:type modname: str
:param modname: name of the module we are interested in
:type std_path: list(str) or tuple(str)
    :param std_path: list of paths considered as standard
:rtype: bool
:return:
true if the module:
- is located on the path listed in one of the directory in `std_path`
- is a built-in module
"""
modname = modname.split('.')[0]
try:
filename = file_from_modpath([modname])
except ImportError, ex:
        # import failed, I'm probably not so wrong by supposing it's
# not standard...
return 0
# modules which are not living in a file are considered standard
# (sys and __builtin__ for instance)
if filename is None:
return 1
filename = abspath(filename)
if filename.startswith(EXT_LIB_DIR):
return 0
for path in std_path:
if filename.startswith(abspath(path)):
return 1
return False
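# A usage sketch for is_standard_module (illustrative; return values are
# truthy/falsy integers as implemented above):
#
#     is_standard_module('os')              # -> true-ish: lives in STD_LIB_DIR
#     is_standard_module('logilab.common')  # -> false-ish: lives in EXT_LIB_DIR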
def is_relative(modname, from_file):
"""return true if the given module name is relative to the given
file name
:type modname: str
:param modname: name of the module we are interested in
:type from_file: str
:param from_file:
path of the module from which modname has been imported
:rtype: bool
:return:
true if the module has been imported relatively to `from_file`
"""
if not isdir(from_file):
from_file = dirname(from_file)
if from_file in sys.path:
return False
try:
find_module(modname.split('.')[0], [from_file])
return True
except ImportError:
return False
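# A usage sketch for is_relative (illustrative; the directory layout is an
# assumption):
#
#     # mypkg/ contains caller.py and helper.py, and mypkg is not on sys.path:
#     is_relative('helper', '/path/to/mypkg/caller.py')  # -> True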
# internal only functions #####################################################
def _file_from_modpath(modpath, path=None, context=None):
"""given a mod path (i.e. splitted module / package name), return the
corresponding file
this function is used internally, see `file_from_modpath`'s
documentation for more information
"""
assert len(modpath) > 0
if context is not None:
try:
mtype, mp_filename = _module_file(modpath, [context])
except ImportError:
mtype, mp_filename = _module_file(modpath, path)
else:
mtype, mp_filename = _module_file(modpath, path)
if mtype == PY_COMPILED:
try:
return get_source_file(mp_filename)
except NoSourceFile:
return mp_filename
elif mtype == C_BUILTIN:
# integrated builtin module
return None
elif mtype == PKG_DIRECTORY:
mp_filename = _has_init(mp_filename)
return mp_filename
def _search_zip(modpath, pic):
for filepath, importer in pic.items():
if importer is not None:
if importer.find_module(modpath[0]):
if not importer.find_module('/'.join(modpath)):
raise ImportError('No module named %s in %s/%s' % (
                        '.'.join(modpath[1:]), filepath, modpath))
return ZIPFILE, abspath(filepath) + '/' + '/'.join(modpath), filepath
raise ImportError('No module named %s' % '.'.join(modpath))
def _module_file(modpath, path=None):
"""get a module type / file path
:type modpath: list or tuple
:param modpath:
splitted module's name (i.e name of a module or package splitted
on '.'), with leading empty strings for explicit relative import
:type path: list or None
:param path:
optional list of path where the module or package should be
searched (use sys.path if nothing or None is given)
:rtype: tuple(int, str)
:return: the module type flag and the file path for a module
"""
# egg support compat
try:
pic = sys.path_importer_cache
_path = (path is None and sys.path or path)
for __path in _path:
if not __path in pic:
try:
pic[__path] = zipimport.zipimporter(__path)
except zipimport.ZipImportError:
pic[__path] = None
checkeggs = True
except AttributeError:
checkeggs = False
imported = []
while modpath:
try:
_, mp_filename, mp_desc = find_module(modpath[0], path)
except ImportError:
if checkeggs:
return _search_zip(modpath, pic)[:2]
raise
else:
if checkeggs:
fullabspath = [abspath(x) for x in _path]
try:
pathindex = fullabspath.index(dirname(abspath(mp_filename)))
emtype, emp_filename, zippath = _search_zip(modpath, pic)
if pathindex > _path.index(zippath):
# an egg takes priority
return emtype, emp_filename
except ValueError:
# XXX not in _path
pass
except ImportError:
pass
checkeggs = False
imported.append(modpath.pop(0))
mtype = mp_desc[2]
if modpath:
if mtype != PKG_DIRECTORY:
raise ImportError('No module %s in %s' % ('.'.join(modpath),
'.'.join(imported)))
path = [mp_filename]
return mtype, mp_filename
def _is_python_file(filename):
"""return true if the given filename should be considered as a python file
.pyc and .pyo are ignored
"""
for ext in ('.py', '.so', '.pyd', '.pyw'):
if filename.endswith(ext):
return True
return False
def _has_init(directory):
"""if the given directory has a valid __init__ file, return its path,
else return None
"""
mod_or_pack = join(directory, '__init__')
for ext in PY_SOURCE_EXTS + ('pyc', 'pyo'):
if exists(mod_or_pack + '.' + ext):
return mod_or_pack + '.' + ext
return None
| gpl-3.0 | -5,480,387,907,231,285,000 | -3,506,377,209,678,698,000 | 32.133739 | 97 | 0.622236 | false |
blacklin/kbengine | kbe/res/scripts/common/Lib/ensurepip/__init__.py | 67 | 6388 | import os
import os.path
import pkgutil
import sys
import tempfile
__all__ = ["version", "bootstrap"]
_SETUPTOOLS_VERSION = "2.1"
_PIP_VERSION = "1.5.6"
# pip currently requires ssl support, so we try to provide a nicer
# error message when that is missing (http://bugs.python.org/issue19744)
_MISSING_SSL_MESSAGE = ("pip {} requires SSL/TLS".format(_PIP_VERSION))
try:
import ssl
except ImportError:
ssl = None
def _require_ssl_for_pip():
raise RuntimeError(_MISSING_SSL_MESSAGE)
else:
def _require_ssl_for_pip():
pass
_PROJECTS = [
("setuptools", _SETUPTOOLS_VERSION),
("pip", _PIP_VERSION),
]
def _run_pip(args, additional_paths=None):
# Add our bundled software to the sys.path so we can import it
if additional_paths is not None:
sys.path = additional_paths + sys.path
# Install the bundled software
import pip
pip.main(args)
def version():
"""
Returns a string specifying the bundled version of pip.
"""
return _PIP_VERSION
def _disable_pip_configuration_settings():
# We deliberately ignore all pip environment variables
# when invoking pip
# See http://bugs.python.org/issue19734 for details
keys_to_remove = [k for k in os.environ if k.startswith("PIP_")]
for k in keys_to_remove:
del os.environ[k]
# We also ignore the settings in the default pip configuration file
# See http://bugs.python.org/issue20053 for details
os.environ['PIP_CONFIG_FILE'] = os.devnull
def bootstrap(*, root=None, upgrade=False, user=False,
altinstall=False, default_pip=False,
verbosity=0):
"""
Bootstrap pip into the current Python installation (or the given root
directory).
Note that calling this function will alter both sys.path and os.environ.
"""
if altinstall and default_pip:
raise ValueError("Cannot use altinstall and default_pip together")
_require_ssl_for_pip()
_disable_pip_configuration_settings()
# By default, installing pip and setuptools installs all of the
# following scripts (X.Y == running Python version):
#
# pip, pipX, pipX.Y, easy_install, easy_install-X.Y
#
# pip 1.5+ allows ensurepip to request that some of those be left out
if altinstall:
# omit pip, pipX and easy_install
os.environ["ENSUREPIP_OPTIONS"] = "altinstall"
elif not default_pip:
# omit pip and easy_install
os.environ["ENSUREPIP_OPTIONS"] = "install"
with tempfile.TemporaryDirectory() as tmpdir:
# Put our bundled wheels into a temporary directory and construct the
# additional paths that need added to sys.path
additional_paths = []
for project, version in _PROJECTS:
wheel_name = "{}-{}-py2.py3-none-any.whl".format(project, version)
whl = pkgutil.get_data(
"ensurepip",
"_bundled/{}".format(wheel_name),
)
with open(os.path.join(tmpdir, wheel_name), "wb") as fp:
fp.write(whl)
additional_paths.append(os.path.join(tmpdir, wheel_name))
# Construct the arguments to be passed to the pip command
args = ["install", "--no-index", "--find-links", tmpdir]
if root:
args += ["--root", root]
if upgrade:
args += ["--upgrade"]
if user:
args += ["--user"]
if verbosity:
args += ["-" + "v" * verbosity]
_run_pip(args + [p[0] for p in _PROJECTS], additional_paths)
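# A minimal invocation sketch (illustrative; not an officially documented
# recipe -- the keyword values are assumptions):
#
#     bootstrap(user=True, upgrade=True, verbosity=1)
#     # installs the bundled setuptools and pip wheels into the user site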
def _uninstall_helper(*, verbosity=0):
"""Helper to support a clean default uninstall process on Windows
Note that calling this function may alter os.environ.
"""
# Nothing to do if pip was never installed, or has been removed
try:
import pip
except ImportError:
return
# If the pip version doesn't match the bundled one, leave it alone
if pip.__version__ != _PIP_VERSION:
msg = ("ensurepip will only uninstall a matching version "
"({!r} installed, {!r} bundled)")
print(msg.format(pip.__version__, _PIP_VERSION), file=sys.stderr)
return
_require_ssl_for_pip()
_disable_pip_configuration_settings()
# Construct the arguments to be passed to the pip command
args = ["uninstall", "-y"]
if verbosity:
args += ["-" + "v" * verbosity]
_run_pip(args + [p[0] for p in reversed(_PROJECTS)])
def _main(argv=None):
if ssl is None:
print("Ignoring ensurepip failure: {}".format(_MISSING_SSL_MESSAGE),
file=sys.stderr)
return
import argparse
parser = argparse.ArgumentParser(prog="python -m ensurepip")
parser.add_argument(
"--version",
action="version",
version="pip {}".format(version()),
help="Show the version of pip that is bundled with this Python.",
)
parser.add_argument(
"-v", "--verbose",
action="count",
default=0,
dest="verbosity",
help=("Give more output. Option is additive, and can be used up to 3 "
"times."),
)
parser.add_argument(
"-U", "--upgrade",
action="store_true",
default=False,
help="Upgrade pip and dependencies, even if already installed.",
)
parser.add_argument(
"--user",
action="store_true",
default=False,
help="Install using the user scheme.",
)
parser.add_argument(
"--root",
default=None,
help="Install everything relative to this alternate root directory.",
)
parser.add_argument(
"--altinstall",
action="store_true",
default=False,
help=("Make an alternate install, installing only the X.Y versioned"
"scripts (Default: pipX, pipX.Y, easy_install-X.Y)"),
)
parser.add_argument(
"--default-pip",
action="store_true",
default=False,
help=("Make a default pip install, installing the unqualified pip "
"and easy_install in addition to the versioned scripts"),
)
args = parser.parse_args(argv)
bootstrap(
root=args.root,
upgrade=args.upgrade,
user=args.user,
verbosity=args.verbosity,
altinstall=args.altinstall,
default_pip=args.default_pip,
)
| lgpl-3.0 | -644,154,557,893,203,700 | 2,304,637,892,101,856,300 | 29.419048 | 78 | 0.608172 | false |
zhoffice/minos | client/package.py | 5 | 8146 | import argparse
import glob
import hashlib
import os
import pprint
import subprocess
import yaml
import deploy_config
from log import Log
from tank_client import TankClient
# FAKE_SVN_VERSION is referenced below but was never defined in this file;
# the concrete value is an assumption so the fallback path stays runnable.
FAKE_SVN_VERSION = 12345
def check_directory(path):
if not os.path.exists(path):
Log.print_critical(
        "Directory doesn't exist: " + path)
if not os.path.isdir(path):
Log.print_critical(
'NOT a directory: ' + path)
if not os.access(path, os.X_OK):
Log.print_critical(
        "Can't cd to: " + path)
def check_file(path):
if not os.path.exists(path):
Log.print_critical(
        "File doesn't exist: " + path)
if not os.path.isfile(path):
Log.print_critical(
'NOT a file: ' + path)
if not os.access(path, os.R_OK):
Log.print_critical(
        "Can't read file: " + path)
def get_package_config_dir():
return deploy_config.get_deploy_config().get_config_dir() + '/package'
def get_package_config_file(package):
return '%s/%s.yaml' % (get_package_config_dir(), package)
def get_package_config(package):
return yaml.load(open(get_package_config_file(package)))
def get_tank_client():
'''
A factory method to construct a tank(package server) client object.
'''
tank_config = deploy_config.get_deploy_config().get_tank_config()
return TankClient(tank_config.get('server_host'),
tank_config.get('server_port'))
def get_revision_number(cmd, output_prefix, work_space_dir):
env = os.environ
# Enforce English locale.
env["LC_ALL"] = "C"
current_work_dir = os.getcwd()
os.chdir(work_space_dir)
content = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
os.chdir(current_work_dir)
for line in content.splitlines():
if line.startswith(output_prefix):
return line[len(output_prefix):]
def generate_package_revision(root):
'''
Get the revision of the package. Currently, svn revision and git commit are
supported. If the package directory is neither a svn working directory nor
a git working directory, a fake revision will be returned.
@param root the local package root directory
@return string the revision of the package
'''
if os.path.islink(root):
real_path = os.readlink(root)
if not real_path.startswith('/'):
abs_path = "%s/%s" % (os.path.dirname(root), real_path)
else:
abs_path = real_path
else:
abs_path = root
try:
try:
cmd = ["svn", "info"]
revision_prefix = "Revision: "
return "r%s" % get_revision_number(cmd, revision_prefix, abs_path)
except:
cmd = ["git", "show"]
commit_prefix = "commit "
return get_revision_number(cmd, commit_prefix, abs_path)
except:
# We cannot get the version No., just return a fake one
return "r%s" % FAKE_SVN_VERSION
def generate_checksum(path):
'''
Generate the SHA-1 digest of specified file.
@param path the path of the file
@return string the SHA-1 digest
'''
fd = open(path, "r")
sha1 = hashlib.sha1()
while True:
buffer = fd.read(4096)
if not buffer: break
sha1.update(buffer)
fd.close()
return sha1.hexdigest()
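# A usage sketch for generate_checksum (illustrative; the file path is an
# assumption):
#
#     generate_checksum('/tmp/example.tar.gz')
#     # -> 40-character SHA-1 hex string, e.g. 'da39a3ee5e6b4b0d3255bfef95601890afd80709'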
def upload_package(artifact, package_tarball, package_source):
'''
Upload the specified package to the package server(Tank). Note that
if the file with the same checksum is already uploaded, this uploading
will be skipped.
@param artifact the artifact of the package
@return dict the package information return by the package server
'''
Log.print_info("Uploading pacakge: %s" % package_tarball)
revision = generate_package_revision(package_source)
Log.print_success("Revision is: %s" % revision)
Log.print_info("Generating checksum of package: %s" % package_tarball)
checksum = generate_checksum(package_tarball)
Log.print_success("Checksum is: %s" % checksum)
tank_client = get_tank_client()
package_info = tank_client.check_package(artifact, checksum)
if not package_info:
if 200 == tank_client.upload(package_tarball, artifact, revision):
Log.print_success("Upload package %s success" % package_tarball)
package_info = tank_client.check_package(artifact, checksum)
return eval(package_info)
else:
Log.print_warning("Package %s has already uploaded, skip uploading" %
package_tarball)
return eval(package_info)
return None
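# A call sketch for upload_package (illustrative; artifact name and paths are
# assumptions):
#
#     info = upload_package('hbase', '/tmp/hbase-0.94.0.tar.gz', '/repo/hbase')
#     # -> dict with the package metadata from Tank, or None if upload failed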
def parse_command_line():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Manage Minos packages.')
parser.add_argument('--version', action='version',
version='%(prog)s 1.0.0-beta')
parser.add_argument('-v', '--verbosity', default=0, type=int,
help='The verbosity level of log, higher value, more details.')
subparsers = parser.add_subparsers(
title='commands',
help='Type \'%(prog)s command -h\' to get more help for individual '
'command.')
sub_parser = subparsers.add_parser(
'list',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help='List packages, locally or remotely.')
sub_parser.add_argument('--remote', action='store_true',
help='List remote packages.')
sub_parser.set_defaults(handler=process_command_list)
sub_parser = subparsers.add_parser(
'build',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help='Build local package.')
sub_parser.add_argument('package',
help='The package name.')
sub_parser.set_defaults(handler=process_command_build)
sub_parser = subparsers.add_parser(
'install',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help='Install binary packages from local to remote package server.')
sub_parser.add_argument('--make_current', action='store_false',
    help='Make the installed package the current version.')
sub_parser.add_argument('package',
help='The package name.')
sub_parser.set_defaults(handler=process_command_install)
args = parser.parse_args()
Log.verbosity = args.verbosity
return args
def process_command_list(args):
if not args.remote:
# list local packages.
Log.print_info('All local packages:')
print '[package]: [artifact] [version]'
for path in glob.glob(get_package_config_file('*')):
basename = os.path.basename(path)
package = basename[:-len('.yaml')]
      package_config = get_package_config(package)
print '%s: %s %s' % (
package, package_config['artifact'], package_config['version'])
else:
# list local packages.
Log.print_critical('Not implemented yet!')
def process_command_build(args):
package_file = get_package_config_file(args.package)
  package_config = get_package_config(args.package)
package_dir = os.path.dirname(package_file)
package_source = os.path.abspath(
os.path.join(package_dir, package_config['source']))
check_directory(package_source)
subprocess.check_call(
'cd %s; %s' % (package_source, package_config['build']), shell=True)
def process_command_install(args):
package_file = get_package_config_file(args.package)
  package_config = get_package_config(args.package)
package_dir = os.path.dirname(package_file)
package_source = os.path.abspath(
os.path.join(package_dir, package_config['source']))
package_tarball = os.path.abspath(
os.path.join(package_source, package_config['package']['tarball']))
# the abspath would remove the trailing slash, so we have to check the
# original config.
if package_config['package']['tarball'][-1] == '/':
package_tarball += '/%s-%s.tar.gz' % (
package_config['artifact'], package_config['version'])
check_file(package_tarball)
Log.print_info("Installing %s to package server" % package_config['artifact'])
package_info = upload_package(
package_config['artifact'], package_tarball, package_source)
if package_info:
Log.print_success("Install %s to package server success" %
package_config['artifact'])
pprint.pprint(package_info)
else:
Log.print_critical("Install %s to package server fail" %
package_config['artifact'])
def main():
args = parse_command_line()
return args.handler(args)
if __name__ == '__main__':
main()
| apache-2.0 | -8,566,301,969,603,299,000 | 1,400,358,364,857,785,900 | 31.584 | 80 | 0.683157 | false |
kennedyshead/home-assistant | tests/components/nut/test_config_flow.py | 2 | 11140 | """Test the Network UPS Tools (NUT) config flow."""
from unittest.mock import patch
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.nut.const import DOMAIN
from homeassistant.const import CONF_RESOURCES, CONF_SCAN_INTERVAL
from .util import _get_mock_pynutclient
from tests.common import MockConfigEntry
VALID_CONFIG = {
"host": "localhost",
"port": 123,
"name": "name",
"resources": ["battery.charge"],
}
async def test_form_zeroconf(hass):
"""Test we can setup from zeroconf."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={"host": "192.168.1.5", "port": 1234},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage", "ups.status": "OL"}, list_ups=["ups1"]
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"username": "test-username", "password": "test-password"},
)
assert result2["step_id"] == "resources"
assert result2["type"] == "form"
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch(
"homeassistant.components.nut.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"resources": ["battery.voltage", "ups.status", "ups.status.display"]},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "192.168.1.5:1234"
assert result3["data"] == {
"host": "192.168.1.5",
"password": "test-password",
"port": 1234,
"resources": ["battery.voltage", "ups.status", "ups.status.display"],
"username": "test-username",
}
assert result3["result"].unique_id is None
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_one_ups(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage", "ups.status": "OL"}, list_ups=["ups1"]
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"username": "test-username",
"password": "test-password",
"port": 2222,
},
)
assert result2["step_id"] == "resources"
assert result2["type"] == "form"
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch(
"homeassistant.components.nut.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"resources": ["battery.voltage", "ups.status", "ups.status.display"]},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "1.1.1.1:2222"
assert result3["data"] == {
"host": "1.1.1.1",
"password": "test-password",
"port": 2222,
"resources": ["battery.voltage", "ups.status", "ups.status.display"],
"username": "test-username",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_multiple_ups(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
config_entry = MockConfigEntry(
domain=DOMAIN,
data={"host": "2.2.2.2", "port": 123, "resources": ["battery.charge"]},
options={CONF_RESOURCES: ["battery.charge"]},
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage"},
list_ups={"ups1": "UPS 1", "ups2": "UPS2"},
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"username": "test-username",
"password": "test-password",
"port": 2222,
},
)
assert result2["step_id"] == "ups"
assert result2["type"] == "form"
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"alias": "ups2"},
)
assert result3["step_id"] == "resources"
assert result3["type"] == "form"
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch(
"homeassistant.components.nut.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result4 = await hass.config_entries.flow.async_configure(
result3["flow_id"],
{"resources": ["battery.voltage"]},
)
await hass.async_block_till_done()
assert result4["type"] == "create_entry"
assert result4["title"] == "[email protected]:2222"
assert result4["data"] == {
"host": "1.1.1.1",
"password": "test-password",
"alias": "ups2",
"port": 2222,
"resources": ["battery.voltage"],
"username": "test-username",
}
assert len(mock_setup_entry.mock_calls) == 2
async def test_form_user_one_ups_with_ignored_entry(hass):
"""Test we can setup a new one when there is an ignored one."""
ignored_entry = MockConfigEntry(
domain=DOMAIN, data={}, source=config_entries.SOURCE_IGNORE
)
ignored_entry.add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage", "ups.status": "OL"}, list_ups=["ups1"]
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"username": "test-username",
"password": "test-password",
"port": 2222,
},
)
assert result2["step_id"] == "resources"
assert result2["type"] == "form"
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch(
"homeassistant.components.nut.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"],
{"resources": ["battery.voltage", "ups.status", "ups.status.display"]},
)
await hass.async_block_till_done()
assert result3["type"] == "create_entry"
assert result3["title"] == "1.1.1.1:2222"
assert result3["data"] == {
"host": "1.1.1.1",
"password": "test-password",
"port": 2222,
"resources": ["battery.voltage", "ups.status", "ups.status.display"],
"username": "test-username",
}
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_pynut = _get_mock_pynutclient()
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
"host": "1.1.1.1",
"username": "test-username",
"password": "test-password",
"port": 2222,
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_options_flow(hass):
"""Test config flow options."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id="abcde12345",
data=VALID_CONFIG,
options={CONF_RESOURCES: ["battery.charge"]},
)
config_entry.add_to_hass(hass)
mock_pynut = _get_mock_pynutclient(
list_vars={"battery.voltage": "voltage"}, list_ups=["ups1"]
)
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch("homeassistant.components.nut.async_setup_entry", return_value=True):
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_RESOURCES: ["battery.voltage"]}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
CONF_RESOURCES: ["battery.voltage"],
CONF_SCAN_INTERVAL: 60,
}
with patch(
"homeassistant.components.nut.PyNUTClient",
return_value=mock_pynut,
), patch("homeassistant.components.nut.async_setup_entry", return_value=True):
result2 = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["step_id"] == "init"
result2 = await hass.config_entries.options.async_configure(
result2["flow_id"],
user_input={CONF_RESOURCES: ["battery.voltage"], CONF_SCAN_INTERVAL: 12},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
CONF_RESOURCES: ["battery.voltage"],
CONF_SCAN_INTERVAL: 12,
}
| apache-2.0 | -3,020,498,777,597,277,000 | -6,958,309,283,485,022,000 | 31.383721 | 87 | 0.584201 | false |
moreati/pandashells | pandashells/lib/arg_lib.py | 7 | 6681 | from pandashells.lib import config_lib
def _check_for_recognized_args(*args):
"""
Raise an error if unrecognized argset is specified
"""
allowed_arg_set = set([
'io_in',
'io_out',
'example',
'xy_plotting',
'decorating',
])
in_arg_set = set(args)
unrecognized_set = in_arg_set - allowed_arg_set
if unrecognized_set:
msg = '{} not in allowed set {}'.format(unrecognized_set,
allowed_arg_set)
raise ValueError(msg)
def _io_in_adder(parser, config_dict, *args):
"""
Add input options to the parser
"""
in_arg_set = set(args)
if 'io_in' in in_arg_set:
group = parser.add_argument_group('Input Options')
# define the valid components
io_opt_list = ['csv', 'table', 'header', 'noheader']
# allow the option of supplying input column names
msg = 'Overwrite input column names with this list'
group.add_argument(
'--names', nargs='+', type=str, dest='names',
metavar="name", help=msg)
default_for_input = [
config_dict['io_input_type'],
config_dict['io_input_header']
]
msg = 'Must be one of {}'.format(repr(io_opt_list))
group.add_argument(
'-i', '--input_options', nargs='+', type=str, dest='input_options',
metavar='option', default=default_for_input, choices=io_opt_list,
help=msg)
def _io_out_adder(parser, config_dict, *args):
"""
Add output options to the parser
"""
in_arg_set = set(args)
if 'io_out' in in_arg_set:
group = parser.add_argument_group('Output Options')
# define the valid components
io_opt_list = [
'csv', 'table', 'html', 'header', 'noheader', 'index', 'noindex',
]
# define the current defaults
default_for_output = [
config_dict['io_output_type'],
config_dict['io_output_header'],
config_dict['io_output_index']
]
# show the current defaults in the arg parser
msg = 'Must be one of {}'.format(repr(io_opt_list))
group.add_argument(
'-o', '--output_options', nargs='+',
type=str, dest='output_options', metavar='option',
default=default_for_output, help=msg)
msg = (
'Replace NaNs with this string. '
'A string containing \'nan\' will set na_rep to numpy NaN. '
'Current default is {}'
).format(repr(str(config_dict['io_output_na_rep'])))
group.add_argument(
'--output_na_rep', nargs=1, type=str, dest='io_output_na_rep',
help=msg)
def _decorating_adder(parser, *args):
in_arg_set = set(args)
if 'decorating' in in_arg_set:
# get a list of valid plot styling info
context_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_context'][0][1]
theme_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_theme'][0][1]
palette_list = [t for t in config_lib.CONFIG_OPTS if
t[0] == 'plot_palette'][0][1]
group = parser.add_argument_group('Plot specific Options')
msg = "Set the x-limits for the plot"
group.add_argument(
'--xlim', nargs=2, type=float, dest='xlim',
metavar=('XMIN', 'XMAX'), help=msg)
msg = "Set the y-limits for the plot"
group.add_argument(
'--ylim', nargs=2, type=float, dest='ylim',
metavar=('YMIN', 'YMAX'), help=msg)
msg = "Draw x axis with log scale"
group.add_argument(
'--xlog', action='store_true', dest='xlog', default=False,
help=msg)
msg = "Draw y axis with log scale"
group.add_argument(
'--ylog', action='store_true', dest='ylog', default=False,
help=msg)
msg = "Set the x-label for the plot"
group.add_argument(
'--xlabel', nargs=1, type=str, dest='xlabel', help=msg)
msg = "Set the y-label for the plot"
group.add_argument(
'--ylabel', nargs=1, type=str, dest='ylabel', help=msg)
msg = "Set the title for the plot"
group.add_argument(
'--title', nargs=1, type=str, dest='title', help=msg)
msg = "Specify legend location"
group.add_argument(
'--legend', nargs=1, type=str, dest='legend',
choices=['1', '2', '3', '4', 'best'], help=msg)
msg = "Specify whether hide the grid or not"
group.add_argument(
'--nogrid', action='store_true', dest='no_grid', default=False,
help=msg)
msg = "Specify plot context. Default = '{}' ".format(context_list[0])
group.add_argument(
'--context', nargs=1, type=str, dest='plot_context',
default=[context_list[0]], choices=context_list, help=msg)
msg = "Specify plot theme. Default = '{}' ".format(theme_list[0])
group.add_argument(
'--theme', nargs=1, type=str, dest='plot_theme',
default=[theme_list[0]], choices=theme_list, help=msg)
msg = "Specify plot palette. Default = '{}' ".format(palette_list[0])
group.add_argument(
'--palette', nargs=1, type=str, dest='plot_palette',
default=[palette_list[0]], choices=palette_list, help=msg)
msg = "Save the figure to this file"
group.add_argument('--savefig', nargs=1, type=str, help=msg)
def _xy_adder(parser, *args):
in_arg_set = set(args)
if 'xy_plotting' in in_arg_set:
msg = 'Column to plot on x-axis'
parser.add_argument(
'-x', nargs=1, type=str, dest='x', metavar='col', help=msg)
msg = 'List of columns to plot on y-axis'
parser.add_argument(
'-y', nargs='+', type=str, dest='y', metavar='col', help=msg)
msg = "Plot style(s) defaults to .-"
parser.add_argument(
'-s', '--style', nargs='+', type=str, dest='style', default=['.-'],
help=msg, metavar='style')
def add_args(parser, *args):
"""Adds argument blocks to the arg parser
:type parser: argparse instance
:param parser: The argarse instance to use in adding arguments
    Additional arguments are the names of argument blocks to add
"""
config_dict = config_lib.get_config()
_check_for_recognized_args(*args)
_io_in_adder(parser, config_dict, *args)
_io_out_adder(parser, config_dict, *args)
_decorating_adder(parser, *args)
_xy_adder(parser, *args)
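# A minimal usage sketch for add_args (illustrative; the block names come from
# the allowed set checked above):
#
#     import argparse
#     parser = argparse.ArgumentParser()
#     add_args(parser, 'io_in', 'io_out', 'xy_plotting', 'decorating')
#     args = parser.parse_args()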
| bsd-2-clause | -1,454,540,986,515,601,200 | -601,875,683,908,631,000 | 36.324022 | 79 | 0.552163 | false |
andybak/hendrix | hendrix/test/testproject/settings.py | 4 | 1047 |
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
ALLOWED_HOSTS = []
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
SECRET_KEY = 'NOTREALt@k0)scq$uuph3gjpbhjhd%ipe)04f5d^^1%)%my(%b6&pus_2NOTREAL'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'hendrix.tests.testproject.urls'
WSGI_APPLICATION = 'hendrix.test.testproject.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
| mit | 8,536,560,996,544,536,000 | 2,617,278,190,657,626,000 | 22.795455 | 79 | 0.663801 | false |
mm112287/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_case.py | 738 | 51689 | import difflib
import pprint
import pickle
import re
import sys
import warnings
import weakref
import inspect
from copy import deepcopy
from test import support
import unittest
from .support import (
TestEquality, TestHashing, LoggingResult,
ResultWithNoStartTestRunStopTestRun
)
class Test(object):
"Keep these TestCase classes out of the main namespace"
class Foo(unittest.TestCase):
def runTest(self): pass
def test1(self): pass
class Bar(Foo):
def test2(self): pass
class LoggingTestCase(unittest.TestCase):
"""A test case which logs its calls."""
def __init__(self, events):
super(Test.LoggingTestCase, self).__init__('test')
self.events = events
def setUp(self):
self.events.append('setUp')
def test(self):
self.events.append('test')
def tearDown(self):
self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
### Set up attributes used by inherited tests
################################################################
# Used by TestHashing.test_hash and TestEquality.test_eq
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
# Used by TestEquality.test_ne
ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
(Test.Foo('test1'), Test.Bar('test1')),
(Test.Foo('test1'), Test.Bar('test2'))]
################################################################
### /Set up attributes used by inherited tests
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
# ...
# "methodName defaults to "runTest"."
#
# Make sure it really is optional, and that it defaults to the proper
# thing.
def test_init__no_test_name(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
# test that TestCase can be instantiated with no args
# primarily for use at the interactive interpreter
test = unittest.TestCase()
test.assertEqual(3, 3)
with test.assertRaises(test.failureException):
test.assertEqual(3, 2)
with self.assertRaises(AttributeError):
test.run()
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
# "class TestCase([methodName])"
# ...
# "Each instance of TestCase will run a single test method: the
# method named methodName."
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self): raise MyException()
def test(self): pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail("Failed to raise ValueError")
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self): pass
self.assertEqual(Foo('test').countTestCases(), 1)
# "Return the default type of test result object to be used to run this
# test. For TestCase instances, this will always be
# unittest.TestResult; subclasses of TestCase should
# override this as necessary."
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
# "With a temporary result stopTestRun is called when setUp errors.
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "With a default result, an error in the test still results in stopTestRun
# being called."
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addError', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
# "When a test fails with a default result stopTestRun is still called."
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
# "When tearDown errors with a default result stopTestRun is still called."
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "TestCase.run() still works when the defaultTestResult is a TestResult
# that does not support startTestRun and stopTestRun.
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework. The initial value of this
# attribute is AssertionError"
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertTrue(Foo('test').failureException is AssertionError)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "This class attribute gives the exception raised by the test() method.
# If a test framework needs to use a specialized exception, possibly to
# carry additional information, it must subclass this exception in
# order to ``play fair'' with the framework."
#
# Make sure TestCase.run() respects the designated failureException
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail("foo")
failureException = RuntimeError
self.assertTrue(Foo('test').failureException is RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
# "The default implementation does nothing."
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().setUp()
# "The default implementation does nothing."
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
# ... and nothing should happen
Foo().tearDown()
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), str)
# "If result is omitted or None, a temporary result object is created,
# used, and is made available to the caller. As TestCase owns the
# temporary result startTestRun and stopTestRun are called.
def test_run__uses_defaultTestResult(self):
events = []
defaultResult = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return defaultResult
# Make run() find a result object on its own
result = Foo('test').run()
self.assertIs(result, defaultResult)
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
# "The result object is returned to run's caller"
def test_run__returns_given_result(self):
class Foo(unittest.TestCase):
def test(self):
pass
result = unittest.TestResult()
retval = Foo('test').run(result)
self.assertIs(retval, result)
# "The same effect [as method run] may be had by simply calling the
# TestCase instance."
def test_call__invoking_an_instance_delegates_to_run(self):
resultIn = unittest.TestResult()
resultOut = unittest.TestResult()
class Foo(unittest.TestCase):
def test(self):
pass
def run(self, result):
self.assertIs(result, resultIn)
return resultOut
retval = Foo('test')(resultIn)
self.assertIs(retval, resultOut)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
returned used in the short description, no matter how long the
whole thing is.
"""
self.assertEqual(
self.shortDescription(),
'Tests shortDescription() for a method with a longer '
'docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertFalse(s1 == s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) == type(b) == SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
        # No, this doesn't clean up and remove the SadSnake equality func
        # from this TestCase instance, but since it's a local, nothing else
        # will ever notice that.
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({1: "one"}, {})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 2}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
one = ''.join(chr(i) for i in range(255))
# this used to cause a UnicodeDecodeError constructing the failure msg
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'})
def testAssertEqual(self):
equal_pairs = [
((), ()),
({}, {}),
([], []),
(set(), set()),
(frozenset(), frozenset())]
for a, b in equal_pairs:
            # This mess of try/except blocks is to test the assertEqual
            # behavior itself.
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
unequal_pairs = [
((), []),
({}, set()),
(set([4,1]), frozenset([4,2])),
(frozenset([4,5]), set([2,3])),
(set([3,4]), set([5,4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, a, b)
self.assertRaises(unittest.TestCase.failureException,
self.assertListEqual, tuple(a), tuple(b))
self.assertRaises(unittest.TestCase.failureException,
self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual,
a, tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual, None,
tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
self.assertRaises(self.failureException, self.assertSequenceEqual,
1, 1)
self.assertDictEqual({}, {})
c = { 'x': 1 }
d = {}
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
self.assertRaises(unittest.TestCase.failureException,
self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80*8)
seq1 = 'a' + 'x' * 80**2
seq2 = 'b' + 'x' * 80**2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
# the +1 is the leading \n added by assertSequenceEqual
omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff)//2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) < len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertTrue(len(msg) > len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
self.maxDiff = 1
message = self._truncateMessage('foo', 'bar')
omitted = unittest.case.DIFF_OMITTED % len('bar')
self.assertEqual(message, 'foo' + omitted)
self.maxDiff = None
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
self.maxDiff = 4
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
def testAssertDictEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertDictEqual({}, {1: 0})
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
# check threshold value
self.assertEqual(self._diffThreshold, 2**16)
        # disable maxDiff to get diff markers
self.maxDiff = None
# set a lower threshold value and add a cleanup to restore it
old_threshold = self._diffThreshold
self._diffThreshold = 2**8
self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
# under the threshold: diff marker (^) in error message
s = 'x' * (2**7)
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s + 'a', s + 'b')
self.assertIn('^', str(cm.exception))
self.assertEqual(s + 'a', s + 'a')
# over the threshold: diff not used and marker (^) not in error message
s = 'x' * (2**9)
# if the path that uses difflib is taken, _truncateMessage will be
# called -- replace it with explodingTruncation to verify that this
# doesn't happen
def explodingTruncation(message, diff):
raise SystemError('this should not be raised')
old_truncate = self._truncateMessage
self._truncateMessage = explodingTruncation
self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
self.assertNotIn('^', str(cm.exception))
self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
self.assertEqual(s + 'a', s + 'a')
def testAssertCountEqual(self):
a = object()
self.assertCountEqual([1, 2, 3], [3, 2, 1])
self.assertCountEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertCountEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
self.assertCountEqual([1, "2", "a", "a"], ["a", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 2] + [3] * 100, [1] * 100 + [2, 3])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, "2", "a", "a"], ["a", "2", True, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[10], [10, 11])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11], [10])
self.assertRaises(self.failureException, self.assertCountEqual,
[10, 11, 10], [10, 11])
# Test that sequences of unhashable objects can be tested for sameness:
self.assertCountEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
        # Test that iterators of unhashable objects can be tested for sameness:
self.assertCountEqual(iter([1, 2, [], 3, 4]),
iter([1, 2, [], 3, 4]))
# hashable types, but not orderable
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, 'x', 1, 5j, 2j, frozenset()])
# comparing dicts
self.assertCountEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
        # comparing heterogeneous non-hashable sequences
self.assertCountEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[], [divmod, [], 'x', 1, 5j, 2j, set()])
self.assertRaises(self.failureException, self.assertCountEqual,
[[1]], [[2]])
# Same elements, but not same sequence length
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, 2], [2, 1])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, 1, "2", "a", "a"], ["2", "2", True, "a"])
self.assertRaises(self.failureException, self.assertCountEqual,
[1, {'b': 2}, None, True], [{'b': 2}, True, None])
# Same elements which don't reliably compare, in
# different order, see issue 10242
a = [{2,4}, {1,2}]
b = a[::-1]
self.assertCountEqual(a, b)
# test utility functions supporting assertCountEqual()
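        # A hedged reading of the triples checked below, inferred from the
        # expected sets rather than from documented API: each diff entry is
        # (count in first sequence, count in second sequence, element) for
        # elements whose counts differ.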
diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
diffs = unittest.util._count_diff_all_purpose([[]], [])
self.assertEqual(diffs, [(1, 0, [])])
diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
expected = {(3,1,'a'), (1,3,'b'), (1,0,'d'), (0,1,'e')}
self.assertEqual(diffs, expected)
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None, set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1, None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = "foo"
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2, set1)
# make sure any string formatting is tuple-safe
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1, set2)
def testInequality(self):
# Try ints
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
# Try Floats
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
# Try Strings
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, 'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual, 'bug', 'ant')
# Try bytes
self.assertGreater(b'bug', b'ant')
self.assertGreaterEqual(b'bug', b'ant')
self.assertGreaterEqual(b'ant', b'ant')
self.assertLess(b'ant', b'bug')
self.assertLessEqual(b'ant', b'bug')
self.assertLessEqual(b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'bug')
self.assertRaises(self.failureException, self.assertGreater, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual, b'ant',
b'bug')
self.assertRaises(self.failureException, self.assertLess, b'bug', b'ant')
self.assertRaises(self.failureException, self.assertLess, b'ant', b'ant')
self.assertRaises(self.failureException, self.assertLessEqual, b'bug', b'ant')
def testAssertMultiLineEqual(self):
sample_text = """\
http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = """\
http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = """\
- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
self.maxDiff = None
try:
self.assertMultiLineEqual(sample_text, revised_sample_text)
except self.failureException as e:
# need to remove the first line of the error message
error = str(e).split('\n', 1)[1]
            # no fair testing ourselves with ourselves; assertEqual delegates
            # to assertMultiLineEqual for strings, so we can't use assertEqual
            # either.  Just use assertTrue.
self.assertTrue(sample_text_error == error)
    def testAssertEqualSingleLine(self):
sample_text = "laden swallows fly slowly"
revised_sample_text = "unladen swallows fly quickly"
sample_text_error = """\
- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
error = str(e).split('\n', 1)[1]
self.assertTrue(sample_text_error == error)
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', r'ab+')
self.assertRaises(self.failureException, self.assertRegex,
'saaas', r'aaaa')
def testAssertRaisesRegex(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
def testAssertNotRaisesRegex(self):
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, re.compile('x'),
lambda: None)
self.assertRaisesRegex(
self.failureException, '^Exception not raised by <lambda>$',
self.assertRaisesRegex, Exception, 'x',
lambda: None)
def testAssertRaisesRegexMismatch(self):
def Stub():
raise Exception('Unexpected')
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception, '^Expected$',
Stub)
self.assertRaisesRegex(
self.failureException,
r'"\^Expected\$" does not match "Unexpected"',
self.assertRaisesRegex, Exception,
re.compile('^Expected$'), Stub)
def testAssertRaisesExcValue(self):
class ExceptionMock(Exception):
pass
def Stub(foo):
raise ExceptionMock(foo)
v = "particular value"
ctx = self.assertRaises(ExceptionMock)
with ctx:
Stub(v)
e = ctx.exception
self.assertIsInstance(e, ExceptionMock)
self.assertEqual(e.args[0], v)
def testAssertWarnsCallable(self):
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
# Success when the right warning is triggered, even several times
self.assertWarns(RuntimeWarning, _runtime_warn)
self.assertWarns(RuntimeWarning, _runtime_warn)
# A tuple of warning classes is accepted
self.assertWarns((DeprecationWarning, RuntimeWarning), _runtime_warn)
# *args and **kwargs also work
self.assertWarns(RuntimeWarning,
warnings.warn, "foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarns(RuntimeWarning, lambda: 0)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarns(DeprecationWarning, _runtime_warn)
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
self.assertWarns(DeprecationWarning, _runtime_warn)
def testAssertWarnsContext(self):
        # Believe it or not, it is preferable to duplicate all tests above,
        # to make sure the __warningregistry__ state is circumvented correctly.
def _runtime_warn():
warnings.warn("foo", RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarns(RuntimeWarning) as cm:
_runtime_warn()
# A tuple of warning classes is accepted
with self.assertWarns((DeprecationWarning, RuntimeWarning)) as cm:
_runtime_warn()
# The context manager exposes various useful attributes
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foo")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Same with several warnings
with self.assertWarns(RuntimeWarning):
_runtime_warn()
_runtime_warn()
with self.assertWarns(RuntimeWarning):
warnings.warn("foo", category=RuntimeWarning)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarns(RuntimeWarning):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
# Filters for other warnings are not modified
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises(RuntimeWarning):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
def testAssertWarnsRegexCallable(self):
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "foox")
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
lambda: 0)
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarnsRegex(DeprecationWarning, "o+",
_runtime_warn, "foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
self.assertWarnsRegex(RuntimeWarning, "o+",
_runtime_warn, "barz")
def testAssertWarnsRegexContext(self):
# Same as above, but with assertWarnsRegex as a context manager
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarnsRegex(RuntimeWarning, "o+") as cm:
_runtime_warn("foox")
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], "foox")
self.assertIn("test_case.py", cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
# Failure when no warning is triggered
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
pass
# Failure when another warning is triggered
with warnings.catch_warnings():
# Force default filter (in case tests are run with -We)
warnings.simplefilter("default", RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(DeprecationWarning, "o+"):
_runtime_warn("foox")
# Failure when message doesn't match
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
# A little trickier: we ask RuntimeWarnings to be raised, and then
# check for some of them. It is implementation-defined whether
# non-matching RuntimeWarnings are simply re-raised, or produce a
# failureException.
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
with self.assertWarnsRegex(RuntimeWarning, "o+"):
_runtime_warn("barz")
def testDeprecatedMethodNames(self):
"""
Test that the deprecated methods raise a DeprecationWarning. See #9424.
"""
old = (
(self.failIfEqual, (3, 5)),
(self.assertNotEquals, (3, 5)),
(self.failUnlessEqual, (3, 3)),
(self.assertEquals, (3, 3)),
(self.failUnlessAlmostEqual, (2.0, 2.0)),
(self.assertAlmostEquals, (2.0, 2.0)),
(self.failIfAlmostEqual, (3.0, 5.0)),
(self.assertNotAlmostEquals, (3.0, 5.0)),
(self.failUnless, (True,)),
(self.assert_, (True,)),
(self.failUnlessRaises, (TypeError, lambda _: 3.14 + 'spam')),
(self.failIf, (False,)),
(self.assertDictContainsSubset, (dict(a=1, b=2), dict(a=1, b=2, c=3))),
(self.assertRaisesRegexp, (KeyError, 'foo', lambda: {}['foo'])),
(self.assertRegexpMatches, ('bar', 'bar')),
)
for meth, args in old:
with self.assertWarns(DeprecationWarning):
meth(*args)
# disable this test for now. When the version where the fail* methods will
# be removed is decided, re-enable it and update the version
def _testDeprecatedFailMethods(self):
"""Test that the deprecated fail* methods get removed in 3.x"""
if sys.version_info[:2] < (3, 3):
return
deprecated_names = [
'failIfEqual', 'failUnlessEqual', 'failUnlessAlmostEqual',
'failIfAlmostEqual', 'failUnless', 'failUnlessRaises', 'failIf',
'assertDictContainsSubset',
]
for deprecated_name in deprecated_names:
with self.assertRaises(AttributeError):
getattr(self, deprecated_name) # remove these in 3.x
def testDeepcopy(self):
# Issue: 5660
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
# This shouldn't blow up
deepcopy(test)
def testPickle(self):
# Issue 10326
        # Can't use TestCase classes defined in the Test class, as
        # pickle does not work with inner classes
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
# blew up prior to fix
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
# exercise the TestCase instance in a way that will invoke
# the type equality lookup mechanism
unpickled_test.assertEqual(set(), set())
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
with self.assertRaises(KeyboardInterrupt):
klass('test_something').run()
def testSkippingEverywhere(self):
def _skip(self=None):
raise unittest.SkipTest('some reason')
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _skip
class Test2(unittest.TestCase):
setUp = _skip
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _skip
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_skip)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
@support.cpython_only
def testNoCycles(self):
case = unittest.TestCase()
wr = weakref.ref(case)
with support.disable_gc():
del case
self.assertFalse(wr())
| gpl-3.0 | 6,740,781,059,871,792,000 | 6,258,115,170,218,212,000 | 38.307224 | 87 | 0.595794 | false |
avedaee/DIRAC | Interfaces/scripts/dirac-wms-job-parameters.py | 8 | 1186 | #!/usr/bin/env python
########################################################################
# $HeadURL$
# File : dirac-wms-job-delete
# Author : Stuart Paterson
########################################################################
"""
  Retrieve parameters associated with the given DIRAC job
"""
__RCSID__ = "$Id$"
import DIRAC
from DIRAC.Core.Base import Script
Script.setUsageMessage( '\n'.join( [ __doc__.split( '\n' )[1],
'Usage:',
' %s [option|cfgfile] ... JobID ...' % Script.scriptName,
'Arguments:',
' JobID: DIRAC Job ID' ] ) )
Script.parseCommandLine( ignoreErrors = True )
args = Script.getPositionalArgs()
if len( args ) < 1:
Script.showHelp()
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
exitCode = 0
errorList = []
for job in args:
result = dirac.parameters( job, printOutput = True )
if not result['OK']:
errorList.append( ( job, result['Message'] ) )
exitCode = 2
for error in errorList:
print "ERROR %s: %s" % error
DIRAC.exit( exitCode )
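# Hedged usage sketch (the job IDs are illustrative, not from the original
# file):
#
#   $ dirac-wms-job-parameters 123 456
#
# prints the parameters of each job and exits with status 2 if any lookup
# fails.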
| gpl-3.0 | -1,252,049,179,191,454,500 | -5,191,876,865,571,389,000 | 28.65 | 95 | 0.478921 | false |
sdague/home-assistant | homeassistant/components/aprs/device_tracker.py | 17 | 5575 | """Support for APRS device tracking."""
import logging
import threading
import aprslib
from aprslib import ConnectionError as AprsConnectionError, LoginError
import geopy.distance
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_GPS_ACCURACY,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_HOST,
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import slugify
DOMAIN = "aprs"
_LOGGER = logging.getLogger(__name__)
ATTR_ALTITUDE = "altitude"
ATTR_COURSE = "course"
ATTR_COMMENT = "comment"
ATTR_FROM = "from"
ATTR_FORMAT = "format"
ATTR_POS_AMBIGUITY = "posambiguity"
ATTR_SPEED = "speed"
CONF_CALLSIGNS = "callsigns"
DEFAULT_HOST = "rotate.aprs2.net"
DEFAULT_PASSWORD = "-1"
DEFAULT_TIMEOUT = 30.0
FILTER_PORT = 14580
MSG_FORMATS = ["compressed", "uncompressed", "mic-e"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_CALLSIGNS): cv.ensure_list,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(float),
}
)
def make_filter(callsigns: list) -> str:
"""Make a server-side filter from a list of callsigns."""
return " ".join(f"b/{sign.upper()}" for sign in callsigns)
def gps_accuracy(gps, posambiguity: int) -> int:
"""Calculate the GPS accuracy based on APRS posambiguity."""
pos_a_map = {0: 0, 1: 1 / 600, 2: 1 / 60, 3: 1 / 6, 4: 1}
if posambiguity in pos_a_map:
degrees = pos_a_map[posambiguity]
gps2 = (gps[0], gps[1] + degrees)
dist_m = geopy.distance.distance(gps, gps2).m
accuracy = round(dist_m)
else:
message = f"APRS position ambiguity must be 0-4, not '{posambiguity}'."
raise ValueError(message)
return accuracy
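# Hedged worked example (coordinates are illustrative): at the equator,
# gps_accuracy((0.0, 0.0), 1) offsets the second point by 1/600 degree of
# longitude, which geopy measures as roughly 185 m; any posambiguity outside
# 0-4 raises ValueError.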
def setup_scanner(hass, config, see, discovery_info=None):
"""Set up the APRS tracker."""
callsigns = config.get(CONF_CALLSIGNS)
server_filter = make_filter(callsigns)
callsign = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
host = config.get(CONF_HOST)
timeout = config.get(CONF_TIMEOUT)
aprs_listener = AprsListenerThread(callsign, password, host, server_filter, see)
def aprs_disconnect(event):
"""Stop the APRS connection."""
aprs_listener.stop()
aprs_listener.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, aprs_disconnect)
if not aprs_listener.start_event.wait(timeout):
_LOGGER.error("Timeout waiting for APRS to connect")
return
if not aprs_listener.start_success:
_LOGGER.error(aprs_listener.start_message)
return
_LOGGER.debug(aprs_listener.start_message)
return True
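# Hedged sketch of the mapping this platform consumes (keys follow
# PLATFORM_SCHEMA above; the values are illustrative, not real credentials):
#
#     config = {
#         'callsigns': ['K1ABC', 'K1ABC-9'],
#         'username': 'K1ABC',
#         'password': '12345',       # defaults to '-1' (receive-only login)
#         'host': 'rotate.aprs2.net',
#         'timeout': 30.0,
#     }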
class AprsListenerThread(threading.Thread):
"""APRS message listener."""
def __init__(
self, callsign: str, password: str, host: str, server_filter: str, see
):
"""Initialize the class."""
super().__init__()
self.callsign = callsign
self.host = host
self.start_event = threading.Event()
self.see = see
self.server_filter = server_filter
self.start_message = ""
self.start_success = False
self.ais = aprslib.IS(
self.callsign, passwd=password, host=self.host, port=FILTER_PORT
)
def start_complete(self, success: bool, message: str):
"""Complete startup process."""
self.start_message = message
self.start_success = success
self.start_event.set()
def run(self):
"""Connect to APRS and listen for data."""
self.ais.set_filter(self.server_filter)
try:
_LOGGER.info(
"Opening connection to %s with callsign %s", self.host, self.callsign
)
self.ais.connect()
self.start_complete(
True, f"Connected to {self.host} with callsign {self.callsign}."
)
self.ais.consumer(callback=self.rx_msg, immortal=True)
except (AprsConnectionError, LoginError) as err:
self.start_complete(False, str(err))
except OSError:
_LOGGER.info(
"Closing connection to %s with callsign %s", self.host, self.callsign
)
def stop(self):
"""Close the connection to the APRS network."""
self.ais.close()
def rx_msg(self, msg: dict):
"""Receive message and process if position."""
_LOGGER.debug("APRS message received: %s", str(msg))
if msg[ATTR_FORMAT] in MSG_FORMATS:
dev_id = slugify(msg[ATTR_FROM])
lat = msg[ATTR_LATITUDE]
lon = msg[ATTR_LONGITUDE]
attrs = {}
if ATTR_POS_AMBIGUITY in msg:
pos_amb = msg[ATTR_POS_AMBIGUITY]
try:
attrs[ATTR_GPS_ACCURACY] = gps_accuracy((lat, lon), pos_amb)
except ValueError:
_LOGGER.warning(
"APRS message contained invalid posambiguity: %s", str(pos_amb)
)
for attr in [ATTR_ALTITUDE, ATTR_COMMENT, ATTR_COURSE, ATTR_SPEED]:
if attr in msg:
attrs[attr] = msg[attr]
self.see(dev_id=dev_id, gps=(lat, lon), attributes=attrs)
| apache-2.0 | 2,533,035,894,567,979,500 | -7,603,220,383,083,513,000 | 29.464481 | 87 | 0.618475 | false |
tanglei528/glance | glance/tests/unit/common/test_property_utils.py | 1 | 23549 | # Copyright 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import xrange
from glance.api import policy
from glance.common import exception
from glance.common import property_utils
import glance.context
from glance.tests.unit import base
CONFIG_SECTIONS = [
'^x_owner_.*',
'spl_create_prop',
'spl_read_prop',
'spl_read_only_prop',
'spl_update_prop',
'spl_update_only_prop',
'spl_delete_prop',
'^x_all_permitted.*',
'^x_none_permitted.*',
'x_none_read',
'x_none_update',
'x_none_delete',
'x_foo_matcher',
'x_foo_*',
'.*'
]
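# Hedged sketch of a property-protections.conf section matching the names
# above (the roles are illustrative): each section header is a regex over
# property names, and each operation maps to a comma-separated role list, e.g.
#
#     [^x_owner_.*]
#     create = admin,member
#     read = admin,member
#     update = admin,member
#     delete = admin,member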
def create_context(policy, roles=None):
if roles is None:
roles = []
return glance.context.RequestContext(roles=roles,
policy_enforcer=policy)
class TestPropertyRulesWithRoles(base.IsolatedUnitTest):
def setUp(self):
super(TestPropertyRulesWithRoles, self).setUp()
self.set_property_protections()
self.policy = policy.Enforcer()
def tearDown(self):
super(TestPropertyRulesWithRoles, self).tearDown()
def test_is_property_protections_enabled_true(self):
self.config(property_protection_file="property-protections.conf")
self.assertTrue(property_utils.is_property_protection_enabled())
def test_is_property_protections_enabled_false(self):
self.config(property_protection_file=None)
self.assertFalse(property_utils.is_property_protection_enabled())
def test_property_protection_file_doesnt_exist(self):
self.config(property_protection_file='fake-file.conf')
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_property_protection_with_mutually_exclusive_rule(self):
exclusive_rules = {'.*': {'create': ['@', '!'],
'read': ['fake-role'],
'update': ['fake-role'],
'delete': ['fake-role']}}
self.set_property_protection_rules(exclusive_rules)
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_property_protection_with_malformed_rule(self):
malformed_rules = {'^[0-9)': {'create': ['fake-role'],
'read': ['fake-role'],
'update': ['fake-role'],
'delete': ['fake-role']}}
self.set_property_protection_rules(malformed_rules)
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_property_protection_with_missing_operation(self):
rules_with_missing_operation = {'^[0-9]': {'create': ['fake-role'],
'update': ['fake-role'],
'delete': ['fake-role']}}
self.set_property_protection_rules(rules_with_missing_operation)
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_property_protection_with_misspelt_operation(self):
rules_with_misspelt_operation = {'^[0-9]': {'create': ['fake-role'],
'rade': ['fake-role'],
'update': ['fake-role'],
'delete': ['fake-role']}}
self.set_property_protection_rules(rules_with_misspelt_operation)
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_property_protection_with_whitespace(self):
rules_whitespace = {
'^test_prop.*': {
'create': ['member ,fake-role'],
'read': ['fake-role, member'],
'update': ['fake-role, member'],
'delete': ['fake-role, member']
}
}
self.set_property_protection_rules(rules_whitespace)
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules('test_prop_1',
'read', create_context(self.policy, ['member'])))
self.assertTrue(self.rules_checker.check_property_rules('test_prop_1',
'read', create_context(self.policy, ['fake-role'])))
def test_check_property_rules_invalid_action(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertFalse(self.rules_checker.check_property_rules('test_prop',
'hall', create_context(self.policy, ['admin'])))
def test_check_property_rules_read_permitted_admin_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules('test_prop',
'read', create_context(self.policy, ['admin'])))
def test_check_property_rules_read_permitted_specific_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules(
'x_owner_prop', 'read',
create_context(self.policy, ['member'])))
def test_check_property_rules_read_unpermitted_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertFalse(self.rules_checker.check_property_rules('test_prop',
'read', create_context(self.policy, ['member'])))
def test_check_property_rules_create_permitted_admin_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules('test_prop',
'create', create_context(self.policy, ['admin'])))
def test_check_property_rules_create_permitted_specific_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules(
'x_owner_prop', 'create',
create_context(self.policy, ['member'])))
def test_check_property_rules_create_unpermitted_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertFalse(self.rules_checker.check_property_rules('test_prop',
'create', create_context(self.policy, ['member'])))
def test_check_property_rules_update_permitted_admin_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules('test_prop',
'update', create_context(self.policy, ['admin'])))
def test_check_property_rules_update_permitted_specific_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules(
'x_owner_prop', 'update',
create_context(self.policy, ['member'])))
def test_check_property_rules_update_unpermitted_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertFalse(self.rules_checker.check_property_rules('test_prop',
'update', create_context(self.policy, ['member'])))
def test_check_property_rules_delete_permitted_admin_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules('test_prop',
'delete', create_context(self.policy, ['admin'])))
def test_check_property_rules_delete_permitted_specific_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertTrue(self.rules_checker.check_property_rules(
'x_owner_prop', 'delete',
create_context(self.policy, ['member'])))
def test_check_property_rules_delete_unpermitted_role(self):
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertFalse(self.rules_checker.check_property_rules('test_prop',
'delete', create_context(self.policy, ['member'])))
def test_property_config_loaded_in_order(self):
"""
Verify the order of loaded config sections matches that from the
configuration file
"""
self.rules_checker = property_utils.PropertyRules(self.policy)
self.assertEqual(property_utils.CONFIG.sections(), CONFIG_SECTIONS)
def test_property_rules_loaded_in_order(self):
"""
Verify rules are iterable in the same order as read from the config
file
"""
self.rules_checker = property_utils.PropertyRules(self.policy)
for i in xrange(len(property_utils.CONFIG.sections())):
self.assertEqual(property_utils.CONFIG.sections()[i],
self.rules_checker.rules[i][0].pattern)
def test_check_property_rules_create_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'create', create_context(self.policy, [''])))
def test_check_property_rules_read_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'read', create_context(self.policy, [''])))
def test_check_property_rules_update_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'update', create_context(self.policy, [''])))
def test_check_property_rules_delete_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'delete', create_context(self.policy, [''])))
def test_check_property_rules_create_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'create', create_context(self.policy, [''])))
def test_check_property_rules_read_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'read', create_context(self.policy, [''])))
def test_check_property_rules_update_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'update', create_context(self.policy, [''])))
def test_check_property_rules_delete_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'delete', create_context(self.policy, [''])))
def test_check_property_rules_read_none(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_read', 'create',
create_context(self.policy, ['admin', 'member'])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_read', 'read',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_read', 'update',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_read', 'delete',
create_context(self.policy, [''])))
def test_check_property_rules_update_none(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_update', 'create',
create_context(self.policy, ['admin', 'member'])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_update', 'read',
create_context(self.policy, ['admin', 'member'])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_update', 'update',
create_context(self.policy, [''])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_update', 'delete',
create_context(self.policy, ['admin', 'member'])))
def test_check_property_rules_delete_none(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_delete', 'create',
create_context(self.policy, ['admin', 'member'])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_delete', 'read',
create_context(self.policy, ['admin', 'member'])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_delete', 'update',
create_context(self.policy, ['admin', 'member'])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_delete', 'delete',
create_context(self.policy, [''])))
def test_check_return_first_match(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'create',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'read',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'update',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'delete',
create_context(self.policy, [''])))
class TestPropertyRulesWithPolicies(base.IsolatedUnitTest):
def setUp(self):
super(TestPropertyRulesWithPolicies, self).setUp()
self.set_property_protections(use_policies=True)
self.policy = policy.Enforcer()
self.rules_checker = property_utils.PropertyRules(self.policy)
def tearDown(self):
super(TestPropertyRulesWithPolicies, self).tearDown()
def test_check_property_rules_create_permitted_specific_policy(self):
self.assertTrue(self.rules_checker.check_property_rules(
'spl_creator_policy', 'create',
create_context(self.policy, ['spl_role'])))
def test_check_property_rules_create_unpermitted_policy(self):
self.assertFalse(self.rules_checker.check_property_rules(
'spl_creator_policy', 'create',
create_context(self.policy, ['fake-role'])))
def test_check_property_rules_read_permitted_specific_policy(self):
self.assertTrue(self.rules_checker.check_property_rules(
'spl_creator_policy', 'read',
create_context(self.policy, ['spl_role'])))
def test_check_property_rules_read_unpermitted_policy(self):
self.assertFalse(self.rules_checker.check_property_rules(
'spl_creator_policy', 'read',
create_context(self.policy, ['fake-role'])))
def test_check_property_rules_update_permitted_specific_policy(self):
self.assertTrue(self.rules_checker.check_property_rules(
'spl_creator_policy', 'update',
create_context(self.policy, ['admin'])))
def test_check_property_rules_update_unpermitted_policy(self):
self.assertFalse(self.rules_checker.check_property_rules(
'spl_creator_policy', 'update',
create_context(self.policy, ['fake-role'])))
def test_check_property_rules_delete_permitted_specific_policy(self):
self.assertTrue(self.rules_checker.check_property_rules(
'spl_creator_policy', 'delete',
create_context(self.policy, ['admin'])))
def test_check_property_rules_delete_unpermitted_policy(self):
self.assertFalse(self.rules_checker.check_property_rules(
'spl_creator_policy', 'delete',
create_context(self.policy, ['fake-role'])))
def test_property_protection_with_malformed_rule(self):
malformed_rules = {'^[0-9)': {'create': ['fake-policy'],
'read': ['fake-policy'],
'update': ['fake-policy'],
'delete': ['fake-policy']}}
self.set_property_protection_rules(malformed_rules)
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_property_protection_with_multiple_policies(self):
malformed_rules = {'^x_.*': {'create': ['fake-policy, another_pol'],
'read': ['fake-policy'],
'update': ['fake-policy'],
'delete': ['fake-policy']}}
self.set_property_protection_rules(malformed_rules)
self.assertRaises(exception.InvalidPropertyProtectionConfiguration,
property_utils.PropertyRules)
def test_check_property_rules_create_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'create', create_context(self.policy, [''])))
def test_check_property_rules_read_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'read', create_context(self.policy, [''])))
def test_check_property_rules_update_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'update', create_context(self.policy, [''])))
def test_check_property_rules_delete_all_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_all_permitted', 'delete', create_context(self.policy, [''])))
def test_check_property_rules_create_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'create', create_context(self.policy, [''])))
def test_check_property_rules_read_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'read', create_context(self.policy, [''])))
def test_check_property_rules_update_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'update', create_context(self.policy, [''])))
def test_check_property_rules_delete_none_permitted(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_permitted', 'delete', create_context(self.policy, [''])))
def test_check_property_rules_read_none(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_read', 'create',
create_context(self.policy, ['admin', 'member'])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_read', 'read',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_read', 'update',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_read', 'delete',
create_context(self.policy, [''])))
def test_check_property_rules_update_none(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_update', 'create',
create_context(self.policy, ['admin', 'member'])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_update', 'read',
create_context(self.policy, ['admin', 'member'])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_update', 'update',
create_context(self.policy, [''])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_update', 'delete',
create_context(self.policy, ['admin', 'member'])))
def test_check_property_rules_delete_none(self):
self.rules_checker = property_utils.PropertyRules()
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_delete', 'create',
create_context(self.policy, ['admin', 'member'])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_delete', 'read',
create_context(self.policy, ['admin', 'member'])))
self.assertTrue(self.rules_checker.check_property_rules(
'x_none_delete', 'update',
create_context(self.policy, ['admin', 'member'])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_none_delete', 'delete',
create_context(self.policy, [''])))
def test_check_return_first_match(self):
self.rules_checker = property_utils.PropertyRules()
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'create',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'read',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'update',
create_context(self.policy, [''])))
self.assertFalse(self.rules_checker.check_property_rules(
'x_foo_matcher', 'delete',
create_context(self.policy, [''])))
| apache-2.0 | 1,281,006,492,237,038,600 | 7,184,046,723,766,925,000 | 48.162839 | 78 | 0.618115 | false |
ppasq/geonode | geonode/base/management/commands/delete_orphaned_thumbs.py | 18 | 1091 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.core.management.base import BaseCommand
from geonode.base.utils import delete_orphaned_thumbs
class Command(BaseCommand):
help = ("Delete orphaned thumbnails.")
def handle(self, *args, **options):
delete_orphaned_thumbs()
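# Hedged usage note: as a Django management command this is invoked via
# manage.py, e.g. `python manage.py delete_orphaned_thumbs`; it takes no
# arguments.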
| gpl-3.0 | 411,540,444,571,602,370 | -6,573,466,618,180,150,000 | 36.62069 | 73 | 0.64253 | false |
mykoz/ThinkStats2 | code/thinkstats2.py | 68 | 68825 | """This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy
from scipy import stats
from scipy import special
from scipy import ndimage
from io import open
ROOT2 = math.sqrt(2)
def RandomSeed(x):
"""Initialize the random and np.random generators.
x: int seed
"""
random.seed(x)
np.random.seed(x)
def Odds(p):
"""Computes odds for a given probability.
Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
def Probability(o):
"""Computes the probability corresponding to given odds.
Example: o=2 means 2:1 odds in favor, or 2/3 probability
o: float odds, strictly positive
Returns: float probability
"""
return o / (o + 1)
def Probability2(yes, no):
"""Computes the probability corresponding to given odds.
Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.
yes, no: int or float odds in favor
"""
return yes / (yes + no)
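# A minimal usage sketch; `DemoOdds` is an illustrative name, not part of
# the published module. It checks that Odds and Probability are inverses.
def DemoOdds():
    """Round-trips a few probabilities through odds space."""
    for p in [0.25, 0.5, 0.75]:
        o = Odds(p)
        assert abs(Probability(o) - p) < 1e-12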
class Interpolator(object):
"""Represents a mapping between sorted sequences; performs linear interp.
Attributes:
xs: sorted list
ys: sorted list
"""
def __init__(self, xs, ys):
self.xs = xs
self.ys = ys
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys)
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
return self._Bisect(y, self.ys, self.xs)
def _Bisect(self, x, xs, ys):
"""Helper function."""
if x <= xs[0]:
return ys[0]
if x >= xs[-1]:
return ys[-1]
i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else '_nolegend_'
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
return '%s(%s)' % (cls, str(self.d))
__repr__ = __str__
def __eq__(self, other):
return self.d == other.d
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
        if np.nan in self.d:
            # membership test is more reliable than `min(...) is np.nan`,
            # whose result depends on NaN's unordered comparisons
            logging.warning('Hist: contains NaN, may not render correctly.')
return zip(*sorted(self.Items()))
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in sorted(self.d.items()):
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
class Hist(_DictWrapper):
"""Represents a histogram, which is a map from values to frequencies.
Values can be any hashable type; frequencies are integer counters.
"""
def Freq(self, x):
"""Gets the frequency associated with the value x.
Args:
x: number value
Returns:
int frequency
"""
return self.d.get(x, 0)
def Freqs(self, xs):
"""Gets frequencies for a sequence of values."""
return [self.Freq(x) for x in xs]
def IsSubset(self, other):
"""Checks whether the values in this histogram are a subset of
the values in the given histogram."""
for val, freq in self.Items():
if freq > other.Freq(val):
return False
return True
def Subtract(self, other):
"""Subtracts the values in the given histogram from this histogram."""
for val, freq in other.Items():
self.Incr(val, -freq)
class Pmf(_DictWrapper):
"""Represents a probability mass function.
Values can be any hashable type; probabilities are floating-point.
Pmfs are not necessarily normalized.
"""
def Prob(self, x, default=0):
"""Gets the probability associated with the value x.
Args:
x: number value
default: value to return if the key is not there
Returns:
float probability
"""
return self.d.get(x, default)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Percentile(self, percentage):
"""Computes a percentile of a given Pmf.
Note: this is not super efficient. If you are planning
to compute more than a few percentiles, compute the Cdf.
percentage: float 0-100
returns: value from the Pmf
"""
p = percentage / 100.0
total = 0
for val, prob in sorted(self.Items()):
total += prob
if total >= p:
return val
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbGreater(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val > x]
return sum(t)
def ProbLess(self, x):
"""Probability that a sample from this Pmf is less than x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbLess(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val < x]
return sum(t)
def __lt__(self, obj):
"""Less than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbLess(obj)
def __gt__(self, obj):
"""Greater than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbGreater(obj)
def __ge__(self, obj):
"""Greater than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self < obj)
def __le__(self, obj):
"""Less than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self > obj)
def Normalize(self, fraction=1.0):
"""Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
"""
if self.log:
raise ValueError("Normalize: Pmf is under a log transform")
total = self.Total()
if total == 0.0:
raise ValueError('Normalize: total probability is zero.')
#logging.warning('Normalize: total probability is zero.')
#return total
factor = fraction / total
for x in self.d:
self.d[x] *= factor
return total
def Random(self):
"""Chooses a random element from this PMF.
Note: this is not very efficient. If you plan to call
this more than a few times, consider converting to a CDF.
Returns:
float value from the Pmf
"""
target = random.random()
total = 0.0
for x, p in self.d.items():
total += p
if total >= target:
return x
# we shouldn't get here
raise ValueError('Random: Pmf might not be normalized.')
def Mean(self):
"""Computes the mean of a PMF.
Returns:
float mean
"""
mean = 0.0
for x, p in self.d.items():
mean += p * x
return mean
def Var(self, mu=None):
"""Computes the variance of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float variance
"""
if mu is None:
mu = self.Mean()
var = 0.0
for x, p in self.d.items():
var += p * (x - mu) ** 2
return var
def Std(self, mu=None):
"""Computes the standard deviation of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float standard deviation
"""
var = self.Var(mu)
return math.sqrt(var)
def MaximumLikelihood(self):
"""Returns the value with the highest probability.
        Returns: the value, not its probability
"""
_, val = max((prob, val) for val, prob in self.Items())
return val
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = self.MakeCdf()
return cdf.CredibleInterval(percentage)
def __add__(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf or a scalar
returns: new Pmf
"""
try:
return self.AddPmf(other)
except AttributeError:
return self.AddConstant(other)
def AddPmf(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 + v2, p1 * p2)
return pmf
def AddConstant(self, other):
"""Computes the Pmf of the sum a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 + other, p1)
return pmf
def __sub__(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.SubPmf(other)
except AttributeError:
return self.AddConstant(-other)
def SubPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 - v2, p1 * p2)
return pmf
def __mul__(self, other):
"""Computes the Pmf of the product of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.MulPmf(other)
except AttributeError:
return self.MulConstant(other)
def MulPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 * v2, p1 * p2)
return pmf
def MulConstant(self, other):
"""Computes the Pmf of the product of a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 * other, p1)
return pmf
def __div__(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.DivPmf(other)
except AttributeError:
return self.MulConstant(1/other)
__truediv__ = __div__
def DivPmf(self, other):
"""Computes the Pmf of the ratio of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 / v2, p1 * p2)
return pmf
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
return cdf.Max(k)
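# A minimal usage sketch; `DemoPmfSum` is an illustrative name, not part of
# the published module. It builds the distribution of the sum of two fair
# six-sided dice with Pmf addition (AddPmf).
def DemoPmfSum():
    """Checks that P(sum of two dice == 7) is 6/36."""
    die = Pmf(range(1, 7))
    twice = die + die
    assert abs(twice.Prob(7) - 6.0 / 36) < 1e-12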
class Joint(Pmf):
"""Represents a joint distribution.
The values are sequences (usually tuples)
"""
def Marginal(self, i, label=None):
"""Gets the marginal distribution of the indicated variable.
i: index of the variable we want
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
pmf.Incr(vs[i], prob)
return pmf
def Conditional(self, i, j, val, label=None):
"""Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
if vs[j] != val:
continue
pmf.Incr(vs[i], prob)
pmf.Normalize()
return pmf
def MaxLikeInterval(self, percentage=90):
"""Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
"""
interval = []
total = 0
t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)
for prob, val in t:
interval.append(val)
total += prob
if total >= percentage / 100.0:
break
return interval
def MakeJoint(pmf1, pmf2):
"""Joint distribution of values from pmf1 and pmf2.
Assumes that the PMFs represent independent random variables.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
Joint pmf of value pairs
"""
joint = Joint()
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
joint.Set((v1, v2), p1 * p2)
return joint
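# A minimal usage sketch; `DemoJoint` is an illustrative name, not part of
# the published module. It forms the joint of two independent dice and
# recovers a marginal.
def DemoJoint():
    """Checks that a marginal of an independent joint matches the factor."""
    die = Pmf(range(1, 7))
    joint = MakeJoint(die, die)
    marginal = joint.Marginal(0)
    assert abs(marginal.Prob(3) - 1.0 / 6) < 1e-12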
def MakeHistFromList(t, label=None):
"""Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this histogram
Returns:
Hist object
"""
return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
"""Makes a histogram from a map from values to frequencies.
Args:
d: dictionary that maps values to frequencies
label: string label for this histogram
Returns:
Hist object
"""
return Hist(d, label)
def MakePmfFromList(t, label=None):
"""Makes a PMF from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
"""Makes a PMF from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
"""Makes a PMF from a sequence of value-probability pairs
Args:
t: sequence of value-probability pairs
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
"""Makes a normalized PMF from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Pmf object
"""
if label is None:
label = hist.label
return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
"""Make a mixture distribution.
Args:
metapmf: Pmf that maps from Pmfs to probs.
label: string label for the new Pmf.
Returns: Pmf object.
"""
mix = Pmf(label=label)
for pmf, p1 in metapmf.Items():
for x, p2 in pmf.Items():
mix.Incr(x, p1 * p2)
return mix
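# A minimal usage sketch; `DemoMixture` is an illustrative name, not part
# of the published module. A 50/50 mixture of two biased coins has
# P(heads) equal to the average of the component probabilities.
def DemoMixture():
    """Checks the mean of a two-component mixture."""
    coin1 = Pmf({'H': 0.9, 'T': 0.1})
    coin2 = Pmf({'H': 0.1, 'T': 0.9})
    metapmf = Pmf({coin1: 0.5, coin2: 0.5})
    mix = MakeMixture(metapmf)
    assert abs(mix.Prob('H') - 0.5) < 1e-12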
def MakeUniformPmf(low, high, n):
"""Make a uniform Pmf.
low: lowest value (inclusive)
    high: highest value (inclusive)
n: number of values
"""
pmf = Pmf()
for x in np.linspace(low, high, n):
pmf.Set(x, 1)
pmf.Normalize()
return pmf
class Cdf(object):
"""Represents a cumulative distribution function.
Attributes:
xs: sequence of values
ps: sequence of probabilities
label: string used as a graph label.
"""
def __init__(self, obj=None, ps=None, label=None):
"""Initializes.
If ps is provided, obj must be the corresponding list of values.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
ps: list of cumulative probabilities
label: string label
"""
self.label = label if label is not None else '_nolegend_'
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
if not label:
self.label = label if label is not None else obj.label
if obj is None:
# caller does not provide obj, make an empty Cdf
self.xs = np.asarray([])
self.ps = np.asarray([])
if ps is not None:
logging.warning("Cdf: can't pass ps without also passing xs.")
return
else:
# if the caller provides xs and ps, just store them
if ps is not None:
if isinstance(ps, str):
logging.warning("Cdf: ps can't be a string")
self.xs = np.asarray(obj)
self.ps = np.asarray(ps)
return
# caller has provided just obj, not ps
if isinstance(obj, Cdf):
self.xs = copy.copy(obj.xs)
self.ps = copy.copy(obj.ps)
return
if isinstance(obj, _DictWrapper):
dw = obj
else:
dw = Hist(obj)
if len(dw) == 0:
self.xs = np.asarray([])
self.ps = np.asarray([])
return
xs, freqs = zip(*sorted(dw.Items()))
self.xs = np.asarray(xs)
self.ps = np.cumsum(freqs, dtype=np.float)
self.ps /= self.ps[-1]
def __str__(self):
return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))
__repr__ = __str__
def __len__(self):
return len(self.xs)
def __getitem__(self, x):
return self.Prob(x)
def __setitem__(self):
raise UnimplementedMethodException()
def __delitem__(self):
raise UnimplementedMethodException()
def __eq__(self, other):
return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)
def Copy(self, label=None):
"""Returns a copy of this Cdf.
label: string label for the new Cdf
"""
if label is None:
label = self.label
return Cdf(list(self.xs), list(self.ps), label=label)
def MakePmf(self, label=None):
"""Makes a Pmf."""
if label is None:
label = self.label
return Pmf(self, label=label)
def Values(self):
"""Returns a sorted list of values.
"""
return self.xs
def Items(self):
"""Returns a sorted sequence of (value, probability) pairs.
Note: in Python3, returns an iterator.
"""
a = self.ps
b = np.roll(a, 1)
b[0] = 0
return zip(self.xs, a-b)
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
new = self.Copy()
# don't use +=, or else an int array + float yields int array
new.xs = new.xs + term
return new
def Scale(self, factor):
"""Multiplies the xs by a factor.
factor: what to multiply by
"""
new = self.Copy()
# don't use *=, or else an int array * float yields int array
new.xs = new.xs * factor
return new
def Prob(self, x):
"""Returns CDF(x), the probability that corresponds to value x.
Args:
x: number
Returns:
float probability
"""
if x < self.xs[0]:
return 0.0
index = bisect.bisect(self.xs, x)
p = self.ps[index-1]
return p
def Probs(self, xs):
"""Gets probabilities for a sequence of values.
xs: any sequence that can be converted to NumPy array
returns: NumPy array of cumulative probabilities
"""
xs = np.asarray(xs)
index = np.searchsorted(self.xs, xs, side='right')
ps = self.ps[index-1]
ps[xs < self.xs[0]] = 0.0
return ps
ProbArray = Probs
def Value(self, p):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
p: number in the range [0, 1]
Returns:
number value
"""
if p < 0 or p > 1:
raise ValueError('Probability p must be in range [0, 1]')
index = bisect.bisect_left(self.ps, p)
return self.xs[index]
def ValueArray(self, ps):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
ps: NumPy array of numbers in the range [0, 1]
Returns:
NumPy array of values
"""
ps = np.asarray(ps)
if np.any(ps < 0) or np.any(ps > 1):
raise ValueError('Probability p must be in range [0, 1]')
index = np.searchsorted(self.ps, ps, side='left')
return self.xs[index]
def Percentile(self, p):
"""Returns the value that corresponds to percentile p.
Args:
p: number in the range [0, 100]
Returns:
number value
"""
return self.Value(p / 100.0)
def PercentileRank(self, x):
"""Returns the percentile rank of the value x.
x: potential value in the CDF
returns: percentile rank in the range 0 to 100
"""
return self.Prob(x) * 100.0
def Random(self):
"""Chooses a random value from this distribution."""
return self.Value(random.random())
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
ps = np.random.random(n)
return self.ValueArray(ps)
def Mean(self):
"""Computes the mean of a CDF.
Returns:
float mean
"""
old_p = 0
total = 0.0
for x, new_p in zip(self.xs, self.ps):
p = new_p - old_p
total += p * x
old_p = new_p
return total
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
prob = (1 - percentage / 100.0) / 2
interval = self.Value(prob), self.Value(1 - prob)
return interval
ConfidenceInterval = CredibleInterval
def _Round(self, multiplier=1000.0):
"""
An entry is added to the cdf only if the percentile differs
from the previous value in a significant digit, where the number
of significant digits is determined by multiplier. The
default is 1000, which keeps log10(1000) = 3 significant digits.
"""
# TODO(write this method)
raise UnimplementedMethodException()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
An empirical CDF is a step function; linear interpolation
can be misleading.
Note: options are ignored
Returns:
tuple of (xs, ps)
"""
def interleave(a, b):
c = np.empty(a.shape[0] + b.shape[0])
c[::2] = a
c[1::2] = b
return c
a = np.array(self.xs)
xs = interleave(a, a)
shift_ps = np.roll(self.ps, 1)
shift_ps[0] = 0
ps = interleave(shift_ps, self.ps)
return xs, ps
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.Copy()
cdf.ps **= k
return cdf
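# A minimal usage sketch; `DemoCdf` is an illustrative name, not part of
# the published module. It queries a small empirical CDF.
def DemoCdf():
    """Checks Prob, Percentile and PercentileRank on a tiny sample."""
    cdf = Cdf([1, 2, 2, 3, 5])
    assert abs(cdf.Prob(2) - 0.6) < 1e-12      # 3 of 5 values are <= 2
    assert cdf.Percentile(60) == 2
    assert abs(cdf.PercentileRank(3) - 80.0) < 1e-9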
def MakeCdfFromItems(items, label=None):
"""Makes a cdf from an unsorted sequence of (value, frequency) pairs.
Args:
items: unsorted sequence of (value, frequency) pairs
label: string label for this CDF
Returns:
        Cdf object
"""
return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
"""Makes a CDF from a dictionary that maps values to frequencies.
Args:
d: dictionary that maps values to frequencies.
label: string label for the data.
Returns:
Cdf object
"""
return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
"""Creates a CDF from an unsorted sequence.
Args:
seq: unsorted sequence of sortable values
label: string label for the cdf
Returns:
Cdf object
"""
return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
"""Makes a CDF from a Hist object.
Args:
hist: Pmf.Hist object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = hist.label
return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
"""Makes a CDF from a Pmf object.
Args:
pmf: Pmf.Pmf object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = pmf.label
return Cdf(pmf, label=label)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
"""Represents a suite of hypotheses and their probabilities."""
def Update(self, data):
"""Updates each hypothesis based on the data.
data: any representation of the data
returns: the normalizing constant
"""
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdate(self, data):
"""Updates a suite of hypotheses based on new data.
Modifies the suite directly; if you want to keep the original, make
a copy.
Note: unlike Update, LogUpdate does not normalize.
Args:
data: any representation of the data
"""
for hypo in self.Values():
like = self.LogLikelihood(data, hypo)
self.Incr(hypo, like)
def UpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
This is more efficient than calling Update repeatedly because
it waits until the end to Normalize.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: the normalizing constant
"""
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: None
"""
for data in dataset:
self.LogUpdate(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def LogLikelihood(self, data, hypo):
"""Computes the log likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def Print(self):
"""Prints the hypotheses and their probabilities."""
for hypo, prob in sorted(self.Items()):
print(hypo, prob)
def MakeOdds(self):
"""Transforms from probabilities to odds.
Values with prob=0 are removed.
"""
for hypo, prob in self.Items():
if prob:
self.Set(hypo, Odds(prob))
else:
self.Remove(hypo)
def MakeProbs(self):
"""Transforms from odds to probabilities."""
for hypo, odds in self.Items():
self.Set(hypo, Probability(odds))
def MakeSuiteFromList(t, label=None):
"""Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this suite
Returns:
Suite object
"""
hist = MakeHistFromList(t, label=label)
d = hist.GetDict()
return MakeSuiteFromDict(d)
def MakeSuiteFromHist(hist, label=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Suite object
"""
if label is None:
label = hist.label
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, label)
def MakeSuiteFromDict(d, label=None):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this suite
Returns:
Suite object
"""
suite = Suite(label=label)
suite.SetDict(d)
suite.Normalize()
return suite
class Pdf(object):
"""Represents a probability density function (PDF)."""
def Density(self, x):
"""Evaluates this Pdf at x.
Returns: float or NumPy array of probability density
"""
raise UnimplementedMethodException()
def GetLinspace(self):
"""Get a linspace for plotting.
Not all subclasses of Pdf implement this.
Returns: numpy array
"""
raise UnimplementedMethodException()
def MakePmf(self, **options):
"""Makes a discrete version of this Pdf.
options can include
label: string
low: low end of range
high: high end of range
n: number of places to evaluate
Returns: new Pmf
"""
label = options.pop('label', '')
xs, ds = self.Render(**options)
return Pmf(dict(zip(xs, ds)), label=label)
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
If options includes low and high, it must also include n;
in that case the density is evaluated an n locations between
low and high, including both.
        If options includes xs, the density is evaluated at those locations.
Otherwise, self.GetLinspace is invoked to provide the locations.
Returns:
tuple of (xs, densities)
"""
low, high = options.pop('low', None), options.pop('high', None)
if low is not None and high is not None:
n = options.pop('n', 101)
xs = np.linspace(low, high, n)
else:
xs = options.pop('xs', None)
if xs is None:
xs = self.GetLinspace()
ds = self.Density(xs)
return xs, ds
def Items(self):
"""Generates a sequence of (value, probability) pairs.
"""
return zip(*self.Render())
class NormalPdf(Pdf):
"""Represents the PDF of a Normal distribution."""
def __init__(self, mu=0, sigma=1, label=None):
"""Constructs a Normal Pdf with given mu and sigma.
mu: mean
sigma: standard deviation
label: string
"""
self.mu = mu
self.sigma = sigma
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = self.mu-3*self.sigma, self.mu+3*self.sigma
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
"""Represents the PDF of an exponential distribution."""
def __init__(self, lam=1, label=None):
"""Constructs an exponential Pdf with given parameter.
lam: rate parameter
label: string
"""
self.lam = lam
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'ExponentialPdf(%f)' % (self.lam)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = 0, 5.0/self.lam
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return stats.expon.pdf(xs, scale=1.0/self.lam)
class EstimatedPdf(Pdf):
"""Represents a PDF estimated by KDE."""
def __init__(self, sample, label=None):
"""Estimates the density function based on a sample.
sample: sequence of data
label: string
"""
self.label = label if label is not None else '_nolegend_'
self.kde = stats.gaussian_kde(sample)
low = min(sample)
high = max(sample)
self.linspace = np.linspace(low, high, 101)
def __str__(self):
return 'EstimatedPdf(label=%s)' % str(self.label)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
return self.linspace
def Density(self, xs):
"""Evaluates this Pdf at xs.
returns: float or NumPy array of probability density
"""
return self.kde.evaluate(xs)
def CredibleInterval(pmf, percentage=90):
"""Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100.0) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
def PmfProbLess(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 < v2:
total += p1 * p2
return total
def PmfProbGreater(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 > v2:
total += p1 * p2
return total
def PmfProbEqual(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 == v2:
total += p1 * p2
return total
def RandomSum(dists):
"""Chooses a random value from each dist and returns the sum.
dists: sequence of Pmf or Cdf objects
returns: numerical sum
"""
total = sum(dist.Random() for dist in dists)
return total
def SampleSum(dists, n):
"""Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
"""
pmf = Pmf(RandomSum(dists) for i in range(n))
return pmf
def EvalNormalPdf(x, mu, sigma):
"""Computes the unnormalized PDF of the normal distribution.
x: value
mu: mean
sigma: standard deviation
returns: float probability density
"""
return stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial PMF.
    Returns the probability of k successes in n trials with probability p.
"""
return stats.binom.pmf(k, n, p)
def EvalHypergeomPmf(k, N, K, n):
"""Evaluates the hypergeometric PMF.
    Returns the probability of k successes in n draws, without
    replacement, from a population of size N that contains K successes.
"""
return stats.hypergeom.pmf(k, N, K, n)
def EvalPoissonPmf(k, lam):
"""Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
"""
# don't use the scipy function (yet). for lam=0 it returns NaN;
# should be 0.0
# return stats.poisson.pmf(k, lam)
return lam ** k * math.exp(-lam) / special.gamma(k+1)
def MakePoissonPmf(lam, high, step=1):
"""Makes a PMF discrete approx to a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for k in range(0, high + 1, step):
p = EvalPoissonPmf(k, lam)
pmf.Set(k, p)
pmf.Normalize()
return pmf
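# A minimal usage sketch; `DemoPoisson` is an illustrative name, not part
# of the published module. At k=0 the Poisson PMF reduces to exp(-lam).
def DemoPoisson():
    """Spot-checks EvalPoissonPmf and the normalization of MakePoissonPmf."""
    lam = 2.0
    assert abs(EvalPoissonPmf(0, lam) - math.exp(-lam)) < 1e-12
    pmf = MakePoissonPmf(lam, high=20)
    assert abs(pmf.Total() - 1.0) < 1e-12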
def EvalExponentialPdf(x, lam):
"""Computes the exponential PDF.
x: value
lam: parameter lambda in events per unit time
returns: float probability density
"""
return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
"""Evaluates CDF of the exponential distribution with parameter lam."""
return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in np.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def StandardNormalCdf(x):
"""Evaluates the CDF of the standard Normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution
#Cumulative_distribution_function
Args:
x: float
Returns:
float
"""
return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the normal distribution.
Args:
x: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.cdf(x, loc=mu, scale=sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return stats.norm.ppf(p, loc=mu, scale=sigma)
def EvalLognormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the lognormal distribution.
x: float or sequence
mu: mean parameter
sigma: standard deviation parameter
Returns: float or sequence
"""
return stats.lognorm.cdf(x, loc=mu, scale=sigma)
def RenderExpoCdf(lam, low, high, n=101):
"""Generates sequences of xs and ps for an exponential CDF.
lam: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = 1 - np.exp(-lam * xs)
#ps = stats.expon.cdf(xs, scale=1.0/lam)
return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
"""Generates sequences of xs and ps for a Normal CDF.
mu: parameter
sigma: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = stats.norm.cdf(xs, mu, sigma)
return xs, ps
def RenderParetoCdf(xmin, alpha, low, high, n=50):
"""Generates sequences of xs and ps for a Pareto CDF.
xmin: parameter
alpha: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
if low < xmin:
low = xmin
xs = np.linspace(low, high, n)
ps = 1 - (xs / xmin) ** -alpha
#ps = stats.pareto.cdf(xs, scale=xmin, b=alpha)
return xs, ps
class Beta(object):
"""Represents a Beta distribution.
See http://en.wikipedia.org/wiki/Beta_distribution
"""
def __init__(self, alpha=1, beta=1, label=None):
"""Initializes a Beta distribution."""
self.alpha = alpha
self.beta = beta
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
heads, tails = data
self.alpha += heads
self.beta += tails
def Mean(self):
"""Computes the mean of this distribution."""
return self.alpha / (self.alpha + self.beta)
def Random(self):
"""Generates a random variate from this distribution."""
return random.betavariate(self.alpha, self.beta)
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int sample size
"""
size = n,
return np.random.beta(self.alpha, self.beta, size)
def EvalPdf(self, x):
"""Evaluates the PDF at x."""
return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)
def MakePmf(self, steps=101, label=None):
"""Returns a Pmf of this distribution.
Note: Normally, we just evaluate the PDF at a sequence
of points and treat the probability density as a probability
mass.
But if alpha or beta is less than one, we have to be
more careful because the PDF goes to infinity at x=0
and x=1. In that case we evaluate the CDF and compute
differences.
"""
if self.alpha < 1 or self.beta < 1:
cdf = self.MakeCdf()
pmf = cdf.MakePmf()
return pmf
xs = [i / (steps - 1.0) for i in range(steps)]
probs = [self.EvalPdf(x) for x in xs]
pmf = Pmf(dict(zip(xs, probs)), label=label)
return pmf
def MakeCdf(self, steps=101):
"""Returns the CDF of this distribution."""
xs = [i / (steps - 1.0) for i in range(steps)]
ps = [special.betainc(self.alpha, self.beta, x) for x in xs]
cdf = Cdf(xs, ps)
return cdf
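# A minimal usage sketch; `DemoBeta` is an illustrative name, not part of
# the published module. Updating a flat Beta(1, 1) prior with 140 heads
# and 110 tails gives a posterior with mean (1+140)/(2+250).
def DemoBeta():
    """Checks the posterior mean after a conjugate update."""
    beta = Beta(1, 1)
    beta.Update((140, 110))
    assert abs(beta.Mean() - 141.0 / 252) < 1e-12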
class Dirichlet(object):
"""Represents a Dirichlet distribution.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
"""
def __init__(self, n, conc=1, label=None):
"""Initializes a Dirichlet distribution.
n: number of dimensions
conc: concentration parameter (smaller yields more concentration)
label: string label
"""
if n < 2:
raise ValueError('A Dirichlet distribution with '
'n<2 makes no sense')
self.n = n
self.params = np.ones(n, dtype=np.float) * conc
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data
def Random(self):
"""Generates a random variate from this distribution.
Returns: normalized vector of fractions
"""
p = np.random.gamma(self.params)
return p / p.sum()
def Likelihood(self, data):
"""Computes the likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float probability
"""
m = len(data)
if self.n < m:
return 0
x = data
p = self.Random()
q = p[:m] ** x
return q.prod()
def LogLikelihood(self, data):
"""Computes the log likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float log probability
"""
m = len(data)
if self.n < m:
return float('-inf')
x = self.Random()
y = np.log(x[:m]) * data
return y.sum()
def MarginalBeta(self, i):
"""Computes the marginal distribution of the ith element.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
#Marginal_distributions
i: int
Returns: Beta object
"""
alpha0 = self.params.sum()
alpha = self.params[i]
return Beta(alpha, alpha0 - alpha)
def PredictivePmf(self, xs, label=None):
"""Makes a predictive distribution.
xs: values to go into the Pmf
Returns: Pmf that maps from x to the mean prevalence of x
"""
alpha0 = self.params.sum()
ps = self.params / alpha0
return Pmf(zip(xs, ps), label=label)
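# A minimal usage sketch; `DemoDirichlet` is an illustrative name, not part
# of the published module. After updating a symmetric Dirichlet(1, 1, 1)
# with counts (2, 1, 0), the first marginal is Beta(3, 3) with mean 0.5.
def DemoDirichlet():
    """Checks a marginal of an updated Dirichlet."""
    dirichlet = Dirichlet(3)
    dirichlet.Update((2, 1, 0))
    marginal = dirichlet.MarginalBeta(0)
    assert abs(marginal.Mean() - 0.5) < 1e-12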
def BinomialCoef(n, k):
"""Compute the binomial coefficient "n choose k".
n: number of trials
k: number of successes
Returns: float
"""
    # scipy.misc.comb was deprecated and later removed; special.binom
    # computes the same binomial coefficient
    return special.binom(n, k)
def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0.0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
def Jitter(values, jitter=0.5):
"""Jitters the values by adding a uniform variate in (-jitter, jitter).
values: sequence
jitter: scalar magnitude of jitter
returns: new numpy array
"""
n = len(values)
return np.random.uniform(-jitter, +jitter, n) + values
def NormalProbabilityPlot(sample, fit_color='0.8', **options):
"""Makes a normal probability plot with a fitted line.
sample: sequence of numbers
fit_color: color string for the fitted line
options: passed along to Plot
"""
xs, ys = NormalProbability(sample)
mean, var = MeanVar(sample)
std = math.sqrt(var)
fit = FitLine(xs, mean, std)
thinkplot.Plot(*fit, color=fit_color, label='model')
xs, ys = NormalProbability(sample)
thinkplot.Plot(xs, ys, **options)
def Mean(xs):
"""Computes mean.
xs: sequence of values
returns: float mean
"""
return np.mean(xs)
def Var(xs, mu=None, ddof=0):
"""Computes variance.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
xs = np.asarray(xs)
if mu is None:
mu = xs.mean()
ds = xs - mu
return np.dot(ds, ds) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
"""Computes standard deviation.
xs: sequence of values
    mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
var = Var(xs, mu, ddof)
return math.sqrt(var)
def MeanVar(xs, ddof=0):
"""Computes mean and variance.
Based on http://stackoverflow.com/questions/19391149/
numpy-mean-and-variance-from-single-function
xs: sequence of values
ddof: delta degrees of freedom
returns: pair of float, mean and var
"""
xs = np.asarray(xs)
mean = xs.mean()
s2 = Var(xs, mean, ddof)
return mean, s2
def Trim(t, p=0.01):
"""Trims the largest and smallest elements of t.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
sequence of values
"""
n = int(p * len(t))
t = sorted(t)[n:-n]
return t
def TrimmedMean(t, p=0.01):
"""Computes the trimmed mean of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
return Mean(t)
def TrimmedMeanVar(t, p=0.01):
"""Computes the trimmed mean and variance of a sequence of numbers.
Side effect: sorts the list.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
mu, var = MeanVar(t)
return mu, var
def CohenEffectSize(group1, group2):
"""Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
"""
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / math.sqrt(pooled_var)
return d
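# A minimal usage sketch; `DemoCohenEffectSize` is an illustrative name,
# not part of the published module. The groups below have unit pooled
# variance and means one apart, so d = 1.
def DemoCohenEffectSize():
    """Checks Cohen's d on a hand-computable pair of groups."""
    group1 = np.array([-1.0, 1.0])
    group2 = np.array([0.0, 2.0])
    assert abs(CohenEffectSize(group2, group1) - 1.0) < 1e-12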
def Cov(xs, ys, meanx=None, meany=None):
"""Computes Cov(X, Y).
Args:
xs: sequence of values
ys: sequence of values
meanx: optional float mean of xs
meany: optional float mean of ys
Returns:
Cov(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
if meanx is None:
meanx = np.mean(xs)
if meany is None:
meany = np.mean(ys)
cov = np.dot(xs-meanx, ys-meany) / len(xs)
return cov
def Corr(xs, ys):
"""Computes Corr(X, Y).
Args:
xs: sequence of values
ys: sequence of values
Returns:
Corr(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = MeanVar(xs)
meany, vary = MeanVar(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
def SerialCorr(series, lag=1):
"""Computes the serial correlation of a series.
series: Series
lag: integer number of intervals to shift
returns: float correlation
"""
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
def SpearmanCorr(xs, ys):
"""Computes Spearman's rank correlation.
Args:
xs: sequence of values
ys: sequence of values
Returns:
float Spearman's correlation
"""
xranks = pandas.Series(xs).rank()
yranks = pandas.Series(ys).rank()
return Corr(xranks, yranks)
def MapToRanks(t):
"""Returns a list of ranks corresponding to the elements in t.
Args:
t: sequence of numbers
Returns:
list of integer ranks, starting at 1
"""
# pair up each value with its index
pairs = enumerate(t)
# sort by value
sorted_pairs = sorted(pairs, key=itemgetter(1))
# pair up each pair with its rank
ranked = enumerate(sorted_pairs)
# sort by index
resorted = sorted(ranked, key=lambda trip: trip[1][0])
# extract the ranks
ranks = [trip[0]+1 for trip in resorted]
return ranks
def LeastSquares(xs, ys):
"""Computes a linear least squares fit for ys as a function of xs.
Args:
xs: sequence of values
ys: sequence of values
Returns:
tuple of (intercept, slope)
"""
meanx, varx = MeanVar(xs)
meany = Mean(ys)
slope = Cov(xs, ys, meanx, meany) / varx
inter = meany - slope * meanx
return inter, slope
def FitLine(xs, inter, slope):
"""Fits a line to the given data.
xs: sequence of x
returns: tuple of numpy arrays (sorted xs, fit ys)
"""
fit_xs = np.sort(xs)
fit_ys = inter + slope * fit_xs
return fit_xs, fit_ys
def Residuals(xs, ys, inter, slope):
"""Computes residuals for a linear fit with parameters inter and slope.
Args:
xs: independent variable
ys: dependent variable
inter: float intercept
slope: float slope
Returns:
list of residuals
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
res = ys - (inter + slope * xs)
return res
def CoefDetermination(ys, res):
"""Computes the coefficient of determination (R^2) for given residuals.
Args:
ys: dependent variable
res: residuals
Returns:
float coefficient of determination
"""
return 1 - Var(res) / Var(ys)
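# A minimal usage sketch; `DemoLeastSquares` is an illustrative name, not
# part of the published module. Exactly linear data is recovered with zero
# residuals and R^2 = 1.
def DemoLeastSquares():
    """Checks LeastSquares, Residuals and CoefDetermination together."""
    xs = np.array([1.0, 2.0, 3.0, 4.0])
    ys = 2.0 * xs + 1.0
    inter, slope = LeastSquares(xs, ys)
    assert abs(inter - 1.0) < 1e-9 and abs(slope - 2.0) < 1e-9
    res = Residuals(xs, ys, inter, slope)
    assert abs(CoefDetermination(ys, res) - 1.0) < 1e-9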
def CorrelatedGenerator(rho):
"""Generates standard normal variates with serial correlation.
rho: target coefficient of correlation
Returns: iterable
"""
x = random.gauss(0, 1)
yield x
sigma = math.sqrt(1 - rho**2)
while True:
x = random.gauss(x * rho, sigma)
yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
"""Generates normal variates with serial correlation.
mu: mean of variate
sigma: standard deviation of variate
rho: target coefficient of correlation
Returns: iterable
"""
for x in CorrelatedGenerator(rho):
yield x * sigma + mu
def RawMoment(xs, k):
"""Computes the kth raw moment of xs.
"""
return sum(x**k for x in xs) / len(xs)
def CentralMoment(xs, k):
"""Computes the kth central moment of xs.
"""
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
"""Computes the kth standardized moment of xs.
"""
var = CentralMoment(xs, 2)
std = math.sqrt(var)
return CentralMoment(xs, k) / std**k
def Skewness(xs):
"""Computes skewness.
"""
return StandardizedMoment(xs, 3)
def Median(xs):
"""Computes the median (50th percentile) of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: float
"""
cdf = Cdf(xs)
return cdf.Value(0.5)
def IQR(xs):
"""Computes the interquartile of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: pair of floats
"""
cdf = Cdf(xs)
return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
"""Computes the Pearson median skewness.
"""
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = math.sqrt(var)
gp = 3 * (mean - median) / std
return gp
class FixedWidthVariables(object):
"""Represents a set of variables in a fixed width file."""
def __init__(self, variables, index_base=0):
"""Initializes.
variables: DataFrame
index_base: are the indices 0 or 1 based?
Attributes:
colspecs: list of (start, end) index tuples
names: list of string variable names
"""
self.variables = variables
# note: by default, subtract 1 from colspecs
self.colspecs = variables[['start', 'end']] - index_base
# convert colspecs to a list of pair of int
self.colspecs = self.colspecs.astype(np.int).values.tolist()
self.names = variables['name']
def ReadFixedWidth(self, filename, **options):
"""Reads a fixed width ASCII file.
filename: string filename
returns: DataFrame
"""
df = pandas.read_fwf(filename,
colspecs=self.colspecs,
names=self.names,
**options)
return df
def ReadStataDct(dct_file, **options):
"""Reads a Stata dictionary file.
dct_file: string filename
options: dict of options passed to open()
returns: FixedWidthVariables object
"""
type_map = dict(byte=int, int=int, long=int, float=float, double=float)
var_info = []
for line in open(dct_file, **options):
match = re.search( r'_column\(([^)]*)\)', line)
if match:
start = int(match.group(1))
t = line.split()
vtype, name, fstring = t[1:4]
name = name.lower()
if vtype.startswith('str'):
vtype = str
else:
vtype = type_map[vtype]
long_desc = ' '.join(t[4:]).strip('"')
var_info.append((start, vtype, name, fstring, long_desc))
columns = ['start', 'type', 'name', 'fstring', 'desc']
variables = pandas.DataFrame(var_info, columns=columns)
# fill in the end column by shifting the start column
variables['end'] = variables.start.shift(-1)
variables.loc[len(variables)-1, 'end'] = 0
dct = FixedWidthVariables(variables, index_base=1)
return dct
def Resample(xs, n=None):
"""Draw a sample from xs with the same length as xs.
xs: sequence
n: sample size (default: len(xs))
returns: NumPy array
"""
if n is None:
n = len(xs)
return np.random.choice(xs, n, replace=True)
def SampleRows(df, nrows, replace=False):
"""Choose a sample of rows from a DataFrame.
df: DataFrame
nrows: number of rows
replace: whether to sample with replacement
    returns: DataFrame
"""
indices = np.random.choice(df.index, nrows, replace=replace)
sample = df.loc[indices]
return sample
def ResampleRows(df):
"""Resamples rows from a DataFrame.
df: DataFrame
returns: DataFrame
"""
return SampleRows(df, len(df), replace=True)
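# A minimal usage sketch; `DemoResampleRows` is an illustrative name, not
# part of the published module. Bootstrap resampling preserves the number
# of rows.
def DemoResampleRows():
    """Checks that a bootstrap sample has the original length."""
    df = pandas.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6]})
    sample = ResampleRows(df)
    assert len(sample) == len(df)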
def ResampleRowsWeighted(df, column='finalwgt'):
"""Resamples a DataFrame using probabilities proportional to given column.
df: DataFrame
column: string column name to use as weights
returns: DataFrame
"""
weights = df[column]
cdf = Cdf(dict(weights))
indices = cdf.Sample(len(weights))
sample = df.loc[indices]
return sample
def PercentileRow(array, p):
"""Selects the row from a sorted array that maps to percentile p.
p: float 0--100
returns: NumPy array (one row)
"""
rows, cols = array.shape
index = int(rows * p / 100)
return array[index,]
def PercentileRows(ys_seq, percents):
"""Given a collection of lines, selects percentiles along vertical axis.
For example, if ys_seq contains simulation results like ys as a
function of time, and percents contains (5, 95), the result would
be a 90% CI for each vertical slice of the simulation results.
ys_seq: sequence of lines (y values)
percents: list of percentiles (0-100) to select
returns: list of NumPy arrays, one for each percentile
"""
nrows = len(ys_seq)
ncols = len(ys_seq[0])
array = np.zeros((nrows, ncols))
for i, ys in enumerate(ys_seq):
array[i,] = ys
array = np.sort(array, axis=0)
rows = [PercentileRow(array, p) for p in percents]
return rows
def Smooth(xs, sigma=2, **options):
"""Smooths a NumPy array with a Gaussian filter.
xs: sequence
sigma: standard deviation of the filter
"""
return ndimage.filters.gaussian_filter1d(xs, sigma, **options)
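# A minimal usage sketch; `DemoSmooth` is an illustrative name, not part of
# the published module. Gaussian smoothing of a unit impulse preserves its
# total mass away from the array edges.
def DemoSmooth():
    """Checks that the smoothed impulse still sums to one."""
    xs = np.zeros(11)
    xs[5] = 1.0
    ys = Smooth(xs, sigma=1)
    assert abs(ys.sum() - 1.0) < 1e-6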
class HypothesisTest(object):
"""Represents a hypothesis test."""
def __init__(self, data):
"""Initializes.
data: data in whatever form is relevant
"""
self.data = data
self.MakeModel()
self.actual = self.TestStatistic(data)
self.test_stats = None
self.test_cdf = None
def PValue(self, iters=1000):
"""Computes the distribution of the test statistic and p-value.
iters: number of iterations
returns: float p-value
"""
self.test_stats = [self.TestStatistic(self.RunModel())
for _ in range(iters)]
self.test_cdf = Cdf(self.test_stats)
count = sum(1 for x in self.test_stats if x >= self.actual)
return count / iters
def MaxTestStat(self):
"""Returns the largest test statistic seen during simulations.
"""
return max(self.test_stats)
def PlotCdf(self, label=None):
"""Draws a Cdf with vertical lines at the observed test stat.
"""
def VertLine(x):
"""Draws a vertical line at x."""
thinkplot.Plot([x, x], [0, 1], color='0.8')
VertLine(self.actual)
thinkplot.Cdf(self.test_cdf, label=label)
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
raise UnimplementedMethodException()
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
pass
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
raise UnimplementedMethodException()
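# A minimal concrete subclass sketch; `DiffMeansPermute` follows the
# pattern of the accompanying book examples but is not itself part of this
# module. It assumes data is a pair of NumPy arrays and tests a difference
# in group means by permutation:
#     ht = DiffMeansPermute((group1, group2))
#     pvalue = ht.PValue(iters=1000)
class DiffMeansPermute(HypothesisTest):
    """Tests a difference in group means by permutation."""
    def TestStatistic(self, data):
        """Absolute difference in group means."""
        group1, group2 = data
        return abs(group1.mean() - group2.mean())
    def MakeModel(self):
        """Pools the two groups under the null hypothesis."""
        group1, group2 = self.data
        self.n, self.m = len(group1), len(group2)
        self.pool = np.hstack((group1, group2))
    def RunModel(self):
        """Shuffles the pool and splits it into two new groups."""
        np.random.shuffle(self.pool)
        return self.pool[:self.n], self.pool[self.n:]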
def main():
pass
if __name__ == '__main__':
main()
| gpl-3.0 | 5,465,355,086,111,057,000 | -2,000,767,162,033,916,000 | 23.571582 | 79 | 0.57793 | false |
paulorauber/nn | examples/rnn.py | 1 | 2389 | import numpy as np
from sklearn.utils import check_random_state
from nn.model.recurrent import RecurrentNetwork
random_state = check_random_state(None)
def nback(n, k, length):
"""Random n-back targets given n, number of digits k and sequence length"""
Xi = random_state.randint(k, size=length)
yi = np.zeros(length, dtype=int)
for t in range(n, length):
yi[t] = (Xi[t - n] == Xi[t])
return Xi, yi
def one_of_k(Xi_, k):
Xi = np.zeros((len(Xi_), k))
for t, Xit in np.ndenumerate(Xi_):
Xi[t, Xit] = 1
return Xi
def nback_dataset(n_sequences, mean_length, std_length, n, k):
X, y = [], []
for _ in range(n_sequences):
length = random_state.normal(loc=mean_length, scale=std_length)
length = int(max(n + 1, length))
Xi_, yi = nback(n, k, length)
Xi = one_of_k(Xi_, k)
X.append(Xi)
y.append(yi)
return X, y
def nback_example():
# Input dimension
k = 4
# n-back
n = 3
n_sequences = 100
mean_length = 20
std_length = 5
# Training
Xtrain, ytrain = nback_dataset(n_sequences, mean_length, std_length, n, k)
rnn = RecurrentNetwork(64, learning_rate=2.0, n_epochs=30,
lmbda=0.0, mu=0.2, output_activation='softmax',
random_state=None, verbose=1)
rnn.fit(Xtrain, ytrain)
# Evaluating
Xtest, ytest = nback_dataset(5*n_sequences, 5*mean_length, 5*std_length, n, k)
print('Average accuracy: {0:.3f}'.format(rnn.score(Xtest, ytest)))
acc_zeros = 0.0
for yi in ytest:
acc_zeros += float((yi == 0).sum()) / len(yi)
acc_zeros /= len(ytest)
print('Negative guess accuracy: {0:.3f}'.format(acc_zeros))
# Example
Xi_ = [3, 2, 1, 3, 2, 1, 3, 2, 2, 1, 2, 3, 1, 2, 0, 0, 2, 0]
print('\nExample sequence: {0}'.format(Xi_))
yi = np.zeros(len(Xi_), dtype=int)
for t in range(n, len(Xi_)):
yi[t] = (Xi_[t - n] == Xi_[t])
Xi = one_of_k(Xi_, k)
yipred = rnn.predict([Xi])[0]
print('Correct: \t{0}'.format(yi))
print('Predicted: \t{0}'.format(yipred))
print('Accuracy: {0:.3f}'.format(float((yi == yipred).sum())/len(yi)))
def main():
nback_example()
if __name__ == "__main__":
    main()
| mit | 6,992,350,974,806,303,000 | 8,896,529,668,799,366,000 | 26.159091 | 82 | 0.537045 | false |
MoamerEncsConcordiaCa/tensorflow | tensorflow/contrib/ndlstm/python/lstm1d_test.py | 94 | 4122 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for 1D LSTM."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.ndlstm.python import lstm1d as lstm1d_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
lstm1d = lstm1d_lib
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class Lstm1DTest(test.TestCase):
def testSequenceToSequenceDims(self):
with self.test_session():
inputs = constant_op.constant(_rand(17, 1, 5))
outputs = lstm1d.ndlstm_base(inputs, 8)
variables.global_variables_initializer().run()
names = [v.name for v in variables.trainable_variables()]
self.assertEqual(len(names), 2)
result = outputs.eval()
self.assertEqual(tuple(result.shape), (17, 1, 8))
def testSequenceToSequenceGradient(self):
with self.test_session():
size = (17, 1, 15)
output_size = (17, 1, 8)
inputs = constant_op.constant(_rand(*size))
outputs = lstm1d.ndlstm_base(inputs, 8, dynamic=False)
variables.global_variables_initializer().run()
gradients = gradients_impl.gradients(outputs, inputs)
if 1: # pylint: disable=using-constant-test
gradients = gradients_impl.gradients(outputs, inputs)[0].eval()
self.assertEqual(gradients.shape, size)
else:
# TODO(tmb) tf.test.compute_gradient error is currently broken
# with dynamic_rnn. Enable this test case eventually.
err = gradient_checker.compute_gradient_error(
inputs, size, outputs, output_size, delta=1e-4)
self.assert_(not np.isnan(err))
self.assert_(err < 0.1)
def testSequenceToSequenceGradientReverse(self):
with self.test_session():
size = (17, 1, 15)
output_size = (17, 1, 8)
inputs = constant_op.constant(_rand(*size))
outputs = lstm1d.ndlstm_base(inputs, 8, reverse=1, dynamic=False)
variables.global_variables_initializer().run()
if 1: # pylint: disable=using-constant-test
gradients = gradients_impl.gradients(outputs, inputs)[0].eval()
self.assertEqual(gradients.shape, size)
else:
# TODO(tmb) tf.test.compute_gradient error is currently broken
# with dynamic_rnn. Enable this test case eventually.
err = gradient_checker.compute_gradient_error(
inputs, size, outputs, output_size, delta=1e-4)
self.assert_(not np.isnan(err))
self.assert_(err < 0.1)
def testSequenceToFinalDims(self):
with self.test_session():
inputs = constant_op.constant(_rand(17, 6, 5))
outputs = lstm1d.sequence_to_final(inputs, 8)
variables.global_variables_initializer().run()
names = [v.name for v in variables.trainable_variables()]
self.assertEqual(len(names), 2)
result = outputs.eval()
self.assertEqual(tuple(result.shape), (6, 8))
def testSequenceSoftmaxDims(self):
with self.test_session():
inputs = constant_op.constant(_rand(17, 1, 5))
outputs = lstm1d.sequence_softmax(inputs, 8)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (17, 1, 8))
if __name__ == "__main__":
test.main()
| apache-2.0 | 2,767,589,542,292,292,000 | 877,747,860,396,380,900 | 37.886792 | 80 | 0.674672 | false |
cldershem/osf.io | website/mailchimp_utils.py | 16 | 2747 | # -*- coding: utf-8 -*-
import mailchimp
from framework import sentry
from framework.tasks import app
from framework.auth.core import User
from framework.tasks.handlers import queued_task
from framework.auth.signals import user_confirmed
from framework.transactions.context import transaction
from website import settings
def get_mailchimp_api():
if not settings.MAILCHIMP_API_KEY:
raise RuntimeError("An API key is required to connect to Mailchimp.")
return mailchimp.Mailchimp(settings.MAILCHIMP_API_KEY)
def get_list_id_from_name(list_name):
m = get_mailchimp_api()
mailing_list = m.lists.list(filters={'list_name': list_name})
return mailing_list['data'][0]['id']
def get_list_name_from_id(list_id):
m = get_mailchimp_api()
mailing_list = m.lists.list(filters={'list_id': list_id})
return mailing_list['data'][0]['name']
@queued_task
@app.task
@transaction()
def subscribe_mailchimp(list_name, user_id):
user = User.load(user_id)
m = get_mailchimp_api()
list_id = get_list_id_from_name(list_name=list_name)
if user.mailing_lists is None:
user.mailing_lists = {}
try:
m.lists.subscribe(
id=list_id,
email={'email': user.username},
merge_vars={
'fname': user.given_name,
'lname': user.family_name,
},
double_optin=False,
update_existing=True,
)
except mailchimp.ValidationError as error:
sentry.log_exception()
sentry.log_message(error.message)
user.mailing_lists[list_name] = False
else:
user.mailing_lists[list_name] = True
finally:
user.save()
@queued_task
@app.task
@transaction()
def unsubscribe_mailchimp(list_name, user_id, username=None):
"""Unsubscribe a user from a mailchimp mailing list given its name.
:param str list_name: mailchimp mailing list name
:param str user_id: current user's id
:param str username: current user's email (required for merged users)
:raises: ListNotSubscribed if user not already subscribed
"""
user = User.load(user_id)
m = get_mailchimp_api()
list_id = get_list_id_from_name(list_name=list_name)
m.lists.unsubscribe(id=list_id, email={'email': username or user.username})
# Update mailing_list user field
if user.mailing_lists is None:
user.mailing_lists = {}
user.save()
user.mailing_lists[list_name] = False
user.save()
@user_confirmed.connect
def subscribe_on_confirm(user):
# Subscribe user to general OSF mailing list upon account confirmation
if settings.ENABLE_EMAIL_SUBSCRIPTIONS:
subscribe_mailchimp(settings.MAILCHIMP_GENERAL_LIST, user._id)
| apache-2.0 | 8,075,666,599,617,124,000 | 7,545,560,311,674,361,000 | 27.319588 | 79 | 0.666545 | false |
k4cg/Rezeptionistin | plugins/temperature.py | 1 | 1851 | # coding: utf8
import socket
import urllib2
import json
import ConfigParser
from plugin import Plugin
class Temperature(Plugin):
    def __init__(self, config=None):
        try:
            self.wunderground = config.get('Temperature', 'wunderground')
        except (AttributeError, ConfigParser.NoSectionError, ConfigParser.NoOptionError):
            # AttributeError covers the default config=None case.
            print "Temperature was not properly configured in your config.ini"
super(Temperature, self).__init__()
def help_text(self, bot):
return bot.translate("temp_help")
def get_indoor_temp(self, bot):
msg = bot.get_spacestatus_data()
if msg is None:
return None
else:
return str(msg['temperature'])
def get_outdoor_temp(self, bot):
f = urllib2.urlopen(self.wunderground)
json_string = f.read()
parsed_json = json.loads(json_string)
temp_outdoor = parsed_json['current_observation']['temp_c']
f.close()
return temp_outdoor
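
    # The code reads parsed_json['current_observation']['temp_c'], i.e. the
    # response is expected to look roughly like (abbreviated):
    #   {"current_observation": {"temp_c": 21.3, ...}}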
def on_msg(self, bot, user_nick, host, channel, message):
if message.lower().startswith('!kt'):
temp = self.get_indoor_temp(bot)
temp_outdoor = self.get_outdoor_temp(bot)
if temp is not None:
bot.send_message(channel, bot.translate("temp_str1").format(temp=temp) + " " + bot.translate("temp_str2").format(temp=temp_outdoor), user_nick)
else:
bot.send_message(channel, bot.translate("temp_str3").format(temp=temp_outdoor), user_nick)
def on_privmsg(self, bot, user_nick, host, message):
if message.lower().startswith('!kt'):
temp = self.get_indoor_temp(bot)
temp_outdoor = self.get_outdoor_temp(bot)
if temp is not None:
bot.send_message(user_nick, bot.translate("temp_str1").format(temp=temp) + " " + bot.translate("temp_str2").format(temp=temp_outdoor), user_nick)
else:
bot.send_message(user_nick, bot.translate("temp_str3").format(temp=temp_outdoor), user_nick)
| mit | -1,125,503,445,131,092,200 | -1,903,285,714,075,200,300 | 33.277778 | 155 | 0.666126 | false |
blockc/fabric | bddtests/peer/configuration_pb2.py | 17 | 4136 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: peer/configuration.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='peer/configuration.proto',
package='protos',
syntax='proto3',
serialized_pb=_b('\n\x18peer/configuration.proto\x12\x06protos\"7\n\x0b\x41nchorPeers\x12(\n\x0c\x61nchor_peers\x18\x01 \x03(\x0b\x32\x12.protos.AnchorPeer\"(\n\nAnchorPeer\x12\x0c\n\x04host\x18\x01 \x01(\t\x12\x0c\n\x04port\x18\x02 \x01(\x05\x42O\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peerb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ANCHORPEERS = _descriptor.Descriptor(
name='AnchorPeers',
full_name='protos.AnchorPeers',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='anchor_peers', full_name='protos.AnchorPeers.anchor_peers', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=36,
serialized_end=91,
)
_ANCHORPEER = _descriptor.Descriptor(
name='AnchorPeer',
full_name='protos.AnchorPeer',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='host', full_name='protos.AnchorPeer.host', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='port', full_name='protos.AnchorPeer.port', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=133,
)
_ANCHORPEERS.fields_by_name['anchor_peers'].message_type = _ANCHORPEER
DESCRIPTOR.message_types_by_name['AnchorPeers'] = _ANCHORPEERS
DESCRIPTOR.message_types_by_name['AnchorPeer'] = _ANCHORPEER
AnchorPeers = _reflection.GeneratedProtocolMessageType('AnchorPeers', (_message.Message,), dict(
DESCRIPTOR = _ANCHORPEERS,
__module__ = 'peer.configuration_pb2'
# @@protoc_insertion_point(class_scope:protos.AnchorPeers)
))
_sym_db.RegisterMessage(AnchorPeers)
AnchorPeer = _reflection.GeneratedProtocolMessageType('AnchorPeer', (_message.Message,), dict(
DESCRIPTOR = _ANCHORPEER,
__module__ = 'peer.configuration_pb2'
# @@protoc_insertion_point(class_scope:protos.AnchorPeer)
))
_sym_db.RegisterMessage(AnchorPeer)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\"org.hyperledger.fabric.protos.peerZ)github.com/hyperledger/fabric/protos/peer'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| apache-2.0 | 4,471,884,405,920,925,700 | -6,347,734,211,104,626,000 | 31.3125 | 343 | 0.725822 | false |
sbidoul/pip | tests/unit/test_locations.py | 4 | 5324 | """
locations.py tests
"""
import getpass
import os
import shutil
import sys
import tempfile
from unittest.mock import Mock
import pytest
from pip._internal.locations import SCHEME_KEYS, get_scheme
if sys.platform == 'win32':
pwd = Mock()
else:
import pwd
def _get_scheme_dict(*args, **kwargs):
scheme = get_scheme(*args, **kwargs)
return {k: getattr(scheme, k) for k in SCHEME_KEYS}
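
# e.g. _get_scheme_dict('example') maps each key in SCHEME_KEYS (platlib,
# purelib, headers, scripts, data) to the corresponding install path.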
class TestLocations:
def setup(self):
self.tempdir = tempfile.mkdtemp()
self.st_uid = 9999
self.username = "example"
self.patch()
def teardown(self):
self.revert_patch()
shutil.rmtree(self.tempdir, ignore_errors=True)
def patch(self):
""" first store and then patch python methods pythons """
self.tempfile_gettempdir = tempfile.gettempdir
self.old_os_fstat = os.fstat
if sys.platform != 'win32':
# os.geteuid and pwd.getpwuid are not implemented on windows
self.old_os_geteuid = os.geteuid
self.old_pwd_getpwuid = pwd.getpwuid
self.old_getpass_getuser = getpass.getuser
# now patch
tempfile.gettempdir = lambda: self.tempdir
getpass.getuser = lambda: self.username
os.geteuid = lambda: self.st_uid
os.fstat = lambda fd: self.get_mock_fstat(fd)
if sys.platform != 'win32':
pwd.getpwuid = lambda uid: self.get_mock_getpwuid(uid)
def revert_patch(self):
""" revert the patches to python methods """
tempfile.gettempdir = self.tempfile_gettempdir
getpass.getuser = self.old_getpass_getuser
if sys.platform != 'win32':
# os.geteuid and pwd.getpwuid are not implemented on windows
os.geteuid = self.old_os_geteuid
pwd.getpwuid = self.old_pwd_getpwuid
os.fstat = self.old_os_fstat
def get_mock_fstat(self, fd):
""" returns a basic mock fstat call result.
Currently only the st_uid attribute has been set.
"""
result = Mock()
result.st_uid = self.st_uid
return result
def get_mock_getpwuid(self, uid):
""" returns a basic mock pwd.getpwuid call result.
Currently only the pw_name attribute has been set.
"""
result = Mock()
result.pw_name = self.username
return result
class TestDistutilsScheme:
def test_root_modifies_appropriately(self, monkeypatch):
# This deals with nt/posix path differences
# root is c:\somewhere\else or /somewhere/else
root = os.path.normcase(os.path.abspath(
os.path.join(os.path.sep, 'somewhere', 'else')))
norm_scheme = _get_scheme_dict("example")
root_scheme = _get_scheme_dict("example", root=root)
for key, value in norm_scheme.items():
drive, path = os.path.splitdrive(os.path.abspath(value))
expected = os.path.join(root, path[1:])
assert os.path.abspath(root_scheme[key]) == expected
@pytest.mark.incompatible_with_venv
def test_distutils_config_file_read(self, tmpdir, monkeypatch):
# This deals with nt/posix path differences
install_scripts = os.path.normcase(os.path.abspath(
os.path.join(os.path.sep, 'somewhere', 'else')))
f = tmpdir / "config" / "setup.cfg"
f.parent.mkdir()
f.write_text("[install]\ninstall-scripts=" + install_scripts)
from distutils.dist import Distribution
# patch the function that returns what config files are present
monkeypatch.setattr(
Distribution,
'find_config_files',
lambda self: [f],
)
scheme = _get_scheme_dict('example')
assert scheme['scripts'] == install_scripts
@pytest.mark.incompatible_with_venv
# when we request install-lib, we should install everything (.py &
# .so) into that path; i.e. ensure platlib & purelib are set to
# this path
def test_install_lib_takes_precedence(self, tmpdir, monkeypatch):
# This deals with nt/posix path differences
install_lib = os.path.normcase(os.path.abspath(
os.path.join(os.path.sep, 'somewhere', 'else')))
f = tmpdir / "config" / "setup.cfg"
f.parent.mkdir()
f.write_text("[install]\ninstall-lib=" + install_lib)
from distutils.dist import Distribution
# patch the function that returns what config files are present
monkeypatch.setattr(
Distribution,
'find_config_files',
lambda self: [f],
)
scheme = _get_scheme_dict('example')
assert scheme['platlib'] == install_lib + os.path.sep
assert scheme['purelib'] == install_lib + os.path.sep
def test_prefix_modifies_appropriately(self):
prefix = os.path.abspath(os.path.join('somewhere', 'else'))
normal_scheme = _get_scheme_dict("example")
prefix_scheme = _get_scheme_dict("example", prefix=prefix)
def _calculate_expected(value):
path = os.path.join(prefix, os.path.relpath(value, sys.prefix))
return os.path.normpath(path)
expected = {
k: _calculate_expected(v)
for k, v in normal_scheme.items()
}
assert prefix_scheme == expected
| mit | -3,910,305,415,129,260,500 | 6,997,475,339,036,668,000 | 33.348387 | 75 | 0.61589 | false |
2013Commons/HUE-SHARK | desktop/core/ext-py/Django-1.2.3/django/contrib/messages/storage/base.py | 399 | 6134 | from django.conf import settings
from django.utils.encoding import force_unicode, StrAndUnicode
from django.contrib.messages import constants, utils
LEVEL_TAGS = utils.get_level_tags()
class Message(StrAndUnicode):
"""
Represents an actual message that can be stored in any of the supported
storage classes (typically session- or cookie-based) and rendered in a view
or template.
"""
def __init__(self, level, message, extra_tags=None):
self.level = int(level)
self.message = message
self.extra_tags = extra_tags
def _prepare(self):
"""
Prepares the message for serialization by forcing the ``message``
and ``extra_tags`` to unicode in case they are lazy translations.
Known "safe" types (None, int, etc.) are not converted (see Django's
``force_unicode`` implementation for details).
"""
self.message = force_unicode(self.message, strings_only=True)
self.extra_tags = force_unicode(self.extra_tags, strings_only=True)
def __eq__(self, other):
return isinstance(other, Message) and self.level == other.level and \
self.message == other.message
def __unicode__(self):
return force_unicode(self.message)
def _get_tags(self):
label_tag = force_unicode(LEVEL_TAGS.get(self.level, ''),
strings_only=True)
extra_tags = force_unicode(self.extra_tags, strings_only=True)
if extra_tags and label_tag:
return u' '.join([extra_tags, label_tag])
elif extra_tags:
return extra_tags
elif label_tag:
return label_tag
return ''
tags = property(_get_tags)
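
# e.g. with the default LEVEL_TAGS, Message(constants.INFO, 'Saved.',
# extra_tags='urgent').tags evaluates to u'urgent info' -- extra tags come
# first, then the level's label.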
class BaseStorage(object):
"""
This is the base backend for temporary message storage.
This is not a complete class; to be a usable storage backend, it must be
subclassed and the two methods ``_get`` and ``_store`` overridden.
"""
def __init__(self, request, *args, **kwargs):
self.request = request
self._queued_messages = []
self.used = False
self.added_new = False
super(BaseStorage, self).__init__(*args, **kwargs)
def __len__(self):
return len(self._loaded_messages) + len(self._queued_messages)
def __iter__(self):
self.used = True
if self._queued_messages:
self._loaded_messages.extend(self._queued_messages)
self._queued_messages = []
return iter(self._loaded_messages)
def __contains__(self, item):
return item in self._loaded_messages or item in self._queued_messages
@property
def _loaded_messages(self):
"""
Returns a list of loaded messages, retrieving them first if they have
not been loaded yet.
"""
if not hasattr(self, '_loaded_data'):
messages, all_retrieved = self._get()
self._loaded_data = messages or []
return self._loaded_data
def _get(self, *args, **kwargs):
"""
Retrieves a list of stored messages. Returns a tuple of the messages
and a flag indicating whether or not all the messages originally
intended to be stored in this storage were, in fact, stored and
retrieved; e.g., ``(messages, all_retrieved)``.
**This method must be implemented by a subclass.**
If it is possible to tell if the backend was not used (as opposed to
just containing no messages) then ``None`` should be returned in
place of ``messages``.
"""
raise NotImplementedError()
def _store(self, messages, response, *args, **kwargs):
"""
Stores a list of messages, returning a list of any messages which could
not be stored.
One type of object must be able to be stored, ``Message``.
**This method must be implemented by a subclass.**
"""
raise NotImplementedError()
def _prepare_messages(self, messages):
"""
Prepares a list of messages for storage.
"""
for message in messages:
message._prepare()
def update(self, response):
"""
Stores all unread messages.
If the backend has yet to be iterated, previously stored messages will
be stored again. Otherwise, only messages added after the last
iteration will be stored.
"""
self._prepare_messages(self._queued_messages)
if self.used:
return self._store(self._queued_messages, response)
elif self.added_new:
messages = self._loaded_messages + self._queued_messages
return self._store(messages, response)
def add(self, level, message, extra_tags=''):
"""
Queues a message to be stored.
The message is only queued if it contained something and its level is
not less than the recording level (``self.level``).
"""
if not message:
return
# Check that the message level is not less than the recording level.
level = int(level)
if level < self.level:
return
# Add the message.
self.added_new = True
message = Message(level, message, extra_tags=extra_tags)
self._queued_messages.append(message)
def _get_level(self):
"""
Returns the minimum recorded level.
The default level is the ``MESSAGE_LEVEL`` setting. If this is
not found, the ``INFO`` level is used.
"""
if not hasattr(self, '_level'):
self._level = getattr(settings, 'MESSAGE_LEVEL', constants.INFO)
return self._level
def _set_level(self, value=None):
"""
Sets a custom minimum recorded level.
If set to ``None``, the default level will be used (see the
``_get_level`` method).
"""
if value is None and hasattr(self, '_level'):
del self._level
else:
self._level = int(value)
level = property(_get_level, _set_level, _set_level)
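
# A minimal sketch of a concrete backend (illustrative only; the real
# backends persist messages in a cookie or in the session):
#
# class MemoryStorage(BaseStorage):
#     def _get(self, *args, **kwargs):
#         return getattr(self, '_stored', []), True
#
#     def _store(self, messages, response, *args, **kwargs):
#         self._stored = messages
#         return []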
| apache-2.0 | 5,080,588,995,030,907,000 | 7,661,447,045,270,267,000 | 32.889503 | 79 | 0.599283 | false |
Drvanon/Game | venv/lib/python3.3/site-packages/werkzeug/serving.py | 309 | 27668 | # -*- coding: utf-8 -*-
"""
werkzeug.serving
~~~~~~~~~~~~~~~~
There are many ways to serve a WSGI application. While you're developing
it you usually don't want a full blown webserver like Apache but a simple
standalone one. From Python 2.5 onwards there is the `wsgiref`_ server in
the standard library. If you're using older versions of Python you can
download the package from the cheeseshop.
However there are some caveats. Sourcecode won't reload itself when
    changed and each time you kill the server using ``^C`` you get a
    `KeyboardInterrupt` error. While the latter is easy to solve, the first
    one can be a pain in the ass in some situations.
The easiest way is creating a small ``start-myproject.py`` that runs the
application::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from myproject import make_app
from werkzeug.serving import run_simple
app = make_app(...)
run_simple('localhost', 8080, app, use_reloader=True)
You can also pass it a `extra_files` keyword argument with a list of
additional files (like configuration files) you want to observe.
For bigger applications you should consider using `werkzeug.script`
instead of a simple start file.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import os
import socket
import sys
import time
import signal
import subprocess
try:
import thread
except ImportError:
import _thread as thread
try:
from SocketServer import ThreadingMixIn, ForkingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from socketserver import ThreadingMixIn, ForkingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
import werkzeug
from werkzeug._internal import _log
from werkzeug._compat import iteritems, PY2, reraise, text_type, \
wsgi_encoding_dance
from werkzeug.urls import url_parse, url_unquote
from werkzeug.exceptions import InternalServerError, BadRequest
class WSGIRequestHandler(BaseHTTPRequestHandler, object):
"""A request handler that implements WSGI dispatching."""
@property
def server_version(self):
return 'Werkzeug/' + werkzeug.__version__
def make_environ(self):
request_url = url_parse(self.path)
def shutdown_server():
self.server.shutdown_signal = True
url_scheme = self.server.ssl_context is None and 'http' or 'https'
path_info = url_unquote(request_url.path)
environ = {
'wsgi.version': (1, 0),
'wsgi.url_scheme': url_scheme,
'wsgi.input': self.rfile,
'wsgi.errors': sys.stderr,
'wsgi.multithread': self.server.multithread,
'wsgi.multiprocess': self.server.multiprocess,
'wsgi.run_once': False,
'werkzeug.server.shutdown':
shutdown_server,
'SERVER_SOFTWARE': self.server_version,
'REQUEST_METHOD': self.command,
'SCRIPT_NAME': '',
'PATH_INFO': wsgi_encoding_dance(path_info),
'QUERY_STRING': wsgi_encoding_dance(request_url.query),
'CONTENT_TYPE': self.headers.get('Content-Type', ''),
'CONTENT_LENGTH': self.headers.get('Content-Length', ''),
'REMOTE_ADDR': self.client_address[0],
'REMOTE_PORT': self.client_address[1],
'SERVER_NAME': self.server.server_address[0],
'SERVER_PORT': str(self.server.server_address[1]),
'SERVER_PROTOCOL': self.request_version
}
for key, value in self.headers.items():
key = 'HTTP_' + key.upper().replace('-', '_')
if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
environ[key] = value
if request_url.netloc:
environ['HTTP_HOST'] = request_url.netloc
return environ
def run_wsgi(self):
if self.headers.get('Expect', '').lower().strip() == '100-continue':
self.wfile.write(b'HTTP/1.1 100 Continue\r\n\r\n')
environ = self.make_environ()
headers_set = []
headers_sent = []
def write(data):
assert headers_set, 'write() before start_response'
if not headers_sent:
status, response_headers = headers_sent[:] = headers_set
try:
code, msg = status.split(None, 1)
except ValueError:
code, msg = status, ""
self.send_response(int(code), msg)
header_keys = set()
for key, value in response_headers:
self.send_header(key, value)
key = key.lower()
header_keys.add(key)
if 'content-length' not in header_keys:
self.close_connection = True
self.send_header('Connection', 'close')
if 'server' not in header_keys:
self.send_header('Server', self.version_string())
if 'date' not in header_keys:
self.send_header('Date', self.date_time_string())
self.end_headers()
assert type(data) is bytes, 'applications must write bytes'
self.wfile.write(data)
self.wfile.flush()
def start_response(status, response_headers, exc_info=None):
if exc_info:
try:
if headers_sent:
reraise(*exc_info)
finally:
exc_info = None
elif headers_set:
raise AssertionError('Headers already set')
headers_set[:] = [status, response_headers]
return write
def execute(app):
application_iter = app(environ, start_response)
try:
for data in application_iter:
write(data)
if not headers_sent:
write(b'')
finally:
if hasattr(application_iter, 'close'):
application_iter.close()
application_iter = None
try:
execute(self.server.app)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e, environ)
except Exception:
if self.server.passthrough_errors:
raise
from werkzeug.debug.tbtools import get_current_traceback
traceback = get_current_traceback(ignore_system_exceptions=True)
try:
# if we haven't yet sent the headers but they are set
# we roll back to be able to set them again.
if not headers_sent:
del headers_set[:]
execute(InternalServerError())
except Exception:
pass
self.server.log('error', 'Error on request:\n%s',
traceback.plaintext)
def handle(self):
"""Handles a request ignoring dropped connections."""
rv = None
try:
rv = BaseHTTPRequestHandler.handle(self)
except (socket.error, socket.timeout) as e:
self.connection_dropped(e)
except Exception:
if self.server.ssl_context is None or not is_ssl_error():
raise
if self.server.shutdown_signal:
self.initiate_shutdown()
return rv
def initiate_shutdown(self):
"""A horrible, horrible way to kill the server for Python 2.6 and
later. It's the best we can do.
"""
# Windows does not provide SIGKILL, go with SIGTERM then.
sig = getattr(signal, 'SIGKILL', signal.SIGTERM)
# reloader active
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
os.kill(os.getpid(), sig)
# python 2.7
self.server._BaseServer__shutdown_request = True
# python 2.6
self.server._BaseServer__serving = False
def connection_dropped(self, error, environ=None):
"""Called if the connection was closed by the client. By default
nothing happens.
"""
def handle_one_request(self):
"""Handle a single HTTP request."""
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
elif self.parse_request():
return self.run_wsgi()
def send_response(self, code, message=None):
"""Send the response header and log the response code."""
self.log_request(code)
if message is None:
message = code in self.responses and self.responses[code][0] or ''
if self.request_version != 'HTTP/0.9':
hdr = "%s %d %s\r\n" % (self.protocol_version, code, message)
self.wfile.write(hdr.encode('ascii'))
def version_string(self):
return BaseHTTPRequestHandler.version_string(self).strip()
def address_string(self):
return self.client_address[0]
def log_request(self, code='-', size='-'):
self.log('info', '"%s" %s %s', self.requestline, code, size)
def log_error(self, *args):
self.log('error', *args)
def log_message(self, format, *args):
self.log('info', format, *args)
def log(self, type, message, *args):
_log(type, '%s - - [%s] %s\n' % (self.address_string(),
self.log_date_time_string(),
message % args))
#: backwards compatible name if someone is subclassing it
BaseRequestHandler = WSGIRequestHandler
def generate_adhoc_ssl_pair(cn=None):
from random import random
from OpenSSL import crypto
# pretty damn sure that this is not actually accepted by anyone
if cn is None:
cn = '*'
cert = crypto.X509()
cert.set_serial_number(int(random() * sys.maxint))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(60 * 60 * 24 * 365)
subject = cert.get_subject()
subject.CN = cn
subject.O = 'Dummy Certificate'
issuer = cert.get_issuer()
issuer.CN = 'Untrusted Authority'
issuer.O = 'Self-Signed'
pkey = crypto.PKey()
pkey.generate_key(crypto.TYPE_RSA, 768)
cert.set_pubkey(pkey)
cert.sign(pkey, 'md5')
return cert, pkey
def make_ssl_devcert(base_path, host=None, cn=None):
"""Creates an SSL key for development. This should be used instead of
the ``'adhoc'`` key which generates a new cert on each server start.
It accepts a path for where it should store the key and cert and
either a host or CN. If a host is given it will use the CN
``*.host/CN=host``.
For more information see :func:`run_simple`.
.. versionadded:: 0.9
:param base_path: the path to the certificate and key. The extension
``.crt`` is added for the certificate, ``.key`` is
added for the key.
:param host: the name of the host. This can be used as an alternative
for the `cn`.
:param cn: the `CN` to use.
"""
from OpenSSL import crypto
if host is not None:
cn = '*.%s/CN=%s' % (host, host)
cert, pkey = generate_adhoc_ssl_pair(cn=cn)
cert_file = base_path + '.crt'
pkey_file = base_path + '.key'
with open(cert_file, 'w') as f:
f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
with open(pkey_file, 'w') as f:
f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
return cert_file, pkey_file
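
# Example (sketch): generate the dev cert once and reuse it across restarts.
# `app` is a hypothetical WSGI application used purely for illustration:
#
# cert_file, pkey_file = make_ssl_devcert('/tmp/devcert', host='localhost')
# run_simple('localhost', 443, app, ssl_context=(cert_file, pkey_file))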
def generate_adhoc_ssl_context():
"""Generates an adhoc SSL context for the development server."""
from OpenSSL import SSL
cert, pkey = generate_adhoc_ssl_pair()
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_privatekey(pkey)
ctx.use_certificate(cert)
return ctx
def load_ssl_context(cert_file, pkey_file):
"""Loads an SSL context from a certificate and private key file."""
from OpenSSL import SSL
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.use_certificate_file(cert_file)
ctx.use_privatekey_file(pkey_file)
return ctx
def is_ssl_error(error=None):
"""Checks if the given error (or the current one) is an SSL error."""
if error is None:
error = sys.exc_info()[1]
from OpenSSL import SSL
return isinstance(error, SSL.Error)
class _SSLConnectionFix(object):
"""Wrapper around SSL connection to provide a working makefile()."""
def __init__(self, con):
self._con = con
def makefile(self, mode, bufsize):
return socket._fileobject(self._con, mode, bufsize)
def __getattr__(self, attrib):
return getattr(self._con, attrib)
def shutdown(self, arg=None):
try:
self._con.shutdown()
except Exception:
pass
def select_ip_version(host, port):
"""Returns AF_INET4 or AF_INET6 depending on where to connect to."""
# disabled due to problems with current ipv6 implementations
# and various operating systems. Probably this code also is
# not supposed to work, but I can't come up with any other
# ways to implement this.
##try:
## info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
## socket.SOCK_STREAM, 0,
## socket.AI_PASSIVE)
## if info:
## return info[0][0]
##except socket.gaierror:
## pass
if ':' in host and hasattr(socket, 'AF_INET6'):
return socket.AF_INET6
return socket.AF_INET
class BaseWSGIServer(HTTPServer, object):
"""Simple single-threaded, single-process WSGI server."""
multithread = False
multiprocess = False
request_queue_size = 128
def __init__(self, host, port, app, handler=None,
passthrough_errors=False, ssl_context=None):
if handler is None:
handler = WSGIRequestHandler
self.address_family = select_ip_version(host, port)
HTTPServer.__init__(self, (host, int(port)), handler)
self.app = app
self.passthrough_errors = passthrough_errors
self.shutdown_signal = False
if ssl_context is not None:
try:
from OpenSSL import tsafe
except ImportError:
raise TypeError('SSL is not available if the OpenSSL '
'library is not installed.')
if isinstance(ssl_context, tuple):
ssl_context = load_ssl_context(*ssl_context)
if ssl_context == 'adhoc':
ssl_context = generate_adhoc_ssl_context()
self.socket = tsafe.Connection(ssl_context, self.socket)
self.ssl_context = ssl_context
else:
self.ssl_context = None
def log(self, type, message, *args):
_log(type, message, *args)
def serve_forever(self):
self.shutdown_signal = False
try:
HTTPServer.serve_forever(self)
except KeyboardInterrupt:
pass
def handle_error(self, request, client_address):
if self.passthrough_errors:
raise
else:
return HTTPServer.handle_error(self, request, client_address)
def get_request(self):
con, info = self.socket.accept()
if self.ssl_context is not None:
con = _SSLConnectionFix(con)
return con, info
class ThreadedWSGIServer(ThreadingMixIn, BaseWSGIServer):
"""A WSGI server that does threading."""
multithread = True
class ForkingWSGIServer(ForkingMixIn, BaseWSGIServer):
"""A WSGI server that does forking."""
multiprocess = True
def __init__(self, host, port, app, processes=40, handler=None,
passthrough_errors=False, ssl_context=None):
BaseWSGIServer.__init__(self, host, port, app, handler,
passthrough_errors, ssl_context)
self.max_children = processes
def make_server(host, port, app=None, threaded=False, processes=1,
request_handler=None, passthrough_errors=False,
ssl_context=None):
"""Create a new server instance that is either threaded, or forks
or just processes one request after another.
"""
if threaded and processes > 1:
raise ValueError("cannot have a multithreaded and "
"multi process server.")
elif threaded:
return ThreadedWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
elif processes > 1:
return ForkingWSGIServer(host, port, app, processes, request_handler,
passthrough_errors, ssl_context)
else:
return BaseWSGIServer(host, port, app, request_handler,
passthrough_errors, ssl_context)
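
# Example (sketch): serving a trivial WSGI app with a threaded server.
# `hello` is a hypothetical application used purely for illustration:
#
# def hello(environ, start_response):
#     start_response('200 OK', [('Content-Type', 'text/plain')])
#     return [b'Hello World!']
#
# make_server('localhost', 5000, hello, threaded=True).serve_forever()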
def _iter_module_files():
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
def _reloader_stat_loop(extra_files=None, interval=1):
"""When this function is run from the main thread, it will force other
threads to exit when any modules currently loaded change.
Copyright notice. This function is based on the autoreload.py from
the CherryPy trac which originated from WSGIKit which is now dead.
:param extra_files: a list of additional files it should watch.
"""
from itertools import chain
mtimes = {}
while 1:
for filename in chain(_iter_module_files(), extra_files or ()):
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
_log('info', ' * Detected change in %r, reloading' % filename)
sys.exit(3)
time.sleep(interval)
def _reloader_inotify(extra_files=None, interval=None):
# Mutated by inotify loop when changes occur.
changed = [False]
# Setup inotify watches
from pyinotify import WatchManager, Notifier
# this API changed at one point, support both
try:
from pyinotify import EventsCodes as ec
ec.IN_ATTRIB
except (ImportError, AttributeError):
import pyinotify as ec
wm = WatchManager()
mask = ec.IN_DELETE_SELF | ec.IN_MOVE_SELF | ec.IN_MODIFY | ec.IN_ATTRIB
def signal_changed(event):
if changed[0]:
return
_log('info', ' * Detected change in %r, reloading' % event.path)
changed[:] = [True]
for fname in extra_files or ():
wm.add_watch(fname, mask, signal_changed)
# ... And now we wait...
notif = Notifier(wm)
try:
while not changed[0]:
# always reiterate through sys.modules, adding them
for fname in _iter_module_files():
wm.add_watch(fname, mask, signal_changed)
notif.process_events()
if notif.check_events(timeout=interval):
notif.read_events()
# TODO Set timeout to something small and check parent liveliness
finally:
notif.stop()
sys.exit(3)
# currently we always use the stat loop reloader for the simple reason
# that the inotify one does not respond to added files properly. Also
# it's quite buggy and the API is a mess.
reloader_loop = _reloader_stat_loop
def restart_with_reloader():
"""Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread.
"""
while 1:
_log('info', ' * Restarting with reloader')
args = [sys.executable] + sys.argv
new_environ = os.environ.copy()
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
# a weird bug on windows. sometimes unicode strings end up in the
# environment and subprocess.call does not like this, encode them
# to latin1 and continue.
if os.name == 'nt' and PY2:
for key, value in iteritems(new_environ):
if isinstance(value, text_type):
new_environ[key] = value.encode('iso-8859-1')
exit_code = subprocess.call(args, env=new_environ)
if exit_code != 3:
return exit_code
def run_with_reloader(main_func, extra_files=None, interval=1):
"""Run the given function in an independent python interpreter."""
import signal
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
thread.start_new_thread(main_func, ())
try:
reloader_loop(extra_files, interval)
except KeyboardInterrupt:
return
try:
sys.exit(restart_with_reloader())
except KeyboardInterrupt:
pass
def run_simple(hostname, port, application, use_reloader=False,
use_debugger=False, use_evalex=True,
extra_files=None, reloader_interval=1, threaded=False,
processes=1, request_handler=None, static_files=None,
passthrough_errors=False, ssl_context=None):
"""Start an application using wsgiref and with an optional reloader. This
wraps `wsgiref` to fix the wrong default reporting of the multithreaded
WSGI variable and adds optional multithreading and fork support.
This function has a command-line interface too::
python -m werkzeug.serving --help
.. versionadded:: 0.5
`static_files` was added to simplify serving of static files as well
as `passthrough_errors`.
.. versionadded:: 0.6
support for SSL was added.
.. versionadded:: 0.8
Added support for automatically loading a SSL context from certificate
file and private key.
.. versionadded:: 0.9
Added command-line interface.
:param hostname: The host for the application. eg: ``'localhost'``
:param port: The port for the server. eg: ``8080``
:param application: the WSGI application to execute
:param use_reloader: should the server automatically restart the python
process if modules were changed?
:param use_debugger: should the werkzeug debugging system be used?
:param use_evalex: should the exception evaluation feature be enabled?
:param extra_files: a list of files the reloader should watch
additionally to the modules. For example configuration
files.
:param reloader_interval: the interval for the reloader in seconds.
:param threaded: should the process handle each request in a separate
thread?
:param processes: if greater than 1 then handle each request in a new process
up to this maximum number of concurrent processes.
:param request_handler: optional parameter that can be used to replace
the default one. You can use this to replace it
with a different
:class:`~BaseHTTPServer.BaseHTTPRequestHandler`
subclass.
:param static_files: a dict of paths for static files. This works exactly
like :class:`SharedDataMiddleware`, it's actually
just wrapping the application in that middleware before
serving.
:param passthrough_errors: set this to `True` to disable the error catching.
This means that the server will die on errors but
it can be useful to hook debuggers in (pdb etc.)
:param ssl_context: an SSL context for the connection. Either an OpenSSL
context, a tuple in the form ``(cert_file, pkey_file)``,
the string ``'adhoc'`` if the server should
automatically create one, or `None` to disable SSL
(which is the default).
"""
if use_debugger:
from werkzeug.debug import DebuggedApplication
application = DebuggedApplication(application, use_evalex)
if static_files:
from werkzeug.wsgi import SharedDataMiddleware
application = SharedDataMiddleware(application, static_files)
def inner():
make_server(hostname, port, application, threaded,
processes, request_handler,
passthrough_errors, ssl_context).serve_forever()
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
display_hostname = hostname != '*' and hostname or 'localhost'
if ':' in display_hostname:
display_hostname = '[%s]' % display_hostname
_log('info', ' * Running on %s://%s:%d/', ssl_context is None
and 'http' or 'https', display_hostname, port)
if use_reloader:
# Create and destroy a socket so that any exceptions are raised before
# we spawn a separate Python interpreter and lose this ability.
address_family = select_ip_version(hostname, port)
test_socket = socket.socket(address_family, socket.SOCK_STREAM)
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
test_socket.bind((hostname, port))
test_socket.close()
run_with_reloader(inner, extra_files, reloader_interval)
else:
inner()
def main():
'''A simple command-line interface for :py:func:`run_simple`.'''
# in contrast to argparse, this works at least under Python < 2.7
import optparse
from werkzeug.utils import import_string
parser = optparse.OptionParser(usage='Usage: %prog [options] app_module:app_object')
parser.add_option('-b', '--bind', dest='address',
help='The hostname:port the app should listen on.')
parser.add_option('-d', '--debug', dest='use_debugger',
action='store_true', default=False,
help='Use Werkzeug\'s debugger.')
parser.add_option('-r', '--reload', dest='use_reloader',
action='store_true', default=False,
help='Reload Python process if modules change.')
options, args = parser.parse_args()
hostname, port = None, None
if options.address:
address = options.address.split(':')
hostname = address[0]
if len(address) > 1:
port = address[1]
if len(args) != 1:
sys.stdout.write('No application supplied, or too much. See --help\n')
sys.exit(1)
app = import_string(args[0])
run_simple(
hostname=(hostname or '127.0.0.1'), port=int(port or 5000),
application=app, use_reloader=options.use_reloader,
use_debugger=options.use_debugger
)
if __name__ == '__main__':
main()
| apache-2.0 | 4,629,764,198,447,834,000 | 5,776,119,838,917,316,000 | 35.93992 | 88 | 0.595345 | false |
shootstar/novatest | nova/api/openstack/compute/plugins/v3/rescue.py | 4 | 3531 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The rescue mode extension."""
from oslo.config import cfg
import webob
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova import utils
ALIAS = "os-rescue"
CONF = cfg.CONF
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
class RescueController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(RescueController, self).__init__(*args, **kwargs)
self.compute_api = compute.API()
def _get_instance(self, context, instance_id):
try:
return self.compute_api.get(context, instance_id)
except exception.InstanceNotFound:
msg = _("Server not found")
raise exc.HTTPNotFound(msg)
@wsgi.action('rescue')
def _rescue(self, req, id, body):
"""Rescue an instance."""
context = req.environ["nova.context"]
authorize(context)
if body['rescue'] and 'adminPass' in body['rescue']:
password = body['rescue']['adminPass']
else:
password = utils.generate_password()
instance = self._get_instance(context, id)
try:
self.compute_api.rescue(context, instance,
rescue_password=password)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'rescue')
except exception.InvalidVolume as volume_error:
raise exc.HTTPConflict(explanation=volume_error.format_message())
except exception.InstanceNotRescuable as non_rescuable:
raise exc.HTTPBadRequest(
explanation=non_rescuable.format_message())
return {'adminPass': password}
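
    # As read above, the expected request body is roughly:
    #   {"rescue": {"adminPass": "<optional password>"}}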
@wsgi.action('unrescue')
def _unrescue(self, req, id, body):
"""Unrescue an instance."""
context = req.environ["nova.context"]
authorize(context)
instance = self._get_instance(context, id)
try:
self.compute_api.unrescue(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'unrescue')
return webob.Response(status_int=202)
class Rescue(extensions.V3APIExtensionBase):
"""Instance rescue mode."""
name = "Rescue"
alias = ALIAS
namespace = "http://docs.openstack.org/compute/ext/rescue/api/v3"
version = 1
def get_resources(self):
return []
def get_controller_extensions(self):
controller = RescueController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
| apache-2.0 | 6,522,504,736,509,600,000 | 5,538,122,638,222,918,000 | 34.31 | 79 | 0.638912 | false |
wemanuel/smry | server-auth/ls/google-cloud-sdk/.install/.backup/lib/googlecloudapis/container/v1/container_v1_messages.py | 4 | 19560 | """Generated message classes for container version v1.
The Google Container Engine API is used for building and managing container
based applications, powered by the open source Kubernetes technology.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from protorpc import messages as _messages
from googlecloudapis.apitools.base.py import encoding
package = 'container'
class Cluster(_messages.Message):
"""A Google Container Engine cluster.
Enums:
StatusValueValuesEnum: [Output only] The current status of this cluster.
Fields:
clusterIpv4Cidr: The IP address range of the container pods in this
cluster, in [CIDR](http://en.wikipedia.org/wiki/Classless_Inter-
Domain_Routing) notation (e.g. `10.96.0.0/14`). Leave blank to have one
automatically chosen or specify a `/14` block in `10.0.0.0/8` or
`172.16.0.0/12`.
createTime: [Output only] The time the cluster was created, in
      [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) text format.
currentMasterVersion: [Output only] The current software version of the
master endpoint.
currentNodeVersion: [Output only] The current version of the node software
components. If they are currently at different versions because they're
in the process of being upgraded, this reflects the minimum version of
any of them.
description: An optional description of this cluster.
endpoint: [Output only] The IP address of this cluster's Kubernetes master
endpoint. The endpoint can be accessed from the internet at
`https://username:password@endpoint/`. See the `masterAuth` property of
this resource for username and password information.
initialClusterVersion: [Output only] The software version of Kubernetes
master and kubelets used in the cluster when it was first created. The
version can be upgraded over time.
initialNodeCount: The number of nodes to create in this cluster. You must
ensure that your Compute Engine [resource quota](/compute/docs/resource-
quotas) is sufficient for this number of instances. You must also have
available firewall and routes quota.
instanceGroupUrls: [Output only] The resource URLs of [instance
groups](/compute/docs/instance-groups/) associated with this cluster.
loggingService: The logging service that the cluster should write logs to.
Currently available options: * "logging.googleapis.com" - the Google
Cloud Logging service * "none" - no logs will be exported from the
cluster * "" - default value; the default is "logging.googleapis.com"
masterAuth: The authentication information for accessing the master.
monitoringService: The monitoring service that the cluster should write
metrics to. Currently available options: * "monitoring.googleapis.com"
- the Google Cloud Monitoring service * "none" - no metrics will be
exported from the cluster * "" - default value; the default is
"monitoring.googleapis.com"
name: The name of this cluster. The name must be unique within this
project and zone, and can be up to 40 characters with the following
restrictions: * Lowercase letters, numbers, and hyphens only. * Must
start with a letter. * Must end with a number or a letter.
network: The name of the Google Compute Engine
[network](/compute/docs/networking#networks_1) to which the cluster is
connected. If left unspecified, the "default" network will be used.
nodeConfig: Parameters used in creating the cluster's nodes. See the
descriptions of the child properties of `nodeConfig`.
nodeIpv4CidrSize: [Output only] The size of the address space on each node
for hosting containers. This is provisioned from within the
container_ipv4_cidr range.
selfLink: [Output only] Server-defined URL for the resource.
servicesIpv4Cidr: [Output only] The IP address range of the Kubernetes
services in this cluster, in [CIDR](http://en.wikipedia.org/wiki
/Classless_Inter-Domain_Routing) notation (e.g. `1.2.3.4/29`). Service
addresses are typically put in the last /16 from the container CIDR.
status: [Output only] The current status of this cluster.
statusMessage: [Output only] Additional information about the current
status of this cluster, if available.
zone: [Output only] The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
class StatusValueValuesEnum(_messages.Enum):
"""[Output only] The current status of this cluster.
Values:
STATUS_UNSPECIFIED: Not set.
PROVISIONING: The PROVISIONING state indicates the cluster is being
created.
RUNNING: The RUNNING state indicates the cluster has been created and is
fully usable.
RECONCILING: The RECONCILING state indicates that some work is actively
being done on the cluster, such as upgrading the master or node
software. Details can be found in the statusMessage field.
STOPPING: The STOPPING state indicates the cluster is being deleted.
ERROR: The ERROR state indicates the cluster may be unusable. Details
can be found in the statusMessage field.
"""
STATUS_UNSPECIFIED = 0
PROVISIONING = 1
RUNNING = 2
RECONCILING = 3
STOPPING = 4
ERROR = 5
clusterIpv4Cidr = _messages.StringField(1)
createTime = _messages.StringField(2)
currentMasterVersion = _messages.StringField(3)
currentNodeVersion = _messages.StringField(4)
description = _messages.StringField(5)
endpoint = _messages.StringField(6)
initialClusterVersion = _messages.StringField(7)
initialNodeCount = _messages.IntegerField(8, variant=_messages.Variant.INT32)
instanceGroupUrls = _messages.StringField(9, repeated=True)
loggingService = _messages.StringField(10)
masterAuth = _messages.MessageField('MasterAuth', 11)
monitoringService = _messages.StringField(12)
name = _messages.StringField(13)
network = _messages.StringField(14)
nodeConfig = _messages.MessageField('NodeConfig', 15)
nodeIpv4CidrSize = _messages.IntegerField(16, variant=_messages.Variant.INT32)
selfLink = _messages.StringField(17)
servicesIpv4Cidr = _messages.StringField(18)
status = _messages.EnumField('StatusValueValuesEnum', 19)
statusMessage = _messages.StringField(20)
zone = _messages.StringField(21)
class ClusterUpdate(_messages.Message):
"""ClusterUpdate describes an update to the cluster.
Fields:
desiredMasterVersion: The Kubernetes version to change the master to
(typically an upgrade). Use "-" to upgrade to the latest version
supported by the server.
desiredNodeVersion: The Kubernetes version to change the nodes to
(typically an upgrade). Use "-" to upgrade to the latest version
supported by the server.
"""
desiredMasterVersion = _messages.StringField(1)
desiredNodeVersion = _messages.StringField(2)
class ContainerProjectsZonesClustersCreateRequest(_messages.Message):
"""A ContainerProjectsZonesClustersCreateRequest object.
Fields:
createClusterRequest: A CreateClusterRequest resource to be passed as the
request body.
projectId: The Google Developers Console [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
createClusterRequest = _messages.MessageField('CreateClusterRequest', 1)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesClustersDeleteRequest(_messages.Message):
"""A ContainerProjectsZonesClustersDeleteRequest object.
Fields:
clusterId: The name of the cluster to delete.
projectId: The Google Developers Console [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
clusterId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesClustersGetRequest(_messages.Message):
"""A ContainerProjectsZonesClustersGetRequest object.
Fields:
clusterId: The name of the cluster to retrieve.
projectId: The Google Developers Console A [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
clusterId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesClustersListRequest(_messages.Message):
"""A ContainerProjectsZonesClustersListRequest object.
Fields:
projectId: The Google Developers Console [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides, or
"-" for all zones.
"""
projectId = _messages.StringField(1, required=True)
zone = _messages.StringField(2, required=True)
class ContainerProjectsZonesClustersUpdateRequest(_messages.Message):
"""A ContainerProjectsZonesClustersUpdateRequest object.
Fields:
clusterId: The name of the cluster to upgrade.
projectId: The Google Developers Console [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
updateClusterRequest: A UpdateClusterRequest resource to be passed as the
request body.
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
clusterId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
updateClusterRequest = _messages.MessageField('UpdateClusterRequest', 3)
zone = _messages.StringField(4, required=True)
class ContainerProjectsZonesOperationsGetRequest(_messages.Message):
"""A ContainerProjectsZonesOperationsGetRequest object.
Fields:
operationId: The server-assigned `name` of the operation.
projectId: The Google Developers Console [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the cluster resides.
"""
operationId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
zone = _messages.StringField(3, required=True)
class ContainerProjectsZonesOperationsListRequest(_messages.Message):
"""A ContainerProjectsZonesOperationsListRequest object.
Fields:
projectId: The Google Developers Console [project
ID](https://console.developers.google.com/project) or [project
number](https://developers.google.com/console/help/project-number)
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) to return operations for, or "-"
for all zones.
"""
projectId = _messages.StringField(1, required=True)
zone = _messages.StringField(2, required=True)
class CreateClusterRequest(_messages.Message):
"""CreateClusterRequest creates a cluster.
Fields:
cluster: A [cluster resource](/container-
engine/reference/rest/v1/projects.zones.clusters)
"""
cluster = _messages.MessageField('Cluster', 1)
class ListClustersResponse(_messages.Message):
"""ListClustersResponse is the result of ListClustersRequest.
Fields:
clusters: A list of clusters in the project in the specified zone, or
      across all zones.
"""
clusters = _messages.MessageField('Cluster', 1, repeated=True)
class ListOperationsResponse(_messages.Message):
"""ListOperationsResponse is the result of ListOperationsRequest.
Fields:
operations: A list of operations in the project in the specified zone.
"""
operations = _messages.MessageField('Operation', 1, repeated=True)
class MasterAuth(_messages.Message):
"""The authentication information for accessing the master endpoint.
Authentication can be done using HTTP basic auth or using client
certificates.
Fields:
clientCertificate: [Output only] Base64 encoded public certificate used by
clients to authenticate to the cluster endpoint.
clientKey: [Output only] Base64 encoded private key used by clients to
authenticate to the cluster endpoint.
clusterCaCertificate: [Output only] Base64 encoded public certificate that
is the root of trust for the cluster.
password: The password to use for HTTP basic authentication when accessing
the Kubernetes master endpoint. Because the master endpoint is open to
the internet, you should create a strong password.
username: The username to use for HTTP basic authentication when accessing
the Kubernetes master endpoint.
"""
clientCertificate = _messages.StringField(1)
clientKey = _messages.StringField(2)
clusterCaCertificate = _messages.StringField(3)
password = _messages.StringField(4)
username = _messages.StringField(5)
class NodeConfig(_messages.Message):
"""Per-node parameters.
Fields:
diskSizeGb: Size of the disk attached to each node, specified in GB. The
smallest allowed disk size is 10GB, and the default is 100GB.
machineType: The name of a Google Compute Engine [machine
type](/compute/docs/machine-types) (e.g. `n1-standard-1`). If
unspecified, the default machine type is `n1-standard-1`.
oauthScopes: The set of Google API scopes to be made available on all of
the node VMs under the "default" service account. Currently, the
following scopes are necessary to ensure the correct functioning of the
cluster: * "https://www.googleapis.com/auth/compute" *
"https://www.googleapis.com/auth/devstorage.read_only"
"""
diskSizeGb = _messages.IntegerField(1, variant=_messages.Variant.INT32)
machineType = _messages.StringField(2)
oauthScopes = _messages.StringField(3, repeated=True)
class Operation(_messages.Message):
"""Defines the operation resource. All fields are output only.
Enums:
OperationTypeValueValuesEnum: The operation type.
StatusValueValuesEnum: The current status of the operation.
Fields:
name: The server-assigned ID for the operation.
operationType: The operation type.
selfLink: Server-defined URL for the resource.
status: The current status of the operation.
statusMessage: If an error has occurred, a textual description of the
error.
targetLink: Server-defined URL for the target of the operation.
zone: The name of the Google Compute Engine
[zone](/compute/docs/zones#available) in which the operation is taking
place.
"""
class OperationTypeValueValuesEnum(_messages.Enum):
"""The operation type.
Values:
TYPE_UNSPECIFIED: Not set.
CREATE_CLUSTER: Cluster create.
DELETE_CLUSTER: Cluster delete.
UPGRADE_MASTER: A master upgrade.
UPGRADE_NODES: A node upgrade.
REPAIR_CLUSTER: Cluster repair.
"""
TYPE_UNSPECIFIED = 0
CREATE_CLUSTER = 1
DELETE_CLUSTER = 2
UPGRADE_MASTER = 3
UPGRADE_NODES = 4
REPAIR_CLUSTER = 5
class StatusValueValuesEnum(_messages.Enum):
"""The current status of the operation.
Values:
STATUS_UNSPECIFIED: Not set.
PENDING: The operation has been created.
RUNNING: The operation is currently running.
DONE: The operation is done, either cancelled or completed.
"""
STATUS_UNSPECIFIED = 0
PENDING = 1
RUNNING = 2
DONE = 3
name = _messages.StringField(1)
operationType = _messages.EnumField('OperationTypeValueValuesEnum', 2)
selfLink = _messages.StringField(3)
status = _messages.EnumField('StatusValueValuesEnum', 4)
statusMessage = _messages.StringField(5)
targetLink = _messages.StringField(6)
zone = _messages.StringField(7)
class StandardQueryParameters(_messages.Message):
"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
bearer_token: OAuth bearer token.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
pp: Pretty-print response.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" or "email:<ldap>" to
include in api requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
bearer_token = _messages.StringField(4)
callback = _messages.StringField(5)
fields = _messages.StringField(6)
key = _messages.StringField(7)
oauth_token = _messages.StringField(8)
pp = _messages.BooleanField(9, default=True)
prettyPrint = _messages.BooleanField(10, default=True)
quotaUser = _messages.StringField(11)
trace = _messages.StringField(12)
uploadType = _messages.StringField(13)
upload_protocol = _messages.StringField(14)
class UpdateClusterRequest(_messages.Message):
"""UpdateClusterRequest updates a cluster.
Fields:
update: A description of the update.
"""
update = _messages.MessageField('ClusterUpdate', 1)
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
package=u'container')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
package=u'container')
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv',
package=u'container')
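# --- Editor's illustrative sketch, not part of the generated module. Shows how
# the request messages above are constructed; it assumes standard apitools
# message semantics, and every field value below is a placeholder.
def _example_build_get_request():
  """Build a sample get-cluster request (hypothetical values)."""
  return ContainerProjectsZonesClustersGetRequest(
      projectId='example-project',
      zone='us-central1-a',
      clusterId='example-cluster')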
| apache-2.0 | 2,549,927,567,512,657,400 | 8,464,477,180,077,467,000 | 38.756098 | 80 | 0.732311 | false |
t-wissmann/qutebrowser | qutebrowser/misc/savemanager.py | 1 | 8427 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2020 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Saving things to disk periodically."""
import os.path
import collections
import typing
from PyQt5.QtCore import pyqtSlot, QObject, QTimer
from qutebrowser.config import config
from qutebrowser.api import cmdutils
from qutebrowser.utils import utils, log, message, usertypes, error
from qutebrowser.misc import objects
class Saveable:
"""A single thing which can be saved.
Attributes:
_name: The name of the thing to be saved.
_dirty: Whether the saveable was changed since the last save.
_save_handler: The function to call to save this Saveable.
_save_on_exit: Whether to always save this saveable on exit.
_config_opt: A config option which decides whether to auto-save or not.
None if no such option exists.
_filename: The filename of the underlying file.
"""
def __init__(self, name, save_handler, changed=None, config_opt=None,
filename=None):
self._name = name
self._dirty = False
self._save_handler = save_handler
self._config_opt = config_opt
if changed is not None:
changed.connect(self.mark_dirty)
self._save_on_exit = False
else:
self._save_on_exit = True
self._filename = filename
if filename is not None and not os.path.exists(filename):
self._dirty = True
self.save()
def __repr__(self):
return utils.get_repr(self, name=self._name, dirty=self._dirty,
save_handler=self._save_handler,
config_opt=self._config_opt,
save_on_exit=self._save_on_exit,
filename=self._filename)
def mark_dirty(self):
"""Mark this saveable as dirty (having changes)."""
log.save.debug("Marking {} as dirty.".format(self._name))
self._dirty = True
def save(self, is_exit=False, explicit=False, silent=False, force=False):
"""Save this saveable.
Args:
is_exit: Whether we're currently exiting qutebrowser.
explicit: Whether the user explicitly requested this save.
silent: Don't write information to log.
force: Force saving, no matter what.
"""
if (self._config_opt is not None and
(not config.instance.get(self._config_opt)) and
(not explicit) and (not force)):
if not silent:
log.save.debug("Not saving {name} because autosaving has been "
"disabled by {cfg[0]} -> {cfg[1]}.".format(
name=self._name, cfg=self._config_opt))
return
do_save = self._dirty or (self._save_on_exit and is_exit) or force
if not silent:
log.save.debug("Save of {} requested - dirty {}, save_on_exit {}, "
"is_exit {}, force {} -> {}".format(
self._name, self._dirty, self._save_on_exit,
is_exit, force, do_save))
if do_save:
self._save_handler()
self._dirty = False
class SaveManager(QObject):
"""Responsible to save 'saveables' periodically and on exit.
Attributes:
saveables: A dict mapping names to Saveable instances.
_save_timer: The Timer used to periodically auto-save things.
"""
def __init__(self, parent=None):
super().__init__(parent)
self.saveables = collections.OrderedDict(
) # type: typing.MutableMapping[str, Saveable]
self._save_timer = usertypes.Timer(self, name='save-timer')
self._save_timer.timeout.connect(self.autosave)
self._set_autosave_interval()
config.instance.changed.connect(self._set_autosave_interval)
def __repr__(self):
return utils.get_repr(self, saveables=self.saveables)
@config.change_filter('auto_save.interval')
def _set_autosave_interval(self):
"""Set the auto-save interval."""
interval = config.val.auto_save.interval
if interval == 0:
self._save_timer.stop()
else:
self._save_timer.setInterval(interval)
self._save_timer.start()
def add_saveable(self, name, save, changed=None, config_opt=None,
filename=None, dirty=False):
"""Add a new saveable.
Args:
name: The name to use.
save: The function to call to save this saveable.
changed: The signal emitted when this saveable changed.
config_opt: An option deciding whether to auto-save or not.
filename: The filename of the underlying file, so we can force
saving if it doesn't exist.
dirty: Whether the saveable is already dirty.
"""
if name in self.saveables:
raise ValueError("Saveable {} already registered!".format(name))
saveable = Saveable(name, save, changed, config_opt, filename)
self.saveables[name] = saveable
if dirty:
saveable.mark_dirty()
QTimer.singleShot(0, saveable.save)
def save(self, name, is_exit=False, explicit=False, silent=False,
force=False):
"""Save a saveable by name.
        Args:
            name: The name of the saveable to save.
is_exit: Whether we're currently exiting qutebrowser.
explicit: Whether this save operation was triggered explicitly.
silent: Don't write information to log. Used to reduce log spam
when autosaving.
force: Force saving, no matter what.
"""
self.saveables[name].save(is_exit=is_exit, explicit=explicit,
silent=silent, force=force)
def save_all(self, *args, **kwargs):
"""Save all saveables."""
for saveable in self.saveables:
self.save(saveable, *args, **kwargs)
@pyqtSlot()
def autosave(self):
"""Slot used when the configs are auto-saved."""
for (key, saveable) in self.saveables.items():
try:
saveable.save(silent=True)
except OSError as e:
message.error("Failed to auto-save {}: {}".format(key, e))
@cmdutils.register(instance='save-manager', name='save',
star_args_optional=True)
def save_command(self, *what):
"""Save configs and state.
Args:
*what: What to save (`config`/`key-config`/`cookies`/...).
If not given, everything is saved.
"""
if what:
explicit = True
else:
what = tuple(self.saveables)
explicit = False
for key in what:
if key not in self.saveables:
message.error("{} is nothing which can be saved".format(key))
else:
try:
self.save(key, explicit=explicit, force=True)
except OSError as e:
message.error("Could not save {}: {}".format(key, e))
log.save.debug(":save saved {}".format(', '.join(what)))
@pyqtSlot()
def shutdown(self):
"""Save all saveables when shutting down."""
for key in self.saveables:
try:
self.save(key, is_exit=True)
except OSError as e:
error.handle_fatal_exc(
e, "Error while saving!",
pre_text="Error while saving {}".format(key),
no_err_windows=objects.args.no_err_windows)
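# --- Editor's illustrative sketch, not part of qutebrowser. Shows how a
# component typically registers with the save manager; `state_obj` is
# hypothetical and must expose a save() callable and a Qt `changed` signal,
# mirroring add_saveable()'s expectations above.
def _example_register(save_manager, state_obj):
    save_manager.add_saveable(
        'example-state', state_obj.save, changed=state_obj.changed,
        config_opt='auto_save.session')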
| gpl-3.0 | -535,101,950,764,814,660 | 2,822,758,190,307,870,700 | 37.655963 | 79 | 0.581939 | false |
les69/calvin-base | calvin/actorstore/systemactors/net/UDPListener.py | 2 | 3328 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, ActionResult, manage, condition, guard
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class UDPListener(Actor):
"""
Listen for UDP messages on a given port.
    Control port takes control commands of the form (uri only applicable for
    listen):
    {
        "command" : "listen"/"stop",
        "uri": "udp://<ipv4 address>:<port>"
    }
Input:
control_in : JSON containing host & port to listen to.
Output:
data_out : Data received on the UDP port will be sent as tokens.
"""
@manage(['host', 'port'])
def init(self):
self.host = None
self.port = None
self.listener = None
self.setup()
def listen(self):
self.listener = self['server'].start(self.host, self.port, "udp")
def did_migrate(self):
self.setup()
if self.port is not None:
self.listen()
def setup(self):
self.use('calvinsys.network.serverhandler', shorthand='server')
self.use('calvinsys.native.python-re', shorthand='regexp')
@condition(action_output=['data_out'])
@guard(lambda self: self.listener and self.listener.have_data())
def receive(self):
data = self.listener.data_get()
return ActionResult(production=(data,))
# URI parsing - 0: protocol, 1: host, 2: port
URI_REGEXP = r'([^:]+)://([^/:]*):([0-9]+)'
def parse_uri(self, uri):
status = False
try:
parsed_uri = self['regexp'].findall(self.URI_REGEXP, uri)[0]
protocol = parsed_uri[0]
if protocol != 'udp':
_log.warn("Protocol '%s' not supported, assuming udp" % (protocol,))
self.host = parsed_uri[1]
self.port = int(parsed_uri[2])
status = True
except:
_log.warn("malformed or erroneous control uri '%s'" % (uri,))
self.host = None
self.port = None
return status
@condition(action_input=['control_in'])
@guard(lambda self, control: control.get('command', '') == 'listen' and not self.listener)
def new_port(self, control):
if self.parse_uri(control.get('uri', '')):
self.listen()
return ActionResult()
@condition(action_input=['control_in'])
@guard(lambda self, control: control.get('command', '') == 'stop' and self.listener)
def close_port(self, control):
self.listener.stop()
del self.listener
self.listener = None
return ActionResult(production=())
action_priority = (new_port, close_port, receive)
requires = ['calvinsys.network.serverhandler', 'calvinsys.native.python-re']
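# --- Editor's illustrative sketch, not part of calvin-base: control tokens
# matching the guards above. new_port() accepts "listen" with a uri, and
# close_port() accepts "stop"; the host/port values are placeholders.
EXAMPLE_LISTEN_TOKEN = {"command": "listen", "uri": "udp://0.0.0.0:5005"}
EXAMPLE_STOP_TOKEN = {"command": "stop"}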
| apache-2.0 | -7,342,971,313,689,527,000 | -8,701,238,170,245,041,000 | 31.31068 | 94 | 0.61899 | false |
guozhangwang/kafka | tests/kafkatest/services/log_compaction_tester.py | 3 | 4139 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ducktape.services.background_thread import BackgroundThreadService
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin, CORE_LIBS_JAR_NAME, CORE_DEPENDANT_TEST_LIBS_JAR_NAME
from kafkatest.services.security.security_config import SecurityConfig
from kafkatest.version import DEV_BRANCH
class LogCompactionTester(KafkaPathResolverMixin, BackgroundThreadService):
OUTPUT_DIR = "/mnt/logcompaction_tester"
LOG_PATH = os.path.join(OUTPUT_DIR, "logcompaction_tester_stdout.log")
VERIFICATION_STRING = "Data verification is completed"
logs = {
"tool_logs": {
"path": LOG_PATH,
"collect_default": True}
}
def __init__(self, context, kafka, security_protocol="PLAINTEXT", stop_timeout_sec=30, tls_version=None):
super(LogCompactionTester, self).__init__(context, 1)
self.kafka = kafka
self.security_protocol = security_protocol
self.tls_version = tls_version
self.security_config = SecurityConfig(self.context, security_protocol, tls_version=tls_version)
self.stop_timeout_sec = stop_timeout_sec
self.log_compaction_completed = False
def _worker(self, idx, node):
node.account.ssh("mkdir -p %s" % LogCompactionTester.OUTPUT_DIR)
cmd = self.start_cmd(node)
self.logger.info("LogCompactionTester %d command: %s" % (idx, cmd))
self.security_config.setup_node(node)
for line in node.account.ssh_capture(cmd):
self.logger.debug("Checking line:{}".format(line))
if line.startswith(LogCompactionTester.VERIFICATION_STRING):
self.log_compaction_completed = True
def start_cmd(self, node):
core_libs_jar = self.path.jar(CORE_LIBS_JAR_NAME, DEV_BRANCH)
core_dependant_test_libs_jar = self.path.jar(CORE_DEPENDANT_TEST_LIBS_JAR_NAME, DEV_BRANCH)
cmd = "for file in %s; do CLASSPATH=$CLASSPATH:$file; done;" % core_libs_jar
cmd += " for file in %s; do CLASSPATH=$CLASSPATH:$file; done;" % core_dependant_test_libs_jar
cmd += " export CLASSPATH;"
cmd += self.path.script("kafka-run-class.sh", node)
cmd += " %s" % self.java_class_name()
cmd += " --bootstrap-server %s --messages 1000000 --sleep 20 --duplicates 10 --percent-deletes 10" % (self.kafka.bootstrap_servers(self.security_protocol))
cmd += " 2>> %s | tee -a %s &" % (self.logs["tool_logs"]["path"], self.logs["tool_logs"]["path"])
return cmd
def stop_node(self, node):
node.account.kill_java_processes(self.java_class_name(), clean_shutdown=True,
allow_fail=True)
stopped = self.wait_node(node, timeout_sec=self.stop_timeout_sec)
assert stopped, "Node %s: did not stop within the specified timeout of %s seconds" % \
(str(node.account), str(self.stop_timeout_sec))
def clean_node(self, node):
node.account.kill_java_processes(self.java_class_name(), clean_shutdown=False,
allow_fail=True)
node.account.ssh("rm -rf %s" % LogCompactionTester.OUTPUT_DIR, allow_fail=False)
def java_class_name(self):
return "kafka.tools.LogCompactionTester"
@property
def is_done(self):
return self.log_compaction_completed
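# --- Editor's illustrative sketch, not part of the original service. Shows how
# the tester might be driven from a ducktape test; `test_context` and `kafka`
# are the usual kafkatest fixtures, and the wait_until import follows ducktape
# conventions (an assumption).
def run_compaction_check(test_context, kafka, timeout_sec=180):
    from ducktape.utils.util import wait_until
    tester = LogCompactionTester(test_context, kafka)
    tester.start()
    wait_until(lambda: tester.is_done, timeout_sec=timeout_sec,
               err_msg="Timed out waiting for log compaction verification")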
| apache-2.0 | 6,604,151,750,405,989,000 | -6,670,637,256,933,380,000 | 45.505618 | 163 | 0.674076 | false |
agopalak/football_pred | pre_proc/proc_data.py | 1 | 4667 |
import sys
import yaml
import re
import datetime as DT
import logging
from rainbow_logging_handler import RainbowLoggingHandler
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn_pandas import DataFrameMapper
# Capturing current module. Needed to call getattr on this module
this_module = sys.modules[__name__]
# Setup logging module
# TODO: Figure out a standard way to install/handle logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(filename)s:%(lineno)4s - %(funcName)15s()] %(levelname)8s: %(message)s')
# Setup RainbowLoggingHandler
handler = RainbowLoggingHandler(sys.stderr, color_funcName=('black', 'yellow', True))
handler.setFormatter(formatter)
logger.addHandler(handler)
# Converting Boolean to String during YAML load
# Done to work around quirkiness in PyYAML
def bool_constructor(self, node):
value = self.construct_yaml_bool(node)
if value == False:
return 'False'
else:
return 'True'
yaml.Loader.add_constructor(u'tag:yaml.org,2002:bool', bool_constructor)
yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:bool', bool_constructor)
# Load data from CSV, configuration file
# Process data and provide input/output data frames
def load_data(data_csv, data_cfg):
# Load Data YAML configuration file
with open(data_cfg, 'r') as yf:
        # Explicit Loader: required by newer PyYAML versions and keeps the
        # custom bool constructor registered above in effect.
        data = yaml.load(yf, Loader=yaml.Loader)
# Read CSV into data frame
df = pd.read_csv(data_csv)
# Filling holes with zeros
df.fillna(0, inplace=True)
# Process Columns
for item in data:
        # bool_constructor loads YAML booleans as the *strings* 'True'/'False',
        # so the comparison must be against the string form.
        if item['include'] == 'False':
continue
else:
colnum = item['column']
logger.info('Processing Column %s', colnum)
# Create a column data frame
col_df = df.iloc[:, [colnum-1]].copy()
logger.debug(col_df.columns)
logger.debug('Preprocess Column Input\n%s', col_df.head())
# Apply transformations
col_df = do_transform(col_df, item['transform'])
logger.debug('Preprocess Column Output\n%s', col_df.head())
# Perform Data Transformations
def do_transform(df, tf):
for func in tf:
funckey, funcval = func.items()[0]
# Getting transformation call name
transform = getattr(this_module, funckey, None)
# Splitting funcval to individual function arguments
# First argument is True/False to indicate if transform is called
try:
pattern = re.compile('\s*,\s*')
funcvals = pattern.split(funcval)
logger.debug('Funcvals --> %s', funcvals)
except AttributeError:
funcvals = [funcval]
# Calling transformation
if funcvals[0] == 'True':
try:
logger.debug('Funckey --> %s', funckey)
df = transform(df, funcvals[1:])
except AttributeError:
logger.error('Function %s has not been implemented!', funckey)
return df
# Performs feature scaling on data frame
# TODO: scale - Add implementation to handle val
def scale(df, val):
logger.info('Function %s called..', sys._getframe().f_code.co_name)
mms = preprocessing.MinMaxScaler()
return pd.DataFrame(mms.fit_transform(df.values.ravel().reshape(-1, 1)), columns=df.columns)
# conv2num: Converts column data to ordered integers
# TODO: conv2num - Add implementation to handle args
def conv2num(df, args):
logger.info('Function %s called..', sys._getframe().f_code.co_name)
le = preprocessing.LabelEncoder()
return pd.DataFrame(le.fit_transform(df.values.ravel()), columns=df.columns)
# conv2bin: Converts column data to binary
# TODO: conv2bin - Add implementation to handle args
def conv2bin(df, args):
logger.info('Function %s called..', sys._getframe().f_code.co_name)
le = preprocessing.LabelBinarizer()
return pd.DataFrame(le.fit_transform(df.values.ravel()), columns=df.columns)
# conv2timedelta: Converts column data to age
# TODO: conv2timedelta - Currently returns whole years. May need to make the unit configurable
def conv2timedelta(df, args):
logger.info('Function %s called..', sys._getframe().f_code.co_name)
if args[1] == 'now':
refdate = pd.Timestamp(DT.datetime.now())
else:
refdate = pd.Timestamp(DT.datetime.strptime(args[1], args[0]))
logger.debug('Reference date is: %s', refdate)
df = pd.DataFrame((refdate - pd.to_datetime(df.values.ravel())), columns=df.columns)
return df.apply(lambda x: (x/np.timedelta64(1, 'Y')).astype(int))
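# --- Editor's sketch of one datacfg.yaml entry, reconstructed from load_data()
# and do_transform() above; hypothetical, not the project's actual config.
_EXAMPLE_DATACFG_YAML = """
- column: 3
  include: true          # loaded as the string 'True' via bool_constructor
  transform:
    - conv2num: 'True'
    - scale: 'True'
"""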
# Main Program
if __name__ == '__main__':
load_data('nflData.csv', 'datacfg.yaml')
| mit | 3,822,282,509,987,279,000 | -976,096,664,449,338,900 | 34.090226 | 106 | 0.668738 | false |
Kongsea/tensorflow | tensorflow/python/eager/graph_only_ops.py | 69 | 2363 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Graph-only versions of a few op functions, for internal use only."""
# Must be separate from array_ops to avoid a cyclic dependency.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
def graph_zeros_like(tensor):
"""Graph-only version of tf.zeros_like(), for internal use only."""
g = ops._get_graph_from_inputs([tensor]) # pylint: disable=protected-access
with g.as_default(), ops.name_scope(None, "zeros_like", [tensor]) as name:
tensor = ops.convert_to_tensor(tensor, name="tensor")
dtype = tensor.dtype.base_dtype
dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
op = g.create_op("ZerosLike", [tensor], [dtype], input_types=[dtype],
attrs={"T": dtype_value}, name=name)
result, = op.outputs
return result
def graph_placeholder(dtype, shape, name=None):
"""Graph-only version of tf.placeholder(), for internal use only."""
dtype = dtype.base_dtype
dtype_value = attr_value_pb2.AttrValue(type=dtype.as_datatype_enum)
if isinstance(shape, (list, tuple)):
shape = tensor_shape.TensorShape(shape)
assert isinstance(shape, tensor_shape.TensorShape)
shape = attr_value_pb2.AttrValue(shape=shape.as_proto())
g = ops.get_default_graph()
with ops.name_scope(name, "placeholder", []) as name:
op = g.create_op("Placeholder", [], [dtype], input_types=[],
attrs={"dtype": dtype_value, "shape": shape}, name=name)
result, = op.outputs
return result
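# --- Editor's illustrative sketch, not part of TensorFlow. Builds graph nodes
# with the helpers above inside an explicit graph; the dtypes import exists in
# TensorFlow but is added here only for the example.
def _example_graph_only_usage():
  from tensorflow.python.framework import dtypes
  g = ops.Graph()
  with g.as_default():
    x = graph_placeholder(dtypes.float32, tensor_shape.TensorShape([2, 3]))
    z = graph_zeros_like(x)
  return x, z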
| apache-2.0 | -1,848,535,797,681,182,000 | 8,162,821,941,717,716,000 | 42.759259 | 80 | 0.691494 | false |