repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
ironbox360/django | tests/auth_tests/test_views.py | 35 | 44018 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import itertools
import os
import re
from importlib import import_module
from django.apps import apps
from django.conf import settings
from django.contrib.admin.models import LogEntry
from django.contrib.auth import REDIRECT_FIELD_NAME, SESSION_KEY
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, SetPasswordForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.tests.custom_user import CustomUser
from django.contrib.auth.views import login as login_view, redirect_to_login
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.sites.requests import RequestSite
from django.core import mail
from django.core.urlresolvers import NoReverseMatch, reverse, reverse_lazy
from django.db import connection
from django.http import HttpRequest, QueryDict
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.test import TestCase, override_settings
from django.test.utils import patch_logger
from django.utils.encoding import force_text
from django.utils.http import urlquote
from django.utils.six.moves.urllib.parse import ParseResult, urlparse
from django.utils.translation import LANGUAGE_SESSION_KEY
from .models import UUIDUser
from .settings import AUTH_TEMPLATES
@override_settings(
LANGUAGES=[
('en', 'English'),
],
LANGUAGE_CODE='en',
TEMPLATES=AUTH_TEMPLATES,
USE_TZ=False,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls',
)
class AuthViewsTestCase(TestCase):
"""
Helper base class for all the following test cases.
"""
@classmethod
def setUpTestData(cls):
cls.u1 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='testclient',
first_name='Test', last_name='Client', email='[email protected]', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u2 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='inactive',
first_name='Inactive', last_name='User', email='[email protected]', is_staff=False, is_active=False,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u3 = User.objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False, username='staff',
first_name='Staff', last_name='Member', email='[email protected]', is_staff=True, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u4 = User.objects.create(
password='', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='empty_password', first_name='Empty', last_name='Password', email='[email protected]',
is_staff=False, is_active=True, date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u5 = User.objects.create(
password='$', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unmanageable_password', first_name='Unmanageable', last_name='Password',
email='[email protected]', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
cls.u6 = User.objects.create(
password='foo$bar', last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), is_superuser=False,
username='unknown_password', first_name='Unknown', last_name='Password',
email='[email protected]', is_staff=False, is_active=True,
date_joined=datetime.datetime(2006, 12, 17, 7, 3, 31)
)
def login(self, username='testclient', password='password'):
response = self.client.post('/login/', {
'username': username,
'password': password,
})
self.assertIn(SESSION_KEY, self.client.session)
return response
def logout(self):
response = self.client.get('/admin/logout/')
self.assertEqual(response.status_code, 200)
self.assertNotIn(SESSION_KEY, self.client.session)
def assertFormError(self, response, error):
"""Assert that error is found in response.context['form'] errors"""
form_errors = list(itertools.chain(*response.context['form'].errors.values()))
self.assertIn(force_text(error), form_errors)
def assertURLEqual(self, url, expected, parse_qs=False):
"""
Given two URLs, make sure all their components (the ones given by
urlparse) are equal, only comparing components that are present in both
URLs.
If `parse_qs` is True, then the querystrings are parsed with QueryDict.
This is useful if you don't want the order of parameters to matter.
Otherwise, the query strings are compared as-is.
"""
fields = ParseResult._fields
for attr, x, y in zip(fields, urlparse(url), urlparse(expected)):
if parse_qs and attr == 'query':
x, y = QueryDict(x), QueryDict(y)
if x and y and x != y:
self.fail("%r != %r (%s doesn't match)" % (url, expected, attr))
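# Illustrative usage (an added sketch, not part of the original suite): with
# parse_qs=True the query comparison is order-insensitive, because
# QueryDict('a=1&b=2') == QueryDict('b=2&a=1'). For example:
#   self.assertURLEqual('/login/?a=1&b=2', '/login/?b=2&a=1', parse_qs=True)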
@override_settings(ROOT_URLCONF='django.contrib.auth.urls')
class AuthViewNamedURLTests(AuthViewsTestCase):
def test_named_urls(self):
"Named URLs should be reversible"
expected_named_urls = [
('login', [], {}),
('logout', [], {}),
('password_change', [], {}),
('password_change_done', [], {}),
('password_reset', [], {}),
('password_reset_done', [], {}),
('password_reset_confirm', [], {
'uidb64': 'aaaaaaa',
'token': '1111-aaaaa',
}),
('password_reset_complete', [], {}),
]
for name, args, kwargs in expected_named_urls:
try:
reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.fail("Reversal of url named '%s' failed with NoReverseMatch" % name)
class PasswordResetTest(AuthViewsTestCase):
def test_email_not_found(self):
"""If the provided email is not registered, don't raise any error but
also don't send any email."""
response = self.client.get('/password_reset/')
self.assertEqual(response.status_code, 200)
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 0)
def test_email_found(self):
"Email is sent if a valid email address is provided for password reset"
response = self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("http://", mail.outbox[0].body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL, mail.outbox[0].from_email)
# optional multipart text/html email has been added. Make sure original,
# default functionality is 100% the same
self.assertFalse(mail.outbox[0].message().is_multipart())
def test_extra_email_context(self):
"""
extra_email_context should be available in the email template context.
"""
response = self.client.post(
'/password_reset_extra_email_context/',
{'email': '[email protected]'},
)
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertIn('Email email context: "Hello!"', mail.outbox[0].body)
def test_html_mail_template(self):
"""
A multipart email with text/plain and text/html is sent
if the html_email_template parameter is passed to the view
"""
response = self.client.post('/password_reset/html_email_template/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
message = mail.outbox[0].message()
self.assertEqual(len(message.get_payload()), 2)
self.assertTrue(message.is_multipart())
self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
self.assertNotIn('<html>', message.get_payload(0).get_payload())
self.assertIn('<html>', message.get_payload(1).get_payload())
def test_email_found_custom_from(self):
"Email is sent if a valid email address is provided for password reset when a custom from_email is provided."
response = self.client.post('/password_reset_from_email/', {'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual("[email protected]", mail.outbox[0].from_email)
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host(self):
"Poisoned HTTP_HOST headers can't be used for reset emails"
# This attack is based on the way browsers handle URLs. The colon
# should be used to separate the port, but if the URL contains an @,
# the colon is interpreted as part of a username for login purposes,
# making 'evil.com' the request domain. Since HTTP_HOST is used to
# produce a meaningful reset URL, we need to be certain that the
# HTTP_HOST header isn't poisoned. This is done as a check when get_host()
# is invoked, but we check here as a practical consequence.
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/password_reset/',
{'email': '[email protected]'},
HTTP_HOST='www.example:[email protected]'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
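# Added illustration (not part of the original test): how the poisoned header
# parses. Everything before '@' in the authority is read as credentials, so
# the trailing domain becomes the effective host:
#   >>> urlparse('http://www.example:[email protected]').hostname
#   'example.com'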
# Skip any 500 handler action (like sending more mail...)
@override_settings(DEBUG_PROPAGATE_EXCEPTIONS=True)
def test_poisoned_http_host_admin_site(self):
"Poisoned HTTP_HOST headers can't be used for reset emails on admin views"
with patch_logger('django.security.DisallowedHost', 'error') as logger_calls:
response = self.client.post(
'/admin_password_reset/',
{'email': '[email protected]'},
HTTP_HOST='www.example:[email protected]'
)
self.assertEqual(response.status_code, 400)
self.assertEqual(len(mail.outbox), 0)
self.assertEqual(len(logger_calls), 1)
def _test_confirm_start(self):
# Start by creating the email
self.client.post('/password_reset/', {'email': '[email protected]'})
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
def test_confirm_invalid(self):
url, path = self._test_confirm_start()
# Let's munge the token in the path, but keep the same length,
# in case the URLconf will reject a different length.
path = path[:-5] + ("0" * 4) + path[-1]
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_user(self):
# Ensure that we get a 200 response for a non-existent user, not a 404
response = self.client.get('/reset/123456/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_overflow_user(self):
# Ensure that we get a 200 response for a base36 user id that overflows int
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "The password reset link was invalid")
def test_confirm_invalid_post(self):
# Same as test_confirm_invalid, but trying
# to do a POST instead.
url, path = self._test_confirm_start()
path = path[:-5] + ("0" * 4) + path[-1]
self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': ' anewpassword',
})
# Check the password has not been changed
u = User.objects.get(email='[email protected]')
self.assertTrue(not u.check_password("anewpassword"))
def test_confirm_complete(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
# Check the password has been changed
u = User.objects.get(email='[email protected]')
self.assertTrue(u.check_password("anewpassword"))
# Check we can't use the link again
response = self.client.get(path)
self.assertContains(response, "The password reset link was invalid")
def test_confirm_different_passwords(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'x'})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_reset_redirect_default(self):
response = self.client.post('/password_reset/',
{'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/done/')
def test_reset_custom_redirect(self):
response = self.client.post('/password_reset/custom_redirect/',
{'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_reset_custom_redirect_named(self):
response = self.client.post('/password_reset/custom_redirect/named/',
{'email': '[email protected]'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_redirect_default(self):
url, path = self._test_confirm_start()
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/reset/done/')
def test_confirm_redirect_custom(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_confirm_redirect_custom_named(self):
url, path = self._test_confirm_start()
path = path.replace('/reset/', '/reset/custom/named/')
response = self.client.post(path, {'new_password1': 'anewpassword',
'new_password2': 'anewpassword'})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
def test_confirm_display_user_from_form(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# #16919 -- The ``password_reset_confirm`` view should pass the user
# object to the ``SetPasswordForm``, even on GET requests.
# For this test, we render ``{{ form.user }}`` in the template
# ``registration/password_reset_confirm.html`` so that we can test this.
username = User.objects.get(email='[email protected]').username
self.assertContains(response, "Hello, %s." % username)
# However, the view should NOT pass any user object on a form if the
# password reset link was invalid.
response = self.client.get('/reset/zzzzzzzzzzzzz/1-1/')
self.assertContains(response, "Hello, .")
@override_settings(AUTH_USER_MODEL='auth.CustomUser')
class CustomUserPasswordResetTest(AuthViewsTestCase):
user_email = '[email protected]'
@classmethod
def setUpTestData(cls):
cls.u1 = CustomUser.custom_objects.create(
password='sha1$6efc0$f93efe9fd7542f25a7be94871ea45aa95de57161',
last_login=datetime.datetime(2006, 12, 17, 7, 3, 31), email='[email protected]', is_active=True,
is_admin=False, date_of_birth=datetime.date(1976, 11, 8)
)
def _test_confirm_start(self):
# Start by creating the email
response = self.client.post('/password_reset/', {'email': self.user_email})
self.assertEqual(response.status_code, 302)
self.assertEqual(len(mail.outbox), 1)
return self._read_signup_email(mail.outbox[0])
def _read_signup_email(self, email):
urlmatch = re.search(r"https?://[^/]*(/.*reset/\S*)", email.body)
self.assertIsNotNone(urlmatch, "No URL found in sent email")
return urlmatch.group(), urlmatch.groups()[0]
def test_confirm_valid_custom_user(self):
url, path = self._test_confirm_start()
response = self.client.get(path)
# redirect to a 'complete' page:
self.assertContains(response, "Please enter your new password")
# then submit a new password
response = self.client.post(path, {
'new_password1': 'anewpassword',
'new_password2': 'anewpassword',
})
self.assertRedirects(response, '/reset/done/')
@override_settings(AUTH_USER_MODEL='auth.UUIDUser')
class UUIDUserPasswordResetTest(CustomUserPasswordResetTest):
def _test_confirm_start(self):
# instead of fixture
UUIDUser.objects.create_user(
email=self.user_email,
username='foo',
password='foo',
)
return super(UUIDUserPasswordResetTest, self)._test_confirm_start()
class ChangePasswordTest(AuthViewsTestCase):
def fail_login(self, password='password'):
response = self.client.post('/login/', {
'username': 'testclient',
'password': password,
})
self.assertFormError(response, AuthenticationForm.error_messages['invalid_login'] % {
'username': User._meta.get_field('username').verbose_name
})
def logout(self):
self.client.get('/logout/')
def test_password_change_fails_with_invalid_old_password(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'donuts',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertFormError(response, PasswordChangeForm.error_messages['password_incorrect'])
def test_password_change_fails_with_mismatched_passwords(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'donuts',
})
self.assertFormError(response, SetPasswordForm.error_messages['password_mismatch'])
def test_password_change_succeeds(self):
self.login()
self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.fail_login()
self.login(password='password1')
def test_password_change_done_succeeds(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
@override_settings(LOGIN_URL='/login/')
def test_password_change_done_fails(self):
response = self.client.get('/password_change/done/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/?next=/password_change/done/')
def test_password_change_redirect_default(self):
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_change/done/')
def test_password_change_redirect_custom(self):
self.login()
response = self.client.post('/password_change/custom/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/custom/')
def test_password_change_redirect_custom_named(self):
self.login()
response = self.client.post('/password_change/custom/named/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
class SessionAuthenticationTests(AuthViewsTestCase):
def test_user_password_change_updates_session(self):
"""
#21649 - Ensure contrib.auth.views.password_change updates the user's
session auth hash after a password change so the session isn't logged out.
"""
self.login()
response = self.client.post('/password_change/', {
'old_password': 'password',
'new_password1': 'password1',
'new_password2': 'password1',
})
# if the hash isn't updated, retrieving the redirection page will fail.
self.assertRedirects(response, '/password_change/done/')
class LoginTest(AuthViewsTestCase):
def test_current_site_in_context_after_login(self):
response = self.client.get(reverse('login'))
self.assertEqual(response.status_code, 200)
if apps.is_installed('django.contrib.sites'):
Site = apps.get_model('sites.Site')
site = Site.objects.get_current()
self.assertEqual(response.context['site'], site)
self.assertEqual(response.context['site_name'], site.name)
else:
self.assertIsInstance(response.context['site'], RequestSite)
self.assertIsInstance(response.context['form'], AuthenticationForm)
def test_security_check(self, password='password'):
login_url = reverse('login')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'http:///example.com',
'https://example.com',
'ftp://exampel.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
response = self.client.post(nasty_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url,
"%s should be blocked" % bad_url)
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': login_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
response = self.client.post(safe_url, {
'username': 'testclient',
'password': password,
})
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
def test_login_form_contains_request(self):
# 15198
self.client.post('/custom_requestauth_login/', {
'username': 'testclient',
'password': 'password',
}, follow=True)
# the custom authentication form used by this login asserts
# that a request is passed to the form successfully.
def test_login_csrf_rotate(self, password='password'):
"""
Makes sure that a login rotates the currently-used CSRF token.
"""
# Do a GET to establish a CSRF token
# TestClient isn't used here as we're testing middleware, essentially.
req = HttpRequest()
CsrfViewMiddleware().process_view(req, login_view, (), {})
# get_token() triggers CSRF token inclusion in the response
get_token(req)
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token1 = csrf_cookie.coded_value
# Prepare the POST request
req = HttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = token1
req.method = "POST"
req.POST = {'username': 'testclient', 'password': password, 'csrfmiddlewaretoken': token1}
# Use POST request to log in
SessionMiddleware().process_request(req)
CsrfViewMiddleware().process_view(req, login_view, (), {})
req.META["SERVER_NAME"] = "testserver" # Required to have redirect work in login view
req.META["SERVER_PORT"] = 80
resp = login_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, None)
token2 = csrf_cookie.coded_value
# Check the CSRF token switched
self.assertNotEqual(token1, token2)
def test_session_key_flushed_on_login(self):
"""
To avoid reusing another user's session, ensure a new, empty session is
created if the existing session corresponds to a different authenticated
user.
"""
self.login()
original_session_key = self.client.session.session_key
self.login(username='staff')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_session_key_flushed_on_login_after_password_change(self):
"""
As above, but same user logging in after a password change.
"""
self.login()
original_session_key = self.client.session.session_key
# If no password change, session key should not be flushed.
self.login()
self.assertEqual(original_session_key, self.client.session.session_key)
user = User.objects.get(username='testclient')
user.set_password('foobar')
user.save()
self.login(password='foobar')
self.assertNotEqual(original_session_key, self.client.session.session_key)
def test_login_session_without_hash_session_key(self):
"""
Session without django.contrib.auth.HASH_SESSION_KEY should log in
without raising an exception.
"""
user = User.objects.get(username='testclient')
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session[SESSION_KEY] = user.id
session.save()
original_session_key = session.session_key
self.client.cookies[settings.SESSION_COOKIE_NAME] = original_session_key
self.login()
self.assertNotEqual(original_session_key, self.client.session.session_key)
class LoginURLSettings(AuthViewsTestCase):
"""Tests for settings.LOGIN_URL."""
def assertLoginURLEquals(self, url, parse_qs=False):
response = self.client.get('/login_required/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url, parse_qs=parse_qs)
@override_settings(LOGIN_URL='/login/')
def test_standard_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='login')
def test_named_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
@override_settings(LOGIN_URL='http://remote.example.com/login')
def test_remote_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='https:///login/')
def test_https_login_url(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'https:///login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL='/login/?pretty=1')
def test_login_url_with_querystring(self):
self.assertLoginURLEquals('/login/?pretty=1&next=/login_required/', parse_qs=True)
@override_settings(LOGIN_URL='http://remote.example.com/login/?next=/default/')
def test_remote_login_url_with_next_querystring(self):
quoted_next = urlquote('http://testserver/login_required/')
expected = 'http://remote.example.com/login/?next=%s' % quoted_next
self.assertLoginURLEquals(expected)
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_lazy_login_url(self):
self.assertLoginURLEquals('/login/?next=/login_required/')
class LoginRedirectUrlTest(AuthViewsTestCase):
"""Tests for settings.LOGIN_REDIRECT_URL."""
def assertLoginRedirectURLEqual(self, url):
response = self.login()
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, url)
def test_default(self):
self.assertLoginRedirectURLEqual('/accounts/profile/')
@override_settings(LOGIN_REDIRECT_URL='/custom/')
def test_custom(self):
self.assertLoginRedirectURLEqual('/custom/')
@override_settings(LOGIN_REDIRECT_URL='password_reset')
def test_named(self):
self.assertLoginRedirectURLEqual('/password_reset/')
@override_settings(LOGIN_REDIRECT_URL='http://remote.example.com/welcome/')
def test_remote(self):
self.assertLoginRedirectURLEqual('http://remote.example.com/welcome/')
class RedirectToLoginTests(AuthViewsTestCase):
"""Tests for the redirect_to_login view"""
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_redirect_to_login_with_lazy(self):
login_redirect_response = redirect_to_login(next='/else/where/')
expected = '/login/?next=/else/where/'
self.assertEqual(expected, login_redirect_response.url)
@override_settings(LOGIN_URL=reverse_lazy('login'))
def test_redirect_to_login_with_lazy_and_unicode(self):
login_redirect_response = redirect_to_login(next='/else/where/झ/')
expected = '/login/?next=/else/where/%E0%A4%9D/'
self.assertEqual(expected, login_redirect_response.url)
class LogoutTest(AuthViewsTestCase):
def confirm_logged_out(self):
self.assertNotIn(SESSION_KEY, self.client.session)
def test_logout_default(self):
"Logout without next_page option renders the default template"
self.login()
response = self.client.get('/logout/')
self.assertContains(response, 'Logged out')
self.confirm_logged_out()
def test_14377(self):
# Bug 14377
self.login()
response = self.client.get('/logout/')
self.assertIn('site', response.context)
def test_logout_with_overridden_redirect_url(self):
# Bug 11223
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
response = self.client.get('/logout/next_page/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_next_page_specified(self):
"Logout with next_page option given redirects to specified resource"
self.login()
response = self.client.get('/logout/next_page/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_redirect_argument(self):
"Logout with query string redirects to specified resource"
self.login()
response = self.client.get('/logout/?next=/login/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/login/')
self.confirm_logged_out()
def test_logout_with_custom_redirect_argument(self):
"Logout with custom query string redirects to specified resource"
self.login()
response = self.client.get('/logout/custom_query/?follow=/somewhere/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/somewhere/')
self.confirm_logged_out()
def test_logout_with_named_redirect(self):
"Logout resolves names or URLs passed as next_page."
self.login()
response = self.client.get('/logout/next_page/named/')
self.assertEqual(response.status_code, 302)
self.assertURLEqual(response.url, '/password_reset/')
self.confirm_logged_out()
def test_security_check(self, password='password'):
logout_url = reverse('logout')
# Those URLs should not pass the security check
for bad_url in ('http://example.com',
'http:///example.com',
'https://example.com',
'ftp://exampel.com',
'///example.com',
'//example.com',
'javascript:alert("XSS")'):
nasty_url = '%(url)s?%(next)s=%(bad_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'bad_url': urlquote(bad_url),
}
self.login()
response = self.client.get(nasty_url)
self.assertEqual(response.status_code, 302)
self.assertNotIn(bad_url, response.url,
"%s should be blocked" % bad_url)
self.confirm_logged_out()
# These URLs *should* still pass the security check
for good_url in ('/view/?param=http://example.com',
'/view/?param=https://example.com',
'/view?param=ftp://exampel.com',
'view/?param=//example.com',
'https://testserver/',
'HTTPS://testserver/',
'//testserver/',
'/url%20with%20spaces/'): # see ticket #12534
safe_url = '%(url)s?%(next)s=%(good_url)s' % {
'url': logout_url,
'next': REDIRECT_FIELD_NAME,
'good_url': urlquote(good_url),
}
self.login()
response = self.client.get(safe_url)
self.assertEqual(response.status_code, 302)
self.assertIn(good_url, response.url, "%s should be allowed" % good_url)
self.confirm_logged_out()
def test_logout_preserve_language(self):
"""Check that language stored in session is preserved after logout"""
# Create a new session with language
engine = import_module(settings.SESSION_ENGINE)
session = engine.SessionStore()
session[LANGUAGE_SESSION_KEY] = 'pl'
session.save()
self.client.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
self.client.get('/logout/')
self.assertEqual(self.client.session[LANGUAGE_SESSION_KEY], 'pl')
# Redirect in test_user_change_password will fail if session auth hash
# isn't updated after password change (#21649)
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='auth_tests.urls_admin',
)
class ChangelistTests(AuthViewsTestCase):
def setUp(self):
# Make me a superuser before logging in.
User.objects.filter(username='testclient').update(is_staff=True, is_superuser=True)
self.login()
self.admin = User.objects.get(pk=self.u1.pk)
def get_user_data(self, user):
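# Added note: the _0/_1 suffixes mirror the admin's split date/time widget,
# which renders date and time as two separate form fields; the 'initial-'
# keys carry the form's initial values so it can detect which fields changed.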
return {
'username': user.username,
'password': user.password,
'email': user.email,
'is_active': user.is_active,
'is_staff': user.is_staff,
'is_superuser': user.is_superuser,
'last_login_0': user.last_login.strftime('%Y-%m-%d'),
'last_login_1': user.last_login.strftime('%H:%M:%S'),
'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'first_name': user.first_name,
'last_name': user.last_name,
}
# #20078 - users shouldn't be allowed to guess password hashes via
# repeated password__startswith queries.
def test_changelist_disallows_password_lookups(self):
# A lookup that tries to filter on password isn't OK
with patch_logger('django.security.DisallowedModelAdminLookup', 'error') as logger_calls:
response = self.client.get(reverse('auth_test_admin:auth_user_changelist') + '?password__startswith=sha1$')
self.assertEqual(response.status_code, 400)
self.assertEqual(len(logger_calls), 1)
def test_user_change_email(self):
data = self.get_user_data(self.admin)
data['email'] = 'new_' + data['email']
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
data
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed email.')
def test_user_not_change(self):
response = self.client.post(
reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,)),
self.get_user_data(self.admin)
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_changelist'))
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'No fields changed.')
def test_user_change_password(self):
user_change_url = reverse('auth_test_admin:auth_user_change', args=(self.admin.pk,))
password_change_url = reverse('auth_test_admin:auth_user_password_change', args=(self.admin.pk,))
response = self.client.get(user_change_url)
# Test the link inside password field help_text.
rel_link = re.search(
r'you can change the password using <a href="([^"]*)">this form</a>',
force_text(response.content)
).groups()[0]
self.assertEqual(
os.path.normpath(user_change_url + rel_link),
os.path.normpath(password_change_url)
)
response = self.client.post(
password_change_url,
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
self.assertEqual(row.change_message, 'Changed password.')
self.logout()
self.login(password='password1')
def test_user_change_different_user_password(self):
u = User.objects.get(email='[email protected]')
response = self.client.post(
reverse('auth_test_admin:auth_user_password_change', args=(u.pk,)),
{
'password1': 'password1',
'password2': 'password1',
}
)
self.assertRedirects(response, reverse('auth_test_admin:auth_user_change', args=(u.pk,)))
row = LogEntry.objects.latest('id')
self.assertEqual(row.user_id, self.admin.pk)
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.change_message, 'Changed password.')
def test_password_change_bad_url(self):
response = self.client.get(reverse('auth_test_admin:auth_user_password_change', args=('foobar',)))
self.assertEqual(response.status_code, 404)
@override_settings(
AUTH_USER_MODEL='auth.UUIDUser',
ROOT_URLCONF='auth_tests.urls_custom_user_admin',
)
class UUIDUserTests(TestCase):
def test_admin_password_change(self):
u = UUIDUser.objects.create_superuser(username='uuid', email='[email protected]', password='test')
self.assertTrue(self.client.login(username='uuid', password='test'))
user_change_url = reverse('custom_user_admin:auth_uuiduser_change', args=(u.pk,))
response = self.client.get(user_change_url)
self.assertEqual(response.status_code, 200)
password_change_url = reverse('custom_user_admin:auth_user_password_change', args=(u.pk,))
response = self.client.get(password_change_url)
self.assertEqual(response.status_code, 200)
# A LogEntry is created with pk=1 which breaks a FK constraint on MySQL
with connection.constraint_checks_disabled():
response = self.client.post(password_change_url, {
'password1': 'password1',
'password2': 'password1',
})
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
self.assertEqual(row.user_id, 1) # hardcoded in CustomUserAdmin.log_change()
self.assertEqual(row.object_id, str(u.pk))
self.assertEqual(row.change_message, 'Changed password.')
| bsd-3-clause |
edouard-lopez/ansible-modules-core | cloud/rackspace/rax_cbs.py | 157 | 7106 |
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_cbs
short_description: Manipulate Rackspace Cloud Block Storage Volumes
description:
- Manipulate Rackspace Cloud Block Storage Volumes
version_added: 1.6
options:
description:
description:
- Description to give the volume being created
default: null
image:
description:
- image to use for bootable volumes. Can be an C(id), C(human_id) or
C(name). This option requires C(pyrax>=1.9.3)
default: null
version_added: 1.9
meta:
description:
- A hash of metadata to associate with the volume
default: null
name:
description:
- Name to give the volume being created
default: null
required: true
size:
description:
- Size of the volume to create in Gigabytes
default: 100
required: true
snapshot_id:
description:
- The id of the snapshot to create the volume from
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
required: true
volume_type:
description:
- Type of the volume being created
choices:
- SATA
- SSD
default: SATA
required: true
wait:
description:
- wait for the volume to be in state 'available' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Block Storage Volume
gather_facts: False
hosts: local
connection: local
tasks:
- name: Storage volume create request
local_action:
module: rax_cbs
credentials: ~/.raxpub
name: my-volume
description: My Volume
volume_type: SSD
size: 150
region: DFW
wait: yes
state: present
meta:
app: my-cool-app
register: my_volume
'''
from distutils.version import LooseVersion
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout,
image):
changed = False
volume = None
instance = {}
cbs = pyrax.cloud_blockstorage
if cbs is None:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if image:
# pyrax<1.9.3 did not have support for specifying an image when
# creating a volume which is required for bootable volumes
if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
module.fail_json(msg='Creating a bootable volume requires '
'pyrax>=1.9.3')
image = rax_find_image(module, pyrax, image)
volume = rax_find_volume(module, pyrax, name)
if state == 'present':
if not volume:
kwargs = dict()
if image:
kwargs['image'] = image
try:
volume = cbs.create(name, size=size, volume_type=volume_type,
description=description,
metadata=meta,
snapshot_id=snapshot_id, **kwargs)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
if wait:
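# Added note: poll every 5 seconds until wait_timeout elapses; e.g. the
# default wait_timeout of 300 gives 300 / 5 = 60 attempts.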
attempts = wait_timeout / 5
pyrax.utils.wait_for_build(volume, interval=5,
attempts=attempts)
volume.get()
instance = rax_to_dict(volume)
result = dict(changed=changed, volume=instance)
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
elif wait and volume.status not in VOLUME_STATUS:
result['msg'] = 'Timeout waiting on %s' % volume.id
if 'msg' in result:
module.fail_json(**result)
else:
module.exit_json(**result)
elif state == 'absent':
if volume:
instance = rax_to_dict(volume)
try:
volume.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, volume=instance)
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
description=dict(type='str'),
image=dict(type='str'),
meta=dict(type='dict', default={}),
name=dict(required=True),
size=dict(type='int', default=100),
snapshot_id=dict(),
state=dict(default='present', choices=['present', 'absent']),
volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together()
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
description = module.params.get('description')
image = module.params.get('image')
meta = module.params.get('meta')
name = module.params.get('name')
size = module.params.get('size')
snapshot_id = module.params.get('snapshot_id')
state = module.params.get('state')
volume_type = module.params.get('volume_type')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
setup_rax_module(module, pyrax)
cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout,
image)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
# invoke the module
main()
| gpl-3.0 |
ojengwa/oh-mainline | vendor/packages/twisted/twisted/trial/test/detests.py | 98 | 4765 |
from __future__ import generators
from twisted.trial import unittest
from twisted.internet import defer, threads, reactor
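# Added note: these cases exercise trial's handling of Deferreds returned
# from setUp and from test methods; trial waits for each returned Deferred
# to fire (or hit its timeout) before moving on.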
class DeferredSetUpOK(unittest.TestCase):
def setUp(self):
d = defer.succeed('value')
d.addCallback(self._cb_setUpCalled)
return d
def _cb_setUpCalled(self, ignored):
self._setUpCalled = True
def test_ok(self):
self.failUnless(self._setUpCalled)
class DeferredSetUpFail(unittest.TestCase):
testCalled = False
def setUp(self):
return defer.fail(unittest.FailTest('i fail'))
def test_ok(self):
DeferredSetUpFail.testCalled = True
self.fail("I should not get called")
class DeferredSetUpCallbackFail(unittest.TestCase):
testCalled = False
def setUp(self):
d = defer.succeed('value')
d.addCallback(self._cb_setUpCalled)
return d
def _cb_setUpCalled(self, ignored):
self.fail('deliberate failure')
def test_ok(self):
DeferredSetUpCallbackFail.testCalled = True
class DeferredSetUpError(unittest.TestCase):
testCalled = False
def setUp(self):
return defer.fail(RuntimeError('deliberate error'))
def test_ok(self):
DeferredSetUpError.testCalled = True
class DeferredSetUpNeverFire(unittest.TestCase):
testCalled = False
def setUp(self):
return defer.Deferred()
def test_ok(self):
DeferredSetUpNeverFire.testCalled = True
class DeferredSetUpSkip(unittest.TestCase):
testCalled = False
def setUp(self):
d = defer.succeed('value')
d.addCallback(self._cb1)
return d
def _cb1(self, ignored):
raise unittest.SkipTest("skip me")
def test_ok(self):
DeferredSetUpSkip.testCalled = True
class DeferredTests(unittest.TestCase):
touched = False
def _cb_fail(self, reason):
self.fail(reason)
def _cb_error(self, reason):
raise RuntimeError(reason)
def _cb_skip(self, reason):
raise unittest.SkipTest(reason)
def _touchClass(self, ignored):
self.__class__.touched = True
def setUp(self):
self.__class__.touched = False
def test_pass(self):
return defer.succeed('success')
def test_passGenerated(self):
self._touchClass(None)
yield None
test_passGenerated = defer.deferredGenerator(test_passGenerated)
def test_fail(self):
return defer.fail(self.failureException('I fail'))
def test_failureInCallback(self):
d = defer.succeed('fail')
d.addCallback(self._cb_fail)
return d
def test_errorInCallback(self):
d = defer.succeed('error')
d.addCallback(self._cb_error)
return d
def test_skip(self):
d = defer.succeed('skip')
d.addCallback(self._cb_skip)
d.addCallback(self._touchClass)
return d
def test_thread(self):
return threads.deferToThread(lambda : None)
def test_expectedFailure(self):
d = defer.succeed('todo')
d.addCallback(self._cb_error)
return d
test_expectedFailure.todo = "Expected failure"
class TimeoutTests(unittest.TestCase):
timedOut = None
def test_pass(self):
d = defer.Deferred()
reactor.callLater(0, d.callback, 'hoorj!')
return d
test_pass.timeout = 2
def test_passDefault(self):
# test default timeout
d = defer.Deferred()
reactor.callLater(0, d.callback, 'hoorj!')
return d
def test_timeout(self):
return defer.Deferred()
test_timeout.timeout = 0.1
def test_timeoutZero(self):
return defer.Deferred()
test_timeoutZero.timeout = 0
def test_expectedFailure(self):
return defer.Deferred()
test_expectedFailure.timeout = 0.1
test_expectedFailure.todo = "i will get it right, eventually"
def test_skip(self):
return defer.Deferred()
test_skip.timeout = 0.1
test_skip.skip = "i will get it right, eventually"
def test_errorPropagation(self):
def timedOut(err):
self.__class__.timedOut = err
return err
d = defer.Deferred()
d.addErrback(timedOut)
return d
test_errorPropagation.timeout = 0.1
def test_calledButNeverCallback(self):
d = defer.Deferred()
def neverFire(r):
return defer.Deferred()
d.addCallback(neverFire)
d.callback(1)
return d
test_calledButNeverCallback.timeout = 0.1
class TestClassTimeoutAttribute(unittest.TestCase):
timeout = 0.2
def setUp(self):
self.d = defer.Deferred()
def testMethod(self):
self.methodCalled = True
return self.d
| agpl-3.0 |
okuta/chainer | tests/chainer_tests/links_tests/connection_tests/test_scale.py | 9 | 4443 |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
@testing.parameterize(
{'learn_W': True, 'bias_term': False, 'bias_shape': None},
{'learn_W': True, 'bias_term': True, 'bias_shape': None},
{'learn_W': False, 'bias_term': False, 'bias_shape': None},
{'learn_W': False, 'bias_term': True, 'bias_shape': (2,)}
)
class TestScale(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
self.W = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
self.b = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
self.y_expected = numpy.copy(self.x)
for i, j, k in numpy.ndindex(self.y_expected.shape):
self.y_expected[i, j, k] *= self.W[j]
if self.bias_term:
self.y_expected[i, j, k] += self.b[j]
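# Added note: an equivalent vectorized form of the loop above, broadcasting
# W and b over axis 1 of the (3, 2, 3) input:
#   y = self.x * self.W[None, :, None] (+ self.b[None, :, None] if bias_term)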
self.gy = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
bias_term = self.bias_term
bias_shape = self.bias_shape
axis = 1
if self.learn_W:
self.link = links.Scale(
axis, self.W.shape, bias_term, bias_shape)
self.link.W.data = self.W
if bias_term:
self.link.bias.b.data = self.b
else:
self.link = links.Scale(
axis, None, bias_term, bias_shape)
if bias_term:
self.link.bias.b.data = self.b
self.link.cleargrads()
def test_attribute_presence(self):
self.assertEqual(self.learn_W, hasattr(self.link, 'W'))
self.assertEqual(self.bias_term, hasattr(self.link, 'bias'))
def check_forward(self, x_data, W_data, y_expected):
x = chainer.Variable(x_data)
if W_data is None:
y = self.link(x)
testing.assert_allclose(y_expected, y.data)
else:
W = chainer.Variable(W_data)
y = self.link(x, W)
testing.assert_allclose(y_expected, y.data)
def test_forward_cpu(self):
if self.learn_W:
W = None
else:
W = self.W
self.check_forward(self.x, W, self.y_expected)
@attr.gpu
def test_forward_gpu(self):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
if self.learn_W:
W = None
else:
W = cuda.to_gpu(self.W)
self.check_forward(x, W, self.y_expected)
def check_backward(self, x_data, W_data, y_grad):
if W_data is None:
params = [self.link.W]
gradient_check.check_backward(
self.link, x_data, y_grad, params, atol=1e-2)
else:
gradient_check.check_backward(
self.link, (x_data, W_data), y_grad, atol=1e-2)
@condition.retry(3)
def test_backward_cpu(self):
if self.learn_W:
W = None
else:
W = self.W
self.check_backward(self.x, W, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.link.to_gpu()
x = cuda.to_gpu(self.x)
if self.learn_W:
W = None
else:
W = cuda.to_gpu(self.W)
gy = cuda.to_gpu(self.gy)
self.check_backward(x, W, gy)
class TestScaleInvalidArgc(unittest.TestCase):
def setUp(self):
x_data = numpy.random.uniform(-1, 1, (3, 2, 3)).astype(numpy.float32)
W_data = numpy.random.uniform(-1, 1, (2)).astype(numpy.float32)
self.axis = 1
self.x = chainer.Variable(x_data)
self.W = chainer.Variable(W_data)
def test_scale_invalid_argc1(self):
func = links.Scale(self.axis, self.W.data.shape)
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
func(self.x, self.W)
def test_scale_invalid_argc2(self):
func = links.Scale(self.axis, None)
with chainer.using_config('debug', True):
with self.assertRaises(AssertionError):
func(self.x)
class TestScaleNoBiasShape(unittest.TestCase):
def test_scale_no_bias_shape(self):
axis = 1
with self.assertRaises(ValueError):
links.Scale(axis, None, True, None)
testing.run_module(__name__, __file__)
| mit |
mahak/nova | nova/db/sqlalchemy/api_models.py | 4 | 24311 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db.sqlalchemy import models
from oslo_log import log as logging
from sqlalchemy import Boolean
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy.dialects.mysql import MEDIUMTEXT
from sqlalchemy import Enum
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Float
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import orm
from sqlalchemy.orm import backref
from sqlalchemy import schema
from sqlalchemy import String
from sqlalchemy import Text
from sqlalchemy import Unicode
LOG = logging.getLogger(__name__)
def MediumText():
return Text().with_variant(MEDIUMTEXT(), 'mysql')
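# Added note: with_variant() keeps the generic Text type on every backend but
# emits MySQL's MEDIUMTEXT (up to ~16 MB) when the dialect is 'mysql'; see
# RequestSpec.spec below for a column that uses it.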
class _NovaAPIBase(models.ModelBase, models.TimestampMixin):
pass
API_BASE = declarative_base(cls=_NovaAPIBase)
class AggregateHost(API_BASE):
"""Represents a host that is member of an aggregate."""
__tablename__ = 'aggregate_hosts'
__table_args__ = (schema.UniqueConstraint(
"host", "aggregate_id",
name="uniq_aggregate_hosts0host0aggregate_id"
),
)
id = Column(Integer, primary_key=True, autoincrement=True)
host = Column(String(255))
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class AggregateMetadata(API_BASE):
"""Represents a metadata key/value pair for an aggregate."""
__tablename__ = 'aggregate_metadata'
__table_args__ = (
schema.UniqueConstraint("aggregate_id", "key",
name="uniq_aggregate_metadata0aggregate_id0key"
),
Index('aggregate_metadata_key_idx', 'key'),
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255), nullable=False)
aggregate_id = Column(Integer, ForeignKey('aggregates.id'), nullable=False)
class Aggregate(API_BASE):
"""Represents a cluster of hosts that exists in this zone."""
__tablename__ = 'aggregates'
__table_args__ = (Index('aggregate_uuid_idx', 'uuid'),
schema.UniqueConstraint(
"name", name="uniq_aggregate0name")
)
id = Column(Integer, primary_key=True, autoincrement=True)
uuid = Column(String(36))
name = Column(String(255))
_hosts = orm.relationship(AggregateHost,
primaryjoin='Aggregate.id == AggregateHost.aggregate_id',
cascade='delete')
_metadata = orm.relationship(AggregateMetadata,
primaryjoin='Aggregate.id == AggregateMetadata.aggregate_id',
cascade='delete')
@property
def _extra_keys(self):
return ['hosts', 'metadetails', 'availability_zone']
@property
def hosts(self):
return [h.host for h in self._hosts]
@property
def metadetails(self):
return {m.key: m.value for m in self._metadata}
@property
def availability_zone(self):
if 'availability_zone' not in self.metadetails:
return None
return self.metadetails['availability_zone']
class CellMapping(API_BASE):
"""Contains information on communicating with a cell"""
__tablename__ = 'cell_mappings'
__table_args__ = (Index('uuid_idx', 'uuid'),
schema.UniqueConstraint('uuid',
name='uniq_cell_mappings0uuid'))
id = Column(Integer, primary_key=True)
uuid = Column(String(36), nullable=False)
name = Column(String(255))
transport_url = Column(Text())
database_connection = Column(Text())
disabled = Column(Boolean, default=False)
host_mapping = orm.relationship('HostMapping',
backref=backref('cell_mapping', uselist=False),
foreign_keys=id,
primaryjoin=(
'CellMapping.id == HostMapping.cell_id'))
class InstanceMapping(API_BASE):
"""Contains the mapping of an instance to which cell it is in"""
__tablename__ = 'instance_mappings'
__table_args__ = (Index('project_id_idx', 'project_id'),
Index('instance_uuid_idx', 'instance_uuid'),
schema.UniqueConstraint('instance_uuid',
name='uniq_instance_mappings0instance_uuid'),
Index('instance_mappings_user_id_project_id_idx',
'user_id', 'project_id'))
id = Column(Integer, primary_key=True)
instance_uuid = Column(String(36), nullable=False)
cell_id = Column(Integer, ForeignKey('cell_mappings.id'),
nullable=True)
project_id = Column(String(255), nullable=False)
# FIXME(melwitt): This should eventually be non-nullable, but we need a
# transition period first.
user_id = Column(String(255), nullable=True)
queued_for_delete = Column(Boolean)
cell_mapping = orm.relationship('CellMapping',
backref=backref('instance_mapping', uselist=False),
foreign_keys=cell_id,
primaryjoin=('InstanceMapping.cell_id == CellMapping.id'))
class HostMapping(API_BASE):
"""Contains mapping of a compute host to which cell it is in"""
__tablename__ = "host_mappings"
__table_args__ = (Index('host_idx', 'host'),
schema.UniqueConstraint('host',
name='uniq_host_mappings0host'))
id = Column(Integer, primary_key=True)
cell_id = Column(Integer, ForeignKey('cell_mappings.id'),
nullable=False)
host = Column(String(255), nullable=False)
class RequestSpec(API_BASE):
"""Represents the information passed to the scheduler."""
__tablename__ = 'request_specs'
__table_args__ = (
Index('request_spec_instance_uuid_idx', 'instance_uuid'),
schema.UniqueConstraint('instance_uuid',
name='uniq_request_specs0instance_uuid'),
)
id = Column(Integer, primary_key=True)
instance_uuid = Column(String(36), nullable=False)
spec = Column(MediumText(), nullable=False)
class Flavors(API_BASE):
"""Represents possible flavors for instances"""
__tablename__ = 'flavors'
__table_args__ = (
schema.UniqueConstraint("flavorid", name="uniq_flavors0flavorid"),
schema.UniqueConstraint("name", name="uniq_flavors0name"))
id = Column(Integer, primary_key=True)
name = Column(String(255), nullable=False)
memory_mb = Column(Integer, nullable=False)
vcpus = Column(Integer, nullable=False)
root_gb = Column(Integer)
ephemeral_gb = Column(Integer)
flavorid = Column(String(255), nullable=False)
swap = Column(Integer, nullable=False, default=0)
rxtx_factor = Column(Float, default=1)
vcpu_weight = Column(Integer)
disabled = Column(Boolean, default=False)
is_public = Column(Boolean, default=True)
description = Column(Text)
class FlavorExtraSpecs(API_BASE):
"""Represents additional specs as key/value pairs for a flavor"""
__tablename__ = 'flavor_extra_specs'
__table_args__ = (
Index('flavor_extra_specs_flavor_id_key_idx', 'flavor_id', 'key'),
schema.UniqueConstraint('flavor_id', 'key',
name='uniq_flavor_extra_specs0flavor_id0key'),
{'mysql_collate': 'utf8_bin'},
)
id = Column(Integer, primary_key=True)
key = Column(String(255), nullable=False)
value = Column(String(255))
flavor_id = Column(Integer, ForeignKey('flavors.id'), nullable=False)
flavor = orm.relationship(Flavors, backref='extra_specs',
foreign_keys=flavor_id,
primaryjoin=(
'FlavorExtraSpecs.flavor_id == Flavors.id'))
class FlavorProjects(API_BASE):
"""Represents projects associated with flavors"""
__tablename__ = 'flavor_projects'
__table_args__ = (schema.UniqueConstraint('flavor_id', 'project_id',
name='uniq_flavor_projects0flavor_id0project_id'),)
id = Column(Integer, primary_key=True)
flavor_id = Column(Integer, ForeignKey('flavors.id'), nullable=False)
project_id = Column(String(255), nullable=False)
flavor = orm.relationship(Flavors, backref='projects',
foreign_keys=flavor_id,
primaryjoin=(
'FlavorProjects.flavor_id == Flavors.id'))
class BuildRequest(API_BASE):
"""Represents the information passed to the scheduler."""
__tablename__ = 'build_requests'
__table_args__ = (
Index('build_requests_instance_uuid_idx', 'instance_uuid'),
Index('build_requests_project_id_idx', 'project_id'),
schema.UniqueConstraint('instance_uuid',
name='uniq_build_requests0instance_uuid'),
)
id = Column(Integer, primary_key=True)
# TODO(mriedem): instance_uuid should be nullable=False
instance_uuid = Column(String(36))
project_id = Column(String(255), nullable=False)
instance = Column(MediumText())
block_device_mappings = Column(MediumText())
tags = Column(Text())
# TODO(alaski): Drop these from the db in Ocata
# columns_to_drop = ['request_spec_id', 'user_id', 'display_name',
# 'instance_metadata', 'progress', 'vm_state', 'task_state',
# 'image_ref', 'access_ip_v4', 'access_ip_v6', 'info_cache',
# 'security_groups', 'config_drive', 'key_name', 'locked_by',
# 'reservation_id', 'launch_index', 'hostname', 'kernel_id',
# 'ramdisk_id', 'root_device_name', 'user_data']
class KeyPair(API_BASE):
"""Represents a public key pair for ssh / WinRM."""
__tablename__ = 'key_pairs'
__table_args__ = (
schema.UniqueConstraint("user_id", "name",
name="uniq_key_pairs0user_id0name"),
)
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(255), nullable=False)
user_id = Column(String(255), nullable=False)
fingerprint = Column(String(255))
public_key = Column(Text())
type = Column(Enum('ssh', 'x509', name='keypair_types'),
nullable=False, server_default='ssh')
# TODO(stephenfin): Remove this as it's now unused post-placement split
class ResourceClass(API_BASE):
"""Represents the type of resource for an inventory or allocation."""
__tablename__ = 'resource_classes'
__table_args__ = (
schema.UniqueConstraint("name", name="uniq_resource_classes0name"),
)
id = Column(Integer, primary_key=True, nullable=False)
name = Column(String(255), nullable=False)
# TODO(stephenfin): Remove this as it's now unused post-placement split
class ResourceProvider(API_BASE):
"""Represents a mapping to a providers of resources."""
__tablename__ = "resource_providers"
__table_args__ = (
Index('resource_providers_uuid_idx', 'uuid'),
schema.UniqueConstraint('uuid',
name='uniq_resource_providers0uuid'),
Index('resource_providers_name_idx', 'name'),
Index('resource_providers_root_provider_id_idx',
'root_provider_id'),
Index('resource_providers_parent_provider_id_idx',
'parent_provider_id'),
schema.UniqueConstraint('name',
name='uniq_resource_providers0name')
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
name = Column(Unicode(200), nullable=True)
generation = Column(Integer, default=0)
# Represents the root of the "tree" that the provider belongs to
root_provider_id = Column(Integer, ForeignKey('resource_providers.id'),
nullable=True)
# The immediate parent provider of this provider, or NULL if there is no
# parent. If parent_provider_id == NULL then root_provider_id == id
parent_provider_id = Column(Integer, ForeignKey('resource_providers.id'),
nullable=True)
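# Illustrative example (not part of the original module): for a compute
# node with a nested NUMA-node provider, the compute node row has
# parent_provider_id = NULL and root_provider_id equal to its own id,
# while the NUMA node row sets both parent_provider_id and
# root_provider_id to the compute node's id.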
# TODO(stephenfin): Remove this as it's now unused post-placement split
class Inventory(API_BASE):
"""Represents a quantity of available resource."""
__tablename__ = "inventories"
__table_args__ = (
Index('inventories_resource_provider_id_idx',
'resource_provider_id'),
Index('inventories_resource_class_id_idx',
'resource_class_id'),
Index('inventories_resource_provider_resource_class_idx',
'resource_provider_id', 'resource_class_id'),
schema.UniqueConstraint('resource_provider_id', 'resource_class_id',
name='uniq_inventories0resource_provider_resource_class')
)
id = Column(Integer, primary_key=True, nullable=False)
resource_provider_id = Column(Integer, nullable=False)
resource_class_id = Column(Integer, nullable=False)
total = Column(Integer, nullable=False)
reserved = Column(Integer, nullable=False)
min_unit = Column(Integer, nullable=False)
max_unit = Column(Integer, nullable=False)
step_size = Column(Integer, nullable=False)
allocation_ratio = Column(Float, nullable=False)
resource_provider = orm.relationship(
"ResourceProvider",
primaryjoin=('Inventory.resource_provider_id == '
'ResourceProvider.id'),
foreign_keys=resource_provider_id)
# TODO(stephenfin): Remove this as it's now unused post-placement split
class Allocation(API_BASE):
"""A use of inventory."""
__tablename__ = "allocations"
__table_args__ = (
Index('allocations_resource_provider_class_used_idx',
'resource_provider_id', 'resource_class_id',
'used'),
Index('allocations_resource_class_id_idx',
'resource_class_id'),
Index('allocations_consumer_id_idx', 'consumer_id')
)
id = Column(Integer, primary_key=True, nullable=False)
resource_provider_id = Column(Integer, nullable=False)
consumer_id = Column(String(36), nullable=False)
resource_class_id = Column(Integer, nullable=False)
used = Column(Integer, nullable=False)
resource_provider = orm.relationship(
"ResourceProvider",
primaryjoin=('Allocation.resource_provider_id == '
'ResourceProvider.id'),
foreign_keys=resource_provider_id)
# TODO(stephenfin): Remove this as it's now unused post-placement split
class ResourceProviderAggregate(API_BASE):
"""Associate a resource provider with an aggregate."""
__tablename__ = 'resource_provider_aggregates'
__table_args__ = (
Index('resource_provider_aggregates_aggregate_id_idx',
'aggregate_id'),
)
resource_provider_id = Column(Integer, primary_key=True, nullable=False)
aggregate_id = Column(Integer, primary_key=True, nullable=False)
# TODO(stephenfin): Remove this as it's now unused post-placement split
class PlacementAggregate(API_BASE):
"""A grouping of resource providers."""
__tablename__ = 'placement_aggregates'
__table_args__ = (
schema.UniqueConstraint("uuid", name="uniq_placement_aggregates0uuid"),
)
id = Column(Integer, primary_key=True, autoincrement=True)
uuid = Column(String(36), index=True)
class InstanceGroupMember(API_BASE):
"""Represents the members for an instance group."""
__tablename__ = 'instance_group_member'
__table_args__ = (
Index('instance_group_member_instance_idx', 'instance_uuid'),
)
id = Column(Integer, primary_key=True, nullable=False)
instance_uuid = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
class InstanceGroupPolicy(API_BASE):
"""Represents the policy type for an instance group."""
__tablename__ = 'instance_group_policy'
__table_args__ = (
Index('instance_group_policy_policy_idx', 'policy'),
)
id = Column(Integer, primary_key=True, nullable=False)
policy = Column(String(255))
group_id = Column(Integer, ForeignKey('instance_groups.id'),
nullable=False)
rules = Column(Text)
class InstanceGroup(API_BASE):
"""Represents an instance group.
A group will maintain a collection of instances and the relationship
between them.
"""
__tablename__ = 'instance_groups'
__table_args__ = (
schema.UniqueConstraint('uuid', name='uniq_instance_groups0uuid'),
)
id = Column(Integer, primary_key=True, autoincrement=True)
user_id = Column(String(255))
project_id = Column(String(255))
uuid = Column(String(36), nullable=False)
name = Column(String(255))
_policies = orm.relationship(InstanceGroupPolicy,
primaryjoin='InstanceGroup.id == InstanceGroupPolicy.group_id')
_members = orm.relationship(InstanceGroupMember,
primaryjoin='InstanceGroup.id == InstanceGroupMember.group_id')
@property
def policy(self):
if len(self._policies) > 1:
msg = ("More than one policy (%(policies)s) is associated with "
"group %(group_name)s, only the first one in the list "
"would be returned.")
LOG.warning(msg, {"policies": [p.policy for p in self._policies],
"group_name": self.name})
return self._policies[0] if self._policies else None
@property
def members(self):
return [m.instance_uuid for m in self._members]
class Quota(API_BASE):
"""Represents a single quota override for a project.
If there is no row for a given project id and resource, then the
default for the quota class is used. If there is no row for a
given quota class and resource, then the default for the
deployment is used. If the row is present but the hard limit is
Null, then the resource is unlimited.
"""
__tablename__ = 'quotas'
__table_args__ = (
schema.UniqueConstraint("project_id", "resource",
name="uniq_quotas0project_id0resource"
),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
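# Illustrative sketch (not part of the original module) of the fallback
# order described in the Quota docstring; the helper below is
# hypothetical, not a real nova API:
#
#   def effective_limit(quota_row, quota_class_row, deployment_default):
#       for row in (quota_row, quota_class_row):
#           if row is not None:
#               return row.hard_limit  # may be None, meaning "unlimited"
#       return deployment_default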
class ProjectUserQuota(API_BASE):
"""Represents a single quota override for a user with in a project."""
__tablename__ = 'project_user_quotas'
uniq_name = "uniq_project_user_quotas0user_id0project_id0resource"
__table_args__ = (
schema.UniqueConstraint("user_id", "project_id", "resource",
name=uniq_name),
Index('project_user_quotas_project_id_idx',
'project_id'),
Index('project_user_quotas_user_id_idx',
'user_id',)
)
id = Column(Integer, primary_key=True, nullable=False)
project_id = Column(String(255), nullable=False)
user_id = Column(String(255), nullable=False)
resource = Column(String(255), nullable=False)
hard_limit = Column(Integer)
class QuotaClass(API_BASE):
"""Represents a single quota override for a quota class.
If there is no row for a given quota class and resource, then the
default for the deployment is used. If the row is present but the
hard limit is Null, then the resource is unlimited.
"""
__tablename__ = 'quota_classes'
__table_args__ = (
Index('quota_classes_class_name_idx', 'class_name'),
)
id = Column(Integer, primary_key=True)
class_name = Column(String(255))
resource = Column(String(255))
hard_limit = Column(Integer)
class QuotaUsage(API_BASE):
"""Represents the current usage for a given resource."""
__tablename__ = 'quota_usages'
__table_args__ = (
Index('quota_usages_project_id_idx', 'project_id'),
Index('quota_usages_user_id_idx', 'user_id'),
)
id = Column(Integer, primary_key=True)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255), nullable=False)
in_use = Column(Integer, nullable=False)
reserved = Column(Integer, nullable=False)
@property
def total(self):
return self.in_use + self.reserved
until_refresh = Column(Integer)
class Reservation(API_BASE):
"""Represents a resource reservation for quotas."""
__tablename__ = 'reservations'
__table_args__ = (
Index('reservations_project_id_idx', 'project_id'),
Index('reservations_uuid_idx', 'uuid'),
Index('reservations_expire_idx', 'expire'),
Index('reservations_user_id_idx', 'user_id'),
)
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
usage_id = Column(Integer, ForeignKey('quota_usages.id'), nullable=False)
project_id = Column(String(255))
user_id = Column(String(255))
resource = Column(String(255))
delta = Column(Integer, nullable=False)
expire = Column(DateTime)
usage = orm.relationship(
"QuotaUsage",
foreign_keys=usage_id,
primaryjoin='Reservation.usage_id == QuotaUsage.id')
class Trait(API_BASE):
"""Represents a trait."""
__tablename__ = "traits"
__table_args__ = (
schema.UniqueConstraint('name', name='uniq_traits0name'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
name = Column(Unicode(255), nullable=False)
# TODO(stephenfin): Remove this as it's now unused post-placement split
class ResourceProviderTrait(API_BASE):
"""Represents the relationship between traits and resource provider"""
__tablename__ = "resource_provider_traits"
__table_args__ = (
Index('resource_provider_traits_resource_provider_trait_idx',
'resource_provider_id', 'trait_id'),
)
trait_id = Column(Integer, ForeignKey('traits.id'), primary_key=True,
nullable=False)
resource_provider_id = Column(Integer,
ForeignKey('resource_providers.id'),
primary_key=True,
nullable=False)
# TODO(stephenfin): Remove this as it's unused
class Project(API_BASE):
"""The project is the Keystone project."""
__tablename__ = 'projects'
__table_args__ = (
schema.UniqueConstraint(
'external_id',
name='uniq_projects0external_id',
),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
external_id = Column(String(255), nullable=False)
# TODO(stephenfin): Remove this as it's unused
class User(API_BASE):
"""The user is the Keystone user."""
__tablename__ = 'users'
__table_args__ = (
schema.UniqueConstraint(
'external_id',
name='uniq_users0external_id',
),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
external_id = Column(String(255), nullable=False)
# TODO(stephenfin): Remove this as it's unused
class Consumer(API_BASE):
"""Represents a resource consumer."""
__tablename__ = 'consumers'
__table_args__ = (
Index('consumers_project_id_uuid_idx', 'project_id', 'uuid'),
Index('consumers_project_id_user_id_uuid_idx', 'project_id', 'user_id',
'uuid'),
schema.UniqueConstraint('uuid', name='uniq_consumers0uuid'),
)
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
project_id = Column(Integer, nullable=False)
user_id = Column(Integer, nullable=False)
# FIXME(mriedem): Change this to server_default=text("0") to match the
# 059_add_consumer_generation script once bug 1776527 is fixed.
generation = Column(Integer, nullable=False, server_default="0", default=0)
|
apache-2.0
|
sfluo/mrbot
|
crypto/pycrypto-2.6/build/lib.macosx-10.7-intel-2.7/Crypto/Cipher/DES.py
|
117
|
4403
|
# -*- coding: utf-8 -*-
#
# Cipher/DES.py : DES
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""DES symmetric cipher
DES `(Data Encryption Standard)`__ is a symmetric block cipher standardized
by NIST_ . It has a fixed data block size of 8 bytes.
Its keys are 64 bits long, even though 8 bits are used only for parity (they
are now ignored) and do not contribute to security.
DES has no known practical structural weaknesses, but its key length is too
short by today's standards and it can be brute-forced with modest effort.
DES should not be used for new designs. Use `AES`.
As an example, encryption can be done as follows:
>>> from Crypto.Cipher import DES
>>> from Crypto import Random
>>>
>>> key = b'-8B key-'
>>> iv = Random.new().read(DES.block_size)
>>> cipher = DES.new(key, DES.MODE_OFB, iv)
>>> plaintext = b'sona si latine loqueris '
>>> msg = iv + cipher.encrypt(plaintext)
.. __: http://en.wikipedia.org/wiki/Data_Encryption_Standard
.. _NIST: http://csrc.nist.gov/publications/fips/fips46-3/fips46-3.pdf
:undocumented: __revision__, __package__
"""
__revision__ = "$Id$"
from Crypto.Cipher import blockalgo
from Crypto.Cipher import _DES
class DESCipher(blockalgo.BlockAlgo):
"""DES cipher object"""
def __init__(self, key, *args, **kwargs):
"""Initialize a DES cipher object
See also `new()` at the module level."""
blockalgo.BlockAlgo.__init__(self, _DES, key, *args, **kwargs)
def new(key, *args, **kwargs):
"""Create a new DES cipher
:Parameters:
key : byte string
The secret key to use in the symmetric cipher.
        It must be 8 bytes long. The parity bits will be ignored.
:Keywords:
mode : a *MODE_** constant
The chaining mode to use for encryption or decryption.
Default is `MODE_ECB`.
IV : byte string
The initialization vector to use for encryption or decryption.
It is ignored for `MODE_ECB` and `MODE_CTR`.
For `MODE_OPENPGP`, IV must be `block_size` bytes long for encryption
and `block_size` +2 bytes for decryption (in the latter case, it is
actually the *encrypted* IV which was prefixed to the ciphertext).
It is mandatory.
        For all other modes, it must be `block_size` bytes long. It is optional and
when not present it will be given a default value of all zeroes.
counter : callable
(*Only* `MODE_CTR`). A stateful function that returns the next
*counter block*, which is a byte string of `block_size` bytes.
For better performance, use `Crypto.Util.Counter`.
segment_size : integer
        (*Only* `MODE_CFB`.) The number of bits into which the plaintext
        and ciphertext are segmented.
It must be a multiple of 8. If 0 or not specified, it will be assumed to be 8.
    :Return: a `DESCipher` object
"""
return DESCipher(key, *args, **kwargs)
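# Illustrative decryption sketch (not part of the original module),
# mirroring the encryption example in the module docstring: the IV that
# was prefixed to the ciphertext is split off and reused.
#
# >>> from Crypto.Cipher import DES
# >>> iv, body = msg[:DES.block_size], msg[DES.block_size:]
# >>> cipher = DES.new(key, DES.MODE_OFB, iv)
# >>> plaintext = cipher.decrypt(body)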
#: Electronic Code Book (ECB). See `blockalgo.MODE_ECB`.
MODE_ECB = 1
#: Cipher-Block Chaining (CBC). See `blockalgo.MODE_CBC`.
MODE_CBC = 2
#: Cipher FeedBack (CFB). See `blockalgo.MODE_CFB`.
MODE_CFB = 3
#: This mode should not be used.
MODE_PGP = 4
#: Output FeedBack (OFB). See `blockalgo.MODE_OFB`.
MODE_OFB = 5
#: CounTer Mode (CTR). See `blockalgo.MODE_CTR`.
MODE_CTR = 6
#: OpenPGP Mode. See `blockalgo.MODE_OPENPGP`.
MODE_OPENPGP = 7
#: Size of a data block (in bytes)
block_size = 8
#: Size of a key (in bytes)
key_size = 8
|
bsd-3-clause
|
samstern/MSc-Project
|
pybrain/rl/environments/timeseries/autoRegRRL.py
|
1
|
3851
|
from pybrain.rl.environments.timeseries.maximizereturntask import MaximizeReturnTask
from pybrain.rl.environments.timeseries.timeseries import RWEnvironment
from pybrain.rl.learners.directsearch.rrl import RRL
from pybrain.structure import RecurrentNetwork
from pybrain.structure import LinearLayer, BiasUnit, FullConnection
from pybrain.structure.modules.signlayer import SignLayer
from pybrain.rl.agents.learning import LearningAgent
from pybrain.rl.experiments import ContinuousExperiment
import performanceEvaluation as pE
import matplotlib.pyplot as plt
import scipy.stats as st
from numpy import cumsum, log, sign, mean
from math import floor
"""
Parameter settings
eta=0.01
step size=0.01
"""
def main():
numIterations=200
terminal_EMA_SharpeRatio=[0 for i in range(numIterations)]
numTrades=[0 for i in range(numIterations)]
sharpe_first_half=[0 for i in range(numIterations)]
sharpe_sec_half=[0 for i in range(numIterations)]
sharpe_ratio_total=[0 for i in range(numIterations)]
for i in range(numIterations):
env=RWEnvironment(2000)
task = MaximizeReturnTask(env)
numIn=min(env.worldState.shape)
net=RecurrentNetwork()
net.addInputModule(BiasUnit(name='bias'))
net.addOutputModule((SignLayer(1,name='out')))
net.addRecurrentConnection(FullConnection(net['out'], net['out'], name='c3'))
net.addInputModule(LinearLayer(numIn,name='in'))
net.addConnection(FullConnection(net['in'],net['out'],name='c1'))
net.addConnection((FullConnection(net['bias'],net['out'],name='c2')))
net.sortModules()
ts=env.ts
learner = RRL(numIn+2,ts) # ENAC() #Q_LinFA(2,1)
agent = LearningAgent(net,learner)
exp = ContinuousExperiment(task,agent)
#performance tracking
exp.doInteractionsAndLearn(len(ts)-1)
#print(net._params)
terminal_EMA_SharpeRatio[i]=learner.ema_sharpeRatio[-1]
rs=pE.calculateTradingReturn(env.actionHistory,ts)
        sharpe_first_half[i]=pE.annualisedSharpe(rs[:len(ts)//2])
        sharpe_sec_half[i]=pE.annualisedSharpe(rs[len(ts)//2:])
sharpe_ratio_total[i]=pE.annualisedSharpe(rs)
numTrades[i]=learner.numTrades
print(net._params)
print("average number of trades per 1000 observations is {}".format(mean(numTrades)/2))
print("mean Sharpe ratios are {} with standard errors {}, and {} with standard errors {}".format(mean(sharpe_first_half),st.sem(sharpe_first_half),mean(sharpe_sec_half),st.sem(sharpe_sec_half)))
print("average sharpe ratio for each entire epoche is {} with standard error {}".format(mean(sharpe_ratio_total),st.sem(sharpe_ratio_total)))
fig,ax= plt.subplots(nrows=2,ncols=1,sharex=True,sharey=True)
l1=ax[0].hist(sharpe_first_half,bins=20)
ax[0].set_title('Annualised Sharpe Ratio (t=0:1000)')
l2=ax[1].hist(sharpe_sec_half,bins=20)
ax[1].set_title('Annualised Sharpe Ratio (t=1001:2000)')
plt.show()
#plt.hist(numTrades,bins=20)
#plt.plot(terminal_EMA_SharpeRatio)
#plt.show()
actionHist=env.actionHistory
ts=[t/100 for t in ts]
cum_log_r=cumsum([log(1+ts[i]) for i in range(len(ts))])
cum_log_R=cumsum([log(1+(actionHist[i]*ts[i])) for i in range(len(ts))])
    fig, axes = plt.subplots(3, sharex=True)
ln1=axes[0].plot(cum_log_r,label='Buy and Hold')
ln2=axes[0].plot(cum_log_R,label='Trading Agent')
lns=ln1+ln2
labs=[l.get_label() for l in lns]
axes[0].legend(lns,labs,loc='upper left')
axes[0].set_ylabel("Cumulative Log Returns")
    axes[0].set_title("Artificial Series")
ln3=axes[1].plot(actionHist,'r',label='Trades')
axes[1].set_ylabel("F(t)")
axes[2].plot(learner.ema_sharpeRatio)
axes[2].set_ylabel("EMA Sharpe Ratio")
plt.show()
if __name__ == '__main__':
main()
|
bsd-3-clause
|
Mixser/django
|
tests/urlpatterns_reverse/views.py
|
218
|
1538
|
from functools import partial, update_wrapper
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse
from django.views.generic import RedirectView
def empty_view(request, *args, **kwargs):
return HttpResponse('')
def kwargs_view(request, arg1=1, arg2=2):
return HttpResponse('')
def absolute_kwargs_view(request, arg1=1, arg2=2):
return HttpResponse('')
def defaults_view(request, arg1, arg2):
pass
def nested_view(request):
pass
def erroneous_view(request):
import non_existent # NOQA
def pass_resolver_match_view(request, *args, **kwargs):
response = HttpResponse('')
response.resolver_match = request.resolver_match
return response
uncallable = None # neither a callable nor a string
class ViewClass(object):
def __call__(self, request, *args, **kwargs):
return HttpResponse('')
view_class_instance = ViewClass()
class LazyRedirectView(RedirectView):
url = reverse_lazy('named-lazy-url-redirected-to')
@user_passes_test(lambda u: u.is_authenticated(), login_url=reverse_lazy('some-login-page'))
def login_required_view(request):
return HttpResponse('Hello you')
def bad_view(request, *args, **kwargs):
raise ValueError("I don't think I'm getting good value for this view")
empty_view_partial = partial(empty_view, template_name="template.html")
empty_view_wrapped = update_wrapper(
partial(empty_view, template_name="template.html"), empty_view,
)
|
bsd-3-clause
|
2uller/LotF
|
App/Lib/logging/handlers.py
|
2
|
48645
|
# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging.handlers' and log away!
"""
import errno, logging, socket, os, cPickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME
try:
import codecs
except ImportError:
codecs = None
try:
unicode
_unicode = True
except NameError:
_unicode = False
#
# Some constants...
#
DEFAULT_TCP_LOGGING_PORT = 9020
DEFAULT_UDP_LOGGING_PORT = 9021
DEFAULT_HTTP_LOGGING_PORT = 9022
DEFAULT_SOAP_LOGGING_PORT = 9023
SYSLOG_UDP_PORT = 514
SYSLOG_TCP_PORT = 514
_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
class BaseRotatingHandler(logging.FileHandler):
"""
Base class for handlers that rotate log files at a certain point.
Not meant to be instantiated directly. Instead, use RotatingFileHandler
or TimedRotatingFileHandler.
"""
def __init__(self, filename, mode, encoding=None, delay=0):
"""
Use the specified filename for streamed logging
"""
if codecs is None:
encoding = None
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.mode = mode
self.encoding = encoding
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class RotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a set of files, which switches from one file
to the next when the current file reaches a certain size.
"""
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
"""
Open the specified file and use it as the stream for logging.
By default, the file grows indefinitely. You can specify particular
values of maxBytes and backupCount to allow the file to rollover at
a predetermined size.
Rollover occurs whenever the current log file is nearly maxBytes in
length. If backupCount is >= 1, the system will successively create
new files with the same pathname as the base file, but with extensions
".1", ".2" etc. appended to it. For example, with a backupCount of 5
and a base file name of "app.log", you would get "app.log",
"app.log.1", "app.log.2", ... through to "app.log.5". The file being
written to is always "app.log" - when it gets filled up, it is closed
and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
exist, then they are renamed to "app.log.2", "app.log.3" etc.
respectively.
If maxBytes is zero, rollover never occurs.
"""
# If rotation/rollover is wanted, it doesn't make sense to use another
# mode. If for example 'w' were specified, then if there were multiple
# runs of the calling application, the logs from previous runs would be
# lost if the 'w' is respected, because the log file would be truncated
# on each run.
if maxBytes > 0:
mode = 'a'
BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
self.maxBytes = maxBytes
self.backupCount = backupCount
def doRollover(self):
"""
Do a rollover, as described in __init__().
"""
if self.stream:
self.stream.close()
self.stream = None
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = "%s.%d" % (self.baseFilename, i)
dfn = "%s.%d" % (self.baseFilename, i + 1)
if os.path.exists(sfn):
#print "%s -> %s" % (sfn, dfn)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.baseFilename + ".1"
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
#print "%s -> %s" % (self.baseFilename, dfn)
self.stream = self._open()
def shouldRollover(self, record):
"""
Determine if rollover should occur.
Basically, see if the supplied record would cause the file to exceed
the size limit we have.
"""
if self.stream is None: # delay was set...
self.stream = self._open()
if self.maxBytes > 0: # are we rolling over?
msg = "%s\n" % self.format(record)
self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
if self.stream.tell() + len(msg) >= self.maxBytes:
return 1
return 0
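# Illustrative usage (not part of the original module): keep "app.log"
# plus at most five rotated files of roughly 1 MB each.
#
#   import logging
#   from logging.handlers import RotatingFileHandler
#   handler = RotatingFileHandler('app.log', maxBytes=1024 * 1024,
#                                 backupCount=5)
#   logging.getLogger('myapp').addHandler(handler)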
class TimedRotatingFileHandler(BaseRotatingHandler):
"""
Handler for logging to a file, rotating the log file at certain timed
intervals.
If backupCount is > 0, when rollover is done, no more than backupCount
files are kept - the oldest ones are deleted.
"""
def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
self.when = when.upper()
self.backupCount = backupCount
self.utc = utc
# Calculate the real rollover interval, which is just the number of
# seconds between rollovers. Also set the filename suffix used when
# a rollover occurs. Current 'when' events supported:
# S - Seconds
# M - Minutes
# H - Hours
# D - Days
# midnight - roll over at midnight
# W{0-6} - roll over on a certain day; 0 - Monday
#
# Case of the 'when' specifier is not important; lower or upper case
# will work.
if self.when == 'S':
self.interval = 1 # one second
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
elif self.when == 'M':
self.interval = 60 # one minute
self.suffix = "%Y-%m-%d_%H-%M"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
elif self.when == 'H':
self.interval = 60 * 60 # one hour
self.suffix = "%Y-%m-%d_%H"
self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
elif self.when == 'D' or self.when == 'MIDNIGHT':
self.interval = 60 * 60 * 24 # one day
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
elif self.when.startswith('W'):
self.interval = 60 * 60 * 24 * 7 # one week
if len(self.when) != 2:
raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
if self.when[1] < '0' or self.when[1] > '6':
raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
self.dayOfWeek = int(self.when[1])
self.suffix = "%Y-%m-%d"
self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
else:
raise ValueError("Invalid rollover interval specified: %s" % self.when)
self.extMatch = re.compile(self.extMatch)
self.interval = self.interval * interval # multiply by units requested
if os.path.exists(filename):
t = os.stat(filename)[ST_MTIME]
else:
t = int(time.time())
self.rolloverAt = self.computeRollover(t)
def computeRollover(self, currentTime):
"""
Work out the rollover time based on the specified time.
"""
result = currentTime + self.interval
# If we are rolling over at midnight or weekly, then the interval is already known.
# What we need to figure out is WHEN the next interval is. In other words,
# if you are rolling over at midnight, then your base interval is 1 day,
# but you want to start that one day clock at midnight, not now. So, we
# have to fudge the rolloverAt value in order to trigger the first rollover
# at the right time. After that, the regular interval will take care of
# the rest. Note that this code doesn't care about leap seconds. :)
if self.when == 'MIDNIGHT' or self.when.startswith('W'):
# This could be done with less code, but I wanted it to be clear
if self.utc:
t = time.gmtime(currentTime)
else:
t = time.localtime(currentTime)
currentHour = t[3]
currentMinute = t[4]
currentSecond = t[5]
# r is the number of seconds left between now and midnight
r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
currentSecond)
result = currentTime + r
# If we are rolling over on a certain day, add in the number of days until
# the next rollover, but offset by 1 since we just calculated the time
# until the next day starts. There are three cases:
# Case 1) The day to rollover is today; in this case, do nothing
# Case 2) The day to rollover is further in the interval (i.e., today is
# day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
# next rollover is simply 6 - 2 - 1, or 3.
# Case 3) The day to rollover is behind us in the interval (i.e., today
# is day 5 (Saturday) and rollover is on day 3 (Thursday).
# Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
# number of days left in the current week (1) plus the number
# of days in the next week until the rollover day (3).
# The calculations described in 2) and 3) above need to have a day added.
# This is because the above time calculation takes us to midnight on this
# day, i.e. the start of the next day.
if self.when.startswith('W'):
day = t[6] # 0 is Monday
if day != self.dayOfWeek:
if day < self.dayOfWeek:
daysToWait = self.dayOfWeek - day
else:
daysToWait = 6 - day + self.dayOfWeek + 1
newRolloverAt = result + (daysToWait * (60 * 60 * 24))
if not self.utc:
dstNow = t[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
result = newRolloverAt
return result
def shouldRollover(self, record):
"""
Determine if rollover should occur.
record is not used, as we are just comparing times, but it is needed so
the method signatures are the same
"""
t = int(time.time())
if t >= self.rolloverAt:
return 1
#print "No need to rollover: %d, %d" % (t, self.rolloverAt)
return 0
def getFilesToDelete(self):
"""
Determine the files to delete when rolling over.
More specific than the earlier method, which just used glob.glob().
"""
dirName, baseName = os.path.split(self.baseFilename)
fileNames = os.listdir(dirName)
result = []
prefix = baseName + "."
plen = len(prefix)
for fileName in fileNames:
if fileName[:plen] == prefix:
suffix = fileName[plen:]
if self.extMatch.match(suffix):
result.append(os.path.join(dirName, fileName))
result.sort()
if len(result) < self.backupCount:
result = []
else:
result = result[:len(result) - self.backupCount]
return result
def doRollover(self):
"""
do a rollover; in this case, a date/time stamp is appended to the filename
when the rollover happens. However, you want the file to be named for the
start of the interval, not the current time. If there is a backup count,
then we have to get a list of matching filenames, sort them and remove
the one with the oldest suffix.
"""
if self.stream:
self.stream.close()
self.stream = None
# get the time that this sequence started at and make it a TimeTuple
currentTime = int(time.time())
dstNow = time.localtime(currentTime)[-1]
t = self.rolloverAt - self.interval
if self.utc:
timeTuple = time.gmtime(t)
else:
timeTuple = time.localtime(t)
dstThen = timeTuple[-1]
if dstNow != dstThen:
if dstNow:
addend = 3600
else:
addend = -3600
timeTuple = time.localtime(t + addend)
dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
if self.backupCount > 0:
# find the oldest log file and delete it
#s = glob.glob(self.baseFilename + ".20*")
#if len(s) > self.backupCount:
# s.sort()
# os.remove(s[0])
for s in self.getFilesToDelete():
os.remove(s)
#print "%s -> %s" % (self.baseFilename, dfn)
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
addend = -3600
else: # DST bows out before next rollover, so we need to add an hour
addend = 3600
newRolloverAt += addend
self.rolloverAt = newRolloverAt
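# Illustrative usage (not part of the original module): rotate at
# midnight and keep the last seven days of logs.
#
#   from logging.handlers import TimedRotatingFileHandler
#   handler = TimedRotatingFileHandler('app.log', when='midnight',
#                                      backupCount=7)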
class WatchedFileHandler(logging.FileHandler):
"""
A handler for logging to a file, which watches the file
to see if it has changed while in use. This can happen because of
usage of programs such as newsyslog and logrotate which perform
log file rotation. This handler, intended for use under Unix,
watches the file to see if it has changed since the last emit.
(A file has changed if its device or inode have changed.)
If it has changed, the old file stream is closed, and the file
opened to get a new stream.
This handler is not appropriate for use under Windows, because
under Windows open files cannot be moved or renamed - logging
opens the files with exclusive locks - and so there is no need
for such a handler. Furthermore, ST_INO is not supported under
Windows; stat always returns zero for this value.
This handler is based on a suggestion and patch by Chad J.
Schroeder.
"""
def __init__(self, filename, mode='a', encoding=None, delay=0):
logging.FileHandler.__init__(self, filename, mode, encoding, delay)
self.dev, self.ino = -1, -1
self._statstream()
def _statstream(self):
if self.stream:
sres = os.fstat(self.stream.fileno())
self.dev, self.ino = sres[ST_DEV], sres[ST_INO]
def emit(self, record):
"""
Emit a record.
First check if the underlying file has changed, and if it
has, close the old stream and reopen the file to get the
current stream.
"""
# Reduce the chance of race conditions by stat'ing by path only
# once and then fstat'ing our new fd if we opened a new log stream.
# See issue #14632: Thanks to John Mulligan for the problem report
# and patch.
try:
# stat the file by path, checking for existence
sres = os.stat(self.baseFilename)
except OSError as err:
if err.errno == errno.ENOENT:
sres = None
else:
raise
# compare file system stat with that of our stream file handle
if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
if self.stream is not None:
# we have an open file handle, clean it up
self.stream.flush()
self.stream.close()
# open a new file handle and get new stat info from that fd
self.stream = self._open()
self._statstream()
logging.FileHandler.emit(self, record)
class SocketHandler(logging.Handler):
"""
A handler class which writes logging records, in pickle format, to
a streaming socket. The socket is kept open across logging calls.
If the peer resets it, an attempt is made to reconnect on the next call.
The pickle which is sent is that of the LogRecord's attribute dictionary
(__dict__), so that the receiver does not need to have the logging module
installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
The attribute 'closeOnError' is set to 1 - which means that if
a socket error occurs, the socket is silently closed and then
reopened on the next logging call.
"""
logging.Handler.__init__(self)
self.host = host
self.port = port
self.sock = None
self.closeOnError = 0
self.retryTime = None
#
# Exponential backoff parameters.
#
self.retryStart = 1.0
self.retryMax = 30.0
self.retryFactor = 2.0
def makeSocket(self, timeout=1):
"""
A factory method which allows subclasses to define the precise
type of socket they want.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if hasattr(s, 'settimeout'):
s.settimeout(timeout)
s.connect((self.host, self.port))
return s
def createSocket(self):
"""
Try to create a socket, using an exponential backoff with
a max retry time. Thanks to Robert Olson for the original patch
(SF #815911) which has been slightly refactored.
"""
now = time.time()
# Either retryTime is None, in which case this
# is the first time back after a disconnect, or
# we've waited long enough.
if self.retryTime is None:
attempt = 1
else:
attempt = (now >= self.retryTime)
if attempt:
try:
self.sock = self.makeSocket()
self.retryTime = None # next time, no delay before trying
except socket.error:
#Creation failed, so set the retry time and return.
if self.retryTime is None:
self.retryPeriod = self.retryStart
else:
self.retryPeriod = self.retryPeriod * self.retryFactor
if self.retryPeriod > self.retryMax:
self.retryPeriod = self.retryMax
self.retryTime = now + self.retryPeriod
def send(self, s):
"""
Send a pickled string to the socket.
This function allows for partial sends which can happen when the
network is busy.
"""
if self.sock is None:
self.createSocket()
#self.sock can be None either because we haven't reached the retry
#time yet, or because we have reached the retry time and retried,
#but are still unable to connect.
if self.sock:
try:
if hasattr(self.sock, "sendall"):
self.sock.sendall(s)
else:
sentsofar = 0
left = len(s)
while left > 0:
sent = self.sock.send(s[sentsofar:])
sentsofar = sentsofar + sent
left = left - sent
except socket.error:
self.sock.close()
self.sock = None # so we can call createSocket next time
def makePickle(self, record):
"""
Pickles the record in binary format with a length prefix, and
returns it ready for transmission across the socket.
"""
ei = record.exc_info
if ei:
# just to get traceback text into record.exc_text ...
dummy = self.format(record)
record.exc_info = None # to avoid Unpickleable error
# See issue #14436: If msg or args are objects, they may not be
# available on the receiving end. So we convert the msg % args
# to a string, save it as msg and zap the args.
d = dict(record.__dict__)
d['msg'] = record.getMessage()
d['args'] = None
s = cPickle.dumps(d, 1)
if ei:
record.exc_info = ei # for next handler
slen = struct.pack(">L", len(s))
return slen + s
def handleError(self, record):
"""
Handle an error during logging.
An error has occurred during logging. Most likely cause -
connection lost. Close the socket so that we can retry on the
next event.
"""
if self.closeOnError and self.sock:
self.sock.close()
self.sock = None #try to reconnect next time
else:
logging.Handler.handleError(self, record)
def emit(self, record):
"""
Emit a record.
Pickles the record and writes it to the socket in binary format.
If there is an error with the socket, silently drop the packet.
If there was a problem with the socket, re-establishes the
socket.
"""
try:
s = self.makePickle(record)
self.send(s)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def close(self):
"""
Closes the socket.
"""
self.acquire()
try:
if self.sock:
self.sock.close()
self.sock = None
finally:
self.release()
logging.Handler.close(self)
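# Illustrative receiver sketch (not part of the original module): the
# wire format produced by makePickle() is a 4-byte big-endian length
# prefix followed by the pickled LogRecord dict, so a receiver can do:
#
#   import cPickle, logging, struct
#
#   def read_record(conn):
#       slen = struct.unpack('>L', conn.recv(4))[0]
#       data = conn.recv(slen)  # a robust receiver loops until slen bytes
#       return logging.makeLogRecord(cPickle.loads(data))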
class DatagramHandler(SocketHandler):
"""
A handler class which writes logging records, in pickle format, to
a datagram socket. The pickle which is sent is that of the LogRecord's
attribute dictionary (__dict__), so that the receiver does not need to
have the logging module installed in order to process the logging event.
To unpickle the record at the receiving end into a LogRecord, use the
makeLogRecord function.
"""
def __init__(self, host, port):
"""
Initializes the handler with a specific host address and port.
"""
SocketHandler.__init__(self, host, port)
self.closeOnError = 0
def makeSocket(self):
"""
The factory method of SocketHandler is here overridden to create
a UDP socket (SOCK_DGRAM).
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return s
def send(self, s):
"""
Send a pickled string to a socket.
This function no longer allows for partial sends which can happen
when the network is busy - UDP does not guarantee delivery and
can deliver packets out of sequence.
"""
if self.sock is None:
self.createSocket()
self.sock.sendto(s, (self.host, self.port))
class SysLogHandler(logging.Handler):
"""
A handler class which sends formatted logging records to a syslog
server. Based on Sam Rushing's syslog module:
http://www.nightmare.com/squirl/python-ext/misc/syslog.py
Contributed by Nicolas Untz (after which minor refactoring changes
have been made).
"""
# from <linux/sys/syslog.h>:
# ======================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where
# the bottom 3 bits are the priority (0-7) and the top 28 bits are the
# facility (0-big number). Both the priorities and the facilities map
# roughly one-to-one to strings in the syslogd(8) source code. This
# mapping is included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0 # system is unusable
LOG_ALERT = 1 # action must be taken immediately
LOG_CRIT = 2 # critical conditions
LOG_ERR = 3 # error conditions
LOG_WARNING = 4 # warning conditions
LOG_NOTICE = 5 # normal but significant condition
LOG_INFO = 6 # informational
LOG_DEBUG = 7 # debug-level messages
# facility codes
LOG_KERN = 0 # kernel messages
LOG_USER = 1 # random user-level messages
LOG_MAIL = 2 # mail system
LOG_DAEMON = 3 # system daemons
LOG_AUTH = 4 # security/authorization messages
LOG_SYSLOG = 5 # messages generated internally by syslogd
LOG_LPR = 6 # line printer subsystem
LOG_NEWS = 7 # network news subsystem
LOG_UUCP = 8 # UUCP subsystem
LOG_CRON = 9 # clock daemon
LOG_AUTHPRIV = 10 # security/authorization messages (private)
LOG_FTP = 11 # FTP daemon
# other codes through 15 reserved for system use
LOG_LOCAL0 = 16 # reserved for local use
LOG_LOCAL1 = 17 # reserved for local use
LOG_LOCAL2 = 18 # reserved for local use
LOG_LOCAL3 = 19 # reserved for local use
LOG_LOCAL4 = 20 # reserved for local use
LOG_LOCAL5 = 21 # reserved for local use
LOG_LOCAL6 = 22 # reserved for local use
LOG_LOCAL7 = 23 # reserved for local use
priority_names = {
"alert": LOG_ALERT,
"crit": LOG_CRIT,
"critical": LOG_CRIT,
"debug": LOG_DEBUG,
"emerg": LOG_EMERG,
"err": LOG_ERR,
"error": LOG_ERR, # DEPRECATED
"info": LOG_INFO,
"notice": LOG_NOTICE,
"panic": LOG_EMERG, # DEPRECATED
"warn": LOG_WARNING, # DEPRECATED
"warning": LOG_WARNING,
}
facility_names = {
"auth": LOG_AUTH,
"authpriv": LOG_AUTHPRIV,
"cron": LOG_CRON,
"daemon": LOG_DAEMON,
"ftp": LOG_FTP,
"kern": LOG_KERN,
"lpr": LOG_LPR,
"mail": LOG_MAIL,
"news": LOG_NEWS,
"security": LOG_AUTH, # DEPRECATED
"syslog": LOG_SYSLOG,
"user": LOG_USER,
"uucp": LOG_UUCP,
"local0": LOG_LOCAL0,
"local1": LOG_LOCAL1,
"local2": LOG_LOCAL2,
"local3": LOG_LOCAL3,
"local4": LOG_LOCAL4,
"local5": LOG_LOCAL5,
"local6": LOG_LOCAL6,
"local7": LOG_LOCAL7,
}
#The map below appears to be trivially lowercasing the key. However,
#there's more to it than meets the eye - in some locales, lowercasing
#gives unexpected results. See SF #1524081: in the Turkish locale,
#"INFO".lower() != "info"
priority_map = {
"DEBUG" : "debug",
"INFO" : "info",
"WARNING" : "warning",
"ERROR" : "error",
"CRITICAL" : "critical"
}
def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
facility=LOG_USER, socktype=None):
"""
Initialize a handler.
If address is specified as a string, a UNIX socket is used. To log to a
local syslogd, "SysLogHandler(address="/dev/log")" can be used.
If facility is not specified, LOG_USER is used. If socktype is
specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
socket type will be used. For Unix sockets, you can also specify a
socktype of None, in which case socket.SOCK_DGRAM will be used, falling
back to socket.SOCK_STREAM.
"""
logging.Handler.__init__(self)
self.address = address
self.facility = facility
self.socktype = socktype
if isinstance(address, basestring):
self.unixsocket = 1
self._connect_unixsocket(address)
else:
self.unixsocket = 0
if socktype is None:
socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_INET, socktype)
if socktype == socket.SOCK_STREAM:
self.socket.connect(address)
self.socktype = socktype
self.formatter = None
def _connect_unixsocket(self, address):
use_socktype = self.socktype
if use_socktype is None:
use_socktype = socket.SOCK_DGRAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except socket.error:
self.socket.close()
if self.socktype is not None:
# user didn't specify falling back, so fail
raise
use_socktype = socket.SOCK_STREAM
self.socket = socket.socket(socket.AF_UNIX, use_socktype)
try:
self.socket.connect(address)
# it worked, so set self.socktype to the used type
self.socktype = use_socktype
except socket.error:
self.socket.close()
raise
# curious: when talking to the unix-domain '/dev/log' socket, a
# zero-terminator seems to be required. this string is placed
# into a class variable so that it can be overridden if
# necessary.
log_format_string = '<%d>%s\000'
def encodePriority(self, facility, priority):
"""
Encode the facility and priority. You can pass in strings or
integers - if strings are passed, the facility_names and
priority_names mapping dictionaries are used to convert them to
integers.
"""
if isinstance(facility, basestring):
facility = self.facility_names[facility]
if isinstance(priority, basestring):
priority = self.priority_names[priority]
return (facility << 3) | priority
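    # Worked example (illustrative, not in the original source): facility
    # LOG_USER (1) and priority LOG_INFO (6) encode to (1 << 3) | 6 == 14,
    # which emit() renders as the "<14>" prefix on the wire.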
def close (self):
"""
Closes the socket.
"""
self.acquire()
try:
if self.unixsocket:
self.socket.close()
finally:
self.release()
logging.Handler.close(self)
def mapPriority(self, levelName):
"""
Map a logging level name to a key in the priority_names map.
This is useful in two scenarios: when custom levels are being
used, and in the case where you can't do a straightforward
mapping by lowercasing the logging level name because of locale-
specific issues (see SF #1524081).
"""
return self.priority_map.get(levelName, "warning")
def emit(self, record):
"""
Emit a record.
The record is formatted, and then sent to the syslog server. If
exception information is present, it is NOT sent to the server.
"""
msg = self.format(record) + '\000'
"""
We need to convert record level to lowercase, maybe this will
change in the future.
"""
prio = '<%d>' % self.encodePriority(self.facility,
self.mapPriority(record.levelname))
# Message is a string. Convert to bytes as required by RFC 5424
if type(msg) is unicode:
msg = msg.encode('utf-8')
msg = prio + msg
try:
if self.unixsocket:
try:
self.socket.send(msg)
except socket.error:
self._connect_unixsocket(self.address)
self.socket.send(msg)
elif self.socktype == socket.SOCK_DGRAM:
self.socket.sendto(msg, self.address)
else:
self.socket.sendall(msg)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
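# Illustrative usage (not part of the original module): address may be a
# UDP (host, port) tuple or the path of the local syslog socket; the
# hostname below is a placeholder.
#
#   from logging.handlers import SysLogHandler
#   remote = SysLogHandler(address=('loghost.example.com', 514))
#   local = SysLogHandler(address='/dev/log')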
class SMTPHandler(logging.Handler):
"""
A handler class which sends an SMTP email for each logging event.
"""
def __init__(self, mailhost, fromaddr, toaddrs, subject,
credentials=None, secure=None):
"""
Initialize the handler.
Initialize the instance with the from and to addresses and subject
line of the email. To specify a non-standard SMTP port, use the
(host, port) tuple format for the mailhost argument. To specify
authentication credentials, supply a (username, password) tuple
for the credentials argument. To specify the use of a secure
protocol (TLS), pass in a tuple for the secure argument. This will
only be used when authentication credentials are supplied. The tuple
will be either an empty tuple, or a single-value tuple with the name
of a keyfile, or a 2-value tuple with the names of the keyfile and
certificate file. (This tuple is passed to the `starttls` method).
"""
logging.Handler.__init__(self)
if isinstance(mailhost, tuple):
self.mailhost, self.mailport = mailhost
else:
self.mailhost, self.mailport = mailhost, None
if isinstance(credentials, tuple):
self.username, self.password = credentials
else:
self.username = None
self.fromaddr = fromaddr
if isinstance(toaddrs, basestring):
toaddrs = [toaddrs]
self.toaddrs = toaddrs
self.subject = subject
self.secure = secure
self._timeout = 5.0
def getSubject(self, record):
"""
Determine the subject for the email.
If you want to specify a subject line which is record-dependent,
override this method.
"""
return self.subject
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
try:
import smtplib
from email.utils import formatdate
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self._timeout)
msg = self.format(record)
msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
self.fromaddr,
",".join(self.toaddrs),
self.getSubject(record),
formatdate(), msg)
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.sendmail(self.fromaddr, self.toaddrs, msg)
smtp.quit()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
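# Illustrative usage (not part of the original module): mail ERROR
# records through an authenticated TLS session; the host, addresses and
# credentials below are placeholders.
#
#   import logging
#   from logging.handlers import SMTPHandler
#   handler = SMTPHandler(mailhost=('smtp.example.com', 587),
#                         fromaddr='[email protected]',
#                         toaddrs=['[email protected]'],
#                         subject='Application error',
#                         credentials=('user', 'secret'),
#                         secure=())
#   handler.setLevel(logging.ERROR)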
class NTEventLogHandler(logging.Handler):
"""
A handler class which sends events to the NT Event Log. Adds a
registry entry for the specified application name. If no dllname is
provided, win32service.pyd (which contains some basic message
placeholders) is used. Note that use of these placeholders will make
your event logs big, as the entire message source is held in the log.
If you want slimmer logs, you have to pass in the name of your own DLL
which contains the message definitions you want to use in the event log.
"""
def __init__(self, appname, dllname=None, logtype="Application"):
logging.Handler.__init__(self)
try:
import win32evtlogutil, win32evtlog
self.appname = appname
self._welu = win32evtlogutil
if not dllname:
dllname = os.path.split(self._welu.__file__)
dllname = os.path.split(dllname[0])
dllname = os.path.join(dllname[0], r'win32service.pyd')
self.dllname = dllname
self.logtype = logtype
self._welu.AddSourceToRegistry(appname, dllname, logtype)
self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
self.typemap = {
logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
}
except ImportError:
print("The Python Win32 extensions for NT (service, event "\
"logging) appear not to be available.")
self._welu = None
def getMessageID(self, record):
"""
Return the message ID for the event record. If you are using your
own messages, you could do this by having the msg passed to the
logger being an ID rather than a formatting string. Then, in here,
you could use a dictionary lookup to get the message ID. This
version returns 1, which is the base message ID in win32service.pyd.
"""
return 1
def getEventCategory(self, record):
"""
Return the event category for the record.
Override this if you want to specify your own categories. This version
returns 0.
"""
return 0
def getEventType(self, record):
"""
Return the event type for the record.
Override this if you want to specify your own types. This version does
a mapping using the handler's typemap attribute, which is set up in
__init__() to a dictionary which contains mappings for DEBUG, INFO,
WARNING, ERROR and CRITICAL. If you are using your own levels you will
either need to override this method or place a suitable dictionary in
the handler's typemap attribute.
"""
return self.typemap.get(record.levelno, self.deftype)
def emit(self, record):
"""
Emit a record.
Determine the message ID, event category and event type. Then
log the message in the NT event log.
"""
if self._welu:
try:
id = self.getMessageID(record)
cat = self.getEventCategory(record)
type = self.getEventType(record)
msg = self.format(record)
self._welu.ReportEvent(self.appname, id, cat, type, [msg])
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def close(self):
"""
Clean up this handler.
You can remove the application name from the registry as a
source of event log entries. However, if you do this, you will
not be able to see the events as you intended in the Event Log
Viewer - it needs to be able to access the registry to get the
DLL name.
"""
#self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
logging.Handler.close(self)
class HTTPHandler(logging.Handler):
"""
A class which sends records to a Web server, using either GET or
POST semantics.
"""
def __init__(self, host, url, method="GET"):
"""
Initialize the instance with the host, the request URL, and the method
("GET" or "POST")
"""
logging.Handler.__init__(self)
method = method.upper()
if method not in ["GET", "POST"]:
raise ValueError("method must be GET or POST")
self.host = host
self.url = url
self.method = method
def mapLogRecord(self, record):
"""
Default implementation of mapping the log record into a dict
that is sent as the CGI data. Overwrite in your class.
Contributed by Franz Glasner.
"""
return record.__dict__
def emit(self, record):
"""
Emit a record.
Send the record to the Web server as a percent-encoded dictionary
"""
try:
import httplib, urllib
host = self.host
h = httplib.HTTP(host)
url = self.url
data = urllib.urlencode(self.mapLogRecord(record))
if self.method == "GET":
if (url.find('?') >= 0):
sep = '&'
else:
sep = '?'
url = url + "%c%s" % (sep, data)
h.putrequest(self.method, url)
# support multiple hosts on one IP address...
# need to strip optional :port from host, if present
i = host.find(":")
if i >= 0:
host = host[:i]
h.putheader("Host", host)
if self.method == "POST":
h.putheader("Content-type",
"application/x-www-form-urlencoded")
h.putheader("Content-length", str(len(data)))
h.endheaders(data if self.method == "POST" else None)
h.getreply() #can't do anything with the result
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
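# Hedged usage sketch (editor's addition): sending records to a web endpoint.
# A minimal sketch assuming a listener at localhost:8080 accepting
# form-encoded POSTs at /log (both hypothetical).
def _demo_http_handler():
    handler = HTTPHandler("localhost:8080", "/log", method="POST")
    logger = logging.getLogger("http-demo")
    logger.addHandler(handler)
    logger.warning("sent as application/x-www-form-urlencoded data")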
class BufferingHandler(logging.Handler):
"""
    A handler class which buffers logging records in memory. Whenever a
    record is added to the buffer, a check is made to see if the buffer should
be flushed. If it should, then flush() is expected to do what's needed.
"""
def __init__(self, capacity):
"""
Initialize the handler with the buffer size.
"""
logging.Handler.__init__(self)
self.capacity = capacity
self.buffer = []
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
return (len(self.buffer) >= self.capacity)
def emit(self, record):
"""
Emit a record.
Append the record. If shouldFlush() tells us to, call flush() to process
the buffer.
"""
self.buffer.append(record)
if self.shouldFlush(record):
self.flush()
def flush(self):
"""
Override to implement custom flushing behaviour.
This version just zaps the buffer to empty.
"""
self.acquire()
try:
self.buffer = []
finally:
self.release()
def close(self):
"""
Close the handler.
This version just flushes and chains to the parent class' close().
"""
self.flush()
logging.Handler.close(self)
class MemoryHandler(BufferingHandler):
"""
A handler class which buffers logging records in memory, periodically
flushing them to a target handler. Flushing occurs whenever the buffer
is full, or when an event of a certain severity or greater is seen.
"""
def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
"""
Initialize the handler with the buffer size, the level at which
flushing should occur and an optional target.
Note that without a target being set either here or via setTarget(),
a MemoryHandler is no use to anyone!
"""
BufferingHandler.__init__(self, capacity)
self.flushLevel = flushLevel
self.target = target
def shouldFlush(self, record):
"""
Check for buffer full or a record at the flushLevel or higher.
"""
return (len(self.buffer) >= self.capacity) or \
(record.levelno >= self.flushLevel)
def setTarget(self, target):
"""
Set the target handler for this handler.
"""
self.target = target
def flush(self):
"""
For a MemoryHandler, flushing means just sending the buffered
records to the target, if there is one. Override if you want
different behaviour.
"""
self.acquire()
try:
if self.target:
for record in self.buffer:
self.target.handle(record)
self.buffer = []
finally:
self.release()
def close(self):
"""
Flush, set the target to None and lose the buffer.
"""
self.flush()
self.acquire()
try:
self.target = None
BufferingHandler.close(self)
finally:
self.release()
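# Hedged usage sketch (editor's addition): buffering DEBUG records in memory
# and flushing them to a stream only when a record at flushLevel (ERROR by
# default) arrives, per MemoryHandler.shouldFlush above.
def _demo_memory_handler():
    target = logging.StreamHandler()
    handler = MemoryHandler(capacity=100, flushLevel=logging.ERROR,
                            target=target)
    logger = logging.getLogger("memory-demo")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    logger.debug("buffered, not yet written")
    logger.error("triggers a flush of everything buffered so far")
    handler.close()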
|
gpl-2.0
|
cindyyu/kuma
|
vendor/packages/pygments/lexers/markup.py
|
72
|
16886
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.markup
~~~~~~~~~~~~~~~~~~~~~~
Lexers for non-HTML markup languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexers.html import HtmlLexer, XmlLexer
from pygments.lexers.javascript import JavascriptLexer
from pygments.lexers.css import CssLexer
from pygments.lexer import RegexLexer, DelegatingLexer, include, bygroups, \
using, this, do_insertions, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Generic, Other
from pygments.util import get_bool_opt, ClassNotFound
__all__ = ['BBCodeLexer', 'MoinWikiLexer', 'RstLexer', 'TexLexer', 'GroffLexer',
'MozPreprocHashLexer', 'MozPreprocPercentLexer',
'MozPreprocXulLexer', 'MozPreprocJavascriptLexer',
'MozPreprocCssLexer']
class BBCodeLexer(RegexLexer):
"""
A lexer that highlights BBCode(-like) syntax.
.. versionadded:: 0.6
"""
name = 'BBCode'
aliases = ['bbcode']
mimetypes = ['text/x-bbcode']
tokens = {
'root': [
(r'[^[]+', Text),
# tag/end tag begin
(r'\[/?\w+', Keyword, 'tag'),
# stray bracket
(r'\[', Text),
],
'tag': [
(r'\s+', Text),
# attribute with value
(r'(\w+)(=)("?[^\s"\]]+"?)',
bygroups(Name.Attribute, Operator, String)),
# tag argument (a la [color=green])
(r'(=)("?[^\s"\]]+"?)',
bygroups(Operator, String)),
# tag end
(r'\]', Keyword, '#pop'),
],
}
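# Hedged usage sketch (editor's addition): tokenizing a BBCode snippet with
# the standard pygments Lexer API; get_tokens() is inherited from RegexLexer.
def _demo_bbcode_lexer():
    return list(BBCodeLexer().get_tokens(u'[color=green]hi[/color]'))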
class MoinWikiLexer(RegexLexer):
"""
For MoinMoin (and Trac) Wiki markup.
.. versionadded:: 0.7
"""
name = 'MoinMoin/Trac Wiki markup'
aliases = ['trac-wiki', 'moin']
filenames = []
mimetypes = ['text/x-trac-wiki']
flags = re.MULTILINE | re.IGNORECASE
tokens = {
'root': [
(r'^#.*$', Comment),
(r'(!)(\S+)', bygroups(Keyword, Text)), # Ignore-next
# Titles
(r'^(=+)([^=]+)(=+)(\s*#.+)?$',
bygroups(Generic.Heading, using(this), Generic.Heading, String)),
# Literal code blocks, with optional shebang
(r'(\{\{\{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
(r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment), # Formatting
# Lists
(r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
(r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
# Other Formatting
(r'\[\[\w+.*?\]\]', Keyword), # Macro
(r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
bygroups(Keyword, String, Keyword)), # Link
(r'^----+$', Keyword), # Horizontal rules
(r'[^\n\'\[{!_~^,|]+', Text),
(r'\n', Text),
(r'.', Text),
],
'codeblock': [
(r'\}\}\}', Name.Builtin, '#pop'),
# these blocks are allowed to be nested in Trac, but not MoinMoin
(r'\{\{\{', Text, '#push'),
(r'[^{}]+', Comment.Preproc), # slurp boring text
(r'.', Comment.Preproc), # allow loose { or }
],
}
class RstLexer(RegexLexer):
"""
For `reStructuredText <http://docutils.sf.net/rst.html>`_ markup.
.. versionadded:: 0.7
Additional options accepted:
`handlecodeblocks`
Highlight the contents of ``.. sourcecode:: language``,
``.. code:: language`` and ``.. code-block:: language``
directives with a lexer for the given language (default:
``True``).
.. versionadded:: 0.8
"""
name = 'reStructuredText'
aliases = ['rst', 'rest', 'restructuredtext']
filenames = ['*.rst', '*.rest']
mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
flags = re.MULTILINE
def _handle_sourcecode(self, match):
from pygments.lexers import get_lexer_by_name
# section header
yield match.start(1), Punctuation, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator.Word, match.group(3)
yield match.start(4), Punctuation, match.group(4)
yield match.start(5), Text, match.group(5)
yield match.start(6), Keyword, match.group(6)
yield match.start(7), Text, match.group(7)
# lookup lexer if wanted and existing
lexer = None
if self.handlecodeblocks:
try:
lexer = get_lexer_by_name(match.group(6).strip())
except ClassNotFound:
pass
indention = match.group(8)
indention_size = len(indention)
code = (indention + match.group(9) + match.group(10) + match.group(11))
# no lexer for this language. handle it like it was a code block
if lexer is None:
yield match.start(8), String, code
return
# highlight the lines with the lexer.
ins = []
codelines = code.splitlines(True)
code = ''
for line in codelines:
if len(line) > indention_size:
ins.append((len(code), [(0, Text, line[:indention_size])]))
code += line[indention_size:]
else:
code += line
for item in do_insertions(ins, lexer.get_tokens_unprocessed(code)):
yield item
# from docutils.parsers.rst.states
closers = u'\'")]}>\u2019\u201d\xbb!?'
unicode_delimiters = u'\u2010\u2011\u2012\u2013\u2014\u00a0'
end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
% (re.escape(unicode_delimiters),
re.escape(closers)))
tokens = {
'root': [
# Heading with overline
(r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
r'(.+)(\n)(\1)(\n)',
bygroups(Generic.Heading, Text, Generic.Heading,
Text, Generic.Heading, Text)),
# Plain heading
(r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
bygroups(Generic.Heading, Text, Generic.Heading, Text)),
# Bulleted lists
(r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered lists
(r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
bygroups(Text, Number, using(this, state='inline'))),
# Numbered, but keep words at BOL from becoming lists
(r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
# Line blocks
(r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
bygroups(Text, Operator, using(this, state='inline'))),
# Sourcecode directives
(r'^( *\.\.)(\s*)((?:source)?code(?:-block)?)(::)([ \t]*)([^\n]+)'
r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
_handle_sourcecode),
# A directive
(r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
using(this, state='inline'))),
# A reference target
(r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A footnote/citation target
(r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A substitution def
(r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
Punctuation, Text, using(this, state='inline'))),
# Comments
(r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
# Field list
(r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
(r'^( *)(:.*?:)([ \t]+)(.*?)$',
bygroups(Text, Name.Class, Text, Name.Function)),
# Definition list
(r'^(\S.*(?<!::)\n)((?:(?: +.*)\n)+)',
bygroups(using(this, state='inline'), using(this, state='inline'))),
# Code blocks
(r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*|)\n)+)',
bygroups(String.Escape, Text, String, String, Text, String)),
include('inline'),
],
'inline': [
(r'\\.', Text), # escape
(r'``', String, 'literal'), # code
(r'(`.+?)(<.+?>)(`__?)', # reference with inline target
bygroups(String, String.Interpol, String)),
(r'`.+?`__?', String), # reference
(r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
bygroups(Name.Variable, Name.Attribute)), # role
(r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
bygroups(Name.Attribute, Name.Variable)), # role (content first)
(r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
(r'\*.+?\*', Generic.Emph), # Emphasis
(r'\[.*?\]_', String), # Footnote or citation
(r'<.+?>', Name.Tag), # Hyperlink
(r'[^\\\n\[*`:]+', Text),
(r'.', Text),
],
'literal': [
(r'[^`]+', String),
(r'``' + end_string_suffix, String, '#pop'),
(r'`', String),
]
}
def __init__(self, **options):
self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
RegexLexer.__init__(self, **options)
def analyse_text(text):
if text[:2] == '..' and text[2:3] != '.':
return 0.3
p1 = text.find("\n")
p2 = text.find("\n", p1 + 1)
if (p2 > -1 and # has two lines
p1 * 2 + 1 == p2 and # they are the same length
text[p1+1] in '-=' and # the next line both starts and ends with
text[p1+1] == text[p2-1]): # ...a sufficiently high header
return 0.5
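# Hedged usage sketch (editor's addition): the `handlecodeblocks` option
# controls whether ``.. code-block::`` contents are delegated to a sub-lexer
# looked up by name; passing False keeps them as plain String tokens.
def _demo_rst_lexer():
    source = u".. code-block:: python\n\n    x = 1\n"
    plain = RstLexer(handlecodeblocks=False)
    delegating = RstLexer()  # default: look up a lexer for "python"
    return list(plain.get_tokens(source)), list(delegating.get_tokens(source))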
class TexLexer(RegexLexer):
"""
Lexer for the TeX and LaTeX typesetting languages.
"""
name = 'TeX'
aliases = ['tex', 'latex']
filenames = ['*.tex', '*.aux', '*.toc']
mimetypes = ['text/x-tex', 'text/x-latex']
tokens = {
'general': [
(r'%.*?\n', Comment),
(r'[{}]', Name.Builtin),
(r'[&_^]', Name.Builtin),
],
'root': [
(r'\\\[', String.Backtick, 'displaymath'),
(r'\\\(', String, 'inlinemath'),
(r'\$\$', String.Backtick, 'displaymath'),
(r'\$', String, 'inlinemath'),
(r'\\([a-zA-Z]+|.)', Keyword, 'command'),
(r'\\$', Keyword),
include('general'),
(r'[^\\$%&_^{}]+', Text),
],
'math': [
(r'\\([a-zA-Z]+|.)', Name.Variable),
include('general'),
(r'[0-9]+', Number),
(r'[-=!+*/()\[\]]', Operator),
(r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
],
'inlinemath': [
(r'\\\)', String, '#pop'),
(r'\$', String, '#pop'),
include('math'),
],
'displaymath': [
(r'\\\]', String, '#pop'),
(r'\$\$', String, '#pop'),
(r'\$', Name.Builtin),
include('math'),
],
'command': [
(r'\[.*?\]', Name.Attribute),
(r'\*', Keyword),
default('#pop'),
],
}
def analyse_text(text):
for start in ("\\documentclass", "\\input", "\\documentstyle",
"\\relax"):
if text[:len(start)] == start:
return True
class GroffLexer(RegexLexer):
"""
Lexer for the (g)roff typesetting language, supporting groff
extensions. Mainly useful for highlighting manpage sources.
.. versionadded:: 0.6
"""
name = 'Groff'
aliases = ['groff', 'nroff', 'man']
filenames = ['*.[1234567]', '*.man']
mimetypes = ['application/x-troff', 'text/troff']
tokens = {
'root': [
(r'(\.)(\w+)', bygroups(Text, Keyword), 'request'),
(r'\.', Punctuation, 'request'),
# Regular characters, slurp till we find a backslash or newline
(r'[^\\\n]+', Text, 'textline'),
default('textline'),
],
'textline': [
include('escapes'),
(r'[^\\\n]+', Text),
(r'\n', Text, '#pop'),
],
'escapes': [
# groff has many ways to write escapes.
(r'\\"[^\n]*', Comment),
(r'\\[fn]\w', String.Escape),
(r'\\\(.{2}', String.Escape),
(r'\\.\[.*\]', String.Escape),
(r'\\.', String.Escape),
(r'\\\n', Text, 'request'),
],
'request': [
(r'\n', Text, '#pop'),
include('escapes'),
(r'"[^\n"]+"', String.Double),
(r'\d+', Number),
(r'\S+', String),
(r'\s+', Text),
],
}
def analyse_text(text):
if text[:1] != '.':
return False
if text[:3] == '.\\"':
return True
if text[:4] == '.TH ':
return True
if text[1:3].isalnum() and text[3].isspace():
return 0.9
class MozPreprocHashLexer(RegexLexer):
"""
Lexer for Mozilla Preprocessor files (with '#' as the marker).
Other data is left untouched.
.. versionadded:: 2.0
"""
name = 'mozhashpreproc'
aliases = [name]
filenames = []
mimetypes = []
tokens = {
'root': [
(r'^#', Comment.Preproc, ('expr', 'exprstart')),
(r'.+', Other),
],
'exprstart': [
(r'(literal)(.*)', bygroups(Comment.Preproc, Text), '#pop:2'),
(words((
'define', 'undef', 'if', 'ifdef', 'ifndef', 'else', 'elif',
'elifdef', 'elifndef', 'endif', 'expand', 'filter', 'unfilter',
'include', 'includesubst', 'error')),
Comment.Preproc, '#pop'),
],
'expr': [
(words(('!', '!=', '==', '&&', '||')), Operator),
(r'(defined)(\()', bygroups(Keyword, Punctuation)),
(r'\)', Punctuation),
(r'[0-9]+', Number.Decimal),
(r'__\w+?__', Name.Variable),
(r'@\w+?@', Name.Class),
(r'\w+', Name),
(r'\n', Text, '#pop'),
(r'\s+', Text),
(r'\S', Punctuation),
],
}
class MozPreprocPercentLexer(MozPreprocHashLexer):
"""
Lexer for Mozilla Preprocessor files (with '%' as the marker).
Other data is left untouched.
.. versionadded:: 2.0
"""
name = 'mozpercentpreproc'
aliases = [name]
filenames = []
mimetypes = []
tokens = {
'root': [
(r'^%', Comment.Preproc, ('expr', 'exprstart')),
(r'.+', Other),
],
}
class MozPreprocXulLexer(DelegatingLexer):
"""
Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the
`XmlLexer`.
.. versionadded:: 2.0
"""
name = "XUL+mozpreproc"
aliases = ['xul+mozpreproc']
filenames = ['*.xul.in']
mimetypes = []
def __init__(self, **options):
super(MozPreprocXulLexer, self).__init__(
XmlLexer, MozPreprocHashLexer, **options)
class MozPreprocJavascriptLexer(DelegatingLexer):
"""
Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the
`JavascriptLexer`.
.. versionadded:: 2.0
"""
name = "Javascript+mozpreproc"
aliases = ['javascript+mozpreproc']
filenames = ['*.js.in']
mimetypes = []
def __init__(self, **options):
super(MozPreprocJavascriptLexer, self).__init__(
JavascriptLexer, MozPreprocHashLexer, **options)
class MozPreprocCssLexer(DelegatingLexer):
"""
Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the
`CssLexer`.
.. versionadded:: 2.0
"""
name = "CSS+mozpreproc"
aliases = ['css+mozpreproc']
filenames = ['*.css.in']
mimetypes = []
def __init__(self, **options):
super(MozPreprocCssLexer, self).__init__(
CssLexer, MozPreprocPercentLexer, **options)
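# Hedged usage sketch (editor's addition): the delegating lexers above hand
# non-directive text to a base lexer; running the plain MozPreprocHashLexer
# shows the preprocessor directives being picked out of otherwise opaque data.
def _demo_mozpreproc():
    sample = u"#ifdef DEBUG\nplain data line\n#endif\n"
    return list(MozPreprocHashLexer().get_tokens(sample))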
|
mpl-2.0
|
viraptor/cryptography
|
tests/hazmat/primitives/test_ec.py
|
1
|
11748
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import itertools
import os
import pytest
from cryptography import exceptions, utils
from cryptography.hazmat.backends.interfaces import EllipticCurveBackend
from cryptography.hazmat.primitives import hashes, interfaces
from cryptography.hazmat.primitives.asymmetric import ec
from ...utils import (
der_encode_dsa_signature, load_fips_ecdsa_key_pair_vectors,
load_fips_ecdsa_signing_vectors, load_vectors_from_file,
raises_unsupported_algorithm
)
_HASH_TYPES = {
"SHA-1": hashes.SHA1,
"SHA-224": hashes.SHA224,
"SHA-256": hashes.SHA256,
"SHA-384": hashes.SHA384,
"SHA-512": hashes.SHA512,
}
def _skip_ecdsa_vector(backend, curve_type, hash_type):
if not backend.elliptic_curve_signature_algorithm_supported(
ec.ECDSA(hash_type()),
curve_type()
):
pytest.skip(
"ECDSA not supported with this hash {0} and curve {1}".format(
hash_type().name, curve_type().name
)
)
def _skip_curve_unsupported(backend, curve):
if not backend.elliptic_curve_supported(curve):
pytest.skip(
"Curve {0} is not supported by this backend {1}".format(
curve.name, backend
)
)
@utils.register_interface(interfaces.EllipticCurve)
class DummyCurve(object):
name = "dummy-curve"
key_size = 1
@utils.register_interface(interfaces.EllipticCurveSignatureAlgorithm)
class DummySignatureAlgorithm(object):
pass
@utils.register_interface(EllipticCurveBackend)
class DeprecatedDummyECBackend(object):
def elliptic_curve_private_key_from_numbers(self, numbers):
return b"private_key"
def elliptic_curve_public_key_from_numbers(self, numbers):
return b"public_key"
@pytest.mark.requires_backend_interface(interface=EllipticCurveBackend)
def test_skip_curve_unsupported(backend):
with pytest.raises(pytest.skip.Exception):
_skip_curve_unsupported(backend, DummyCurve())
def test_ec_numbers():
numbers = ec.EllipticCurvePrivateNumbers(
1,
ec.EllipticCurvePublicNumbers(
2, 3, DummyCurve()
)
)
assert numbers.private_value == 1
assert numbers.public_numbers.x == 2
assert numbers.public_numbers.y == 3
assert isinstance(numbers.public_numbers.curve, DummyCurve)
with pytest.raises(TypeError):
ec.EllipticCurvePrivateNumbers(
None,
ec.EllipticCurvePublicNumbers(
2, 3, DummyCurve()
)
)
with pytest.raises(TypeError):
ec.EllipticCurvePrivateNumbers(
1,
ec.EllipticCurvePublicNumbers(
None, 3, DummyCurve()
)
)
with pytest.raises(TypeError):
ec.EllipticCurvePrivateNumbers(
1,
ec.EllipticCurvePublicNumbers(
2, None, DummyCurve()
)
)
with pytest.raises(TypeError):
ec.EllipticCurvePrivateNumbers(
1,
ec.EllipticCurvePublicNumbers(
2, 3, None
)
)
with pytest.raises(TypeError):
ec.EllipticCurvePrivateNumbers(
1,
None
)
@pytest.mark.requires_backend_interface(interface=EllipticCurveBackend)
class TestECWithNumbers(object):
@pytest.mark.parametrize(
("vector", "hash_type"),
list(itertools.product(
load_vectors_from_file(
os.path.join(
"asymmetric", "ECDSA", "FIPS_186-3", "KeyPair.rsp"),
load_fips_ecdsa_key_pair_vectors
),
_HASH_TYPES.values()
))
)
def test_with_numbers(self, backend, vector, hash_type):
curve_type = ec._CURVE_TYPES[vector['curve']]
_skip_ecdsa_vector(backend, curve_type, hash_type)
key = ec.EllipticCurvePrivateNumbers(
vector['d'],
ec.EllipticCurvePublicNumbers(
vector['x'],
vector['y'],
curve_type()
)
).private_key(backend)
assert key
if isinstance(key, interfaces.EllipticCurvePrivateKeyWithNumbers):
priv_num = key.private_numbers()
assert priv_num.private_value == vector['d']
assert priv_num.public_numbers.x == vector['x']
assert priv_num.public_numbers.y == vector['y']
assert curve_type().name == priv_num.public_numbers.curve.name
@pytest.mark.requires_backend_interface(interface=EllipticCurveBackend)
class TestECDSAVectors(object):
@pytest.mark.parametrize(
("vector", "hash_type"),
list(itertools.product(
load_vectors_from_file(
os.path.join(
"asymmetric", "ECDSA", "FIPS_186-3", "KeyPair.rsp"),
load_fips_ecdsa_key_pair_vectors
),
_HASH_TYPES.values()
))
)
def test_signing_with_example_keys(self, backend, vector, hash_type):
curve_type = ec._CURVE_TYPES[vector['curve']]
_skip_ecdsa_vector(backend, curve_type, hash_type)
key = ec.EllipticCurvePrivateNumbers(
vector['d'],
ec.EllipticCurvePublicNumbers(
vector['x'],
vector['y'],
curve_type()
)
).private_key(backend)
assert key
pkey = key.public_key()
assert pkey
signer = key.signer(ec.ECDSA(hash_type()))
signer.update(b"YELLOW SUBMARINE")
signature = signer.finalize()
verifier = pkey.verifier(signature, ec.ECDSA(hash_type()))
verifier.update(b"YELLOW SUBMARINE")
verifier.verify()
@pytest.mark.parametrize(
"curve", ec._CURVE_TYPES.values()
)
def test_generate_vector_curves(self, backend, curve):
_skip_curve_unsupported(backend, curve())
key = ec.generate_private_key(curve(), backend)
assert key
assert isinstance(key.curve, curve)
assert key.curve.key_size
pkey = key.public_key()
assert pkey
assert isinstance(pkey.curve, curve)
assert key.curve.key_size == pkey.curve.key_size
def test_generate_unknown_curve(self, backend):
with raises_unsupported_algorithm(
exceptions._Reasons.UNSUPPORTED_ELLIPTIC_CURVE
):
ec.generate_private_key(DummyCurve(), backend)
assert backend.elliptic_curve_signature_algorithm_supported(
ec.ECDSA(hashes.SHA256()),
DummyCurve()
) is False
    def test_unknown_signature_algorithm(self, backend):
_skip_curve_unsupported(backend, ec.SECP192R1())
key = ec.generate_private_key(ec.SECP192R1(), backend)
with raises_unsupported_algorithm(
exceptions._Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
key.signer(DummySignatureAlgorithm())
with raises_unsupported_algorithm(
exceptions._Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM
):
key.public_key().verifier(b"", DummySignatureAlgorithm())
assert backend.elliptic_curve_signature_algorithm_supported(
DummySignatureAlgorithm(),
ec.SECP192R1()
) is False
def test_load_invalid_ec_key_from_numbers(self, backend):
_skip_curve_unsupported(backend, ec.SECP256R1())
numbers = ec.EllipticCurvePrivateNumbers(
357646505660320080863666618182642070958081774038609089496899025506,
ec.EllipticCurvePublicNumbers(
47250808410327023131573602008345894927686381772325561185532964,
1120253292479243545483756778742719537373113335231773536789915,
ec.SECP256R1(),
)
)
with pytest.raises(ValueError):
numbers.private_key(backend)
numbers = ec.EllipticCurvePrivateNumbers(
357646505660320080863666618182642070958081774038609089496899025506,
ec.EllipticCurvePublicNumbers(
-4725080841032702313157360200834589492768638177232556118553296,
1120253292479243545483756778742719537373113335231773536789915,
ec.SECP256R1(),
)
)
with pytest.raises(ValueError):
numbers.private_key(backend)
numbers = ec.EllipticCurvePrivateNumbers(
357646505660320080863666618182642070958081774038609089496899025506,
ec.EllipticCurvePublicNumbers(
47250808410327023131573602008345894927686381772325561185532964,
-1120253292479243545483756778742719537373113335231773536789915,
ec.SECP256R1(),
)
)
with pytest.raises(ValueError):
numbers.private_key(backend)
@pytest.mark.parametrize(
"vector",
load_vectors_from_file(
os.path.join(
"asymmetric", "ECDSA", "FIPS_186-3", "SigGen.txt"),
load_fips_ecdsa_signing_vectors
)
)
def test_signatures(self, backend, vector):
hash_type = _HASH_TYPES[vector['digest_algorithm']]
curve_type = ec._CURVE_TYPES[vector['curve']]
_skip_ecdsa_vector(backend, curve_type, hash_type)
key = ec.EllipticCurvePublicNumbers(
vector['x'],
vector['y'],
curve_type()
).public_key(backend)
signature = der_encode_dsa_signature(
vector['r'],
vector['s']
)
verifier = key.verifier(
signature,
ec.ECDSA(hash_type())
)
verifier.update(vector['message'])
assert verifier.verify()
@pytest.mark.parametrize(
"vector",
load_vectors_from_file(
os.path.join(
"asymmetric", "ECDSA", "FIPS_186-3", "SigVer.rsp"),
load_fips_ecdsa_signing_vectors
)
)
def test_signature_failures(self, backend, vector):
hash_type = _HASH_TYPES[vector['digest_algorithm']]
curve_type = ec._CURVE_TYPES[vector['curve']]
_skip_ecdsa_vector(backend, curve_type, hash_type)
key = ec.EllipticCurvePublicNumbers(
vector['x'],
vector['y'],
curve_type()
).public_key(backend)
signature = der_encode_dsa_signature(
vector['r'],
vector['s']
)
verifier = key.verifier(
signature,
ec.ECDSA(hash_type())
)
verifier.update(vector['message'])
if vector["fail"] is True:
with pytest.raises(exceptions.InvalidSignature):
verifier.verify()
else:
verifier.verify()
def test_deprecated_public_private_key_load(self):
b = DeprecatedDummyECBackend()
pub_numbers = ec.EllipticCurvePublicNumbers(
2,
3,
ec.SECT283K1()
)
numbers = ec.EllipticCurvePrivateNumbers(1, pub_numbers)
assert numbers.private_key(b) == b"private_key"
assert pub_numbers.public_key(b) == b"public_key"
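# Hedged usage sketch (editor's addition): the signer()/verifier() flow the
# tests above exercise, written out against a real backend. A minimal sketch
# assuming the default backend supports SECP384R1.
def _demo_ec_sign_and_verify():
    from cryptography.hazmat.backends import default_backend
    backend = default_backend()
    private_key = ec.generate_private_key(ec.SECP384R1(), backend)
    signer = private_key.signer(ec.ECDSA(hashes.SHA256()))
    signer.update(b"message to sign")
    signature = signer.finalize()
    verifier = private_key.public_key().verifier(
        signature, ec.ECDSA(hashes.SHA256()))
    verifier.update(b"message to sign")
    verifier.verify()  # raises InvalidSignature on mismatch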
|
apache-2.0
|
Chilledheart/chromium
|
third_party/mojo/src/mojo/public/third_party/jinja2/runtime.py
|
606
|
19558
|
# -*- coding: utf-8 -*-
"""
jinja2.runtime
~~~~~~~~~~~~~~
Runtime helpers.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from itertools import chain
from jinja2.nodes import EvalContext, _context_function_types
from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
internalcode, object_type_repr
from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
TemplateNotFound
from jinja2._compat import next, imap, text_type, iteritems, \
implements_iterator, implements_to_string, string_types, PY2
# these variables are exported to the template runtime
__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
'TemplateRuntimeError', 'missing', 'concat', 'escape',
'markup_join', 'unicode_join', 'to_string', 'identity',
'TemplateNotFound']
#: the name of the function that is used to convert something into
#: a string. We can just use the text type here.
to_string = text_type
#: the identity function. Useful for certain things in the environment
identity = lambda x: x
_last_iteration = object()
def markup_join(seq):
"""Concatenation that escapes if necessary and converts to unicode."""
buf = []
iterator = imap(soft_unicode, seq)
for arg in iterator:
buf.append(arg)
if hasattr(arg, '__html__'):
return Markup(u'').join(chain(buf, iterator))
return concat(buf)
def unicode_join(seq):
"""Simple args to unicode conversion and concatenation."""
return concat(imap(text_type, seq))
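# Hedged behaviour sketch (editor's addition): once any element of the
# sequence is markup (has __html__), markup_join switches to Markup.join,
# which escapes the remaining plain strings; with no markup present the
# result is plain concatenation, as in unicode_join.
def _demo_markup_join():
    assert unicode_join([u'a', 1]) == u'a1'
    joined = markup_join([u'<i>', Markup(u'<b>safe</b>')])
    return joined  # u'&lt;i&gt;<b>safe</b>' as a Markup instance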
def new_context(environment, template_name, blocks, vars=None,
shared=None, globals=None, locals=None):
"""Internal helper to for context creation."""
if vars is None:
vars = {}
if shared:
parent = vars
else:
parent = dict(globals or (), **vars)
if locals:
# if the parent is shared a copy should be created because
# we don't want to modify the dict passed
if shared:
parent = dict(parent)
for key, value in iteritems(locals):
if key[:2] == 'l_' and value is not missing:
parent[key[2:]] = value
return Context(environment, parent, template_name, blocks)
class TemplateReference(object):
"""The `self` in templates."""
def __init__(self, context):
self.__context = context
def __getitem__(self, name):
blocks = self.__context.blocks[name]
return BlockReference(name, self.__context, blocks, 0)
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.__context.name
)
class Context(object):
"""The template context holds the variables of a template. It stores the
values passed to the template and also the names the template exports.
Creating instances is neither supported nor useful as it's created
automatically at various stages of the template evaluation and should not
be created by hand.
The context is immutable. Modifications on :attr:`parent` **must not**
happen and modifications on :attr:`vars` are allowed from generated
template code only. Template filters and global functions marked as
:func:`contextfunction`\s get the active context passed as first argument
and are allowed to access the context read-only.
The template context supports read only dict operations (`get`,
`keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
`__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
method that doesn't fail with a `KeyError` but returns an
:class:`Undefined` object for missing variables.
"""
__slots__ = ('parent', 'vars', 'environment', 'eval_ctx', 'exported_vars',
'name', 'blocks', '__weakref__')
def __init__(self, environment, parent, name, blocks):
self.parent = parent
self.vars = {}
self.environment = environment
self.eval_ctx = EvalContext(self.environment, name)
self.exported_vars = set()
self.name = name
# create the initial mapping of blocks. Whenever template inheritance
# takes place the runtime will update this mapping with the new blocks
# from the template.
self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
def super(self, name, current):
"""Render a parent block."""
try:
blocks = self.blocks[name]
index = blocks.index(current) + 1
blocks[index]
except LookupError:
return self.environment.undefined('there is no parent block '
'called %r.' % name,
name='super')
return BlockReference(name, self, blocks, index)
def get(self, key, default=None):
"""Returns an item from the template context, if it doesn't exist
`default` is returned.
"""
try:
return self[key]
except KeyError:
return default
def resolve(self, key):
"""Looks up a variable like `__getitem__` or `get` but returns an
        :class:`Undefined` object with the name of the variable looked up.
"""
if key in self.vars:
return self.vars[key]
if key in self.parent:
return self.parent[key]
return self.environment.undefined(name=key)
def get_exported(self):
"""Get a new dict with the exported variables."""
return dict((k, self.vars[k]) for k in self.exported_vars)
def get_all(self):
"""Return a copy of the complete context as dict including the
exported variables.
"""
return dict(self.parent, **self.vars)
@internalcode
def call(__self, __obj, *args, **kwargs):
"""Call the callable with the arguments and keyword arguments
provided but inject the active context or environment as first
argument if the callable is a :func:`contextfunction` or
:func:`environmentfunction`.
"""
if __debug__:
__traceback_hide__ = True
# Allow callable classes to take a context
fn = __obj.__call__
for fn_type in ('contextfunction',
'evalcontextfunction',
'environmentfunction'):
if hasattr(fn, fn_type):
__obj = fn
break
if isinstance(__obj, _context_function_types):
if getattr(__obj, 'contextfunction', 0):
args = (__self,) + args
elif getattr(__obj, 'evalcontextfunction', 0):
args = (__self.eval_ctx,) + args
elif getattr(__obj, 'environmentfunction', 0):
args = (__self.environment,) + args
try:
return __obj(*args, **kwargs)
except StopIteration:
return __self.environment.undefined('value was undefined because '
'a callable raised a '
'StopIteration exception')
def derived(self, locals=None):
"""Internal helper function to create a derived context."""
context = new_context(self.environment, self.name, {},
self.parent, True, None, locals)
context.vars.update(self.vars)
context.eval_ctx = self.eval_ctx
context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
return context
def _all(meth):
proxy = lambda self: getattr(self.get_all(), meth)()
proxy.__doc__ = getattr(dict, meth).__doc__
proxy.__name__ = meth
return proxy
keys = _all('keys')
values = _all('values')
items = _all('items')
# not available on python 3
if PY2:
iterkeys = _all('iterkeys')
itervalues = _all('itervalues')
iteritems = _all('iteritems')
del _all
def __contains__(self, name):
return name in self.vars or name in self.parent
def __getitem__(self, key):
"""Lookup a variable or raise `KeyError` if the variable is
undefined.
"""
item = self.resolve(key)
if isinstance(item, Undefined):
raise KeyError(key)
return item
def __repr__(self):
return '<%s %s of %r>' % (
self.__class__.__name__,
repr(self.get_all()),
self.name
)
# register the context as mapping if possible
try:
from collections import Mapping
Mapping.register(Context)
except ImportError:
pass
class BlockReference(object):
"""One block on a template reference."""
def __init__(self, name, context, stack, depth):
self.name = name
self._context = context
self._stack = stack
self._depth = depth
@property
def super(self):
"""Super the block."""
if self._depth + 1 >= len(self._stack):
return self._context.environment. \
undefined('there is no parent block called %r.' %
self.name, name='super')
return BlockReference(self.name, self._context, self._stack,
self._depth + 1)
@internalcode
def __call__(self):
rv = concat(self._stack[self._depth](self._context))
if self._context.eval_ctx.autoescape:
rv = Markup(rv)
return rv
class LoopContext(object):
"""A loop context for dynamic iteration."""
def __init__(self, iterable, recurse=None, depth0=0):
self._iterator = iter(iterable)
self._recurse = recurse
self._after = self._safe_next()
self.index0 = -1
self.depth0 = depth0
# try to get the length of the iterable early. This must be done
        # here because there are some broken iterators around whose
        # __len__ is the number of iterations left (I'm looking at you,
# listreverseiterator!).
try:
self._length = len(iterable)
except (TypeError, AttributeError):
self._length = None
def cycle(self, *args):
"""Cycles among the arguments with the current loop index."""
if not args:
raise TypeError('no items for cycling given')
return args[self.index0 % len(args)]
first = property(lambda x: x.index0 == 0)
last = property(lambda x: x._after is _last_iteration)
index = property(lambda x: x.index0 + 1)
revindex = property(lambda x: x.length - x.index0)
revindex0 = property(lambda x: x.length - x.index)
depth = property(lambda x: x.depth0 + 1)
def __len__(self):
return self.length
def __iter__(self):
return LoopContextIterator(self)
def _safe_next(self):
try:
return next(self._iterator)
except StopIteration:
return _last_iteration
@internalcode
def loop(self, iterable):
if self._recurse is None:
raise TypeError('Tried to call non recursive loop. Maybe you '
"forgot the 'recursive' modifier.")
return self._recurse(iterable, self._recurse, self.depth0 + 1)
# a nifty trick to enhance the error message if someone tried to call
    # the loop without arguments or with too many arguments.
__call__ = loop
del loop
@property
def length(self):
if self._length is None:
            # if it was not possible to get the length of the iterator when
            # the loop context was created (i.e. iterating over a generator)
# we have to convert the iterable into a sequence and use the
# length of that.
iterable = tuple(self._iterator)
self._iterator = iter(iterable)
self._length = len(iterable) + self.index0 + 1
return self._length
def __repr__(self):
return '<%s %r/%r>' % (
self.__class__.__name__,
self.index,
self.length
)
@implements_iterator
class LoopContextIterator(object):
"""The iterator for a loop context."""
__slots__ = ('context',)
def __init__(self, context):
self.context = context
def __iter__(self):
return self
def __next__(self):
ctx = self.context
ctx.index0 += 1
if ctx._after is _last_iteration:
raise StopIteration()
next_elem = ctx._after
ctx._after = ctx._safe_next()
return next_elem, ctx
class Macro(object):
"""Wraps a macro function."""
def __init__(self, environment, func, name, arguments, defaults,
catch_kwargs, catch_varargs, caller):
self._environment = environment
self._func = func
self._argument_count = len(arguments)
self.name = name
self.arguments = arguments
self.defaults = defaults
self.catch_kwargs = catch_kwargs
self.catch_varargs = catch_varargs
self.caller = caller
@internalcode
def __call__(self, *args, **kwargs):
# try to consume the positional arguments
arguments = list(args[:self._argument_count])
off = len(arguments)
# if the number of arguments consumed is not the number of
# arguments expected we start filling in keyword arguments
# and defaults.
if off != self._argument_count:
for idx, name in enumerate(self.arguments[len(arguments):]):
try:
value = kwargs.pop(name)
except KeyError:
try:
value = self.defaults[idx - self._argument_count + off]
except IndexError:
value = self._environment.undefined(
'parameter %r was not provided' % name, name=name)
arguments.append(value)
# it's important that the order of these arguments does not change
# if not also changed in the compiler's `function_scoping` method.
# the order is caller, keyword arguments, positional arguments!
if self.caller:
caller = kwargs.pop('caller', None)
if caller is None:
caller = self._environment.undefined('No caller defined',
name='caller')
arguments.append(caller)
if self.catch_kwargs:
arguments.append(kwargs)
elif kwargs:
raise TypeError('macro %r takes no keyword argument %r' %
(self.name, next(iter(kwargs))))
if self.catch_varargs:
arguments.append(args[self._argument_count:])
elif len(args) > self._argument_count:
            raise TypeError('macro %r takes no more than %d argument(s)' %
(self.name, len(self.arguments)))
return self._func(*arguments)
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
self.name is None and 'anonymous' or repr(self.name)
)
@implements_to_string
class Undefined(object):
"""The default undefined type. This undefined type can be printed and
iterated over, but every other access will raise an :exc:`UndefinedError`:
>>> foo = Undefined(name='foo')
>>> str(foo)
''
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
"""
__slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
'_undefined_exception')
def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
self._undefined_hint = hint
self._undefined_obj = obj
self._undefined_name = name
self._undefined_exception = exc
@internalcode
def _fail_with_undefined_error(self, *args, **kwargs):
"""Regular callback function for undefined objects that raises an
`UndefinedError` on call.
"""
if self._undefined_hint is None:
if self._undefined_obj is missing:
hint = '%r is undefined' % self._undefined_name
elif not isinstance(self._undefined_name, string_types):
hint = '%s has no element %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
hint = '%r has no attribute %r' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
else:
hint = self._undefined_hint
raise self._undefined_exception(hint)
@internalcode
def __getattr__(self, name):
if name[:2] == '__':
raise AttributeError(name)
return self._fail_with_undefined_error()
__add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
__truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
__mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
__getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
__float__ = __complex__ = __pow__ = __rpow__ = \
_fail_with_undefined_error
def __eq__(self, other):
return type(self) is type(other)
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return id(type(self))
def __str__(self):
return u''
def __len__(self):
return 0
def __iter__(self):
if 0:
yield None
def __nonzero__(self):
return False
def __repr__(self):
return 'Undefined'
@implements_to_string
class DebugUndefined(Undefined):
"""An undefined that returns the debug info when printed.
>>> foo = DebugUndefined(name='foo')
>>> str(foo)
'{{ foo }}'
>>> not foo
True
>>> foo + 42
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
"""
__slots__ = ()
def __str__(self):
if self._undefined_hint is None:
if self._undefined_obj is missing:
return u'{{ %s }}' % self._undefined_name
return '{{ no such element: %s[%r] }}' % (
object_type_repr(self._undefined_obj),
self._undefined_name
)
return u'{{ undefined value printed: %s }}' % self._undefined_hint
@implements_to_string
class StrictUndefined(Undefined):
"""An undefined that barks on print and iteration as well as boolean
tests and all kinds of comparisons. In other words: you can do nothing
with it except checking if it's defined using the `defined` test.
>>> foo = StrictUndefined(name='foo')
>>> str(foo)
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
>>> not foo
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
>>> foo + 42
Traceback (most recent call last):
...
UndefinedError: 'foo' is undefined
"""
__slots__ = ()
__iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
__ne__ = __bool__ = __hash__ = \
Undefined._fail_with_undefined_error
# remove remaining slots attributes, after the metaclass did the magic they
# are unneeded and irritating as they contain wrong data for the subclasses.
del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
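# Hedged behaviour sketch (editor's addition): how the three undefined types
# above differ in practice, mirroring their docstring doctests. An
# Environment is normally configured with one of these classes and the
# template decides what a missing name renders as (or raises).
def _demo_undefined_types():
    u = Undefined(name='foo')
    assert str(u) == '' and not u          # prints empty, is falsy
    d = DebugUndefined(name='foo')
    assert str(d) == '{{ foo }}'           # prints the missing expression
    s = StrictUndefined(name='foo')
    try:
        str(s)
    except UndefinedError:
        pass                               # StrictUndefined barks on print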
|
bsd-3-clause
|
rahuldhote/odoo
|
addons/hr_payroll/report/report_payslip.py
|
377
|
1982
|
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.report import report_sxw
class payslip_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(payslip_report, self).__init__(cr, uid, name, context)
self.localcontext.update({
'get_payslip_lines': self.get_payslip_lines,
})
def get_payslip_lines(self, obj):
payslip_line = self.pool.get('hr.payslip.line')
res = []
ids = []
for id in range(len(obj)):
if obj[id].appears_on_payslip is True:
ids.append(obj[id].id)
if ids:
res = payslip_line.browse(self.cr, self.uid, ids)
return res
class wrapped_report_payslip(osv.AbstractModel):
_name = 'report.hr_payroll.report_payslip'
_inherit = 'report.abstract_report'
_template = 'hr_payroll.report_payslip'
_wrapped_report_class = payslip_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
kargakis/test-infra
|
gubernator-rh/third_party/defusedxml/common.py
|
55
|
6086
|
# defusedxml
#
# Copyright (c) 2013 by Christian Heimes <[email protected]>
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
"""Common constants, exceptions and helpe functions
"""
import sys
from types import MethodType
PY3 = sys.version_info[0] == 3
PY26 = sys.version_info[:2] == (2, 6)
PY31 = sys.version_info[:2] == (3, 1)
class DefusedXmlException(ValueError):
"""Base exception
"""
def __repr__(self):
return str(self)
class DTDForbidden(DefusedXmlException):
"""Document type definition is forbidden
"""
def __init__(self, name, sysid, pubid):
super(DTDForbidden, self).__init__()
self.name = name
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class EntitiesForbidden(DefusedXmlException):
"""Entity definition is forbidden
"""
def __init__(self, name, value, base, sysid, pubid, notation_name):
super(EntitiesForbidden, self).__init__()
self.name = name
self.value = value
self.base = base
self.sysid = sysid
self.pubid = pubid
self.notation_name = notation_name
def __str__(self):
tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class ExternalReferenceForbidden(DefusedXmlException):
"""Resolving an external reference is forbidden
"""
def __init__(self, context, base, sysid, pubid):
super(ExternalReferenceForbidden, self).__init__()
self.context = context
self.base = base
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})"
return tpl.format(self.sysid, self.pubid)
class NotSupportedError(DefusedXmlException):
"""The operation is not supported
"""
def _apply_defusing(defused_mod):
assert defused_mod is sys.modules[defused_mod.__name__]
stdlib_name = defused_mod.__origin__
__import__(stdlib_name, {}, {}, ["*"])
stdlib_mod = sys.modules[stdlib_name]
stdlib_names = set(dir(stdlib_mod))
for name, obj in vars(defused_mod).items():
if name.startswith("_") or name not in stdlib_names:
continue
setattr(stdlib_mod, name, obj)
return stdlib_mod
def _generate_etree_functions(DefusedXMLParser, _TreeBuilder,
_IterParseIterator, _parse, _iterparse):
"""Factory for functions needed by etree, dependent on whether
cElementTree or ElementTree is used."""
def parse(source, parser=None, forbid_dtd=False, forbid_entities=True,
forbid_external=True):
if parser is None:
parser = DefusedXMLParser(target=_TreeBuilder(),
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities,
forbid_external=forbid_external)
return _parse(source, parser)
if PY26 or PY31:
def bind(xmlparser, funcname, hookname):
func = getattr(DefusedXMLParser, funcname)
if PY26:
# unbound -> function
func = func.__func__
method = MethodType(func, xmlparser, xmlparser.__class__)
else:
method = MethodType(func, xmlparser)
# set hook
setattr(xmlparser._parser, hookname, method)
def iterparse(source, events=None, forbid_dtd=False,
forbid_entities=True, forbid_external=True):
it = _iterparse(source, events)
xmlparser = it._parser
if forbid_dtd:
bind(xmlparser, "defused_start_doctype_decl",
"StartDoctypeDeclHandler")
if forbid_entities:
bind(xmlparser, "defused_entity_decl",
"EntityDeclHandler")
bind(xmlparser, "defused_unparsed_entity_decl",
"UnparsedEntityDeclHandler")
if forbid_external:
bind(xmlparser, "defused_external_entity_ref_handler",
"ExternalEntityRefHandler")
return it
elif PY3:
def iterparse(source, events=None, parser=None, forbid_dtd=False,
forbid_entities=True, forbid_external=True):
close_source = False
if not hasattr(source, "read"):
source = open(source, "rb")
close_source = True
if not parser:
parser = DefusedXMLParser(target=_TreeBuilder(),
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities,
forbid_external=forbid_external)
return _IterParseIterator(source, events, parser, close_source)
else:
# Python 2.7
def iterparse(source, events=None, parser=None, forbid_dtd=False,
forbid_entities=True, forbid_external=True):
if parser is None:
parser = DefusedXMLParser(target=_TreeBuilder(),
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities,
forbid_external=forbid_external)
return _iterparse(source, events, parser)
def fromstring(text, forbid_dtd=False, forbid_entities=True,
forbid_external=True):
parser = DefusedXMLParser(target=_TreeBuilder(),
forbid_dtd=forbid_dtd,
forbid_entities=forbid_entities,
forbid_external=forbid_external)
parser.feed(text)
return parser.close()
return parse, iterparse, fromstring
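# Hedged usage sketch (editor's addition): the functions produced by the
# factory above are what the defusedxml.ElementTree module re-exports;
# typical hardened parsing looks like this and raises EntitiesForbidden
# on entity-expansion payloads.
def _demo_defused_parse():
    from defusedxml.ElementTree import fromstring
    root = fromstring("<root><child/></root>")
    return root.tag  # 'root'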
|
apache-2.0
|
saurabh6790/OFF-RISAPP
|
hr/doctype/holiday_list/holiday_list.py
|
29
|
2486
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import add_days, add_years, cint, getdate
from webnotes.model import db_exists
from webnotes.model.doc import addchild, make_autoname
from webnotes.model.bean import copy_doclist
from webnotes import msgprint
import datetime
class DocType:
def __init__(self,doc,doclist=[]):
self.doc = doc
self.doclist = doclist
def autoname(self):
self.doc.name = make_autoname(self.doc.fiscal_year +"/"+ self.doc.holiday_list_name+"/.###")
def validate(self):
self.update_default_holiday_list()
def get_weekly_off_dates(self):
self.validate_values()
yr_start_date, yr_end_date = self.get_fy_start_end_dates()
date_list = self.get_weekly_off_date_list(yr_start_date, yr_end_date)
last_idx = max([cint(d.idx) for d in self.doclist.get(
{"parentfield": "holiday_list_details"})] or [0,])
for i, d in enumerate(date_list):
ch = addchild(self.doc, 'holiday_list_details', 'Holiday', self.doclist)
ch.description = self.doc.weekly_off
ch.holiday_date = d
ch.idx = last_idx + i + 1
def validate_values(self):
if not self.doc.fiscal_year:
msgprint("Please select Fiscal Year")
raise Exception
if not self.doc.weekly_off:
msgprint("Please select weekly off day")
raise Exception
def get_fy_start_end_dates(self):
return webnotes.conn.sql("""select year_start_date, year_end_date
from `tabFiscal Year` where name=%s""", (self.doc.fiscal_year,))[0]
def get_weekly_off_date_list(self, year_start_date, year_end_date):
from webnotes.utils import getdate
year_start_date, year_end_date = getdate(year_start_date), getdate(year_end_date)
from dateutil import relativedelta
from datetime import timedelta
import calendar
date_list = []
weekday = getattr(calendar, (self.doc.weekly_off).upper())
reference_date = year_start_date + relativedelta.relativedelta(weekday=weekday)
while reference_date <= year_end_date:
date_list.append(reference_date)
reference_date += timedelta(days=7)
return date_list
def clear_table(self):
self.doclist = self.doc.clear_table(self.doclist, 'holiday_list_details')
def update_default_holiday_list(self):
webnotes.conn.sql("""update `tabHoliday List` set is_default = 0
where ifnull(is_default, 0) = 1 and fiscal_year = %s""", (self.doc.fiscal_year,))
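# Hedged standalone sketch (editor's addition): the weekly-off computation
# from get_weekly_off_date_list above, isolated from the DocType machinery.
# It jumps to the first matching weekday on or after the start date, then
# walks in 7-day steps; the sample dates are hypothetical.
def _demo_weekly_off_dates():
    import calendar
    from datetime import date, timedelta
    from dateutil import relativedelta
    start, end = date(2013, 4, 1), date(2014, 3, 31)
    weekday = getattr(calendar, "SUNDAY")
    current = start + relativedelta.relativedelta(weekday=weekday)
    dates = []
    while current <= end:
        dates.append(current)
        current += timedelta(days=7)
    return dates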
|
agpl-3.0
|
tmshlvck/ulg
|
ulglinux.py
|
1
|
4555
|
#!/usr/bin/env python
#
# ULG - Universal Looking Glass
# (C) 2015 CZ.NIC, z.s.p.o.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Imports
import pexpect
import defaults
import ulgmodel
STRING_EXPECT_SSH_NEWKEY='Are you sure you want to continue connecting'
STRING_EXPECT_PASSWORD='(P|p)assword:'
class LinuxRouter(ulgmodel.Router):
""" Abstract class representing common base for linux router objects. """
def __init__(self):
pass
def _getDefaultCommands(self):
return [ulgmodel.TextCommand("ping -c 4 %s", param_specs=[ulgmodel.IPv4AddressParameter()]),
ulgmodel.TextCommand("ping6 -c 4 %s", param_specs=[ulgmodel.IPv6AddressParameter()]),
ulgmodel.TextCommand("traceroute %s", param_specs=[ulgmodel.IPv4AddressParameter()]),
ulgmodel.TextCommand("traceroute6 %s", param_specs=[ulgmodel.IPv6AddressParameter()]),
]
class LinuxRouterLocal(ulgmodel.LocalRouter,LinuxRouter):
def __init__(self,commands=None,name='localhost',acl=None):
ulgmodel.LocalRouter.__init__(self,acl=acl)
LinuxRouter.__init__(self)
self.setName(name)
        # command autoconfiguration must run only after the other parameters are set
if(commands):
self.setCommands(commands)
else:
self.setCommands(self._getDefaultCommands())
def runRawCommand(self,command,outfile):
s=pexpect.spawn(command,timeout=defaults.timeout)
while True:
i=s.expect(['\n',pexpect.EOF,pexpect.TIMEOUT])
if (i==0):
outfile.write(s.before)
elif (i==1):
break
elif (i==2):
raise Exception("pexpect session timed out. last output: "+s.before)
else:
raise Exception("pexpect session failed: Unknown error. last output: "+s.before)
def getForkNeeded(self):
return False
class LinuxRouterRemote(ulgmodel.RemoteRouter,LinuxRouter):
def __init__(self,host,user,password='',port=22,commands=None,name=None,bin_ssh=None,acl=None):
ulgmodel.RemoteRouter.__init__(self,acl=acl)
LinuxRouter.__init__(self)
self.setHost(host)
self.setUser(user)
self.setPassword(password)
self.setPort(port)
if(name):
self.setName(name)
else:
self.setName(host)
if(bin_ssh):
self.bin_ssh = bin_ssh
else:
self.bin_ssh = defaults.bin_ssh
        # command autoconfiguration must run only after the other parameters are set
if(commands):
self.setCommands(commands)
else:
self.setCommands(self._getDefaultCommands())
def getForkNeeded(self):
return True
def runRawCommand(self,command,outfile):
c = '/bin/bash -c \''+self.bin_ssh+' -p'+str(self.getPort())+' '+str(self.getUser())+'@'+self.getHost()+' "'+command+'"\''
s=pexpect.spawn(c,timeout=defaults.timeout)
# handle ssh
y=0
p=0
l=0
capture=False
while True:
i=s.expect([STRING_EXPECT_SSH_NEWKEY,STRING_EXPECT_PASSWORD,'\n',pexpect.EOF,pexpect.TIMEOUT])
if(i==0):
if(y>1):
raise Exception("pexpect session failed: Can not save SSH key.")
s.sendline('yes')
y+=1
elif(i==1):
if(p>1):
raise Exception("pexpect session failed: Password not accepted.")
s.sendline(self.password)
p+=1
elif(i==2):
outfile.write(s.before)
elif(i==3): # EOF -> process output
break
elif(i==4):
raise Exception("pexpect session timed out. last output: "+s.before)
else:
raise Exception("pexpect session failed: Unknown error. last output: "+s.before)
|
gpl-3.0
|
virtool/virtool
|
tests/db/snapshots/snap_test_migrate.py
|
2
|
3307
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_migrate_status[uvloop-True-True-True] 1'] = [
{
'_id': 'hmm',
'installed': None,
'process': None,
'release': None,
'updates': [
]
},
{
'_id': 'software',
'mongo_version': '3.6.3',
'process': None,
'updating': False,
'version': 'v3.0.0'
}
]
snapshots['test_migrate_status[uvloop-True-True-False] 1'] = [
{
'_id': 'hmm',
'installed': None,
'process': None,
'release': None,
'updates': [
]
},
{
'_id': 'software',
'installed': None,
'mongo_version': '3.6.3',
'process': None,
'releases': [
],
'updating': False,
'version': 'v3.0.0'
}
]
snapshots['test_migrate_status[uvloop-True-False-True] 1'] = [
{
'_id': 'hmm',
'installed': None,
'process': None,
'release': None,
'updates': [
]
},
{
'_id': 'software',
'mongo_version': '3.6.3',
'process': None,
'updating': False,
'version': 'v3.0.0'
}
]
snapshots['test_migrate_status[uvloop-True-False-False] 1'] = [
{
'_id': 'hmm',
'installed': None,
'process': None,
'release': None,
'updates': [
]
},
{
'_id': 'software',
'installed': None,
'mongo_version': '3.6.3',
'process': None,
'releases': [
],
'updating': False,
'version': 'v3.0.0'
}
]
snapshots['test_migrate_status[uvloop-False-True-True] 1'] = [
{
'_id': 'hmm',
'installed': None,
'process': None,
'release': None,
'updates': [
]
},
{
'_id': 'software',
'mongo_version': '3.6.3',
'process': None,
'updating': False,
'version': 'v3.0.0'
}
]
snapshots['test_migrate_status[uvloop-False-True-False] 1'] = [
{
'_id': 'hmm',
'installed': None,
'process': None,
'release': None,
'updates': [
]
},
{
'_id': 'software',
'installed': None,
'mongo_version': '3.6.3',
'process': None,
'releases': [
],
'updating': False,
'version': 'v3.0.0'
}
]
snapshots['test_migrate_status[uvloop-False-False-True] 1'] = [
{
'_id': 'hmm',
'installed': None,
'process': None,
'release': None,
'updates': [
]
},
{
'_id': 'software',
'mongo_version': '3.6.3',
'process': None,
'updating': False,
'version': 'v3.0.0'
}
]
snapshots['test_migrate_status[uvloop-False-False-False] 1'] = [
{
'_id': 'hmm',
'installed': None,
'process': None,
'release': None,
'updates': [
]
},
{
'_id': 'software',
'installed': None,
'mongo_version': '3.6.3',
'process': None,
'releases': [
],
'updating': False,
'version': 'v3.0.0'
}
]
|
mit
|
Opus1no2/Ifbyphone-API-Module
|
src/Ifbyphone/api/locator.py
|
1
|
3965
|
from base import IfbyphoneApiBase
class Locator(IfbyphoneApiBase):
def add_location(self, **kwargs):
"""Add a store location
        keyword arguments:
usr_locator_id -- ID of store locator
location_destination -- JSON encoded string | {"cmd":"XXXXX","parameter":"YYYYY"}
location_zipcode -- 5 digit zipcode
location_name -- name of the location | default: New Location
location_address -- address of the location
        location_description -- description of the location
location_city -- city of the location
location_state -- state of the location
location_email_address -- email address associated with location
        location_action_parameter -- location_destination | JSON-encoded string with
the following format: {"cmd":"XXXXX","parameter":"YYYYY"}
where XXXXX is a supported command or "hangup1" through "hangup4"
location_call_timeout -- time to try calling a location | default: 40
geocode_method -- method for geocoding | default: yahoo API
"""
self.options.update(kwargs)
self.options['action'] = 'locator.location.add'
return self.call(self.options)
def location_details(self, **kwargs):
"""Get details of a location in a store locator
keyword arguments:
usr_locator_id -- ID of the store locator
usr_location_id -- ID of the location
"""
self.options.update(kwargs)
self.options['action'] = 'locator.location.details'
return self.call(self.options)
def list_locations(self, _id):
"""List all locations in a store locator
keyword argument:
_id -- ID of the store locator
"""
self.options['usr_locator_id'] = _id
self.options['action'] = 'locator.location.list'
return self.call(self.options)
def remove_location(self, **kwargs):
"""Remove a location from a store locator
keyword arguments:
usr_locator_id -- ID of the store locator
usr_location_id -- ID of the location to be removed
"""
self.options.update(kwargs)
self.options['action'] = 'locator.location.remove'
return self.call(self.options)
def update_location(self, **kwargs):
"""Update configuration of a location
keyword arguments:
usr_locator_id -- ID of the store locator
usr_location_id -- ID of the location to be updated
location_destination -- JSON encoded string | {"cmd":"XXXXX","parameter":"YYYYY"}
location_zipcode -- 5 digit zipcode
location_name -- name of the location | default: New Location
location_address -- address of the location
location_desctipion -- description of the location
location_city -- city of the location
location_state -- state of the location
location_email_address -- email address associated with location
        location_action_parameter -- location_destination | JSON-encoded string with
the following format: {"cmd":"XXXXX","parameter":"YYYYY"}
where XXXXX is a supported command or "hangup1" through "hangup4"
location_call_timeout -- time to try calling a location | default: 40
geocode_method -- method for geocoding | default: yahoo API
"""
self.options.update(kwargs)
self.options['action'] = 'locator.location.update'
return self.call(self.options)
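# Usage sketch (hedged): this assumes IfbyphoneApiBase wires credentials and
# the HTTP transport behind self.call(), per the pattern above. The IDs and
# the destination command below are made-up illustrations, not real values.
#
#     locator = Locator()
#     locator.add_location(
#         usr_locator_id='1234',
#         location_name='Downtown Store',
#         location_zipcode='60601',
#         location_destination='{"cmd":"findme","parameter":"8005551234"}',
#     )
#     print(locator.list_locations('1234'))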
|
mit
|
BonexGu/Blik2D-SDK
|
Blik2D/addon/tensorflow-1.2.1_for_blik/tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py
|
7
|
69559
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
    # Set num_ps_replicas to be 10 and the min slice size to be extremely
    # small, so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testEstimatorWithCoreFeatureColumns(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(iris.data[:, i], dtype=dtypes.float32),
[-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [fc_core.numeric_column(str(i)) for i in range(4)]
linear_features = [
fc_core.bucketized_column(
cont_features[i],
sorted(set(test_data.get_quantile_based_buckets(
iris.data[:, i], 10)))) for i in range(4)
]
linear_features.append(
fc_core.categorical_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
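    # With labels [1, 0, 0, 0] over identical inputs, the learned P(class=1)
    # should approach 0.25, hence roughly [0.75, 0.25] for every row below.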
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
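      # end_mask=1 drops the end bound on axis 0, so the slice above keeps
      # predictions[:, 1:2], i.e. the probability of class 1 per example.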
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
    # logodds(0.75) = log(0.75 / 0.25) = log(3) = 1.09861228867
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
# Expected is 100, but because of the global step increment bug, this is 51.
self.assertEqual(51, step_counter.steps)
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
    self.assertEqual(
        1, len(classifier.get_variable_value('linear/age/weight')))
    self.assertEqual(
        100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
    self.assertEqual(
        1, len(classifier.get_variable_value('linear/bias_weight')))
    self.assertEqual(
        99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=100)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=100)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
|
mit
|
UOWPhysSoc/Barnes-Hutt-Simulation--James-MattG-
|
barnesplayer.py
|
1
|
2783
|
'''
Player for the Barnes-Hut N-body simulation created by Matt Griffiths and James Archer.
Player written by Matt Griffiths.
REQUIRES VPYTHON TO RUN
Available for use under a GPL v3 licence.
'''
#Import dependent libraries
import pickle
from time import sleep
class player():
    #Player class containing unpacker and play function
def __init__(self,filename, play_rate = 30):
#Unpacks data from file
self.play_rate = play_rate
self.index = 1
self.scene = display(width = 1000, height = 800)
self.trail = False
while True:
try:
self.file = open(filename + str(self.index) + '.barnes','rb')
print("Opened file " + filename + str(self.index))
self.index += 1
            except IOError:
print('Ended trying to open file ' + str(self.index))
break
#Open file, set up data structures for incoming data
self.steps = []
self.particles = []
#Unpack data into pre-existing data structures
self.steps = pickle.load(self.file)
self.file.close()
#print('Number of steps is ' + str(len(self.steps)))
#Create visual representations
for i in self.steps[0]:
            self.particles.append(sphere(pos = (i[0],i[1],i[2]),radius = 0.1*pow(i[3],0.33)))
if self.trail:
self.particles[-1].trail = curve()
if self.index == 1:
self.scene.autoscale = True
else:
self.scene.autoscale = False
#pass
self.play()
def play(self):
#Play function
#Set iteration pos to zero
i = 0
#print('f called')
#Loop through steps
sleep(1)
while i < len(self.steps):
#set refresh rate
rate(self.play_rate)
            #Move spheres to relevant positions
for j in range(0,len(self.particles)):
self.particles[j].pos = (self.steps[i][j][0],self.steps[i][j][1],self.steps[i][j][2])
if self.trail:
self.particles[j].trail.append(pos = self.particles[j].pos)
#Step player
i += 1
#Handle looping
#if i >= len(self.steps):
# i = 0
for i in self.particles:
i.visible = False
del i
if __name__ == '__main__':
from visual import *
#If this is executed as standalone give input options
while True:
try:
ifn = input('Input file name: ')
break
except:
pass
r = int(input('Playback rate: '))
p = player(ifn, r)
print('fin')
|
gpl-3.0
|
fredkingham/blog-of-fred
|
django/db/backends/__init__.py
|
19
|
36624
|
from django.db.utils import DatabaseError
try:
import thread
except ImportError:
import dummy_thread as thread
from contextlib import contextmanager
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import util
from django.db.transaction import TransactionManagementError
from django.utils.importlib import import_module
from django.utils.timezone import is_aware
class BaseDatabaseWrapper(object):
"""
Represents a database connection.
"""
ops = None
vendor = 'unknown'
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
allow_thread_sharing=False):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
self.use_debug_cursor = None
# Transaction related attributes
self.transaction_state = []
self.savepoint_state = 0
self._dirty = None
self._thread_ident = thread.get_ident()
self.allow_thread_sharing = allow_thread_sharing
def __eq__(self, other):
return self.alias == other.alias
def __ne__(self, other):
return not self == other
def _commit(self):
if self.connection is not None:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
return self.connection.rollback()
def _enter_transaction_management(self, managed):
"""
A hook for backend-specific changes required when entering manual
transaction handling.
"""
pass
def _leave_transaction_management(self, managed):
"""
A hook for backend-specific changes required when leaving manual
transaction handling. Will usually be implemented only when
_enter_transaction_management() is also required.
"""
pass
def _savepoint(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def abort(self):
"""
Roll back any ongoing transaction and clean the transaction state
stack.
"""
if self._dirty:
self._rollback()
self._dirty = False
while self.transaction_state:
self.leave_transaction_management()
def enter_transaction_management(self, managed=True):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
if self.transaction_state:
self.transaction_state.append(self.transaction_state[-1])
else:
self.transaction_state.append(settings.TRANSACTIONS_MANAGED)
if self._dirty is None:
self._dirty = False
self._enter_transaction_management(managed)
def leave_transaction_management(self):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
self._leave_transaction_management(self.is_managed())
if self.transaction_state:
del self.transaction_state[-1]
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
if self._dirty:
self.rollback()
raise TransactionManagementError("Transaction managed block ended with "
"pending COMMIT/ROLLBACK")
self._dirty = False
def validate_thread_sharing(self):
"""
Validates that the connection isn't accessed by another thread than the
one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `allow_thread_sharing`
property). Raises an exception if the validation fails.
"""
if (not self.allow_thread_sharing
and self._thread_ident != thread.get_ident()):
raise DatabaseError("DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s."
% (self.alias, self._thread_ident, thread.get_ident()))
def is_dirty(self):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
return self._dirty
def set_dirty(self):
"""
        Sets a dirty flag for the current thread and code streak. This can be
        used in a managed block of code to decide whether there are open
        changes waiting for commit.
"""
if self._dirty is not None:
self._dirty = True
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def set_clean(self):
"""
        Resets a dirty flag for the current thread and code streak. This can
        be used in a managed block of code to decide whether a commit or
        rollback should happen.
"""
if self._dirty is not None:
self._dirty = False
else:
raise TransactionManagementError("This code isn't under transaction management")
self.clean_savepoints()
def clean_savepoints(self):
self.savepoint_state = 0
def is_managed(self):
"""
Checks whether the transaction manager is in manual or in auto state.
"""
if self.transaction_state:
return self.transaction_state[-1]
return settings.TRANSACTIONS_MANAGED
def managed(self, flag=True):
"""
Puts the transaction manager into a manual state: managed transactions have
to be committed explicitly by the user. If you switch off transaction
management and there is a pending commit/rollback, the data will be
        committed.
"""
top = self.transaction_state
if top:
top[-1] = flag
if not flag and self.is_dirty():
self._commit()
self.set_clean()
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def commit_unless_managed(self):
"""
Commits changes if the system is not in managed transaction mode.
"""
self.validate_thread_sharing()
if not self.is_managed():
self._commit()
self.clean_savepoints()
else:
self.set_dirty()
def rollback_unless_managed(self):
"""
Rolls back changes if the system is not in managed transaction mode.
"""
self.validate_thread_sharing()
if not self.is_managed():
self._rollback()
else:
self.set_dirty()
def commit(self):
"""
Does the commit itself and resets the dirty flag.
"""
self.validate_thread_sharing()
self._commit()
self.set_clean()
def rollback(self):
"""
This function does the rollback itself and resets the dirty flag.
"""
self.validate_thread_sharing()
self._rollback()
self.set_clean()
def savepoint(self):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
thread_ident = thread.get_ident()
self.savepoint_state += 1
tid = str(thread_ident).replace('-', '')
sid = "s%s_x%d" % (tid, self.savepoint_state)
self._savepoint(sid)
return sid
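    # Example of the sid format built above: a connection created in thread
    # id 123 gets "s123_x1" for its first savepoint and "s123_x2" for the
    # second; _savepoint() turns each sid into SQL via ops.savepoint_create_sql().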
def savepoint_rollback(self, sid):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
self.validate_thread_sharing()
if self.savepoint_state:
self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
self.validate_thread_sharing()
if self.savepoint_state:
self._savepoint_commit(sid)
@contextmanager
def constraint_checks_disabled(self):
disabled = self.disable_constraint_checking()
try:
yield
finally:
if disabled:
self.enable_constraint_checking()
def disable_constraint_checking(self):
"""
Backends can implement as needed to temporarily disable foreign key constraint
checking.
"""
pass
def enable_constraint_checking(self):
"""
Backends can implement as needed to re-enable foreign key constraint checking.
"""
pass
def check_constraints(self, table_names=None):
"""
Backends can override this method if they can apply constraint checking (e.g. via "SET CONSTRAINTS
ALL IMMEDIATE"). Should raise an IntegrityError if any invalid foreign key references are encountered.
"""
pass
def close(self):
self.validate_thread_sharing()
if self.connection is not None:
self.connection.close()
self.connection = None
def cursor(self):
self.validate_thread_sharing()
if (self.use_debug_cursor or
(self.use_debug_cursor is None and settings.DEBUG)):
cursor = self.make_debug_cursor(self._cursor())
else:
cursor = util.CursorWrapper(self._cursor(), self)
return cursor
def make_debug_cursor(self, cursor):
return util.CursorDebugWrapper(cursor, self)
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists, but one of the unique_together columns is NULL?
ignores_nulls_in_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
has_bulk_insert = False
uses_autocommit = False
uses_savepoints = False
can_combine_inserts_with_and_without_auto_increment_pk = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
has_select_for_update = False
has_select_for_update_nowait = False
distinguishes_insert_from_update = True
supports_joins = True
supports_select_related = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does a dirty transaction need to be rolled back
# before the cursor can be used again?
requires_rollback_on_dirty_transaction = False
# Does the backend allow very long model names without error?
supports_long_model_names = True
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have a primary key of 0? MySQL says No.
allows_primary_key_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Features that need to be confirmed at runtime
# Cache whether the confirmation has been performed.
_confirmed = False
supports_transactions = None
supports_stddev = None
can_introspect_foreign_keys = None
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
def __init__(self, connection):
self.connection = connection
def confirm(self):
"Perform manual checks of any database features that might vary between installs"
self._confirmed = True
self.supports_transactions = self._supports_transactions()
self.supports_stddev = self._supports_stddev()
self.can_introspect_foreign_keys = self._can_introspect_foreign_keys()
def _supports_transactions(self):
"Confirm support for transactions"
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection._commit()
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection._rollback()
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
self.connection._commit()
return count == 0
def _supports_stddev(self):
"Confirm support for STDDEV and related stats functions"
class StdDevPop(object):
sql_function = 'STDDEV_POP'
try:
self.connection.ops.check_aggregate_support(StdDevPop())
except NotImplementedError:
self.supports_stddev = False
def _can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
return True
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a DATE object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import smart_unicode, force_unicode
# Convert params to contain Unicode values.
to_unicode = lambda s: force_unicode(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple([to_unicode(val) for val in params])
else:
u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
return smart_unicode(sql) % u_params
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
raise NotImplementedError
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
raise NotImplementedError
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
raise NotImplementedError
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
raise NotImplementedError()
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import smart_unicode
return smart_unicode(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def value_to_db_auto(self, value):
"""
Transform an AutoField value to an object compatible with what is expected
by the backend driver for automatic keys.
"""
if value is None:
return None
return int(value)
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_time(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return unicode(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
        with a BETWEEN operator to query a field value using a year lookup.
        `value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
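    # For example, year_lookup_bounds(2005) returns
    # ['2005-01-01 00:00:00', '2005-12-31 23:59:59.999999'], ready to be used
    # as the two parameters of a BETWEEN clause.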
def year_lookup_bounds_for_date_field(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
        with a BETWEEN operator to query a DateField value using a year lookup.
        `value` is an int, containing the looked-up year.
        By default, it just calls `self.year_lookup_bounds`. Some backends need
        this hook because on their DB, date fields can't be compared to values
        which include a time part.
"""
return self.year_lookup_bounds(value)
def convert_values(self, value, field):
"""Coerce the value returned by the database backend into a consistent type that
is compatible with the field type.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return value
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type in ('DateField', 'DateTimeField', 'TimeField'):
return value
# No field, or the field isn't known to be a decimal or integer
# Default to a float
return float(value)
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
This is used on specific backends to rule out known aggregates
that are known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
def modify_insert_params(self, placeholders, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def table_names(self):
"Returns a list of names of all tables that exist in the database."
cursor = self.connection.cursor()
return self.get_table_list(cursor)
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models, router
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
tables = list(tables)
if only_existing:
existing_tables = self.table_names()
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models, router
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
all_models.append(model)
tables = map(self.table_name_converter, tables)
return set([
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models, router
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
raise NotImplementedError
def get_primary_key_column(self, cursor, table_name):
"""
Backends can override this to return the column name of the primary key for the given table.
"""
raise NotImplementedError
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
    This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
|
bsd-3-clause
|
ASlave2Audio/Restaurant-App
|
mingw/bin/lib/sunaudio.py
|
286
|
1399
|
"""Interpret sun audio headers."""
from warnings import warnpy3k
warnpy3k("the sunaudio module has been removed in Python 3.0; "
"use the sunau module instead", stacklevel=2)
del warnpy3k
MAGIC = '.snd'
class error(Exception):
pass
def get_long_be(s):
"""Convert a 4-char value to integer."""
return (ord(s[0])<<24) | (ord(s[1])<<16) | (ord(s[2])<<8) | ord(s[3])
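# For example, get_long_be('\x00\x00\x01\x02') == 258 and
# get_long_be('.snd') == 0x2E736E64 == 779316836.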
def gethdr(fp):
"""Read a sound header from an open file."""
if fp.read(4) != MAGIC:
raise error, 'gethdr: bad magic word'
hdr_size = get_long_be(fp.read(4))
data_size = get_long_be(fp.read(4))
encoding = get_long_be(fp.read(4))
sample_rate = get_long_be(fp.read(4))
channels = get_long_be(fp.read(4))
excess = hdr_size - 24
if excess < 0:
raise error, 'gethdr: bad hdr_size'
if excess > 0:
info = fp.read(excess)
else:
info = ''
return (data_size, encoding, sample_rate, channels, info)
def printhdr(file):
"""Read and print the sound header of a named file."""
hdr = gethdr(open(file, 'r'))
data_size, encoding, sample_rate, channels, info = hdr
while info[-1:] == '\0':
info = info[:-1]
print 'File name: ', file
print 'Data size: ', data_size
print 'Encoding: ', encoding
print 'Sample rate:', sample_rate
print 'Channels: ', channels
print 'Info: ', repr(info)
|
mit
|
Tatsh-ansible/ansible
|
lib/ansible/modules/network/iosxr/iosxr_system.py
|
50
|
8452
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: iosxr_system
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the system attributes on Cisco IOS XR devices
description:
- This module provides declarative management of node system attributes
on Cisco IOS XR devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
extends_documentation_fragment: iosxr
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configure the IP domain name
on the remote device to the provided value. Value
should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name.
domain_search:
description:
- Provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
lookup_source:
description:
- The C(lookup_source) argument provides one or more source
interfaces to use for performing DNS lookups. The interface
provided in C(lookup_source) must be a valid interface configured
on the device.
lookup_enabled:
description:
- Provides administrative control
for enabling or disabling DNS lookups. When this argument is
set to True, lookups are performed and when it is set to False,
lookups are not performed.
type: bool
name_servers:
description:
      - The C(name_servers) argument accepts a list of DNS name servers,
        specified by either FQDN or IP address, to use to perform name
        resolution lookups. See examples.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
        in the device active configuration.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain-name
iosxr_system:
hostname: iosxr01
domain_name: test.example.com
    domain_search:
- ansible.com
- redhat.com
- cisco.com
- name: remove configuration
iosxr_system:
state: absent
- name: configure DNS lookup sources
iosxr_system:
lookup_source: MgmtEth0/0/CPU0/0
lookup_enabled: yes
- name: configure name servers
iosxr_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- hostname iosxr01
- ip domain-name test.example.com
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.iosxr import get_config, load_config
from ansible.module_utils.iosxr import iosxr_argument_spec, check_args
def diff_list(want, have):
adds = set(want).difference(have)
removes = set(have).difference(want)
return (adds, removes)
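# For example, diff_list(['8.8.8.8', '1.1.1.1'], ['8.8.8.8', '8.8.4.4'])
# returns (set(['1.1.1.1']), set(['8.8.4.4'])): entries to add and to remove.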
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params['state']
needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))
if state == 'absent':
if have['hostname'] != 'ios':
commands.append('no hostname')
if have['domain_name']:
commands.append('no domain name')
if have['lookup_source']:
commands.append('no domain lookup source-interface %s' % have['lookup_source'])
if not have['lookup_enabled']:
commands.append('no domain lookup disable')
for item in have['name_servers']:
commands.append('no domain name-server %s' % item)
for item in have['domain_search']:
commands.append('no domain list %s' % item)
elif state == 'present':
if needs_update('hostname'):
commands.append('hostname %s' % want['hostname'])
if needs_update('domain_name'):
commands.append('domain name %s' % want['domain_name'])
if needs_update('lookup_source'):
commands.append('domain lookup source-interface %s' % want['lookup_source'])
if needs_update('lookup_enabled'):
cmd = 'domain lookup disable'
if want['lookup_enabled']:
cmd = 'no %s' % cmd
commands.append(cmd)
if want['name_servers'] is not None:
adds, removes = diff_list(want['name_servers'], have['name_servers'])
for item in adds:
commands.append('domain name-server %s' % item)
for item in removes:
commands.append('no domain name-server %s' % item)
if want['domain_search'] is not None:
adds, removes = diff_list(want['domain_search'], have['domain_search'])
for item in adds:
commands.append('domain list %s' % item)
for item in removes:
commands.append('no domain list %s' % item)
return commands
def parse_hostname(config):
match = re.search('^hostname (\S+)', config, re.M)
return match.group(1)
def parse_domain_name(config):
match = re.search('^domain name (\S+)', config, re.M)
if match:
return match.group(1)
def parse_lookup_source(config):
match = re.search('^domain lookup source-interface (\S+)', config, re.M)
if match:
return match.group(1)
def map_config_to_obj(module):
config = get_config(module)
return {
'hostname': parse_hostname(config),
'domain_name': parse_domain_name(config),
'domain_search': re.findall('^domain list (\S+)', config, re.M),
'lookup_source': parse_lookup_source(config),
'lookup_enabled': 'domain lookup disable' not in config,
'name_servers': re.findall('^domain name-server (\S+)', config, re.M)
}
def map_params_to_obj(module):
return {
'hostname': module.params['hostname'],
'domain_name': module.params['domain_name'],
'domain_search': module.params['domain_search'],
'lookup_source': module.params['lookup_source'],
'lookup_enabled': module.params['lookup_enabled'],
'name_servers': module.params['name_servers']
}
def main():
""" Main entry point for Ansible module execution
"""
argument_spec = dict(
hostname=dict(),
domain_name=dict(),
domain_search=dict(type='list'),
name_servers=dict(type='list'),
lookup_source=dict(),
lookup_enabled=dict(type='bool'),
state=dict(choices=['present', 'absent'], default='present')
)
argument_spec.update(iosxr_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
if not module.check_mode:
load_config(module, commands, result['warnings'], commit=True)
result['changed'] = True
module.exit_json(**result)
if __name__ == "__main__":
main()
|
gpl-3.0
|
F1ashhimself/robotframework-selenium2library
|
src/Selenium2Library/utils/__init__.py
|
65
|
3683
|
import os
from fnmatch import fnmatch
from browsercache import BrowserCache
__all__ = [
"get_child_packages_in",
"get_module_names_under",
"import_modules_under",
"escape_xpath_value",
"BrowserCache"
]
# Public
def get_child_packages_in(root_dir, include_root_package_name=True, exclusions=None):
packages = []
root_package_str = os.path.basename(root_dir) + '.' if include_root_package_name else ""
_discover_child_package_dirs(
root_dir,
_clean_exclusions(exclusions),
lambda abs_path, relative_path, name:
packages.append(root_package_str + relative_path.replace(os.sep, '.')))
return packages
def get_module_names_under(root_dir, include_root_package_name=True, exclusions=None, pattern=None):
module_names = []
root_package_str = os.path.basename(root_dir) + '.' if include_root_package_name else ""
_discover_module_files_in(
root_dir,
_clean_exclusions(exclusions),
pattern if pattern is not None else "*.*",
lambda abs_path, relative_path, name:
module_names.append(root_package_str + os.path.splitext(relative_path)[0].replace(os.sep, '.')))
return module_names
def import_modules_under(root_dir, include_root_package_name=True, exclusions=None, pattern=None):
module_names = get_module_names_under(root_dir, include_root_package_name, exclusions, pattern)
modules = [ __import__(module_name, globals(), locals(), ['*'], -1)
for module_name in module_names ]
return (module_names, modules)
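# A usage sketch (the package layout here is hypothetical): with a directory
# /pkg/keywords containing _browser.py, get_module_names_under('/pkg/keywords')
# would return ['keywords._browser'], and import_modules_under() would import
# that name and return it alongside the module object.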
def escape_xpath_value(value):
value = unicode(value)
if '"' in value and '\'' in value:
parts_wo_apos = value.split('\'')
return "concat('%s')" % "', \"'\", '".join(parts_wo_apos)
if '\'' in value:
return "\"%s\"" % value
return "'%s'" % value
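# For example, escape_xpath_value('a"b\'c') returns concat('a"b', "'", 'c'),
# since an XPath 1.0 string literal cannot contain both quote characters.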
# Private
def _clean_exclusions(exclusions):
if exclusions is None: exclusions = []
if not isinstance(exclusions, list): exclusions = [ exclusions ]
exclusions = [ os.sep + exclusion.lower().strip(os.sep) + os.sep
for exclusion in exclusions ]
return exclusions
def _discover_child_package_dirs(root_dir, exclusions, callback, relative_dir=None):
relative_dir = relative_dir if relative_dir is not None else ''
abs_dir = os.path.join(root_dir, relative_dir)
for item in os.listdir(abs_dir):
item_relative_path = os.path.join(relative_dir, item)
item_abs_path = os.path.join(root_dir, item_relative_path)
if os.path.isdir(item_abs_path):
if os.path.exists(os.path.join(item_abs_path, "__init__.py")):
exclusion_matches = [ exclusion for exclusion in exclusions
if os.sep + item_relative_path.lower() + os.sep == exclusion ]
if not exclusion_matches:
callback(item_abs_path, item_relative_path, item)
_discover_child_package_dirs(root_dir, exclusions, callback, item_relative_path)
def _discover_module_files_in(root_dir, exclusions, pattern, callback):
def find_matching_files(relative_dir):
abs_dir = os.path.join(root_dir, relative_dir)
for item in os.listdir(abs_dir):
item_relative_path = os.path.join(relative_dir, item)
item_abs_path = os.path.join(root_dir, item_relative_path)
if os.path.isfile(item_abs_path) and fnmatch(item, pattern):
callback(item_abs_path, item_relative_path, item)
find_matching_files('')
_discover_child_package_dirs(
root_dir,
_clean_exclusions(exclusions),
lambda abs_path, relative_path, name: find_matching_files(relative_path))
|
apache-2.0
|
40123237/w17test
|
static/Brython3.1.0-20150301-090019/Lib/xml/dom/pulldom.py
|
850
|
11761
|
import xml.sax
import xml.sax.handler
START_ELEMENT = "START_ELEMENT"
END_ELEMENT = "END_ELEMENT"
COMMENT = "COMMENT"
START_DOCUMENT = "START_DOCUMENT"
END_DOCUMENT = "END_DOCUMENT"
PROCESSING_INSTRUCTION = "PROCESSING_INSTRUCTION"
IGNORABLE_WHITESPACE = "IGNORABLE_WHITESPACE"
CHARACTERS = "CHARACTERS"
class PullDOM(xml.sax.ContentHandler):
_locator = None
document = None
def __init__(self, documentFactory=None):
from xml.dom import XML_NAMESPACE
self.documentFactory = documentFactory
self.firstEvent = [None, None]
self.lastEvent = self.firstEvent
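        # firstEvent/lastEvent form a singly linked list of
        # [(event, node), next] cells: the SAX callbacks append at the tail
        # (lastEvent) while DOMEventStream.getEvent() consumes from the head.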
self.elementStack = []
self.push = self.elementStack.append
try:
self.pop = self.elementStack.pop
except AttributeError:
# use class' pop instead
pass
self._ns_contexts = [{XML_NAMESPACE:'xml'}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self.pending_events = []
def pop(self):
result = self.elementStack[-1]
del self.elementStack[-1]
return result
def setDocumentLocator(self, locator):
self._locator = locator
def startPrefixMapping(self, prefix, uri):
if not hasattr(self, '_xmlns_attrs'):
self._xmlns_attrs = []
self._xmlns_attrs.append((prefix or 'xmlns', uri))
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix or None
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts.pop()
    def startElementNS(self, name, tagName, attrs):
# Retrieve xml namespace declaration attributes.
xmlns_uri = 'http://www.w3.org/2000/xmlns/'
xmlns_attrs = getattr(self, '_xmlns_attrs', None)
if xmlns_attrs is not None:
for aname, value in xmlns_attrs:
attrs._attrs[(xmlns_uri, aname)] = value
self._xmlns_attrs = []
uri, localname = name
if uri:
# When using namespaces, the reader may or may not
# provide us with the original name. If not, create
# *a* valid tagName from the current context.
if tagName is None:
prefix = self._current_context[uri]
if prefix:
tagName = prefix + ":" + localname
else:
tagName = localname
if self.document:
node = self.document.createElementNS(uri, tagName)
else:
node = self.buildDocument(uri, tagName)
else:
# When the tagname is not prefixed, it just appears as
# localname
if self.document:
node = self.document.createElement(localname)
else:
node = self.buildDocument(None, localname)
for aname,value in attrs.items():
a_uri, a_localname = aname
if a_uri == xmlns_uri:
if a_localname == 'xmlns':
qname = a_localname
else:
qname = 'xmlns:' + a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
elif a_uri:
prefix = self._current_context[a_uri]
if prefix:
qname = prefix + ":" + a_localname
else:
qname = a_localname
attr = self.document.createAttributeNS(a_uri, qname)
node.setAttributeNodeNS(attr)
else:
attr = self.document.createAttribute(a_localname)
node.setAttributeNode(attr)
attr.value = value
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElementNS(self, name, tagName):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def startElement(self, name, attrs):
if self.document:
node = self.document.createElement(name)
else:
node = self.buildDocument(None, name)
for aname,value in attrs.items():
attr = self.document.createAttribute(aname)
attr.value = value
node.setAttributeNode(attr)
self.lastEvent[1] = [(START_ELEMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
def endElement(self, name):
self.lastEvent[1] = [(END_ELEMENT, self.pop()), None]
self.lastEvent = self.lastEvent[1]
def comment(self, s):
if self.document:
node = self.document.createComment(s)
self.lastEvent[1] = [(COMMENT, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(COMMENT, s), None]
self.pending_events.append(event)
def processingInstruction(self, target, data):
if self.document:
node = self.document.createProcessingInstruction(target, data)
self.lastEvent[1] = [(PROCESSING_INSTRUCTION, node), None]
self.lastEvent = self.lastEvent[1]
else:
event = [(PROCESSING_INSTRUCTION, target, data), None]
self.pending_events.append(event)
def ignorableWhitespace(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(IGNORABLE_WHITESPACE, node), None]
self.lastEvent = self.lastEvent[1]
def characters(self, chars):
node = self.document.createTextNode(chars)
self.lastEvent[1] = [(CHARACTERS, node), None]
self.lastEvent = self.lastEvent[1]
def startDocument(self):
if self.documentFactory is None:
import xml.dom.minidom
self.documentFactory = xml.dom.minidom.Document.implementation
def buildDocument(self, uri, tagname):
# Can't do that in startDocument, since we need the tagname
# XXX: obtain DocumentType
node = self.documentFactory.createDocument(uri, tagname, None)
self.document = node
self.lastEvent[1] = [(START_DOCUMENT, node), None]
self.lastEvent = self.lastEvent[1]
self.push(node)
# Put everything we have seen so far into the document
for e in self.pending_events:
if e[0][0] == PROCESSING_INSTRUCTION:
_,target,data = e[0]
n = self.document.createProcessingInstruction(target, data)
e[0] = (PROCESSING_INSTRUCTION, n)
elif e[0][0] == COMMENT:
n = self.document.createComment(e[0][1])
e[0] = (COMMENT, n)
else:
raise AssertionError("Unknown pending event ",e[0][0])
self.lastEvent[1] = e
self.lastEvent = e
self.pending_events = None
return node.firstChild
def endDocument(self):
self.lastEvent[1] = [(END_DOCUMENT, self.document), None]
self.pop()
def clear(self):
"clear(): Explicitly release parsing structures"
self.document = None
class ErrorHandler:
def warning(self, exception):
print(exception)
def error(self, exception):
raise exception
def fatalError(self, exception):
raise exception
class DOMEventStream:
def __init__(self, stream, parser, bufsize):
self.stream = stream
self.parser = parser
self.bufsize = bufsize
if not hasattr(self.parser, 'feed'):
self.getEvent = self._slurp
self.reset()
def reset(self):
self.pulldom = PullDOM()
# This content handler relies on namespace support
self.parser.setFeature(xml.sax.handler.feature_namespaces, 1)
self.parser.setContentHandler(self.pulldom)
def __getitem__(self, pos):
rc = self.getEvent()
if rc:
return rc
raise IndexError
def __next__(self):
rc = self.getEvent()
if rc:
return rc
raise StopIteration
def __iter__(self):
return self
def expandNode(self, node):
event = self.getEvent()
parents = [node]
while event:
token, cur_node = event
if cur_node is node:
return
if token != END_ELEMENT:
parents[-1].appendChild(cur_node)
if token == START_ELEMENT:
parents.append(cur_node)
elif token == END_ELEMENT:
del parents[-1]
event = self.getEvent()
def getEvent(self):
# use IncrementalParser interface, so we get the desired
# pull effect
if not self.pulldom.firstEvent[1]:
self.pulldom.lastEvent = self.pulldom.firstEvent
while not self.pulldom.firstEvent[1]:
buf = self.stream.read(self.bufsize)
if not buf:
self.parser.close()
return None
self.parser.feed(buf)
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def _slurp(self):
""" Fallback replacement for getEvent() using the
standard SAX2 interface, which means we slurp the
SAX events into memory (no performance gain, but
we are compatible to all SAX parsers).
"""
self.parser.parse(self.stream)
self.getEvent = self._emit
return self._emit()
def _emit(self):
""" Fallback replacement for getEvent() that emits
the events that _slurp() read previously.
"""
rc = self.pulldom.firstEvent[1][0]
self.pulldom.firstEvent[1] = self.pulldom.firstEvent[1][1]
return rc
def clear(self):
"""clear(): Explicitly release parsing objects"""
self.pulldom.clear()
del self.pulldom
self.parser = None
self.stream = None
class SAX2DOM(PullDOM):
    def startElementNS(self, name, tagName, attrs):
PullDOM.startElementNS(self, name, tagName, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def startElement(self, name, attrs):
PullDOM.startElement(self, name, attrs)
curNode = self.elementStack[-1]
parentNode = self.elementStack[-2]
parentNode.appendChild(curNode)
def processingInstruction(self, target, data):
PullDOM.processingInstruction(self, target, data)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def ignorableWhitespace(self, chars):
PullDOM.ignorableWhitespace(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
def characters(self, chars):
PullDOM.characters(self, chars)
node = self.lastEvent[0][1]
parentNode = self.elementStack[-1]
parentNode.appendChild(node)
default_bufsize = (2 ** 14) - 20
def parse(stream_or_string, parser=None, bufsize=None):
if bufsize is None:
bufsize = default_bufsize
if isinstance(stream_or_string, str):
stream = open(stream_or_string, 'rb')
else:
stream = stream_or_string
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(stream, parser, bufsize)
def parseString(string, parser=None):
from io import StringIO
bufsize = len(string)
buf = StringIO(string)
if not parser:
parser = xml.sax.make_parser()
return DOMEventStream(buf, parser, bufsize)
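# A minimal usage sketch, assuming a local file "doc.xml" with <item> elements:
# events are pulled lazily, and a node can be expanded into a full DOM subtree
# on demand.
#
#   doc = parse("doc.xml")
#   for event, node in doc:
#       if event == START_ELEMENT and node.tagName == "item":
#           doc.expandNode(node)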
|
gpl-3.0
|
standaloneSA/ncclient
|
ncclient/operations/util.py
|
11
|
2454
|
# Copyright 2009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'Boilerplate ugliness'
from ncclient.xml_ import *
from ncclient.operations.errors import OperationError, MissingCapabilityError
def one_of(*args):
"Verifies that only one of the arguments is not None"
for i, arg in enumerate(args):
if arg is not None:
for argh in args[i+1:]:
if argh is not None:
raise OperationError("Too many parameters")
else:
return
raise OperationError("Insufficient parameters")
def datastore_or_url(wha, loc, capcheck=None):
node = new_ele(wha)
if "://" in loc: # e.g. http://, file://, ftp://
if capcheck is not None:
capcheck(":url") # url schema check at some point!
sub_ele(node, "url").text = loc
else:
#if loc == 'candidate':
# capcheck(':candidate')
#elif loc == 'startup':
# capcheck(':startup')
#elif loc == 'running' and wha == 'target':
# capcheck(':writable-running')
sub_ele(node, loc)
return node
def build_filter(spec, capcheck=None):
type = None
if isinstance(spec, tuple):
type, criteria = spec
rep = new_ele("filter", type=type)
if type == "xpath":
rep.attrib["select"] = criteria
elif type == "subtree":
rep.append(to_ele(criteria))
else:
raise OperationError("Invalid filter type")
else:
rep = validated_element(spec, ("filter", qualify("filter")))
# results in XMLError: line 105 ncclient/xml_.py - commented by earies - 5/10/13
#rep = validated_element(spec, ("filter", qualify("filter")),
# attrs=("type",))
# TODO set type var here, check if select attr present in case of xpath..
if type == "xpath" and capcheck is not None:
capcheck(":xpath")
return rep
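# Minimal usage sketch (assumes ncclient's xml_ helpers imported above; the
# criteria string below is hypothetical):
if __name__ == "__main__":
    flt = build_filter(("subtree", "<configuration><system/></configuration>"))
    print(to_xml(flt))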
|
apache-2.0
|
t794104/ansible
|
lib/ansible/modules/network/fortios/fortios_extender_controller_extender.py
|
24
|
18088
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# The library uses Python logging; its output can be captured if logging is
# enabled in your Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_extender_controller_extender
short_description: Extender controller configuration in Fortinet's FortiOS and FortiGate.
description:
    - This module is able to configure a FortiGate or FortiOS device by
      allowing the user to set and modify the extender_controller feature and extender category.
      Examples include all the options and need to be adjusted to datasources before usage.
      Tested with FOS v6.0.2.
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
extender_controller_extender:
description:
- Extender controller configuration.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
aaa-shared-secret:
description:
- AAA shared secret.
access-point-name:
description:
                - Access point name (APN).
admin:
description:
- FortiExtender Administration (enable or disable).
choices:
- disable
- discovered
- enable
at-dial-script:
description:
- Initialization AT commands specific to the MODEM.
billing-start-day:
description:
- Billing start day.
cdma-aaa-spi:
description:
- CDMA AAA SPI.
cdma-ha-spi:
description:
- CDMA HA SPI.
cdma-nai:
description:
- NAI for CDMA MODEMS.
conn-status:
description:
- Connection status.
description:
description:
- Description.
dial-mode:
description:
- Dial mode (dial-on-demand or always-connect).
choices:
- dial-on-demand
- always-connect
dial-status:
description:
- Dial status.
ext-name:
description:
- FortiExtender name.
ha-shared-secret:
description:
- HA shared secret.
id:
description:
- FortiExtender serial number.
required: true
ifname:
description:
- FortiExtender interface name.
initiated-update:
description:
- Allow/disallow network initiated updates to the MODEM.
choices:
- enable
- disable
mode:
description:
- FortiExtender mode.
choices:
- standalone
- redundant
modem-passwd:
description:
- MODEM password.
modem-type:
description:
- MODEM type (CDMA, GSM/LTE or WIMAX).
choices:
- cdma
- gsm/lte
- wimax
multi-mode:
description:
                - MODEM mode of operation (3G, LTE, etc.).
choices:
- auto
- auto-3g
- force-lte
- force-3g
- force-2g
ppp-auth-protocol:
description:
                - PPP authentication protocol (PAP, CHAP, or auto).
choices:
- auto
- pap
- chap
ppp-echo-request:
description:
- Enable/disable PPP echo request.
choices:
- enable
- disable
ppp-password:
description:
- PPP password.
ppp-username:
description:
- PPP username.
primary-ha:
description:
- Primary HA.
quota-limit-mb:
description:
- Monthly quota limit (MB).
redial:
description:
- Number of redials allowed based on failed attempts.
choices:
- none
- 1
- 2
- 3
- 4
- 5
- 6
- 7
- 8
- 9
- 10
redundant-intf:
description:
- Redundant interface.
roaming:
description:
- Enable/disable MODEM roaming.
choices:
- enable
- disable
role:
description:
                - FortiExtender work role (Primary, Secondary, None).
choices:
- none
- primary
- secondary
secondary-ha:
description:
- Secondary HA.
sim-pin:
description:
- SIM PIN.
vdom:
description:
- VDOM
wimax-auth-protocol:
description:
                - WiMax authentication protocol (TLS or TTLS).
choices:
- tls
- ttls
wimax-carrier:
description:
- WiMax carrier.
wimax-realm:
description:
- WiMax realm.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Extender controller configuration.
fortios_extender_controller_extender:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
extender_controller_extender:
state: "present"
aaa-shared-secret: "<your_own_value>"
access-point-name: "<your_own_value>"
admin: "disable"
at-dial-script: "<your_own_value>"
billing-start-day: "7"
cdma-aaa-spi: "<your_own_value>"
cdma-ha-spi: "<your_own_value>"
cdma-nai: "<your_own_value>"
conn-status: "11"
description: "<your_own_value>"
dial-mode: "dial-on-demand"
dial-status: "14"
ext-name: "<your_own_value>"
ha-shared-secret: "<your_own_value>"
id: "17"
ifname: "<your_own_value>"
initiated-update: "enable"
mode: "standalone"
modem-passwd: "<your_own_value>"
modem-type: "cdma"
multi-mode: "auto"
ppp-auth-protocol: "auto"
ppp-echo-request: "enable"
ppp-password: "<your_own_value>"
ppp-username: "<your_own_value>"
primary-ha: "<your_own_value>"
quota-limit-mb: "29"
redial: "none"
redundant-intf: "<your_own_value>"
roaming: "enable"
role: "none"
secondary-ha: "<your_own_value>"
sim-pin: "<your_own_value>"
vdom: "36"
wimax-auth-protocol: "tls"
wimax-carrier: "<your_own_value>"
wimax-realm: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
host = data['host']
username = data['username']
password = data['password']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password)
def filter_extender_controller_extender_data(json):
option_list = ['aaa-shared-secret', 'access-point-name', 'admin',
'at-dial-script', 'billing-start-day', 'cdma-aaa-spi',
'cdma-ha-spi', 'cdma-nai', 'conn-status',
'description', 'dial-mode', 'dial-status',
'ext-name', 'ha-shared-secret', 'id',
'ifname', 'initiated-update', 'mode',
'modem-passwd', 'modem-type', 'multi-mode',
'ppp-auth-protocol', 'ppp-echo-request', 'ppp-password',
'ppp-username', 'primary-ha', 'quota-limit-mb',
'redial', 'redundant-intf', 'roaming',
'role', 'secondary-ha', 'sim-pin',
'vdom', 'wimax-auth-protocol', 'wimax-carrier',
'wimax-realm']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
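# For illustration: only known options that are present and not None survive
# the filtering step above, e.g.
#   filter_extender_controller_extender_data(
#       {'id': '17', 'admin': None, 'unknown': 'x'}) == {'id': '17'}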
def extender_controller_extender(data, fos):
vdom = data['vdom']
extender_controller_extender_data = data['extender_controller_extender']
filtered_data = filter_extender_controller_extender_data(extender_controller_extender_data)
if extender_controller_extender_data['state'] == "present":
return fos.set('extender-controller',
'extender',
data=filtered_data,
vdom=vdom)
elif extender_controller_extender_data['state'] == "absent":
return fos.delete('extender-controller',
'extender',
mkey=filtered_data['id'],
vdom=vdom)
def fortios_extender_controller(data, fos):
login(data)
methodlist = ['extender_controller_extender']
for method in methodlist:
if data[method]:
            resp = globals()[method](data, fos)  # dispatch by configured method name
            break
    fos.logout()
    return resp['status'] != "success", resp['status'] == "success", resp
def main():
fields = {
"host": {"required": True, "type": "str"},
"username": {"required": True, "type": "str"},
"password": {"required": False, "type": "str", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": "False"},
"extender_controller_extender": {
"required": False, "type": "dict",
"options": {
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"aaa-shared-secret": {"required": False, "type": "str"},
"access-point-name": {"required": False, "type": "str"},
"admin": {"required": False, "type": "str",
"choices": ["disable", "discovered", "enable"]},
"at-dial-script": {"required": False, "type": "str"},
"billing-start-day": {"required": False, "type": "int"},
"cdma-aaa-spi": {"required": False, "type": "str"},
"cdma-ha-spi": {"required": False, "type": "str"},
"cdma-nai": {"required": False, "type": "str"},
"conn-status": {"required": False, "type": "int"},
"description": {"required": False, "type": "str"},
"dial-mode": {"required": False, "type": "str",
"choices": ["dial-on-demand", "always-connect"]},
"dial-status": {"required": False, "type": "int"},
"ext-name": {"required": False, "type": "str"},
"ha-shared-secret": {"required": False, "type": "str"},
"id": {"required": True, "type": "str"},
"ifname": {"required": False, "type": "str"},
"initiated-update": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"mode": {"required": False, "type": "str",
"choices": ["standalone", "redundant"]},
"modem-passwd": {"required": False, "type": "str"},
"modem-type": {"required": False, "type": "str",
"choices": ["cdma", "gsm/lte", "wimax"]},
"multi-mode": {"required": False, "type": "str",
"choices": ["auto", "auto-3g", "force-lte",
"force-3g", "force-2g"]},
"ppp-auth-protocol": {"required": False, "type": "str",
"choices": ["auto", "pap", "chap"]},
"ppp-echo-request": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ppp-password": {"required": False, "type": "str"},
"ppp-username": {"required": False, "type": "str"},
"primary-ha": {"required": False, "type": "str"},
"quota-limit-mb": {"required": False, "type": "int"},
"redial": {"required": False, "type": "str",
"choices": ["none", "1", "2",
"3", "4", "5",
"6", "7", "8",
"9", "10"]},
"redundant-intf": {"required": False, "type": "str"},
"roaming": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"role": {"required": False, "type": "str",
"choices": ["none", "primary", "secondary"]},
"secondary-ha": {"required": False, "type": "str"},
"sim-pin": {"required": False, "type": "str"},
"vdom": {"required": False, "type": "int"},
"wimax-auth-protocol": {"required": False, "type": "str",
"choices": ["tls", "ttls"]},
"wimax-carrier": {"required": False, "type": "str"},
"wimax-realm": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
global fos
fos = FortiOSAPI()
is_error, has_changed, result = fortios_extender_controller(module.params, fos)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
anjan-srivastava/tornado
|
tornado/test/simple_httpclient_test.py
|
57
|
27142
|
from __future__ import absolute_import, division, print_function, with_statement
import collections
from contextlib import closing
import errno
import gzip
import logging
import os
import re
import socket
import ssl
import sys
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.httputil import HTTPHeaders, ResponseStartLine
from tornado.ioloop import IOLoop
from tornado.log import gen_log
from tornado.netutil import Resolver, bind_sockets
from tornado.simple_httpclient import SimpleAsyncHTTPClient, _default_ca_certs
from tornado.test.httpclient_test import ChunkHandler, CountdownHandler, HelloWorldHandler
from tornado.test import httpclient_test
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog
from tornado.test.util import skipOnTravis, skipIfNoIPv6, refusing_port, unittest
from tornado.web import RequestHandler, Application, asynchronous, url, stream_request_body
class SimpleHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
def get_http_client(self):
client = SimpleAsyncHTTPClient(io_loop=self.io_loop,
force_instance=True)
self.assertTrue(isinstance(client, SimpleAsyncHTTPClient))
return client
class TriggerHandler(RequestHandler):
def initialize(self, queue, wake_callback):
self.queue = queue
self.wake_callback = wake_callback
@asynchronous
def get(self):
logging.debug("queuing trigger")
self.queue.append(self.finish)
if self.get_argument("wake", "true") == "true":
self.wake_callback()
class HangHandler(RequestHandler):
@asynchronous
def get(self):
pass
class ContentLengthHandler(RequestHandler):
def get(self):
self.set_header("Content-Length", self.get_argument("value"))
self.write("ok")
class HeadHandler(RequestHandler):
def head(self):
self.set_header("Content-Length", "7")
class OptionsHandler(RequestHandler):
def options(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.write("ok")
class NoContentHandler(RequestHandler):
def get(self):
if self.get_argument("error", None):
self.set_header("Content-Length", "5")
self.write("hello")
self.set_status(204)
class SeeOtherPostHandler(RequestHandler):
def post(self):
redirect_code = int(self.request.body)
assert redirect_code in (302, 303), "unexpected body %r" % self.request.body
self.set_header("Location", "/see_other_get")
self.set_status(redirect_code)
class SeeOtherGetHandler(RequestHandler):
def get(self):
if self.request.body:
raise Exception("unexpected body %r" % self.request.body)
self.write("ok")
class HostEchoHandler(RequestHandler):
def get(self):
self.write(self.request.headers["Host"])
class NoContentLengthHandler(RequestHandler):
@asynchronous
def get(self):
if self.request.version.startswith('HTTP/1'):
# Emulate the old HTTP/1.0 behavior of returning a body with no
# content-length. Tornado handles content-length at the framework
# level so we have to go around it.
stream = self.request.connection.detach()
stream.write(b"HTTP/1.0 200 OK\r\n\r\n"
b"hello")
stream.close()
else:
self.finish('HTTP/1 required')
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
@stream_request_body
class RespondInPrepareHandler(RequestHandler):
def prepare(self):
self.set_status(403)
self.finish("forbidden")
class SimpleHTTPClientTestMixin(object):
def get_app(self):
# callable objects to finish pending /trigger requests
self.triggers = collections.deque()
return Application([
url("/trigger", TriggerHandler, dict(queue=self.triggers,
wake_callback=self.stop)),
url("/chunk", ChunkHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/hang", HangHandler),
url("/hello", HelloWorldHandler),
url("/content_length", ContentLengthHandler),
url("/head", HeadHandler),
url("/options", OptionsHandler),
url("/no_content", NoContentHandler),
url("/see_other_post", SeeOtherPostHandler),
url("/see_other_get", SeeOtherGetHandler),
url("/host_echo", HostEchoHandler),
url("/no_content_length", NoContentLengthHandler),
url("/echo_post", EchoPostHandler),
url("/respond_in_prepare", RespondInPrepareHandler),
], gzip=True)
def test_singleton(self):
# Class "constructor" reuses objects on the same IOLoop
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is
SimpleAsyncHTTPClient(self.io_loop))
# unless force_instance is used
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(self.io_loop,
force_instance=True))
# different IOLoops use different objects
with closing(IOLoop()) as io_loop2:
self.assertTrue(SimpleAsyncHTTPClient(self.io_loop) is not
SimpleAsyncHTTPClient(io_loop2))
def test_connection_limit(self):
with closing(self.create_client(max_clients=2)) as client:
self.assertEqual(client.max_clients, 2)
seen = []
# Send 4 requests. Two can be sent immediately, while the others
# will be queued
for i in range(4):
client.fetch(self.get_url("/trigger"),
lambda response, i=i: (seen.append(i), self.stop()))
self.wait(condition=lambda: len(self.triggers) == 2)
self.assertEqual(len(client.queue), 2)
# Finish the first two requests and let the next two through
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: (len(self.triggers) == 2 and
len(seen) == 2))
self.assertEqual(set(seen), set([0, 1]))
self.assertEqual(len(client.queue), 0)
# Finish all the pending requests
self.triggers.popleft()()
self.triggers.popleft()()
self.wait(condition=lambda: len(seen) == 4)
self.assertEqual(set(seen), set([0, 1, 2, 3]))
self.assertEqual(len(self.triggers), 0)
def test_redirect_connection_limit(self):
# following redirects should not consume additional connections
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/countdown/3'), self.stop,
max_redirects=3)
response = self.wait()
response.rethrow()
def test_gzip(self):
# All the tests in this file should be using gzip, but this test
# ensures that it is in fact getting compressed.
# Setting Accept-Encoding manually bypasses the client's
# decompression so we can see the raw data.
response = self.fetch("/chunk", use_gzip=False,
headers={"Accept-Encoding": "gzip"})
self.assertEqual(response.headers["Content-Encoding"], "gzip")
self.assertNotEqual(response.body, b"asdfqwer")
# Our test data gets bigger when gzipped. Oops. :)
# Chunked encoding bypasses the MIN_LENGTH check.
self.assertEqual(len(response.body), 34)
f = gzip.GzipFile(mode="r", fileobj=response.buffer)
self.assertEqual(f.read(), b"asdfqwer")
def test_max_redirects(self):
response = self.fetch("/countdown/5", max_redirects=3)
self.assertEqual(302, response.code)
# We requested 5, followed three redirects for 4, 3, 2, then the last
# unfollowed redirect is to 1.
self.assertTrue(response.request.url.endswith("/countdown/5"))
self.assertTrue(response.effective_url.endswith("/countdown/2"))
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
def test_header_reuse(self):
# Apps may reuse a headers object if they are only passing in constant
# headers like user-agent. The header object should not be modified.
headers = HTTPHeaders({'User-Agent': 'Foo'})
self.fetch("/hello", headers=headers)
self.assertEqual(list(headers.get_all()), [('User-Agent', 'Foo')])
def test_see_other_redirect(self):
for code in (302, 303):
response = self.fetch("/see_other_post", method="POST", body="%d" % code)
self.assertEqual(200, response.code)
self.assertTrue(response.request.url.endswith("/see_other_post"))
self.assertTrue(response.effective_url.endswith("/see_other_get"))
# request is the original request, is a POST still
self.assertEqual("POST", response.request.method)
@skipOnTravis
def test_request_timeout(self):
timeout = 0.1
timeout_min, timeout_max = 0.099, 0.15
if os.name == 'nt':
timeout = 0.5
timeout_min, timeout_max = 0.4, 0.6
response = self.fetch('/trigger?wake=false', request_timeout=timeout)
self.assertEqual(response.code, 599)
self.assertTrue(timeout_min < response.request_time < timeout_max,
response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout")
# trigger the hanging request to let it clean up after itself
self.triggers.popleft()()
@skipIfNoIPv6
def test_ipv6(self):
try:
[sock] = bind_sockets(None, '::1', family=socket.AF_INET6)
port = sock.getsockname()[1]
self.http_server.add_socket(sock)
except socket.gaierror as e:
if e.args[0] == socket.EAI_ADDRFAMILY:
# python supports ipv6, but it's not configured on the network
# interface, so skip this test.
return
raise
url = '%s://[::1]:%d/hello' % (self.get_protocol(), port)
# ipv6 is currently enabled by default but can be disabled
self.http_client.fetch(url, self.stop, allow_ipv6=False)
response = self.wait()
self.assertEqual(response.code, 599)
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(response.body, b"Hello world!")
def xtest_multiple_content_length_accepted(self):
response = self.fetch("/content_length?value=2,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,%202,2")
self.assertEqual(response.body, b"ok")
response = self.fetch("/content_length?value=2,4")
self.assertEqual(response.code, 599)
response = self.fetch("/content_length?value=2,%202,3")
self.assertEqual(response.code, 599)
def test_head_request(self):
response = self.fetch("/head", method="HEAD")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "7")
self.assertFalse(response.body)
def test_options_request(self):
response = self.fetch("/options", method="OPTIONS")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["content-length"], "2")
self.assertEqual(response.headers["access-control-allow-origin"], "*")
self.assertEqual(response.body, b"ok")
def test_no_content(self):
response = self.fetch("/no_content")
self.assertEqual(response.code, 204)
# 204 status doesn't need a content-length, but tornado will
# add a zero content-length anyway.
#
# A test without a content-length header is included below
# in HTTP204NoContentTestCase.
self.assertEqual(response.headers["Content-length"], "0")
# 204 status with non-zero content length is malformed
with ExpectLog(gen_log, "Malformed HTTP message"):
response = self.fetch("/no_content?error=1")
self.assertEqual(response.code, 599)
def test_host_header(self):
host_re = re.compile(b"^localhost:[0-9]+$")
response = self.fetch("/host_echo")
self.assertTrue(host_re.match(response.body))
url = self.get_url("/host_echo").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertTrue(host_re.match(response.body), response.body)
def test_connection_refused(self):
cleanup_func, port = refusing_port()
self.addCleanup(cleanup_func)
with ExpectLog(gen_log, ".*", required=False):
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
response = self.wait()
self.assertEqual(599, response.code)
if sys.platform != 'cygwin':
# cygwin returns EPERM instead of ECONNREFUSED here
contains_errno = str(errno.ECONNREFUSED) in str(response.error)
if not contains_errno and hasattr(errno, "WSAECONNREFUSED"):
contains_errno = str(errno.WSAECONNREFUSED) in str(response.error)
self.assertTrue(contains_errno, response.error)
# This is usually "Connection refused".
# On windows, strerror is broken and returns "Unknown error".
expected_message = os.strerror(errno.ECONNREFUSED)
self.assertTrue(expected_message in str(response.error),
response.error)
def test_queue_timeout(self):
with closing(self.create_client(max_clients=1)) as client:
client.fetch(self.get_url('/trigger'), self.stop,
request_timeout=10)
# Wait for the trigger request to block, not complete.
self.wait()
client.fetch(self.get_url('/hello'), self.stop,
connect_timeout=0.1)
response = self.wait()
self.assertEqual(response.code, 599)
self.assertTrue(response.request_time < 1, response.request_time)
self.assertEqual(str(response.error), "HTTP 599: Timeout")
self.triggers.popleft()()
self.wait()
def test_no_content_length(self):
response = self.fetch("/no_content_length")
if response.body == b"HTTP/1 required":
self.skipTest("requires HTTP/1.x")
else:
            self.assertEqual(b"hello", response.body)
def sync_body_producer(self, write):
write(b'1234')
write(b'5678')
@gen.coroutine
def async_body_producer(self, write):
yield write(b'1234')
yield gen.Task(IOLoop.current().add_callback)
yield write(b'5678')
def test_sync_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_sync_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.sync_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_chunked(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer)
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_async_body_producer_content_length(self):
response = self.fetch("/echo_post", method="POST",
body_producer=self.async_body_producer,
headers={'Content-Length': '8'})
response.rethrow()
self.assertEqual(response.body, b"12345678")
def test_100_continue(self):
response = self.fetch("/echo_post", method="POST",
body=b"1234",
expect_100_continue=True)
self.assertEqual(response.body, b"1234")
def test_100_continue_early_response(self):
def body_producer(write):
raise Exception("should not be called")
response = self.fetch("/respond_in_prepare", method="POST",
body_producer=body_producer,
expect_100_continue=True)
self.assertEqual(response.code, 403)
class SimpleHTTPClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPTestCase):
def setUp(self):
super(SimpleHTTPClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
**kwargs)
class SimpleHTTPSClientTestCase(SimpleHTTPClientTestMixin, AsyncHTTPSTestCase):
def setUp(self):
super(SimpleHTTPSClientTestCase, self).setUp()
self.http_client = self.create_client()
def create_client(self, **kwargs):
return SimpleAsyncHTTPClient(self.io_loop, force_instance=True,
defaults=dict(validate_cert=False),
**kwargs)
def test_ssl_options(self):
resp = self.fetch("/hello", ssl_options={})
self.assertEqual(resp.body, b"Hello world!")
@unittest.skipIf(not hasattr(ssl, 'SSLContext'),
'ssl.SSLContext not present')
def test_ssl_context(self):
resp = self.fetch("/hello",
ssl_options=ssl.SSLContext(ssl.PROTOCOL_SSLv23))
self.assertEqual(resp.body, b"Hello world!")
def test_ssl_options_handshake_fail(self):
with ExpectLog(gen_log, "SSL Error|Uncaught exception",
required=False):
resp = self.fetch(
"/hello", ssl_options=dict(cert_reqs=ssl.CERT_REQUIRED))
self.assertRaises(ssl.SSLError, resp.rethrow)
@unittest.skipIf(not hasattr(ssl, 'SSLContext'),
'ssl.SSLContext not present')
def test_ssl_context_handshake_fail(self):
with ExpectLog(gen_log, "SSL Error|Uncaught exception"):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
resp = self.fetch("/hello", ssl_options=ctx)
self.assertRaises(ssl.SSLError, resp.rethrow)
def test_error_logging(self):
# No stack traces are logged for SSL errors (in this case,
# failure to validate the testing self-signed cert).
# The SSLError is exposed through ssl.SSLError.
with ExpectLog(gen_log, '.*') as expect_log:
response = self.fetch("/", validate_cert=True)
self.assertEqual(response.code, 599)
self.assertIsInstance(response.error, ssl.SSLError)
self.assertFalse(expect_log.logged_stack)
class CreateAsyncHTTPClientTestCase(AsyncTestCase):
def setUp(self):
super(CreateAsyncHTTPClientTestCase, self).setUp()
self.saved = AsyncHTTPClient._save_configuration()
def tearDown(self):
AsyncHTTPClient._restore_configuration(self.saved)
super(CreateAsyncHTTPClientTestCase, self).tearDown()
def test_max_clients(self):
AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
with closing(AsyncHTTPClient(
self.io_loop, force_instance=True)) as client:
self.assertEqual(client.max_clients, 10)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=11, force_instance=True)) as client:
self.assertEqual(client.max_clients, 11)
# Now configure max_clients statically and try overriding it
# with each way max_clients can be passed
AsyncHTTPClient.configure(SimpleAsyncHTTPClient, max_clients=12)
with closing(AsyncHTTPClient(
self.io_loop, force_instance=True)) as client:
self.assertEqual(client.max_clients, 12)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=13, force_instance=True)) as client:
self.assertEqual(client.max_clients, 13)
with closing(AsyncHTTPClient(
self.io_loop, max_clients=14, force_instance=True)) as client:
self.assertEqual(client.max_clients, 14)
class HTTP100ContinueTestCase(AsyncHTTPTestCase):
def respond_100(self, request):
self.http1 = request.version.startswith('HTTP/1.')
if not self.http1:
request.connection.write_headers(ResponseStartLine('', 200, 'OK'),
HTTPHeaders())
request.connection.finish()
return
self.request = request
self.request.connection.stream.write(
b"HTTP/1.1 100 CONTINUE\r\n\r\n",
self.respond_200)
def respond_200(self):
self.request.connection.stream.write(
b"HTTP/1.1 200 OK\r\nContent-Length: 1\r\n\r\nA",
self.request.connection.stream.close)
def get_app(self):
# Not a full Application, but works as an HTTPServer callback
return self.respond_100
def test_100_continue(self):
res = self.fetch('/')
if not self.http1:
self.skipTest("requires HTTP/1.x")
self.assertEqual(res.body, b'A')
class HTTP204NoContentTestCase(AsyncHTTPTestCase):
def respond_204(self, request):
self.http1 = request.version.startswith('HTTP/1.')
if not self.http1:
# Close the request cleanly in HTTP/2; it will be skipped anyway.
request.connection.write_headers(ResponseStartLine('', 200, 'OK'),
HTTPHeaders())
request.connection.finish()
return
        # A 204 response never has a body, even if it doesn't have a content-length
# (which would otherwise mean read-until-close). Tornado always
# sends a content-length, so we simulate here a server that sends
# no content length and does not close the connection.
#
# Tests of a 204 response with a Content-Length header are included
# in SimpleHTTPClientTestMixin.
stream = request.connection.detach()
stream.write(
b"HTTP/1.1 204 No content\r\n\r\n")
stream.close()
def get_app(self):
return self.respond_204
def test_204_no_content(self):
resp = self.fetch('/')
if not self.http1:
self.skipTest("requires HTTP/1.x")
self.assertEqual(resp.code, 204)
self.assertEqual(resp.body, b'')
class HostnameMappingTestCase(AsyncHTTPTestCase):
def setUp(self):
super(HostnameMappingTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
self.io_loop,
hostname_mapping={
'www.example.com': '127.0.0.1',
('foo.example.com', 8000): ('127.0.0.1', self.get_http_port()),
})
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_hostname_mapping(self):
self.http_client.fetch(
'http://www.example.com:%d/hello' % self.get_http_port(), self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
def test_port_mapping(self):
self.http_client.fetch('http://foo.example.com:8000/hello', self.stop)
response = self.wait()
response.rethrow()
self.assertEqual(response.body, b'Hello world!')
class ResolveTimeoutTestCase(AsyncHTTPTestCase):
def setUp(self):
# Dummy Resolver subclass that never invokes its callback.
class BadResolver(Resolver):
def resolve(self, *args, **kwargs):
pass
super(ResolveTimeoutTestCase, self).setUp()
self.http_client = SimpleAsyncHTTPClient(
self.io_loop,
resolver=BadResolver())
def get_app(self):
return Application([url("/hello", HelloWorldHandler), ])
def test_resolve_timeout(self):
response = self.fetch('/hello', connect_timeout=0.1)
self.assertEqual(response.code, 599)
class MaxHeaderSizeTest(AsyncHTTPTestCase):
def get_app(self):
class SmallHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 100)
self.write("ok")
class LargeHeaders(RequestHandler):
def get(self):
self.set_header("X-Filler", "a" * 1000)
self.write("ok")
return Application([('/small', SmallHeaders),
('/large', LargeHeaders)])
def get_http_client(self):
return SimpleAsyncHTTPClient(io_loop=self.io_loop, max_header_size=1024)
def test_small_headers(self):
response = self.fetch('/small')
response.rethrow()
self.assertEqual(response.body, b'ok')
def test_large_headers(self):
with ExpectLog(gen_log, "Unsatisfiable read"):
response = self.fetch('/large')
self.assertEqual(response.code, 599)
class MaxBodySizeTest(AsyncHTTPTestCase):
def get_app(self):
class SmallBody(RequestHandler):
def get(self):
self.write("a"*1024*64)
class LargeBody(RequestHandler):
def get(self):
self.write("a"*1024*100)
return Application([('/small', SmallBody),
('/large', LargeBody)])
def get_http_client(self):
return SimpleAsyncHTTPClient(io_loop=self.io_loop, max_body_size=1024*64)
def test_small_body(self):
response = self.fetch('/small')
response.rethrow()
self.assertEqual(response.body, b'a'*1024*64)
def test_large_body(self):
with ExpectLog(gen_log, "Malformed HTTP message from None: Content-Length too long"):
response = self.fetch('/large')
self.assertEqual(response.code, 599)
class MaxBufferSizeTest(AsyncHTTPTestCase):
def get_app(self):
class LargeBody(RequestHandler):
def get(self):
self.write("a"*1024*100)
return Application([('/large', LargeBody)])
def get_http_client(self):
# 100KB body with 64KB buffer
return SimpleAsyncHTTPClient(io_loop=self.io_loop, max_body_size=1024*100, max_buffer_size=1024*64)
def test_large_body(self):
response = self.fetch('/large')
response.rethrow()
self.assertEqual(response.body, b'a'*1024*100)
|
apache-2.0
|
invisiblek/python-for-android
|
python3-alpha/python3-src/Lib/distutils/version.py
|
145
|
12479
|
#
# distutils/version.py
#
# Implements multiple version numbering conventions for the
# Python Module Distribution Utilities.
#
# $Id$
#
"""Provides classes to represent module version numbers (one class for
each style of version numbering). There are currently two such classes
implemented: StrictVersion and LooseVersion.
Every version number class implements the following interface:
* the 'parse' method takes a string and parses it to some internal
representation; if the string is an invalid version number,
'parse' raises a ValueError exception
* the class constructor takes an optional string argument which,
if supplied, is passed to 'parse'
* __str__ reconstructs the string that was passed to 'parse' (or
   an equivalent string -- i.e. one that will generate an equivalent
version number instance)
* __repr__ generates Python code to recreate the version number instance
* _cmp compares the current instance with either another instance
of the same class or a string (which will be parsed to an instance
of the same class, thus must follow the same rules)
"""
import re
class Version:
"""Abstract base class for version numbering classes. Just provides
constructor (__init__) and reproducer (__repr__), because those
seem to be the same for all version numbering classes; and route
rich comparisons to _cmp.
"""
def __init__ (self, vstring=None):
if vstring:
self.parse(vstring)
def __repr__ (self):
return "%s ('%s')" % (self.__class__.__name__, str(self))
def __eq__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c == 0
def __ne__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c != 0
def __lt__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c < 0
def __le__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c <= 0
def __gt__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c > 0
def __ge__(self, other):
c = self._cmp(other)
if c is NotImplemented:
return c
return c >= 0
# Interface for version-number classes -- must be implemented
# by the following classes (the concrete ones -- Version should
# be treated as an abstract class).
# __init__ (string) - create and take same action as 'parse'
# (string parameter is optional)
# parse (string) - convert a string representation to whatever
# internal representation is appropriate for
# this style of version numbering
# __str__ (self) - convert back to a string; should be very similar
# (if not identical to) the string supplied to parse
# __repr__ (self) - generate Python code to recreate
# the instance
# _cmp (self, other) - compare two version numbers ('other' may
# be an unparsed version string, or another
# instance of your version class)
class StrictVersion (Version):
"""Version numbering for anal retentives and software idealists.
Implements the standard interface for version number classes as
described above. A version number consists of two or three
dot-separated numeric components, with an optional "pre-release" tag
on the end. The pre-release tag consists of the letter 'a' or 'b'
followed by a number. If the numeric components of two version
numbers are equal, then one with a pre-release tag will always
be deemed earlier (lesser) than one without.
The following are valid version numbers (shown in the order that
    would be obtained by sorting with the comparison methods defined above):
0.4 0.4.0 (these two are equivalent)
0.4.1
0.5a1
0.5b3
0.5
0.9.6
1.0
1.0.4a3
1.0.4b1
1.0.4
The following are examples of invalid version numbers:
1
2.7.2.2
1.3.a4
1.3pl1
1.3c4
The rationale for this version numbering system will be explained
in the distutils documentation.
"""
version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
re.VERBOSE | re.ASCII)
def parse (self, vstring):
match = self.version_re.match(vstring)
if not match:
raise ValueError("invalid version number '%s'" % vstring)
(major, minor, patch, prerelease, prerelease_num) = \
match.group(1, 2, 4, 5, 6)
if patch:
self.version = tuple(map(int, [major, minor, patch]))
else:
self.version = tuple(map(int, [major, minor])) + (0,)
if prerelease:
self.prerelease = (prerelease[0], int(prerelease_num))
else:
self.prerelease = None
def __str__ (self):
if self.version[2] == 0:
vstring = '.'.join(map(str, self.version[0:2]))
else:
vstring = '.'.join(map(str, self.version))
if self.prerelease:
vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
return vstring
def _cmp (self, other):
if isinstance(other, str):
other = StrictVersion(other)
if self.version != other.version:
# numeric versions don't match
# prerelease stuff doesn't matter
if self.version < other.version:
return -1
else:
return 1
# have to compare prerelease
# case 1: neither has prerelease; they're equal
# case 2: self has prerelease, other doesn't; other is greater
# case 3: self doesn't have prerelease, other does: self is greater
# case 4: both have prerelease: must compare them!
if (not self.prerelease and not other.prerelease):
return 0
elif (self.prerelease and not other.prerelease):
return -1
elif (not self.prerelease and other.prerelease):
return 1
elif (self.prerelease and other.prerelease):
if self.prerelease == other.prerelease:
return 0
elif self.prerelease < other.prerelease:
return -1
else:
return 1
else:
assert False, "never get here"
# end class StrictVersion
# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking him.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
class LooseVersion (Version):
"""Version numbering for anarchists and software realists.
Implements the standard interface for version number classes as
described above. A version number consists of a series of numbers,
separated by either periods or strings of letters. When comparing
version numbers, the numeric components will be compared
numerically, and the alphabetic components lexically. The following
are all valid version numbers, in no particular order:
1.5.1
1.5.2b2
161
3.10a
8.02
3.4j
1996.07.12
3.2.pl0
3.1.1.6
2g6
11g
0.960923
2.2beta29
1.13++
5.5.kw
2.0b1pl0
In fact, there is no such thing as an invalid version number under
this scheme; the rules for comparison are simple and predictable,
but may not always give the results you want (for some definition
of "want").
"""
component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
def __init__ (self, vstring=None):
if vstring:
self.parse(vstring)
def parse (self, vstring):
# I've given up on thinking I can reconstruct the version string
# from the parsed tuple -- so I just store the string here for
# use by __str__
self.vstring = vstring
components = [x for x in self.component_re.split(vstring)
if x and x != '.']
for i, obj in enumerate(components):
try:
components[i] = int(obj)
except ValueError:
pass
self.version = components
def __str__ (self):
return self.vstring
def __repr__ (self):
return "LooseVersion ('%s')" % str(self)
def _cmp (self, other):
if isinstance(other, str):
other = LooseVersion(other)
if self.version == other.version:
return 0
if self.version < other.version:
return -1
if self.version > other.version:
return 1
# end class LooseVersion
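# Quick sanity sketch (illustrative): StrictVersion orders pre-releases before
# the final release, while LooseVersion's plain tuple comparison does not.
if __name__ == "__main__":
    assert StrictVersion("1.0.4a3") < StrictVersion("1.0.4")
    assert LooseVersion("0.99") < LooseVersion("0.99pl14") < LooseVersion("1.0")
    assert LooseVersion("1.5.2") < LooseVersion("1.5.2a2")  # pre-release sorts *after*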
|
apache-2.0
|
elmerdpadilla/iv
|
addons/l10n_fr_hr_payroll/__openerp__.py
|
374
|
2165
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'French Payroll',
'category': 'Localization/Payroll',
'author': 'Yannick Buron (SYNERPGY)',
'depends': ['hr_payroll', 'l10n_fr'],
'version': '1.0',
'description': """
French Payroll Rules.
=====================
- Configuration of hr_payroll for French localization
    - All main contribution rules for French payslips, for 'cadre' and 'non-cadre'
- New payslip report
TODO:
-----
- Integration with holidays module for deduction and allowance
- Integration with hr_payroll_account for the automatic account_move_line
creation from the payslip
    - Continue to integrate the contributions. Only the main contributions are
      currently implemented
- Remake the report under webkit
    - The payslip.line with appears_in_payslip = False should appear in the
      payslip interface, but not in the payslip report
""",
'active': False,
'data': [
'l10n_fr_hr_payroll_view.xml',
'l10n_fr_hr_payroll_data.xml',
'views/report_l10nfrfichepaye.xml',
'l10n_fr_hr_payroll_reports.xml',
],
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
vganapath/rally
|
tests/unit/plugins/openstack/scenarios/murano/test_environments.py
|
1
|
6220
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.murano import environments
from tests.unit import test
MURANO_SCENARIO = ("rally.plugins.openstack.scenarios.murano."
"environments")
class MuranoEnvironmentsTestCase(test.ScenarioTestCase):
def _get_context(self):
self.context.update({
"tenant": {
"packages": [mock.MagicMock(fully_qualified_name="fake")]
},
"user": {
"tenant_id": "fake_tenant_id"
},
"config": {
"murano_packages": {
"app_package": (
"rally-jobs/extra/murano/"
"applications/HelloReporter/"
"io.murano.apps.HelloReporter.zip")
}
}
})
return self.context
def test_list_environments(self):
TEST_TARGET = "ListEnvironments"
list_env_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_list_environments")
scenario = environments.ListEnvironments(self.context)
with mock.patch(list_env_module) as mock_list_env:
scenario.run()
mock_list_env.assert_called_once_with()
def test_create_and_delete_environment(self):
TEST_TARGET = "CreateAndDeleteEnvironment"
generate_random_name_module = ("{}.{}.{}").format(
MURANO_SCENARIO, TEST_TARGET, "generate_random_name")
create_env_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_create_environment")
create_session_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_create_session")
delete_env_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_delete_environment")
scenario = environments.CreateAndDeleteEnvironment(self.context)
with mock.patch(generate_random_name_module) as mock_random_name:
with mock.patch(create_env_module) as mock_create_env:
with mock.patch(create_session_module) as mock_create_session:
with mock.patch(delete_env_module) as mock_delete_env:
fake_env = mock.Mock(id="fake_id")
mock_create_env.return_value = fake_env
mock_random_name.return_value = "foo"
scenario.run()
mock_create_env.assert_called_once_with()
mock_create_session.assert_called_once_with(
fake_env.id)
mock_delete_env.assert_called_once_with(
fake_env)
def test_create_and_deploy_environment(self):
TEST_TARGET = "CreateAndDeployEnvironment"
create_env_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_create_environment")
create_session_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_create_session")
create_service_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_create_service")
deploy_env_module = ("{}.{}.{}").format(MURANO_SCENARIO,
TEST_TARGET,
"_deploy_environment")
scenario = environments.CreateAndDeployEnvironment(self.context)
with mock.patch(create_env_module) as mock_create_env:
with mock.patch(create_session_module) as mock_create_session:
with mock.patch(create_service_module) as mock_create_service:
with mock.patch(deploy_env_module) as mock_deploy_env:
fake_env = mock.MagicMock(id="fake_env_id")
mock_create_env.return_value = fake_env
fake_session = mock.Mock(id="fake_session_id")
mock_create_session.return_value = fake_session
scenario.context = self._get_context()
scenario.context["tenants"] = {
"fake_tenant_id": {
"packages": [mock.MagicMock()]
}
}
scenario.run(1)
mock_create_env.assert_called_once_with()
mock_create_session.assert_called_once_with(
fake_env.id)
mock_create_service.assert_called_once_with(
fake_env,
fake_session,
"fake",
atomic_action=False)
mock_deploy_env.assert_called_once_with(
fake_env, fake_session)
self._test_atomic_action_timer(
scenario.atomic_actions(),
"murano.create_services")
|
apache-2.0
|
mofarrell/osquery
|
tools/profile.py
|
28
|
13309
|
#!/usr/bin/env python
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
try:
import argparse
except ImportError:
print ("Cannot import argparse.")
exit(1)
import json
import os
import psutil
import subprocess
import sys
import time
# Import the testing utils
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/tests/")
import utils
KB = 1024 * 1024
RANGES = {
"colors": (utils.blue, utils.green, utils.yellow, utils.red),
"utilization": (8, 20, 50),
"cpu_time": (0.4, 1, 10),
"memory": (8 * KB, 12 * KB, 24 * KB),
"fds": (10, 20, 50),
"duration": (0.8, 1, 3),
}
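# Illustrative notes: KB above is really 2**20 (bytes per mebibyte), so the
# "memory" thresholds are 8/12/24 MB of RSS. rank() in summary() maps a value
# to the index of the first threshold it falls below, e.g. a utilization of 15
# is below 20 but not below 8, giving rank 1, which summary_line() renders
# with RANGES["colors"][1] (green).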
def get_stats(p, interval=1):
"""Run psutil and downselect the information."""
utilization = p.cpu_percent(interval=interval)
return {
"utilization": utilization,
"counters": p.io_counters() if utils.platform() != "darwin" else None,
"fds": p.num_fds(),
"cpu_times": p.cpu_times(),
"memory": p.memory_info_ex(),
}
def check_leaks_linux(shell, query, count=1, supp_file=None):
"""Run valgrind using the shell and a query, parse leak reports."""
suppressions = "" if supp_file is None else "--suppressions=%s" % supp_file
cmd = "valgrind --tool=memcheck %s %s --iterations=%d --query=\"%s\"" % (
suppressions, shell, count, query)
proc = subprocess.Popen(
cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
_, stderr = proc.communicate()
summary = {
"definitely": None,
"indirectly": None,
"possibly": None,
}
if args.verbose:
print (stderr)
for line in stderr.split("\n"):
for key in summary:
if line.find(key) >= 0:
summary[key] = line.split(":")[1].strip()
return summary
def check_leaks_darwin(shell, query, count=1):
# Run the shell with a --delay flag such that leaks can attach before exit.
proc = subprocess.Popen(
[shell, "--query", query, "--iterations", str(count), "--delay", "1"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
leak_checks = None
while proc.poll() is None:
# Continue to run leaks until the monitored shell exits.
leaks = subprocess.Popen(
["leaks", "%s" % proc.pid],
stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, _ = leaks.communicate()
if args.verbose:
print (stdout)
try:
for line in stdout.split("\n"):
if line.find("total leaked bytes") >= 0:
leak_checks = line.split(":")[1].strip()
        except Exception:
print ("Encountered exception while running leaks:")
print (stdout)
return {"definitely": leak_checks}
def check_leaks(shell, query, count=1, supp_file=None):
if utils.platform() == "darwin":
return check_leaks_darwin(shell, query, count=count)
else:
return check_leaks_linux(shell, query, count=count, supp_file=supp_file)
def profile_leaks(shell, queries, count=1, rounds=1, supp_file=None):
report = {}
for name, query in queries.iteritems():
print ("Analyzing leaks in query: %s" % query)
# Apply count (optionally run the query several times).
summary = check_leaks(shell, query, count, supp_file)
display = []
for key in summary:
output = summary[key]
if output is not None and output[0] != "0":
# Add some fun colored output if leaking.
if key == "definitely":
output = utils.red(output)
report[name] = "LEAKING"
if key == "indirectly":
output = utils.yellow(output)
report[name] = "WARNING"
else:
report[name] = "SAFE"
display.append("%s: %s" % (key, output))
print (" %s" % "; ".join(display))
return report
def run_query(shell, query, timeout=0, count=1):
"""Execute the osquery run testing wrapper with a setup/teardown delay."""
start_time = time.time()
proc = subprocess.Popen(
[shell, "--query", query, "--iterations", str(count),
"--delay", "1"],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p = psutil.Process(pid=proc.pid)
delay = 0
step = 0.5
percents = []
# Calculate the CPU utilization in intervals of 1 second.
stats = {}
while p.is_running() and p.status() != psutil.STATUS_ZOMBIE:
try:
current_stats = get_stats(p, step)
if (current_stats["memory"].rss == 0):
break
stats = current_stats
percents.append(stats["utilization"])
except psutil.AccessDenied:
break
delay += step
if timeout > 0 and delay >= timeout + 2:
proc.kill()
break
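    # Trim the ~2s of --delay setup/teardown from the measured wall time.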
duration = time.time() - start_time - 2
utilization = [percent for percent in percents if percent != 0]
if len(utilization) == 0:
avg_utilization = 0
else:
avg_utilization = sum(utilization) / len(utilization)
return {
"utilization": avg_utilization,
"duration": duration,
"memory": stats["memory"].rss,
"user_time": stats["cpu_times"].user,
"system_time": stats["cpu_times"].system,
"cpu_time": stats["cpu_times"].user + stats["cpu_times"].system,
"fds": stats["fds"],
"exit": p.wait(),
}
def summary_line(name, result):
if not args.n:
for key, v in result.iteritems():
print ("%s" % (
RANGES["colors"][v[0]]("%s:%s" % (
key[0].upper(), v[0]))),
end="")
print (" ", end="")
print ("%s:" % name, end=" ")
for key, v in result.iteritems():
print ("%s: %s" % (key, v[1]), end=" ")
print ("")
def summary(results, display=False):
"""Map the results to simple thresholds."""
def rank(value, ranges):
for i, r in enumerate(ranges):
if value < r:
return i
return len(ranges)
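    # e.g. rank(15, RANGES["utilization"]) == 1, since 8 <= 15 < 20.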
summary_results = {}
for name, result in results.iteritems():
failed = "exit" in result and result["exit"] > 0
summary_result = {}
for key in RANGES:
if key == "colors":
continue
if key not in result:
continue
if failed:
summary_result[key] = (len(RANGES["colors"]) - 1, -1)
else:
summary_result[key] = (rank(result[key], RANGES[key]),
result[key])
if display and not args.check:
summary_line(name, summary_result)
summary_results[name] = summary_result
return summary_results
def profile(shell, queries, timeout=0, count=1, rounds=1):
report = {}
for name, query in queries.iteritems():
print ("Profiling query: %s" % query)
results = {}
for i in range(rounds):
result = run_query(shell, query, timeout=timeout, count=count)
summary(
{"%s (%d/%d)" % (name, i + 1, rounds): result}, display=True)
# Store each result round to return an average.
for k, v in result.iteritems():
results[k] = results.get(k, [])
results[k].append(v)
average_results = {}
for k in results:
average_results[k] = sum(results[k]) / len(results[k])
report[name] = average_results
if rounds > 1:
summary({"%s avg" % name: report[name]}, display=True)
return report
def compare(profile1, profile2):
"""Compare two jSON profile outputs."""
for table in profile1:
if table not in profile2:
# No comparison possible
continue
summary_line(table, profile1[table])
summary_line(table, profile2[table])
def regress_check(profile1, profile2):
regressed = False
for table in profile1:
if table not in profile2:
continue
for measure in profile1[table]:
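            # Index 0 of each measure tuple is its threshold rank; higher is worse.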
if profile2[table][measure][0] > profile1[table][measure][0]:
print ("%s %s has regressed (%s->%s)!" % (table, measure,
profile1[table][measure][0], profile2[table][measure][0]))
regressed = True
if not regressed:
print ("No regressions!")
return 0
return 1
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=(
"Profile osquery, individual tables, "
"or a set of osqueryd config queries."
))
parser.add_argument(
"-n", action="store_true", default=False,
help="Do not output colored ranks."
)
parser.add_argument(
"--verbose", action="store_true", default=False, help="Be verbose.")
parser.add_argument(
"--leaks", default=False, action="store_true",
help="Check for memory leaks instead of performance."
)
group = parser.add_argument_group("Query Options:")
group.add_argument(
"--restrict", metavar="LIST", default="",
help="Limit to a list of comma-separated tables."
)
group.add_argument(
"--tables", metavar="PATH", default="./specs",
help="Path to the osquery table specs."
)
group.add_argument(
"--config", metavar="FILE", default=None,
help="Use scheduled queries from a config."
)
group.add_argument(
"--query", metavar="STRING", default=None,
help="Profile a single query."
)
group = parser.add_argument_group("Run Options:")
group.add_argument(
"--timeout", metavar="N", default=0, type=int,
help="Max seconds a query may run --count times."
)
group.add_argument(
"--count", metavar="N", default=1, type=int,
help="Run the query N times serially."
)
group.add_argument(
"--rounds", metavar="N", default=1, type=int,
help="Run the profile for N rounds and use the average."
)
group.add_argument(
"--shell", metavar="PATH", default="./build/%s/osquery/run" % (
utils.platform()),
help="Path to osquery run wrapper (./build/<sys>/osquery/run)."
)
group = parser.add_argument_group("Performance Options:")
group.add_argument(
"--output", metavar="FILE", default=None,
help="Write JSON performance output to file."
)
group.add_argument(
"--check", metavar="OLD_OUTPUT", nargs=1,
help="Check regressions using an existing output."
)
group.add_argument(
"--compare", metavar="FILE", nargs=2,
help="Compare existing performance outputs (old, new)."
)
group = parser.add_argument_group("Memory Options:")
group.add_argument(
"--suppressions", metavar="SUPP", default="./tools/tests/osquery.supp",
help="Add a suppressions files to memory leak checking (linux only)."
)
args = parser.parse_args()
if args.compare:
with open(args.compare[0]) as fh:
profile1 = json.loads(fh.read())
with open(args.compare[1]) as fh:
profile2 = json.loads(fh.read())
compare(profile1, profile2)
exit(0)
if args.check:
with open(args.check[0]) as fh:
profile1 = json.loads(fh.read())
if not os.path.exists(args.shell):
print ("Cannot find --daemon: %s" % (args.shell))
exit(1)
if args.config is None and not os.path.exists(args.tables):
print ("Cannot find --tables: %s" % (args.tables))
exit(1)
queries = {}
if args.config is not None:
if not os.path.exists(args.config):
print ("Cannot find --config: %s" % (args.config))
exit(1)
queries = utils.queries_from_config(args.config)
elif args.query is not None:
queries["manual"] = args.query
else:
queries = utils.queries_from_tables(args.tables, args.restrict)
if args.leaks:
results = profile_leaks(
args.shell, queries, count=args.count,
rounds=args.rounds, supp_file=args.suppressions
)
else:
# Start the profiling!
results = profile(
args.shell, queries,
timeout=args.timeout, count=args.count, rounds=args.rounds
)
# Only apply checking/regressions to performance, not leaks.
if args.check:
exit(regress_check(profile1, summary(results)))
if args.output is not None:
with open(args.output, "w") as fh:
if args.leaks:
# Leaks report does not need a summary view.
fh.write(json.dumps(results, indent=1))
else:
fh.write(json.dumps(summary(results), indent=1))
print ("Wrote output summary: %s" % args.output)
|
bsd-3-clause
|
10fish/shadowsocks
|
shadowsocks/crypto/__init__.py
|
26
|
1111
|
#!/usr/bin/env python
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
mit
|
Spindletop16/namebench
|
nb_third_party/dns/rdtypes/ANY/__init__.py
|
243
|
1191
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Class ANY (generic) rdata type classes."""
__all__ = [
'AFSDB',
'CERT',
'CNAME',
'DLV',
'DNAME',
'DNSKEY',
'DS',
'GPOS',
'HINFO',
'HIP',
'ISDN',
'KEY',
'LOC',
'MX',
'NS',
'NSEC',
'NSEC3',
'NSEC3PARAM',
'NXT',
'PTR',
'RP',
'RRSIG',
'RT',
'SIG',
'SOA',
'SPF',
'SSHFP',
'TXT',
'X25',
]
|
apache-2.0
|
avaidyam/Binoculars
|
bin/plps/scripts/prepare_ligands.py
|
1
|
6755
|
import os
import sys
keywords = ['PLPS_path', 'PDB2PQR_path', 'APBS_path', 'XLOGP3_path', 'ligand_file', 'BABEL_path',\
'n_conf', 'OMEGA_path']
def read_input(input_file):
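    """Parse the key/value input file and return the configured paths and options."""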
file = open(input_file, 'r')
lig_file = []
for line in file:
key = line.split()[0]
if(key == keywords[0]):
PLPS_dir = line.split()[1]
elif(key == keywords[1]):
PDB2PQR_dir = line.split()[1]
elif(key == keywords[2]):
APBS_dir = line.split()[1]
elif(key == keywords[3]):
XLOGP3_dir = line.split()[1]
elif(key == keywords[4]):
lig_file.append(line.split()[1])
elif(key == keywords[5]):
BABEL_dir = line.split()[1]
elif(key == keywords[6]):
n_conf = int(line.split()[1])
elif(key == keywords[7]):
OMEGA_dir = line.split()[1]
elif(key not in keywords):
            sys.exit('Please enter a proper parameter name in the input file')
return PLPS_dir, PDB2PQR_dir, APBS_dir, XLOGP3_dir, lig_file, BABEL_dir, n_conf, OMEGA_dir
def split_conf(mol_id):
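    """Split the multi-conformer mol2 written by OMEGA into one file per conformer."""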
conf_file = '%s_omega.mol2'%(mol_id)
file = open(conf_file, 'r')
i_conf = 0
for line in file:
if(line[0:17] == '@<TRIPOS>MOLECULE'):
i_conf += 1
if(i_conf < 10):
t_conf_file = '%s_conf_0%i.mol2'%(mol_id, i_conf)
else:
t_conf_file = '%s_conf_%i.mol2'%(mol_id, i_conf)
t_file = open(t_conf_file, 'w')
        t_file.write(line)
t_file.close()
return i_conf
def generate_ssic(mol_id, i_conf, BABEL, PDB2PQR, script_dir, apbs_tool, APBS, bin_dir):
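    """Convert one conformer to PDB/PQR, run APBS, and generate its .ssic descriptor."""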
if(i_conf+1 < 10):
conf_pref = '%s_conf_0%i'%(mol_id, i_conf+1)
else:
conf_pref = '%s_conf_%i'%(mol_id, i_conf+1)
file = open('%s.mol2'%(conf_pref), 'a')
file.write('@<TRIPOS>SUBSTRUCTURE\n')
file.write(' 1 **** 1 TEMP 0 **** **** 0 ROOT\n')
file.close()
os.system("sed -i 's/<0>/MOL/g' %s.mol2"%(conf_pref))
os.system('%s -imol2 %s.mol2 -opdb %s.pdb'%(BABEL, conf_pref, conf_pref))
os.system("sed -i 's/ATOM /HETATM/g' %s.pdb"%(conf_pref))
os.system('%s --ligand=%s.mol2 --ff=amber %s.pdb %s.pqr'%(PDB2PQR, conf_pref, conf_pref, conf_pref))
convert_success = check_convert('%s.pqr'%(conf_pref))
if(not convert_success):
os.system('python %s/mol2topqr.py %s.mol2 %s.pqr'%(script_dir, conf_pref, conf_pref))
os.system("sed -i 's/HETATM/ATOM /g' %s.pdb"%(conf_pref))
os.system("sed -i 's/HETATM/ATOM /g' %s.pqr"%(conf_pref))
os.system('%s/psize.py %s.pqr > %s.psize'%(apbs_tool, conf_pref, conf_pref))
grid_pts, cntr_crd = get_grid_info('%s.psize'%(conf_pref))
write_apbs_input(conf_pref, grid_pts, cntr_crd)
os.system('%s %s.in'%(APBS, conf_pref))
os.system('%s/genLocInvPocketLig -s %s_smol.dx -d %s_pot.dx -q %s.pqr -xlp %s.xlp -o %s -l %s.pdb -mol2 %s.mol2 -rad 5 -psel -ar -sa 3.0'%(bin_dir, conf_pref, conf_pref, conf_pref, mol_id, conf_pref, conf_pref, conf_pref))
os.system('python %s/convert_seed_to_ssic.py %s.seed %s.ssic'%(script_dir, conf_pref, conf_pref))
def get_grid_info(psize_file):
file = open(psize_file, 'r')
grid_pts = []
cntr_crd = []
for line in file:
if(line.startswith('Num.')):
grid_pts.append(line.split()[5])
grid_pts.append(line.split()[7])
grid_pts.append(line.split()[9])
elif(line.startswith('Center')):
cntr_crd.append(line.split()[2])
cntr_crd.append(line.split()[4])
cntr_crd.append(line.split()[6])
file.close()
return grid_pts, cntr_crd
def write_apbs_input(conf_pref, grid_pts, cntr_crd):
input_file = '%s.in'%(conf_pref)
pqr_file = '%s.pqr'%(conf_pref)
pot_file = '%s_pot'%(conf_pref)
surf_file = '%s_smol'%(conf_pref)
file = open(input_file, 'w')
file.write('read\n')
file.write('mol pqr %s\n'%(pqr_file))
file.write('end\n\n')
file.write('# ENERGY OF PROTEIN CHUNK\n')
file.write('elec name solv\n')
file.write('mg-manual\n')
file.write('dime %s %s %s\n'%(grid_pts[0], grid_pts[1], grid_pts[2]))
file.write('grid 0.6 0.6 0.6\n')
file.write('gcent %s %s %s\n'%(cntr_crd[0], cntr_crd[1], cntr_crd[2]))
file.write('mol 1\n')
file.write('lpbe\n')
file.write('bcfl sdh\n')
file.write('pdie 2.0\n')
file.write('sdie 78.4\n')
file.write('chgm spl2\n')
file.write('srfm smol\n')
file.write('srad 1.4\n')
file.write('swin 0.3\n')
file.write('sdens 10.0\n')
file.write('temp 298.15\n')
file.write('calcenergy total\n')
file.write('calcforce no\n')
file.write('write pot dx %s\n'%(pot_file))
file.write('write smol dx %s\n'%(surf_file))
file.write('end\n\n')
file.write('quit\n')
file.close()
def check_convert(pqr_file):
convert_success = True
if(not os.path.isfile(pqr_file)):
convert_success = False
atom_exist = False
if(convert_success):
file = open(pqr_file, 'r')
for line in file:
if(line.startswith('ATOM') or line.startswith('HETATM')):
atom_exist = True
file.close()
if(not atom_exist):
convert_success = False
return convert_success
def main():
if(len(sys.argv) == 2):
input_file = sys.argv[1]
else:
        print 'USAGE: python prepare_ligands.py [input file]'
        sys.exit(1)
# read parameters and set variables for binary files
PLPS_dir, PDB2PQR_dir, APBS_dir, XLOGP3_dir, lig_file, BABEL_dir, max_conf, OMEGA_dir = read_input(input_file)
apbs_tool = PLPS_dir + '/apbs_tool'
script_dir = PLPS_dir + '/scripts'
bin_dir = PLPS_dir + '/bin'
XLOGP3 = XLOGP3_dir + '/xlogp3.lnx.x86'
OMEGA = OMEGA_dir + '/omega2'
PDB2PQR = PDB2PQR_dir + '/pdb2pqr'
APBS = APBS_dir + '/apbs'
BABEL = BABEL_dir + '/babel'
for ligand in lig_file:
mol_id = ligand[:-5]
os.system('%s -ewindow 15.0 -maxconfs %i -rmsrange "0.5,0.8,1.0" -rangeIncrement 5 -commentEnergy -in %s.mol2 -out %s_omega.mol2 -strictstereo false'%(OMEGA, max_conf, mol_id, mol_id))
n_conf = split_conf(mol_id)
os.system('%s -v %s_conf_01.mol2 %s.xlp'%(XLOGP3, mol_id, mol_id))
for i_conf in range(n_conf):
generate_ssic(mol_id, i_conf, BABEL, PDB2PQR, script_dir, apbs_tool, APBS, bin_dir)
os.system('rm %s_conf*.in %s*.dx %s*.psize %s*.seed %s*.pqr %s*conf*.mol2 %s.xlp %s_omega.mol2'%(mol_id, mol_id, mol_id, mol_id, mol_id, mol_id, mol_id, mol_id))
os.system('mkdir %s'%(mol_id))
os.system('mv %s*.pdb %s*.ssic %s'%(mol_id, mol_id, mol_id))
os.system('rm omega* io.mc')
main()
|
mit
|
mafiya69/sympy
|
sympy/solvers/tests/test_constantsimp.py
|
112
|
9317
|
"""
If the arbitrary constant class from issue 4435 is ever implemented, this
should serve as a set of test cases.
"""
from sympy import (acos, cos, cosh, Eq, exp, Function, I, Integral, log, Pow,
S, sin, sinh, sqrt, Symbol)
from sympy.solvers.ode import constant_renumber, constantsimp
from sympy.utilities.pytest import XFAIL
x = Symbol('x')
y = Symbol('y')
z = Symbol('z')
C1 = Symbol('C1')
C2 = Symbol('C2')
C3 = Symbol('C3')
f = Function('f')
def test_constant_mul():
# We want C1 (Constant) below to absorb the y's, but not the x's
assert constant_renumber(constantsimp(y*C1, [C1]), 'C', 1, 1) == C1*y
assert constant_renumber(constantsimp(C1*y, [C1]), 'C', 1, 1) == C1*y
assert constant_renumber(constantsimp(x*C1, [C1]), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(C1*x, [C1]), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(2*C1, [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1*2, [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(y*C1*x, [C1, y]), 'C', 1, 1) == C1*x
assert constant_renumber(constantsimp(x*y*C1, [C1, y]), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(y*x*C1, [C1, y]), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(C1*x*y, [C1, y]), 'C', 1, 1) == C1*x
assert constant_renumber(constantsimp(x*C1*y, [C1, y]), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(C1*y*(y + 1), [C1]), 'C', 1, 1) == C1*y*(y+1)
assert constant_renumber(constantsimp(y*C1*(y + 1), [C1]), 'C', 1, 1) == C1*y*(y+1)
assert constant_renumber(constantsimp(x*(y*C1), [C1]), 'C', 1, 1) == x*y*C1
assert constant_renumber(constantsimp(x*(C1*y), [C1]), 'C', 1, 1) == x*y*C1
assert constant_renumber(constantsimp(C1*(x*y), [C1, y]), 'C', 1, 1) == C1*x
assert constant_renumber(constantsimp((x*y)*C1, [C1, y]), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp((y*x)*C1, [C1, y]), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp(y*(y + 1)*C1, [C1, y]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp((C1*x)*y, [C1, y]), 'C', 1, 1) == C1*x
assert constant_renumber(constantsimp(y*(x*C1), [C1, y]), 'C', 1, 1) == x*C1
assert constant_renumber(constantsimp((x*C1)*y, [C1, y]), 'C', 1, 1) == x*C1
assert constant_renumber(
constantsimp(C1*x*y*x*y*2, [C1, y]), 'C', 1, 1) == C1*x**2
assert constant_renumber(constantsimp(C1*x*y*z, [C1, y, z]), 'C', 1, 1) == C1*x
assert constant_renumber(
constantsimp(C1*x*y**2*sin(z), [C1, y, z]), 'C', 1, 1) == C1*x
assert constant_renumber(constantsimp(C1*C1, [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1*C2, [C1, C2]), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C2*C2, [C1, C2]), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C1*C1*C2, [C1, C2]), 'C', 1, 2) == C1
assert constant_renumber(
constantsimp(C1*x*2**x, [C1]), 'C', 1, 1) == C1*x*2**x
def test_constant_add():
assert constant_renumber(constantsimp(C1 + C1, [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1 + 2, [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(2 + C1, [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1 + y, [C1, y]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1 + x, [C1]), 'C', 1, 1) == C1 + x
assert constant_renumber(constantsimp(C1 + C1, [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1 + C2, [C1, C2]), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C2 + C1, [C1, C2]), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C1 + C2 + C1, [C1, C2]), 'C', 1, 2) == C1
def test_constant_power_as_base():
assert constant_renumber(constantsimp(C1**C1, [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(Pow(C1, C1), [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1**C1, [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1**C2, [C1, C2]), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C2**C1, [C1, C2]), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C2**C2, [C1, C2]), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(C1**y, [C1, y]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(C1**x, [C1]), 'C', 1, 1) == C1**x
assert constant_renumber(constantsimp(C1**2, [C1]), 'C', 1, 1) == C1
assert constant_renumber(
constantsimp(C1**(x*y), [C1]), 'C', 1, 1) == C1**(x*y)
def test_constant_power_as_exp():
assert constant_renumber(constantsimp(x**C1, [C1]), 'C', 1, 1) == x**C1
assert constant_renumber(constantsimp(y**C1, [C1, y]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(x**y**C1, [C1, y]), 'C', 1, 1) == x**C1
assert constant_renumber(
constantsimp((x**y)**C1, [C1]), 'C', 1, 1) == (x**y)**C1
assert constant_renumber(
constantsimp(x**(y**C1), [C1, y]), 'C', 1, 1) == x**C1
assert constant_renumber(constantsimp(x**C1**y, [C1, y]), 'C', 1, 1) == x**C1
assert constant_renumber(
constantsimp(x**(C1**y), [C1, y]), 'C', 1, 1) == x**C1
assert constant_renumber(
constantsimp((x**C1)**y, [C1]), 'C', 1, 1) == (x**C1)**y
assert constant_renumber(constantsimp(2**C1, [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(S(2)**C1, [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(exp(C1), [C1]), 'C', 1, 1) == C1
assert constant_renumber(
constantsimp(exp(C1 + x), [C1]), 'C', 1, 1) == C1*exp(x)
assert constant_renumber(constantsimp(Pow(2, C1), [C1]), 'C', 1, 1) == C1
def test_constant_function():
assert constant_renumber(constantsimp(sin(C1), [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(f(C1), [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(f(C1, C1), [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(f(C1, C2), [C1, C2]), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(f(C2, C1), [C1, C2]), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(f(C2, C2), [C1, C2]), 'C', 1, 2) == C1
assert constant_renumber(
constantsimp(f(C1, x), [C1]), 'C', 1, 2) == f(C1, x)
assert constant_renumber(constantsimp(f(C1, y), [C1, y]), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(f(y, C1), [C1, y]), 'C', 1, 2) == C1
assert constant_renumber(constantsimp(f(C1, y, C2), [C1, C2, y]), 'C', 1, 2) == C1
def test_constant_function_multiple():
# The rules to not renumber in this case would be too complicated, and
# dsolve is not likely to ever encounter anything remotely like this.
assert constant_renumber(
constantsimp(f(C1, C1, x), [C1]), 'C', 1, 1) == f(C1, C1, x)
def test_constant_multiple():
assert constant_renumber(constantsimp(C1*2 + 2, [C1]), 'C', 1, 1) == C1
assert constant_renumber(constantsimp(x*2/C1, [C1]), 'C', 1, 1) == C1*x
assert constant_renumber(constantsimp(C1**2*2 + 2, [C1]), 'C', 1, 1) == C1
assert constant_renumber(
constantsimp(sin(2*C1) + x + sqrt(2), [C1]), 'C', 1, 1) == C1 + x
assert constant_renumber(constantsimp(2*C1 + C2, [C1, C2]), 'C', 1, 2) == C1
def test_constant_repeated():
assert C1 + C1*x == constant_renumber( C1 + C1*x, 'C', 1, 3)
def test_ode_solutions():
# only a few examples here, the rest will be tested in the actual dsolve tests
assert constant_renumber(constantsimp(C1*exp(2*x) + exp(x)*(C2 + C3), [C1, C2, C3]), 'C', 1, 3) == \
constant_renumber((C1*exp(x) + C2*exp(2*x)), 'C', 1, 2)
assert constant_renumber(
constantsimp(Eq(f(x), I*C1*sinh(x/3) + C2*cosh(x/3)), [C1, C2]),
'C', 1, 2) == constant_renumber(Eq(f(x), C1*sinh(x/3) + C2*cosh(x/3)), 'C', 1, 2)
assert constant_renumber(constantsimp(Eq(f(x), acos((-C1)/cos(x))), [C1]), 'C', 1, 1) == \
Eq(f(x), acos(C1/cos(x)))
assert constant_renumber(
constantsimp(Eq(log(f(x)/C1) + 2*exp(x/f(x)), 0), [C1]),
'C', 1, 1) == Eq(log(C1*f(x)) + 2*exp(x/f(x)), 0)
assert constant_renumber(constantsimp(Eq(log(x*sqrt(2)*sqrt(1/x)*sqrt(f(x))
/C1) + x**2/(2*f(x)**2), 0), [C1]), 'C', 1, 1) == \
Eq(log(C1*sqrt(x)*sqrt(f(x))) + x**2/(2*f(x)**2), 0)
assert constant_renumber(constantsimp(Eq(-exp(-f(x)/x)*sin(f(x)/x)/2 + log(x/C1) -
cos(f(x)/x)*exp(-f(x)/x)/2, 0), [C1]), 'C', 1, 1) == \
Eq(-exp(-f(x)/x)*sin(f(x)/x)/2 + log(C1*x) - cos(f(x)/x)*
exp(-f(x)/x)/2, 0)
u2 = Symbol('u2')
_a = Symbol('_a')
assert constant_renumber(constantsimp(Eq(-Integral(-1/(sqrt(1 - u2**2)*u2),
(u2, _a, x/f(x))) + log(f(x)/C1), 0), [C1]), 'C', 1, 1) == \
Eq(-Integral(-1/(u2*sqrt(1 - u2**2)), (u2, _a, x/f(x))) +
log(C1*f(x)), 0)
assert [constantsimp(i, [C1]) for i in [Eq(f(x), sqrt(-C1*x + x**2)), Eq(f(x), -sqrt(-C1*x + x**2))]] == \
[Eq(f(x), sqrt(x*(C1 + x))), Eq(f(x), -sqrt(x*(C1 + x)))]
@XFAIL
def test_nonlocal_simplification():
    assert constantsimp(C1 + C2 + x*C2, [C1, C2]) == C1 + C2*x
def test_constant_Eq():
# C1 on the rhs is well-tested, but the lhs is only tested here
assert constantsimp(Eq(C1, 3 + f(x)*x), [C1]) == Eq(x*f(x), C1)
assert constantsimp(Eq(C1, 3 * f(x)*x), [C1]) == Eq(f(x)*x, C1)
|
bsd-3-clause
|
koala-team/Chillin-PyServer
|
chillin_server/helpers/parser.py
|
1
|
2511
|
# -*- coding: utf-8 -*-
# python imports
import sys
import os
import imp
import inspect
from enum import Enum
# project imports
from . import messages
PY3 = sys.version_info > (3,)
class Parser:
def __init__(self, ks_command_files):
self._message_factory = MessageFactory(ks_command_files)
def encode(self, payload_obj):
msg = messages.Message()
msg.type, msg.payload = self.get_tuplestring(payload_obj)
return msg.serialize()
def decode(self, data):
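        # Unwrap the outer Message, then unwrap the nested command payload if present.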
mf = self._message_factory
msg = messages.Message()
msg.deserialize(data)
res = mf.get_message(msg.type)
res.deserialize(self.get_bytes(msg.payload))
msg_type = msg.type
msg = res
cmd_type = None
cmd = None
if isinstance(msg, messages.BaseCommand):
cmd_type = msg.type
cmd = mf.get_command(msg.type)
cmd.deserialize(self.get_bytes(msg.payload))
return msg_type, msg, cmd_type, cmd
@classmethod
def get_tuplestring(cls, serializable_obj):
return serializable_obj.name(), cls.get_string(serializable_obj.serialize())
    @staticmethod
    def get_string(data):
        return data.decode('ISO-8859-1') if PY3 else data
    @staticmethod
    def get_bytes(string):
        return string.encode('ISO-8859-1') if PY3 else string
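# A minimal usage sketch (hypothetical file name; assumes the payload objects
# expose name()/serialize()/deserialize() like the generated ks classes):
#     parser = Parser(['commands.ks.py'])
#     data = parser.encode(payload_obj)
#     msg_type, msg, cmd_type, cmd = parser.decode(data)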
class MessageFactory:
def __init__(self, ks_command_files):
self._installed_messages = self._load_messages()
self._installed_commands = self._load_commands(ks_command_files)
def _load_messages(self):
module = messages
return self._load_ks_objects([module])
def _load_commands(self, files):
modules = []
for file in files:
file = os.path.splitext(file)[0] + '.py'
name = os.path.splitext(os.path.basename(file))[0]
module = imp.load_source(name, file)
modules.append(module)
return self._load_ks_objects(modules)
def _load_ks_objects(self, modules):
objects = {}
for module in modules:
for _, member in inspect.getmembers(module, inspect.isclass):
if not issubclass(member, Enum):
objects[member.name()] = member
return objects
def get_message(self, message_name):
return self._installed_messages[message_name]()
def get_command(self, command_name):
return self._installed_commands[command_name]()
|
agpl-3.0
|
USGSDenverPychron/pychron
|
pychron/hardware/linear_axis.py
|
1
|
2527
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import Float, Property
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.hardware.core.abstract_device import AbstractDevice
class LinearAxis(AbstractDevice):
position = Property(depends_on='_position')
_position = Float
min_value = Float(0.0)
max_value = Float(100.0)
min_limit = Property(depends_on='_position')
max_limit = Property(depends_on='_position')
_slewing = False
def set_home(self):
if self._cdevice:
self._cdevice.set_home()
def set_position(self, v, **kw):
if self._cdevice:
self._cdevice.set_position(v, **kw)
# self.add_consumable((self._cdevice.set_position, v, kw))
# def relative_move(self, v):
# self.set_position(self._position + v)
def is_slewing(self):
return self._slewing
def is_stalled(self):
if self._cdevice:
return self._cdevice.stalled()
def slew(self, modifier):
if self._cdevice:
self._slewing = True
self._cdevice.slew(modifier)
def stop(self):
if self._cdevice:
self._slewing = False
self._cdevice.stop_drive()
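    # The limit flags compare against the soft limits with a small tolerance so
    # float jitter in the reported position never hides an endpoint.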
def _get_min_limit(self):
return abs(self._position - self.min_value) < 1e-5
def _get_max_limit(self):
return abs(self._position - self.max_value) < 1e-5
def _get_position(self):
return float('{:0.3f}'.format(self._position))
def _set_position(self, v):
self._position = v
if self._cdevice:
self.set_position(v)
# ============= EOF =============================================
|
apache-2.0
|
WeichenXu123/spark
|
python/pyspark/conf.py
|
20
|
7601
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
>>> from pyspark.conf import SparkConf
>>> from pyspark.context import SparkContext
>>> conf = SparkConf()
>>> conf.setMaster("local").setAppName("My app")
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.master")
u'local'
>>> conf.get("spark.app.name")
u'My app'
>>> sc = SparkContext(conf=conf)
>>> sc.master
u'local'
>>> sc.appName
u'My app'
>>> sc.sparkHome is None
True
>>> conf = SparkConf(loadDefaults=False)
>>> conf.setSparkHome("/path")
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.home")
u'/path'
>>> conf.setExecutorEnv("VAR1", "value1")
<pyspark.conf.SparkConf object at ...>
>>> conf.setExecutorEnv(pairs = [("VAR3", "value3"), ("VAR4", "value4")])
<pyspark.conf.SparkConf object at ...>
>>> conf.get("spark.executorEnv.VAR1")
u'value1'
>>> print(conf.toDebugString())
spark.executorEnv.VAR1=value1
spark.executorEnv.VAR3=value3
spark.executorEnv.VAR4=value4
spark.home=/path
>>> sorted(conf.getAll(), key=lambda p: p[0])
[(u'spark.executorEnv.VAR1', u'value1'), (u'spark.executorEnv.VAR3', u'value3'), \
(u'spark.executorEnv.VAR4', u'value4'), (u'spark.home', u'/path')]
>>> conf._jconf.setExecutorEnv("VAR5", "value5")
JavaObject id...
>>> print(conf.toDebugString())
spark.executorEnv.VAR1=value1
spark.executorEnv.VAR3=value3
spark.executorEnv.VAR4=value4
spark.executorEnv.VAR5=value5
spark.home=/path
"""
__all__ = ['SparkConf']
import sys
import re
if sys.version > '3':
unicode = str
__doc__ = re.sub(r"(\W|^)[uU](['])", r'\1\2', __doc__)
class SparkConf(object):
"""
Configuration for a Spark application. Used to set various Spark
parameters as key-value pairs.
Most of the time, you would create a SparkConf object with
``SparkConf()``, which will load values from `spark.*` Java system
properties as well. In this case, any parameters you set directly on
the :class:`SparkConf` object take priority over system properties.
For unit tests, you can also call ``SparkConf(false)`` to skip
loading external settings and get the same configuration no matter
what the system properties are.
All setter methods in this class support chaining. For example,
you can write ``conf.setMaster("local").setAppName("My app")``.
.. note:: Once a SparkConf object is passed to Spark, it is cloned
and can no longer be modified by the user.
"""
def __init__(self, loadDefaults=True, _jvm=None, _jconf=None):
"""
Create a new Spark configuration.
:param loadDefaults: whether to load values from Java system
properties (True by default)
:param _jvm: internal parameter used to pass a handle to the
Java VM; does not need to be set by users
:param _jconf: Optionally pass in an existing SparkConf handle
to use its parameters
"""
if _jconf:
self._jconf = _jconf
else:
from pyspark.context import SparkContext
_jvm = _jvm or SparkContext._jvm
if _jvm is not None:
# JVM is created, so create self._jconf directly through JVM
self._jconf = _jvm.SparkConf(loadDefaults)
self._conf = None
else:
# JVM is not created, so store data in self._conf first
self._jconf = None
self._conf = {}
def set(self, key, value):
"""Set a configuration property."""
# Try to set self._jconf first if JVM is created, set self._conf if JVM is not created yet.
if self._jconf is not None:
self._jconf.set(key, unicode(value))
else:
self._conf[key] = unicode(value)
return self
def setIfMissing(self, key, value):
"""Set a configuration property, if not already set."""
if self.get(key) is None:
self.set(key, value)
return self
def setMaster(self, value):
"""Set master URL to connect to."""
self.set("spark.master", value)
return self
def setAppName(self, value):
"""Set application name."""
self.set("spark.app.name", value)
return self
def setSparkHome(self, value):
"""Set path where Spark is installed on worker nodes."""
self.set("spark.home", value)
return self
def setExecutorEnv(self, key=None, value=None, pairs=None):
"""Set an environment variable to be passed to executors."""
if (key is not None and pairs is not None) or (key is None and pairs is None):
raise Exception("Either pass one key-value pair or a list of pairs")
elif key is not None:
self.set("spark.executorEnv." + key, value)
elif pairs is not None:
for (k, v) in pairs:
self.set("spark.executorEnv." + k, v)
return self
def setAll(self, pairs):
"""
Set multiple parameters, passed as a list of key-value pairs.
:param pairs: list of key-value pairs to set
"""
for (k, v) in pairs:
self.set(k, v)
return self
def get(self, key, defaultValue=None):
"""Get the configured value for some key, or return a default otherwise."""
if defaultValue is None: # Py4J doesn't call the right get() if we pass None
if self._jconf is not None:
if not self._jconf.contains(key):
return None
return self._jconf.get(key)
else:
if key not in self._conf:
return None
return self._conf[key]
else:
if self._jconf is not None:
return self._jconf.get(key, defaultValue)
else:
return self._conf.get(key, defaultValue)
def getAll(self):
"""Get all values as a list of key-value pairs."""
if self._jconf is not None:
return [(elem._1(), elem._2()) for elem in self._jconf.getAll()]
else:
return self._conf.items()
def contains(self, key):
"""Does this configuration contain a given key?"""
if self._jconf is not None:
return self._jconf.contains(key)
else:
return key in self._conf
def toDebugString(self):
"""
Returns a printable version of the configuration, as a list of
key=value pairs, one per line.
"""
if self._jconf is not None:
return self._jconf.toDebugString()
else:
return '\n'.join('%s=%s' % (k, v) for k, v in self._conf.items())
def _test():
import doctest
(failure_count, test_count) = doctest.testmod(optionflags=doctest.ELLIPSIS)
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
apache-2.0
|
ankurankan/scikit-learn
|
examples/ensemble/plot_gradient_boosting_oob.py
|
21
|
4761
|
"""
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross-validation, which
usually gives a better estimate of the test loss but is computationally
more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
|
bsd-3-clause
|
afronski/playground-notes
|
introduction-to-big-data-with-apache-spark/lab-0/lab0_student.py
|
8
|
5711
|
# coding: utf-8
# # + 
# # **First Notebook: Virtual machine test and assignment submission**
# #### This notebook will test that the virtual machine (VM) is functioning properly and will show you how to submit an assignment to the autograder. To move through the notebook just run each of the cells. You will not need to solve any problems to complete this lab. You can run a cell by pressing "shift-enter", which will compute the current cell and advance to the next cell, or by clicking in a cell and pressing "control-enter", which will compute the current cell and remain in that cell. At the end of the notebook you will export / download the notebook and submit it to the autograder.
# #### ** This notebook covers: **
# #### *Part 1:* Test Spark functionality
# #### *Part 2:* Check class testing library
# #### *Part 3:* Check plotting
# #### *Part 4:* Check MathJax formulas
# #### *Part 5:* Export / download and submit
# ### ** Part 1: Test Spark functionality **
# #### ** (1a) Parallelize, filter, and reduce **
# In[1]:
# Check that Spark is working
largeRange = sc.parallelize(xrange(100000))
reduceTest = largeRange.reduce(lambda a, b: a + b)
filterReduceTest = largeRange.filter(lambda x: x % 7 == 0).sum()
print reduceTest
print filterReduceTest
# If the Spark jobs don't work properly these will raise an AssertionError
assert reduceTest == 4999950000
assert filterReduceTest == 714264285
# #### ** (1b) Loading a text file **
# In[2]:
# Check loading data with sc.textFile
import os.path
baseDir = os.path.join('data')
inputPath = os.path.join('cs100', 'lab1', 'shakespeare.txt')
fileName = os.path.join(baseDir, inputPath)
rawData = sc.textFile(fileName)
shakespeareCount = rawData.count()
print shakespeareCount
# If the text file didn't load properly an AssertionError will be raised
assert shakespeareCount == 122395
# ### ** Part 2: Check class testing library **
# #### ** (2a) Compare with hash **
# In[3]:
# TEST Compare with hash (2a)
# Check our testing library/package
# This should print '1 test passed.' on two lines
from test_helper import Test
twelve = 12
Test.assertEquals(twelve, 12, 'twelve should equal 12')
Test.assertEqualsHashed(twelve, '7b52009b64fd0a2a49e6d8a939753077792b0554',
'twelve, once hashed, should equal the hashed value of 12')
# #### ** (2b) Compare lists **
# In[4]:
# TEST Compare lists (2b)
# This should print '1 test passed.'
unsortedList = [(5, 'b'), (5, 'a'), (4, 'c'), (3, 'a')]
Test.assertEquals(sorted(unsortedList), [(3, 'a'), (4, 'c'), (5, 'a'), (5, 'b')],
'unsortedList does not sort properly')
# ### ** Part 3: Check plotting **
# #### ** (3a) Our first plot **
# #### After executing the code cell below, you should see a plot with 50 blue circles. The circles should start at the bottom left and end at the top right.
# In[5]:
# Check matplotlib plotting
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from math import log
# function for generating plot layout
def preparePlot(xticks, yticks, figsize=(10.5, 6), hideLabels=False, gridColor='#999999', gridWidth=1.0):
plt.close()
fig, ax = plt.subplots(figsize=figsize, facecolor='white', edgecolor='white')
ax.axes.tick_params(labelcolor='#999999', labelsize='10')
for axis, ticks in [(ax.get_xaxis(), xticks), (ax.get_yaxis(), yticks)]:
axis.set_ticks_position('none')
axis.set_ticks(ticks)
axis.label.set_color('#999999')
if hideLabels: axis.set_ticklabels([])
plt.grid(color=gridColor, linewidth=gridWidth, linestyle='-')
map(lambda position: ax.spines[position].set_visible(False), ['bottom', 'top', 'left', 'right'])
return fig, ax
# generate layout and plot data
x = range(1, 50)
y = [log(x1 ** 2) for x1 in x]
fig, ax = preparePlot(range(5, 60, 10), range(0, 12, 1))
plt.scatter(x, y, s=14**2, c='#d6ebf2', edgecolors='#8cbfd0', alpha=0.75)
ax.set_xlabel(r'$range(1, 50)$'), ax.set_ylabel(r'$\log_e(x^2)$')
pass
# ### ** Part 4: Check MathJax Formulas **
# #### ** (4a) Gradient descent formula **
# #### You should see a formula on the line below this one: $$ \scriptsize \mathbf{w}_{i+1} = \mathbf{w}_i - \alpha_i \sum_j (\mathbf{w}_i^\top\mathbf{x}_j - y_j) \mathbf{x}_j \,.$$
#
# #### This formula is included inline with the text and is $ \scriptsize (\mathbf{w}^\top \mathbf{x} - y) \mathbf{x} $.
# #### ** (4b) Log loss formula **
# #### This formula shows log loss for single point. Log loss is defined as: $$ \begin{align} \scriptsize \ell_{log}(p, y) = \begin{cases} -\log (p) & \text{if } y = 1 \\\ -\log(1-p) & \text{if } y = 0 \end{cases} \end{align} $$
# ### ** Part 5: Export / download and submit **
# #### ** (5a) Time to submit **
# #### You have completed the lab. To submit the lab for grading you will need to download it from your IPython Notebook environment. You can do this by clicking on "File", then hovering your mouse over "Download as", and then clicking on "Python (.py)". This will export your IPython Notebook as a .py file to your computer.
# #### To upload this file to the course autograder, go to the edX website and find the page for submitting this assignment. Click "Choose file", then navigate to and click on the downloaded .py file. Now click the "Open" button and then the "Check" button. Your submission will be graded shortly and will be available on the page where you submitted. Note that when submission volumes are high, it may take as long as an hour to receive results.
|
mit
|
chris-chris/tensorflow
|
tensorflow/contrib/distributions/python/ops/bijectors/chain_impl.py
|
51
|
5160
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Chain bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from tensorflow.python.framework import constant_op
from tensorflow.python.ops.distributions import bijector
__all__ = [
"Chain",
]
class Chain(bijector.Bijector):
"""Bijector which applies a sequence of bijectors.
Example Use:
```python
chain = Chain([Exp(), Softplus()], name="one_plus_exp")
```
Results in:
* Forward:
```python
exp = Exp()
softplus = Softplus()
Chain([exp, softplus]).forward(x)
= exp.forward(softplus.forward(x))
= tf.exp(tf.log(1. + tf.exp(x)))
= 1. + tf.exp(x)
```
* Inverse:
```python
exp = Exp()
softplus = Softplus()
Chain([exp, softplus]).inverse(y)
= softplus.inverse(exp.inverse(y))
= tf.log(tf.exp(tf.log(y)) - 1.)
= tf.log(y - 1.)
```
"""
def __init__(self, bijectors=None, validate_args=False, name=None):
"""Instantiates `Chain` bijector.
Args:
bijectors: Python `list` of bijector instances. An empty list makes this
bijector equivalent to the `Identity` bijector.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str`, name given to ops managed by this object. Default:
E.g., `Chain([Exp(), Softplus()]).name == "chain_of_exp_of_softplus"`.
Raises:
ValueError: if bijectors have different dtypes.
"""
if bijectors is None:
bijectors = ()
self._bijectors = bijectors
dtype = list(set([b.dtype for b in bijectors]))
if len(dtype) > 2:
raise ValueError("incompatible dtypes: %s" % dtype)
elif len(dtype) == 2:
dtype = dtype[1] if dtype[0] is None else dtype[0]
event_ndims = bijectors[0].event_ndims
elif len(dtype) == 1:
dtype = dtype[0]
event_ndims = bijectors[0].event_ndims
else:
dtype = None
event_ndims = None
super(Chain, self).__init__(
graph_parents=list(itertools.chain.from_iterable(
b.graph_parents for b in bijectors)),
is_constant_jacobian=all(b.is_constant_jacobian for b in bijectors),
validate_args=validate_args,
dtype=dtype,
event_ndims=event_ndims,
name=name or ("identity" if not bijectors else
"_of_".join(["chain"] + [b.name for b in bijectors])))
@property
def bijectors(self):
return self._bijectors
def _shape_helper(self, func_name, input_shape, reverse):
new_shape = input_shape
for b in reversed(self.bijectors) if reverse else self.bijectors:
func = getattr(b, func_name, None)
if func is None:
raise ValueError("unable to call %s on bijector %s (%s)" %
(func_name, b.name, func))
new_shape = func(new_shape)
return new_shape
def _forward_event_shape(self, input_shape):
return self._shape_helper("forward_event_shape", input_shape,
reverse=True)
def _forward_event_shape_tensor(self, input_shape):
return self._shape_helper(
"forward_event_shape_tensor", input_shape, reverse=True)
def _inverse_event_shape(self, output_shape):
return self._shape_helper("inverse_event_shape", output_shape,
reverse=False)
def _inverse_event_shape_tensor(self, output_shape):
return self._shape_helper("inverse_event_shape_tensor", output_shape,
reverse=False)
def _inverse(self, y, **kwargs):
for b in self.bijectors:
y = b.inverse(y, **kwargs.get(b.name, {}))
return y
def _inverse_log_det_jacobian(self, y, **kwargs):
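    # Sum each bijector's ILDJ while threading y through the inverse chain.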
ildj = constant_op.constant(0., dtype=y.dtype,
name="inverse_log_det_jacobian")
for b in self.bijectors:
ildj += b.inverse_log_det_jacobian(y, **kwargs.get(b.name, {}))
y = b.inverse(y, **kwargs.get(b.name, {}))
return ildj
def _forward(self, x, **kwargs):
for b in reversed(self.bijectors):
x = b.forward(x, **kwargs.get(b.name, {}))
return x
def _forward_log_det_jacobian(self, x, **kwargs):
fldj = constant_op.constant(0., dtype=x.dtype,
name="forward_log_det_jacobian")
for b in reversed(self.bijectors):
fldj += b.forward_log_det_jacobian(x, **kwargs.get(b.name, {}))
x = b.forward(x, **kwargs.get(b.name, {}))
return fldj
|
apache-2.0
|
pgleeson/TestArea
|
lib/jython/Lib/test/test_tarfile.py
|
9
|
26406
|
import sys
import os
import shutil
import tempfile
import StringIO
import unittest
import tarfile
from test import test_support
# Check for our compression modules.
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
gzip = None
try:
import bz2
except ImportError:
bz2 = None
def path(path):
return test_support.findfile(path)
testtar = path("testtar.tar")
tempdir = os.path.join(tempfile.gettempdir(), "testtar" + os.extsep + "dir")
tempname = test_support.TESTFN
membercount = 13
def tarname(comp=""):
if not comp:
return testtar
return os.path.join(dirname(), "%s%s%s" % (testtar, os.extsep, comp))
def dirname():
if not os.path.exists(tempdir):
os.mkdir(tempdir)
return tempdir
def tmpname():
return tempname
class BaseTest(unittest.TestCase):
comp = ''
mode = 'r'
sep = ':'
def setUp(self):
mode = self.mode + self.sep + self.comp
self.tar = tarfile.open(tarname(self.comp), mode)
def tearDown(self):
self.tar.close()
class ReadTest(BaseTest):
def test(self):
"""Test member extraction.
"""
members = 0
for tarinfo in self.tar:
members += 1
if not tarinfo.isreg():
continue
f = self.tar.extractfile(tarinfo)
self.assert_(len(f.read()) == tarinfo.size,
"size read does not match expected size")
f.close()
self.assert_(members == membercount,
"could not find all members")
def test_sparse(self):
"""Test sparse member extraction.
"""
if self.sep != "|":
f1 = self.tar.extractfile("S-SPARSE")
f2 = self.tar.extractfile("S-SPARSE-WITH-NULLS")
self.assert_(f1.read() == f2.read(),
"_FileObject failed on sparse file member")
def test_readlines(self):
"""Test readlines() method of _FileObject.
"""
if self.sep != "|":
filename = "0-REGTYPE-TEXT"
self.tar.extract(filename, dirname())
f = open(os.path.join(dirname(), filename), "rU")
lines1 = f.readlines()
f.close()
lines2 = self.tar.extractfile(filename).readlines()
self.assert_(lines1 == lines2,
"_FileObject.readline() does not work correctly")
def test_iter(self):
# Test iteration over ExFileObject.
if self.sep != "|":
filename = "0-REGTYPE-TEXT"
self.tar.extract(filename, dirname())
f = open(os.path.join(dirname(), filename), "rU")
lines1 = f.readlines()
f.close()
lines2 = [line for line in self.tar.extractfile(filename)]
self.assert_(lines1 == lines2,
"ExFileObject iteration does not work correctly")
def test_seek(self):
"""Test seek() method of _FileObject, incl. random reading.
"""
if self.sep != "|":
filename = "0-REGTYPE-TEXT"
self.tar.extract(filename, dirname())
f = open(os.path.join(dirname(), filename), "rb")
data = f.read()
f.close()
tarinfo = self.tar.getmember(filename)
fobj = self.tar.extractfile(tarinfo)
text = fobj.read()
fobj.seek(0)
self.assert_(0 == fobj.tell(),
"seek() to file's start failed")
fobj.seek(2048, 0)
self.assert_(2048 == fobj.tell(),
"seek() to absolute position failed")
fobj.seek(-1024, 1)
self.assert_(1024 == fobj.tell(),
"seek() to negative relative position failed")
fobj.seek(1024, 1)
self.assert_(2048 == fobj.tell(),
"seek() to positive relative position failed")
s = fobj.read(10)
self.assert_(s == data[2048:2058],
"read() after seek failed")
fobj.seek(0, 2)
self.assert_(tarinfo.size == fobj.tell(),
"seek() to file's end failed")
self.assert_(fobj.read() == "",
"read() at file's end did not return empty string")
fobj.seek(-tarinfo.size, 2)
self.assert_(0 == fobj.tell(),
"relative seek() to file's start failed")
fobj.seek(512)
s1 = fobj.readlines()
fobj.seek(512)
s2 = fobj.readlines()
self.assert_(s1 == s2,
"readlines() after seek failed")
fobj.seek(0)
self.assert_(len(fobj.readline()) == fobj.tell(),
"tell() after readline() failed")
fobj.seek(512)
self.assert_(len(fobj.readline()) + 512 == fobj.tell(),
"tell() after seek() and readline() failed")
fobj.seek(0)
line = fobj.readline()
self.assert_(fobj.read() == data[len(line):],
"read() after readline() failed")
fobj.close()
def test_old_dirtype(self):
"""Test old style dirtype member (bug #1336623).
"""
# Old tars create directory members using a REGTYPE
# header with a "/" appended to the filename field.
# Create an old tar style directory entry.
filename = tmpname()
tarinfo = tarfile.TarInfo("directory/")
tarinfo.type = tarfile.REGTYPE
fobj = open(filename, "w")
fobj.write(tarinfo.tobuf())
fobj.close()
try:
# Test if it is still a directory entry when
# read back.
tar = tarfile.open(filename)
tarinfo = tar.getmembers()[0]
tar.close()
self.assert_(tarinfo.type == tarfile.DIRTYPE)
self.assert_(tarinfo.name.endswith("/"))
finally:
try:
os.unlink(filename)
except:
pass
def test_dirtype(self):
for tarinfo in self.tar:
if tarinfo.isdir():
self.assert_(tarinfo.name.endswith("/"))
self.assert_(not tarinfo.name[:-1].endswith("/"))
def test_extractall(self):
# Test if extractall() correctly restores directory permissions
# and times (see issue1735).
if (sys.platform == "win32" or
test_support.is_jython and os._name == 'nt'):
# Win32 has no support for utime() on directories or
# fine grained permissions.
return
fobj = StringIO.StringIO()
tar = tarfile.open(fileobj=fobj, mode="w:")
for name in ("foo", "foo/bar"):
tarinfo = tarfile.TarInfo(name)
tarinfo.type = tarfile.DIRTYPE
tarinfo.mtime = 07606136617
tarinfo.mode = 0755
tar.addfile(tarinfo)
tar.close()
fobj.seek(0)
TEMPDIR = os.path.join(dirname(), "extract-test")
tar = tarfile.open(fileobj=fobj)
tar.extractall(TEMPDIR)
for tarinfo in tar.getmembers():
path = os.path.join(TEMPDIR, tarinfo.name)
self.assertEqual(tarinfo.mode, os.stat(path).st_mode & 0777)
self.assertEqual(tarinfo.mtime, os.path.getmtime(path))
tar.close()
def test_star(self):
try:
self.tar.getmember("7-STAR")
except KeyError:
self.fail("finding 7-STAR member failed (mangled prefix?)")
class ReadStreamTest(ReadTest):
sep = "|"
def test(self):
"""Test member extraction, and for StreamError when
seeking backwards.
"""
ReadTest.test(self)
tarinfo = self.tar.getmembers()[0]
f = self.tar.extractfile(tarinfo)
self.assertRaises(tarfile.StreamError, f.read)
def test_stream(self):
"""Compare the normal tar and the stream tar.
"""
stream = self.tar
tar = tarfile.open(tarname(), 'r')
while 1:
t1 = tar.next()
t2 = stream.next()
if t1 is None:
break
self.assert_(t2 is not None, "stream.next() failed.")
if t2.islnk() or t2.issym():
self.assertRaises(tarfile.StreamError, stream.extractfile, t2)
continue
v1 = tar.extractfile(t1)
v2 = stream.extractfile(t2)
if v1 is None:
continue
self.assert_(v2 is not None, "stream.extractfile() failed")
self.assert_(v1.read() == v2.read(), "stream extraction failed")
tar.close()
stream.close()
class ReadDetectTest(ReadTest):
def setUp(self):
self.tar = tarfile.open(tarname(self.comp), self.mode)
def tearDown(self):
self.tar.close()
class ReadDetectFileobjTest(ReadTest):
def setUp(self):
name = tarname(self.comp)
self.fileobj = open(name, "rb")
self.tar = tarfile.open(name, mode=self.mode,
fileobj=self.fileobj)
def tearDown(self):
self.tar.close()
self.fileobj.close()
class ReadAsteriskTest(ReadTest):
def setUp(self):
mode = self.mode + self.sep + "*"
self.tar = tarfile.open(tarname(self.comp), mode)
class ReadStreamAsteriskTest(ReadStreamTest):
def setUp(self):
mode = self.mode + self.sep + "*"
self.tar = tarfile.open(tarname(self.comp), mode)
class ReadFileobjTest(BaseTest):
def test_fileobj_with_offset(self):
# Skip the first member and store values from the second member
# of the testtar.
self.tar.next()
t = self.tar.next()
name = t.name
offset = t.offset
data = self.tar.extractfile(t).read()
self.tar.close()
# Open the testtar and seek to the offset of the second member.
if self.comp == "gz":
_open = gzip.GzipFile
elif self.comp == "bz2":
_open = bz2.BZ2File
else:
_open = open
fobj = _open(tarname(self.comp), "rb")
fobj.seek(offset)
# Test if the tarfile starts with the second member.
self.tar.close()
self.tar = tarfile.open(tarname(self.comp), "r:", fileobj=fobj)
t = self.tar.next()
self.assertEqual(t.name, name)
# Read to the end of fileobj and test if seeking back to the
# beginning works.
self.tar.getmembers()
self.assertEqual(self.tar.extractfile(t).read(), data,
"seek back did not work")
self.tar.close()
fobj.close()
class WriteTest(BaseTest):
mode = 'w'
def setUp(self):
mode = self.mode + self.sep + self.comp
self.src = tarfile.open(tarname(self.comp), 'r')
self.dstname = tmpname()
self.dst = tarfile.open(self.dstname, mode)
def tearDown(self):
self.src.close()
self.dst.close()
def test_posix(self):
self.dst.posix = 1
self._test()
def test_nonposix(self):
self.dst.posix = 0
self._test()
def test_small(self):
        self.dst.add(os.path.join(os.path.dirname(__file__), "cfgparser.1"))
self.dst.close()
self.assertNotEqual(os.stat(self.dstname).st_size, 0)
def _test(self):
for tarinfo in self.src:
if not tarinfo.isreg():
continue
f = self.src.extractfile(tarinfo)
if self.dst.posix and len(tarinfo.name) > tarfile.LENGTH_NAME and "/" not in tarinfo.name:
self.assertRaises(ValueError, self.dst.addfile,
tarinfo, f)
else:
self.dst.addfile(tarinfo, f)
def test_add_self(self):
dstname = os.path.abspath(self.dstname)
self.assertEqual(self.dst.name, dstname, "archive name must be absolute")
self.dst.add(dstname)
self.assertEqual(self.dst.getnames(), [], "added the archive to itself")
cwd = os.getcwd()
os.chdir(dirname())
self.dst.add(dstname)
os.chdir(cwd)
self.assertEqual(self.dst.getnames(), [], "added the archive to itself")
class Write100Test(BaseTest):
# The name field in a tar header stores strings of at most 100 chars.
# If a string is shorter than 100 chars it has to be padded with '\0',
# which implies that a string of exactly 100 chars is stored without
# a trailing '\0'.
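    # Illustrative padding arithmetic for the rule above (hypothetical
    # 98-char name, assumed header layout): a 98-char name occupies
    # bytes 0..97 of the 100-byte field and bytes 98..99 are '\0'; a
    # 100-char name fills the field exactly, leaving no room for a
    # terminator.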
def setUp(self):
self.name = "01234567890123456789012345678901234567890123456789"
self.name += "01234567890123456789012345678901234567890123456789"
self.tar = tarfile.open(tmpname(), "w")
t = tarfile.TarInfo(self.name)
self.tar.addfile(t)
self.tar.close()
self.tar = tarfile.open(tmpname())
def tearDown(self):
self.tar.close()
def test(self):
self.assertEqual(self.tar.getnames()[0], self.name,
"failed to store 100 char filename")
class WriteSize0Test(BaseTest):
mode = 'w'
def setUp(self):
self.tmpdir = dirname()
self.dstname = tmpname()
self.dst = tarfile.open(self.dstname, "w")
def tearDown(self):
self.dst.close()
def test_file(self):
path = os.path.join(self.tmpdir, "file")
f = open(path, "w")
f.close()
tarinfo = self.dst.gettarinfo(path)
self.assertEqual(tarinfo.size, 0)
f = open(path, "w")
f.write("aaa")
f.close()
tarinfo = self.dst.gettarinfo(path)
self.assertEqual(tarinfo.size, 3)
def test_directory(self):
path = os.path.join(self.tmpdir, "directory")
if os.path.exists(path):
# This shouldn't be necessary, but is <wink> if a previous
# run was killed in mid-stream.
shutil.rmtree(path)
os.mkdir(path)
tarinfo = self.dst.gettarinfo(path)
self.assertEqual(tarinfo.size, 0)
def test_symlink(self):
if hasattr(os, "symlink"):
path = os.path.join(self.tmpdir, "symlink")
os.symlink("link_target", path)
tarinfo = self.dst.gettarinfo(path)
self.assertEqual(tarinfo.size, 0)
class WriteStreamTest(WriteTest):
sep = '|'
def test_padding(self):
self.dst.close()
if self.comp == "gz":
f = gzip.GzipFile(self.dstname)
s = f.read()
f.close()
elif self.comp == "bz2":
b = bz2.BZ2Decompressor()
f = file(self.dstname)
s = f.read()
f.close()
s = b.decompress(s)
            self.assertEqual(len(b.unused_data), 0, "trailing data")
else:
f = file(self.dstname)
s = f.read()
f.close()
self.assertEqual(s.count("\0"), tarfile.RECORDSIZE,
"incorrect zero padding")
class WriteGNULongTest(unittest.TestCase):
"""This testcase checks for correct creation of GNU Longname
and Longlink extensions.
It creates a tarfile and adds empty members with either
long names, long linknames or both and compares the size
of the tarfile with the expected size.
It checks for SF bug #812325 in TarFile._create_gnulong().
While I was writing this testcase, I noticed a second bug
in the same method:
    Long{names,links} weren't null-terminated, which led to
bad tarfiles when their length was a multiple of 512. This
is tested as well.
"""
def _length(self, s):
blocks, remainder = divmod(len(s) + 1, 512)
if remainder:
blocks += 1
return blocks * 512
def _calc_size(self, name, link=None):
# initial tar header
count = 512
if len(name) > tarfile.LENGTH_NAME:
# gnu longname extended header + longname
count += 512
count += self._length(name)
if link is not None and len(link) > tarfile.LENGTH_LINK:
# gnu longlink extended header + longlink
count += 512
count += self._length(link)
return count
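    # Worked example of the arithmetic above (illustrative): for a
    # 1024-char name, _length rounds (1024 + 1 null byte) up to
    # 3 blocks = 1536 bytes, so _calc_size returns 512 (member header)
    # + 512 (longname extended header) + 1536 = 2560 bytes.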
def _test(self, name, link=None):
tarinfo = tarfile.TarInfo(name)
if link:
tarinfo.linkname = link
tarinfo.type = tarfile.LNKTYPE
tar = tarfile.open(tmpname(), "w")
tar.posix = False
tar.addfile(tarinfo)
v1 = self._calc_size(name, link)
v2 = tar.offset
self.assertEqual(v1, v2, "GNU longname/longlink creation failed")
tar.close()
tar = tarfile.open(tmpname())
member = tar.next()
self.failIf(member is None, "unable to read longname member")
self.assert_(tarinfo.name == member.name and \
tarinfo.linkname == member.linkname, \
"unable to read longname member")
tar.close()
def test_longname_1023(self):
self._test(("longnam/" * 127) + "longnam")
def test_longname_1024(self):
self._test(("longnam/" * 127) + "longname")
def test_longname_1025(self):
self._test(("longnam/" * 127) + "longname_")
def test_longlink_1023(self):
self._test("name", ("longlnk/" * 127) + "longlnk")
def test_longlink_1024(self):
self._test("name", ("longlnk/" * 127) + "longlink")
def test_longlink_1025(self):
self._test("name", ("longlnk/" * 127) + "longlink_")
def test_longnamelink_1023(self):
self._test(("longnam/" * 127) + "longnam",
("longlnk/" * 127) + "longlnk")
def test_longnamelink_1024(self):
self._test(("longnam/" * 127) + "longname",
("longlnk/" * 127) + "longlink")
def test_longnamelink_1025(self):
self._test(("longnam/" * 127) + "longname_",
("longlnk/" * 127) + "longlink_")
class ReadGNULongTest(unittest.TestCase):
def setUp(self):
self.tar = tarfile.open(tarname())
def tearDown(self):
self.tar.close()
def test_1471427(self):
"""Test reading of longname (bug #1471427).
"""
name = "test/" * 20 + "0-REGTYPE"
try:
tarinfo = self.tar.getmember(name)
except KeyError:
tarinfo = None
self.assert_(tarinfo is not None, "longname not found")
self.assert_(tarinfo.type != tarfile.DIRTYPE, "read longname as dirtype")
def test_read_name(self):
name = ("0-LONGNAME-" * 10)[:101]
try:
tarinfo = self.tar.getmember(name)
except KeyError:
tarinfo = None
self.assert_(tarinfo is not None, "longname not found")
def test_read_link(self):
link = ("1-LONGLINK-" * 10)[:101]
name = ("0-LONGNAME-" * 10)[:101]
try:
tarinfo = self.tar.getmember(link)
except KeyError:
tarinfo = None
self.assert_(tarinfo is not None, "longlink not found")
self.assert_(tarinfo.linkname == name, "linkname wrong")
def test_truncated_longname(self):
        f = open(tarname(), "rb")
fobj = StringIO.StringIO(f.read(1024))
f.close()
tar = tarfile.open(name="foo.tar", fileobj=fobj)
        self.assert_(len(tar.getmembers()) == 0, "truncated longname should yield no members")
tar.close()
class ExtractHardlinkTest(BaseTest):
def test_hardlink(self):
"""Test hardlink extraction (bug #857297)
"""
# Prevent errors from being caught
self.tar.errorlevel = 1
self.tar.extract("0-REGTYPE", dirname())
try:
# Extract 1-LNKTYPE which is a hardlink to 0-REGTYPE
self.tar.extract("1-LNKTYPE", dirname())
except EnvironmentError, e:
import errno
if e.errno == errno.ENOENT:
self.fail("hardlink not extracted properly")
class CreateHardlinkTest(BaseTest):
"""Test the creation of LNKTYPE (hardlink) members in an archive.
In this respect tarfile.py mimics the behaviour of GNU tar: If
    a file has st_nlink > 1, it will be added as a REGTYPE member
only the first time.
"""
def setUp(self):
self.tar = tarfile.open(tmpname(), "w")
self.foo = os.path.join(dirname(), "foo")
self.bar = os.path.join(dirname(), "bar")
if os.path.exists(self.foo):
os.remove(self.foo)
if os.path.exists(self.bar):
os.remove(self.bar)
f = open(self.foo, "w")
f.write("foo")
f.close()
self.tar.add(self.foo)
def test_add_twice(self):
# If st_nlink == 1 then the same file will be added as
# REGTYPE every time.
tarinfo = self.tar.gettarinfo(self.foo)
self.assertEqual(tarinfo.type, tarfile.REGTYPE,
"add file as regular failed")
def test_add_hardlink(self):
# If st_nlink > 1 then the same file will be added as
# LNKTYPE.
os.link(self.foo, self.bar)
tarinfo = self.tar.gettarinfo(self.foo)
self.assertEqual(tarinfo.type, tarfile.LNKTYPE,
"add file as hardlink failed")
tarinfo = self.tar.gettarinfo(self.bar)
self.assertEqual(tarinfo.type, tarfile.LNKTYPE,
"add file as hardlink failed")
def test_dereference_hardlink(self):
self.tar.dereference = True
os.link(self.foo, self.bar)
tarinfo = self.tar.gettarinfo(self.bar)
self.assertEqual(tarinfo.type, tarfile.REGTYPE,
"dereferencing hardlink failed")
# Gzip TestCases
class ReadTestGzip(ReadTest):
comp = "gz"
class ReadStreamTestGzip(ReadStreamTest):
comp = "gz"
class WriteTestGzip(WriteTest):
comp = "gz"
class WriteStreamTestGzip(WriteStreamTest):
comp = "gz"
class ReadDetectTestGzip(ReadDetectTest):
comp = "gz"
class ReadDetectFileobjTestGzip(ReadDetectFileobjTest):
comp = "gz"
class ReadAsteriskTestGzip(ReadAsteriskTest):
comp = "gz"
class ReadStreamAsteriskTestGzip(ReadStreamAsteriskTest):
comp = "gz"
class ReadFileobjTestGzip(ReadFileobjTest):
comp = "gz"
# Filemode test cases
class FileModeTest(unittest.TestCase):
def test_modes(self):
self.assertEqual(tarfile.filemode(0755), '-rwxr-xr-x')
self.assertEqual(tarfile.filemode(07111), '---s--s--t')
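        # For reference (illustrative, not asserted here): directory bits
        # add a leading 'd', e.g. tarfile.filemode(040755) == 'drwxr-xr-x'.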
class OpenFileobjTest(BaseTest):
def test_opener(self):
# Test for SF bug #1496501.
fobj = StringIO.StringIO("foo\n")
try:
tarfile.open("", mode="r", fileobj=fobj)
except tarfile.ReadError:
self.assertEqual(fobj.tell(), 0, "fileobj's position has moved")
def test_no_name_argument(self):
fobj = open(testtar, "rb")
self.tar.close()
self.tar = tarfile.open(fileobj=fobj, mode="r")
self.assertEqual(self.tar.name, os.path.abspath(fobj.name))
fobj.close()
def test_no_name_attribute(self):
fp = open(testtar, "rb")
data = fp.read()
fp.close()
fobj = StringIO.StringIO(data)
self.assertRaises(AttributeError, getattr, fobj, "name")
self.tar.close()
self.tar = tarfile.open(fileobj=fobj, mode="r")
self.assertEqual(self.tar.name, None)
def test_empty_name_attribute(self):
fp = open(testtar, "rb")
data = fp.read()
fp.close()
fobj = StringIO.StringIO(data)
fobj.name = ""
self.tar.close()
self.tar = tarfile.open(fileobj=fobj, mode="r")
self.assertEqual(self.tar.name, None)
if bz2:
# Bzip2 TestCases
class ReadTestBzip2(ReadTestGzip):
comp = "bz2"
class ReadStreamTestBzip2(ReadStreamTestGzip):
comp = "bz2"
class WriteTestBzip2(WriteTest):
comp = "bz2"
class WriteStreamTestBzip2(WriteStreamTestGzip):
comp = "bz2"
class ReadDetectTestBzip2(ReadDetectTest):
comp = "bz2"
class ReadDetectFileobjTestBzip2(ReadDetectFileobjTest):
comp = "bz2"
class ReadAsteriskTestBzip2(ReadAsteriskTest):
comp = "bz2"
class ReadStreamAsteriskTestBzip2(ReadStreamAsteriskTest):
comp = "bz2"
class ReadFileobjTestBzip2(ReadFileobjTest):
comp = "bz2"
# If importing gzip failed, discard the Gzip TestCases.
if not gzip:
del ReadTestGzip
del ReadStreamTestGzip
del WriteTestGzip
del WriteStreamTestGzip
def test_main():
# Create archive.
f = open(tarname(), "rb")
fguts = f.read()
f.close()
if gzip:
# create testtar.tar.gz
tar = gzip.open(tarname("gz"), "wb")
tar.write(fguts)
tar.close()
if bz2:
# create testtar.tar.bz2
tar = bz2.BZ2File(tarname("bz2"), "wb")
tar.write(fguts)
tar.close()
tests = [
FileModeTest,
OpenFileobjTest,
ReadTest,
ReadStreamTest,
ReadDetectTest,
ReadDetectFileobjTest,
ReadAsteriskTest,
ReadStreamAsteriskTest,
ReadFileobjTest,
WriteTest,
Write100Test,
WriteSize0Test,
WriteStreamTest,
WriteGNULongTest,
ReadGNULongTest,
]
if hasattr(os, "link"):
tests.append(ExtractHardlinkTest)
tests.append(CreateHardlinkTest)
if gzip:
tests.extend([
ReadTestGzip, ReadStreamTestGzip,
WriteTestGzip, WriteStreamTestGzip,
ReadDetectTestGzip, ReadDetectFileobjTestGzip,
ReadAsteriskTestGzip, ReadStreamAsteriskTestGzip,
ReadFileobjTestGzip
])
if bz2:
tests.extend([
ReadTestBzip2, ReadStreamTestBzip2,
WriteTestBzip2, WriteStreamTestBzip2,
ReadDetectTestBzip2, ReadDetectFileobjTestBzip2,
ReadAsteriskTestBzip2, ReadStreamAsteriskTestBzip2,
ReadFileobjTestBzip2
])
try:
test_support.run_unittest(*tests)
finally:
if gzip:
os.remove(tarname("gz"))
if bz2:
os.remove(tarname("bz2"))
if os.path.exists(dirname()):
shutil.rmtree(dirname())
if os.path.exists(tmpname()):
os.remove(tmpname())
if __name__ == "__main__":
test_main()
|
gpl-2.0
|
Anonymouslemming/ansible
|
lib/ansible/plugins/terminal/nxos.py
|
52
|
1912
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
        re.compile(br'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*[>#%](?:\s*)$'),
re.compile(br'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*\(.+\)#(?:\s*)$')
]
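    # Illustrative prompts the patterns above are meant to match (assumed,
    # not exhaustive): b'switch>' and b'switch#' for the first pattern,
    # b'switch(config)#' for the second.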
terminal_stderr_re = [
re.compile(br"% ?Error"),
re.compile(br"^% \w+", re.M),
re.compile(br"% ?Bad secret"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found", re.I),
re.compile(br"'[^']' +returned error code: ?\d+"),
re.compile(br"syntax error"),
re.compile(br"unknown command"),
re.compile(br"user not present")
]
def on_open_shell(self):
try:
for cmd in (b'terminal length 0', b'terminal width 511'):
self._exec_cli_command(cmd)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
|
gpl-3.0
|
tejal29/pants
|
src/python/pants/backend/jvm/tasks/eclipse_gen.py
|
1
|
6935
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import pkgutil
from collections import defaultdict
from twitter.common.collections import OrderedSet
from pants.backend.jvm.tasks.ide_gen import IdeGen
from pants.base.build_environment import get_buildroot
from pants.base.generator import Generator, TemplateData
from pants.util.dirutil import safe_delete, safe_mkdir, safe_open
_TEMPLATE_BASEDIR = os.path.join('templates', 'eclipse')
_VERSIONS = {
'3.5': '3.7', # 3.5-3.7 are .project/.classpath compatible
'3.6': '3.7',
'3.7': '3.7',
}
_SETTINGS = (
'org.eclipse.core.resources.prefs',
'org.eclipse.jdt.ui.prefs',
)
class EclipseGen(IdeGen):
@classmethod
def register_options(cls, register):
super(EclipseGen, cls).register_options(register)
register('--version', choices=sorted(list(_VERSIONS.keys())), default='3.6',
help='The Eclipse version the project configuration should be generated for.')
def __init__(self, *args, **kwargs):
super(EclipseGen, self).__init__(*args, **kwargs)
version = _VERSIONS[self.get_options().version]
self.project_template = os.path.join(_TEMPLATE_BASEDIR, 'project-%s.mustache' % version)
self.classpath_template = os.path.join(_TEMPLATE_BASEDIR, 'classpath-%s.mustache' % version)
self.apt_template = os.path.join(_TEMPLATE_BASEDIR, 'factorypath-%s.mustache' % version)
self.pydev_template = os.path.join(_TEMPLATE_BASEDIR, 'pydevproject-%s.mustache' % version)
self.debug_template = os.path.join(_TEMPLATE_BASEDIR, 'debug-launcher-%s.mustache' % version)
self.coreprefs_template = os.path.join(_TEMPLATE_BASEDIR,
'org.eclipse.jdt.core.prefs-%s.mustache' % version)
self.project_filename = os.path.join(self.cwd, '.project')
self.classpath_filename = os.path.join(self.cwd, '.classpath')
self.apt_filename = os.path.join(self.cwd, '.factorypath')
self.pydev_filename = os.path.join(self.cwd, '.pydevproject')
self.coreprefs_filename = os.path.join(self.cwd, '.settings', 'org.eclipse.jdt.core.prefs')
def generate_project(self, project):
def linked_folder_id(source_set):
return source_set.source_base.replace(os.path.sep, '.')
def base_path(source_set):
return os.path.join(source_set.root_dir, source_set.source_base)
def create_source_base_template(source_set):
source_base = base_path(source_set)
return source_base, TemplateData(
id=linked_folder_id(source_set),
path=source_base
)
source_bases = dict(map(create_source_base_template, project.sources))
if project.has_python:
source_bases.update(map(create_source_base_template, project.py_sources))
source_bases.update(map(create_source_base_template, project.py_libs))
def create_source_template(base_id, includes=None, excludes=None):
return TemplateData(
base=base_id,
includes='|'.join(OrderedSet(includes)) if includes else None,
excludes='|'.join(OrderedSet(excludes)) if excludes else None,
)
def create_sourcepath(base_id, sources):
def normalize_path_pattern(path):
return '%s/' % path if not path.endswith('/') else path
includes = [normalize_path_pattern(src_set.path) for src_set in sources if src_set.path]
excludes = []
for source_set in sources:
excludes.extend(normalize_path_pattern(exclude) for exclude in source_set.excludes)
return create_source_template(base_id, includes, excludes)
pythonpaths = []
if project.has_python:
for source_set in project.py_sources:
pythonpaths.append(create_source_template(linked_folder_id(source_set)))
for source_set in project.py_libs:
lib_path = source_set.path if source_set.path.endswith('.egg') else '%s/' % source_set.path
pythonpaths.append(create_source_template(linked_folder_id(source_set),
includes=[lib_path]))
configured_project = TemplateData(
name=self.project_name,
java=TemplateData(
jdk=self.java_jdk,
language_level=('1.%d' % self.java_language_level)
),
python=project.has_python,
scala=project.has_scala and not project.skip_scala,
source_bases=source_bases.values(),
pythonpaths=pythonpaths,
debug_port=project.debug_port,
)
outdir = os.path.abspath(os.path.join(self.gen_project_workdir, 'bin'))
safe_mkdir(outdir)
source_sets = defaultdict(OrderedSet) # base_id -> source_set
for source_set in project.sources:
source_sets[linked_folder_id(source_set)].add(source_set)
sourcepaths = [create_sourcepath(base_id, sources) for base_id, sources in source_sets.items()]
libs = list(project.internal_jars)
libs.extend(project.external_jars)
configured_classpath = TemplateData(
sourcepaths=sourcepaths,
has_tests=project.has_tests,
libs=libs,
scala=project.has_scala,
# Eclipse insists the outdir be a relative path unlike other paths
outdir=os.path.relpath(outdir, get_buildroot()),
)
def apply_template(output_path, template_relpath, **template_data):
with safe_open(output_path, 'w') as output:
Generator(pkgutil.get_data(__name__, template_relpath), **template_data).write(output)
apply_template(self.project_filename, self.project_template, project=configured_project)
apply_template(self.classpath_filename, self.classpath_template, classpath=configured_classpath)
apply_template(os.path.join(self.gen_project_workdir,
'Debug on port %d.launch' % project.debug_port),
self.debug_template, project=configured_project)
apply_template(self.coreprefs_filename, self.coreprefs_template, project=configured_project)
for resource in _SETTINGS:
with safe_open(os.path.join(self.cwd, '.settings', resource), 'w') as prefs:
prefs.write(pkgutil.get_data(__name__, os.path.join(_TEMPLATE_BASEDIR, resource)))
factorypath = TemplateData(
project_name=self.project_name,
# The easiest way to make sure eclipse sees all annotation processors is to put all libs on
# the apt factorypath - this does not seem to hurt eclipse performance in any noticeable way.
jarpaths=libs
)
apply_template(self.apt_filename, self.apt_template, factorypath=factorypath)
if project.has_python:
apply_template(self.pydev_filename, self.pydev_template, project=configured_project)
else:
safe_delete(self.pydev_filename)
print('\nGenerated project at %s%s' % (self.gen_project_workdir, os.sep))
|
apache-2.0
|
rhndg/openedx
|
lms/djangoapps/courseware/tests/test_password_history.py
|
128
|
13071
|
"""
This file will test through the LMS some of the PasswordHistory features
"""
import json
from mock import patch
from uuid import uuid4
from nose.plugins.attrib import attr
from django.contrib.auth.models import User
from django.utils import timezone
from datetime import timedelta
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.contrib.auth.tokens import default_token_generator
from django.utils.http import int_to_base36
from freezegun import freeze_time
from student.models import PasswordHistory
from courseware.tests.helpers import LoginEnrollmentTestCase
@attr('shard_1')
@patch.dict("django.conf.settings.FEATURES", {'ADVANCED_SECURITY': True})
class TestPasswordHistory(LoginEnrollmentTestCase):
"""
Go through some of the PasswordHistory use cases
"""
def _login(self, email, password, should_succeed=True, err_msg_check=None):
"""
Override the base implementation so we can do appropriate asserts
"""
resp = self.client.post(reverse('login'), {'email': email, 'password': password})
data = json.loads(resp.content)
self.assertEqual(resp.status_code, 200)
if should_succeed:
self.assertTrue(data['success'])
else:
self.assertFalse(data['success'])
if err_msg_check:
self.assertIn(err_msg_check, data['value'])
def _setup_user(self, is_staff=False, password=None):
"""
Override the base implementation to randomize the email
"""
email = 'foo_{0}@test.com'.format(uuid4().hex[:8])
password = password if password else 'foo'
username = 'test_{0}'.format(uuid4().hex[:8])
self.create_account(username, email, password)
self.activate_user(email)
# manually twiddle the is_staff bit, if needed
if is_staff:
user = User.objects.get(email=email)
user.is_staff = True
user.save()
return email, password
def _update_password(self, email, new_password):
"""
Helper method to reset a password
"""
user = User.objects.get(email=email)
user.set_password(new_password)
user.save()
history = PasswordHistory()
history.create(user)
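    # The tests below repeatedly build the reset-confirm URL inline; as a
    # sketch of that pattern (hypothetical helper, not used by the tests),
    # the same URL could be produced like this:
    def _reset_confirm_url(self, user):
        """
        Illustrative helper: build the password_reset_confirm URL the way
        the tests below do inline. The URL shape is taken from those calls,
        not from any additional API.
        """
        token = default_token_generator.make_token(user)
        uidb36 = int_to_base36(user.id)
        return '/password_reset_confirm/{0}-{1}/'.format(uidb36, token)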
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS': None})
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS': None})
def test_no_forced_password_change(self):
"""
Makes sure default behavior is correct when we don't have this turned on
"""
email, password = self._setup_user()
self._login(email, password)
email, password = self._setup_user(is_staff=True)
self._login(email, password)
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_DAYS_FOR_STAFF_ACCOUNTS_PASSWORD_RESETS': 1})
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_DAYS_FOR_STUDENT_ACCOUNTS_PASSWORD_RESETS': 5})
def test_forced_password_change(self):
"""
        Make sure passwords are viewed as expired in the LMS after the policy time has elapsed
"""
student_email, student_password = self._setup_user()
staff_email, staff_password = self._setup_user(is_staff=True)
self._login(student_email, student_password)
self._login(staff_email, staff_password)
staff_reset_time = timezone.now() + timedelta(days=1)
with freeze_time(staff_reset_time):
self._login(student_email, student_password)
# staff should fail because password expired
self._login(staff_email, staff_password, should_succeed=False,
err_msg_check="Your password has expired due to password policy on this account")
# if we reset the password, we should be able to log in
self._update_password(staff_email, "updated")
self._login(staff_email, "updated")
student_reset_time = timezone.now() + timedelta(days=5)
with freeze_time(student_reset_time):
# Both staff and student logins should fail because user must
# reset the password
self._login(student_email, student_password, should_succeed=False,
err_msg_check="Your password has expired due to password policy on this account")
self._update_password(student_email, "updated")
self._login(student_email, "updated")
self._login(staff_email, staff_password, should_succeed=False,
err_msg_check="Your password has expired due to password policy on this account")
self._update_password(staff_email, "updated2")
self._login(staff_email, "updated2")
def test_allow_all_password_reuse(self):
"""
Tests that password_reset flows work as expected if reuse config is missing, meaning
passwords can always be reused
"""
student_email, _ = self._setup_user()
user = User.objects.get(email=student_email)
err_msg = 'You are re-using a password that you have used recently.'
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
# try to do a password reset with the same password as before
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo'
}, follow=True)
self.assertNotIn(
err_msg,
resp.content
)
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_DIFFERENT_STUDENT_PASSWORDS_BEFORE_REUSE': 1})
def test_student_password_reset_reuse(self):
"""
Goes through the password reset flows to make sure the various password reuse policies are enforced
"""
student_email, _ = self._setup_user()
user = User.objects.get(email=student_email)
err_msg = 'You are re-using a password that you have used recently. You must have 1 distinct password'
success_msg = 'Your Password Reset is Complete'
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
# try to do a password reset with the same password as before
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo'
}, follow=True)
self.assertIn(
err_msg,
resp.content
)
# now retry with a different password
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'bar',
'new_password2': 'bar'
}, follow=True)
self.assertIn(
success_msg,
resp.content
)
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_DIFFERENT_STAFF_PASSWORDS_BEFORE_REUSE': 2})
def test_staff_password_reset_reuse(self):
"""
Goes through the password reset flows to make sure the various password reuse policies are enforced
"""
staff_email, _ = self._setup_user(is_staff=True)
user = User.objects.get(email=staff_email)
err_msg = 'You are re-using a password that you have used recently. You must have 2 distinct passwords'
success_msg = 'Your Password Reset is Complete'
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
# try to do a password reset with the same password as before
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo',
}, follow=True)
self.assertIn(
err_msg,
resp.content
)
# now use different one
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'bar',
'new_password2': 'bar',
}, follow=True)
self.assertIn(
success_msg,
resp.content
)
# now try again with the first one
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo',
}, follow=True)
# should be rejected
self.assertIn(
err_msg,
resp.content
)
# now use different one
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'baz',
'new_password2': 'baz',
}, follow=True)
self.assertIn(
success_msg,
resp.content
)
# now we should be able to reuse the first one
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo',
}, follow=True)
self.assertIn(
success_msg,
resp.content
)
@patch.dict("django.conf.settings.ADVANCED_SECURITY_CONFIG", {'MIN_TIME_IN_DAYS_BETWEEN_ALLOWED_RESETS': 1})
def test_password_reset_frequency_limit(self):
"""
Asserts the frequency limit on how often we can change passwords
"""
staff_email, _ = self._setup_user(is_staff=True)
success_msg = 'Your Password Reset is Complete'
# try to reset password, it should fail
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
        # first reset attempt, inside the minimum window between resets
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo',
}, follow=True)
self.assertNotIn(
success_msg,
resp.content
)
# pretend we're in the future
staff_reset_time = timezone.now() + timedelta(days=1)
with freeze_time(staff_reset_time):
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
            # retry now that the minimum window between resets has elapsed
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo',
}, follow=True)
self.assertIn(
success_msg,
resp.content
)
@patch.dict("django.conf.settings.FEATURES", {'ENFORCE_PASSWORD_POLICY': True})
@override_settings(PASSWORD_MIN_LENGTH=6)
def test_password_policy_on_password_reset(self):
"""
This makes sure the proper asserts on password policy also works on password reset
"""
staff_email, _ = self._setup_user(is_staff=True, password='foofoo')
success_msg = 'Your Password Reset is Complete'
# try to reset password, it should fail
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
        # try to reset with a password shorter than PASSWORD_MIN_LENGTH
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foo',
'new_password2': 'foo',
}, follow=True)
self.assertNotIn(
success_msg,
resp.content
)
# try to reset password with a long enough password
user = User.objects.get(email=staff_email)
token = default_token_generator.make_token(user)
uidb36 = int_to_base36(user.id)
        # retry with a password that satisfies PASSWORD_MIN_LENGTH
resp = self.client.post('/password_reset_confirm/{0}-{1}/'.format(uidb36, token), {
'new_password1': 'foofoo',
'new_password2': 'foofoo',
}, follow=True)
self.assertIn(
success_msg,
resp.content
)
|
agpl-3.0
|
ros-controls/ros_control
|
controller_manager_tests/test/multi_cm_dummy.py
|
3
|
3515
|
#!/usr/bin/env python
# Copyright (C) 2014, PAL Robotics S.L.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of PAL Robotics S.L. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rospy
from controller_manager_tests import ControllerManagerDummy
from controller_manager_msgs.msg import ControllerState as CtrlState
from controller_manager_msgs.msg import HardwareInterfaceResources
from controller_manager_msgs.srv import ListControllersResponse, LoadController
if __name__ == '__main__':
rospy.init_node('multi_cm_dummy')
# Valid controller managers in different namespaces
cm_root = ControllerManagerDummy('/')
cm_foo1 = ControllerManagerDummy('/foo/robot/controller_manager1')
cm_foo2 = ControllerManagerDummy('/foo/robot/controller_manager2')
cm_default = ControllerManagerDummy()
ctrl_list = [
CtrlState(name='foo_controller',
state='running',
type='foo_base/foo',
claimed_resources=[
HardwareInterfaceResources(
hardware_interface='hardware_interface::FooInterface',
resources=['one', 'two', 'three'])
]),
CtrlState(name='bar_controller',
state='running',
type='bar_base/bar',
claimed_resources=[
HardwareInterfaceResources(
hardware_interface='hardware_interface::BarInterface',
resources=['four'])
])
]
resp = ListControllersResponse()
resp.controller = ctrl_list
cm_default.list_ctrl_resp = resp
# Partial controller manager ROS API: missing service
cm_incomplete = ControllerManagerDummy('/incomplete')
cm_incomplete.reload_libs.shutdown()
# Partial controller manager ROS API: service with wrong type
cm_bad_type = ControllerManagerDummy('/bad_type')
cm_bad_type.unload_ctrl.shutdown()
cm_bad_type.unload_ctrl = rospy.Service('/bad_type/unload_controller',
LoadController, # NOTE: Wrong type
cm_bad_type._unload_ctrl_cb)
rospy.spin()
|
bsd-3-clause
|
cdondrup/strands_qsr_lib
|
qsr_prob_rep/src/qsrrep_lib/rep_hmm.py
|
4
|
5263
|
# -*- coding: utf-8 -*-
#from rep_abstractclass import RepAbstractclass
from rep_io import ServiceManager
from rep_io_hmm import HMMRepRequestCreate, HMMRepRequestSample, HMMRepRequestLogLikelihood
from rep_io_hmm import HMMReqResponseCreate, HMMReqResponseSample, HMMReqResponseLogLikelihood
from qsrrep_hmms.qtcc_hmm import QTCCHMM
from qsrrep_hmms.qtcb_hmm import QTCBHMM
from qsrrep_hmms.qtcbc_hmm import QTCBCHMM
from qsrrep_hmms.rcc3_hmm import RCC3HMM
from qsrrep_hmms.generic_hmm import GenericHMM
import ghmm as gh
import json
class RepHMM(object):
TRANS = "trans"
EMI ="emi"
START = "start"
hmm_types_available = {
"qtcc": QTCCHMM,
"qtcb": QTCBHMM,
"qtcbc": QTCBCHMM,
"rcc3": RCC3HMM,
"generic": GenericHMM
}
namespace = "hmm"
def __init__(self):
pass
@ServiceManager.service_function(namespace, HMMRepRequestCreate, HMMReqResponseCreate)
def create(self, **kwargs):
"""Creates a new HMM by calling the get_hmm function in hmm_abstractclass.py.
Called by the 'HMMRepRequestCreate' request class in rep_io.py.
:param kwargs:
* qsr_type: The type of HMM, needs to be a key in 'hmm_types_available'
* qsr_seq: The list of lists of the QSR state chains
* store: Unused. Might leave that to client side.
:return: A 'HMMReqResponseCreate' object containing the resulting data
"""
hmm = self.hmm_types_available[kwargs["qsr_type"]]().get_hmm(
**kwargs
)
return HMMReqResponseCreate(data=self.__create_dict_from_hmm(hmm), qsr_type=kwargs["qsr_type"])
@ServiceManager.service_function(namespace, HMMRepRequestSample, HMMReqResponseSample)
def sample(self, **kwargs):
"""Generates samples from the given HMM by calling the get_samples
function in hmm_abstractclass.py.
        Called by the 'HMMRepRequestSample' request class in rep_io.py.
        :param kwargs:
            * qsr_type: The type of HMM, needs to be a key in 'hmm_types_available'
            * dictionary: The dictionary representation of the HMM from which to sample
            * max_length: The maximum length of the sample. This will be kept if at all possible
            * num_samples: The number of samples to take
            * lookup_table: Only for 'generic': the symbol lookup table used to size the alphabet
        :return: A 'HMMReqResponseSample' object containing the resulting data
"""
num_symbols = len(kwargs["lookup_table"]) if kwargs["qsr_type"] == "generic" else self.hmm_types_available[kwargs["qsr_type"]]().get_num_possible_states()
sample = self.hmm_types_available[kwargs["qsr_type"]]().get_samples(
hmm=self.__create_hmm_from_dict(dictionary=kwargs["dictionary"], qsr_type=kwargs["qsr_type"], num_symbols=num_symbols),
**kwargs
)
return HMMReqResponseSample(data=json.dumps(sample), qsr_type=kwargs["qsr_type"])
@ServiceManager.service_function(namespace, HMMRepRequestLogLikelihood, HMMReqResponseLogLikelihood)
def log_likelihood(self, **kwargs):
"""Calculates the cummulative loglikelihood for the given state chains and the given HMM
by calling the get_log_likelihood function in hmm_abstractclass.py.
Called by the 'HMMRepRequestLogLikelihood' request class in rep_io.py.
:param kwargs:
* qsr_type: The type of HMM, needs to be a key in 'hmm_types_available'
            * dictionary: The dictionary representation of the HMM to score the state chains against
            * lookup_table: Only for 'generic': the symbol lookup table used to size the alphabet
* qsr_seq: A list of lists of QSR state chains to check against the given HMM
:return: A 'HMMReqResponseLogLikelihood' object containing the resulting data
"""
num_symbols = len(kwargs["lookup_table"]) if kwargs["qsr_type"] == "generic" else self.hmm_types_available[kwargs["qsr_type"]]().get_num_possible_states()
loglike = self.hmm_types_available[kwargs["qsr_type"]]().get_log_likelihood(
hmm=self.__create_hmm_from_dict(dictionary=kwargs["dictionary"], qsr_type=kwargs["qsr_type"], num_symbols=num_symbols),
**kwargs
)
return HMMReqResponseLogLikelihood(data=json.dumps(loglike), qsr_type=kwargs["qsr_type"])
def __create_dict_from_hmm(self, hmm):
"""Creates a dictionary representation of the hmm.
        :param hmm: the ghmm hmm object to be represented as a dictionary
        :return: The dictionary containing the transition, emission, and start probabilities
"""
trans, emi, start = hmm.asMatrices()
ret = {
self.TRANS: trans,
self.EMI: emi,
self.START: start,
}
return ret
def __create_hmm_from_dict(self, dictionary, qsr_type, num_symbols):
"""Creates a hmm from the xml representation. Not nice to use tempfile
but not otherwise possible due to hidden code and swig in ghmm.
:param xml: The xml string
:return: the ghmm hmm object
"""
symbols = self.hmm_types_available[qsr_type]().generate_alphabet(num_symbols)
hmm = gh.HMMFromMatrices(
symbols,
gh.DiscreteDistribution(symbols),
dictionary[self.TRANS],
dictionary[self.EMI],
dictionary[self.START]
)
return hmm
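# A minimal usage sketch (illustrative only; 'qsr_seq' is a hypothetical
# list of QSR state chains, and the decorator in rep_io.py may impose its
# own calling convention on these service functions):
#
#   rep = RepHMM()
#   created = rep.create(qsr_type='qtcc', qsr_seq=qsr_seq)
#   resp = rep.log_likelihood(qsr_type='qtcc', qsr_seq=qsr_seq,
#                             dictionary=created.data)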
|
mit
|
evaschalde/odoo
|
addons/mail/wizard/invite.py
|
268
|
5847
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import tools
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.translate import _
class invite_wizard(osv.osv_memory):
""" Wizard to invite partners and make them followers. """
_name = 'mail.wizard.invite'
_description = 'Invite wizard'
def default_get(self, cr, uid, fields, context=None):
result = super(invite_wizard, self).default_get(cr, uid, fields, context=context)
user_name = self.pool.get('res.users').name_get(cr, uid, [uid], context=context)[0][1]
model = result.get('res_model')
res_id = result.get('res_id')
if 'message' in fields and model and res_id:
ir_model = self.pool.get('ir.model')
model_ids = ir_model.search(cr, uid, [('model', '=', self.pool[model]._name)], context=context)
model_name = ir_model.name_get(cr, uid, model_ids, context=context)[0][1]
document_name = self.pool[model].name_get(cr, uid, [res_id], context=context)[0][1]
            message = _('<div><p>Hello,</p><p>%s invited you to follow %s document: %s.</p></div>') % (user_name, model_name, document_name)
result['message'] = message
elif 'message' in fields:
result['message'] = _('<div><p>Hello,</p><p>%s invited you to follow a new document.</p></div>') % user_name
return result
_columns = {
'res_model': fields.char('Related Document Model',
required=True, select=1,
help='Model of the followed resource'),
'res_id': fields.integer('Related Document ID', select=1,
help='Id of the followed resource'),
'partner_ids': fields.many2many('res.partner', string='Recipients',
help="List of partners that will be added as follower of the current document."),
'message': fields.html('Message'),
'send_mail': fields.boolean('Send Email',
help="If checked, the partners will receive an email warning they have been "
"added in the document's followers."),
}
_defaults = {
'send_mail': True,
}
def add_followers(self, cr, uid, ids, context=None):
for wizard in self.browse(cr, uid, ids, context=context):
model_obj = self.pool[wizard.res_model]
document = model_obj.browse(cr, uid, wizard.res_id, context=context)
# filter partner_ids to get the new followers, to avoid sending email to already following partners
new_follower_ids = [p.id for p in wizard.partner_ids if p not in document.message_follower_ids]
model_obj.message_subscribe(cr, uid, [wizard.res_id], new_follower_ids, context=context)
ir_model = self.pool.get('ir.model')
model_ids = ir_model.search(cr, uid, [('model', '=', model_obj._name)], context=context)
model_name = ir_model.name_get(cr, uid, model_ids, context=context)[0][1]
# send an email if option checked and if a message exists (do not send void emails)
if wizard.send_mail and wizard.message and not wizard.message == '<br>': # when deleting the message, cleditor keeps a <br>
# add signature
# FIXME 8.0: use notification_email_send, send a wall message and let mail handle email notification + message box
signature_company = self.pool.get('mail.notification').get_signature_footer(cr, uid, user_id=uid, res_model=wizard.res_model, res_id=wizard.res_id, context=context)
wizard.message = tools.append_content_to_html(wizard.message, signature_company, plaintext=False, container_tag='div')
# send mail to new followers
# the invite wizard should create a private message not related to any object -> no model, no res_id
mail_mail = self.pool.get('mail.mail')
mail_id = mail_mail.create(cr, uid, {
'model': wizard.res_model,
'res_id': wizard.res_id,
'record_name': document.name_get()[0][1],
'email_from': self.pool['mail.message']._get_default_from(cr, uid, context=context),
'reply_to': self.pool['mail.message']._get_default_from(cr, uid, context=context),
'subject': _('Invitation to follow %s: %s') % (model_name, document.name_get()[0][1]),
'body_html': '%s' % wizard.message,
'auto_delete': True,
'message_id': self.pool['mail.message']._get_message_id(cr, uid, {'no_auto_thread': True}, context=context),
'recipient_ids': [(4, id) for id in new_follower_ids]
}, context=context)
mail_mail.send(cr, uid, [mail_id], context=context)
return {'type': 'ir.actions.act_window_close'}
|
agpl-3.0
|
ccarouge/cwsl-ctools
|
indices/nino34.py
|
4
|
1823
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Description: Calculate the Niño 3.4 index from an input netCDF.
Requires program CDO (Climate Data Operators)
Authors: Tim Bedin [email protected]
Copyright: 2014 CSIRO
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tempfile
import subprocess
import argparse
def main(infile, outfile):
timeseries_tempfile = tempfile.NamedTemporaryFile()
# Select the lon/lat box required
# 170W to 120W, 5N to 5S.
ts_call = ['cdo', '-s', '-fldmean',
'-sellonlatbox,-170,-120,5,-5',
infile,
timeseries_tempfile.name]
subprocess.call(ts_call)
# Subtract the mean of the timeseries from the
# timeseries to calculate the index.
final_call = ['cdo', '-s', '-sub',
timeseries_tempfile.name,
'-timmean', timeseries_tempfile.name,
outfile]
subprocess.call(final_call)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Calculate the Niño 3.4 index from an input sea surface temperature file.')
parser.add_argument('input', help='input sea surface temperature netCDF')
    parser.add_argument('output', help='output netCDF Niño 3.4 timeseries')
args = parser.parse_args()
main(args.input, args.output)
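# Example invocation (hypothetical file names):
#   python nino34.py sst_monthly.nc nino34_index.nc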
|
apache-2.0
|
shepdelacreme/ansible
|
lib/ansible/modules/storage/purestorage/purefa_snap.py
|
32
|
6610
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Simon Dodsley ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_snap
version_added: '2.4'
short_description: Manage volume snapshots on Pure Storage FlashArrays
description:
- Create or delete volumes and volume snapshots on Pure Storage FlashArray.
author:
- Simon Dodsley (@sdodsley)
options:
name:
description:
- The name of the source volume.
required: true
suffix:
description:
- Suffix of snapshot name.
target:
description:
- Name of target volume if creating from snapshot.
overwrite:
description:
- Define whether to overwrite existing volume when creating from snapshot.
type: bool
default: 'no'
state:
description:
- Define whether the volume snapshot should exist or not.
choices: [ absent, copy, present ]
default: present
eradicate:
description:
- Define whether to eradicate the snapshot on delete or leave in trash.
type: bool
default: 'no'
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: Create snapshot foo.ansible
purefa_snap:
name: foo
suffix: ansible
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: present
- name: Create R/W clone foo_clone from snapshot foo.snap
purefa_snap:
name: foo
suffix: snap
target: foo_clone
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: copy
- name: Overwrite existing volume foo_clone with snapshot foo.snap
purefa_snap:
name: foo
suffix: snap
target: foo_clone
overwrite: true
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: copy
- name: Delete and eradicate snapshot named foo.snap
purefa_snap:
name: foo
suffix: snap
eradicate: true
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
state: absent
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
from datetime import datetime
try:
from purestorage import purestorage
HAS_PURESTORAGE = True
except ImportError:
HAS_PURESTORAGE = False
def get_volume(module, array):
"""Return Volume or None"""
try:
return array.get_volume(module.params['name'])
except:
return None
def get_target(module, array):
"""Return Volume or None"""
try:
return array.get_volume(module.params['target'])
except:
return None
def get_snapshot(module, array):
"""Return Snapshot or None"""
try:
snapname = module.params['name'] + "." + module.params['suffix']
for s in array.get_volume(module.params['name'], snap='true'):
if s['name'] == snapname:
return snapname
except:
return None
def create_snapshot(module, array):
"""Create Snapshot"""
changed = True
if not module.check_mode:
try:
array.create_snapshot(module.params['name'], suffix=module.params['suffix'])
except:
changed = False
module.exit_json(changed=changed)
def create_from_snapshot(module, array):
"""Create Volume from Snapshot"""
source = module.params['name'] + "." + module.params['suffix']
tgt = get_target(module, array)
if tgt is None:
changed = True
if not module.check_mode:
array.copy_volume(source,
module.params['target'])
elif tgt is not None and module.params['overwrite']:
changed = True
if not module.check_mode:
array.copy_volume(source,
module.params['target'],
overwrite=module.params['overwrite'])
elif tgt is not None and not module.params['overwrite']:
changed = False
module.exit_json(changed=changed)
def update_snapshot(module, array):
"""Update Snapshot"""
changed = False
module.exit_json(changed=changed)
def delete_snapshot(module, array):
""" Delete Snapshot"""
changed = True
if not module.check_mode:
snapname = module.params['name'] + "." + module.params['suffix']
try:
array.destroy_volume(snapname)
if module.params['eradicate']:
try:
array.eradicate_volume(snapname)
except:
changed = False
except:
changed = False
module.exit_json(changed=changed)
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(dict(
name=dict(type='str', required=True),
suffix=dict(type='str'),
target=dict(type='str'),
overwrite=dict(type='bool', default=False),
eradicate=dict(type='bool', default=False),
state=dict(type='str', default='present', choices=['absent', 'copy', 'present']),
))
required_if = [('state', 'copy', ['target', 'suffix'])]
module = AnsibleModule(argument_spec,
required_if=required_if,
supports_check_mode=True)
if not HAS_PURESTORAGE:
        module.fail_json(msg='purestorage sdk is required for this module')
if module.params['suffix'] is None:
suffix = "snap-" + str((datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0, 0)).total_seconds())
module.params['suffix'] = suffix.replace(".", "")
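    # With no explicit suffix the default looks like, e.g.,
    # 'snap-1509145912345678' (illustrative: epoch seconds with the
    # decimal point stripped).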
state = module.params['state']
array = get_system(module)
volume = get_volume(module, array)
target = get_target(module, array)
snap = get_snapshot(module, array)
if state == 'present' and volume and not snap:
create_snapshot(module, array)
elif state == 'present' and volume and snap:
update_snapshot(module, array)
elif state == 'present' and not volume:
update_snapshot(module, array)
elif state == 'copy' and snap:
create_from_snapshot(module, array)
elif state == 'copy' and not snap:
update_snapshot(module, array)
elif state == 'absent' and snap:
delete_snapshot(module, array)
elif state == 'absent' and not snap:
module.exit_json(changed=False)
if __name__ == '__main__':
main()
|
gpl-3.0
|
sloot14/flexifod
|
simplejson/scanner.py
|
674
|
2560
|
"""JSON token scanner
"""
import re
def _import_c_make_scanner():
try:
from simplejson._speedups import make_scanner
return make_scanner
except ImportError:
return None
c_make_scanner = _import_c_make_scanner()
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
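# Illustrative matches for NUMBER_RE (not part of the public API):
#   NUMBER_RE.match('-12.5e3').groups() == ('-12', '.5', 'e3')
#   NUMBER_RE.match('0').groups() == ('0', None, None)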
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
object_pairs_hook = context.object_pairs_hook
memo = context.memo
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict,
_scan_once, object_hook, object_pairs_hook, memo)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
def scan_once(string, idx):
try:
return _scan_once(string, idx)
finally:
memo.clear()
return scan_once
make_scanner = c_make_scanner or py_make_scanner
|
mit
|
usakhelo/FreeCAD
|
src/Mod/OpenSCAD/ply/yacc.py
|
9
|
128492
|
# -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup ([email protected]),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what might be
# considered good Python "coding style."  Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
__version__ = "3.4"
__tabversion__ = "3.2" # Table version
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = 1 # Debugging mode. If set, yacc generates a
                               # 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = 0 # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
import re, types, sys, os.path
# Compatibility function for python 2.6/3.0
if sys.version_info[0] < 3:
def func_code(f):
return f.func_code
else:
def func_code(f):
return f.__code__
# Compatibility
try:
MAXINT = sys.maxint
except AttributeError:
MAXINT = sys.maxsize
# Python 2.x/3.0 compatibility.
def load_ply_lex():
if sys.version_info[0] < 3:
import lex
else:
import ply.lex as lex
return lex
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
def __init__(self,f):
self.f = f
def debug(self,msg,*args,**kwargs):
self.f.write((msg % args) + "\n")
info = debug
def warning(self,msg,*args,**kwargs):
self.f.write("WARNING: "+ (msg % args) + "\n")
def error(self,msg,*args,**kwargs):
self.f.write("ERROR: " + (msg % args) + "\n")
critical = debug
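# Illustrative usage (not part of PLY): any object with a write() method can
# back a PlyLogger, and the resulting logger can be handed to this version's
# yacc() entry point through its errorlog= and debuglog= keyword arguments:
#
#     log = PlyLogger(open("parser.log", "w"))
#     parser = yacc.yacc(debuglog=log, errorlog=log)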
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
def __getattribute__(self,name):
return self
def __call__(self,*args,**kwargs):
return self
# Exception raised for yacc-related errors
class YaccError(Exception): pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
repr_str = repr(r)
if '\n' in repr_str: repr_str = repr(repr_str)
if len(repr_str) > resultlimit:
repr_str = repr_str[:resultlimit]+" ..."
result = "<%s @ 0x%x> (%s)" % (type(r).__name__,id(r),repr_str)
return result
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
repr_str = repr(r)
if '\n' in repr_str: repr_str = repr(repr_str)
if len(repr_str) < 16:
return repr_str
else:
return "<%s @ 0x%x>" % (type(r).__name__,id(r))
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
def __str__(self): return self.type
def __repr__(self): return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
def __init__(self,s,stack=None):
self.slice = s
self.stack = stack
self.lexer = None
self.parser= None
def __getitem__(self,n):
if n >= 0: return self.slice[n].value
else: return self.stack[n].value
def __setitem__(self,n,v):
self.slice[n].value = v
def __getslice__(self,i,j):
return [s.value for s in self.slice[i:j]]
def __len__(self):
return len(self.slice)
def lineno(self,n):
return getattr(self.slice[n],"lineno",0)
def set_lineno(self,n,lineno):
self.slice[n].lineno = lineno
def linespan(self,n):
startline = getattr(self.slice[n],"lineno",0)
endline = getattr(self.slice[n],"endlineno",startline)
return startline,endline
def lexpos(self,n):
return getattr(self.slice[n],"lexpos",0)
def lexspan(self,n):
startpos = getattr(self.slice[n],"lexpos",0)
endpos = getattr(self.slice[n],"endlexpos",startpos)
return startpos,endpos
def error(self):
raise SyntaxError
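# Illustrative sketch (not part of PLY itself): a grammar action function
# receives a YaccProduction as its argument. Indexing reads and assigns the
# .value attribute of the underlying symbols; lineno()/lexspan() expose the
# positional information described above.
def _example_p_expr_plus(p):
    'expr : expr PLUS term'
    p[0] = p[1] + p[3]        # combine the two operand values
    _ = p.lineno(2)           # line number of the PLUS token (0 if unset)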
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
def __init__(self,lrtab,errorf):
self.productions = lrtab.lr_productions
self.action = lrtab.lr_action
self.goto = lrtab.lr_goto
self.errorfunc = errorf
def errok(self):
self.errorok = 1
def restart(self):
del self.statestack[:]
del self.symstack[:]
sym = YaccSymbol()
sym.type = '$end'
self.symstack.append(sym)
self.statestack.append(0)
def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
if debug or yaccdevel:
if isinstance(debug,int):
debug = PlyLogger(sys.stderr)
return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
elif tracking:
return self.parseopt(input,lexer,debug,tracking,tokenfunc)
else:
return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
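    # Illustrative usage (not part of PLY): typical calls and the engine
    # variant they dispatch to, given the logic above:
    #
    #     result = parser.parse(data)                  # parseopt_notrack()
    #     result = parser.parse(data, tracking=1)      # parseopt()
    #     result = parser.parse(data, debug=1)         # parsedebug() to stderr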
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. For the non-debugging version,
# copy this code to a method parseopt() and delete all of the sections
# enclosed in:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# --! DEBUG
debug.info("PLY: PARSE DEBUG START")
# --! DEBUG
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = "$end"
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
# --! DEBUG
debug.debug('')
debug.debug('State : %s', state)
# --! DEBUG
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = "$end"
# --! DEBUG
debug.debug('Stack : %s',
("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
# --! DEBUG
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
# --! DEBUG
debug.debug("Action : Shift and goto state %s", t)
# --! DEBUG
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
# --! DEBUG
if plen:
debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
else:
debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
# --! DEBUG
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# --! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1,"endlineno",t1.lineno)
sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
# --! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
# --! DEBUG
debug.info("Result : %s", format_result(pslice[0]))
# --! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
# --! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
# --! TRACKING
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
# --! DEBUG
debug.info("Result : %s", format_result(pslice[0]))
# --! DEBUG
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
result = getattr(n,"value",None)
# --! DEBUG
debug.info("Done : Returning %s", format_result(result))
debug.info("PLY: PARSE DEBUG END")
# --! DEBUG
return result
            if t is None:
# --! DEBUG
debug.error('Error : %s',
("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
# --! DEBUG
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == "$end":
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = errtoken.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != "$end":
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == "$end":
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY.
# Edit the debug version above, then copy any modifications to the method
# below while removing #--! DEBUG sections.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# --! TRACKING
if tracking:
t1 = targ[1]
sym.lineno = t1.lineno
sym.lexpos = t1.lexpos
t1 = targ[-1]
sym.endlineno = getattr(t1,"endlineno",t1.lineno)
sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
# --! TRACKING
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
# --! TRACKING
if tracking:
sym.lineno = lexer.lineno
sym.lexpos = lexer.lexpos
# --! TRACKING
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
            if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = errtoken.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
# code in the #--! TRACKING sections
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
lookahead = None # Current lookahead symbol
lookaheadstack = [ ] # Stack of lookahead symbols
actions = self.action # Local reference to action table (to avoid lookup on self.)
goto = self.goto # Local reference to goto table (to avoid lookup on self.)
prod = self.productions # Local reference to production list (to avoid lookup on self.)
pslice = YaccProduction(None) # Production object passed to grammar rules
errorcount = 0 # Used during error recovery
# If no lexer was given, we will try to use the lex module
if not lexer:
lex = load_ply_lex()
lexer = lex.lexer
# Set up the lexer and parser objects on pslice
pslice.lexer = lexer
pslice.parser = self
# If input was supplied, pass to lexer
if input is not None:
lexer.input(input)
if tokenfunc is None:
# Tokenize function
get_token = lexer.token
else:
get_token = tokenfunc
# Set up the state and symbol stacks
statestack = [ ] # Stack of parsing states
self.statestack = statestack
symstack = [ ] # Stack of grammar symbols
self.symstack = symstack
pslice.stack = symstack # Put in the production
errtoken = None # Err token
# The start state is assumed to be (0,$end)
statestack.append(0)
sym = YaccSymbol()
sym.type = '$end'
symstack.append(sym)
state = 0
while 1:
# Get the next symbol on the input. If a lookahead symbol
# is already set, we just use that. Otherwise, we'll pull
# the next token off of the lookaheadstack or from the lexer
if not lookahead:
if not lookaheadstack:
lookahead = get_token() # Get the next token
else:
lookahead = lookaheadstack.pop()
if not lookahead:
lookahead = YaccSymbol()
lookahead.type = '$end'
# Check the action table
ltype = lookahead.type
t = actions[state].get(ltype)
if t is not None:
if t > 0:
# shift a symbol on the stack
statestack.append(t)
state = t
symstack.append(lookahead)
lookahead = None
# Decrease error count on successful shift
if errorcount: errorcount -=1
continue
if t < 0:
# reduce a symbol on the stack, emit a production
p = prod[-t]
pname = p.name
plen = p.len
# Get production function
sym = YaccSymbol()
sym.type = pname # Production name
sym.value = None
if plen:
targ = symstack[-plen-1:]
targ[0] = sym
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# below as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
del symstack[-plen:]
del statestack[-plen:]
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
targ = [ sym ]
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The code enclosed in this section is duplicated
# above as a performance optimization. Make sure
# changes get made in both locations.
pslice.slice = targ
try:
# Call the grammar rule with our special slice object
p.callable(pslice)
symstack.append(sym)
state = goto[statestack[-1]][pname]
statestack.append(state)
except SyntaxError:
# If an error was set. Enter error recovery state
lookaheadstack.append(lookahead)
symstack.pop()
statestack.pop()
state = statestack[-1]
sym.type = 'error'
lookahead = sym
errorcount = error_count
self.errorok = 0
continue
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
if t == 0:
n = symstack[-1]
return getattr(n,"value",None)
            if t is None:
# We have some kind of parsing error here. To handle
# this, we are going to push the current token onto
# the tokenstack and replace it with an 'error' token.
# If there are any synchronization rules, they may
# catch it.
#
                # In addition to pushing the error token, we call
# the user defined p_error() function if this is the
# first syntax error. This function is only called if
# errorcount == 0.
if errorcount == 0 or self.errorok:
errorcount = error_count
self.errorok = 0
errtoken = lookahead
if errtoken.type == '$end':
errtoken = None # End of file!
if self.errorfunc:
global errok,token,restart
errok = self.errok # Set some special functions available in error recovery
token = get_token
restart = self.restart
if errtoken and not hasattr(errtoken,'lexer'):
errtoken.lexer = lexer
tok = self.errorfunc(errtoken)
del errok, token, restart # Delete special functions
if self.errorok:
# User must have done some kind of panic
# mode recovery on their own. The
# returned token is the next lookahead
lookahead = tok
errtoken = None
continue
else:
if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = errtoken.lineno
else: lineno = 0
if lineno:
sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
else:
sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
else:
sys.stderr.write("yacc: Parse error in input. EOF\n")
return
else:
errorcount = error_count
# case 1: the statestack only has 1 entry on it. If we're in this state, the
# entire parse has been rolled back and we're completely hosed. The token is
# discarded and we just keep going.
if len(statestack) <= 1 and lookahead.type != '$end':
lookahead = None
errtoken = None
state = 0
# Nuke the pushback stack
del lookaheadstack[:]
continue
# case 2: the statestack has a couple of entries on it, but we're
# at the end of the file. nuke the top entry and generate an error token
# Start nuking entries on the stack
if lookahead.type == '$end':
# Whoa. We're really hosed here. Bail out
return
if lookahead.type != 'error':
sym = symstack[-1]
if sym.type == 'error':
# Hmmm. Error is on top of stack, we'll just nuke input
# symbol and continue
lookahead = None
continue
t = YaccSymbol()
t.type = 'error'
if hasattr(lookahead,"lineno"):
t.lineno = lookahead.lineno
t.value = lookahead
lookaheadstack.append(lookahead)
lookahead = t
else:
symstack.pop()
statestack.pop()
state = statestack[-1] # Potential bug fix
continue
# Call an error function here
raise RuntimeError("yacc: internal parser error!!!\n")
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
import re
# regex matching identifiers
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
reduced = 0
def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = [ ]
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = "%s -> %s" % (self.name," ".join(self.prod))
else:
self.str = "%s -> <empty>" % self.name
def __str__(self):
return self.str
def __repr__(self):
return "Production("+str(self)+")"
def __len__(self):
return len(self.prod)
    def __nonzero__(self):
        return 1
    __bool__ = __nonzero__   # Python 3: bool() would otherwise fall back to __len__
def __getitem__(self,index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self,n):
if n > len(self.prod): return None
p = LRItem(self,n)
# Precompute the list of productions immediately following. Hack. Remove later
try:
p.lr_after = Prodnames[p.prod[n+1]]
except (IndexError,KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self,pdict):
if self.func:
self.callable = pdict[self.func]
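# Illustrative sketch (not part of PLY): constructing a Production directly
# shows the derived attributes that the table generator relies on.
def _example_production():
    p = Production(1, 'expr', ['expr', 'PLUS', 'term'])
    assert str(p) == "expr -> expr PLUS term"
    assert len(p) == 3 and p.usyms == ['expr', 'PLUS', 'term']
    return p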
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
def __init__(self,str,name,len,func,file,line):
self.name = name
self.len = len
self.func = func
self.callable = None
self.file = file
self.line = line
self.str = str
def __str__(self):
return self.str
def __repr__(self):
return "MiniProduction(%s)" % self.str
# Bind the production function name to a callable
def bind(self,pdict):
if self.func:
self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# are the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next - Next LR item. For example, if we are 'expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
def __init__(self,p,n):
self.name = p.name
self.prod = list(p.prod)
self.number = p.number
self.lr_index = n
self.lookaheads = { }
self.prod.insert(n,".")
self.prod = tuple(self.prod)
self.len = len(self.prod)
self.usyms = p.usyms
def __str__(self):
if self.prod:
s = "%s -> %s" % (self.name," ".join(self.prod))
else:
s = "%s -> <empty>" % self.name
return s
def __repr__(self):
return "LRItem("+str(self)+")"
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
i = len(symbols) - 1
while i >= 0:
if symbols[i] in terminals:
return symbols[i]
i -= 1
return None
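# Illustrative sketch (not part of PLY): scanning from the right,
#     rightmost_terminal(['expr', 'PLUS', 'term'], {'PLUS': [], 'NUM': []})
# returns 'PLUS', the terminal that supplies a rule's default precedence.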
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError): pass
class Grammar(object):
def __init__(self,terminals):
self.Productions = [None] # A list of all of the productions. The first
# entry is always reserved for the purpose of
# building an augmented grammar
self.Prodnames = { } # A dictionary mapping the names of nonterminals to a list of all
# productions of that nonterminal.
self.Prodmap = { } # A dictionary that is only used to detect duplicate
# productions.
self.Terminals = { } # A dictionary mapping the names of terminal symbols to a
# list of the rules where they are used.
for term in terminals:
self.Terminals[term] = []
self.Terminals['error'] = []
self.Nonterminals = { } # A dictionary mapping names of nonterminals to a list
# of rule numbers where they are used.
self.First = { } # A dictionary of precomputed FIRST(x) symbols
self.Follow = { } # A dictionary of precomputed FOLLOW(x) symbols
self.Precedence = { } # Precedence rules for each terminal. Contains tuples of the
# form ('right',level) or ('nonassoc', level) or ('left',level)
self.UsedPrecedence = { } # Precedence rules that were actually used by the grammar.
# This is only used to provide error checking and to generate
# a warning about unused precedence rules.
self.Start = None # Starting symbol for the grammar
def __len__(self):
return len(self.Productions)
def __getitem__(self,index):
return self.Productions[index]
# -----------------------------------------------------------------------------
# set_precedence()
#
# Sets the precedence for a given terminal. assoc is the associativity such as
# 'left','right', or 'nonassoc'. level is a numeric level.
#
# -----------------------------------------------------------------------------
def set_precedence(self,term,assoc,level):
assert self.Productions == [None],"Must call set_precedence() before add_production()"
if term in self.Precedence:
raise GrammarError("Precedence already specified for terminal '%s'" % term)
if assoc not in ['left','right','nonassoc']:
raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
self.Precedence[term] = (assoc,level)
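    # Illustrative usage (not part of PLY): precedence declarations mirror a
    # yacc precedence table and must precede any add_production() call:
    #
    #     g = Grammar(['PLUS', 'TIMES', 'NUM'])
    #     g.set_precedence('PLUS',  'left', 1)
    #     g.set_precedence('TIMES', 'left', 2)    # binds tighter than PLUS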
# -----------------------------------------------------------------------------
# add_production()
#
# Given an action function, this function assembles a production rule and
# computes its precedence level.
#
# The production rule is supplied as a list of symbols. For example,
# a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
# symbols ['expr','PLUS','term'].
#
# Precedence is determined by the precedence of the right-most terminal
# or the precedence of a terminal specified by %prec.
#
# A variety of error checks are performed to make sure production symbols
# are valid and that %prec is used correctly.
# -----------------------------------------------------------------------------
def add_production(self,prodname,syms,func=None,file='',line=0):
if prodname in self.Terminals:
raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
if prodname == 'error':
raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
if not _is_identifier.match(prodname):
raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))
# Look for literal tokens
for n,s in enumerate(syms):
if s[0] in "'\"":
try:
c = eval(s)
if (len(c) > 1):
raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
if not c in self.Terminals:
self.Terminals[c] = []
syms[n] = c
continue
except SyntaxError:
pass
if not _is_identifier.match(s) and s != '%prec':
raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))
# Determine the precedence level
if '%prec' in syms:
if syms[-1] == '%prec':
raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
if syms[-2] != '%prec':
raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
precname = syms[-1]
prodprec = self.Precedence.get(precname,None)
if not prodprec:
raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
else:
self.UsedPrecedence[precname] = 1
del syms[-2:] # Drop %prec from the rule
else:
# If no %prec, precedence is determined by the rightmost terminal symbol
precname = rightmost_terminal(syms,self.Terminals)
prodprec = self.Precedence.get(precname,('right',0))
# See if the rule is already in the rulemap
map = "%s -> %s" % (prodname,syms)
if map in self.Prodmap:
m = self.Prodmap[map]
raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
"Previous definition at %s:%d" % (m.file, m.line))
# From this point on, everything is valid. Create a new Production instance
pnumber = len(self.Productions)
if not prodname in self.Nonterminals:
self.Nonterminals[prodname] = [ ]
# Add the production number to Terminals and Nonterminals
for t in syms:
if t in self.Terminals:
self.Terminals[t].append(pnumber)
else:
if not t in self.Nonterminals:
self.Nonterminals[t] = [ ]
self.Nonterminals[t].append(pnumber)
# Create a production and add it to the list of productions
p = Production(pnumber,prodname,syms,prodprec,func,file,line)
self.Productions.append(p)
self.Prodmap[map] = p
# Add to the global productions list
try:
self.Prodnames[prodname].append(p)
except KeyError:
self.Prodnames[prodname] = [ p ]
return 0
# -----------------------------------------------------------------------------
# set_start()
#
# Sets the starting symbol and creates the augmented grammar. Production
# rule 0 is S' -> start where start is the start symbol.
# -----------------------------------------------------------------------------
def set_start(self,start=None):
if not start:
start = self.Productions[1].name
if start not in self.Nonterminals:
raise GrammarError("start symbol %s undefined" % start)
self.Productions[0] = Production(0,"S'",[start])
self.Nonterminals[start].append(0)
self.Start = start
# -----------------------------------------------------------------------------
# find_unreachable()
#
# Find all of the nonterminal symbols that can't be reached from the starting
# symbol. Returns a list of nonterminals that can't be reached.
# -----------------------------------------------------------------------------
def find_unreachable(self):
# Mark all symbols that are reachable from a symbol s
def mark_reachable_from(s):
if reachable[s]:
# We've already reached symbol s.
return
reachable[s] = 1
for p in self.Prodnames.get(s,[]):
for r in p.prod:
mark_reachable_from(r)
reachable = { }
for s in list(self.Terminals) + list(self.Nonterminals):
reachable[s] = 0
mark_reachable_from( self.Productions[0].prod[0] )
return [s for s in list(self.Nonterminals)
if not reachable[s]]
# -----------------------------------------------------------------------------
# infinite_cycles()
#
# This function looks at the various parsing rules and tries to detect
# infinite recursion cycles (grammar rules where there is no possible way
# to derive a string of only terminals).
# -----------------------------------------------------------------------------
def infinite_cycles(self):
terminates = {}
# Terminals:
for t in self.Terminals:
terminates[t] = 1
terminates['$end'] = 1
# Nonterminals:
# Initialize to false:
for n in self.Nonterminals:
terminates[n] = 0
# Then propagate termination until no change:
while 1:
some_change = 0
for (n,pl) in self.Prodnames.items():
# Nonterminal n terminates iff any of its productions terminates.
for p in pl:
# Production p terminates iff all of its rhs symbols terminate.
for s in p.prod:
if not terminates[s]:
# The symbol s does not terminate,
# so production p does not terminate.
p_terminates = 0
break
else:
# didn't break from the loop,
# so every symbol s terminates
# so production p terminates.
p_terminates = 1
if p_terminates:
# symbol n terminates!
if not terminates[n]:
terminates[n] = 1
some_change = 1
# Don't need to consider any more productions for this n.
break
if not some_change:
break
infinite = []
for (s,term) in terminates.items():
if not term:
if not s in self.Prodnames and not s in self.Terminals and s != 'error':
# s is used-but-not-defined, and we've already warned of that,
# so it would be overkill to say that it's also non-terminating.
pass
else:
infinite.append(s)
return infinite
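    # Illustrative sketch (not part of PLY): a rule such as 'a : a NUM' with no
    # terminating alternative can never derive a string of only terminals, so
    # 'a' would be returned by infinite_cycles().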
# -----------------------------------------------------------------------------
# undefined_symbols()
#
# Find all symbols that were used in the grammar, but not defined as tokens or
# grammar rules.  Returns a list of tuples (sym, prod) where sym is the symbol
# and prod is the production where the symbol was used.
# -----------------------------------------------------------------------------
def undefined_symbols(self):
result = []
for p in self.Productions:
if not p: continue
for s in p.prod:
if not s in self.Prodnames and not s in self.Terminals and s != 'error':
result.append((s,p))
return result
# -----------------------------------------------------------------------------
# unused_terminals()
#
# Find all terminals that were defined, but not used by the grammar.  Returns
# a list of the unused terminal symbols.
# -----------------------------------------------------------------------------
def unused_terminals(self):
unused_tok = []
for s,v in self.Terminals.items():
if s != 'error' and not v:
unused_tok.append(s)
return unused_tok
# ------------------------------------------------------------------------------
# unused_rules()
#
# Find all grammar rules that were defined, but not used (maybe not reachable)
# Returns a list of productions.
# ------------------------------------------------------------------------------
def unused_rules(self):
unused_prod = []
for s,v in self.Nonterminals.items():
if not v:
p = self.Prodnames[s][0]
unused_prod.append(p)
return unused_prod
# -----------------------------------------------------------------------------
# unused_precedence()
#
# Returns a list of tuples (term,precedence) corresponding to precedence
# rules that were never used by the grammar. term is the name of the terminal
# on which precedence was applied and precedence is a string such as 'left' or
# 'right' corresponding to the type of precedence.
# -----------------------------------------------------------------------------
def unused_precedence(self):
unused = []
for termname in self.Precedence:
if not (termname in self.Terminals or termname in self.UsedPrecedence):
unused.append((termname,self.Precedence[termname][0]))
return unused
# -------------------------------------------------------------------------
# _first()
#
# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
#
# During execution of compute_first1, the result may be incomplete.
# Afterward (e.g., when called from compute_follow()), it will be complete.
# -------------------------------------------------------------------------
def _first(self,beta):
# We are computing First(x1,x2,x3,...,xn)
result = [ ]
for x in beta:
x_produces_empty = 0
# Add all the non-<empty> symbols of First[x] to the result.
for f in self.First[x]:
if f == '<empty>':
x_produces_empty = 1
else:
if f not in result: result.append(f)
if x_produces_empty:
# We have to consider the next x in beta,
# i.e. stay in the loop.
pass
else:
# We don't have to consider any further symbols in beta.
break
else:
# There was no 'break' from the loop,
# so x_produces_empty was true for all x in beta,
# so beta produces empty as well.
result.append('<empty>')
return result
# -------------------------------------------------------------------------
# compute_first()
#
# Compute the value of FIRST1(X) for all symbols
# -------------------------------------------------------------------------
def compute_first(self):
if self.First:
return self.First
# Terminals:
for t in self.Terminals:
self.First[t] = [t]
self.First['$end'] = ['$end']
# Nonterminals:
# Initialize to the empty set:
for n in self.Nonterminals:
self.First[n] = []
# Then propagate symbols until no change:
while 1:
some_change = 0
for n in self.Nonterminals:
for p in self.Prodnames[n]:
for f in self._first(p.prod):
if f not in self.First[n]:
self.First[n].append( f )
some_change = 1
if not some_change:
break
return self.First
# ---------------------------------------------------------------------
# compute_follow()
#
# Computes all of the follow sets for every non-terminal symbol. The
# follow set is the set of all symbols that might follow a given
# non-terminal. See the Dragon book, 2nd Ed. p. 189.
# ---------------------------------------------------------------------
def compute_follow(self,start=None):
# If already computed, return the result
if self.Follow:
return self.Follow
# If first sets not computed yet, do that first.
if not self.First:
self.compute_first()
# Add '$end' to the follow list of the start symbol
for k in self.Nonterminals:
self.Follow[k] = [ ]
if not start:
start = self.Productions[1].name
self.Follow[start] = [ '$end' ]
while 1:
didadd = 0
for p in self.Productions[1:]:
# Here is the production set
for i in range(len(p.prod)):
B = p.prod[i]
if B in self.Nonterminals:
# Okay. We got a non-terminal in a production
fst = self._first(p.prod[i+1:])
hasempty = 0
for f in fst:
if f != '<empty>' and f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = 1
if f == '<empty>':
hasempty = 1
if hasempty or i == (len(p.prod)-1):
# Add elements of follow(a) to follow(b)
for f in self.Follow[p.name]:
if f not in self.Follow[B]:
self.Follow[B].append(f)
didadd = 1
if not didadd: break
return self.Follow
# -----------------------------------------------------------------------------
# build_lritems()
#
# This function walks the list of productions and builds a complete set of the
# LR items. The LR items are stored in two ways: First, they are uniquely
# numbered and placed in the list _lritems. Second, a linked list of LR items
# is built for each production. For example:
#
# E -> E PLUS E
#
# Creates the list
#
# [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
# -----------------------------------------------------------------------------
def build_lritems(self):
for p in self.Productions:
lastlri = p
i = 0
lr_items = []
while 1:
if i > len(p):
lri = None
else:
lri = LRItem(p,i)
# Precompute the list of productions immediately following
try:
lri.lr_after = self.Prodnames[lri.prod[i+1]]
except (IndexError,KeyError):
lri.lr_after = []
try:
lri.lr_before = lri.prod[i-1]
except IndexError:
lri.lr_before = None
lastlri.lr_next = lri
if not lri: break
lr_items.append(lri)
lastlri = lri
i += 1
p.lr_items = lr_items
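# Illustrative sketch (not part of PLY): a minimal end-to-end use of the
# Grammar class above. For this grammar, FIRST(expr) = FIRST(term) = ['NUM'],
# and with 'expr' as the start symbol, FOLLOW(expr) = FOLLOW(term) =
# ['$end', 'PLUS'].
def _example_grammar_analysis():
    g = Grammar(['PLUS', 'NUM'])
    g.add_production('expr', ['expr', 'PLUS', 'term'])
    g.add_production('expr', ['term'])
    g.add_production('term', ['NUM'])
    g.set_start('expr')
    return g.compute_first(), g.compute_follow()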
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError): pass
class LRTable(object):
def __init__(self):
self.lr_action = None
self.lr_goto = None
self.lr_productions = None
self.lr_method = None
def read_table(self,module):
if isinstance(module,types.ModuleType):
parsetab = module
else:
if sys.version_info[0] < 3:
exec("import %s as parsetab" % module)
else:
env = { }
exec("import %s as parsetab" % module, env, env)
parsetab = env['parsetab']
if parsetab._tabversion != __tabversion__:
raise VersionError("yacc table file version is out of date")
self.lr_action = parsetab._lr_action
self.lr_goto = parsetab._lr_goto
self.lr_productions = []
for p in parsetab._lr_productions:
self.lr_productions.append(MiniProduction(*p))
self.lr_method = parsetab._lr_method
return parsetab._lr_signature
def read_pickle(self,filename):
try:
import cPickle as pickle
except ImportError:
import pickle
in_f = open(filename,"rb")
tabversion = pickle.load(in_f)
if tabversion != __tabversion__:
raise VersionError("yacc table file version is out of date")
self.lr_method = pickle.load(in_f)
signature = pickle.load(in_f)
self.lr_action = pickle.load(in_f)
self.lr_goto = pickle.load(in_f)
productions = pickle.load(in_f)
self.lr_productions = []
for p in productions:
self.lr_productions.append(MiniProduction(*p))
in_f.close()
return signature
# Bind all production function names to callable objects in pdict
def bind_callables(self,pdict):
for p in self.lr_productions:
p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X,R,FP):
N = { }
for x in X:
N[x] = 0
stack = []
F = { }
for x in X:
if N[x] == 0: traverse(x,N,stack,F,X,R,FP)
return F
def traverse(x,N,stack,F,X,R,FP):
stack.append(x)
d = len(stack)
N[x] = d
F[x] = FP(x) # F(X) <- F'(x)
rel = R(x) # Get y's related to x
for y in rel:
if N[y] == 0:
traverse(y,N,stack,F,X,R,FP)
N[x] = min(N[x],N[y])
for a in F.get(y,[]):
if a not in F[x]: F[x].append(a)
if N[x] == d:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
while element != x:
N[stack[-1]] = MAXINT
F[stack[-1]] = F[x]
element = stack.pop()
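# Illustrative sketch (not part of PLY): digraph() computes the smallest
# set-valued function F satisfying F(x) = FP(x) U union{ F(y) : x R y }.
# For the tiny relation below it returns {'a': ['a', 'b'], 'b': ['b']}.
def _example_digraph():
    X  = ['a', 'b']
    R  = lambda x: ['b'] if x == 'a' else []    # 'a' is related to 'b'
    FP = lambda x: [x]                          # F'(x) = {x}
    return digraph(X, R, FP)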
class LALRError(YaccError): pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self,grammar,method='LALR',log=None):
if method not in ['SLR','LALR']:
raise LALRError("Unsupported method %s" % method)
self.grammar = grammar
self.lr_method = method
# Set up the logger
if not log:
log = NullLogger()
self.log = log
# Internal attributes
self.lr_action = {} # Action table
self.lr_goto = {} # Goto table
self.lr_productions = grammar.Productions # Copy of grammar Production array
self.lr_goto_cache = {} # Cache of computed gotos
self.lr0_cidhash = {} # Cache of closures
self._add_count = 0 # Internal counter used to detect cycles
        # Diagnostic information filled in by the table generator
self.sr_conflict = 0
self.rr_conflict = 0
self.conflicts = [] # List of conflicts
self.sr_conflicts = []
self.rr_conflicts = []
# Build the tables
self.grammar.build_lritems()
self.grammar.compute_first()
self.grammar.compute_follow()
self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self,I):
self._add_count += 1
# Add everything in I to J
J = I[:]
didadd = 1
while didadd:
didadd = 0
for j in J:
for x in j.lr_after:
if getattr(x,"lr0_added",0) == self._add_count: continue
# Add B --> .G to J
J.append(x.lr_next)
x.lr0_added = self._add_count
didadd = 1
return J
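    # Illustrative sketch (not part of PLY): for the augmented grammar built by
    # set_start(), lr0_closure([S' -> . expr]) also pulls in every item of the
    # form 'expr -> . <rhs>', since the dot sits immediately before 'expr'.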
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self,I,x):
# First we look for a previously cached entry
g = self.lr_goto_cache.get((id(I),x),None)
if g: return g
# Now we generate the goto set in a way that guarantees uniqueness
# of the result
s = self.lr_goto_cache.get(x,None)
if not s:
s = { }
self.lr_goto_cache[x] = s
gs = [ ]
for p in I:
n = p.lr_next
if n and n.lr_before == x:
s1 = s.get(id(n),None)
if not s1:
s1 = { }
s[id(n)] = s1
gs.append(n)
s = s1
g = s.get('$end',None)
if not g:
if gs:
g = self.lr0_closure(gs)
s['$end'] = g
else:
s['$end'] = gs
self.lr_goto_cache[(id(I),x)] = g
return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbols
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = { }
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I,x)
if not g: continue
if id(g) in self.lr0_cidhash: continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
    # The method used here is due to DeRemer and Pennello (1982).
#
    # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = {}
num_nullable = 0
while 1:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable[p.name] = 1
continue
for t in p.prod:
if not t in nullable: break
else:
nullable[p.name] = 1
if len(nullable) == num_nullable: break
num_nullable = len(nullable)
return nullable
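    # Illustrative sketch (not part of PLY): given the rules 'opt : COMMA' and
    # the empty alternative 'opt :', the zero-length production marks 'opt' as
    # nullable, so this method would return {'opt': 1}.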
# -----------------------------------------------------------------------------
    # find_nonterminal_transitions(C)
    #
    # Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self,C):
trans = []
for state in range(len(C)):
for p in C[state]:
if p.lr_index < p.len - 1:
t = (state,p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans: trans.append(t)
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self,C,trans,nullable):
dr_set = { }
state,N = trans
terms = []
g = self.lr0_goto(C[state],N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms: terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self,C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state],N)
j = self.lr0_cidhash.get(id(g),-1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j,a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self,C,trans,nullable):
lookdict = {} # Dictionary of lookback relations
includedict = {} # Dictionary of include relations
# Make a dictionary of non-terminal transitions
dtrans = {}
for t in trans:
dtrans[t] = 1
# Loop over all transitions and compute lookbacks and includes
for state,N in trans:
lookb = []
includes = []
for p in C[state]:
if p.name != N: continue
# Okay, we have a name match. We now follow the production all the way
# through the state machine until we get the . on the right hand side
lr_index = p.lr_index
j = state
while lr_index < p.len - 1:
lr_index = lr_index + 1
t = p.prod[lr_index]
# Check to see if this symbol and state are a non-terminal transition
if (j,t) in dtrans:
# Yes. Okay, there is some chance that this is an includes relation
# the only way to know for certain is whether the rest of the
# production derives empty
li = lr_index + 1
while li < p.len:
                            if p.prod[li] in self.grammar.Terminals: break      # No, forget it
if not p.prod[li] in nullable: break
li = li + 1
else:
# Appears to be a relation between (j,t) and (state,N)
includes.append((j,t))
g = self.lr0_goto(C[j],t) # Go to next set
j = self.lr0_cidhash.get(id(g),-1) # Go to next state
# When we get here, j is the final state, now we have to locate the production
for r in C[j]:
if r.name != p.name: continue
if r.len != p.len: continue
i = 0
                    # This loop is comparing a production ". A B C" with "A B C ."
while i < r.lr_index:
if r.prod[i] != p.prod[i+1]: break
i = i + 1
else:
lookb.append((j,r))
for i in includes:
if not i in includedict: includedict[i] = []
includedict[i].append((state,N))
lookdict[(state,N)] = lookb
return lookdict,includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
    #            nullable = Set of nullable non-terminals
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self,C, ntrans, nullable):
FP = lambda x: self.dr_relation(C,x,nullable)
R = lambda x: self.reads_relation(C,x,nullable)
F = digraph(ntrans,R,FP)
return F
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self,ntrans,readsets,inclsets):
FP = lambda x: readsets[x]
R = lambda x: inclsets.get(x,[])
F = digraph(ntrans,R,FP)
return F
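# -----------------------------------------------------------------------------
# Illustrative sketch (not PLY's digraph()): the fixed-point algorithm the two
# functions above delegate to. Given nodes X, a relation R(x) -> iterable of
# successors, and base sets FP(x), it computes F(x) = FP(x) unioned with F(y)
# for every related y, merging strongly connected components so mutually
# dependent nodes share one result.
def digraph_sketch(X, R, FP):
    INF = float('inf')
    N = dict.fromkeys(X, 0)
    stack, F = [], {}
    def traverse(x):
        stack.append(x)
        d = len(stack)
        N[x] = d
        F[x] = set(FP(x))
        for y in R(x):
            if N[y] == 0:
                traverse(y)
            N[x] = min(N[x], N[y])
            F[x] |= F.get(y, set())
        if N[x] == d:               # x is the root of an SCC: pop the whole SCC
            while True:
                top = stack.pop()
                N[top] = INF
                F[top] = F[x]
                if top == x:
                    break
    for x in X:
        if N[x] == 0:
            traverse(x)
    return F

F = digraph_sketch(['a', 'b', 'c'],
                   lambda v: {'a': ['b'], 'b': ['a'], 'c': ['a']}[v],
                   lambda v: {'a': ['x'], 'b': ['y'], 'c': ['z']}[v])
print(sorted((k, sorted(F[k])) for k in F))
# [('a', ['x', 'y']), ('b', ['x', 'y']), ('c', ['x', 'y', 'z'])]
# -----------------------------------------------------------------------------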
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self,lookbacks,followset):
for trans,lb in lookbacks.items():
# Loop over productions in lookback
for state,p in lb:
if not state in p.lookaheads:
p.lookaheads[state] = []
f = followset.get(trans,[])
for a in f:
if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self,C):
# Determine all of the nullable nonterminals
nullable = self.compute_nullable_nonterminals()
# Find all non-terminal transitions
trans = self.find_nonterminal_transitions(C)
# Compute read sets
readsets = self.compute_read_sets(C,trans,nullable)
# Compute lookback/includes relations
lookd, included = self.compute_lookback_includes(C,trans,nullable)
# Compute LALR FOLLOW sets
followsets = self.compute_follow_sets(trans,readsets,included)
# Add all of the lookaheads
self.add_lookaheads(lookd,followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
Productions = self.grammar.Productions
Precedence = self.grammar.Precedence
goto = self.lr_goto # Goto array
action = self.lr_action # Action array
log = self.log # Logger for output
actionp = { } # Action production array (temporary)
log.info("Parsing method: %s", self.lr_method)
# Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
# This determines the number of states
C = self.lr0_items()
if self.lr_method == 'LALR':
self.add_lalr_lookaheads(C)
# Build the parser table, state by state
st = 0
for I in C:
# Loop over each production in I
actlist = [ ] # List of actions
st_action = { }
st_actionp = { }
st_goto = { }
log.info("")
log.info("state %d", st)
log.info("")
for p in I:
log.info(" (%d) %s", p.number, str(p))
log.info("")
for p in I:
if p.len == p.lr_index + 1:
if p.name == "S'":
# Start symbol. Accept!
st_action["$end"] = 0
st_actionp["$end"] = p
else:
# We are at the end of a production. Reduce!
if self.lr_method == 'LALR':
laheads = p.lookaheads[st]
else:
laheads = self.grammar.Follow[p.name]
for a in laheads:
actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
r = st_action.get(a,None)
if r is not None:
# Whoa. Have a shift/reduce or reduce/reduce conflict
if r > 0:
# Need to decide on shift or reduce here
# By default we favor shifting. Need to add
# some precedence rules here.
sprec,slevel = Productions[st_actionp[a].number].prec
rprec,rlevel = Precedence.get(a,('right',0))
if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
# We really need to reduce here.
st_action[a] = -p.number
st_actionp[a] = p
if not slevel and not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
self.sr_conflicts.append((st,a,'reduce'))
Productions[p.number].reduced += 1
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the shift
if not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as shift",a)
self.sr_conflicts.append((st,a,'shift'))
elif r < 0:
# Reduce/reduce conflict. In this case, we favor the rule
# that was defined first in the grammar file
oldp = Productions[-r]
pp = Productions[p.number]
if oldp.line > pp.line:
st_action[a] = -p.number
st_actionp[a] = p
chosenp,rejectp = pp,oldp
Productions[p.number].reduced += 1
Productions[oldp.number].reduced -= 1
else:
chosenp,rejectp = oldp,pp
self.rr_conflicts.append((st,chosenp,rejectp))
log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
else:
raise LALRError("Unknown conflict in state %d" % st)
else:
st_action[a] = -p.number
st_actionp[a] = p
Productions[p.number].reduced += 1
else:
i = p.lr_index
a = p.prod[i+1] # Get symbol right after the "."
if a in self.grammar.Terminals:
g = self.lr0_goto(I,a)
j = self.lr0_cidhash.get(id(g),-1)
if j >= 0:
# We are in a shift state
actlist.append((a,p,"shift and go to state %d" % j))
r = st_action.get(a,None)
if r is not None:
# Whoa have a shift/reduce or shift/shift conflict
if r > 0:
if r != j:
raise LALRError("Shift/shift conflict in state %d" % st)
elif r < 0:
# Do a precedence check.
# - if precedence of reduce rule is higher, we reduce.
# - if precedence of reduce is same and left assoc, we reduce.
# - otherwise we shift
rprec,rlevel = Productions[st_actionp[a].number].prec
sprec,slevel = Precedence.get(a,('right',0))
if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
# We decide to shift here... highest precedence to shift
Productions[st_actionp[a].number].reduced -= 1
st_action[a] = j
st_actionp[a] = p
if not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as shift",a)
self.sr_conflicts.append((st,a,'shift'))
elif (slevel == rlevel) and (rprec == 'nonassoc'):
st_action[a] = None
else:
# Hmmm. Guess we'll keep the reduce
if not slevel and not rlevel:
log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
self.sr_conflicts.append((st,a,'reduce'))
else:
raise LALRError("Unknown conflict in state %d" % st)
else:
st_action[a] = j
st_actionp[a] = p
# Print the actions associated with each terminal
_actprint = { }
for a,p,m in actlist:
if a in st_action:
if p is st_actionp[a]:
log.info(" %-15s %s",a,m)
_actprint[(a,m)] = 1
log.info("")
# Print the actions that were not used. (debugging)
not_used = 0
for a,p,m in actlist:
if a in st_action:
if p is not st_actionp[a]:
if not (a,m) in _actprint:
log.debug(" ! %-15s [ %s ]",a,m)
not_used = 1
_actprint[(a,m)] = 1
if not_used:
log.debug("")
# Construct the goto table for this state
nkeys = { }
for ii in I:
for s in ii.usyms:
if s in self.grammar.Nonterminals:
nkeys[s] = None
for n in nkeys:
g = self.lr0_goto(I,n)
j = self.lr0_cidhash.get(id(g),-1)
if j >= 0:
st_goto[n] = j
log.info(" %-30s shift and go to state %d",n,j)
action[st] = st_action
actionp[st] = st_actionp
goto[st] = st_goto
st += 1
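# -----------------------------------------------------------------------------
# Illustrative sketch (not part of PLY): the shift/reduce resolution policy
# coded above, isolated with a single associativity value for simplicity (the
# real code looks the two precedences up in different places). slevel/rlevel
# are the precedence levels of the shift symbol and the reduce rule.
def resolve_shift_reduce(slevel, rlevel, assoc):
    if slevel > rlevel or (slevel == rlevel and assoc == 'right'):
        return 'shift'
    if slevel < rlevel or (slevel == rlevel and assoc == 'left'):
        return 'reduce'
    if slevel == rlevel and assoc == 'nonassoc':
        return 'error'              # the action table entry becomes None
    return 'shift'                  # no precedence given: favor shifting

# For "E : E PLUS E" under precedence ('left', 'PLUS'), the conflict on PLUS
# resolves as a reduce, which is what makes PLUS left-associative:
print(resolve_shift_reduce(1, 1, 'left'))   # reduce
# -----------------------------------------------------------------------------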
# -----------------------------------------------------------------------------
    # write_table()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self,modulename,outputdir='',signature=""):
basemodulename = modulename.split(".")[-1]
filename = os.path.join(outputdir,basemodulename) + ".py"
try:
f = open(filename,"w")
f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = { }
for s,nd in self.lr_action.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_action_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
""")
else:
f.write("\n_lr_action = { ");
for k,v in self.lr_action.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
if smaller:
# Factor out names to try and make smaller
items = { }
for s,nd in self.lr_goto.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_goto_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
""")
else:
f.write("\n_lr_goto = { ");
for k,v in self.lr_goto.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
# Write production table
f.write("_lr_productions = [\n")
for p in self.lr_productions:
if p.func:
f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
else:
f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
f.write("]\n")
f.close()
except IOError:
e = sys.exc_info()[1]
sys.stderr.write("Unable to create '%s'\n" % filename)
sys.stderr.write(str(e)+"\n")
return
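# -----------------------------------------------------------------------------
# Illustrative sketch (hypothetical data, not generated output): the "smaller"
# encoding written above stores, per symbol, parallel lists of states and
# actions, and the emitted module rebuilds the state -> {symbol: action}
# mapping at import time. Positive actions shift to a state, negative actions
# reduce by a rule number, and 0 accepts.
_lr_action_items = {'NUMBER': ([0, 3], [4, -2]), '$end': ([1], [0])}
_lr_action = {}
for _k, _v in _lr_action_items.items():
    for _x, _y in zip(_v[0], _v[1]):
        _lr_action.setdefault(_x, {})[_k] = _y
print(_lr_action)   # {0: {'NUMBER': 4}, 3: {'NUMBER': -2}, 1: {'$end': 0}}
# -----------------------------------------------------------------------------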
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self,filename,signature=""):
try:
import cPickle as pickle
except ImportError:
import pickle
outf = open(filename,"wb")
pickle.dump(__tabversion__,outf,pickle_protocol)
pickle.dump(self.lr_method,outf,pickle_protocol)
pickle.dump(signature,outf,pickle_protocol)
pickle.dump(self.lr_action,outf,pickle_protocol)
pickle.dump(self.lr_goto,outf,pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
else:
outp.append((str(p),p.name,p.len,None,None,None))
pickle.dump(outp,outf,pickle_protocol)
outf.close()
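# -----------------------------------------------------------------------------
# Illustrative sketch (not part of PLY): a reader for the pickle produced
# above. The objects must be unpickled in exactly the order they were dumped.
# The filename is hypothetical.
import pickle

def read_pickled_tables(filename):
    with open(filename, 'rb') as inf:
        tabversion = pickle.load(inf)
        lr_method = pickle.load(inf)
        signature = pickle.load(inf)
        lr_action = pickle.load(inf)
        lr_goto = pickle.load(inf)
        productions = pickle.load(inf)
    return tabversion, lr_method, signature, lr_action, lr_goto, productions
# -----------------------------------------------------------------------------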
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
try:
raise RuntimeError
except RuntimeError:
e,b,t = sys.exc_info()
f = t.tb_frame
while levels > 0:
f = f.f_back
levels -= 1
ldict = f.f_globals.copy()
if f.f_globals != f.f_locals:
ldict.update(f.f_locals)
return ldict
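# -----------------------------------------------------------------------------
# Illustrative sketch (not part of PLY): the same "grab the caller's
# namespace" effect via sys._getframe, which avoids raising an exception just
# to obtain a traceback frame. Note that sys._getframe is CPython-specific.
import sys

def caller_dict_sketch(levels=1):
    f = sys._getframe(levels)
    d = f.f_globals.copy()
    if f.f_globals is not f.f_locals:
        d.update(f.f_locals)
    return d

def demo():
    local_name = 42                 # visible to the callee below
    return 'local_name' in caller_dict_sketch(1)

print(demo())   # True
# -----------------------------------------------------------------------------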
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc,file,line):
grammar = []
# Split the doc string into lines
pstrings = doc.splitlines()
lastp = None
dline = line
for ps in pstrings:
dline += 1
p = ps.split()
if not p: continue
try:
if p[0] == '|':
# This is a continuation of a previous rule
if not lastp:
raise SyntaxError("%s:%d: Misplaced '|'" % (file,dline))
prodname = lastp
syms = p[1:]
else:
prodname = p[0]
lastp = prodname
syms = p[2:]
assign = p[1]
if assign != ':' and assign != '::=':
raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,dline))
grammar.append((file,dline,prodname,syms))
except SyntaxError:
raise
except Exception:
raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,dline,ps.strip()))
return grammar
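# -----------------------------------------------------------------------------
# Illustrative sketch: the docstring shape parse_grammar() above expects, and
# the (file, line, prodname, syms) tuples it returns. The file name and line
# number are hypothetical.
doc = '''expression : expression PLUS term
                    | term'''
for entry in parse_grammar(doc, 'calc.py', 10):
    print(entry)
# ('calc.py', 11, 'expression', ['expression', 'PLUS', 'term'])
# ('calc.py', 12, 'expression', ['term'])
# -----------------------------------------------------------------------------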
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
def __init__(self,pdict,log=None):
self.pdict = pdict
self.start = None
self.error_func = None
self.tokens = None
self.files = {}
self.grammar = []
self.error = 0
if log is None:
self.log = PlyLogger(sys.stderr)
else:
self.log = log
# Get all of the basic information
def get_all(self):
self.get_start()
self.get_error_func()
self.get_tokens()
self.get_precedence()
self.get_pfunctions()
# Validate all of the information
def validate_all(self):
self.validate_start()
self.validate_error_func()
self.validate_tokens()
self.validate_precedence()
self.validate_pfunctions()
self.validate_files()
return self.error
# Compute a signature over the grammar
def signature(self):
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
sig = md5()
if self.start:
sig.update(self.start.encode('latin-1'))
if self.prec:
sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
if self.tokens:
sig.update(" ".join(self.tokens).encode('latin-1'))
for f in self.pfuncs:
if f[3]:
sig.update(f[3].encode('latin-1'))
except (TypeError,ValueError):
pass
return sig.digest()
# -----------------------------------------------------------------------------
    # validate_files()
#
# This method checks to see if there are duplicated p_rulename() functions
# in the parser module file. Without this function, it is really easy for
# users to make mistakes by cutting and pasting code fragments (and it's a real
# bugger to try and figure out why the resulting parser doesn't work). Therefore,
# we just do a little regular expression pattern matching of def statements
# to try and detect duplicates.
# -----------------------------------------------------------------------------
def validate_files(self):
# Match def p_funcname(
fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
for filename in self.files.keys():
base,ext = os.path.splitext(filename)
if ext != '.py': return 1 # No idea. Assume it's okay.
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
continue
counthash = { }
for linen,l in enumerate(lines):
linen += 1
m = fre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)
# Get the start symbol
def get_start(self):
self.start = self.pdict.get('start')
# Validate the start symbol
def validate_start(self):
if self.start is not None:
if not isinstance(self.start,str):
self.log.error("'start' must be a string")
# Look for error handler
def get_error_func(self):
self.error_func = self.pdict.get('p_error')
# Validate the error function
def validate_error_func(self):
if self.error_func:
if isinstance(self.error_func,types.FunctionType):
ismethod = 0
elif isinstance(self.error_func, types.MethodType):
ismethod = 1
else:
self.log.error("'p_error' defined, but is not a function or method")
self.error = 1
return
eline = func_code(self.error_func).co_firstlineno
efile = func_code(self.error_func).co_filename
self.files[efile] = 1
if (func_code(self.error_func).co_argcount != 1+ismethod):
self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
self.error = 1
# Get the tokens map
def get_tokens(self):
tokens = self.pdict.get("tokens",None)
if not tokens:
self.log.error("No token list is defined")
self.error = 1
return
if not isinstance(tokens,(list, tuple)):
self.log.error("tokens must be a list or tuple")
self.error = 1
return
if not tokens:
self.log.error("tokens is empty")
self.error = 1
return
self.tokens = tokens
# Validate the tokens
def validate_tokens(self):
# Validate the tokens.
if 'error' in self.tokens:
self.log.error("Illegal token name 'error'. Is a reserved word")
self.error = 1
return
terminals = {}
for n in self.tokens:
if n in terminals:
self.log.warning("Token '%s' multiply defined", n)
terminals[n] = 1
# Get the precedence map (if any)
def get_precedence(self):
self.prec = self.pdict.get("precedence",None)
# Validate and parse the precedence map
def validate_precedence(self):
preclist = []
if self.prec:
if not isinstance(self.prec,(list,tuple)):
self.log.error("precedence must be a list or tuple")
self.error = 1
return
for level,p in enumerate(self.prec):
if not isinstance(p,(list,tuple)):
self.log.error("Bad precedence table")
self.error = 1
return
if len(p) < 2:
self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
self.error = 1
return
assoc = p[0]
if not isinstance(assoc,str):
self.log.error("precedence associativity must be a string")
self.error = 1
return
for term in p[1:]:
if not isinstance(term,str):
self.log.error("precedence items must be strings")
self.error = 1
return
preclist.append((term,assoc,level+1))
self.preclist = preclist
# Get all p_functions from the grammar
def get_pfunctions(self):
p_functions = []
for name, item in self.pdict.items():
if name[:2] != 'p_': continue
if name == 'p_error': continue
if isinstance(item,(types.FunctionType,types.MethodType)):
line = func_code(item).co_firstlineno
file = func_code(item).co_filename
p_functions.append((line,file,name,item.__doc__))
# Sort all of the actions by line number
p_functions.sort()
self.pfuncs = p_functions
# Validate all of the p_functions
def validate_pfunctions(self):
grammar = []
# Check for non-empty symbols
if len(self.pfuncs) == 0:
self.log.error("no rules of the form p_rulename are defined")
self.error = 1
return
for line, file, name, doc in self.pfuncs:
func = self.pdict[name]
if isinstance(func, types.MethodType):
reqargs = 2
else:
reqargs = 1
if func_code(func).co_argcount > reqargs:
self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
self.error = 1
elif func_code(func).co_argcount < reqargs:
self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
self.error = 1
elif not func.__doc__:
self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
else:
try:
parsed_g = parse_grammar(doc,file,line)
for g in parsed_g:
grammar.append((name, g))
except SyntaxError:
e = sys.exc_info()[1]
self.log.error(str(e))
self.error = 1
# Looks like a valid grammar rule
# Mark the file in which defined.
self.files[file] = 1
# Secondary validation step that looks for p_ definitions that are not functions
# or functions that look like they might be grammar rules.
for n,v in self.pdict.items():
if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
if n[0:2] == 't_': continue
if n[0:2] == 'p_' and n != 'p_error':
self.log.warning("'%s' not defined as a function", n)
if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
(isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
try:
doc = v.__doc__.split(" ")
if doc[1] == ':':
self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
func_code(v).co_filename, func_code(v).co_firstlineno,n)
except Exception:
pass
self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
debuglog=None, errorlog = None, picklefile=None):
global parse # Reference to the parsing method of the last built parser
# If pickling is enabled, table files are not created
if picklefile:
write_tables = 0
if errorlog is None:
errorlog = PlyLogger(sys.stderr)
# Get the module dictionary used for the parser
if module:
_items = [(k,getattr(module,k)) for k in dir(module)]
pdict = dict(_items)
else:
pdict = get_caller_module_dict(2)
# Collect parser information from the dictionary
pinfo = ParserReflect(pdict,log=errorlog)
pinfo.get_all()
if pinfo.error:
raise YaccError("Unable to build parser")
# Check signature against table files (if any)
signature = pinfo.signature()
# Read the tables
try:
lr = LRTable()
if picklefile:
read_signature = lr.read_pickle(picklefile)
else:
read_signature = lr.read_table(tabmodule)
if optimize or (read_signature == signature):
try:
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr,pinfo.error_func)
parse = parser.parse
return parser
except Exception:
e = sys.exc_info()[1]
errorlog.warning("There was a problem loading the table file: %s", repr(e))
except VersionError:
        e = sys.exc_info()[1]
errorlog.warning(str(e))
except Exception:
pass
if debuglog is None:
if debug:
debuglog = PlyLogger(open(debugfile,"w"))
else:
debuglog = NullLogger()
debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)
errors = 0
# Validate the parser information
if pinfo.validate_all():
raise YaccError("Unable to build parser")
if not pinfo.error_func:
errorlog.warning("no p_error() function is defined")
# Create a grammar object
grammar = Grammar(pinfo.tokens)
# Set precedence level for terminals
for term, assoc, level in pinfo.preclist:
try:
grammar.set_precedence(term,assoc,level)
except GrammarError:
e = sys.exc_info()[1]
errorlog.warning("%s",str(e))
# Add productions to the grammar
for funcname, gram in pinfo.grammar:
file, line, prodname, syms = gram
try:
grammar.add_production(prodname,syms,funcname,file,line)
except GrammarError:
e = sys.exc_info()[1]
errorlog.error("%s",str(e))
errors = 1
# Set the grammar start symbols
try:
if start is None:
grammar.set_start(pinfo.start)
else:
grammar.set_start(start)
except GrammarError:
e = sys.exc_info()[1]
errorlog.error(str(e))
errors = 1
if errors:
raise YaccError("Unable to build parser")
# Verify the grammar structure
undefined_symbols = grammar.undefined_symbols()
for sym, prod in undefined_symbols:
errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
errors = 1
unused_terminals = grammar.unused_terminals()
if unused_terminals:
debuglog.info("")
debuglog.info("Unused terminals:")
debuglog.info("")
for term in unused_terminals:
errorlog.warning("Token '%s' defined, but not used", term)
debuglog.info(" %s", term)
# Print out all productions to the debug log
if debug:
debuglog.info("")
debuglog.info("Grammar")
debuglog.info("")
for n,p in enumerate(grammar.Productions):
debuglog.info("Rule %-5d %s", n, p)
# Find unused non-terminals
unused_rules = grammar.unused_rules()
for prod in unused_rules:
errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)
if len(unused_terminals) == 1:
errorlog.warning("There is 1 unused token")
if len(unused_terminals) > 1:
errorlog.warning("There are %d unused tokens", len(unused_terminals))
if len(unused_rules) == 1:
errorlog.warning("There is 1 unused rule")
if len(unused_rules) > 1:
errorlog.warning("There are %d unused rules", len(unused_rules))
if debug:
debuglog.info("")
debuglog.info("Terminals, with rules where they appear")
debuglog.info("")
terms = list(grammar.Terminals)
terms.sort()
for term in terms:
debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))
debuglog.info("")
debuglog.info("Nonterminals, with rules where they appear")
debuglog.info("")
nonterms = list(grammar.Nonterminals)
nonterms.sort()
for nonterm in nonterms:
debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
debuglog.info("")
if check_recursion:
unreachable = grammar.find_unreachable()
for u in unreachable:
errorlog.warning("Symbol '%s' is unreachable",u)
infinite = grammar.infinite_cycles()
for inf in infinite:
errorlog.error("Infinite recursion detected for symbol '%s'", inf)
errors = 1
unused_prec = grammar.unused_precedence()
for term, assoc in unused_prec:
errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
errors = 1
if errors:
raise YaccError("Unable to build parser")
# Run the LRGeneratedTable on the grammar
if debug:
errorlog.debug("Generating %s tables", method)
lr = LRGeneratedTable(grammar,method,debuglog)
if debug:
num_sr = len(lr.sr_conflicts)
# Report shift/reduce and reduce/reduce conflicts
if num_sr == 1:
errorlog.warning("1 shift/reduce conflict")
elif num_sr > 1:
errorlog.warning("%d shift/reduce conflicts", num_sr)
num_rr = len(lr.rr_conflicts)
if num_rr == 1:
errorlog.warning("1 reduce/reduce conflict")
elif num_rr > 1:
errorlog.warning("%d reduce/reduce conflicts", num_rr)
# Write out conflicts to the output file
if debug and (lr.sr_conflicts or lr.rr_conflicts):
debuglog.warning("")
debuglog.warning("Conflicts:")
debuglog.warning("")
for state, tok, resolution in lr.sr_conflicts:
debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)
already_reported = {}
for state, rule, rejected in lr.rr_conflicts:
if (state,id(rule),id(rejected)) in already_reported:
continue
debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
debuglog.warning("rejected rule (%s) in state %d", rejected,state)
errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
errorlog.warning("rejected rule (%s) in state %d", rejected, state)
already_reported[state,id(rule),id(rejected)] = 1
warned_never = []
for state, rule, rejected in lr.rr_conflicts:
if not rejected.reduced and (rejected not in warned_never):
debuglog.warning("Rule (%s) is never reduced", rejected)
errorlog.warning("Rule (%s) is never reduced", rejected)
warned_never.append(rejected)
# Write the table file if requested
if write_tables:
lr.write_table(tabmodule,outputdir,signature)
# Write a pickled version of the tables
if picklefile:
lr.pickle_table(picklefile,signature)
# Build the parser
lr.bind_callables(pinfo.pdict)
parser = LRParser(lr,pinfo.error_func)
parse = parser.parse
return parser
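# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of this module): the typical grammar
# module that drives yacc(). "calclex" is a hypothetical companion lexer
# module built with ply.lex that exports the token list.
import ply.yacc as yacc
from calclex import tokens          # hypothetical lexer module

precedence = (
    ('left', 'PLUS', 'MINUS'),
    ('left', 'TIMES', 'DIVIDE'),
)

def p_expression_binop(p):
    '''expression : expression PLUS expression
                  | expression MINUS expression
                  | expression TIMES expression
                  | expression DIVIDE expression'''
    if p[2] == '+':   p[0] = p[1] + p[3]
    elif p[2] == '-': p[0] = p[1] - p[3]
    elif p[2] == '*': p[0] = p[1] * p[3]
    else:             p[0] = p[1] / p[3]

def p_expression_number(p):
    'expression : NUMBER'
    p[0] = p[1]

def p_error(p):
    print("Syntax error at %r" % (p,))

parser = yacc.yacc()                # builds (or reloads) the LALR tables
result = parser.parse('2 + 3 * 4')
# -----------------------------------------------------------------------------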
|
lgpl-2.1
|
disqus/django-old
|
django/contrib/sitemaps/tests/basic.py
|
1
|
8058
|
import os
from datetime import date
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.sitemaps import Sitemap, GenericSitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils.unittest import skipUnless
from django.utils.formats import localize
from django.utils.translation import activate, deactivate
class SitemapTests(TestCase):
urls = 'django.contrib.sitemaps.tests.urls'
def setUp(self):
if Site._meta.installed:
self.base_url = 'http://example.com'
else:
self.base_url = 'http://testserver'
self.old_USE_L10N = settings.USE_L10N
self.old_Site_meta_installed = Site._meta.installed
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
)
# Create a user that will double as sitemap content
User.objects.create_user('testuser', '[email protected]', 's3krit')
def tearDown(self):
settings.USE_L10N = self.old_USE_L10N
Site._meta.installed = self.old_Site_meta_installed
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
def test_simple_sitemap_index(self):
"A simple sitemap index can be rendered"
# Retrieve the sitemap.
response = self.client.get('/simple/index.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url)
def test_simple_sitemap_custom_index(self):
"A simple sitemap index can be rendered with a custom template"
# Retrieve the sitemap.
response = self.client.get('/simple/custom-index.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url)
def test_simple_sitemap(self):
"A simple sitemap can be rendered"
# Retrieve the sitemap.
response = self.client.get('/simple/sitemap.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today().strftime('%Y-%m-%d')))
def test_simple_custom_sitemap(self):
"A simple sitemap can be rendered with a custom template"
# Retrieve the sitemap.
response = self.client.get('/simple/custom-sitemap.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today().strftime('%Y-%m-%d')))
@skipUnless(settings.USE_I18N, "Internationalization is not enabled")
def test_localized_priority(self):
"The priority value should not be localized (Refs #14164)"
# Localization should be active
settings.USE_L10N = True
activate('fr')
self.assertEqual(u'0,3', localize(0.3))
# Retrieve the sitemap. Check that priorities
# haven't been rendered in localized format
response = self.client.get('/simple/sitemap.xml')
self.assertContains(response, '<priority>0.5</priority>')
self.assertContains(response, '<lastmod>%s</lastmod>' % date.today().strftime('%Y-%m-%d'))
deactivate()
def test_generic_sitemap(self):
"A minimal generic sitemap can be rendered"
# Retrieve the sitemap.
response = self.client.get('/generic/sitemap.xml')
expected = ''
for username in User.objects.values_list("username", flat=True):
expected += "<url><loc>%s/users/%s/</loc></url>" % (self.base_url, username)
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
%s
</urlset>
""" % expected)
@skipUnless("django.contrib.flatpages" in settings.INSTALLED_APPS, "django.contrib.flatpages app not installed.")
def test_flatpage_sitemap(self):
"Basic FlatPage sitemap test"
# Import FlatPage inside the test so that when django.contrib.flatpages
# is not installed we don't get problems trying to delete Site
# objects (FlatPage has an M2M to Site, Site.delete() tries to
        # delete related objects, but the M2M table doesn't exist).
from django.contrib.flatpages.models import FlatPage
public = FlatPage.objects.create(
url=u'/public/',
title=u'Public Page',
enable_comments=True,
registration_required=False,
)
public.sites.add(settings.SITE_ID)
private = FlatPage.objects.create(
url=u'/private/',
title=u'Private Page',
enable_comments=True,
registration_required=True
)
private.sites.add(settings.SITE_ID)
response = self.client.get('/flatpages/sitemap.xml')
# Public flatpage should be in the sitemap
self.assertContains(response, '<loc>%s%s</loc>' % (self.base_url, public.url))
# Private flatpage should not be in the sitemap
self.assertNotContains(response, '<loc>%s%s</loc>' % (self.base_url, private.url))
def test_requestsite_sitemap(self):
# Make sure hitting the flatpages sitemap without the sites framework
# installed doesn't raise an exception
Site._meta.installed = False
# Retrieve the sitemap.
response = self.client.get('/simple/sitemap.xml')
# Check for all the important bits:
self.assertEqual(response.content, """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today().strftime('%Y-%m-%d'))
@skipUnless("django.contrib.sites" in settings.INSTALLED_APPS, "django.contrib.sites app not installed.")
def test_sitemap_get_urls_no_site_1(self):
"""
Check we get ImproperlyConfigured if we don't pass a site object to
Sitemap.get_urls and no Site objects exist
"""
Site.objects.all().delete()
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_get_urls_no_site_2(self):
"""
Check we get ImproperlyConfigured when we don't pass a site object to
Sitemap.get_urls if Site objects exists, but the sites framework is not
actually installed.
"""
Site._meta.installed = False
self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)
def test_sitemap_item(self):
"""
Check to make sure that the raw item is included with each
Sitemap.get_url() url result.
"""
user_sitemap = GenericSitemap({'queryset': User.objects.all()})
def is_user(url):
return isinstance(url['item'], User)
item_in_url_info = all(map(is_user, user_sitemap.get_urls()))
self.assertTrue(item_in_url_info)
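# Illustrative sketch (not part of this test module): the kind of minimal
# Sitemap subclass these tests exercise. The class name is hypothetical; the
# items/location/lastmod hooks and the changefreq/priority attributes are the
# standard django.contrib.sitemaps extension points.
from datetime import date
from django.contrib.sitemaps import Sitemap

class SimpleSitemap(Sitemap):
    changefreq = 'never'
    priority = 0.5

    def items(self):
        return ['main']             # any iterable; often a queryset

    def location(self, obj):
        return '/location/'         # the default calls obj.get_absolute_url()

    def lastmod(self, obj):
        return date.today()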
|
bsd-3-clause
|
iansf/engine
|
build/util/version.py
|
114
|
5040
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
version.py -- Chromium version string substitution utility.
"""
import argparse
import os
import sys
def fetch_values_from_file(values_dict, file_name):
"""
Fetches KEYWORD=VALUE settings from the specified file.
Everything to the left of the first '=' is the keyword,
everything to the right is the value. No stripping of
white space, so beware.
The file must exist, otherwise you get the Python exception from open().
"""
for line in open(file_name, 'r').readlines():
key, val = line.rstrip('\r\n').split('=', 1)
values_dict[key] = val
def fetch_values(file_list):
"""
Returns a dictionary of values to be used for substitution, populating
the dictionary with KEYWORD=VALUE settings from the files in 'file_list'.
Explicitly adds the following value from internal calculations:
OFFICIAL_BUILD
"""
CHROME_BUILD_TYPE = os.environ.get('CHROME_BUILD_TYPE')
if CHROME_BUILD_TYPE == '_official':
official_build = '1'
else:
official_build = '0'
values = dict(
OFFICIAL_BUILD = official_build,
)
for file_name in file_list:
fetch_values_from_file(values, file_name)
return values
def subst_template(contents, values):
"""
Returns the template with substituted values from the specified dictionary.
Keywords to be substituted are surrounded by '@': @KEYWORD@.
No attempt is made to avoid recursive substitution. The order
of evaluation is random based on the order of the keywords returned
by the Python dictionary. So do NOT substitute a value that
contains any @KEYWORD@ strings expecting them to be recursively
substituted, okay?
"""
for key, val in values.iteritems():
try:
contents = contents.replace('@' + key + '@', val)
except TypeError:
print repr(key), repr(val)
return contents
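# Illustrative sketch: calling subst_template() above with a toy template and
# values dictionary (both hypothetical).
values = {'MAJOR': '34', 'MINOR': '0', 'OFFICIAL_BUILD': '1'}
template = 'Chrome @MAJOR@.@MINOR@ (official=@OFFICIAL_BUILD@)'
print subst_template(template, values)
# Chrome 34.0 (official=1)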
def subst_file(file_name, values):
"""
Returns the contents of the specified file_name with substituted
values from the specified dictionary.
This is like subst_template, except it operates on a file.
"""
template = open(file_name, 'r').read()
    return subst_template(template, values)
def write_if_changed(file_name, contents):
"""
Writes the specified contents to the specified file_name
iff the contents are different than the current contents.
"""
try:
old_contents = open(file_name, 'r').read()
except EnvironmentError:
pass
else:
if contents == old_contents:
return
os.unlink(file_name)
open(file_name, 'w').write(contents)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', action='append', default=[],
help='Read variables from FILE.')
parser.add_argument('-i', '--input', default=None,
help='Read strings to substitute from FILE.')
parser.add_argument('-o', '--output', default=None,
help='Write substituted strings to FILE.')
parser.add_argument('-t', '--template', default=None,
help='Use TEMPLATE as the strings to substitute.')
parser.add_argument('-e', '--eval', action='append', default=[],
help='Evaluate VAL after reading variables. Can be used '
'to synthesize variables. e.g. -e \'PATCH_HI=int('
                         'PATCH)/256\'.')
parser.add_argument('args', nargs=argparse.REMAINDER,
help='For compatibility: INPUT and OUTPUT can be '
'passed as positional arguments.')
options = parser.parse_args()
evals = {}
for expression in options.eval:
try:
evals.update(dict([expression.split('=', 1)]))
except ValueError:
parser.error('-e requires VAR=VAL')
# Compatibility with old versions that considered the first two positional
# arguments shorthands for --input and --output.
while len(options.args) and (options.input is None or \
options.output is None):
if options.input is None:
options.input = options.args.pop(0)
elif options.output is None:
options.output = options.args.pop(0)
if options.args:
parser.error('Unexpected arguments: %r' % options.args)
values = fetch_values(options.file)
for key, val in evals.iteritems():
values[key] = str(eval(val, globals(), values))
if options.template is not None:
contents = subst_template(options.template, values)
elif options.input:
contents = subst_file(options.input, values)
else:
# Generate a default set of version information.
contents = """MAJOR=%(MAJOR)s
MINOR=%(MINOR)s
BUILD=%(BUILD)s
PATCH=%(PATCH)s
LASTCHANGE=%(LASTCHANGE)s
OFFICIAL_BUILD=%(OFFICIAL_BUILD)s
""" % values
if options.output is not None:
write_if_changed(options.output, contents)
else:
print contents
return 0
if __name__ == '__main__':
sys.exit(main())
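# Illustrative invocation sketch (all file names hypothetical):
#
#   version.py -f chrome/VERSION -f build/util/LASTCHANGE \
#              -e 'PATCH_HI=int(PATCH)/256' \
#              -i version.h.in -o version.h
#
# reads KEYWORD=VALUE pairs from the -f files, synthesizes PATCH_HI via -e,
# substitutes @KEYWORD@ markers in version.h.in, and rewrites version.h only
# if the contents actually changed.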
|
bsd-3-clause
|
supergentle/migueltutorial
|
flask/lib/python2.7/site-packages/pip/_vendor/requests/models.py
|
217
|
25372
|
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import logging
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import DecodeError
from .exceptions import (
HTTPError, RequestException, MissingSchema, InvalidURL,
ChunkedEncodingError, ContentDecodingError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring, IncompleteRead)
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
log = logging.getLogger(__name__)
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
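# Illustrative sketch (not part of requests): what _encode_params() produces
# for the two supported shapes. _encode_params is a staticmethod, so it can
# be called on the class directly; order is preserved for lists of 2-tuples.
print(RequestEncodingMixin._encode_params([('q', 'python'), ('page', '2')]))
# q=python&page=2
print(RequestEncodingMixin._encode_params({'key': ['v1', 'v2']}))
# key=v1&key=v2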
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, str):
fp = StringIO(fp)
if isinstance(fp, bytes):
fp = BytesIO(fp)
rf = RequestField(name=k, data=fp.read(),
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach the request. If a dictionary is provided, form-encoding will take place.
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
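# Illustrative sketch (not part of requests): attaching a response hook via
# the hooks= parameter; 'response' is the hook event requests dispatches.
def log_url(response, **kwargs):
    print(response.url)

req = Request('GET', 'http://httpbin.org/get', hooks={'response': [log_url]})
prepared = req.prepare()            # the hook travels with the prepared request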
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy()
p._cookies = self._cookies.copy()
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
try:
url = unicode(url)
except NameError:
# We're on Python 3.
url = str(url)
except UnicodeDecodeError:
pass
# Don't do any URL preparation for oddball schemes
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
scheme, auth, host, port, path, query, fragment = parse_url(url)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, basestring),
not isinstance(data, list),
not isinstance(data, dict)
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data:
body = self._encode_params(data)
if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if (content_type) and (not 'content-type' in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif self.method not in ('GET', 'HEAD'):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except RequestException:
return False
return True
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the lovely Charade library
(Thanks, Ian!)."""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
"""
if self._content_consumed:
# simulate reading small chunks of the content
return iter_slices(self._content, chunk_size)
def generate():
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size,
decode_content=True):
yield chunk
except IncompleteRead as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
gen = generate()
if decode_unicode:
gen = stream_decode_response_unicode(gen, self)
return gen
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size,
decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
return json.loads(self.content.decode(encoding), **kwargs)
return json.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
resolved_links = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
resolved_links[key] = link
return resolved_links
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Closes the underlying file descriptor and releases the connection
back to the pool.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()
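The pending-line buffering in iter_lines above is the subtle part of this class. As a hypothetical standalone sketch (not part of requests, standard library only), the same idea applied to any iterable of byte chunks:

def iter_lines_from_chunks(chunks):
    """Yield complete lines, buffering a partial trailing line between chunks."""
    pending = None
    for chunk in chunks:
        if pending is not None:
            chunk = pending + chunk
        lines = chunk.splitlines()
        # Hold the tail back when the chunk does not end on a line boundary.
        if lines and lines[-1] and chunk and lines[-1][-1:] == chunk[-1:]:
            pending = lines.pop()
        else:
            pending = None
        for line in lines:
            yield line
    if pending is not None:
        yield pending

assert list(iter_lines_from_chunks([b'ab\ncd', b'ef\n'])) == [b'ab', b'cdef']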
|
bsd-3-clause
|
detiber/ansible
|
lib/ansible/modules/system/openwrt_init.py
|
58
|
6912
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Andrew Gaffney <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: openwrt_init
author:
- "Andrew Gaffney (@agaffney)"
version_added: "2.3"
short_description: Manage services on OpenWrt.
description:
- Controls OpenWrt services on remote hosts.
options:
name:
required: true
description:
- Name of the service.
aliases: ['service']
state:
required: false
default: null
choices: [ 'started', 'stopped', 'restarted', 'reloaded' ]
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
enabled:
required: false
choices: [ "yes", "no" ]
default: null
description:
- Whether the service should start on boot. B(At least one of state and enabled is required.)
pattern:
required: false
description:
- If the service does not respond to the 'running' command, name a
substring to look for as would be found in the output of the I(ps)
command as a stand-in for a 'running' result. If the string is found,
the service will be assumed to be running.
notes:
- One option other than name is required.
requirements:
- An OpenWrt system
'''
EXAMPLES = '''
# Example action to start service httpd, if not running
- openwrt_init:
state: started
name: httpd
# Example action to stop service cron, if running
- openwrt_init:
name: cron
state: stopped
# Example action to reload service httpd, in all cases
- openwrt_init:
name: httpd
state: reloaded
# Example action to enable service httpd
- openwrt_init:
name: httpd
enabled: yes
'''
RETURN = '''
'''
import os
import glob
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
module = None
init_script = None
# ===============================
# Check if service is enabled
def is_enabled():
(rc, out, err) = module.run_command("%s enabled" % init_script)
if rc == 0:
return True
return False
# ===========================================
# Main control flow
def main():
global module, init_script
# init
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True, type='str', aliases=['service']),
state = dict(choices=['started', 'stopped', 'restarted', 'reloaded'], type='str'),
enabled = dict(type='bool'),
pattern = dict(required=False, default=None),
),
supports_check_mode=True,
required_one_of=[['state', 'enabled']],
)
# initialize
service = module.params['name']
init_script = '/etc/init.d/' + service
rc = 0
out = err = ''
result = {
'name': service,
'changed': False,
}
# check if service exists
if not os.path.exists(init_script):
module.fail_json(msg='service %s does not exist' % service)
# Enable/disable service startup at boot if requested
if module.params['enabled'] is not None:
# do we need to enable the service?
enabled = is_enabled()
# default to current state
result['enabled'] = enabled
# Change enable/disable if needed
if enabled != module.params['enabled']:
result['changed'] = True
if module.params['enabled']:
action = 'enable'
else:
action = 'disable'
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s" % (init_script, action))
# openwrt init scripts can return a non-zero exit code on a successful 'enable'
# command if the init script doesn't contain a STOP value, so we ignore the exit
# code and explicitly check if the service is now in the desired state
if is_enabled() != module.params['enabled']:
module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
result['enabled'] = not enabled
if module.params['state'] is not None:
running = False
# check if service is currently running
if module.params['pattern']:
# Find ps binary
psbin = module.get_bin_path('ps', True)
# this should be busybox ps, so we only want/need the 'w' option
(rc, psout, pserr) = module.run_command('%s w' % psbin)
# If rc is 0, set running as appropriate
if rc == 0:
lines = psout.split("\n")
for line in lines:
if module.params['pattern'] in line and "pattern=" not in line:
# so as to not confuse ./hacking/test-module
running = True
break
else:
(rc, out, err) = module.run_command("%s running" % init_script)
if rc == 0:
running = True
# default to desired state
result['state'] = module.params['state']
# determine action, if any
action = None
if module.params['state'] == 'started':
if not running:
action = 'start'
result['changed'] = True
elif module.params['state'] == 'stopped':
if running:
action = 'stop'
result['changed'] = True
else:
action = module.params['state'][:-2] # remove 'ed' from restarted/reloaded
result['state'] = 'started'
result['changed'] = True
if action:
if not module.check_mode:
(rc, out, err) = module.run_command("%s %s" % (init_script, action))
if rc != 0:
module.fail_json(msg="Unable to %s service %s: %s" % (action, service, err))
module.exit_json(**result)
if __name__ == '__main__':
main()
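The branchy state handling in main() boils down to a small decision table. As a hypothetical pure-function rendering of it (illustration only, not part of the module):

def derive_action(state, running):
    """Return (action, changed) for a desired state and current run status."""
    if state == 'started':
        return ('start', True) if not running else (None, False)
    if state == 'stopped':
        return ('stop', True) if running else (None, False)
    # 'restarted' -> 'restart', 'reloaded' -> 'reload'; these always act.
    return (state[:-2], True)

assert derive_action('started', True) == (None, False)
assert derive_action('reloaded', True) == ('reload', True)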
|
gpl-3.0
|
MarcJoan/django
|
django/conf/project_template/project_name/settings.py
|
92
|
3218
|
"""
Django settings for {{ project_name }} project.
Generated by 'django-admin startproject' using Django {{ django_version }}.
For more information on this file, see
https://docs.djangoproject.com/en/{{ docs_version }}/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/{{ docs_version }}/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/{{ docs_version }}/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/{{ docs_version }}/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/{{ docs_version }}/howto/static-files/
STATIC_URL = '/static/'
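A common hardening step for a generated settings file like this one is to pull deployment-sensitive values from the environment rather than hard-coding them. A minimal sketch, assuming the DJANGO_* variable names (illustrative choices, not a Django convention):

import os

SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'dev-only-insecure-key')
DEBUG = os.environ.get('DJANGO_DEBUG', 'true').lower() == 'true'
ALLOWED_HOSTS = [h for h in os.environ.get('DJANGO_ALLOWED_HOSTS', '').split(',') if h]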
|
bsd-3-clause
|
BhallaLab/moose
|
moose-examples/tutorials/Rdesigneur/ex7.5_bidirectional_transport.py
|
2
|
1294
|
########################################################################
# This example illustrates molecular transport of an oscillatory reaction
# system, along a closed-end cylinder. Here all the molecules are
# transported: a from left to right, and b and s from right to left.
########################################################################
import moose
import numpy as np
import pylab
import rdesigneur as rd
rdes = rd.rdesigneur(
turnOffElec = True,
diffusionLength = 2e-6,
numWaveFrames = 50,
chemProto = [['makeChemOscillator()', 'osc']],
chemDistrib = [['osc', 'soma', 'install', '1' ]],
plotList = [
['soma', '1', 'dend/a', 'conc', 'Concentration of a', 'wave', 0, 1800],
['soma', '1', 'dend/b', 'conc', 'Concentration of b', 'wave', 0, 500],
['soma', '1', 'dend/s', 'conc', 'Concentration of s', 'wave', 0, 1200],
],
moogList = [['soma', '1', 'dend/a', 'conc', 'a Conc', 0, 600 ]]
)
a = moose.element( '/library/osc/kinetics/a' )
b = moose.element( '/library/osc/kinetics/b' )
s = moose.element( '/library/osc/kinetics/s' )
a.diffConst = 0
b.diffConst = 0
a.motorConst = 2e-6
b.motorConst = -2e-6
s.motorConst = -2e-6
rdes.buildModel()
moose.reinit()
rdes.displayMoogli( 1, 250, rotation = 0, azim = -np.pi/2, elev = 0.0 )
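The signs of the motorConst values above set the transport direction. As a toy illustration of that drift (hypothetical, pure numpy, not using moose), a pulse moved one compartment per step along a closed-end domain:

import numpy as np

conc = np.zeros(100)
conc[10] = 1.0                # initial pulse of 'a'
for _ in range(30):
    conc[1:] = conc[:-1]      # positive motor constant: drift right
    conc[0] = 0.0             # closed end: nothing enters
print(conc.argmax())          # pulse has moved from index 10 to 40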
|
gpl-3.0
|
mryanlam/f5-ansible
|
scripts/plugin_formatter.py
|
1
|
17953
|
#!/usr/bin/env python
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# (c) 2012-2014, Michael DeHaan <[email protected]> and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
__metaclass__ = type
import os
import glob
import sys
import yaml
import re
import optparse
import datetime
import cgi
import warnings
from collections import defaultdict
from jinja2 import Environment, FileSystemLoader
from six import iteritems
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes
from ansible.utils import plugin_docs
#####################################################################################
# constants and paths
# if a module is added in a version of Ansible older than this, don't print the version added information
# in the module documentation because everyone is assumed to be running something newer than this already.
TOO_OLD_TO_BE_NOTABLE = 1.3
# Get parent directory of the directory this script lives in
MODULEDIR = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'lib', 'ansible', 'modules'
))
# The name of the DOCUMENTATION template
EXAMPLE_YAML = os.path.abspath(os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir, 'examples', 'DOCUMENTATION.yml'
))
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
DEPRECATED = " (D)"
#####################################################################################
def rst_ify(text):
''' convert symbols like I(this is in italics) to valid restructured text '''
try:
t = _ITALIC.sub(r'*' + r"\1" + r"*", text)
t = _BOLD.sub(r'**' + r"\1" + r"**", t)
t = _MODULE.sub(r':ref:`' + r"\1 <\1>" + r"`", t)
t = _URL.sub(r"\1", t)
t = _CONST.sub(r'``' + r"\1" + r"``", t)
except Exception as e:
raise AnsibleError("Could not process (%s) : %s" % (str(text), str(e)))
return t
#####################################################################################
def html_ify(text):
''' convert symbols like I(this is in italics) to valid HTML '''
t = cgi.escape(text)
t = _ITALIC.sub("<em>" + r"\1" + "</em>", t)
t = _BOLD.sub("<b>" + r"\1" + "</b>", t)
t = _MODULE.sub("<span class='module'>" + r"\1" + "</span>", t)
t = _URL.sub("<a href='" + r"\1" + "'>" + r"\1" + "</a>", t)
t = _CONST.sub("<code>" + r"\1" + "</code>", t)
return t
#####################################################################################
def rst_fmt(text, fmt):
''' helper for Jinja2 to do format strings '''
return fmt % (text)
#####################################################################################
def rst_xline(width, char="="):
''' return a restructured text line of a given length '''
return char * width
#####################################################################################
def write_data(text, options, outputname, module):
''' dumps module output to a file or the screen, as requested '''
if options.output_dir is not None:
fname = os.path.join(options.output_dir, outputname % module)
fname = fname.replace(".py","")
f = open(fname, 'w')
f.write(text.encode('utf-8'))
f.close()
else:
print(text)
#####################################################################################
def list_modules(module_dir, depth=0):
''' returns a hash of categories, each category being a hash of module names to file paths '''
categories = dict()
module_info = dict()
aliases = defaultdict(set)
# * windows powershell modules have documentation stubs in python docstring
# format (they are not executed) so skip the ps1 format files
# * One glob level for every module level that we're going to traverse
files = (
glob.glob("%s/*.py" % module_dir) +
glob.glob("%s/*/*.py" % module_dir) +
glob.glob("%s/*/*/*.py" % module_dir) +
glob.glob("%s/*/*/*/*.py" % module_dir)
)
for module_path in files:
if module_path.endswith('__init__.py'):
continue
category = categories
mod_path_only = module_path
# Start at the second directory because we don't want the "vendor"
# directories (core, extras)
mod_path_only = os.path.dirname(module_path[len(module_dir):])
for new_cat in ['network', 'f5']:
if new_cat not in category:
category[new_cat] = dict()
category = category[new_cat]
module = os.path.splitext(os.path.basename(module_path))[0]
if module in plugin_docs.BLACKLIST['MODULE']:
# Do not list blacklisted modules
continue
if module.startswith("_") and os.path.islink(module_path):
source = os.path.splitext(os.path.basename(os.path.realpath(module_path)))[0]
module = module.replace("_","",1)
aliases[source].add(module)
continue
category[module] = module_path
module_info[module] = module_path
# keep module tests out of becoming module docs
if 'test' in categories:
del categories['test']
return module_info, categories, aliases
#####################################################################################
def generate_parser():
''' generate an optparse parser '''
p = optparse.OptionParser(
version='%prog 1.0',
usage='usage: %prog [options] arg1 arg2',
description='Generate module documentation from metadata',
)
p.add_option("-A", "--ansible-version", action="store", dest="ansible_version", default="unknown", help="Ansible version number")
p.add_option("-M", "--module-dir", action="store", dest="module_dir", default=MODULEDIR, help="Ansible library path")
p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="hacking/templates", help="directory containing Jinja2 templates")
p.add_option("-t", "--type", action='store', dest='type', choices=['rst'], default='rst', help="Document type")
p.add_option("-v", "--verbose", action='store_true', default=False, help="Verbose")
p.add_option("-o", "--output-dir", action="store", dest="output_dir", default=None, help="Output directory for module files")
p.add_option("-I", "--includes-file", action="store", dest="includes_file", default=None, help="Create a file containing list of processed modules")
p.add_option('-V', action='version', help='Show version number and exit')
return p
#####################################################################################
def jinja2_environment(template_dir, typ):
env = Environment(loader=FileSystemLoader(template_dir),
variable_start_string="@{",
variable_end_string="}@",
trim_blocks=True,
)
env.globals['xline'] = rst_xline
if typ == 'rst':
env.filters['convert_symbols_to_format'] = rst_ify
env.filters['html_ify'] = html_ify
env.filters['fmt'] = rst_fmt
env.filters['xline'] = rst_xline
template = env.get_template('plugin.rst.j2')
outputname = "%s_module.rst"
else:
raise Exception("unknown module format type: %s" % typ)
return env, template, outputname
#####################################################################################
def too_old(added):
if not added:
return False
try:
added_tokens = str(added).split(".")
readded = added_tokens[0] + "." + added_tokens[1]
added_float = float(readded)
except ValueError as e:
warnings.warn("Could not parse %s: %s" % (added, str(e)))
return False
return (added_float < TOO_OLD_TO_BE_NOTABLE)
def process_module(module, options, env, template, outputname, module_map, aliases):
fname = module_map[module]
if isinstance(fname, dict):
return "SKIPPED"
basename = os.path.basename(fname)
deprecated = False
# ignore files with extensions
if not basename.endswith(".py"):
return
elif module.startswith("_"):
if os.path.islink(fname):
return # ignore, its an alias
deprecated = True
module = module.replace("_","",1)
print("rendering: %s" % module)
# use ansible core library to parse out doc metadata YAML and plaintext examples
doc, examples, returndocs, metadata = plugin_docs.get_docstring(fname, verbose=options.verbose)
# crash if module is missing documentation and not explicitly hidden from docs index
if doc is None:
sys.exit("*** ERROR: MODULE MISSING DOCUMENTATION: %s, %s ***\n" % (fname, module))
if metadata is None:
sys.exit("*** ERROR: MODULE MISSING METADATA: %s, %s ***\n" % (fname, module))
if deprecated and 'deprecated' not in doc:
sys.exit("*** ERROR: DEPRECATED MODULE MISSING 'deprecated' DOCUMENTATION: %s, %s ***\n" % (fname, module))
if module in aliases:
doc['aliases'] = aliases[module]
all_keys = []
if 'version_added' not in doc:
sys.exit("*** ERROR: missing version_added in: %s ***\n" % module)
added = 0
if doc['version_added'] == 'historical':
del doc['version_added']
else:
added = doc['version_added']
# don't show version added information if it's too old to be called out
if too_old(added):
del doc['version_added']
if 'options' in doc and doc['options']:
for (k,v) in iteritems(doc['options']):
# don't show version added information if it's too old to be called out
if 'version_added' in doc['options'][k] and too_old(doc['options'][k]['version_added']):
del doc['options'][k]['version_added']
if 'description' not in doc['options'][k]:
raise AnsibleError("Missing required description for option %s in %s " % (k, module))
required_value = doc['options'][k].get('required', False)
if not isinstance(required_value, bool):
raise AnsibleError("Invalid required value '%s' for option '%s' in '%s' (must be truthy)" % (required_value, k, module))
if not isinstance(doc['options'][k]['description'],list):
doc['options'][k]['description'] = [doc['options'][k]['description']]
all_keys.append(k)
all_keys = sorted(all_keys)
doc['option_keys'] = all_keys
doc['filename'] = fname
doc['docuri'] = doc['module'].replace('_', '-')
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
doc['ansible_version'] = options.ansible_version
doc['plainexamples'] = examples #plain text
doc['metadata'] = metadata
if returndocs:
try:
doc['returndocs'] = yaml.safe_load(returndocs)
except yaml.YAMLError:
print("could not load yaml: %s" % returndocs)
raise
else:
doc['returndocs'] = None
# here is where we build the table of contents...
try:
text = template.render(doc)
except Exception as e:
raise AnsibleError("Failed to render doc for %s: %s" % (fname, str(e)))
write_data(text, options, outputname, module)
return doc['short_description']
#####################################################################################
def print_modules(module, category_file, deprecated, options, env, template, outputname, module_map, aliases):
modstring = module
if modstring.startswith('_'):
modstring = module[1:]
modname = modstring
if module in deprecated:
modstring = modstring + DEPRECATED
category_file.write(" %s - %s <%s_module>\n" % (to_bytes(modstring), to_bytes(rst_ify(module_map[module][1])), to_bytes(modname)))
def process_category(category, categories, options, env, template, outputname):
### FIXME:
# We no longer conceptually deal with a mapping of category names to
# modules to file paths. Instead we want several different records:
# (1) Mapping of module names to file paths (what's presently used
# as categories['all']
# (2) Mapping of category names to lists of module names (what you'd
# presently get from categories[category_name][subcategory_name].keys()
# (3) aliases (what's presently in categories['_aliases']
#
# list_modules() now returns those. Need to refactor this function and
# main to work with them.
module_map = categories[category]
module_info = categories['all']
aliases = {}
if '_aliases' in categories:
aliases = categories['_aliases']
category_file_path = os.path.join(options.output_dir, "list_of_%s_modules.rst" % category)
category_file = open(category_file_path, "w")
print("*** recording category %s in %s ***" % (category, category_file_path))
# start a new category file
category = category.replace("_"," ")
category = category.title()
modules = []
deprecated = []
for module in module_map.keys():
if isinstance(module_map[module], dict):
for mod in (m for m in module_map[module].keys() if m in module_info):
if mod.startswith("_"):
deprecated.append(mod)
else:
if module not in module_info:
continue
if module.startswith("_"):
deprecated.append(module)
modules.append(module)
modules.sort(key=lambda k: k[1:] if k.startswith('_') else k)
category_header = "%s Modules" % (category.title())
underline = "`" * len(category_header)
category_file.write("""\
%s
%s
.. toctree:: :maxdepth: 1
""" % (category_header, underscores))
sections = []
for module in modules:
if module in module_map and isinstance(module_map[module], dict):
sections.append(module)
continue
else:
print_modules(module, category_file, deprecated, options, env, template, outputname, module_info, aliases)
sections.sort()
for section in sections:
category_file.write("\n%s\n%s\n\n" % (section.replace("_"," ").title(),'-' * len(section)))
category_file.write(".. toctree:: :maxdepth: 1\n\n")
section_modules = sorted(module_map[section].keys(), key=lambda k: k[1:] if k.startswith('_') else k)
#for module in module_map[section]:
for module in (m for m in section_modules if m in module_info):
print_modules(module, category_file, deprecated, options, env, template, outputname, module_info, aliases)
category_file.write("""\n\n
.. note::
- %s: This marks a module as deprecated, which means a module is kept for backwards compatibility but usage is discouraged.
The module documentation details page may explain more about this rationale.
""" % DEPRECATED)
category_file.close()
# TODO: end a new category file
#####################################################################################
def validate_options(options):
''' validate option parser options '''
if not options.module_dir:
sys.exit("--module-dir is required", file=sys.stderr)
if not os.path.exists(options.module_dir):
sys.exit("--module-dir does not exist: %s" % options.module_dir, file=sys.stderr)
if not options.template_dir:
sys.exit("--template-dir must be specified")
#####################################################################################
def main():
p = generate_parser()
(options, args) = p.parse_args()
validate_options(options)
env, template, outputname = jinja2_environment(options.template_dir, options.type)
mod_info, categories, aliases = list_modules(options.module_dir)
categories['all'] = mod_info
categories['_aliases'] = aliases
category_names = [c for c in categories.keys() if not c.startswith('_')]
category_names.sort()
# Write master category list
category_list_path = os.path.join(options.output_dir, "modules_by_category.rst")
with open(category_list_path, "w") as category_list_file:
category_list_file.write("Module Index\n")
category_list_file.write("============\n")
category_list_file.write("\n\n")
category_list_file.write(".. toctree::\n")
category_list_file.write(" :maxdepth: 1\n\n")
for category in category_names:
category_list_file.write(" list_of_%s_modules\n" % category)
#
# Import all the docs into memory
#
module_map = mod_info.copy()
for modname in module_map:
result = process_module(modname, options, env, template, outputname, module_map, aliases)
if result == 'SKIPPED':
del categories['all'][modname]
else:
categories['all'][modname] = (categories['all'][modname], result)
#
# Render all the docs to rst via category pages
#
for category in category_names:
process_category(category, categories, options, env, template, outputname)
if __name__ == '__main__':
main()
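To see the markup conversion in isolation, a hypothetical self-contained demo of the substitutions rst_ify performs (pattern copies included so it runs on its own):

import re

_ITALIC = re.compile(r"I\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")

text = "Use C(state=started) to I(idempotently) start a service."
text = _ITALIC.sub(r"*\1*", text)
text = _CONST.sub(r"``\1``", text)
print(text)  # Use ``state=started`` to *idempotently* start a service.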
|
gpl-3.0
|
sodafree/backend
|
django/conf/locale/sl/formats.py
|
257
|
1834
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd. F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j. M. Y'
SHORT_DATETIME_FORMAT = 'j.n.Y. H:i'
FIRST_DAY_OF_WEEK = 0
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d-%m-%Y', # '25-10-2006'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d-%m-%Y %H:%M:%S', # '25-10-2006 14:30:59'
'%d-%m-%Y %H:%M', # '25-10-2006 14:30'
'%d-%m-%Y', # '25-10-2006'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
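These input formats are ordinary strftime patterns, so a form field effectively tries them in order until one parses. A minimal standard-library sketch of that loop:

from datetime import datetime

for fmt in ('%d.%m.%Y', '%d.%m.%y', '%d-%m-%Y', '%d. %m. %Y'):
    try:
        print(datetime.strptime('25. 10. 2006', fmt).date())  # 2006-10-25
        break
    except ValueError:
        continue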
|
bsd-3-clause
|
felix-dumit/yowsup-tutorial
|
yowsup/layers/protocol_ib/protocolentities/clean_iq.py
|
70
|
1297
|
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from yowsup.layers.protocol_iq.protocolentities import IqProtocolEntity
class CleanIqProtocolEntity(IqProtocolEntity):
'''
<iq id="" type="set" to="self.domain" xmlns="urn:xmpp:whatsapp:dirty">
<clean type="{{dirty_type}}"></clean>
</iq>
'''
def __init__(self, cleanType, to, _id = None):
super(CleanIqProtocolEntity, self).__init__(
"urn:xmpp:whatsapp:dirty",
_id = _id,
_type = "set",
to = to
)
self.setProps(cleanType)
def setProps(self, cleanType):
self.cleanType = cleanType
def __str__(self):
out = super(CleanIqProtocolEntity, self).__str__()
out += "Clean Type: %s\n" % self.cleanType
return out
def toProtocolTreeNode(self):
node = super(CleanIqProtocolEntity, self).toProtocolTreeNode()
cleanNode = ProtocolTreeNode("clean", {"type": self.cleanType})
node.addChild(cleanNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = IqProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = CleanIqProtocolEntity
entity.setProps(node.getChild("clean").getAttributeValue("type"))
return entity
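The class docstring shows the stanza this entity serializes to. As a hypothetical standard-library sketch (the id and target domain here are made up), the same shape rendered for inspection:

import xml.etree.ElementTree as ET

iq = ET.Element('iq', {'id': 'clean-1', 'type': 'set',
                       'to': 'example.domain', 'xmlns': 'urn:xmpp:whatsapp:dirty'})
ET.SubElement(iq, 'clean', {'type': 'groups'})
print(ET.tostring(iq).decode())
# roughly: <iq id="clean-1" type="set" to="example.domain" xmlns="urn:xmpp:whatsapp:dirty"><clean type="groups" /></iq>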
|
mit
|
mmardini/django
|
tests/signing/tests.py
|
36
|
4807
|
from __future__ import unicode_literals
import time
from django.core import signing
from django.test import TestCase
from django.utils.encoding import force_str
from django.utils import six
class TestSigner(TestCase):
def test_signature(self):
"signature() method should generate a signature"
signer = signing.Signer('predictable-secret')
signer2 = signing.Signer('predictable-secret2')
for s in (
b'hello',
b'3098247:529:087:',
'\u2019'.encode('utf-8'),
):
self.assertEqual(
signer.signature(s),
signing.base64_hmac(signer.salt + 'signer', s,
'predictable-secret').decode()
)
self.assertNotEqual(signer.signature(s), signer2.signature(s))
def test_signature_with_salt(self):
"signature(value, salt=...) should work"
signer = signing.Signer('predictable-secret', salt='extra-salt')
self.assertEqual(
signer.signature('hello'),
signing.base64_hmac('extra-salt' + 'signer',
'hello', 'predictable-secret').decode()
)
self.assertNotEqual(
signing.Signer('predictable-secret', salt='one').signature('hello'),
signing.Signer('predictable-secret', salt='two').signature('hello'))
def test_sign_unsign(self):
"sign/unsign should be reversible"
signer = signing.Signer('predictable-secret')
examples = [
'q;wjmbk;wkmb',
'3098247529087',
'3098247:529:087:',
'jkw osanteuh ,rcuh nthu aou oauh ,ud du',
'\u2019',
]
if six.PY2:
examples.append(b'a byte string')
for example in examples:
signed = signer.sign(example)
self.assertIsInstance(signed, str)
self.assertNotEqual(force_str(example), signed)
self.assertEqual(example, signer.unsign(signed))
def test_unsign_detects_tampering(self):
"unsign should raise an exception if the value has been tampered with"
signer = signing.Signer('predictable-secret')
value = 'Another string'
signed_value = signer.sign(value)
transforms = (
lambda s: s.upper(),
lambda s: s + 'a',
lambda s: 'a' + s[1:],
lambda s: s.replace(':', ''),
)
self.assertEqual(value, signer.unsign(signed_value))
for transform in transforms:
self.assertRaises(
signing.BadSignature, signer.unsign, transform(signed_value))
def test_dumps_loads(self):
"dumps and loads be reversible for any JSON serializable object"
objects = [
['a', 'list'],
'a unicode string \u2019',
{'a': 'dictionary'},
]
if six.PY2:
objects.append(b'a byte string')
for o in objects:
self.assertNotEqual(o, signing.dumps(o))
self.assertEqual(o, signing.loads(signing.dumps(o)))
self.assertNotEqual(o, signing.dumps(o, compress=True))
self.assertEqual(o, signing.loads(signing.dumps(o, compress=True)))
def test_decode_detects_tampering(self):
"loads should raise exception for tampered objects"
transforms = (
lambda s: s.upper(),
lambda s: s + 'a',
lambda s: 'a' + s[1:],
lambda s: s.replace(':', ''),
)
value = {
'foo': 'bar',
'baz': 1,
}
encoded = signing.dumps(value)
self.assertEqual(value, signing.loads(encoded))
for transform in transforms:
self.assertRaises(
signing.BadSignature, signing.loads, transform(encoded))
def test_works_with_non_ascii_keys(self):
binary_key = b'\xe7' # Set some binary (non-ASCII) key
s = signing.Signer(binary_key)
self.assertEqual('foo:6NB0fssLW5RQvZ3Y-MTerq2rX7w', s.sign('foo'))
class TestTimestampSigner(TestCase):
def test_timestamp_signer(self):
value = 'hello'
_time = time.time
time.time = lambda: 123456789
try:
signer = signing.TimestampSigner('predictable-key')
ts = signer.sign(value)
self.assertNotEqual(ts,
signing.Signer('predictable-key').sign(value))
self.assertEqual(signer.unsign(ts), value)
time.time = lambda: 123456800
self.assertEqual(signer.unsign(ts, max_age=12), value)
self.assertEqual(signer.unsign(ts, max_age=11), value)
self.assertRaises(
signing.SignatureExpired, signer.unsign, ts, max_age=10)
finally:
time.time = _time
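The signatures asserted in TestSigner are URL-safe base64 HMACs over the salted value. A rough standalone approximation for intuition (Django derives the HMAC key from the salt and secret, while this toy simply mixes the salt into the message, so the digests will not match real ones):

import base64
import hashlib
import hmac

def toy_signature(salt, value, key):
    digest = hmac.new(key.encode(), (salt + value).encode(), hashlib.sha1).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b'=').decode()

print(toy_signature('extra-salt' + 'signer', 'hello', 'predictable-secret'))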
|
bsd-3-clause
|
ResearchSoftwareInstitute/MyHPOM
|
hs_core/tests/api/native/test_utils.py
|
2
|
4061
|
from django.contrib.auth.models import Group
from django.contrib.sites.models import Site
from django.test import TestCase
from mezzanine.conf import settings
from hs_core.hydroshare import utils
from hs_core.models import GenericResource, BaseResource
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
class TestUtils(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(TestUtils, self).setUp()
self.group, _ = Group.objects.get_or_create(name='Resource Author')
self.user = hydroshare.create_account(
'[email protected]',
username='user1',
first_name='Creator_FirstName',
last_name='Creator_LastName',
superuser=False,
groups=[]
)
self.user2 = hydroshare.create_account(
'[email protected]',
username='user2',
first_name='user2_FirstName',
last_name='user2_LastName',
superuser=False,
groups=[]
)
self.res = hydroshare.create_resource(
'GenericResource',
self.user,
'test resource',
)
self.res.doi = 'doi1000100010001'
self.res.save()
def test_get_resource_types(self):
res_types = utils.get_resource_types()
self.assertIn(GenericResource, res_types)
for res_type in res_types:
self.assertTrue(issubclass(res_type, BaseResource))
def test_get_resource_instance(self):
self.assertEqual(
utils.get_resource_instance('hs_core', 'GenericResource', self.res.pk),
self.res
)
def test_get_resource_by_shortkey(self):
self.assertEqual(
utils.get_resource_by_shortkey(self.res.short_id),
self.res
)
def test_get_resource_by_doi(self):
self.assertEqual(
utils.get_resource_by_doi('doi1000100010001'),
self.res
)
def test_user_from_id(self):
self.assertEqual(
utils.user_from_id(self.user),
self.user,
msg='user passthrough failed'
)
self.assertEqual(
utils.user_from_id('[email protected]'),
self.user,
msg='lookup by email address failed'
)
self.assertEqual(
utils.user_from_id('user1'),
self.user,
msg='lookup by username failed'
)
def test_group_from_id(self):
self.assertEqual(
utils.group_from_id(self.group),
self.group,
msg='group passthrough failed'
)
self.assertEqual(
utils.group_from_id('Resource Author'),
self.group,
msg='lookup by group name failed'
)
def test_get_user_profile(self):
self.assertEquals(self.user.userprofile, utils.get_profile(self.user))
def test_get_mime_type(self):
test_file = 'my_file.txt'
self.assertEquals(utils.get_file_mime_type(test_file), 'text/plain')
test_file = 'my_file.tif'
self.assertEquals(utils.get_file_mime_type(test_file), 'image/tiff')
test_file = 'my_file.abc'
self.assertEquals(utils.get_file_mime_type(test_file), 'application/abc')
def test_get_current_site_url(self):
current_site = Site.objects.get_current()
protocol = getattr(settings, 'MY_SITE_PROTOCOL', 'http')
url = '%s://%s' % (protocol, current_site.domain)
self.assertEquals(utils.current_site_url(), url)
def test_resource_modified(self):
modified_date1 = self.res.metadata.dates.filter(type='modified').first()
self.assertEquals(self.res.last_changed_by, self.user)
utils.resource_modified(self.res, self.user2)
modified_date2 = self.res.metadata.dates.filter(type='modified').first()
self.assertTrue((modified_date2.start_date - modified_date1.start_date).total_seconds() > 0)
self.assertEquals(self.res.last_changed_by, self.user2)
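The MIME expectations in test_get_mime_type line up with the standard library's table plus an application/<extension> fallback. A hypothetical sketch of such a helper (not HydroShare's actual implementation):

import mimetypes
import os

def guess_mime_type(filename):
    mime_type, _ = mimetypes.guess_type(filename)
    if mime_type is None:
        mime_type = 'application/' + os.path.splitext(filename)[1].lstrip('.')
    return mime_type

assert guess_mime_type('my_file.txt') == 'text/plain'
assert guess_mime_type('my_file.abc') == 'application/abc'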
|
bsd-3-clause
|
jkonecny12/anaconda
|
pyanaconda/modules/common/structures/iscsi.py
|
6
|
4801
|
#
# DBus structures for iSCSI.
#
# Copyright (C) 2019 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from dasbus.structure import DBusData
from dasbus.typing import * # pylint: disable=wildcard-import
__all__ = ["Portal", "Credentials", "Node"]
class Portal(DBusData):
"""Data for iSCSI portal."""
def __init__(self):
self._ip_address = ""
self._port = "3260"
@property
def ip_address(self) -> Str:
"""IP address.
:return: a string with an IP address
"""
return self._ip_address
@ip_address.setter
def ip_address(self, address: Str):
self._ip_address = address
@property
def port(self) -> Str:
"""Port.
:return: a string with the port
"""
return self._port
@port.setter
def port(self, port: Str):
self._port = port
def __eq__(self, other):
return (self._ip_address, self._port) == (other.ip_address, other.port)
class Credentials(DBusData):
"""Data for iSCSI credentials."""
def __init__(self):
self._username = ""
self._password = ""
self._reverse_username = ""
self._reverse_password = ""
@property
def username(self) -> Str:
"""CHAP user name.
:return: a string with a name
"""
return self._username
@username.setter
def username(self, name: Str):
self._username = name
@property
def password(self) -> Str:
"""CHAP password.
:return: a string with a password
"""
return self._password
@password.setter
def password(self, password: Str):
self._password = password
@property
def reverse_username(self) -> Str:
"""Reverse CHAP user name.
:return: a string with a name
"""
return self._reverse_username
@reverse_username.setter
def reverse_username(self, name: Str):
self._reverse_username = name
@property
def reverse_password(self) -> Str:
"""Reverse CHAP password.
:return: a string with a password
"""
return self._reverse_password
@reverse_password.setter
def reverse_password(self, password: Str):
self._reverse_password = password
def __eq__(self, other):
return (self._username, self._password, self._reverse_username, self._reverse_password) == \
(other.username, other.password, other.reverse_username, other.reverse_password)
class Node(DBusData):
"""Data for iSCSI node."""
def __init__(self):
self._name = ""
self._address = ""
self._port = ""
self._iface = ""
self._net_ifacename = ""
@property
def name(self) -> Str:
"""Name.
:return: a string with a name
"""
return self._name
@name.setter
def name(self, name: Str):
self._name = name
@property
def address(self) -> Str:
"""Address.
:return: a string with an address
"""
return self._address
@address.setter
def address(self, address: Str):
self._address = address
@property
def port(self) -> Str:
"""Port.
:return: a string with a port
"""
return self._port
@port.setter
def port(self, port: Str):
self._port = port
@property
def iface(self) -> Str:
"""ISCSI Interface.
:return: a string with an interface name (eg "iface0")
"""
return self._iface
@iface.setter
def iface(self, iscsi_iface: Str):
self._iface = iscsi_iface
@property
def net_ifacename(self) -> Str:
"""Network layer's interface name.
:return: a string with an interface name (eg "ens3")
"""
return self._net_ifacename
@net_ifacename.setter
def net_ifacename(self, net_ifacename: Str):
self._net_ifacename = net_ifacename
def __eq__(self, other):
return (self._name, self._address, self._port, self._iface, self._net_ifacename) == \
(other.name, other.address, other.port, other.iface, other.net_ifacename)
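Each structure above follows the same value-object pattern: private fields, typed properties, and a tuple-based __eq__. A minimal plain-Python sketch of that pattern, without the dasbus machinery:

class PortalSketch(object):
    """Illustrative stand-in for Portal."""

    def __init__(self, ip_address='', port='3260'):
        self._ip_address = ip_address
        self._port = port

    def __eq__(self, other):
        return (self._ip_address, self._port) == (other._ip_address, other._port)

assert PortalSketch('10.0.0.1') == PortalSketch('10.0.0.1')
assert PortalSketch('10.0.0.1') != PortalSketch('10.0.0.2')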
|
gpl-2.0
|
farrajota/dbcollection
|
dbcollection/core/api/process.py
|
2
|
6154
|
"""
Process API class.
"""
from __future__ import print_function
import os
from dbcollection.core.manager import CacheManager
from .metadata import MetadataConstructor
def process(name, task='default', verbose=True):
"""Process a dataset's metadata and stores it to file.
The data is stored a a HSF5 file for each task composing the dataset's tasks.
Parameters
----------
name : str
Name of the dataset.
task : str, optional
Name of the task to process.
verbose : bool, optional
Displays text information (if true).
Raises
------
KeyError
If a task does not exist for a dataset.
Examples
--------
>>> import dbcollection as dbc
Process the 'classification' task of the cifar10 dataset.
>>> dbc.process('cifar10', task='classification', verbose=False)
"""
assert name, 'Must input a valid dataset name.'
processor = ProcessAPI(name=name,
task=task,
verbose=verbose)
processor.run()
class ProcessAPI(object):
"""Dataset metadata process API class.
This class contains methods to correctly process
the dataset's data files and convert their metadata
to disk.
Parameters
----------
name : str
Name of the dataset.
task : str
Name of the task to process.
verbose : bool
Displays text information (if true).
Attributes
----------
name : str
Name of the dataset.
task : str
Name of the task to process.
verbose : bool
Displays text information (if true).
extract_data : bool
Flag to extract data (if True).
cache_manager : CacheManager
Cache manager object.
Raises
------
KeyError
If a task does not exist for a dataset.
"""
def __init__(self, name, task, verbose):
"""Initialize class."""
assert isinstance(name, str), 'Must input a valid dataset name.'
assert isinstance(task, str), 'Must input a valid task name.'
assert isinstance(verbose, bool), "Must input a valid boolean for verbose."
self.name = name
self.task = task
self.verbose = verbose
self.extract_data = False
self.cache_manager = self.get_cache_manager()
def get_cache_manager(self):
return CacheManager()
def run(self):
"""Main method."""
if self.verbose:
print('==> Setup directories to store the data files.')
self.set_dirs_processed_metadata()
if self.verbose:
print('==> Process \'{}\' metadata to disk...'.format(self.name))
task_info = self.process_dataset()
if self.verbose:
print('==> Updating the cache manager')
self.update_cache(task_info)
if self.verbose:
print('==> Dataset processing complete.')
def set_dirs_processed_metadata(self):
cache_dir = self.get_dataset_cache_dir_path()
self.create_dir(cache_dir)
def get_dataset_cache_dir_path(self):
cache_dir = self.get_cache_dir_path_from_cache()
return os.path.join(cache_dir, self.name)
def get_cache_dir_path_from_cache(self):
return self.cache_manager.info.cache_dir
def create_dir(self, path):
"""Create a directory in the disk."""
if not os.path.exists(path):
os.makedirs(path)
def process_dataset(self):
"""Process the dataset's metadata."""
data_dir = self.get_dataset_data_dir_path()
cache_dir = self.get_dataset_cache_dir_path()
task = self.parse_task_name(self.task)
self.check_if_task_exists_in_database(task)
return self.process_dataset_metadata(data_dir, cache_dir, task)
def get_dataset_constructor(self):
db_metadata = self.get_dataset_metadata_obj(self.name)
constructor = db_metadata.get_constructor()
return constructor
def get_dataset_metadata_obj(self, name):
return MetadataConstructor(name)
def get_dataset_data_dir_path(self):
dataset_metadata = self.get_dataset_metadata_from_cache()
data_dir = dataset_metadata['data_dir']
if not self.name == os.path.basename(data_dir):
data_dir = os.path.join(data_dir, self.name)
return data_dir
def get_dataset_metadata_from_cache(self):
return self.cache_manager.dataset.get(self.name)
def parse_task_name(self, task):
"""Parse the input task string."""
if task == '' or task == 'default':
task_parsed = self.get_default_task()
else:
task_parsed = task
return task_parsed
def get_default_task(self):
"""Returns the default task for this dataset."""
db_metadata = self.get_dataset_metadata_obj(self.name)
default_task = db_metadata.get_default_task()
return default_task
def check_if_task_exists_in_database(self, task):
"""Check if task exists in the list of available tasks for processing."""
if not self.exists_task(task):
raise KeyError('The task \'{}\' does not exist for loading/processing.'
.format(self.task))
def exists_task(self, task):
"""Checks if a task exists for a dataset."""
db_metadata = self.get_dataset_metadata_obj(self.name)
return task in db_metadata.get_tasks()
def process_dataset_metadata(self, data_dir, cache_dir, task):
constructor = self.get_dataset_constructor()
db = constructor(data_path=data_dir,
cache_path=cache_dir,
extract_data=self.extract_data,
verbose=self.verbose)
task_info = db.process(task)
return task_info
def update_cache(self, task_info):
"""Update the cache manager information for this dataset."""
cache_dir = self.get_dataset_cache_dir_path()
self.cache_manager.dataset.update(name=self.name,
cache_dir=cache_dir,
tasks=task_info)
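The first thing most callers hit is the task-name normalization in parse_task_name. A hypothetical standalone rendering (default_task stands in for the value MetadataConstructor would supply):

def parse_task_name(task, default_task='classification'):
    """Map '' or 'default' to the dataset's default task, else pass through."""
    return default_task if task in ('', 'default') else task

assert parse_task_name('') == 'classification'
assert parse_task_name('detection') == 'detection'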
|
mit
|
KaranToor/MA450
|
google-cloud-sdk/.install/.backup/platform/gsutil/gslib/boto_translation.py
|
3
|
73541
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XML/boto gsutil Cloud API implementation for GCS and Amazon S3."""
from __future__ import absolute_import
import base64
import binascii
import datetime
import errno
import httplib
import json
import multiprocessing
import os
import pickle
import random
import re
import socket
import tempfile
import textwrap
import threading
import time
import xml
from xml.dom.minidom import parseString as XmlParseString
from xml.sax import _exceptions as SaxExceptions
import boto
from boto import handler
from boto.exception import ResumableDownloadException as BotoResumableDownloadException
from boto.exception import ResumableTransferDisposition
from boto.gs.cors import Cors
from boto.gs.lifecycle import LifecycleConfig
from boto.s3.cors import CORSConfiguration as S3Cors
from boto.s3.deletemarker import DeleteMarker
from boto.s3.lifecycle import Lifecycle as S3Lifecycle
from boto.s3.prefix import Prefix
from gslib.boto_resumable_upload import BotoResumableUpload
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import ArgumentException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import CloudApi
from gslib.cloud_api import NotEmptyException
from gslib.cloud_api import NotFoundException
from gslib.cloud_api import PreconditionException
from gslib.cloud_api import ResumableDownloadException
from gslib.cloud_api import ResumableUploadAbortException
from gslib.cloud_api import ResumableUploadException
from gslib.cloud_api import ResumableUploadStartOverException
from gslib.cloud_api import ServiceException
from gslib.cloud_api_helper import ListToGetFields
from gslib.cloud_api_helper import ValidateDstObjectMetadata
# Imported for boto AuthHandler purposes.
import gslib.devshell_auth_plugin # pylint: disable=unused-import
from gslib.exception import CommandException
from gslib.exception import InvalidUrlError
from gslib.hashing_helper import Base64EncodeHash
from gslib.hashing_helper import Base64ToHexHash
from gslib.project_id import GOOG_PROJ_ID_HDR
from gslib.project_id import PopulateProjectId
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.translation_helper import AclTranslation
from gslib.translation_helper import AddS3MarkerAclToObjectMetadata
from gslib.translation_helper import CorsTranslation
from gslib.translation_helper import CreateBucketNotFoundException
from gslib.translation_helper import CreateNotFoundExceptionForObjectWrite
from gslib.translation_helper import CreateObjectNotFoundException
from gslib.translation_helper import DEFAULT_CONTENT_TYPE
from gslib.translation_helper import EncodeStringAsLong
from gslib.translation_helper import GenerationFromUrlAndString
from gslib.translation_helper import HeadersFromObjectMetadata
from gslib.translation_helper import LifecycleTranslation
from gslib.translation_helper import REMOVE_CORS_CONFIG
from gslib.translation_helper import S3MarkerAclFromObjectMetadata
from gslib.util import AddAcceptEncodingGzipIfNeeded
from gslib.util import ConfigureNoOpAuthIfNeeded
from gslib.util import DEFAULT_FILE_BUFFER_SIZE
from gslib.util import GetMaxRetryDelay
from gslib.util import GetNumRetries
from gslib.util import S3_DELETE_MARKER_GUID
from gslib.util import TWO_MIB
from gslib.util import UnaryDictToXml
from gslib.util import UTF8
from gslib.util import XML_PROGRESS_CALLBACKS
TRANSLATABLE_BOTO_EXCEPTIONS = (boto.exception.BotoServerError,
boto.exception.InvalidUriError,
boto.exception.ResumableDownloadException,
boto.exception.ResumableUploadException,
boto.exception.StorageCreateError,
boto.exception.StorageResponseError)
# pylint: disable=global-at-module-level
global boto_auth_initialized, boto_auth_initialized_lock
# If multiprocessing is available, these will be overridden to process-safe
# variables in InitializeMultiprocessingVariables.
boto_auth_initialized_lock = threading.Lock()
boto_auth_initialized = False
NON_EXISTENT_OBJECT_REGEX = re.compile(r'.*non-\s*existent\s*object',
flags=re.DOTALL)
# Determines whether an etag is a valid MD5.
MD5_REGEX = re.compile(r'^"*[a-fA-F0-9]{32}"*$')
def InitializeMultiprocessingVariables(): # pylint: disable=invalid-name
"""Perform necessary initialization for multiprocessing.
See gslib.command.InitializeMultiprocessingVariables for an explanation
of why this is necessary.
"""
# pylint: disable=global-variable-undefined
global boto_auth_initialized, boto_auth_initialized_lock
boto_auth_initialized_lock = gslib.util.CreateLock()
boto_auth_initialized = multiprocessing.Value('i', 0)
class DownloadProxyCallbackHandler(object):
"""Intermediary callback to keep track of the number of bytes downloaded."""
def __init__(self, start_byte, callback):
self._start_byte = start_byte
self._callback = callback
def call(self, bytes_downloaded, total_size):
"""Saves necessary data and then calls the given Cloud API callback.
Args:
bytes_downloaded: Number of bytes processed so far.
total_size: Total size of the ongoing operation.
"""
if self._callback:
self._callback(self._start_byte + bytes_downloaded, total_size)
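As a quick illustration of the proxy above (hypothetical usage, not part of gsutil), progress reports are simply offset by the resume point:

def print_progress(bytes_done, total_size):
    print('%d/%d bytes' % (bytes_done, total_size))

proxy = DownloadProxyCallbackHandler(start_byte=1024, callback=print_progress)
proxy.call(512, 4096)  # prints '1536/4096 bytes'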
class BotoTranslation(CloudApi):
"""Boto-based XML translation implementation of gsutil Cloud API.
This class takes gsutil Cloud API objects, translates them to XML service
calls, and translates the results back into gsutil Cloud API objects for
use by the caller.
This class does not support encryption and ignores encryption and decryption
parameters. Behavior when encountering encrypted objects is undefined.
TODO: Implement support.
"""
def __init__(self, bucket_storage_uri_class, logger, status_queue,
provider=None, credentials=None, debug=0, trace_token=None,
perf_trace_token=None):
"""Performs necessary setup for interacting with the cloud storage provider.
Args:
bucket_storage_uri_class: boto storage_uri class, used by APIs that
provide boto translation or mocking.
logger: logging.logger for outputting log messages.
status_queue: Queue for relaying status to UI.
provider: Provider prefix describing cloud storage provider to connect to.
'gs' and 's3' are supported. Function implementations ignore
the provider argument and use this one instead.
credentials: Unused.
debug: Debug level for the API implementation (0..3).
trace_token: Unused in this subclass.
perf_trace_token: Performance trace token to use when making API calls
('gs' provider only).
"""
super(BotoTranslation, self).__init__(
bucket_storage_uri_class, logger, status_queue, provider=provider,
debug=debug, trace_token=trace_token, perf_trace_token=perf_trace_token)
_ = credentials
# pylint: disable=global-variable-undefined, global-variable-not-assigned
global boto_auth_initialized, boto_auth_initialized_lock
with boto_auth_initialized_lock:
ConfigureNoOpAuthIfNeeded()
if isinstance(boto_auth_initialized, bool):
boto_auth_initialized = True
else:
boto_auth_initialized.value = 1
self.api_version = boto.config.get_value(
'GSUtil', 'default_api_version', '1')
def GetBucket(self, bucket_name, provider=None, fields=None):
"""See CloudApi class for function doc strings."""
_ = provider
bucket_uri = self._StorageUriForBucket(bucket_name)
headers = self._CreateBaseHeaders()
try:
return self._BotoBucketToBucket(bucket_uri.get_bucket(validate=True,
headers=headers),
fields=fields)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name)
def ListBuckets(self, project_id=None, provider=None, fields=None):
"""See CloudApi class for function doc strings."""
_ = provider
get_fields = ListToGetFields(list_fields=fields)
headers = self._CreateBaseHeaders()
if self.provider == 'gs':
headers[GOOG_PROJ_ID_HDR] = PopulateProjectId(project_id)
try:
provider_uri = boto.storage_uri(
'%s://' % self.provider,
suppress_consec_slashes=False,
bucket_storage_uri_class=self.bucket_storage_uri_class,
debug=self.debug)
buckets_iter = provider_uri.get_all_buckets(headers=headers)
for bucket in buckets_iter:
if self.provider == 's3' and bucket.name.lower() != bucket.name:
# S3 listings can return buckets with upper-case names, but boto
# can't successfully call them.
continue
yield self._BotoBucketToBucket(bucket, fields=get_fields)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e)
def PatchBucket(self, bucket_name, metadata, canned_acl=None,
canned_def_acl=None, preconditions=None, provider=None,
fields=None):
"""See CloudApi class for function doc strings."""
_ = provider
bucket_uri = self._StorageUriForBucket(bucket_name)
headers = self._CreateBaseHeaders()
self._AddPreconditionsToHeaders(preconditions, headers)
try:
if metadata.acl:
boto_acl = AclTranslation.BotoAclFromMessage(metadata.acl)
bucket_uri.set_xml_acl(boto_acl.to_xml(), headers=headers)
if canned_acl:
canned_acls = bucket_uri.canned_acls()
if canned_acl not in canned_acls:
raise CommandException('Invalid canned ACL "%s".' % canned_acl)
bucket_uri.set_acl(canned_acl, bucket_uri.object_name, headers=headers)
if canned_def_acl:
canned_acls = bucket_uri.canned_acls()
if canned_def_acl not in canned_acls:
raise CommandException('Invalid canned ACL "%s".' % canned_def_acl)
bucket_uri.set_def_acl(canned_def_acl, bucket_uri.object_name,
headers=headers)
if metadata.cors:
if metadata.cors == REMOVE_CORS_CONFIG:
metadata.cors = []
boto_cors = CorsTranslation.BotoCorsFromMessage(metadata.cors)
bucket_uri.set_cors(boto_cors, False, headers=headers)
if metadata.defaultObjectAcl:
boto_acl = AclTranslation.BotoAclFromMessage(
metadata.defaultObjectAcl)
bucket_uri.set_def_xml_acl(boto_acl.to_xml(), headers=headers)
if metadata.lifecycle:
boto_lifecycle = LifecycleTranslation.BotoLifecycleFromMessage(
metadata.lifecycle)
bucket_uri.configure_lifecycle(boto_lifecycle, False, headers=headers)
if metadata.logging:
if metadata.logging.logBucket and metadata.logging.logObjectPrefix:
bucket_uri.enable_logging(metadata.logging.logBucket,
metadata.logging.logObjectPrefix,
False, headers=headers)
else: # Logging field is present and empty. Disable logging.
bucket_uri.disable_logging(False, headers=headers)
if metadata.versioning:
bucket_uri.configure_versioning(metadata.versioning.enabled,
headers=headers)
if metadata.website:
main_page_suffix = metadata.website.mainPageSuffix
error_page = metadata.website.notFoundPage
bucket_uri.set_website_config(main_page_suffix, error_page,
headers=headers)
return self.GetBucket(bucket_name, fields=fields)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name)
def CreateBucket(self, bucket_name, project_id=None, metadata=None,
provider=None, fields=None):
"""See CloudApi class for function doc strings."""
_ = provider
bucket_uri = self._StorageUriForBucket(bucket_name)
location = ''
if metadata and metadata.location:
location = metadata.location.lower()
# Pass storage_class param only if this is a GCS bucket. (In S3 the
# storage class is specified on the key object.)
headers = self._CreateBaseHeaders()
if bucket_uri.scheme == 'gs':
headers[GOOG_PROJ_ID_HDR] = PopulateProjectId(project_id)
storage_class = ''
if metadata and metadata.storageClass:
storage_class = metadata.storageClass
try:
bucket_uri.create_bucket(headers=headers, location=location,
storage_class=storage_class)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name)
else:
try:
bucket_uri.create_bucket(headers=headers, location=location)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name)
return self.GetBucket(bucket_name, fields=fields)
def DeleteBucket(self, bucket_name, preconditions=None, provider=None):
"""See CloudApi class for function doc strings."""
_ = provider, preconditions
bucket_uri = self._StorageUriForBucket(bucket_name)
headers = self._CreateBaseHeaders()
try:
bucket_uri.delete_bucket(headers=headers)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
translated_exception = self._TranslateBotoException(
e, bucket_name=bucket_name)
if (translated_exception and
'BucketNotEmpty' in translated_exception.reason):
try:
if bucket_uri.get_versioning_config():
if self.provider == 's3':
raise NotEmptyException(
'VersionedBucketNotEmpty (%s). Currently, gsutil does not '
'support listing or removing S3 DeleteMarkers, so you may '
'need to delete these using another tool to successfully '
'delete this bucket.' % bucket_name, status=e.status)
raise NotEmptyException(
'VersionedBucketNotEmpty (%s)' % bucket_name, status=e.status)
else:
raise NotEmptyException('BucketNotEmpty (%s)' % bucket_name,
status=e.status)
except TRANSLATABLE_BOTO_EXCEPTIONS, e2:
self._TranslateExceptionAndRaise(e2, bucket_name=bucket_name)
elif translated_exception and translated_exception.status == 404:
raise NotFoundException('Bucket %s does not exist.' % bucket_name)
else:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name)
def ListObjects(self, bucket_name, prefix=None, delimiter=None,
all_versions=None, provider=None, fields=None):
"""See CloudApi class for function doc strings."""
_ = provider
get_fields = ListToGetFields(list_fields=fields)
bucket_uri = self._StorageUriForBucket(bucket_name)
headers = self._CreateBaseHeaders()
yield_prefixes = fields is None or 'prefixes' in fields
yield_objects = fields is None or any(
field.startswith('items/') for field in fields)
try:
objects_iter = bucket_uri.list_bucket(prefix=prefix or '',
delimiter=delimiter or '',
all_versions=all_versions,
headers=headers)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name)
try:
for key in objects_iter:
if yield_prefixes and isinstance(key, Prefix):
yield CloudApi.CsObjectOrPrefix(key.name,
CloudApi.CsObjectOrPrefixType.PREFIX)
elif yield_objects:
key_to_convert = key
# Listed keys are populated with these fields during bucket listing.
key_http_fields = set(['bucket', 'etag', 'name', 'updated',
'generation', 'metageneration', 'size'])
# When fields == None, the caller is requesting all possible fields.
# If the caller requested any fields that are not populated by bucket
# listing, we'll need to make a separate HTTP call for each object to
# get its metadata and populate the remaining fields with the result.
if not get_fields or (get_fields and not
get_fields.issubset(key_http_fields)):
generation = None
if getattr(key, 'generation', None):
generation = key.generation
if getattr(key, 'version_id', None):
generation = key.version_id
key_to_convert = self._GetBotoKey(bucket_name, key.name,
generation=generation)
return_object = self._BotoKeyToObject(key_to_convert,
fields=get_fields)
yield CloudApi.CsObjectOrPrefix(return_object,
CloudApi.CsObjectOrPrefixType.OBJECT)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name)
def GetObjectMetadata(self, bucket_name, object_name, generation=None,
provider=None, fields=None):
"""See CloudApi class for function doc strings."""
_ = provider
try:
return self._BotoKeyToObject(self._GetBotoKey(bucket_name, object_name,
generation=generation),
fields=fields)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name,
object_name=object_name,
generation=generation)
def _CurryDigester(self, digester_object):
"""Curries a digester object into a form consumable by boto.
    Key instantiates its own digesters by calling hash_algs[alg]() (note that
    this call takes no arguments). So, in order to pass in our caught-up
    digesters during a resumable download, we need to hand boto the digester
    object itself rather than letting it look one up by algorithm name. Here
    we use a lambda to make that lookup implicit.
Args:
digester_object: Input object to be returned by the created function.
Returns:
A function which when called will return the input object.
"""
return lambda: digester_object
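  # Illustrative sketch of the currying above (assumed usage, mirroring the
  # hash_algs construction in GetObjectMedia below):
  #   import hashlib
  #   digester = hashlib.md5()
  #   digester.update('bytes already downloaded')  # digester is "caught up"
  #   hash_algs = {'md5': self._CurryDigester(digester)}
  #   hash_algs['md5']() is digester  # boto's no-arg call returns our object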
def GetObjectMedia(
self, bucket_name, object_name, download_stream, provider=None,
generation=None, object_size=None,
compressed_encoding=False,
download_strategy=CloudApi.DownloadStrategy.ONE_SHOT,
start_byte=0, end_byte=None, progress_callback=None,
serialization_data=None, digesters=None, decryption_tuple=None):
"""See CloudApi class for function doc strings."""
# This implementation will get the object metadata first if we don't pass it
# in via serialization_data.
headers = self._CreateBaseHeaders()
AddAcceptEncodingGzipIfNeeded(
headers, compressed_encoding=compressed_encoding)
if end_byte is not None:
headers['range'] = 'bytes=%s-%s' % (start_byte, end_byte)
elif start_byte > 0:
headers['range'] = 'bytes=%s-' % (start_byte)
elif start_byte < 0:
headers['range'] = 'bytes=%s' % (start_byte)
# Since in most cases we already made a call to get the object metadata,
# here we avoid an extra HTTP call by unpickling the key. This is coupled
# with the implementation in _BotoKeyToObject.
if serialization_data:
serialization_dict = json.loads(serialization_data)
key = pickle.loads(binascii.a2b_base64(serialization_dict['url']))
else:
key = self._GetBotoKey(bucket_name, object_name, generation=generation)
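    # Illustrative round-trip (an assumption based on _BotoKeyToObject below):
    # the mediaLink field stores binascii.b2a_base64(pickle.dumps(key, ...)),
    # and serialization_data is JSON carrying that string under 'url' (plus
    # 'total_size'), so the pickle.loads() above recovers the boto key
    # without an extra metadata HTTP call.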
if digesters and self.provider == 'gs':
hash_algs = {}
for alg in digesters:
hash_algs[alg] = self._CurryDigester(digesters[alg])
else:
hash_algs = {}
total_size = object_size or 0
if serialization_data:
total_size = json.loads(serialization_data)['total_size']
if total_size:
num_progress_callbacks = max(int(total_size) / TWO_MIB,
XML_PROGRESS_CALLBACKS)
else:
num_progress_callbacks = XML_PROGRESS_CALLBACKS
try:
if download_strategy is CloudApi.DownloadStrategy.RESUMABLE:
self._PerformResumableDownload(
download_stream, start_byte, end_byte, key,
headers=headers, callback=progress_callback,
num_callbacks=num_progress_callbacks, hash_algs=hash_algs)
elif download_strategy is CloudApi.DownloadStrategy.ONE_SHOT:
self._PerformSimpleDownload(
download_stream, key, progress_callback=progress_callback,
num_progress_callbacks=num_progress_callbacks, headers=headers,
hash_algs=hash_algs)
else:
raise ArgumentException('Unsupported DownloadStrategy: %s' %
download_strategy)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name,
object_name=object_name,
generation=generation)
if self.provider == 's3':
if digesters:
class HashToDigester(object):
"""Wrapper class to expose hash digests.
boto creates its own digesters in s3's get_file, returning on-the-fly
hashes only by way of key.local_hashes. To propagate the digest back
to the caller, this stub class implements the digest() function.
"""
def __init__(self, hash_val):
self.hash_val = hash_val
def digest(self): # pylint: disable=invalid-name
return self.hash_val
for alg_name in digesters:
if ((download_strategy == CloudApi.DownloadStrategy.RESUMABLE and
start_byte != 0) or
not ((getattr(key, 'local_hashes', None) and
alg_name in key.local_hashes))):
# For resumable downloads, boto does not provide a mechanism to
# catch up the hash in the case of a partially complete download.
# In this case or in the case where no digest was successfully
# calculated, set the digester to None, which indicates that we'll
# need to manually calculate the hash from the local file once it
# is complete.
digesters[alg_name] = None
else:
# Use the on-the-fly hash.
digesters[alg_name] = HashToDigester(key.local_hashes[alg_name])
def _PerformSimpleDownload(self, download_stream, key, progress_callback=None,
num_progress_callbacks=XML_PROGRESS_CALLBACKS,
headers=None, hash_algs=None):
try:
key.get_contents_to_file(download_stream, cb=progress_callback,
num_cb=num_progress_callbacks, headers=headers,
hash_algs=hash_algs)
except TypeError: # s3 and mocks do not support hash_algs
key.get_contents_to_file(download_stream, cb=progress_callback,
num_cb=num_progress_callbacks, headers=headers)
def _PerformResumableDownload(self, fp, start_byte, end_byte, key,
headers=None, callback=None,
num_callbacks=XML_PROGRESS_CALLBACKS,
hash_algs=None):
"""Downloads bytes from key to fp, resuming as needed.
Args:
fp: File pointer into which data should be downloaded.
start_byte: Start byte of the download.
end_byte: End byte of the download.
key: Key object from which data is to be downloaded
headers: Headers to send when retrieving the file
callback: (optional) a callback function that will be called to report
progress on the download. The callback should accept two integer
parameters. The first integer represents the number of
bytes that have been successfully transmitted from the service. The
second represents the total number of bytes that need to be
transmitted.
num_callbacks: (optional) If a callback is specified with the callback
parameter, this determines the granularity of the callback
by defining the maximum number of times the callback will be
called during the file transfer.
hash_algs: Dict of hash algorithms to apply to downloaded bytes.
Raises:
ResumableDownloadException on error.
"""
retryable_exceptions = (httplib.HTTPException, IOError, socket.error,
socket.gaierror)
debug = key.bucket.connection.debug
num_retries = GetNumRetries()
progress_less_iterations = 0
last_progress_byte = start_byte
while True: # Retry as long as we're making progress.
try:
cb_handler = DownloadProxyCallbackHandler(start_byte, callback)
headers = headers.copy()
headers['Range'] = 'bytes=%d-%d' % (start_byte, end_byte)
# Disable AWSAuthConnection-level retry behavior, since that would
# cause downloads to restart from scratch.
try:
key.get_file(fp, headers, cb_handler.call, num_callbacks,
override_num_retries=0, hash_algs=hash_algs)
except TypeError:
key.get_file(fp, headers, cb_handler.call, num_callbacks,
override_num_retries=0)
fp.flush()
# Download succeeded.
return
except retryable_exceptions, e: # pylint: disable=catching-non-exception
if debug >= 1:
self.logger.info('Caught exception (%s)', repr(e))
if isinstance(e, IOError) and e.errno == errno.EPIPE:
# Broken pipe error causes httplib to immediately
# close the socket (http://bugs.python.org/issue5542),
# so we need to close and reopen the key before resuming
# the download.
if self.provider == 's3':
key.get_file(fp, headers, cb_handler.call, num_callbacks,
override_num_retries=0)
else: # self.provider == 'gs'
key.get_file(fp, headers, cb_handler.call, num_callbacks,
override_num_retries=0, hash_algs=hash_algs)
except BotoResumableDownloadException, e:
if (e.disposition ==
ResumableTransferDisposition.ABORT_CUR_PROCESS):
raise ResumableDownloadException(e.message)
else:
if debug >= 1:
self.logger.info('Caught ResumableDownloadException (%s) - will '
'retry', e.message)
      # At this point we had a retryable failure; see if we made progress.
start_byte = fp.tell()
if start_byte > last_progress_byte:
last_progress_byte = start_byte
progress_less_iterations = 0
else:
progress_less_iterations += 1
if progress_less_iterations > num_retries:
# Don't retry any longer in the current process.
raise ResumableDownloadException(
'Too many resumable download attempts failed without '
              'progress. You might try this download again later.')
# Close the key, in case a previous download died partway
# through and left data in the underlying key HTTP buffer.
# Do this within a try/except block in case the connection is
# closed (since key.close() attempts to do a final read, in which
# case this read attempt would get an IncompleteRead exception,
# which we can safely ignore).
try:
key.close()
except httplib.IncompleteRead:
pass
sleep_time_secs = min(random.random() * (2 ** progress_less_iterations),
GetMaxRetryDelay())
if debug >= 1:
self.logger.info(
'Got retryable failure (%d progress-less in a row).\nSleeping %d '
'seconds before re-trying', progress_less_iterations,
sleep_time_secs)
time.sleep(sleep_time_secs)
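  # Worked example of the backoff above (illustrative values): if
  # GetMaxRetryDelay() returned 32, progress-less iteration n would sleep a
  # uniform random time drawn from [0, 2**n) seconds, capped at 32; e.g. n=3
  # gives min(random.random() * 8, 32) seconds.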
def PatchObjectMetadata(self, bucket_name, object_name, metadata,
canned_acl=None, generation=None, preconditions=None,
provider=None, fields=None):
"""See CloudApi class for function doc strings."""
_ = provider
object_uri = self._StorageUriForObject(bucket_name, object_name,
generation=generation)
headers = self._CreateBaseHeaders()
meta_headers = HeadersFromObjectMetadata(metadata, self.provider)
metadata_plus = {}
metadata_minus = set()
metadata_changed = False
for k, v in meta_headers.iteritems():
metadata_changed = True
if v is None:
metadata_minus.add(k)
else:
metadata_plus[k] = v
self._AddPreconditionsToHeaders(preconditions, headers)
if metadata_changed:
try:
object_uri.set_metadata(metadata_plus, metadata_minus, False,
headers=headers)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name,
object_name=object_name,
generation=generation)
if metadata.acl:
boto_acl = AclTranslation.BotoAclFromMessage(metadata.acl)
try:
object_uri.set_xml_acl(boto_acl.to_xml(), key_name=object_name,
headers=headers)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name,
object_name=object_name,
generation=generation)
if canned_acl:
canned_acls = object_uri.canned_acls()
if canned_acl not in canned_acls:
raise CommandException('Invalid canned ACL "%s".' % canned_acl)
object_uri.set_acl(canned_acl, object_uri.object_name,
headers=headers)
return self.GetObjectMetadata(bucket_name, object_name,
generation=generation, fields=fields)
def _PerformSimpleUpload(self, dst_uri, upload_stream, md5=None,
canned_acl=None, progress_callback=None,
headers=None):
dst_uri.set_contents_from_file(upload_stream, md5=md5, policy=canned_acl,
cb=progress_callback, headers=headers)
def _PerformStreamingUpload(self, dst_uri, upload_stream, canned_acl=None,
progress_callback=None, headers=None):
if dst_uri.get_provider().supports_chunked_transfer():
dst_uri.set_contents_from_stream(upload_stream, policy=canned_acl,
cb=progress_callback, headers=headers)
else:
# Provider doesn't support chunked transfer, so copy to a temporary
# file.
(temp_fh, temp_path) = tempfile.mkstemp()
try:
with open(temp_path, 'wb') as out_fp:
stream_bytes = upload_stream.read(DEFAULT_FILE_BUFFER_SIZE)
while stream_bytes:
out_fp.write(stream_bytes)
stream_bytes = upload_stream.read(DEFAULT_FILE_BUFFER_SIZE)
with open(temp_path, 'rb') as in_fp:
dst_uri.set_contents_from_file(in_fp, policy=canned_acl,
headers=headers)
finally:
os.close(temp_fh)
os.unlink(temp_path)
def _PerformResumableUpload(self, key, upload_stream, upload_size,
tracker_callback, canned_acl=None,
serialization_data=None, progress_callback=None,
headers=None):
resumable_upload = BotoResumableUpload(
tracker_callback, self.logger, resume_url=serialization_data)
resumable_upload.SendFile(key, upload_stream, upload_size,
canned_acl=canned_acl, cb=progress_callback,
headers=headers)
def _UploadSetup(self, object_metadata, preconditions=None):
"""Shared upload implementation.
Args:
object_metadata: Object metadata describing destination object.
preconditions: Optional gsutil Cloud API preconditions.
Returns:
Headers dictionary, StorageUri for upload (based on inputs)
"""
ValidateDstObjectMetadata(object_metadata)
headers = self._CreateBaseHeaders()
headers.update(HeadersFromObjectMetadata(object_metadata, self.provider))
if object_metadata.crc32c:
if 'x-goog-hash' in headers:
headers['x-goog-hash'] += (
',crc32c=%s' % object_metadata.crc32c.rstrip('\n'))
else:
headers['x-goog-hash'] = (
'crc32c=%s' % object_metadata.crc32c.rstrip('\n'))
if object_metadata.md5Hash:
if 'x-goog-hash' in headers:
headers['x-goog-hash'] += (
',md5=%s' % object_metadata.md5Hash.rstrip('\n'))
else:
headers['x-goog-hash'] = (
'md5=%s' % object_metadata.md5Hash.rstrip('\n'))
if 'content-type' in headers and not headers['content-type']:
headers['content-type'] = 'application/octet-stream'
self._AddPreconditionsToHeaders(preconditions, headers)
dst_uri = self._StorageUriForObject(object_metadata.bucket,
object_metadata.name)
return headers, dst_uri
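  # Illustrative header produced above (assumed metadata with both hashes
  # set; the values shown are the empty-input digests):
  #   headers['x-goog-hash'] ==
  #       'crc32c=AAAAAA==,md5=1B2M2Y8AsgTpgAmY7PhCfg=='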
def _HandleSuccessfulUpload(self, dst_uri, object_metadata, fields=None):
"""Set ACLs on an uploaded object and return its metadata.
Args:
dst_uri: Generation-specific StorageUri describing the object.
object_metadata: Metadata for the object, including an ACL if applicable.
fields: If present, return only these Object metadata fields.
Returns:
gsutil Cloud API Object metadata.
Raises:
CommandException if the object was overwritten / deleted concurrently.
"""
try:
# The XML API does not support if-generation-match for GET requests.
# Therefore, if the object gets overwritten before the ACL and get_key
# operations, the best we can do is warn that it happened.
self._SetObjectAcl(object_metadata, dst_uri)
return self._BotoKeyToObject(dst_uri.get_key(), fields=fields)
except boto.exception.InvalidUriError as e:
if e.message and NON_EXISTENT_OBJECT_REGEX.match(e.message.encode(UTF8)):
raise CommandException('\n'.join(textwrap.wrap(
'Uploaded object (%s) was deleted or overwritten immediately '
'after it was uploaded. This can happen if you attempt to upload '
'to the same object multiple times concurrently.' % dst_uri.uri)))
else:
raise
def _SetObjectAcl(self, object_metadata, dst_uri):
"""Sets the ACL (if present in object_metadata) on an uploaded object."""
headers = self._CreateBaseHeaders()
if object_metadata.acl:
boto_acl = AclTranslation.BotoAclFromMessage(object_metadata.acl)
dst_uri.set_xml_acl(boto_acl.to_xml(), headers=headers)
elif self.provider == 's3':
s3_acl = S3MarkerAclFromObjectMetadata(object_metadata)
if s3_acl:
dst_uri.set_xml_acl(s3_acl, headers=headers)
def UploadObjectResumable(
self, upload_stream, object_metadata, canned_acl=None, preconditions=None,
size=None, serialization_data=None, tracker_callback=None,
progress_callback=None, encryption_tuple=None, provider=None,
fields=None):
"""See CloudApi class for function doc strings."""
if self.provider == 's3':
# Resumable uploads are not supported for s3.
return self.UploadObject(
upload_stream, object_metadata, canned_acl=canned_acl,
preconditions=preconditions, fields=fields, size=size)
headers, dst_uri = self._UploadSetup(object_metadata,
preconditions=preconditions)
if not tracker_callback:
raise ArgumentException('No tracker callback function set for '
'resumable upload of %s' % dst_uri)
try:
self._PerformResumableUpload(dst_uri.new_key(headers=headers),
upload_stream, size, tracker_callback,
canned_acl=canned_acl,
serialization_data=serialization_data,
progress_callback=progress_callback,
headers=headers)
return self._HandleSuccessfulUpload(dst_uri, object_metadata,
fields=fields)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
not_found_exception = CreateNotFoundExceptionForObjectWrite(
self.provider, object_metadata.bucket)
self._TranslateExceptionAndRaise(e, bucket_name=object_metadata.bucket,
object_name=object_metadata.name,
not_found_exception=not_found_exception)
def UploadObjectStreaming(self, upload_stream, object_metadata,
canned_acl=None, progress_callback=None,
preconditions=None, encryption_tuple=None,
provider=None, fields=None):
"""See CloudApi class for function doc strings."""
headers, dst_uri = self._UploadSetup(object_metadata,
preconditions=preconditions)
try:
self._PerformStreamingUpload(
dst_uri, upload_stream, canned_acl=canned_acl,
progress_callback=progress_callback, headers=headers)
return self._HandleSuccessfulUpload(dst_uri, object_metadata,
fields=fields)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
not_found_exception = CreateNotFoundExceptionForObjectWrite(
self.provider, object_metadata.bucket)
self._TranslateExceptionAndRaise(e, bucket_name=object_metadata.bucket,
object_name=object_metadata.name,
not_found_exception=not_found_exception)
def UploadObject(self, upload_stream, object_metadata, canned_acl=None,
preconditions=None, size=None, progress_callback=None,
encryption_tuple=None, provider=None, fields=None):
"""See CloudApi class for function doc strings."""
headers, dst_uri = self._UploadSetup(object_metadata,
preconditions=preconditions)
try:
md5 = None
if object_metadata.md5Hash:
md5 = []
# boto expects hex at index 0, base64 at index 1
md5.append(Base64ToHexHash(object_metadata.md5Hash))
md5.append(object_metadata.md5Hash.strip('\n"\''))
self._PerformSimpleUpload(dst_uri, upload_stream, md5=md5,
canned_acl=canned_acl,
progress_callback=progress_callback,
headers=headers)
return self._HandleSuccessfulUpload(dst_uri, object_metadata,
fields=fields)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
not_found_exception = CreateNotFoundExceptionForObjectWrite(
self.provider, object_metadata.bucket)
self._TranslateExceptionAndRaise(e, bucket_name=object_metadata.bucket,
object_name=object_metadata.name,
not_found_exception=not_found_exception)
def DeleteObject(self, bucket_name, object_name, preconditions=None,
generation=None, provider=None):
"""See CloudApi class for function doc strings."""
_ = provider
headers = self._CreateBaseHeaders()
self._AddPreconditionsToHeaders(preconditions, headers)
uri = self._StorageUriForObject(bucket_name, object_name,
generation=generation)
try:
uri.delete_key(validate=False, headers=headers)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name,
object_name=object_name,
generation=generation)
def CopyObject(self, src_obj_metadata, dst_obj_metadata, src_generation=None,
canned_acl=None, preconditions=None, progress_callback=None,
max_bytes_per_call=None, encryption_tuple=None,
decryption_tuple=None, provider=None, fields=None):
"""See CloudApi class for function doc strings."""
_ = provider
if max_bytes_per_call is not None:
      raise NotImplementedError('XML API does not support max_bytes_per_call')
dst_uri = self._StorageUriForObject(dst_obj_metadata.bucket,
dst_obj_metadata.name)
# Usually it's okay to treat version_id and generation as
# the same, but in this case the underlying boto call determines the
# provider based on the presence of one or the other.
src_version_id = None
if self.provider == 's3':
src_version_id = src_generation
src_generation = None
headers = self._CreateBaseHeaders()
headers.update(HeadersFromObjectMetadata(dst_obj_metadata, self.provider))
self._AddPreconditionsToHeaders(preconditions, headers)
if canned_acl:
headers[dst_uri.get_provider().acl_header] = canned_acl
preserve_acl = True if dst_obj_metadata.acl else False
if self.provider == 's3':
s3_acl = S3MarkerAclFromObjectMetadata(dst_obj_metadata)
if s3_acl:
preserve_acl = True
try:
new_key = dst_uri.copy_key(
src_obj_metadata.bucket, src_obj_metadata.name,
preserve_acl=preserve_acl, headers=headers,
src_version_id=src_version_id, src_generation=src_generation)
return self._BotoKeyToObject(new_key, fields=fields)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
not_found_exception = CreateNotFoundExceptionForObjectWrite(
self.provider, dst_obj_metadata.bucket, src_provider=self.provider,
src_bucket_name=src_obj_metadata.bucket,
src_object_name=src_obj_metadata.name, src_generation=src_generation)
self._TranslateExceptionAndRaise(e, bucket_name=dst_obj_metadata.bucket,
object_name=dst_obj_metadata.name,
not_found_exception=not_found_exception)
def ComposeObject(self, src_objs_metadata, dst_obj_metadata,
preconditions=None, encryption_tuple=None, provider=None,
fields=None):
"""See CloudApi class for function doc strings."""
_ = provider
ValidateDstObjectMetadata(dst_obj_metadata)
dst_obj_name = dst_obj_metadata.name
dst_obj_metadata.name = None
dst_bucket_name = dst_obj_metadata.bucket
dst_obj_metadata.bucket = None
headers = self._CreateBaseHeaders()
headers.update(HeadersFromObjectMetadata(dst_obj_metadata, self.provider))
self._AddPreconditionsToHeaders(preconditions, headers)
if not dst_obj_metadata.contentType:
dst_obj_metadata.contentType = DEFAULT_CONTENT_TYPE
headers['content-type'] = dst_obj_metadata.contentType
dst_uri = self._StorageUriForObject(dst_bucket_name, dst_obj_name)
src_components = []
for src_obj in src_objs_metadata:
src_uri = self._StorageUriForObject(dst_bucket_name, src_obj.name,
generation=src_obj.generation)
src_components.append(src_uri)
try:
dst_uri.compose(src_components, headers=headers)
return self.GetObjectMetadata(dst_bucket_name, dst_obj_name,
fields=fields)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, dst_obj_metadata.bucket,
dst_obj_metadata.name)
def _AddPreconditionsToHeaders(self, preconditions, headers):
"""Adds preconditions (if any) to headers."""
if preconditions and self.provider == 'gs':
if preconditions.gen_match is not None:
headers['x-goog-if-generation-match'] = str(preconditions.gen_match)
if preconditions.meta_gen_match is not None:
headers['x-goog-if-metageneration-match'] = str(
preconditions.meta_gen_match)
def _CreateBaseHeaders(self):
"""Creates base headers used for all API calls in this class."""
base_headers = {}
if self.provider == 'gs':
base_headers['x-goog-api-version'] = self.api_version
if self.provider == 'gs' and self.perf_trace_token:
base_headers['cookie'] = self.perf_trace_token
return base_headers
def _GetMD5FromETag(self, src_etag):
"""Returns an MD5 from the etag iff the etag is a valid MD5 hash.
Args:
src_etag: Object etag for which to return the MD5.
Returns:
MD5 in hex string format, or None.
"""
if src_etag and MD5_REGEX.search(src_etag):
return src_etag.strip('"\'').lower()
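  # Illustrative examples (assumed inputs):
  #   self._GetMD5FromETag('"D41D8CD98F00B204E9800998ECF8427E"')
  #   # -> 'd41d8cd98f00b204e9800998ecf8427e'
  #   self._GetMD5FromETag('"deadbeef-3"')  # -> None (not a valid MD5)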
def _StorageUriForBucket(self, bucket):
"""Returns a boto storage_uri for the given bucket name.
Args:
bucket: Bucket name (string).
Returns:
Boto storage_uri for the bucket.
"""
return boto.storage_uri(
'%s://%s' % (self.provider, bucket),
suppress_consec_slashes=False,
bucket_storage_uri_class=self.bucket_storage_uri_class,
debug=self.debug, validate=False)
def _StorageUriForObject(self, bucket, object_name, generation=None):
"""Returns a boto storage_uri for the given object.
Args:
bucket: Bucket name (string).
object_name: Object name (string).
generation: Generation or version_id of object. If None, live version
of the object is used.
Returns:
Boto storage_uri for the object.
"""
uri_string = '%s://%s/%s' % (self.provider, bucket, object_name)
if generation:
uri_string += '#%s' % generation
return boto.storage_uri(
uri_string, suppress_consec_slashes=False,
bucket_storage_uri_class=self.bucket_storage_uri_class,
debug=self.debug)
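  # Illustrative URI strings built above (bucket/object names are assumed):
  #   gs://my-bucket/my-object             (live version, generation=None)
  #   gs://my-bucket/my-object#1361234567  (explicit generation)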
def _GetBotoKey(self, bucket_name, object_name, generation=None):
"""Gets the boto key for an object.
Args:
bucket_name: Bucket containing the object.
object_name: Object name.
generation: Generation or version of the object to retrieve.
Returns:
Boto key for the object.
"""
object_uri = self._StorageUriForObject(bucket_name, object_name,
generation=generation)
try:
headers = self._CreateBaseHeaders()
key = object_uri.get_key(headers=headers)
if not key:
raise CreateObjectNotFoundException('404', self.provider,
bucket_name, object_name,
generation=generation)
return key
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket_name,
object_name=object_name,
generation=generation)
# pylint: disable=too-many-statements
def _BotoBucketToBucket(self, bucket, fields=None):
"""Constructs an apitools Bucket from a boto bucket.
Args:
bucket: Boto bucket.
fields: If present, construct the apitools Bucket with only this set of
metadata fields.
Returns:
apitools Bucket.
"""
bucket_uri = self._StorageUriForBucket(bucket.name)
cloud_api_bucket = apitools_messages.Bucket(name=bucket.name,
id=bucket.name)
headers = self._CreateBaseHeaders()
if self.provider == 'gs':
if not fields or 'storageClass' in fields:
if hasattr(bucket, 'get_storage_class'):
cloud_api_bucket.storageClass = bucket.get_storage_class()
if not fields or 'acl' in fields:
for acl in AclTranslation.BotoBucketAclToMessage(
bucket.get_acl(headers=headers)):
try:
cloud_api_bucket.acl.append(acl)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
translated_exception = self._TranslateBotoException(
e, bucket_name=bucket.name)
if (translated_exception and
isinstance(translated_exception,
AccessDeniedException)):
# JSON API doesn't differentiate between a blank ACL list
# and an access denied, so this is intentionally left blank.
pass
else:
self._TranslateExceptionAndRaise(e, bucket_name=bucket.name)
if not fields or 'cors' in fields:
try:
boto_cors = bucket_uri.get_cors(headers=headers)
cloud_api_bucket.cors = CorsTranslation.BotoCorsToMessage(boto_cors)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket.name)
if not fields or 'defaultObjectAcl' in fields:
for acl in AclTranslation.BotoObjectAclToMessage(
bucket.get_def_acl(headers=headers)):
try:
cloud_api_bucket.defaultObjectAcl.append(acl)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
translated_exception = self._TranslateBotoException(
e, bucket_name=bucket.name)
if (translated_exception and
isinstance(translated_exception,
AccessDeniedException)):
# JSON API doesn't differentiate between a blank ACL list
# and an access denied, so this is intentionally left blank.
pass
else:
self._TranslateExceptionAndRaise(e, bucket_name=bucket.name)
if not fields or 'lifecycle' in fields:
try:
boto_lifecycle = bucket_uri.get_lifecycle_config(headers=headers)
cloud_api_bucket.lifecycle = (
LifecycleTranslation.BotoLifecycleToMessage(boto_lifecycle))
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket.name)
if not fields or 'logging' in fields:
try:
boto_logging = bucket_uri.get_logging_config(headers=headers)
if boto_logging and 'Logging' in boto_logging:
logging_config = boto_logging['Logging']
log_object_prefix_present = 'LogObjectPrefix' in logging_config
log_bucket_present = 'LogBucket' in logging_config
if log_object_prefix_present or log_bucket_present:
cloud_api_bucket.logging = apitools_messages.Bucket.LoggingValue()
if log_object_prefix_present:
cloud_api_bucket.logging.logObjectPrefix = (
logging_config['LogObjectPrefix'])
if log_bucket_present:
cloud_api_bucket.logging.logBucket = logging_config['LogBucket']
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket.name)
if not fields or 'website' in fields:
try:
boto_website = bucket_uri.get_website_config(headers=headers)
if boto_website and 'WebsiteConfiguration' in boto_website:
website_config = boto_website['WebsiteConfiguration']
main_page_suffix_present = 'MainPageSuffix' in website_config
not_found_page_present = 'NotFoundPage' in website_config
if main_page_suffix_present or not_found_page_present:
cloud_api_bucket.website = apitools_messages.Bucket.WebsiteValue()
if main_page_suffix_present:
cloud_api_bucket.website.mainPageSuffix = (
website_config['MainPageSuffix'])
if not_found_page_present:
cloud_api_bucket.website.notFoundPage = (
website_config['NotFoundPage'])
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e, bucket_name=bucket.name)
if not fields or 'location' in fields:
cloud_api_bucket.location = bucket_uri.get_location(headers=headers)
if not fields or 'versioning' in fields:
versioning = bucket_uri.get_versioning_config(headers=headers)
if versioning:
if (self.provider == 's3' and 'Versioning' in versioning and
versioning['Versioning'] == 'Enabled'):
cloud_api_bucket.versioning = (
apitools_messages.Bucket.VersioningValue(enabled=True))
elif self.provider == 'gs':
cloud_api_bucket.versioning = (
apitools_messages.Bucket.VersioningValue(enabled=True))
# For S3 long bucket listing we do not support CORS, lifecycle, website, and
# logging translation. The individual commands can be used to get
# the XML equivalents for S3.
return cloud_api_bucket
def _BotoKeyToObject(self, key, fields=None):
"""Constructs an apitools Object from a boto key.
Args:
key: Boto key to construct Object from.
fields: If present, construct the apitools Object with only this set of
metadata fields.
Returns:
apitools Object corresponding to key.
"""
custom_metadata = None
if not fields or 'metadata' in fields or len(
[field for field in fields if field.startswith('metadata/')]) >= 1:
custom_metadata = self._TranslateBotoKeyCustomMetadata(key)
cache_control = None
if not fields or 'cacheControl' in fields:
cache_control = getattr(key, 'cache_control', None)
component_count = None
if not fields or 'componentCount' in fields:
component_count = getattr(key, 'component_count', None)
content_disposition = None
if not fields or 'contentDisposition' in fields:
content_disposition = getattr(key, 'content_disposition', None)
# Other fields like updated and ACL depend on the generation
# of the object, so populate that regardless of whether it was requested.
generation = self._TranslateBotoKeyGeneration(key)
metageneration = None
if not fields or 'metageneration' in fields:
metageneration = self._TranslateBotoKeyMetageneration(key)
time_created = None
if not fields or 'timeCreated' in fields:
# Translation code to avoid a dependency on dateutil.
time_created = self._TranslateBotoKeyTimestamp(key)
etag = None
if not fields or 'etag' in fields:
etag = getattr(key, 'etag', None)
if etag:
etag = etag.strip('"\'')
crc32c = None
if not fields or 'crc32c' in fields:
if hasattr(key, 'cloud_hashes') and 'crc32c' in key.cloud_hashes:
crc32c = base64.encodestring(key.cloud_hashes['crc32c']).rstrip('\n')
md5_hash = None
if not fields or 'md5Hash' in fields:
if hasattr(key, 'cloud_hashes') and 'md5' in key.cloud_hashes:
md5_hash = base64.encodestring(key.cloud_hashes['md5']).rstrip('\n')
elif self._GetMD5FromETag(getattr(key, 'etag', None)):
md5_hash = Base64EncodeHash(self._GetMD5FromETag(key.etag))
elif self.provider == 's3':
# S3 etags are MD5s for non-multi-part objects, but multi-part objects
# (which include all objects >= 5 GB) have a custom checksum
# implementation that is not currently supported by gsutil.
self.logger.warn(
'Non-MD5 etag (%s) present for key %s, data integrity checks are '
'not possible.', key.etag, key)
# Serialize the boto key in the media link if it is requested. This
# way we can later access the key without adding an HTTP call.
media_link = None
if not fields or 'mediaLink' in fields:
media_link = binascii.b2a_base64(
pickle.dumps(key, pickle.HIGHEST_PROTOCOL))
size = None
if not fields or 'size' in fields:
size = key.size or 0
storage_class = None
if not fields or 'storageClass' in fields:
# TODO: Scrub all callers requesting the storageClass field and then
# revert this to storage_class; the base storage_class
# attribute calls GET on the bucket if the storage class is not already
# populated in the key, which can fail if the user does not have
# permission on the bucket.
storage_class = getattr(key, '_storage_class', None)
cloud_api_object = apitools_messages.Object(
bucket=key.bucket.name,
name=key.name,
size=size,
contentEncoding=key.content_encoding,
contentLanguage=key.content_language,
contentType=key.content_type,
cacheControl=cache_control,
contentDisposition=content_disposition,
etag=etag,
crc32c=crc32c,
md5Hash=md5_hash,
generation=generation,
metageneration=metageneration,
componentCount=component_count,
timeCreated=time_created,
metadata=custom_metadata,
mediaLink=media_link,
storageClass=storage_class)
# Remaining functions amend cloud_api_object.
self._TranslateDeleteMarker(key, cloud_api_object)
if not fields or 'acl' in fields:
generation_str = GenerationFromUrlAndString(
StorageUrlFromString(self.provider), generation)
self._TranslateBotoKeyAcl(key, cloud_api_object,
generation=generation_str)
return cloud_api_object
def _TranslateBotoKeyCustomMetadata(self, key):
"""Populates an apitools message from custom metadata in the boto key."""
custom_metadata = None
if getattr(key, 'metadata', None):
custom_metadata = apitools_messages.Object.MetadataValue(
additionalProperties=[])
for k, v in key.metadata.iteritems():
if k.lower() == 'content-language':
# Work around content-language being inserted into custom metadata.
continue
custom_metadata.additionalProperties.append(
apitools_messages.Object.MetadataValue.AdditionalProperty(
key=k, value=v))
return custom_metadata
def _TranslateBotoKeyGeneration(self, key):
"""Returns the generation/version_id number from the boto key if present."""
generation = None
if self.provider == 'gs':
if getattr(key, 'generation', None):
generation = long(key.generation)
elif self.provider == 's3':
if getattr(key, 'version_id', None):
generation = EncodeStringAsLong(key.version_id)
return generation
def _TranslateBotoKeyMetageneration(self, key):
"""Returns the metageneration number from the boto key if present."""
metageneration = None
if self.provider == 'gs':
if getattr(key, 'metageneration', None):
metageneration = long(key.metageneration)
return metageneration
def _TranslateBotoKeyTimestamp(self, key):
"""Parses the timestamp from the boto key into an datetime object.
This avoids a dependency on dateutil.
Args:
key: Boto key to get timestamp from.
Returns:
datetime object if string is parsed successfully, None otherwise.
"""
if key.last_modified:
if '.' in key.last_modified:
key_us_timestamp = key.last_modified.rstrip('Z') + '000Z'
else:
key_us_timestamp = key.last_modified.rstrip('Z') + '.000000Z'
fmt = '%Y-%m-%dT%H:%M:%S.%fZ'
try:
return datetime.datetime.strptime(key_us_timestamp, fmt)
except ValueError:
try:
# Try alternate format
fmt = '%a, %d %b %Y %H:%M:%S %Z'
return datetime.datetime.strptime(key.last_modified, fmt)
except ValueError:
# Could not parse the time; leave updated as None.
return None
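  # Illustrative inputs handled above (assumed values):
  #   '2013-01-02T03:04:05.123Z'       -> parsed with the microsecond format
  #   '2013-01-02T03:04:05Z'           -> '.000000' appended, then parsed
  #   'Wed, 02 Jan 2013 03:04:05 GMT'  -> parsed with the alternate format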
def _TranslateDeleteMarker(self, key, cloud_api_object):
"""Marks deleted objects with a metadata value (for S3 compatibility)."""
if isinstance(key, DeleteMarker):
if not cloud_api_object.metadata:
cloud_api_object.metadata = apitools_messages.Object.MetadataValue()
cloud_api_object.metadata.additionalProperties = []
cloud_api_object.metadata.additionalProperties.append(
apitools_messages.Object.MetadataValue.AdditionalProperty(
key=S3_DELETE_MARKER_GUID, value=True))
def _TranslateBotoKeyAcl(self, key, cloud_api_object, generation=None):
"""Updates cloud_api_object with the ACL from the boto key."""
storage_uri_for_key = self._StorageUriForObject(key.bucket.name, key.name,
generation=generation)
headers = self._CreateBaseHeaders()
try:
if self.provider == 'gs':
key_acl = storage_uri_for_key.get_acl(headers=headers)
# key.get_acl() does not support versioning so we need to use
# storage_uri to ensure we're getting the versioned ACL.
for acl in AclTranslation.BotoObjectAclToMessage(key_acl):
cloud_api_object.acl.append(acl)
if self.provider == 's3':
key_acl = key.get_xml_acl(headers=headers)
# ACLs for s3 are different and we use special markers to represent
# them in the gsutil Cloud API.
AddS3MarkerAclToObjectMetadata(cloud_api_object, key_acl)
except boto.exception.GSResponseError, e:
if e.status == 403:
# Consume access denied exceptions to mimic JSON behavior of simply
# returning None if sufficient permission is not present. The caller
# needs to handle the case where the ACL is not populated.
pass
else:
raise
def _TranslateExceptionAndRaise(self, e, bucket_name=None, object_name=None,
generation=None, not_found_exception=None):
"""Translates a Boto exception and raises the translated or original value.
Args:
e: Any Exception.
bucket_name: Optional bucket name in request that caused the exception.
object_name: Optional object name in request that caused the exception.
generation: Optional generation in request that caused the exception.
not_found_exception: Optional exception to raise in the not-found case.
Raises:
Translated CloudApi exception, or the original exception if it was not
translatable.
"""
translated_exception = self._TranslateBotoException(
e, bucket_name=bucket_name, object_name=object_name,
generation=generation, not_found_exception=not_found_exception)
if translated_exception:
raise translated_exception
else:
raise
def _TranslateBotoException(self, e, bucket_name=None, object_name=None,
generation=None, not_found_exception=None):
"""Translates boto exceptions into their gsutil Cloud API equivalents.
Args:
e: Any exception in TRANSLATABLE_BOTO_EXCEPTIONS.
bucket_name: Optional bucket name in request that caused the exception.
object_name: Optional object name in request that caused the exception.
generation: Optional generation in request that caused the exception.
not_found_exception: Optional exception to raise in the not-found case.
    Returns:
      CloudStorageApiServiceException for translatable exceptions, None
      otherwise.

    Note: because we're using isinstance, subtypes are checked first.
    """
if isinstance(e, boto.exception.StorageResponseError):
if e.status == 400:
return BadRequestException(e.code, status=e.status, body=e.body)
elif e.status == 401 or e.status == 403:
return AccessDeniedException(e.code, status=e.status, body=e.body)
elif e.status == 404:
if not_found_exception:
# The exception is pre-constructed prior to translation; the HTTP
# status code isn't available at that time.
setattr(not_found_exception, 'status', e.status)
return not_found_exception
elif bucket_name:
if object_name:
return CreateObjectNotFoundException(e.status, self.provider,
bucket_name, object_name,
generation=generation)
return CreateBucketNotFoundException(e.status, self.provider,
bucket_name)
return NotFoundException(e.message, status=e.status, body=e.body)
elif e.status == 409 and e.code and 'BucketNotEmpty' in e.code:
return NotEmptyException('BucketNotEmpty (%s)' % bucket_name,
status=e.status, body=e.body)
elif e.status == 410:
# 410 errors should always cause us to start over - either the UploadID
# has expired or there was a server-side problem that requires starting
# the upload over from scratch.
return ResumableUploadStartOverException(e.message)
elif e.status == 412:
return PreconditionException(e.code, status=e.status, body=e.body)
if isinstance(e, boto.exception.StorageCreateError):
return ServiceException('Bucket already exists.', status=e.status,
body=e.body)
if isinstance(e, boto.exception.BotoServerError):
return ServiceException(e.message, status=e.status, body=e.body)
if isinstance(e, boto.exception.InvalidUriError):
# Work around textwrap when searching for this string.
if e.message and NON_EXISTENT_OBJECT_REGEX.match(e.message.encode(UTF8)):
return NotFoundException(e.message, status=404)
return InvalidUrlError(e.message)
if isinstance(e, boto.exception.ResumableUploadException):
if e.disposition == boto.exception.ResumableTransferDisposition.ABORT:
return ResumableUploadAbortException(e.message)
elif (e.disposition ==
boto.exception.ResumableTransferDisposition.START_OVER):
return ResumableUploadStartOverException(e.message)
else:
return ResumableUploadException(e.message)
if isinstance(e, boto.exception.ResumableDownloadException):
return ResumableDownloadException(e.message)
return None
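  # Summary of the translation above (for reference; see code for details):
  #   400 -> BadRequestException           401/403 -> AccessDeniedException
  #   404 -> NotFoundException variants    409 BucketNotEmpty -> NotEmptyException
  #   410 -> ResumableUploadStartOverException
  #   412 -> PreconditionException
  #   StorageCreateError -> ServiceException('Bucket already exists.')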
# For function docstrings, see CloudApiDelegator class.
def XmlPassThroughGetAcl(self, storage_url, def_obj_acl=False):
"""See CloudApiDelegator class for function doc strings."""
try:
uri = boto.storage_uri(
storage_url.url_string, suppress_consec_slashes=False,
bucket_storage_uri_class=self.bucket_storage_uri_class,
debug=self.debug)
if def_obj_acl:
return uri.get_def_acl()
else:
return uri.get_acl()
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e)
def XmlPassThroughSetAcl(self, acl_text, storage_url, canned=True,
def_obj_acl=False):
"""See CloudApiDelegator class for function doc strings."""
headers = self._CreateBaseHeaders()
try:
uri = boto.storage_uri(
storage_url.url_string, suppress_consec_slashes=False,
bucket_storage_uri_class=self.bucket_storage_uri_class,
debug=self.debug)
if canned:
if def_obj_acl:
canned_acls = uri.canned_acls()
if acl_text not in canned_acls:
raise CommandException('Invalid canned ACL "%s".' % acl_text)
uri.set_def_acl(acl_text, uri.object_name, headers=headers)
else:
canned_acls = uri.canned_acls()
if acl_text not in canned_acls:
raise CommandException('Invalid canned ACL "%s".' % acl_text)
uri.set_acl(acl_text, uri.object_name, headers=headers)
else:
if def_obj_acl:
uri.set_def_xml_acl(acl_text, uri.object_name, headers=headers)
else:
uri.set_xml_acl(acl_text, uri.object_name, headers=headers)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e)
# pylint: disable=catching-non-exception
def XmlPassThroughSetCors(self, cors_text, storage_url):
"""See CloudApiDelegator class for function doc strings."""
headers = self._CreateBaseHeaders()
# Parse XML document and convert into Cors object.
if storage_url.scheme == 's3':
cors_obj = S3Cors()
else:
cors_obj = Cors()
h = handler.XmlHandler(cors_obj, None)
try:
xml.sax.parseString(cors_text, h)
except SaxExceptions.SAXParseException, e:
raise CommandException('Requested CORS is invalid: %s at line %s, '
'column %s' % (e.getMessage(), e.getLineNumber(),
e.getColumnNumber()))
try:
uri = boto.storage_uri(
storage_url.url_string, suppress_consec_slashes=False,
bucket_storage_uri_class=self.bucket_storage_uri_class,
debug=self.debug)
uri.set_cors(cors_obj, False, headers=headers)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e)
def XmlPassThroughGetCors(self, storage_url):
"""See CloudApiDelegator class for function doc strings."""
headers = self._CreateBaseHeaders()
uri = boto.storage_uri(
storage_url.url_string, suppress_consec_slashes=False,
bucket_storage_uri_class=self.bucket_storage_uri_class,
debug=self.debug)
try:
cors = uri.get_cors(False, headers=headers)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e)
parsed_xml = xml.dom.minidom.parseString(cors.to_xml().encode(UTF8))
# Pretty-print the XML to make it more easily human editable.
return parsed_xml.toprettyxml(indent=' ')
def XmlPassThroughGetLifecycle(self, storage_url):
"""See CloudApiDelegator class for function doc strings."""
headers = self._CreateBaseHeaders()
try:
uri = boto.storage_uri(
storage_url.url_string, suppress_consec_slashes=False,
bucket_storage_uri_class=self.bucket_storage_uri_class,
debug=self.debug)
lifecycle = uri.get_lifecycle_config(False, headers=headers)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e)
parsed_xml = xml.dom.minidom.parseString(lifecycle.to_xml().encode(UTF8))
# Pretty-print the XML to make it more easily human editable.
return parsed_xml.toprettyxml(indent=' ')
def XmlPassThroughSetLifecycle(self, lifecycle_text, storage_url):
"""See CloudApiDelegator class for function doc strings."""
headers = self._CreateBaseHeaders()
# Parse XML document and convert into lifecycle object.
if storage_url.scheme == 's3':
lifecycle_obj = S3Lifecycle()
else:
lifecycle_obj = LifecycleConfig()
h = handler.XmlHandler(lifecycle_obj, None)
try:
xml.sax.parseString(lifecycle_text, h)
except SaxExceptions.SAXParseException, e:
raise CommandException(
'Requested lifecycle config is invalid: %s at line %s, column %s' %
(e.getMessage(), e.getLineNumber(), e.getColumnNumber()))
try:
uri = boto.storage_uri(
storage_url.url_string, suppress_consec_slashes=False,
bucket_storage_uri_class=self.bucket_storage_uri_class,
debug=self.debug)
uri.configure_lifecycle(lifecycle_obj, False, headers=headers)
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e)
def XmlPassThroughGetLogging(self, storage_url):
"""See CloudApiDelegator class for function doc strings."""
headers = self._CreateBaseHeaders()
try:
uri = boto.storage_uri(
storage_url.url_string, suppress_consec_slashes=False,
bucket_storage_uri_class=self.bucket_storage_uri_class,
debug=self.debug)
logging_config_xml = UnaryDictToXml(uri.get_logging_config(
headers=headers))
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e)
return XmlParseString(logging_config_xml).toprettyxml()
def XmlPassThroughGetWebsite(self, storage_url):
"""See CloudApiDelegator class for function doc strings."""
headers = self._CreateBaseHeaders()
try:
uri = boto.storage_uri(
storage_url.url_string, suppress_consec_slashes=False,
bucket_storage_uri_class=self.bucket_storage_uri_class,
debug=self.debug)
web_config_xml = UnaryDictToXml(uri.get_website_config(headers=headers))
except TRANSLATABLE_BOTO_EXCEPTIONS, e:
self._TranslateExceptionAndRaise(e)
return XmlParseString(web_config_xml).toprettyxml()
|
apache-2.0
|
rwillmer/django
|
tests/model_fields/test_durationfield.py
|
296
|
2724
|
import datetime
import json
from django import forms
from django.core import exceptions, serializers
from django.db import models
from django.test import SimpleTestCase, TestCase
from .models import DurationModel, NullDurationModel
class TestSaveLoad(TestCase):
def test_simple_roundtrip(self):
duration = datetime.timedelta(days=123, seconds=123, microseconds=123)
DurationModel.objects.create(field=duration)
loaded = DurationModel.objects.get()
self.assertEqual(loaded.field, duration)
def test_create_empty(self):
NullDurationModel.objects.create()
loaded = NullDurationModel.objects.get()
self.assertEqual(loaded.field, None)
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
cls.objs = [
DurationModel.objects.create(field=datetime.timedelta(days=1)),
DurationModel.objects.create(field=datetime.timedelta(seconds=1)),
DurationModel.objects.create(field=datetime.timedelta(seconds=-1)),
]
def test_exact(self):
self.assertSequenceEqual(
DurationModel.objects.filter(field=datetime.timedelta(days=1)),
[self.objs[0]]
)
def test_gt(self):
self.assertSequenceEqual(
DurationModel.objects.filter(field__gt=datetime.timedelta(days=0)),
[self.objs[0], self.objs[1]]
)
class TestSerialization(SimpleTestCase):
test_data = '[{"fields": {"field": "1 01:00:00"}, "model": "model_fields.durationmodel", "pk": null}]'
def test_dumping(self):
instance = DurationModel(field=datetime.timedelta(days=1, hours=1))
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, datetime.timedelta(days=1, hours=1))
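# Note (illustrative, not a test): the string above follows DurationField's
# '[DD] [HH:[MM:]]ss[.uuuuuu]' serialization format, so
# timedelta(days=1, hours=1) round-trips as '1 01:00:00'.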
class TestValidation(SimpleTestCase):
def test_invalid_string(self):
field = models.DurationField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('not a datetime', None)
self.assertEqual(cm.exception.code, 'invalid')
self.assertEqual(
cm.exception.message % cm.exception.params,
"'not a datetime' value has an invalid format. "
"It must be in [DD] [HH:[MM:]]ss[.uuuuuu] format."
)
class TestFormField(SimpleTestCase):
# Tests for forms.DurationField are in the forms_tests app.
def test_formfield(self):
field = models.DurationField()
self.assertIsInstance(field.formfield(), forms.DurationField)
|
bsd-3-clause
|
pgmillon/ansible
|
lib/ansible/modules/system/runit.py
|
43
|
8633
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Brian Coca <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
# This is a modification of @bcoca's `svc` module
DOCUMENTATION = r'''
---
module: runit
author:
- James Sumners (@jsumners)
version_added: "2.3"
short_description: Manage runit services
description:
- Controls runit services on remote hosts using the sv utility.
options:
name:
description:
- Name of the service to manage.
type: str
required: yes
state:
description:
- C(started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
service (sv restart) and C(killed) will always stop the service (sv force-stop).
C(reloaded) will send a HUP (sv reload).
C(once) will run a normally downed sv once (sv once), not really
an idempotent operation.
type: str
choices: [ killed, once, reloaded, restarted, started, stopped ]
enabled:
description:
- Whether the service is enabled or not; if disabled, it also implies stopped.
type: bool
service_dir:
description:
- Directory runsv watches for services.
type: str
default: /var/service
service_src:
description:
- Directory where services are defined; the source of symlinks to service_dir.
type: str
default: /etc/sv
'''
EXAMPLES = r'''
- name: Start sv dnscache, if not running
runit:
name: dnscache
state: started
- name: Stop sv dnscache, if running
runit:
name: dnscache
state: stopped
- name: Kill sv dnscache, in all cases
runit:
name: dnscache
state: killed
- name: Restart sv dnscache, in all cases
runit:
name: dnscache
state: restarted
- name: Reload sv dnscache, in all cases
runit:
name: dnscache
state: reloaded
- name: Use alternative sv directory location
runit:
name: dnscache
state: reloaded
service_dir: /run/service
'''
import os
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def _load_dist_subclass(cls, *args, **kwargs):
'''
Used for derivative implementations
'''
subclass = None
distro = kwargs['module'].params['distro']
# get the most specific superclass for this platform
if distro is not None:
for sc in cls.__subclasses__():
if sc.distro is not None and sc.distro == distro:
subclass = sc
if subclass is None:
subclass = cls
return super(cls, subclass).__new__(subclass)
class Sv(object):
"""
Main class that handles daemontools; it can be subclassed and overridden in
case we want to use a 'derivative' like encore, s6, etc.
"""
# def __new__(cls, *args, **kwargs):
# return _load_dist_subclass(cls, args, kwargs)
def __init__(self, module):
self.extra_paths = []
self.report_vars = ['state', 'enabled', 'svc_full', 'src_full', 'pid', 'duration', 'full_state']
self.module = module
self.name = module.params['name']
self.service_dir = module.params['service_dir']
self.service_src = module.params['service_src']
self.enabled = None
self.full_state = None
self.state = None
self.pid = None
self.duration = None
self.svc_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths, required=True)
self.svstat_cmd = module.get_bin_path('sv', opt_dirs=self.extra_paths)
self.svc_full = '/'.join([self.service_dir, self.name])
self.src_full = '/'.join([self.service_src, self.name])
self.enabled = os.path.lexists(self.svc_full)
if self.enabled:
self.get_status()
else:
self.state = 'stopped'
def enable(self):
if os.path.exists(self.src_full):
try:
os.symlink(self.src_full, self.svc_full)
except OSError as e:
self.module.fail_json(path=self.src_full, msg='Error while linking: %s' % to_native(e))
else:
self.module.fail_json(msg="Could not find source for service to enable (%s)." % self.src_full)
def disable(self):
self.execute_command([self.svc_cmd, 'force-stop', self.src_full])
try:
os.unlink(self.svc_full)
except OSError as e:
self.module.fail_json(path=self.svc_full, msg='Error while unlinking: %s' % to_native(e))
def get_status(self):
(rc, out, err) = self.execute_command([self.svstat_cmd, 'status', self.svc_full])
if err is not None and err:
self.full_state = self.state = err
else:
self.full_state = out
# full_state *may* contain information about the logger:
# "down: /etc/service/service-without-logger: 1s, normally up\n"
# "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s\n"
full_state_no_logger = self.full_state.split("; ")[0]
m = re.search(r'\(pid (\d+)\)', full_state_no_logger)
if m:
self.pid = m.group(1)
m = re.search(r' (\d+)s', full_state_no_logger)
if m:
self.duration = m.group(1)
if re.search(r'^run:', full_state_no_logger):
self.state = 'started'
elif re.search(r'^down:', full_state_no_logger):
self.state = 'stopped'
else:
self.state = 'unknown'
return
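# Illustrative trace (using the sample output quoted above, not part of the
# original module): for
# "down: /etc/service/updater: 127s, normally up; run: log: (pid 364) 263439s",
# the logger part after "; " is discarded, so pid stays None, duration
# becomes "127", and state is set to 'stopped'.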
def started(self):
return self.start()
def start(self):
return self.execute_command([self.svc_cmd, 'start', self.svc_full])
def stopped(self):
return self.stop()
def stop(self):
return self.execute_command([self.svc_cmd, 'stop', self.svc_full])
def once(self):
return self.execute_command([self.svc_cmd, 'once', self.svc_full])
def reloaded(self):
return self.reload()
def reload(self):
return self.execute_command([self.svc_cmd, 'reload', self.svc_full])
def restarted(self):
return self.restart()
def restart(self):
return self.execute_command([self.svc_cmd, 'restart', self.svc_full])
def killed(self):
return self.kill()
def kill(self):
return self.execute_command([self.svc_cmd, 'force-stop', self.svc_full])
def execute_command(self, cmd):
try:
(rc, out, err) = self.module.run_command(' '.join(cmd))
except Exception as e:
self.module.fail_json(msg="failed to execute: %s" % to_native(e))
return (rc, out, err)
def report(self):
self.get_status()
states = {}
for k in self.report_vars:
states[k] = self.__dict__[k]
return states
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
state=dict(type='str', choices=['killed', 'once', 'reloaded', 'restarted', 'started', 'stopped']),
enabled=dict(type='bool'),
dist=dict(type='str', default='runit'),
service_dir=dict(type='str', default='/var/service'),
service_src=dict(type='str', default='/etc/sv'),
),
supports_check_mode=True,
)
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
state = module.params['state']
enabled = module.params['enabled']
sv = Sv(module)
changed = False
orig_state = sv.report()
if enabled is not None and enabled != sv.enabled:
changed = True
if not module.check_mode:
try:
if enabled:
sv.enable()
else:
sv.disable()
except (OSError, IOError) as e:
module.fail_json(msg="Could not change service link: %s" % to_native(e))
if state is not None and state != sv.state:
changed = True
if not module.check_mode:
getattr(sv, state)()
module.exit_json(changed=changed, sv=sv.report())
if __name__ == '__main__':
main()
|
gpl-3.0
|
dcclogin/TextGenerator
|
TitleCrawler/ccf_conference/categories/network/conext2014.py
|
1
|
3281
|
# -*- coding: utf-8 -*-
import re
import copy
import random
import os, sys
import MySQLdb
import requests
from time import sleep
from threading import Thread
from bs4 import BeautifulSoup
reload(sys)
sys.setdefaultencoding('utf-8')
clade = 'http://dblp.uni-trier.de/db/conf/conext/'
months = {
'January': '01',
'February': '02',
'March': '03',
'April': '04',
'May': '05',
'June': '06',
'July': '07',
'August': '08',
'September': '09',
'October': '10',
'November': '11',
'December': '12'
}
# regex to match months in <h2> tags
re_mons=r'(January|February|March|April|May|June|July|August|September|October|November|December)'
repeato_mons=r'([ /-]*'+re_mons+r'*)*'
pattern_mons=re_mons+repeato_mons
# regex to match years in <h2> tags
re_year=r'((19|20)\d+)'
repeato_year=r'([ /-]*'+re_year+r'*)*'
pattern_year=re_year+repeato_year
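# Illustrative sketch (assumed headline text, not from the original script):
# on a DBLP headline such as "10th CoNEXT, December 2-5, 2014, Sydney, Australia",
# re.search(pattern_mons, txt).group().strip() yields "December" and
# re.search(pattern_year, txt).group().strip() yields "2014"; sub_months(),
# defined below, then maps "December" to "12".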
def get_leaves(clade):
r = requests.get(clade)
if r.status_code == 200:
soup = BeautifulSoup(r.text, 'lxml')
leaves = []
late = soup.find('ul', class_='publ-list')
tags = late.find_all('div', class_='data', itemprop='headline')
for tag in tags:
leaves.append(tag.find_all('a')[-1]['href'])
return leaves
def sub_months(match_obj):
""" transfer months to digital form (in-place change)
"""
for m in months:
match_obj = re.sub(m, months[m], match_obj)
return match_obj
def get_yymm(leaf):
r = requests.get(leaf)
if r.status_code == 200:
soup = BeautifulSoup(r.text, 'lxml')
lat = soup.find('div', class_='data', itemprop='headline')
tag = lat.find('span', class_='title', itemprop='name')
txt = tag.get_text()
try:
match_obj_mons = re.search(pattern_mons, txt)
match_obj_mons = match_obj_mons.group().strip()
match_obj_mons = sub_months(match_obj_mons)
month = match_obj_mons
except Exception, error_mons:
print '[-]', error_mons
month = None
try:
match_obj_year = re.search(pattern_year, txt)
match_obj_year = match_obj_year.group().strip()
year = match_obj_year
except Exception, error_year:
print '[-]', error_year
year = None
return year, month
def get_titles(leaf):
r = requests.get(leaf)
if r.status_code == 200:
soup = BeautifulSoup(r.text, 'lxml')
title_lst = []
tags = soup.find_all('span', class_='title', itemprop='name')
for tag in tags:
title_lst.append(tag.get_text())
return title_lst
def incert_mysql(year, month, title_lst):
try:
tablename = 'papertitle'
conn = MySQLdb.connect(host='127.0.0.1', user='root', passwd='13917331612', db='conference')
c = conn.cursor()
conn.set_character_set('utf8')
c.execute('SET NAMES utf8;')
c.execute('SET CHARACTER SET utf8;')
c.execute('SET character_set_connection=utf8;')
for p in title_lst:
sql = "insert into " + tablename + "(year, month, name, title, class, category) \
values(%s, %s, %s, %s, %s, %s)"
param = (year, month, 'CoNEXT', p, 'B', 'network')
c.execute(sql, param)
print ">>>> [+] Insert paper <%s> : done." %(p)
conn.commit()
c.close()
except MySQLdb.Error, e:
print "[-] Mysql Error %d: %s" % (e.args[0], e.args[1])
return None
def build():
leaves = get_leaves(clade)
for leaf in leaves:
title_lst = get_titles(leaf)
year, month = get_yymm(leaf)
incert_mysql(year, month, title_lst)
return None
build()
|
mit
|
255BITS/HyperGAN
|
hypergan/tk_viewer.py
|
1
|
5690
|
"""
Opens a window that displays an image.
Usage:
from viewer import GlobalViewer
GlobalViewer.update(image)
"""
import numpy as np
import os
import contextlib
class TkViewer:
def __init__(self, title="HyperGAN", viewer_size=1, enabled=True):
self.screen = None
self.title = title
self.viewer_size = viewer_size
self.enabled = enabled
self.enable_menu = True
def update(self, gan, image):
if not self.enabled: return
if len(np.shape(image)) == 2:
s = np.shape(image)
image = np.reshape(image, [s[0], s[1], 1])
image = np.tile(image, [1,1,3])
image = np.transpose(image, [1, 0,2])
if not self.screen:
with contextlib.redirect_stdout(None):
import pygame
import tkinter as tk
import tkinter.ttk
class ResizableFrame(tk.Frame):
def __init__(self,parent,tkviewer=None,**kwargs):
tk.Frame.__init__(self,parent,**kwargs)
self.bind("<Configure>", self.on_resize)
self.height = kwargs['height']
self.width = kwargs['width']
self.tkviewer = tkviewer
self.aspect_ratio = float(self.width)/float(self.height)
def on_resize(self,event):
wscale = float(event.width)/self.width
hscale = float(event.height)/self.height
self.width = event.width
self.height = event.height
self.config(width=self.width, height=self.height)
self.tkviewer.size = [self.width, self.height]
self.tkviewer.screen = self.tkviewer.pg.display.set_mode(self.tkviewer.size,self.tkviewer.pg.RESIZABLE)
self.enforce_aspect_ratio(event)
def enforce_aspect_ratio(self, event):
desired_width = event.width
desired_height = int(event.width / self.aspect_ratio)
if desired_height > event.height:
desired_height = event.height
desired_width = int(event.height * self.aspect_ratio)
self.config(width=desired_width, height=desired_height)
self.tkviewer.size = [desired_width, desired_height]
self.tkviewer.screen = self.tkviewer.pg.display.set_mode(self.tkviewer.size,self.tkviewer.pg.RESIZABLE)
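# Illustrative sketch (assumed numbers, not part of the original file):
# with aspect_ratio == 2.0, a 300x300 resize event yields a 300x150 frame,
# while a 300x100 event exceeds the available height and is clamped to
# 200x100 instead.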
self.size = [int(image.shape[0] * self.viewer_size), int(image.shape[1] * self.viewer_size)]
self.pg = pygame
root = tk.Tk()
embed = ResizableFrame(root, width=self.size[0], height=self.size[1], tkviewer=self)
embed.winfo_toplevel().title(self.title)
root.rowconfigure(0,weight=1)
root.rowconfigure(1,weight=1)
root.columnconfigure(0,weight=1)
root.columnconfigure(1,weight=1)
embed.pack(expand=tk.YES, fill=tk.BOTH)
def _save_model(*args):
gan.save(gan.save_file)
def _exit(*args):
gan.exit()
def _create_status_bar(root):
statusbar = tk.Frame(root, height=24)
statusbar.pack(side=tk.BOTTOM, fill=tk.X)
label_training = tk.Label(statusbar, text="Training", font=12)
label_training.grid(row=0,column=0)
sep = tkinter.ttk.Separator(statusbar, orient=tk.VERTICAL).grid(column=1, row=0, sticky='ns')
label = tk.Label(statusbar, text="Starting", font=12)
label.grid(row=0, column=2)
def __update_step():
if hasattr(gan, 'step_count'):
label['text']=("Step " + str(gan.step_count))
root.after(1000, __update_step)
__update_step()
return statusbar
menubar = tk.Menu(root)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="Save", command=_save_model, underline=0, accelerator="Ctrl+S")
filemenu.add_separator()
filemenu.add_command(label="Save and Exit", command=_exit, underline=10, accelerator="Ctrl+Q")
menubar.add_cascade(label="File", menu=filemenu, underline=0)
root.bind_all("<Control-q>", _exit)
root.bind_all("<Control-s>", _save_model)
if self.enable_menu:
root.config(menu=menubar)
_create_status_bar(root)
# Tell pygame's SDL window which window ID to use
os.environ['SDL_WINDOWID'] = str(embed.winfo_id())
# Show the window so it's assigned an ID.
root.update()
self.root = root
# Usual pygame initialization
if self.viewer_size <= 0:
self.viewer_size = 0.1
self.aspect_w = image.shape[1] / image.shape[0]
self.aspect_h = image.shape[0] / image.shape[1]
self.temp_size = self.size
self.screen = self.pg.display.set_mode(self.size,self.pg.RESIZABLE)
self.pg.display.set_caption(self.title)
surface = self.pg.Surface([image.shape[0],image.shape[1]])
self.pg.surfarray.blit_array(surface, image)
self.screen.blit(self.pg.transform.scale(surface,self.size),(0,0))
self.pg.display.flip()
def tick(self):
"""
Called repeatedly regardless of gan state.
"""
if hasattr(self, 'root'):
self.root.update()
|
mit
|
mandeepdhami/nova
|
nova/tests/unit/fake_ldap.py
|
66
|
9265
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake LDAP server for test harness.
This class does very little error checking, and knows nothing about ldap
class definitions. It implements the minimum emulation of the python ldap
library to work with nova.
"""
import fnmatch
from oslo_serialization import jsonutils
import six
from six.moves import range
class Store(object):
def __init__(self):
if hasattr(self.__class__, '_instance'):
raise Exception('Attempted to instantiate singleton')
@classmethod
def instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = _StorageDict()
return cls._instance
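# Illustrative sketch (not part of the original module): instance() lazily
# creates a single shared _StorageDict, so every caller sees the same store:
# assert Store.instance() is Store.instance()
# Store.instance().hset('ldap:cn=foo', 'cn', '["foo"]')
# assert Store.instance().hget('ldap:cn=foo', 'cn') == '["foo"]'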
class _StorageDict(dict):
def keys(self, pat=None):
ret = super(_StorageDict, self).keys()
if pat is not None:
ret = fnmatch.filter(ret, pat)
return ret
def delete(self, key):
try:
del self[key]
except KeyError:
pass
def flushdb(self):
self.clear()
def hgetall(self, key):
"""Returns the hash for the given key
Creates the hash if the key doesn't exist.
"""
try:
return self[key]
except KeyError:
self[key] = {}
return self[key]
def hget(self, key, field):
hashdict = self.hgetall(key)
try:
return hashdict[field]
except KeyError:
hashdict[field] = {}
return hashdict[field]
def hset(self, key, field, val):
hashdict = self.hgetall(key)
hashdict[field] = val
def hmset(self, key, value_dict):
hashdict = self.hgetall(key)
for field, val in value_dict.items():
hashdict[field] = val
SCOPE_BASE = 0
SCOPE_ONELEVEL = 1 # Not implemented
SCOPE_SUBTREE = 2
MOD_ADD = 0
MOD_DELETE = 1
MOD_REPLACE = 2
class NO_SUCH_OBJECT(Exception):
"""Duplicate exception class from real LDAP module."""
pass
class OBJECT_CLASS_VIOLATION(Exception):
"""Duplicate exception class from real LDAP module."""
pass
class SERVER_DOWN(Exception):
"""Duplicate exception class from real LDAP module."""
pass
def initialize(_uri):
"""Opens a fake connection with an LDAP server."""
return FakeLDAP()
def _match_query(query, attrs):
"""Match an ldap query to an attribute dictionary.
The characters &, |, and ! are supported in the query. No syntax checking
is performed, so malformed queries will not work correctly.
"""
# cut off the parentheses
inner = query[1:-1]
if inner.startswith('&'):
# cut off the &
l, r = _paren_groups(inner[1:])
return _match_query(l, attrs) and _match_query(r, attrs)
if inner.startswith('|'):
# cut off the |
l, r = _paren_groups(inner[1:])
return _match_query(l, attrs) or _match_query(r, attrs)
if inner.startswith('!'):
# cut off the ! and the nested parentheses
return not _match_query(query[2:-1], attrs)
(k, _sep, v) = inner.partition('=')
return _match(k, v, attrs)
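# Illustrative sketch (assumed attrs, not part of the original module):
# attrs = {'objectclass': ['top'], 'cn': ['foo']}
# _match_query('(&(objectclass=top)(cn=foo))', attrs) -> True
# _match_query('(|(cn=bar)(cn=foo))', attrs) -> True
# _match_query('(!(cn=foo))', attrs) -> False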
def _paren_groups(source):
"""Split a string into parenthesized groups."""
count = 0
start = 0
result = []
for pos in range(len(source)):
if source[pos] == '(':
if count == 0:
start = pos
count += 1
if source[pos] == ')':
count -= 1
if count == 0:
result.append(source[start:pos + 1])
return result
def _match(key, value, attrs):
"""Match a given key and value against an attribute list."""
if key not in attrs:
return False
# This is a wild card search. Implemented as all or nothing for now.
if value == "*":
return True
if key != "objectclass":
return value in attrs[key]
# it is an objectclass check, so check subclasses
values = _subs(value)
for v in values:
if v in attrs[key]:
return True
return False
def _subs(value):
"""Returns a list of subclass strings.
The strings represent the ldap object class plus any subclasses that
inherit from it. Fakeldap doesn't know about the ldap object structure,
so subclasses need to be defined manually in the dictionary below.
"""
subs = {'groupOfNames': ['novaProject']}
if value in subs:
return [value] + subs[value]
return [value]
def _from_json(encoded):
"""Convert attribute values from json representation.
Args:
encoded -- a json encoded string
Returns a list of strings
"""
return [str(x) for x in jsonutils.loads(encoded)]
def _to_json(unencoded):
"""Convert attribute values into json representation.
Args:
unencoded -- an unencoded string or list of strings. If it
is a single string, it will be converted into a list.
Returns a json string
"""
return jsonutils.dumps(list(unencoded))
server_fail = False
class FakeLDAP(object):
"""Fake LDAP connection."""
def simple_bind_s(self, dn, password):
"""This method is ignored, but provided for compatibility."""
if server_fail:
raise SERVER_DOWN()
pass
def unbind_s(self):
"""This method is ignored, but provided for compatibility."""
if server_fail:
raise SERVER_DOWN()
pass
def add_s(self, dn, attr):
"""Add an object with the specified attributes at dn."""
if server_fail:
raise SERVER_DOWN()
key = "%s%s" % (self.__prefix, dn)
value_dict = {k: _to_json(v) for k, v in attr}
Store.instance().hmset(key, value_dict)
def delete_s(self, dn):
"""Remove the ldap object at specified dn."""
if server_fail:
raise SERVER_DOWN()
Store.instance().delete("%s%s" % (self.__prefix, dn))
def modify_s(self, dn, attrs):
"""Modify the object at dn using the attribute list.
:param dn: a dn
:param attrs: a list of tuples in the following form::
([MOD_ADD | MOD_DELETE | MOD_REPLACE], attribute, value)
"""
if server_fail:
raise SERVER_DOWN()
store = Store.instance()
key = "%s%s" % (self.__prefix, dn)
for cmd, k, v in attrs:
values = _from_json(store.hget(key, k))
if cmd == MOD_ADD:
values.append(v)
elif cmd == MOD_REPLACE:
values = [v]
else:
values.remove(v)
store.hset(key, k, _to_json(values))
def modrdn_s(self, dn, newrdn):
oldobj = self.search_s(dn, SCOPE_BASE)
if not oldobj:
raise NO_SUCH_OBJECT()
newdn = "%s,%s" % (newrdn, dn.partition(',')[2])
newattrs = oldobj[0][1]
modlist = []
for attrtype in newattrs.keys():
modlist.append((attrtype, newattrs[attrtype]))
self.add_s(newdn, modlist)
self.delete_s(dn)
def search_s(self, dn, scope, query=None, fields=None):
"""Search for all matching objects under dn using the query.
Args:
dn -- dn to search under
scope -- only SCOPE_BASE and SCOPE_SUBTREE are supported
query -- query to filter objects by
fields -- fields to return. Returns all fields if not specified
"""
if server_fail:
raise SERVER_DOWN()
if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
raise NotImplementedError(str(scope))
store = Store.instance()
if scope == SCOPE_BASE:
pattern = "%s%s" % (self.__prefix, dn)
keys = store.keys(pattern)
else:
keys = store.keys("%s*%s" % (self.__prefix, dn))
if not keys:
raise NO_SUCH_OBJECT()
objects = []
for key in keys:
# get the attributes from the store
attrs = store.hgetall(key)
# turn the values from the store into lists
attrs = {k: _from_json(v) for k, v in six.iteritems(attrs)}
# filter the objects by query
if not query or _match_query(query, attrs):
# filter the attributes by fields
attrs = {k: v for k, v in six.iteritems(attrs)
if not fields or k in fields}
objects.append((key[len(self.__prefix):], attrs))
return objects
@property
def __prefix(self):
"""Get the prefix to use for all keys."""
return 'ldap:'
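# Illustrative sketch (assumed dn and attributes, not part of the original
# module) of how the pieces above fit together:
# conn = initialize('fake://ldap')
# conn.add_s('cn=foo,dc=example', [('objectclass', ['top']), ('cn', ['foo'])])
# conn.modify_s('cn=foo,dc=example', [(MOD_ADD, 'cn', 'bar')])
# conn.search_s('cn=foo,dc=example', SCOPE_BASE)
# # -> [('cn=foo,dc=example', {'objectclass': ['top'], 'cn': ['foo', 'bar']})]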
|
apache-2.0
|
brbsix/subnuker
|
setup.py
|
1
|
1543
|
# -*- coding: utf-8 -*-
from setuptools import setup
from subnuker import __program__
from subnuker import __version__
def read(filename):
with open(filename) as f:
return f.read()
setup(
name=__program__,
version=__version__,
author='Brian Beffa',
author_email='[email protected]',
description='Remove spam and advertising from subtitle files',
long_description=read('README.rst'),
url='https://github.com/brbsix/subnuker',
license='GPLv3',
keywords=['advertising', 'srt', 'subtitle'],
py_modules=['subnuker'],
install_requires=['batchpath', 'chardet'],
entry_points={
'console_scripts': ['subnuker=subnuker:main'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Operating System :: POSIX',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Multimedia :: Video',
'Topic :: Text Processing',
'Topic :: Utilities',
],
)
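# Illustrative usage sketch (standard setuptools workflow, not part of the
# original file):
# python setup.py sdist   # build a source distribution
# pip install .           # install, exposing the `subnuker` console script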
|
gpl-3.0
|
abaditsegay/arangodb
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_descrtut.py
|
51
|
12049
|
# This contains most of the executable examples from Guido's descr
# tutorial, once at
#
# http://www.python.org/2.2/descrintro.html
#
# A few examples left implicit in the writeup were fleshed out, a few were
# skipped due to lack of interest (e.g., faking super() by hand isn't
# of much interest anymore), and a few were fiddled to make the output
# deterministic.
from test.test_support import sortdict
import pprint
class defaultdict(dict):
def __init__(self, default=None):
dict.__init__(self)
self.default = default
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.default
def get(self, key, *args):
if not args:
args = (self.default,)
return dict.get(self, key, *args)
def merge(self, other):
for key in other:
if key not in self:
self[key] = other[key]
test_1 = """
Here's the new type at work:
>>> print defaultdict # show our type
<class 'test.test_descrtut.defaultdict'>
>>> print type(defaultdict) # its metatype
<type 'type'>
>>> a = defaultdict(default=0.0) # create an instance
>>> print a # show the instance
{}
>>> print type(a) # show its type
<class 'test.test_descrtut.defaultdict'>
>>> print a.__class__ # show its class
<class 'test.test_descrtut.defaultdict'>
>>> print type(a) is a.__class__ # its type is its class
True
>>> a[1] = 3.25 # modify the instance
>>> print a # show the new value
{1: 3.25}
>>> print a[1] # show the new item
3.25
>>> print a[0] # a non-existent item
0.0
>>> a.merge({1:100, 2:200}) # use a dict method
>>> print sortdict(a) # show the result
{1: 3.25, 2: 200}
>>>
We can also use the new type in contexts where classic Python only allows "real"
dictionaries, such as the locals/globals dictionaries for the exec
statement or the built-in function eval():
>>> def sorted(seq):
... seq.sort()
... return seq
>>> print sorted(a.keys())
[1, 2]
>>> exec "x = 3; print x" in a
3
>>> print sorted(a.keys())
[1, 2, '__builtins__', 'x']
>>> print a['x']
3
>>>
Now I'll show that defaultdict instances have dynamic instance variables,
just like classic classes:
>>> a.default = -1
>>> print a["noway"]
-1
>>> a.default = -1000
>>> print a["noway"]
-1000
>>> 'default' in dir(a)
True
>>> a.x1 = 100
>>> a.x2 = 200
>>> print a.x1
100
>>> d = dir(a)
>>> 'default' in d and 'x1' in d and 'x2' in d
True
>>> print sortdict(a.__dict__)
{'default': -1000, 'x1': 100, 'x2': 200}
>>>
"""
class defaultdict2(dict):
__slots__ = ['default']
def __init__(self, default=None):
dict.__init__(self)
self.default = default
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.default
def get(self, key, *args):
if not args:
args = (self.default,)
return dict.get(self, key, *args)
def merge(self, other):
for key in other:
if key not in self:
self[key] = other[key]
test_2 = """
The __slots__ declaration takes a list of instance variables, and reserves
space for exactly these in the instance. When __slots__ is used, other
instance variables cannot be assigned to:
>>> a = defaultdict2(default=0.0)
>>> a[1]
0.0
>>> a.default = -1
>>> a[1]
-1
>>> a.x1 = 1
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: 'defaultdict2' object has no attribute 'x1'
>>>
"""
test_3 = """
Introspecting instances of built-in types
For instances of built-in types, x.__class__ is now the same as type(x):
>>> type([])
<type 'list'>
>>> [].__class__
<type 'list'>
>>> list
<type 'list'>
>>> isinstance([], list)
True
>>> isinstance([], dict)
False
>>> isinstance([], object)
True
>>>
Under the new proposal, the __methods__ attribute no longer exists:
>>> [].__methods__
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: 'list' object has no attribute '__methods__'
>>>
Instead, you can get the same information from the list type:
>>> pprint.pprint(dir(list)) # like list.__dict__.keys(), but sorted
['__add__',
'__class__',
'__contains__',
'__delattr__',
'__delitem__',
'__delslice__',
'__doc__',
'__eq__',
'__format__',
'__ge__',
'__getattribute__',
'__getitem__',
'__getslice__',
'__gt__',
'__hash__',
'__iadd__',
'__imul__',
'__init__',
'__iter__',
'__le__',
'__len__',
'__lt__',
'__mul__',
'__ne__',
'__new__',
'__reduce__',
'__reduce_ex__',
'__repr__',
'__reversed__',
'__rmul__',
'__setattr__',
'__setitem__',
'__setslice__',
'__sizeof__',
'__str__',
'__subclasshook__',
'append',
'count',
'extend',
'index',
'insert',
'pop',
'remove',
'reverse',
'sort']
The new introspection API gives more information than the old one: in
addition to the regular methods, it also shows the methods that are
normally invoked through special notations, e.g. __iadd__ (+=), __len__
(len), __ne__ (!=). You can invoke any method from this list directly:
>>> a = ['tic', 'tac']
>>> list.__len__(a) # same as len(a)
2
>>> a.__len__() # ditto
2
>>> list.append(a, 'toe') # same as a.append('toe')
>>> a
['tic', 'tac', 'toe']
>>>
This is just like it is for user-defined classes.
"""
test_4 = """
Static methods and class methods
The new introspection API makes it possible to add static methods and class
methods. Static methods are easy to describe: they behave pretty much like
static methods in C++ or Java. Here's an example:
>>> class C:
...
... @staticmethod
... def foo(x, y):
... print "staticmethod", x, y
>>> C.foo(1, 2)
staticmethod 1 2
>>> c = C()
>>> c.foo(1, 2)
staticmethod 1 2
Class methods use a similar pattern to declare methods that receive an
implicit first argument that is the *class* for which they are invoked.
>>> class C:
... @classmethod
... def foo(cls, y):
... print "classmethod", cls, y
>>> C.foo(1)
classmethod test.test_descrtut.C 1
>>> c = C()
>>> c.foo(1)
classmethod test.test_descrtut.C 1
>>> class D(C):
... pass
>>> D.foo(1)
classmethod test.test_descrtut.D 1
>>> d = D()
>>> d.foo(1)
classmethod test.test_descrtut.D 1
This prints "classmethod __main__.D 1" both times; in other words, the
class passed as the first argument of foo() is the class involved in the
call, not the class involved in the definition of foo().
But notice this:
>>> class E(C):
... @classmethod
... def foo(cls, y): # override C.foo
... print "E.foo() called"
... C.foo(y)
>>> E.foo(1)
E.foo() called
classmethod test.test_descrtut.C 1
>>> e = E()
>>> e.foo(1)
E.foo() called
classmethod test.test_descrtut.C 1
In this example, the call to C.foo() from E.foo() will see class C as its
first argument, not class E. This is to be expected, since the call
specifies the class C. But it stresses the difference between these class
methods and methods defined in metaclasses (where an upcall to a metamethod
would pass the target class as an explicit first argument).
"""
test_5 = """
Attributes defined by get/set methods
>>> class property(object):
...
... def __init__(self, get, set=None):
... self.__get = get
... self.__set = set
...
... def __get__(self, inst, type=None):
... return self.__get(inst)
...
... def __set__(self, inst, value):
... if self.__set is None:
... raise AttributeError, "this attribute is read-only"
... return self.__set(inst, value)
Now let's define a class with an attribute x defined by a pair of methods,
getx() and setx():
>>> class C(object):
...
... def __init__(self):
... self.__x = 0
...
... def getx(self):
... return self.__x
...
... def setx(self, x):
... if x < 0: x = 0
... self.__x = x
...
... x = property(getx, setx)
Here's a small demonstration:
>>> a = C()
>>> a.x = 10
>>> print a.x
10
>>> a.x = -10
>>> print a.x
0
>>>
Hmm -- property is builtin now, so let's try it that way too.
>>> del property # unmask the builtin
>>> property
<type 'property'>
>>> class C(object):
... def __init__(self):
... self.__x = 0
... def getx(self):
... return self.__x
... def setx(self, x):
... if x < 0: x = 0
... self.__x = x
... x = property(getx, setx)
>>> a = C()
>>> a.x = 10
>>> print a.x
10
>>> a.x = -10
>>> print a.x
0
>>>
"""
test_6 = """
Method resolution order
This example is implicit in the writeup.
>>> class A: # classic class
... def save(self):
... print "called A.save()"
>>> class B(A):
... pass
>>> class C(A):
... def save(self):
... print "called C.save()"
>>> class D(B, C):
... pass
>>> D().save()
called A.save()
>>> class A(object): # new class
... def save(self):
... print "called A.save()"
>>> class B(A):
... pass
>>> class C(A):
... def save(self):
... print "called C.save()"
>>> class D(B, C):
... pass
>>> D().save()
called C.save()
"""
class A(object):
def m(self):
return "A"
class B(A):
def m(self):
return "B" + super(B, self).m()
class C(A):
def m(self):
return "C" + super(C, self).m()
class D(C, B):
def m(self):
return "D" + super(D, self).m()
test_7 = """
Cooperative methods and "super"
>>> print D().m() # "DCBA"
DCBA
"""
test_8 = """
Backwards incompatibilities
>>> class A:
... def foo(self):
... print "called A.foo()"
>>> class B(A):
... pass
>>> class C(A):
... def foo(self):
... B.foo(self)
>>> C().foo()
Traceback (most recent call last):
...
TypeError: unbound method foo() must be called with B instance as first argument (got C instance instead)
>>> class C(A):
... def foo(self):
... A.foo(self)
>>> C().foo()
called A.foo()
"""
__test__ = {"tut1": test_1,
"tut2": test_2,
"tut3": test_3,
"tut4": test_4,
"tut5": test_5,
"tut6": test_6,
"tut7": test_7,
"tut8": test_8}
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
# Obscure: import this module as test.test_descrtut instead of as
# plain test_descrtut because the name of this module works its way
# into the doctest examples, and unless the full test.test_descrtut
# business is used the name can change depending on how the test is
# invoked.
from test import test_support, test_descrtut
test_support.run_doctest(test_descrtut, verbose)
# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
test_main(1)
|
apache-2.0
|
philotas/enigma2
|
lib/python/Plugins/SystemPlugins/SoftwareManager/ImageBackup.py
|
5
|
17614
|
#################################################################################
# FULL BACKUP UTILITY FOR ENIGMA2, SUPPORTS THE MODELS OE-A 2.0 #
# #
# MAKES A FULLBACK-UP READY FOR FLASHING. #
# #
#################################################################################
from enigma import getEnigmaVersionString
from Screens.Screen import Screen
from Components.Button import Button
from Components.Label import Label
from Components.ActionMap import ActionMap
from Components.About import about
from Screens.Console import Console
from Screens.MessageBox import MessageBox
from time import time, strftime, localtime
from os import path, system, makedirs, listdir, walk, statvfs
import commands
import datetime
from boxbranding import getBoxType, getMachineBrand, getMachineName, getDriverDate, getImageVersion, getImageBuild, getBrandOEM, getMachineBuild, getImageFolder, getMachineUBINIZE, getMachineMKUBIFS, getMachineMtdKernel, getMachineKernelFile, getMachineRootFile, getImageFileSystem
VERSION = "Version 4.1 openATV"
HaveGZkernel = True
if getBrandOEM() in ("fulan"):
HaveGZkernel = False
def Freespace(dev):
statdev = statvfs(dev)
space = (statdev.f_bavail * statdev.f_frsize) / 1024
print "[FULL BACKUP] Free space on %s = %i kilobytes" %(dev, space)
return space
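# Illustrative sketch (assumed statvfs numbers, not part of the original
# file): a device reporting f_bavail=1000000 free blocks of f_frsize=4096
# bytes yields (1000000 * 4096) / 1024 = 4000000 kilobytes.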
class ImageBackup(Screen):
skin = """
<screen position="center,center" size="560,400" title="Image Backup">
<ePixmap position="0,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/red.png" transparent="1" alphatest="on" />
<ePixmap position="140,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/green.png" transparent="1" alphatest="on" />
<ePixmap position="280,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/yellow.png" transparent="1" alphatest="on" />
<ePixmap position="420,360" zPosition="1" size="140,40" pixmap="skin_default/buttons/blue.png" transparent="1" alphatest="on" />
<widget name="key_red" position="0,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_green" position="140,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_yellow" position="280,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="key_blue" position="420,360" zPosition="2" size="140,40" valign="center" halign="center" font="Regular;21" transparent="1" shadowColor="black" shadowOffset="-1,-1" />
<widget name="info-hdd" position="10,30" zPosition="1" size="450,100" font="Regular;20" halign="left" valign="top" transparent="1" />
<widget name="info-usb" position="10,150" zPosition="1" size="450,200" font="Regular;20" halign="left" valign="top" transparent="1" />
</screen>"""
def __init__(self, session, args = 0):
Screen.__init__(self, session)
self.session = session
self.MODEL = getBoxType()
self.OEM = getBrandOEM()
self.MACHINEBUILD = getMachineBuild()
self.MACHINENAME = getMachineName()
self.MACHINEBRAND = getMachineBrand()
self.IMAGEFOLDER = getImageFolder()
self.UBINIZE_ARGS = getMachineUBINIZE()
self.MKUBIFS_ARGS = getMachineMKUBIFS()
self.MTDKERNEL = getMachineMtdKernel()
self.ROOTFSBIN = getMachineRootFile()
self.KERNELBIN = getMachineKernelFile()
self.ROOTFSTYPE = getImageFileSystem()
print "[FULL BACKUP] BOX MACHINEBUILD = >%s<" %self.MACHINEBUILD
print "[FULL BACKUP] BOX MACHINENAME = >%s<" %self.MACHINENAME
print "[FULL BACKUP] BOX MACHINEBRAND = >%s<" %self.MACHINEBRAND
print "[FULL BACKUP] BOX MODEL = >%s<" %self.MODEL
print "[FULL BACKUP] OEM MODEL = >%s<" %self.OEM
print "[FULL BACKUP] IMAGEFOLDER = >%s<" %self.IMAGEFOLDER
print "[FULL BACKUP] UBINIZE = >%s<" %self.UBINIZE_ARGS
print "[FULL BACKUP] MKUBIFS = >%s<" %self.MKUBIFS_ARGS
print "[FULL BACKUP] MTDKERNEL = >%s<" %self.MTDKERNEL
print "[FULL BACKUP] ROOTFSTYPE = >%s<" %self.ROOTFSTYPE
self["key_green"] = Button("USB")
self["key_red"] = Button("HDD")
self["key_blue"] = Button(_("Exit"))
self["key_yellow"] = Button("")
self["info-usb"] = Label(_("USB = Do you want to make a back-up on USB?\nThis will take between 4 and 15 minutes depending on the used filesystem and is fully automatic.\nMake sure you first insert an USB flash drive before you select USB."))
self["info-hdd"] = Label(_("HDD = Do you want to make an USB-back-up image on HDD? \nThis only takes 2 or 10 minutes and is fully automatic."))
self["actions"] = ActionMap(["OkCancelActions", "ColorActions"],
{
"blue": self.quit,
"yellow": self.yellow,
"green": self.green,
"red": self.red,
"cancel": self.quit,
}, -2)
def check_hdd(self):
if not path.exists("/media/hdd"):
self.session.open(MessageBox, _("No /hdd found !!\nPlease make sure you have a HDD mounted.\n"), type = MessageBox.TYPE_ERROR)
return False
if Freespace('/media/hdd') < 300000:
self.session.open(MessageBox, _("Not enough free space on /hdd !!\nYou need at least 300Mb free space.\n"), type = MessageBox.TYPE_ERROR)
return False
return True
def check_usb(self, dev):
if Freespace(dev) < 300000:
self.session.open(MessageBox, _("Not enough free space on %s !!\nYou need at least 300Mb free space.\n" % dev), type = MessageBox.TYPE_ERROR)
return False
return True
def quit(self):
self.close()
def red(self):
if self.check_hdd():
self.doFullBackup("/hdd")
def green(self):
USB_DEVICE = self.SearchUSBcanidate()
if USB_DEVICE == 'XX':
text = _("No USB-Device found for fullbackup !!\n\n\n")
text += _("To back-up directly to the USB-stick, the USB-stick MUST\n")
text += _("contain a file with the name: \n\n")
text += _("backupstick or backupstick.txt")
self.session.open(MessageBox, text, type = MessageBox.TYPE_ERROR)
else:
if self.check_usb(USB_DEVICE):
self.doFullBackup(USB_DEVICE)
def yellow(self):
#// Not used
pass
def SearchUSBcanidate(self):
for paths, subdirs, files in walk("/media"):
for dir in subdirs:
if not dir == 'hdd' and not dir == 'net':
for file in listdir("/media/" + dir):
if file.find("backupstick") > -1:
print "USB-DEVICE found on: /media/%s" % dir
return "/media/" + dir
break
return "XX"
def doFullBackup(self, DIRECTORY):
self.DIRECTORY = DIRECTORY
self.TITLE = _("Full back-up on %s") % (self.DIRECTORY)
self.START = time()
self.DATE = strftime("%Y%m%d_%H%M", localtime(self.START))
self.IMAGEVERSION = self.imageInfo() #strftime("%Y%m%d", localtime(self.START))
if "ubi" in self.ROOTFSTYPE.split():
self.MKFS = "/usr/sbin/mkfs.ubifs"
else:
self.MKFS = "/usr/sbin/mkfs.jffs2"
self.UBINIZE = "/usr/sbin/ubinize"
self.NANDDUMP = "/usr/sbin/nanddump"
self.WORKDIR= "%s/bi" %self.DIRECTORY
self.TARGET="XX"
## TESTING IF ALL THE TOOLS FOR THE BUILDING PROCESS ARE PRESENT
if not path.exists(self.MKFS):
text = "%s not found !!" %self.MKFS
self.session.open(MessageBox, _(text), type = MessageBox.TYPE_ERROR)
return
if not path.exists(self.NANDDUMP):
text = "%s not found !!" %self.NANDDUMP
self.session.open(MessageBox, _(text), type = MessageBox.TYPE_ERROR)
return
self.SHOWNAME = "%s %s" %(self.MACHINEBRAND, self.MODEL)
self.MAINDESTOLD = "%s/%s" %(self.DIRECTORY, self.MODEL)
self.MAINDEST = "%s/%s" %(self.DIRECTORY,self.IMAGEFOLDER)
self.EXTRA = "%s/fullbackup_%s/%s/%s" % (self.DIRECTORY, self.MODEL, self.DATE, self.IMAGEFOLDER)
self.EXTRAOLD = "%s/fullbackup_%s/%s/%s" % (self.DIRECTORY, self.MODEL, self.DATE, self.MODEL)
self.message = "echo -e '\n"
self.message += (_("Back-up Tool for a %s\n" %self.SHOWNAME)).upper()
self.message += VERSION + '\n'
self.message += "_________________________________________________\n\n"
self.message += _("Please be patient, a backup will now be made,\n")
if self.ROOTFSTYPE == "ubi":
self.message += _("because of the used filesystem the back-up\n")
self.message += _("will take about 3-12 minutes for this system\n")
else:
self.message += _("this will take between 2 and 9 minutes\n")
self.message += "\n_________________________________________________\n\n"
self.message += "'"
## PREPARING THE BUILDING ENVIRONMENT
system("rm -rf %s" %self.WORKDIR)
if not path.exists(self.WORKDIR):
makedirs(self.WORKDIR)
if not path.exists("/tmp/bi/root"):
makedirs("/tmp/bi/root")
system("sync")
system("mount --bind / /tmp/bi/root")
if "jffs2" in self.ROOTFSTYPE.split():
cmd1 = "%s --root=/tmp/bi/root --faketime --output=%s/root.jffs2 %s" % (self.MKFS, self.WORKDIR, self.MKUBIFS_ARGS)
cmd2 = None
else:
f = open("%s/ubinize.cfg" %self.WORKDIR, "w")
f.write("[ubifs]\n")
f.write("mode=ubi\n")
f.write("image=%s/root.ubi\n" %self.WORKDIR)
f.write("vol_id=0\n")
f.write("vol_type=dynamic\n")
f.write("vol_name=rootfs\n")
f.write("vol_flags=autoresize\n")
f.close()
ff = open("%s/root.ubi" %self.WORKDIR, "w")
ff.close()
cmd1 = "%s -r /tmp/bi/root -o %s/root.ubi %s" % (self.MKFS, self.WORKDIR, self.MKUBIFS_ARGS)
cmd2 = "%s -o %s/root.ubifs %s %s/ubinize.cfg" % (self.UBINIZE, self.WORKDIR, self.UBINIZE_ARGS, self.WORKDIR)
cmd3 = "mv %s/root.ubifs %s/root.%s" %(self.WORKDIR, self.WORKDIR, self.ROOTFSTYPE)
cmdlist = []
cmdlist.append(self.message)
cmdlist.append('echo "Create: root.%s\n"' %self.ROOTFSTYPE)
cmdlist.append(cmd1)
if cmd2:
cmdlist.append(cmd2)
cmdlist.append(cmd3)
cmdlist.append("chmod 644 %s/root.%s" %(self.WORKDIR, self.ROOTFSTYPE))
cmdlist.append('echo " "')
cmdlist.append('echo "Create: kerneldump"')
cmdlist.append('echo " "')
cmdlist.append("nanddump -a -f %s/vmlinux.gz /dev/%s" % (self.WORKDIR, self.MTDKERNEL))
cmdlist.append('echo " "')
if HaveGZkernel:
cmdlist.append('echo "Check: kerneldump"')
cmdlist.append("sync")
self.session.open(Console, title = self.TITLE, cmdlist = cmdlist, finishedCallback = self.doFullBackupCB, closeOnSuccess = True)
def doFullBackupCB(self):
if HaveGZkernel:
ret = commands.getoutput(' gzip -d %s/vmlinux.gz -c > /tmp/vmlinux.bin' % self.WORKDIR)
if ret:
text = "Kernel dump error\n"
text += "Please Flash your Kernel new and Backup again"
system('rm -rf /tmp/vmlinux.bin')
self.session.open(MessageBox, _(text), type = MessageBox.TYPE_ERROR)
return
cmdlist = []
cmdlist.append(self.message)
if HaveGZkernel:
cmdlist.append('echo "Kernel dump OK"')
cmdlist.append("rm -rf /tmp/vmlinux.bin")
cmdlist.append('echo "_________________________________________________"')
cmdlist.append('echo "Almost there... "')
cmdlist.append('echo "Now building the USB-Image"')
system('rm -rf %s' %self.MAINDEST)
if not path.exists(self.MAINDEST):
makedirs(self.MAINDEST)
if not path.exists(self.EXTRA):
makedirs(self.EXTRA)
f = open("%s/imageversion" %self.MAINDEST, "w")
f.write(self.IMAGEVERSION)
f.close()
system('mv %s/root.%s %s/%s' %(self.WORKDIR, self.ROOTFSTYPE, self.MAINDEST, self.ROOTFSBIN))
system('mv %s/vmlinux.gz %s/%s' %(self.WORKDIR, self.MAINDEST, self.KERNELBIN))
cmdlist.append('echo "rename this file to "force" to force an update without confirmation" > %s/noforce' %self.MAINDEST)
if self.MODEL in ("gbquad", "gbquadplus", "gb800ue", "gb800ueplus", "gbultraue", "twinboxlcd"):
lcdwaitkey = '/usr/share/lcdwaitkey.bin'
lcdwarning = '/usr/share/lcdwarning.bin'
if path.exists(lcdwaitkey):
system('cp %s %s/lcdwaitkey.bin' %(lcdwaitkey, self.MAINDEST))
if path.exists(lcdwarning):
system('cp %s %s/lcdwarning.bin' %(lcdwarning, self.MAINDEST))
if self.MODEL == "gb800solo":
burnbat = "%s/fullbackup_%s/%s" % (self.DIRECTORY, self.MODEL, self.DATE)
f = open("%s/burn.bat" % (burnbat), "w")
f.write("flash -noheader usbdisk0:gigablue/solo/kernel.bin flash0.kernel\n")
f.write("flash -noheader usbdisk0:gigablue/solo/rootfs.bin flash0.rootfs\n")
f.write('setenv -p STARTUP "boot -z -elf flash0.kernel: ')
f.write("'rootfstype=jffs2 bmem=106M@150M root=/dev/mtdblock6 rw '")
f.write('"\n')
f.close()
cmdlist.append('cp -r %s/* %s/' % (self.MAINDEST, self.EXTRA))
cmdlist.append("sync")
file_found = True
if not path.exists("%s/%s" % (self.MAINDEST, self.ROOTFSBIN)):
print 'ROOTFS bin file not found'
file_found = False
if not path.exists("%s/%s" % (self.MAINDEST, self.KERNELBIN)):
print 'KERNEL bin file not found'
file_found = False
if path.exists("%s/noforce" % self.MAINDEST):
print 'NOFORCE bin file not found'
file_found = False
if file_found:
cmdlist.append('echo "_________________________________________________\n"')
cmdlist.append('echo "USB Image created on:" %s' %self.MAINDEST)
cmdlist.append('echo "and there is made an extra copy on:"')
cmdlist.append('echo %s' %self.EXTRA)
cmdlist.append('echo "_________________________________________________\n"')
cmdlist.append('echo " "')
cmdlist.append('echo "\nPlease wait...almost ready! "')
cmdlist.append('echo " "')
cmdlist.append('echo "To restore the image:"')
cmdlist.append('echo "Please check the manual of the receiver"')
cmdlist.append('echo "on how to restore the image"')
else:
cmdlist.append('echo "_________________________________________________\n"')
cmdlist.append('echo "Image creation failed - "')
cmdlist.append('echo "Probable causes could be"')
cmdlist.append('echo " wrong back-up destination "')
cmdlist.append('echo " no space left on back-up device"')
cmdlist.append('echo " no writing permission on back-up device"')
cmdlist.append('echo " "')
if self.DIRECTORY == "/hdd":
self.TARGET = self.SearchUSBcanidate()
print "TARGET = %s" % self.TARGET
if self.TARGET == 'XX':
cmdlist.append('echo " "')
else:
cmdlist.append('echo "_________________________________________________\n"')
cmdlist.append('echo " "')
cmdlist.append('echo "There is a valid USB-flash drive detected in one "')
cmdlist.append('echo "of the USB-ports, therefor an extra copy of the "')
cmdlist.append('echo "back-up image will now be copied to that USB- "')
cmdlist.append('echo "flash drive. "')
cmdlist.append('echo "This only takes about 1 or 2 minutes"')
cmdlist.append('echo " "')
cmdlist.append('mkdir -p %s/%s' % (self.TARGET, self.IMAGEFOLDER))
cmdlist.append('cp -r %s %s/' % (self.MAINDEST, self.TARGET))
cmdlist.append("sync")
cmdlist.append('echo "Backup finished and copied to your USB-flash drive"')
cmdlist.append("umount /tmp/bi/root")
cmdlist.append("rmdir /tmp/bi/root")
cmdlist.append("rmdir /tmp/bi")
cmdlist.append("rm -rf %s" % self.WORKDIR)
cmdlist.append("sleep 5")
END = time()
DIFF = int(END - self.START)
TIMELAP = str(datetime.timedelta(seconds=DIFF))
cmdlist.append('echo " Time required for this process: %s"' %TIMELAP)
self.session.open(Console, title = self.TITLE, cmdlist = cmdlist, closeOnSuccess = False)
def imageInfo(self):
AboutText = _("Full Image Backup ")
AboutText += _("By openATV Image Team") + "\n"
AboutText += _("Support at") + " www.opena.tv\n\n"
AboutText += _("[Image Info]\n")
AboutText += _("Model: %s %s\n") % (getMachineBrand(), getMachineName())
AboutText += _("Backup Date: %s\n") % strftime("%Y-%m-%d", localtime(self.START))
if path.exists('/proc/stb/info/chipset'):
AboutText += _("Chipset: BCM%s") % about.getChipSetString().lower().replace('\n','').replace('bcm','') + "\n"
AboutText += _("CPU: %s") % about.getCPUString() + "\n"
AboutText += _("Cores: %s") % about.getCpuCoresString() + "\n"
AboutText += _("Version: %s") % getImageVersion() + "\n"
AboutText += _("Build: %s") % getImageBuild() + "\n"
AboutText += _("Kernel: %s") % about.getKernelVersionString() + "\n"
string = getDriverDate()
year = string[0:4]
month = string[4:6]
day = string[6:8]
driversdate = '-'.join((year, month, day))
AboutText += _("Drivers:\t%s") % driversdate + "\n"
AboutText += _("Last update:\t%s") % getEnigmaVersionString() + "\n\n"
AboutText += _("[Enigma2 Settings]\n")
AboutText += commands.getoutput("cat /etc/enigma2/settings")
AboutText += _("\n\n[User - bouquets (TV)]\n")
try:
f = open("/etc/enigma2/bouquets.tv","r")
lines = f.readlines()
f.close()
for line in lines:
if line.startswith("#SERVICE:"):
bouqet = line.split()
if len(bouqet) > 3:
bouqet[3] = bouqet[3].replace('"','')
f = open("/etc/enigma2/" + bouqet[3],"r")
userbouqet = f.readline()
AboutText += userbouqet.replace('#NAME ','')
f.close()
except:
AboutText += "Error reading bouquets.tv"
AboutText += _("\n[User - bouquets (RADIO)]\n")
try:
f = open("/etc/enigma2/bouquets.radio","r")
lines = f.readlines()
f.close()
for line in lines:
if line.startswith("#SERVICE:"):
bouqet = line.split()
if len(bouqet) > 3:
bouqet[3] = bouqet[3].replace('"','')
f = open("/etc/enigma2/" + bouqet[3],"r")
userbouqet = f.readline()
AboutText += userbouqet.replace('#NAME ','')
f.close()
except:
AboutText += "Error reading bouquets.radio"
AboutText += _("\n[Installed Plugins]\n")
AboutText += commands.getoutput("opkg list_installed | grep enigma2-plugin-")
return AboutText
|
gpl-2.0
|
spelteam/spel
|
src/python/h36m.py
|
1
|
4373
|
#! /usr/bin/env python2.7
import glob
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import argparse
import numpy as np
import matplotlib.image as mpimg
from matplotlib.lines import Line2D
from pylab import figure, show
import math
import os
#os.putenv("CDF_LIB", '/usr/local/cdf')
from spacepy import pycdf
import re
from itertools import tee, izip
from scipy.misc import imread
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return izip(a, b)
def usage():
print("Author: Mykyta Fastovets / poselib project / 2015")
print("This utility is a tool for reading and convering 2D joint location GT data from H3.6M dataset into poseLib project files.")
print("Example usage: ./h36m.py ~/file.cdf file.txt")
parser = argparse.ArgumentParser(description='1 non-optional argument')
parser.add_argument('IN', action="store")
#parser.add_argument('OUT', action="store")
parseResult = parser.parse_args()
projectName = parseResult.IN.strip().split('/')[-1]
projectName = projectName[:-4]
print projectName
outFile = projectName+"_GT.xml"
fo = open(outFile, 'a')
cdf = pycdf.CDF(parseResult.IN) #read the CDF file
data = cdf['Pose'][0]
#So these are [0,1,2,3,4,5,6,7, 8, 9, 10,11,12,13,14,15,16]
bodyJoints = [0,1,2,3,6,7,8,12,13,14,15,17,18,19,25,26,27]
#bodyJoints = [0,1,2,3]
bodyParts = [[0,7], [0,1], [1,2], [2,3], [0,4], [4,5], [5,6], [7,8], [8,9], [9,10], [7,11], [11,12], [12,13], [7,14], [14,15], [15,16]]
bodyJointNames = ["Root", "Right Hip", "Right Knee", "Right Ankle", "Left Hip", "Left Knee", "Left Ankle", "Mid Back",
"Neck Bottom", "Neck Top", "Head Top", "Left Shoulder", "Left Elbow", "Left Wrist", "Right Shoulder", "Right Elbow", "Right Wrist"]
bodyPartNames = ["Root", "Right Hip", "Right Femur", "Right Tibia", "Left Hip", "Left Femur", "Left Tibia", "Upper Back", #
"Neck", "Head", "Left Clavicle", "Left Humerus", "Left Radius", "Right Clavicle", "Right Humerus", "Right Radius"]
partLwRatio = [1.9, 3.2, 3.059, 4.727, 3.2, 3.059, 4.727, 1.9, 1.454, 1.125, 3.4, 3.0, 3.75, 3.4, 3.0, 3.75]
partRelativeLength = [1.48, 0.0647, 0.2694, 0.2694, 0.0647, 0.2694, 0.2694, 1.48, 0.04663, 0.1088, 0.0881, 0.1554, 0.1554, 0.0881, 0.1554, 0.1554]
#First write the XML header with limb structure
fo.write('<?xml version="1.0"?>\n')
fo.write('<Project name="'+projectName+'" imgFolderPath="../seq/" maskFolderPath="../mask/" camFolderPath="" allowScaling="true" simMatPath="" exportPath="">\n')
fo.write(' <BodyJoints>\n')
for i in range(17):
fo.write(' <BodyJoint id="'+str(i)+'" name="'+bodyJointNames[i]+'"/>\n')
fo.write(' </BodyJoints>\n')
fo.write(' <BodyParts>\n')
for i in range(16):
fo.write(' <BodyPart id="'+str(i)+'" name="'+str(bodyPartNames[i])+'" parentJointId="'+str(bodyParts[i][0])+'" childJointId="'+str(bodyParts[i][1])+
'" expectedDistance="0" lwRatio="'+str(partLwRatio[i])+'" relativeLength="'+str(partRelativeLength[i])+'"/>\n')
fo.write(' </BodyParts>\n')
fo.write(' <Frames>\n')
frameCounter=1
for frame in data:
#fig = plt.figure()
#ax = fig.add_subplot(111)
rawFrameData=[]
cnt=0
for x,y in pairwise(frame):
if cnt%2==0:
rawFrameData.append([x,y])
cnt+=1
#print rawFrameData
#image = imread('/home/mfastovets/phd/H3.6M/img0001.png')
#plt.imshow(image, zorder=0);
# for part in bodyParts:
# p0=frameData[part[0]]
# p1=frameData[part[1]]
# ax.plot([p0[0], p1[0]], [p0[1], p1[1]], color='blue', linestyle='-', linewidth=2)
# ax.scatter([item[0] for item in frameData],[item[1] for item in frameData], color = 'green', marker = 'o', zorder=1)
# plt.show()
fo.write(' <Frame id="'+str(frameCounter)+'" imgPath="'+str(frameCounter).zfill(4)+'.png" maskPath="'+str(frameCounter).zfill(4)+
'.pgm" camPath="" isKeyframe="true" gpX="" gpY="">\n')
fo.write(' <BodyJoints>\n')
frameData=[]
for p in bodyJoints:
frameData.append(rawFrameData[p])
for i in range(17):
fo.write(' <BodyJoint x="'+str(int(frameData[i][0]))+'" y="'+str(int(frameData[i][1]))+'" depthSign="false" id="'+str(i)+'"/>\n')
fo.write(' </BodyJoints>\n')
fo.write(' <BodyParts>\n')
for i in range(16):
fo.write(' <BodyPart id="'+str(i)+'" isOccluded="false"/>\n')
fo.write(' </BodyParts>\n')
fo.write(' </Frame>\n')
frameCounter+=1
#break
fo.write(' </Frames>\n')
fo.write('</Project>')
|
gpl-3.0
|
mrkipling/maraschino
|
lib/sqlalchemy/ext/declarative.py
|
14
|
62372
|
# ext/declarative.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Synopsis
========
SQLAlchemy object-relational configuration involves the
combination of :class:`.Table`, :func:`.mapper`, and class
objects to define a mapped class.
:mod:`~sqlalchemy.ext.declarative` allows all three to be
expressed at once within the class declaration. As much as
possible, regular SQLAlchemy schema and ORM constructs are
used directly, so that configuration between "classical" ORM
usage and declarative remain highly similar.
As a simple example::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class SomeClass(Base):
__tablename__ = 'some_table'
id = Column(Integer, primary_key=True)
name = Column(String(50))
Above, the :func:`declarative_base` callable returns a new base class from
which all mapped classes should inherit. When the class definition is
completed, a new :class:`.Table` and
:func:`.mapper` will have been generated.
The resulting table and mapper are accessible via
``__table__`` and ``__mapper__`` attributes on the
``SomeClass`` class::
# access the mapped Table
SomeClass.__table__
# access the Mapper
SomeClass.__mapper__
Defining Attributes
===================
In the previous example, the :class:`.Column` objects are
automatically named with the name of the attribute to which they are
assigned.
To name columns explicitly with a name distinct from their mapped attribute,
just give the column a name. Below, column "some_table_id" is mapped to the
"id" attribute of `SomeClass`, but in SQL will be represented as "some_table_id"::
class SomeClass(Base):
__tablename__ = 'some_table'
id = Column("some_table_id", Integer, primary_key=True)
Attributes may be added to the class after its construction, and they will be
added to the underlying :class:`.Table` and
:func:`.mapper()` definitions as appropriate::
SomeClass.data = Column('data', Unicode)
SomeClass.related = relationship(RelatedInfo)
Classes which are constructed using declarative can interact freely
with classes that are mapped explicitly with :func:`mapper`.
It is recommended, though not required, that all tables
share the same underlying :class:`~sqlalchemy.schema.MetaData` object,
so that string-configured :class:`~sqlalchemy.schema.ForeignKey`
references can be resolved without issue.
Accessing the MetaData
=======================
The :func:`declarative_base` base class contains a
:class:`.MetaData` object where newly defined
:class:`.Table` objects are collected. This object is
intended to be accessed directly for
:class:`.MetaData`-specific operations. For example, to issue
CREATE statements for all tables::
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
The usual techniques of associating :class:`.MetaData` with :class:`.Engine`
apply, such as assigning to the ``bind`` attribute::
Base.metadata.bind = create_engine('sqlite://')
To associate the engine with the :func:`declarative_base` at time
of construction, the ``bind`` argument is accepted::
Base = declarative_base(bind=create_engine('sqlite://'))
:func:`declarative_base` can also receive a pre-existing
:class:`.MetaData` object, which allows a
declarative setup to be associated with an already
existing traditional collection of :class:`~sqlalchemy.schema.Table`
objects::
mymetadata = MetaData()
Base = declarative_base(metadata=mymetadata)
Configuring Relationships
=========================
Relationships to other classes are done in the usual way, with the added
feature that the class specified to :func:`~sqlalchemy.orm.relationship`
may be a string name. The "class registry" associated with ``Base``
is used at mapper compilation time to resolve the name into the actual
class object, which is expected to have been defined once the mapper
configuration is used::
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
name = Column(String(50))
addresses = relationship("Address", backref="user")
class Address(Base):
__tablename__ = 'addresses'
id = Column(Integer, primary_key=True)
email = Column(String(50))
user_id = Column(Integer, ForeignKey('users.id'))
Column constructs, since they are just that, are immediately usable,
as below where we define a primary join condition on the ``Address``
class using them::
class Address(Base):
__tablename__ = 'addresses'
id = Column(Integer, primary_key=True)
email = Column(String(50))
user_id = Column(Integer, ForeignKey('users.id'))
user = relationship(User, primaryjoin=user_id == User.id)
In addition to the main argument for :func:`~sqlalchemy.orm.relationship`,
other arguments which depend upon the columns present on an as-yet
undefined class may also be specified as strings. These strings are
evaluated as Python expressions. The full namespace available within
this evaluation includes all classes mapped for this declarative base,
as well as the contents of the ``sqlalchemy`` package, including
expression functions like :func:`~sqlalchemy.sql.expression.desc` and
:attr:`~sqlalchemy.sql.expression.func`::
class User(Base):
# ....
addresses = relationship("Address",
order_by="desc(Address.email)",
primaryjoin="Address.user_id==User.id")
As an alternative to string-based attributes, attributes may also be
defined after all classes have been created. Just add them to the target
class after the fact::
User.addresses = relationship(Address,
primaryjoin=Address.user_id==User.id)
Configuring Many-to-Many Relationships
======================================
Many-to-many relationships are also declared in the same way
with declarative as with traditional mappings. The
``secondary`` argument to
:func:`.relationship` is as usual passed a
:class:`.Table` object, which is typically declared in the
traditional way. The :class:`.Table` usually shares
the :class:`.MetaData` object used by the declarative base::
keywords = Table(
'author_keyword', Base.metadata,
Column('author_id', Integer, ForeignKey('authors.id')),
Column('keyword_id', Integer, ForeignKey('keywords.id'))
)
class Author(Base):
__tablename__ = 'authors'
id = Column(Integer, primary_key=True)
keywords = relationship("Keyword", secondary=keywords)
As with traditional mapping, it's generally not a good idea to use
a :class:`.Table` as the "secondary" argument which is also mapped to
a class, unless the :func:`.relationship` is declared with ``viewonly=True``.
Otherwise, the unit-of-work system may attempt duplicate INSERT and
DELETE statements against the underlying table.
.. _declarative_sql_expressions:
Defining SQL Expressions
========================
See :ref:`mapper_sql_expressions` for examples on declaratively
mapping attributes to SQL expressions.
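A minimal sketch, assuming the usual :func:`~sqlalchemy.orm.column_property`
construct and a hypothetical ``fullname`` attribute built from the mapped columns::
    from sqlalchemy.orm import column_property
    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        firstname = Column(String(50))
        lastname = Column(String(50))
        # a SQL expression evaluated in queries, not in Python
        fullname = column_property(firstname + " " + lastname)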
.. _declarative_table_args:
Table Configuration
===================
Table arguments other than the name, metadata, and mapped Column
arguments are specified using the ``__table_args__`` class attribute.
This attribute accommodates both positional as well as keyword
arguments that are normally sent to the
:class:`~sqlalchemy.schema.Table` constructor.
The attribute can be specified in one of two forms. One is as a
dictionary::
class MyClass(Base):
__tablename__ = 'sometable'
__table_args__ = {'mysql_engine':'InnoDB'}
The other, a tuple, where each argument is positional
(usually constraints)::
class MyClass(Base):
__tablename__ = 'sometable'
__table_args__ = (
ForeignKeyConstraint(['id'], ['remote_table.id']),
UniqueConstraint('foo'),
)
Keyword arguments can be specified with the above form by
specifying the last argument as a dictionary::
class MyClass(Base):
__tablename__ = 'sometable'
__table_args__ = (
ForeignKeyConstraint(['id'], ['remote_table.id']),
UniqueConstraint('foo'),
{'autoload':True}
)
Using a Hybrid Approach with __table__
=======================================
As an alternative to ``__tablename__``, a direct
:class:`~sqlalchemy.schema.Table` construct may be used. The
:class:`~sqlalchemy.schema.Column` objects, which in this case require
their names, will be added to the mapping just like a regular mapping
to a table::
class MyClass(Base):
__table__ = Table('my_table', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50))
)
``__table__`` provides a more focused point of control for establishing
table metadata, while still getting most of the benefits of using declarative.
An application that uses reflection might want to load table metadata elsewhere
and simply pass it to declarative classes::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
Base.metadata.reflect(some_engine)
class User(Base):
__table__ = Base.metadata.tables['user']
class Address(Base):
__table__ = Base.metadata.tables['address']
Some configuration schemes may find it more appropriate to use ``__table__``,
such as those which already take advantage of the data-driven nature of
:class:`.Table` to customize and/or automate schema definition.
Note that when the ``__table__`` approach is used, the object is immediately
usable as a plain :class:`.Table` within the class declaration body itself,
as a Python class is only another syntactical block. Below this is illustrated
by using the ``id`` column in the ``primaryjoin`` condition of a :func:`.relationship`::
class MyClass(Base):
__table__ = Table('my_table', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50))
)
widgets = relationship(Widget,
primaryjoin=Widget.myclass_id==__table__.c.id)
Similarly, mapped attributes which refer to ``__table__`` can be placed inline,
as below where we assign the ``name`` column to the attribute ``_name``, generating
a synonym for ``name``::
from sqlalchemy.ext.declarative import synonym_for
class MyClass(Base):
__table__ = Table('my_table', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50))
)
_name = __table__.c.name
@synonym_for("_name")
def name(self):
return "Name: %s" % _name
Mapper Configuration
====================
Declarative makes use of the :func:`~.orm.mapper` function internally
when it creates the mapping to the declared table. The options
for :func:`~.orm.mapper` are passed directly through via the ``__mapper_args__``
class attribute. As always, arguments which reference locally
mapped columns can reference them directly from within the
class declaration::
from datetime import datetime
class Widget(Base):
__tablename__ = 'widgets'
id = Column(Integer, primary_key=True)
timestamp = Column(DateTime, nullable=False)
__mapper_args__ = {
'version_id_col': timestamp,
'version_id_generator': lambda v:datetime.now()
}
.. _declarative_inheritance:
Inheritance Configuration
=========================
Declarative supports all three forms of inheritance as intuitively
as possible. The ``inherits`` mapper keyword argument is not needed
as declarative will determine this from the class itself. The various
"polymorphic" keyword arguments are specified using ``__mapper_args__``.
Joined Table Inheritance
~~~~~~~~~~~~~~~~~~~~~~~~
Joined table inheritance is defined as a subclass that defines its own
table::
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'polymorphic_identity': 'engineer'}
id = Column(Integer, ForeignKey('people.id'), primary_key=True)
primary_language = Column(String(50))
Note that above, the ``Engineer.id`` attribute, since it shares the
same attribute name as the ``Person.id`` attribute, will in fact
represent the ``people.id`` and ``engineers.id`` columns together, and
will render inside a query as ``"people.id"``.
To provide the ``Engineer`` class with an attribute that represents
only the ``engineers.id`` column, give it a different attribute name::
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'polymorphic_identity': 'engineer'}
engineer_id = Column('id', Integer, ForeignKey('people.id'),
primary_key=True)
primary_language = Column(String(50))
Single Table Inheritance
~~~~~~~~~~~~~~~~~~~~~~~~
Single table inheritance is defined as a subclass that does not have
its own table; you just leave out the ``__table__`` and ``__tablename__``
attributes::
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language = Column(String(50))
When the above mappers are configured, the ``Person`` class is mapped
to the ``people`` table *before* the ``primary_language`` column is
defined, and this column will not be included in its own mapping.
When ``Engineer`` then defines the ``primary_language`` column, the
column is added to the ``people`` table so that it is included in the
mapping for ``Engineer`` and is also part of the table's full set of
columns. Columns which are not mapped to ``Person`` are also excluded
from any other single or joined inheriting classes using the
``exclude_properties`` mapper argument. Below, ``Manager`` will have
all the attributes of ``Person`` and ``Manager`` but *not* the
``primary_language`` attribute of ``Engineer``::
class Manager(Person):
__mapper_args__ = {'polymorphic_identity': 'manager'}
golf_swing = Column(String(50))
The attribute exclusion logic is provided by the
``exclude_properties`` mapper argument, and declarative's default
behavior can be disabled by passing an explicit ``exclude_properties``
collection (empty or otherwise) to the ``__mapper_args__``.
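For example, the default exclusion can be disabled entirely by supplying an
empty collection (a minimal sketch continuing the ``Person`` hierarchy above)::
    class Manager(Person):
        __mapper_args__ = {
            'polymorphic_identity': 'manager',
            # disable declarative's default exclusion behavior
            'exclude_properties': ()
        }
        golf_swing = Column(String(50))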
Concrete Table Inheritance
~~~~~~~~~~~~~~~~~~~~~~~~~~
Concrete inheritance is defined as a subclass which has its own table and sets the
``concrete`` keyword argument to ``True``::
class Person(Base):
__tablename__ = 'people'
id = Column(Integer, primary_key=True)
name = Column(String(50))
class Engineer(Person):
__tablename__ = 'engineers'
__mapper_args__ = {'concrete':True}
id = Column(Integer, primary_key=True)
primary_language = Column(String(50))
name = Column(String(50))
Usage of an abstract base class is a little less straightforward, as it
requires use of :func:`~sqlalchemy.orm.util.polymorphic_union`,
which needs to be created with the :class:`.Table` objects
before the class is built::
engineers = Table('engineers', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50)),
Column('primary_language', String(50))
)
managers = Table('managers', Base.metadata,
Column('id', Integer, primary_key=True),
Column('name', String(50)),
Column('golf_swing', String(50))
)
punion = polymorphic_union({
'engineer':engineers,
'manager':managers
}, 'type', 'punion')
class Person(Base):
__table__ = punion
__mapper_args__ = {'polymorphic_on':punion.c.type}
class Engineer(Person):
__table__ = engineers
__mapper_args__ = {'polymorphic_identity':'engineer', 'concrete':True}
class Manager(Person):
__table__ = managers
__mapper_args__ = {'polymorphic_identity':'manager', 'concrete':True}
.. _declarative_concrete_helpers:
Using the Concrete Helpers
^^^^^^^^^^^^^^^^^^^^^^^^^^^
New helper classes released in 0.7.3 provide a simpler pattern for concrete inheritance.
With these objects, the ``__declare_last__`` helper is used to configure the "polymorphic"
loader for the mapper after all subclasses have been declared.
An abstract base can be declared using the :class:`.AbstractConcreteBase` class::
from sqlalchemy.ext.declarative import AbstractConcreteBase
class Employee(AbstractConcreteBase, Base):
pass
To have a concrete ``employee`` table, use :class:`.ConcreteBase` instead::
from sqlalchemy.ext.declarative import ConcreteBase
class Employee(ConcreteBase, Base):
__tablename__ = 'employee'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee',
'concrete':True}
Either ``Employee`` base can be used in the normal fashion::
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
class Engineer(Employee):
__tablename__ = 'engineer'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
engineer_info = Column(String(40))
__mapper_args__ = {'polymorphic_identity':'engineer',
'concrete':True}
.. _declarative_mixins:
Mixin and Custom Base Classes
==============================
A common need when using :mod:`~sqlalchemy.ext.declarative` is to
share some functionality, such as a set of common columns, some common
table options, or other mapped properties, across many
classes. The standard Python idiom for this is to have the classes
inherit from a base which includes these common features.
When using :mod:`~sqlalchemy.ext.declarative`, this idiom is allowed
via the usage of a custom declarative base class, as well as a "mixin" class
which is inherited from in addition to the primary base. Declarative
includes several helper features to make this work in terms of how
mappings are declared. An example of some commonly mixed-in
idioms is below::
from sqlalchemy.ext.declarative import declared_attr
class MyMixin(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
__table_args__ = {'mysql_engine': 'InnoDB'}
__mapper_args__= {'always_refresh': True}
id = Column(Integer, primary_key=True)
class MyModel(MyMixin, Base):
name = Column(String(1000))
Where above, the class ``MyModel`` will contain an "id" column
as the primary key, a ``__tablename__`` attribute that derives
from the name of the class itself, as well as ``__table_args__``
and ``__mapper_args__`` defined by the ``MyMixin`` mixin class.
There's no fixed convention over whether ``MyMixin`` precedes
``Base`` or not. Normal Python method resolution rules apply, and
the above example would work just as well with::
class MyModel(Base, MyMixin):
name = Column(String(1000))
This works because ``Base`` here doesn't define any of the
variables that ``MyMixin`` defines, i.e. ``__tablename__``,
``__table_args__``, ``id``, etc. If the ``Base`` did define
an attribute of the same name, the class placed first in the
inherits list would determine which attribute is used on the
newly defined class.
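In plain Python terms, this is ordinary attribute lookup along the method
resolution order (a minimal illustration, independent of mapping)::
    class A(object):
        x = 'a'
    class B(object):
        x = 'b'
    class C(A, B):
        pass
    C.x  # 'a' - the class listed first in the bases wins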
Augmenting the Base
~~~~~~~~~~~~~~~~~~~
In addition to using a pure mixin, most of the techniques in this
section can also be applied to the base class itself, for patterns that
should apply to all classes derived from a particular base. This
is achieved using the ``cls`` argument of the :func:`.declarative_base` function::
from sqlalchemy.ext.declarative import declared_attr
class Base(object):
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
__table_args__ = {'mysql_engine': 'InnoDB'}
id = Column(Integer, primary_key=True)
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base(cls=Base)
class MyModel(Base):
name = Column(String(1000))
Where above, ``MyModel`` and all other classes that derive from ``Base`` will have
a table name derived from the class name, an ``id`` primary key column, as well as
the "InnoDB" engine for MySQL.
Mixing in Columns
~~~~~~~~~~~~~~~~~
The most basic way to specify a column on a mixin is by simple
declaration::
class TimestampMixin(object):
created_at = Column(DateTime, default=func.now())
class MyModel(TimestampMixin, Base):
__tablename__ = 'test'
id = Column(Integer, primary_key=True)
name = Column(String(1000))
Where above, all declarative classes that include ``TimestampMixin``
will also have a column ``created_at`` that applies a timestamp to
all row insertions.
Those familiar with the SQLAlchemy expression language know that
the object identity of clause elements defines their role in a schema.
Two ``Table`` objects ``a`` and ``b`` may both have a column called
``id``, but the way these are differentiated is that ``a.c.id``
and ``b.c.id`` are two distinct Python objects, referencing their
parent tables ``a`` and ``b`` respectively.
In the case of the mixin column, it seems that only one
:class:`.Column` object is explicitly created, yet the ultimate
``created_at`` column above must exist as a distinct Python object
for each separate destination class. To accomplish this, the declarative
extension creates a **copy** of each :class:`.Column` object encountered on
a class that is detected as a mixin.
This copy mechanism is limited to simple columns that have no foreign
keys, as a :class:`.ForeignKey` itself contains references to columns
which can't be properly recreated at this level. For columns that
have foreign keys, as well as for the variety of mapper-level constructs
that require destination-explicit context, the
:func:`~.declared_attr` decorator (renamed from ``sqlalchemy.util.classproperty`` in 0.6.5)
is provided so that
patterns common to many classes can be defined as callables::
from sqlalchemy.ext.declarative import declared_attr
class ReferenceAddressMixin(object):
@declared_attr
def address_id(cls):
return Column(Integer, ForeignKey('address.id'))
class User(ReferenceAddressMixin, Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
Where above, the ``address_id`` class-level callable is executed at the
point at which the ``User`` class is constructed, and the declarative
extension can use the resulting :class:`.Column` object as returned by
the method without the need to copy it.
Columns generated by :func:`~.declared_attr` can also be
referenced by ``__mapper_args__`` to a limited degree, currently
by ``polymorphic_on`` and ``version_id_col``, by specifying the
class decorator itself within the dictionary - the declarative extension
will resolve them at class construction time::
class MyMixin:
@declared_attr
def type_(cls):
return Column(String(50))
__mapper_args__= {'polymorphic_on':type_}
class MyModel(MyMixin, Base):
__tablename__='test'
id = Column(Integer, primary_key=True)
Mixing in Relationships
~~~~~~~~~~~~~~~~~~~~~~~
Relationships created by :func:`~sqlalchemy.orm.relationship` are provided
with declarative mixin classes exclusively using the
:func:`.declared_attr` approach, eliminating any ambiguity
which could arise when copying a relationship and its possibly column-bound
contents. Below is an example which combines a foreign key column and a
relationship so that two classes ``Foo`` and ``Bar`` can both be configured to
reference a common target class via many-to-one::
class RefTargetMixin(object):
@declared_attr
def target_id(cls):
return Column('target_id', ForeignKey('target.id'))
@declared_attr
def target(cls):
return relationship("Target")
class Foo(RefTargetMixin, Base):
__tablename__ = 'foo'
id = Column(Integer, primary_key=True)
class Bar(RefTargetMixin, Base):
__tablename__ = 'bar'
id = Column(Integer, primary_key=True)
class Target(Base):
__tablename__ = 'target'
id = Column(Integer, primary_key=True)
:func:`~sqlalchemy.orm.relationship` definitions which require explicit
primaryjoin, order_by etc. expressions should use the string forms
for these arguments, so that they are evaluated as late as possible.
To reference the mixin class in these expressions, use the given ``cls``
to get its name::
class RefTargetMixin(object):
@declared_attr
def target_id(cls):
return Column('target_id', ForeignKey('target.id'))
@declared_attr
def target(cls):
return relationship("Target",
primaryjoin="Target.id==%s.target_id" % cls.__name__
)
Mixing in deferred(), column_property(), etc.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Like :func:`~sqlalchemy.orm.relationship`, all
:class:`~sqlalchemy.orm.interfaces.MapperProperty` subclasses such as
:func:`~sqlalchemy.orm.deferred`, :func:`~sqlalchemy.orm.column_property`,
etc. ultimately involve references to columns, and therefore, when
used with declarative mixins, have the :func:`.declared_attr`
requirement so that no reliance on copying is needed::
class SomethingMixin(object):
@declared_attr
def dprop(cls):
return deferred(Column(Integer))
class Something(SomethingMixin, Base):
__tablename__ = "something"
Controlling table inheritance with mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``__tablename__`` attribute in conjunction with the hierarchy of
classes involved in a declarative mixin scenario controls what type of
table inheritance, if any,
is configured by the declarative extension.
If the ``__tablename__`` is computed by a mixin, you may need to
control which classes get the computed attribute in order to get the
type of table inheritance you require.
For example, if you had a mixin that computes ``__tablename__`` but
where you wanted to use that mixin in a single table inheritance
hierarchy, you can explicitly specify ``__tablename__`` as ``None`` to
indicate that the class should not have a table mapped::
from sqlalchemy.ext.declarative import declared_attr
class Tablename:
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
class Person(Tablename, Base):
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
__tablename__ = None
__mapper_args__ = {'polymorphic_identity': 'engineer'}
primary_language = Column(String(50))
Alternatively, you can make the mixin intelligent enough to only
return a ``__tablename__`` in the event that no table is already
mapped in the inheritance hierarchy. To help with this, a
:func:`~sqlalchemy.ext.declarative.has_inherited_table` helper
function is provided that returns ``True`` if a parent class already
has a mapped table.
As an example, here's a mixin that will only allow single table
inheritance::
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.declarative import has_inherited_table
class Tablename(object):
@declared_attr
def __tablename__(cls):
if has_inherited_table(cls):
return None
return cls.__name__.lower()
class Person(Tablename, Base):
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
class Engineer(Person):
primary_language = Column(String(50))
__mapper_args__ = {'polymorphic_identity': 'engineer'}
If you want to use a similar pattern with a mix of single and joined
table inheritance, you would need a slightly different mixin and use
it on any joined table child classes in addition to their parent
classes::
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.declarative import has_inherited_table
class Tablename(object):
@declared_attr
def __tablename__(cls):
if (has_inherited_table(cls) and
Tablename not in cls.__bases__):
return None
return cls.__name__.lower()
class Person(Tablename, Base):
id = Column(Integer, primary_key=True)
discriminator = Column('type', String(50))
__mapper_args__ = {'polymorphic_on': discriminator}
# This is single table inheritance
class Engineer(Person):
primary_language = Column(String(50))
__mapper_args__ = {'polymorphic_identity': 'engineer'}
# This is joined table inheritance
class Manager(Tablename, Person):
id = Column(Integer, ForeignKey('person.id'), primary_key=True)
preferred_recreation = Column(String(50))
__mapper_args__ = {'polymorphic_identity': 'manager'}
Combining Table/Mapper Arguments from Multiple Mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the case of ``__table_args__`` or ``__mapper_args__``
specified with declarative mixins, you may want to combine
some parameters from several mixins with those you wish to
define on the class itself. The
:func:`.declared_attr` decorator can be used
here to create user-defined routines that combine parameters
pulled from multiple collections::
from sqlalchemy.ext.declarative import declared_attr
class MySQLSettings(object):
__table_args__ = {'mysql_engine':'InnoDB'}
class MyOtherMixin(object):
__table_args__ = {'info':'foo'}
class MyModel(MySQLSettings, MyOtherMixin, Base):
__tablename__='my_model'
@declared_attr
def __table_args__(cls):
args = dict()
args.update(MySQLSettings.__table_args__)
args.update(MyOtherMixin.__table_args__)
return args
id = Column(Integer, primary_key=True)
Creating Indexes with Mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To define a named, potentially multicolumn :class:`.Index` that applies to all
tables derived from a mixin, use the "inline" form of :class:`.Index` and establish
it as part of ``__table_args__``::
class MyMixin(object):
a = Column(Integer)
b = Column(Integer)
@declared_attr
def __table_args__(cls):
return (Index('test_idx_%s' % cls.__tablename__, 'a', 'b'),)
class MyModel(MyMixin, Base):
__tablename__ = 'atable'
c = Column(Integer,primary_key=True)
Special Directives
==================
``__declare_last__()``
~~~~~~~~~~~~~~~~~~~~~~
The ``__declare_last__()`` hook, introduced in 0.7.3, allows definition of
a class-level function that is automatically called by the :meth:`.MapperEvents.after_configured`
event, which occurs after mappings are assumed to be completed and the 'configure' step
has finished::
class MyClass(Base):
@classmethod
def __declare_last__(cls):
""
# do something with mappings
``__abstract__``
~~~~~~~~~~~~~~~~~~~
``__abstract__``, introduced in 0.7.3, causes declarative to skip the production
of a table or mapper for the class entirely. A class can be added within a hierarchy
in the same way as a mixin (see :ref:`declarative_mixins`), allowing subclasses to extend
just from the special class::
class SomeAbstractBase(Base):
__abstract__ = True
def some_helpful_method(self):
""
@declared_attr
def __mapper_args__(cls):
return {"helpful mapper arguments":True}
class MyMappedClass(SomeAbstractBase):
""
Class Constructor
=================
As a convenience feature, the :func:`declarative_base` sets a default
constructor on classes which takes keyword arguments, and assigns them
to the named attributes::
e = Engineer(primary_language='python')
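Keyword arguments which do not correspond to an attribute of the class raise
``TypeError``; a minimal sketch (the attribute name here is hypothetical)::
    Engineer(nonexistent_attribute='x')  # raises TypeError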
Sessions
========
Note that ``declarative`` does nothing special with sessions, and is
only intended as an easier way to configure mappers and
:class:`~sqlalchemy.schema.Table` objects. A typical application
setup using :func:`~sqlalchemy.orm.scoped_session` might look like::
engine = create_engine('postgresql://scott:tiger@localhost/test')
Session = scoped_session(sessionmaker(autocommit=False,
autoflush=False,
bind=engine))
Base = declarative_base()
Mapped instances then make usage of
:class:`~sqlalchemy.orm.session.Session` in the usual way.
"""
from sqlalchemy.schema import Table, Column, MetaData, _get_table_key
from sqlalchemy.orm import synonym as _orm_synonym, mapper,\
comparable_property, class_mapper
from sqlalchemy.orm.interfaces import MapperProperty
from sqlalchemy.orm.properties import RelationshipProperty, ColumnProperty, CompositeProperty
from sqlalchemy.orm.util import _is_mapped_class
from sqlalchemy import util, exc
from sqlalchemy.sql import util as sql_util, expression
from sqlalchemy import event
from sqlalchemy.orm.util import polymorphic_union, _mapper_or_none
__all__ = 'declarative_base', 'synonym_for', \
'comparable_using', 'instrument_declarative'
def instrument_declarative(cls, registry, metadata):
"""Given a class, configure the class declaratively,
using the given registry, which can be any dictionary, and
MetaData object.
"""
if '_decl_class_registry' in cls.__dict__:
raise exc.InvalidRequestError(
"Class %r already has been "
"instrumented declaratively" % cls)
cls._decl_class_registry = registry
cls.metadata = metadata
_as_declarative(cls, cls.__name__, cls.__dict__)
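# Usage sketch (the class here is hypothetical; the registry may be any dict):
#
#     registry = {}
#     metadata = MetaData()
#     class Plain(object):
#         __tablename__ = 'plain'
#         id = Column(Integer, primary_key=True)
#     instrument_declarative(Plain, registry, metadata)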
def has_inherited_table(cls):
"""Given a class, return True if any of the classes it inherits from has a
mapped table, otherwise return False.
"""
for class_ in cls.__mro__:
if getattr(class_,'__table__',None) is not None:
return True
return False
def _as_declarative(cls, classname, dict_):
# dict_ will be a dictproxy, which we can't write to, and we need to!
dict_ = dict(dict_)
column_copies = {}
potential_columns = {}
mapper_args = {}
table_args = inherited_table_args = None
tablename = None
parent_columns = ()
declarative_props = (declared_attr, util.classproperty)
for base in cls.__mro__:
_is_declarative_inherits = hasattr(base, '_decl_class_registry')
if '__declare_last__' in base.__dict__:
@event.listens_for(mapper, "after_configured")
def go():
cls.__declare_last__()
if '__abstract__' in base.__dict__:
if (base is cls or
(base in cls.__bases__ and not _is_declarative_inherits)
):
return
class_mapped = _is_mapped_class(base)
if class_mapped:
parent_columns = base.__table__.c.keys()
for name,obj in vars(base).items():
if name == '__mapper_args__':
if not mapper_args and (
not class_mapped or
isinstance(obj, declarative_props)
):
mapper_args = cls.__mapper_args__
elif name == '__tablename__':
if not tablename and (
not class_mapped or
isinstance(obj, declarative_props)
):
tablename = cls.__tablename__
elif name == '__table_args__':
if not table_args and (
not class_mapped or
isinstance(obj, declarative_props)
):
table_args = cls.__table_args__
if not isinstance(table_args, (tuple, dict, type(None))):
raise exc.ArgumentError(
"__table_args__ value must be a tuple, "
"dict, or None")
if base is not cls:
inherited_table_args = True
elif class_mapped:
if isinstance(obj, declarative_props):
util.warn("Regular (i.e. not __special__) "
"attribute '%s.%s' uses @declared_attr, "
"but owning class %s is mapped - "
"not applying to subclass %s."
% (base.__name__, name, base, cls))
continue
elif base is not cls:
# we're a mixin.
if isinstance(obj, Column):
if obj.foreign_keys:
raise exc.InvalidRequestError(
"Columns with foreign keys to other columns "
"must be declared as @declared_attr callables "
"on declarative mixin classes. ")
if name not in dict_ and not (
'__table__' in dict_ and
(obj.name or name) in dict_['__table__'].c
) and name not in potential_columns:
potential_columns[name] = \
column_copies[obj] = \
obj.copy()
column_copies[obj]._creation_order = \
obj._creation_order
elif isinstance(obj, MapperProperty):
raise exc.InvalidRequestError(
"Mapper properties (i.e. deferred,"
"column_property(), relationship(), etc.) must "
"be declared as @declared_attr callables "
"on declarative mixin classes.")
elif isinstance(obj, declarative_props):
dict_[name] = ret = \
column_copies[obj] = getattr(cls, name)
if isinstance(ret, (Column, MapperProperty)) and \
ret.doc is None:
ret.doc = obj.__doc__
# apply inherited columns as we should
for k, v in potential_columns.items():
if tablename or (v.name or k) not in parent_columns:
dict_[k] = v
if inherited_table_args and not tablename:
table_args = None
# make sure that column copies are used rather
# than the original columns from any mixins
for k in ('version_id_col', 'polymorphic_on',):
if k in mapper_args:
v = mapper_args[k]
mapper_args[k] = column_copies.get(v,v)
if classname in cls._decl_class_registry:
util.warn("The classname %r is already in the registry of this"
" declarative base, mapped to %r" % (
classname,
cls._decl_class_registry[classname]
))
cls._decl_class_registry[classname] = cls
our_stuff = util.OrderedDict()
for k in dict_:
value = dict_[k]
if isinstance(value, declarative_props):
value = getattr(cls, k)
if (isinstance(value, tuple) and len(value) == 1 and
isinstance(value[0], (Column, MapperProperty))):
util.warn("Ignoring declarative-like tuple value of attribute "
"%s: possibly a copy-and-paste error with a comma "
"left at the end of the line?" % k)
continue
if not isinstance(value, (Column, MapperProperty)):
continue
if k == 'metadata':
raise exc.InvalidRequestError(
"Attribute name 'metadata' is reserved "
"for the MetaData instance when using a "
"declarative base class."
)
prop = _deferred_relationship(cls, value)
our_stuff[k] = prop
# set up attributes in the order they were created
our_stuff.sort(key=lambda key: our_stuff[key]._creation_order)
# extract columns from the class dict
cols = set()
for key, c in our_stuff.iteritems():
if isinstance(c, (ColumnProperty, CompositeProperty)):
for col in c.columns:
if isinstance(col, Column) and \
col.table is None:
_undefer_column_name(key, col)
cols.add(col)
elif isinstance(c, Column):
_undefer_column_name(key, c)
cols.add(c)
# if the column is the same name as the key,
# remove it from the explicit properties dict.
# the normal rules for assigning column-based properties
# will take over, including precedence of columns
# in multi-column ColumnProperties.
if key == c.key:
del our_stuff[key]
cols = sorted(cols, key=lambda c:c._creation_order)
table = None
if '__table__' not in dict_:
if tablename is not None:
args, table_kw = (), {}
if table_args:
if isinstance(table_args, dict):
table_kw = table_args
elif isinstance(table_args, tuple):
if isinstance(table_args[-1], dict):
args, table_kw = table_args[0:-1], table_args[-1]
else:
args = table_args
autoload = dict_.get('__autoload__')
if autoload:
table_kw['autoload'] = True
cls.__table__ = table = Table(tablename, cls.metadata,
*(tuple(cols) + tuple(args)),
**table_kw)
else:
table = cls.__table__
if cols:
for c in cols:
if not table.c.contains_column(c):
raise exc.ArgumentError(
"Can't add additional column %r when "
"specifying __table__" % c.key
)
if 'inherits' not in mapper_args:
for c in cls.__bases__:
if _is_mapped_class(c):
mapper_args['inherits'] = c
break
if hasattr(cls, '__mapper_cls__'):
mapper_cls = util.unbound_method_to_callable(cls.__mapper_cls__)
else:
mapper_cls = mapper
if table is None and 'inherits' not in mapper_args:
raise exc.InvalidRequestError(
"Class %r does not have a __table__ or __tablename__ "
"specified and does not inherit from an existing "
"table-mapped class." % cls
)
elif 'inherits' in mapper_args and not mapper_args.get('concrete', False):
inherited_mapper = class_mapper(mapper_args['inherits'],
compile=False)
inherited_table = inherited_mapper.local_table
if table is None:
# single table inheritance.
# ensure no table args
if table_args:
raise exc.ArgumentError(
"Can't place __table_args__ on an inherited class "
"with no table."
)
# add any columns declared here to the inherited table.
for c in cols:
if c.primary_key:
raise exc.ArgumentError(
"Can't place primary key columns on an inherited "
"class with no table."
)
if c.name in inherited_table.c:
raise exc.ArgumentError(
"Column '%s' on class %s conflicts with "
"existing column '%s'" %
(c, cls, inherited_table.c[c.name])
)
inherited_table.append_column(c)
# single or joined inheritance
# exclude any cols on the inherited table which are not mapped on the
# parent class, to avoid
# mapping columns specific to sibling/nephew classes
inherited_mapper = class_mapper(mapper_args['inherits'],
compile=False)
inherited_table = inherited_mapper.local_table
if 'exclude_properties' not in mapper_args:
mapper_args['exclude_properties'] = exclude_properties = \
set([c.key for c in inherited_table.c
if c not in inherited_mapper._columntoproperty])
exclude_properties.difference_update([c.key for c in cols])
# look through columns in the current mapper that
# are keyed to a propname different than the colname
# (if names were the same, we'd have popped it out above,
# in which case the mapper makes this combination).
# See if the superclass has a similar column property.
# If so, join them together.
for k, col in our_stuff.items():
if not isinstance(col, expression.ColumnElement):
continue
if k in inherited_mapper._props:
p = inherited_mapper._props[k]
if isinstance(p, ColumnProperty):
# note here we place the superclass column
# first. this corresponds to the
# append() in mapper._configure_property().
# change this ordering when we do [ticket:1892]
our_stuff[k] = p.columns + [col]
cls.__mapper__ = mapper_cls(cls,
table,
properties=our_stuff,
**mapper_args)
class DeclarativeMeta(type):
def __init__(cls, classname, bases, dict_):
if '_decl_class_registry' in cls.__dict__:
return type.__init__(cls, classname, bases, dict_)
else:
_as_declarative(cls, classname, cls.__dict__)
return type.__init__(cls, classname, bases, dict_)
def __setattr__(cls, key, value):
if '__mapper__' in cls.__dict__:
if isinstance(value, Column):
_undefer_column_name(key, value)
cls.__table__.append_column(value)
cls.__mapper__.add_property(key, value)
elif isinstance(value, ColumnProperty):
for col in value.columns:
if isinstance(col, Column) and col.table is None:
_undefer_column_name(key, col)
cls.__table__.append_column(col)
cls.__mapper__.add_property(key, value)
elif isinstance(value, MapperProperty):
cls.__mapper__.add_property(
key,
_deferred_relationship(cls, value)
)
else:
type.__setattr__(cls, key, value)
else:
type.__setattr__(cls, key, value)
class _GetColumns(object):
def __init__(self, cls):
self.cls = cls
def __getattr__(self, key):
mapper = class_mapper(self.cls, compile=False)
if mapper:
if not mapper.has_property(key):
raise exc.InvalidRequestError(
"Class %r does not have a mapped column named %r"
% (self.cls, key))
prop = mapper.get_property(key)
if not isinstance(prop, ColumnProperty):
raise exc.InvalidRequestError(
"Property %r is not an instance of"
" ColumnProperty (i.e. does not correspond"
" directly to a Column)." % key)
return getattr(self.cls, key)
class _GetTable(object):
def __init__(self, key, metadata):
self.key = key
self.metadata = metadata
def __getattr__(self, key):
return self.metadata.tables[
_get_table_key(key, self.key)
]
def _deferred_relationship(cls, prop):
def resolve_arg(arg):
import sqlalchemy
def access_cls(key):
if key in cls._decl_class_registry:
return _GetColumns(cls._decl_class_registry[key])
elif key in cls.metadata.tables:
return cls.metadata.tables[key]
elif key in cls.metadata._schemas:
return _GetTable(key, cls.metadata)
else:
return sqlalchemy.__dict__[key]
d = util.PopulateDict(access_cls)
def return_cls():
try:
x = eval(arg, globals(), d)
if isinstance(x, _GetColumns):
return x.cls
else:
return x
except NameError, n:
raise exc.InvalidRequestError(
"When initializing mapper %s, expression %r failed to "
"locate a name (%r). If this is a class name, consider "
"adding this relationship() to the %r class after "
"both dependent classes have been defined." %
(prop.parent, arg, n.args[0], cls)
)
return return_cls
if isinstance(prop, RelationshipProperty):
for attr in ('argument', 'order_by', 'primaryjoin', 'secondaryjoin',
'secondary', '_user_defined_foreign_keys', 'remote_side'):
v = getattr(prop, attr)
if isinstance(v, basestring):
setattr(prop, attr, resolve_arg(v))
if prop.backref and isinstance(prop.backref, tuple):
key, kwargs = prop.backref
for attr in ('primaryjoin', 'secondaryjoin', 'secondary',
'foreign_keys', 'remote_side', 'order_by'):
if attr in kwargs and isinstance(kwargs[attr], basestring):
kwargs[attr] = resolve_arg(kwargs[attr])
return prop
def synonym_for(name, map_column=False):
"""Decorator, make a Python @property a query synonym for a column.
A decorator version of :func:`~sqlalchemy.orm.synonym`. The function being
decorated is the 'descriptor'; the other arguments are passed through to
synonym()::
@synonym_for('col')
@property
def prop(self):
return 'special sauce'
The regular ``synonym()`` is also usable directly in a declarative setting
and may be convenient for read/write properties::
prop = synonym('col', descriptor=property(_read_prop, _write_prop))
"""
def decorate(fn):
return _orm_synonym(name, map_column=map_column, descriptor=fn)
return decorate
def comparable_using(comparator_factory):
"""Decorator, allow a Python @property to be used in query criteria.
This is a decorator front end to
:func:`~sqlalchemy.orm.comparable_property` that passes
through the comparator_factory and the function being decorated::
@comparable_using(MyComparatorType)
@property
def prop(self):
return 'special sauce'
The regular ``comparable_property()`` is also usable directly in a
declarative setting and may be convenient for read/write properties::
prop = comparable_property(MyComparatorType)
"""
def decorate(fn):
return comparable_property(comparator_factory, fn)
return decorate
class declared_attr(property):
"""Mark a class-level method as representing the definition of
a mapped property or special declarative member name.
.. note:: @declared_attr is available as
``sqlalchemy.util.classproperty`` for SQLAlchemy versions
0.6.2, 0.6.3, 0.6.4.
@declared_attr turns the attribute into a scalar-like
property that can be invoked from the uninstantiated class.
Declarative treats attributes specifically marked with
@declared_attr as returning a construct that is specific
to mapping or declarative table configuration. The name
of the attribute is that of what the non-dynamic version
of the attribute would be.
@declared_attr is more often than not applicable to mixins,
to define relationships that are to be applied to different
implementors of the class::
class ProvidesUser(object):
"A mixin that adds a 'user' relationship to classes."
@declared_attr
def user(cls):
return relationship("User")
It also can be applied to mapped classes, such as to provide
a "polymorphic" scheme for inheritance::
class Employee(Base):
id = Column(Integer, primary_key=True)
type = Column(String(50), nullable=False)
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
@declared_attr
def __mapper_args__(cls):
if cls.__name__ == 'Employee':
return {
"polymorphic_on":cls.type,
"polymorphic_identity":"Employee"
}
else:
return {"polymorphic_identity":cls.__name__}
"""
def __init__(self, fget, *arg, **kw):
super(declared_attr, self).__init__(fget, *arg, **kw)
self.__doc__ = fget.__doc__
def __get__(desc, self, cls):
return desc.fget(cls)
def _declarative_constructor(self, **kwargs):
"""A simple constructor that allows initialization from kwargs.
Sets attributes on the constructed instance using the names and
values in ``kwargs``.
Only keys that are present as
attributes of the instance's class are allowed. These could be,
for example, any mapped columns or relationships.
"""
cls_ = type(self)
for k in kwargs:
if not hasattr(cls_, k):
raise TypeError(
"%r is an invalid keyword argument for %s" %
(k, cls_.__name__))
setattr(self, k, kwargs[k])
_declarative_constructor.__name__ = '__init__'
def declarative_base(bind=None, metadata=None, mapper=None, cls=object,
name='Base', constructor=_declarative_constructor,
metaclass=DeclarativeMeta):
"""Construct a base class for declarative class definitions.
The new base class will be given a metaclass that produces
appropriate :class:`~sqlalchemy.schema.Table` objects and makes
the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the
information provided declaratively in the class and any subclasses
of the class.
:param bind: An optional
:class:`~sqlalchemy.engine.base.Connectable`, will be assigned
the ``bind`` attribute on the :class:`~sqlalchemy.MetaData`
instance.
:param metadata:
An optional :class:`~sqlalchemy.MetaData` instance. All
:class:`~sqlalchemy.schema.Table` objects implicitly declared by
subclasses of the base will share this MetaData. A MetaData instance
will be created if none is provided. The
:class:`~sqlalchemy.MetaData` instance will be available via the
`metadata` attribute of the generated declarative base class.
:param mapper:
An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will
be used to map subclasses to their Tables.
:param cls:
Defaults to :class:`object`. A type to use as the base for the generated
declarative base class. May be a class or tuple of classes.
:param name:
Defaults to ``Base``. The display name for the generated
class. Customizing this is not required, but can improve clarity in
tracebacks and debugging.
:param constructor:
Defaults to
:func:`~sqlalchemy.ext.declarative._declarative_constructor`, an
__init__ implementation that assigns \**kwargs for declared
fields and relationships to an instance. If ``None`` is supplied,
no __init__ will be provided and construction will fall back to
cls.__init__ by way of the normal Python semantics.
:param metaclass:
Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__
compatible callable to use as the meta type of the generated
declarative base class.
"""
lcl_metadata = metadata or MetaData()
if bind:
lcl_metadata.bind = bind
bases = not isinstance(cls, tuple) and (cls,) or cls
class_dict = dict(_decl_class_registry=dict(),
metadata=lcl_metadata)
if constructor:
class_dict['__init__'] = constructor
if mapper:
class_dict['__mapper_cls__'] = mapper
return metaclass(name, bases, class_dict)
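# Usage sketch (a minimal example; see also the module docstring above):
#
#     from sqlalchemy import create_engine
#     Base = declarative_base(bind=create_engine('sqlite://'), name='Base')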
def _undefer_column_name(key, column):
if column.key is None:
column.key = key
if column.name is None:
column.name = key
class ConcreteBase(object):
"""A helper class for 'concrete' declarative mappings.
:class:`.ConcreteBase` will use the :func:`.polymorphic_union`
function automatically, against all tables mapped as a subclass
to this class. The function is called via the
``__declare_last__()`` function, which is essentially
a hook for the :func:`.MapperEvents.after_configured` event.
:class:`.ConcreteBase` produces a mapped
table for the class itself. Compare to :class:`.AbstractConcreteBase`,
which does not.
Example::
from sqlalchemy.ext.declarative import ConcreteBase
class Employee(ConcreteBase, Base):
__tablename__ = 'employee'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee',
'concrete':True}
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
"""
@classmethod
def _create_polymorphic_union(cls, mappers):
return polymorphic_union(dict(
(mapper.polymorphic_identity, mapper.local_table)
for mapper in mappers
), 'type', 'pjoin')
@classmethod
def __declare_last__(cls):
m = cls.__mapper__
if m.with_polymorphic:
return
mappers = list(m.self_and_descendants)
pjoin = cls._create_polymorphic_union(mappers)
m._set_with_polymorphic(("*",pjoin))
m._set_polymorphic_on(pjoin.c.type)
class AbstractConcreteBase(ConcreteBase):
"""A helper class for 'concrete' declarative mappings.
:class:`.AbstractConcreteBase` will use the :func:`.polymorphic_union`
function automatically, against all tables mapped as a subclass
to this class. The function is called via the
``__declare_last__()`` function, which is essentially
a hook for the :func:`.MapperEvents.after_configured` event.
:class:`.AbstractConcreteBase` does not produce a mapped
table for the class itself. Compare to :class:`.ConcreteBase`,
which does.
Example::
from sqlalchemy.ext.declarative import AbstractConcreteBase
class Employee(AbstractConcreteBase, Base):
pass
class Manager(Employee):
__tablename__ = 'manager'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'concrete':True}
"""
__abstract__ = True
@classmethod
def __declare_last__(cls):
if hasattr(cls, '__mapper__'):
return
# can't rely on 'self_and_descendants' here
# since technically an immediate subclass
# might not be mapped, but one of its
# subclasses may be.
mappers = []
stack = list(cls.__subclasses__())
while stack:
klass = stack.pop()
stack.extend(klass.__subclasses__())
mn = _mapper_or_none(klass)
if mn is not None:
mappers.append(mn)
pjoin = cls._create_polymorphic_union(mappers)
cls.__mapper__ = m = mapper(cls, pjoin, polymorphic_on=pjoin.c.type)
for scls in cls.__subclasses__():
sm = _mapper_or_none(scls)
# guard against unmapped subclasses, for which _mapper_or_none returns None
if sm is not None and sm.concrete and cls in scls.__bases__:
sm._set_concrete_base(m)
|
mit
|
DLR-SC/DataFinder
|
src/datafinder/persistence/adapters/filesystem/util.py
|
1
|
10939
|
# pylint: disable=E1101,E0611,F0401
# E1101: Pylint cannot resolve specific win32 modules.
# E0611: "shell" exists in win32com but Pylint cannot detect it.
# F0401: "win32com.shell" exists but Pylint cannot import.
#
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module provides some utility classes / functions for platform-independent
file system access.
"""
_WIN32_PLATFORM = "win32"
import logging
import os
import sys
if sys.platform == _WIN32_PLATFORM:
import pythoncom
import pywintypes
import win32api
from win32com.shell import shell
import win32netcon
import win32wnet
from datafinder.persistence.error import PersistenceError
__version__ = "$Revision-Id:$"
_log = logging.getLogger(None)
class ShortCut(object):
""" Implements platform-independent shortcut / symbolic link implementation. """
_WINDOWS_LINK_EXTENSION = ".lnk"
def __init__(self, destination):
""" Constructor. """
self._destination = destination
def create(self, source):
""" Creates the shortcut / symbolic link. """
if sys.platform == _WIN32_PLATFORM:
try:
sh = pythoncom.CoCreateInstance(shell.CLSID_ShellLink, None, \
pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink)
persist = sh.QueryInterface(pythoncom.IID_IPersistFile)
sh.SetPath(source)
persist.Save(self._destination, 1)
except pywintypes.com_error, error:
errorMessage = "Cannot create symbolic link '%s'. Reason: '%s'." % (self._destination, error[0])
raise PersistenceError(errorMessage)
else:
try:
os.symlink(source, self._destination)
except OSError, error:
reason = os.strerror(error.errno)
errorMessage = "Cannot create symbolic link '%s'. Reason: '%s'" % (self._destination, reason)
raise PersistenceError(errorMessage)
def resolve(self):
""" Resolves the link. """
if sys.platform == _WIN32_PLATFORM:
try:
sh = pythoncom.CoCreateInstance(shell.CLSID_ShellLink, None, \
pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink)
persist = sh.QueryInterface(pythoncom.IID_IPersistFile)
persist.Load(self._destination)
return sh.GetPath(shell.SLGP_UNCPRIORITY)[0]
except pywintypes.com_error, error:
errorMessage = "Cannot resolve symbolic link '%s'. Reason: '%s'." % (self._destination, error[0])
raise PersistenceError(errorMessage)
else:
try:
return os.readlink(self._destination)
except OSError, error:
reason = os.strerror(error.errno)
errorMessage = "Cannot resolve symbolic link '%s'. Reason: '%s'" % (self._destination, reason)
raise PersistenceError(errorMessage)
def isLink(self):
""" Figures out if the associated path is a symbolic link. """
if sys.platform == _WIN32_PLATFORM:
result = False
if self._destination.endswith(self._WINDOWS_LINK_EXTENSION):
try:
sh = pythoncom.CoCreateInstance(shell.CLSID_ShellLink, None, \
pythoncom.CLSCTX_INPROC_SERVER, shell.IID_IShellLink)
persist = sh.QueryInterface(pythoncom.IID_IPersistFile)
persist.Load(self._destination)
result = True
except pywintypes.com_error:
result = False
return result
else:
return os.path.islink(self._destination)
def createShortcut(path):
""" Creates a platform-specific shortcut representation. """
return ShortCut(path)
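# Usage sketch (paths are hypothetical; on win32 the destination should
# carry the ".lnk" extension):
#
#     link = createShortcut("/tmp/example.lnk")
#     link.create("/tmp/target.txt")
#     print link.resolve()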
def isWindowsRootPath(path):
""" Checks whether the given path corresponds to the virtual root directory on WIndows. """
isWindowsRootPath_ = False
if path == "/" and sys.platform == _WIN32_PLATFORM:
isWindowsRootPath_ = True
return isWindowsRootPath_
def listDirectory(directoryPath):
""" Lists the given directory. """
if directoryPath == "/" and sys.platform == _WIN32_PLATFORM:
result = [driveLetter for driveLetter in win32api.GetLogicalDriveStrings().split("\000") if driveLetter]
else:
result = list()
if directoryPath.endswith(":") and sys.platform == _WIN32_PLATFORM: # it is a drive letter
directoryPath += "\\" # Required to fix issue with os.listdir / os.path.join in Python 2.7.10
for path in os.listdir(directoryPath):
path = os.path.join(directoryPath, path)
decodedPath = _binaryToUnicodeFilePathDecoding(path)
if not decodedPath is None:
result.append(decodedPath)
else:
_log.debug("Unable to decode path string. Ignoring it.")
return result
def _binaryToUnicodeFilePathDecoding(binaryString):
"""
Decodes the given binary string into a unicode string.
The primary use is for decoding file system paths.
In order to perform the decoding, the default file system encoding
is used. If it fails on non-Windows operating systems, the Windows
encoding "cp437" is tried as a fallback. This encoding is used when
a file name is written via a Samba share from a Windows client.
If the given string is already a unicode string, it is returned
unchanged and no conversion is attempted.
@param binaryString: String to decode.
@type binaryString: C{string}
@return: Unicode representation of the binary string.
@rtype: C{unicode}
"""
fileSystemEncoding = sys.getfilesystemencoding()
if fileSystemEncoding is None:
fileSystemEncoding = "utf-8"
if not isinstance(binaryString, unicode):
try:
unicodeString = binaryString.decode(fileSystemEncoding)
except UnicodeDecodeError:
if sys.platform != "win32":
try:
unicodeString = binaryString.decode("cp437")
except UnicodeDecodeError:
return None
else:
unicodeString = binaryString
return unicodeString
def connectWindowsShare(share, username, password):
"""
Connects a Windows share.
@param share: Windows share in UNC path representation.
@type share: C{unicode}
@raise PersistenceError: raised if the connection to an SMB share fails
"""
if sys.platform == _WIN32_PLATFORM:
components = os.path.normpath(share).split("\\")
if len(components) < 3:
raise PersistenceError("Wrong file share configuration information!")
else:
if not os.path.exists(share):
try:
win32wnet.WNetAddConnection2(win32netcon.RESOURCETYPE_DISK,
None, #unused_drive,
share,
None,
username,
password,
0)
except pywintypes.error, error:
raise PersistenceError("Could not connect to '%s'.\nReason: %s" % (share, error[2]))
class ItemIdentifierMapper(object):
""" Maps identifiers. """
def __init__(self, basePath):
"""
Constructor.
@param basePath: Root path of the file system.
@type basePath: C{unicode}
"""
self.__basePath = basePath
def mapIdentifier(self, identifier):
"""
Maps the identifier to persistence representation.
"""
mappedId = os.path.join(self.__basePath, identifier[1:])
if mappedId.startswith("/"): # Ensures correct format on WIN32 when addressing a drive letter
driveLetter, _ = os.path.splitdrive(mappedId[1:])
if len(driveLetter) > 0:
mappedId = mappedId[1:]
if mappedId == driveLetter:
mappedId += "/"
return mappedId
    def mapPersistenceIdentifier(self, persistenceId):
        """
        Maps the persistence identifier to the path used to address the items logically.
        """
        mappedId = persistenceId
        if persistenceId.startswith(self.__basePath):
            mappedId = persistenceId[len(self.__basePath):]
if not mappedId.startswith("/"):
mappedId = "/" + mappedId
mappedId = mappedId.replace("\\", "/")
if mappedId.endswith("/"):
mappedId = mappedId[:-1]
return mappedId
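# Illustrative sketch, not part of the original module: a round trip between a
# logical identifier and its persistence path. The base path is an assumption
# for demonstration; the asserts hold on a POSIX file system.
def _demoIdentifierMapping():
    mapper = ItemIdentifierMapper("/data/store")
    persistenceId = mapper.mapIdentifier("/project/file.txt")
    assert persistenceId == "/data/store/project/file.txt"
    assert mapper.mapPersistenceIdentifier(persistenceId) == "/project/file.txt"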
|
bsd-3-clause
|
ntymtsiv/CloudFerry
|
cloud/cloud_ferry.py
|
11
|
1457
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CloudFerry(object):
def __new__(cls, config):
if cls != CloudFerry:
# Call is already for a subclass, so pass it through
return super(CloudFerry, cls).__new__(cls)
if (config.src.type == 'os') and (config.dst.type == 'os'):
            # Maybe it would be better to provide a new config param such as
            # 'migration_type'? Or, as Alex mentioned, something like paste.ini,
            # and specify it there for the first time? It could hold class
            # names directly or any human-readable mapping, and some day
            # something like auto-discovery could be implemented, if needed.
return __import__('cloud').os2os.OS2OSFerry(config)
return super(CloudFerry, cls).__new__(cls)
def __init__(self, config):
self.config = config
def migrate(self, scenario=None):
pass
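# Illustrative sketch, not part of the original module: the __new__ override
# above acts as a factory. The config below is a hypothetical stand-in; with a
# non-'os' source type the call falls through to a plain CloudFerry instance.
class _FakeEndpoint(object):
    def __init__(self, cloud_type):
        self.type = cloud_type
class _FakeConfig(object):
    def __init__(self):
        self.src = _FakeEndpoint('vmware')
        self.dst = _FakeEndpoint('os')
def _demo_factory():
    ferry = CloudFerry(_FakeConfig())
    assert type(ferry) is CloudFerry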
|
apache-2.0
|
vfulco/PyPDF2
|
Sample_Code/basic_features.py
|
7
|
1523
|
from PyPDF2 import PdfFileWriter, PdfFileReader
output = PdfFileWriter()
input1 = PdfFileReader(open("document1.pdf", "rb"))
# print how many pages input1 has:
print "document1.pdf has %d pages." % input1.getNumPages()
# add page 1 from input1 to output document, unchanged
output.addPage(input1.getPage(0))
# add page 2 from input1, but rotated clockwise 90 degrees
output.addPage(input1.getPage(1).rotateClockwise(90))
# add page 3 from input1, rotated the other way:
output.addPage(input1.getPage(2).rotateCounterClockwise(90))
# alt: output.addPage(input1.getPage(2).rotateClockwise(270))
# add page 4 from input1, but first add a watermark from another PDF:
page4 = input1.getPage(3)
watermark = PdfFileReader(open("watermark.pdf", "rb"))
page4.mergePage(watermark.getPage(0))
output.addPage(page4)
# add page 5 from input1, but crop it to half size:
page5 = input1.getPage(4)
page5.mediaBox.upperRight = (
page5.mediaBox.getUpperRight_x() / 2,
page5.mediaBox.getUpperRight_y() / 2
)
output.addPage(page5)
# add some JavaScript to launch the print window when this PDF is opened.
# The password dialog may prevent the print dialog from being shown;
# if that's the case, comment out the encryption lines below to try this out.
output.addJS("this.print({bUI:true,bSilent:false,bShrinkToFit:true});")
# encrypt your new PDF and add a password
password = "secret"
output.encrypt(password)
# finally, write "output" to document-output.pdf
outputStream = open("PyPDF2-output.pdf", "wb")
output.write(outputStream)
outputStream.close()
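# Illustrative follow-up, assuming PyPDF2 1.x where PdfFileReader exposes
# isEncrypted and decrypt(): reading the encrypted output back requires the
# same password before any page can be accessed.
readback = PdfFileReader(open("PyPDF2-output.pdf", "rb"))
if readback.isEncrypted:
    readback.decrypt(password)
print "PyPDF2-output.pdf has %d pages." % readback.getNumPages()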
|
bsd-3-clause
|
bsmrstu-warriors/Moytri--The-Drone-Aider
|
Lib/distutils/cygwinccompiler.py
|
132
|
17270
|
"""distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate a import library for its dll
# - create a def-file for python??.dll
# - create a import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
# --export-all-symbols because it didn't work reliably in some
# tested configurations. And because other Windows compilers also
# need their symbols specified, this is no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead of dllwrap doesn't work without -static because
# it tries to link against dlls instead of their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries;
# this is the Windows standard, and the dlls normally do not contain
# the necessary symbols.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
def get_msvcr():
"""Include the appropriate MSVC runtime library if Python was built
with MSVC 7.0 or later.
"""
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
if msc_ver == '1300':
# MSVC 7.0
return ['msvcr70']
elif msc_ver == '1310':
# MSVC 7.1
return ['msvcr71']
elif msc_ver == '1400':
# VS2005 / MSVC 8.0
return ['msvcr80']
elif msc_ver == '1500':
# VS2008 / MSVC 9.0
return ['msvcr90']
else:
raise ValueError("Unknown MS Compiler version %s " % msc_ver)
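# Illustrative sketch, not part of the original module: get_msvcr() slices the
# 'MSC v.XXXX' marker out of sys.version. The sample string is an assumption
# showing what an MSVC 9.0 (VS2008) build of Python reports.
def _demo_get_msvcr():
    sample = '2.7.10 (default, May 23 2015) [MSC v.1500 32 bit (Intel)]'
    msc_pos = sample.find('MSC v.')
    msc_ver = sample[msc_pos+6:msc_pos+10]
    assert msc_ver == '1500'    # get_msvcr() would return ['msvcr90']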
class CygwinCCompiler (UnixCCompiler):
compiler_type = 'cygwin'
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".dll"
static_lib_format = "lib%s%s"
shared_lib_format = "%s%s"
exe_extension = ".exe"
def __init__ (self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__ (self, verbose, dry_run, force)
(status, details) = check_config_h()
self.debug_print("Python's GCC status: %s (details: %s)" %
(status, details))
if status is not CONFIG_H_OK:
self.warn(
"Python's pyconfig.h doesn't seem to support your compiler. "
"Reason: %s. "
"Compiling may fail because of undefined preprocessor macros."
% details)
self.gcc_version, self.ld_version, self.dllwrap_version = \
get_versions()
self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
(self.gcc_version,
self.ld_version,
self.dllwrap_version) )
# ld_version >= "2.10.90" and < "2.13" should also be able to use
# gcc -mdll instead of dllwrap
        # Older dllwraps had their own version numbers; newer ones use the
        # same numbering as the rest of binutils (also ld).
        # dllwrap 2.10.90 is buggy
if self.ld_version >= "2.10.90":
self.linker_dll = "gcc"
else:
self.linker_dll = "dllwrap"
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# Hard-code GCC because that's what this is all about.
# XXX optimization, warnings etc. should be customizable.
self.set_executables(compiler='gcc -mcygwin -O -Wall',
compiler_so='gcc -mcygwin -mdll -O -Wall',
compiler_cxx='g++ -mcygwin -O -Wall',
linker_exe='gcc -mcygwin',
linker_so=('%s -mcygwin %s' %
(self.linker_dll, shared_option)))
# cygwin and mingw32 need different sets of libraries
if self.gcc_version == "2.91.57":
            # cygwin shouldn't need msvcrt, but without it the dlls will crash
            # (gcc version 2.91.57) -- perhaps something about initialization
self.dll_libraries=["msvcrt"]
self.warn(
"Consider upgrading to a newer version of gcc")
else:
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
# __init__ ()
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
if ext == '.rc' or ext == '.res':
# gcc needs '.res' and '.rc' compiled to object files !!!
try:
self.spawn(["windres", "-i", src, "-o", obj])
except DistutilsExecError, msg:
raise CompileError, msg
else: # for other files use the C-compiler
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError, msg
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# use separate copies, so we can modify the lists
extra_preargs = copy.copy(extra_preargs or [])
libraries = copy.copy(libraries or [])
objects = copy.copy(objects or [])
# Additional libraries
libraries.extend(self.dll_libraries)
# handle export symbols by creating a def-file
# with executables this only works with gcc/ld as linker
if ((export_symbols is not None) and
(target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably be better to check whether we really need
            # this, but for that we would have to copy in some unchanged parts
            # of UnixCCompiler, and this is not what we want.)
# we want to put some files in the same directory as the
# object files are, build_temp doesn't help much
# where are the object files
temp_dir = os.path.dirname(objects[0])
# name of dll to give the helper files the same base name
(dll_name, dll_extension) = os.path.splitext(
os.path.basename(output_filename))
# generate the filenames for these files
def_file = os.path.join(temp_dir, dll_name + ".def")
lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
# Generate .def file
contents = [
"LIBRARY %s" % os.path.basename(output_filename),
"EXPORTS"]
for sym in export_symbols:
contents.append(sym)
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
            # next add options for the def-file and for creating import
            # libraries; dllwrap uses different options than gcc/ld
if self.linker_dll == "dllwrap":
extra_preargs.extend(["--output-lib", lib_file])
# for dllwrap we have to use a special option
extra_preargs.extend(["--def", def_file])
# we use gcc/ld here and can be sure ld is >= 2.9.10
else:
# doesn't work: bfd_close build\...\libfoo.a: Invalid operation
#extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
                # for gcc/ld the def-file is specified like any other object file
objects.append(def_file)
#end: if ((export_symbols is not None) and
# (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
        # whoever wants symbols and a many times larger output file
        # should explicitly switch debug mode on;
        # otherwise we let dllwrap/ld strip the output file
# (On my machine: 10KB < stripped_file < ??100KB
# unstripped_file = stripped_file + XXX KB
# ( XXX=254 for a typical python extension))
if not debug:
extra_preargs.append("-s")
UnixCCompiler.link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, # export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
# link ()
# -- Miscellaneous methods -----------------------------------------
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res' or ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
# class CygwinCCompiler
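# Illustrative sketch, not part of the original module: the naming rule in
# object_filenames() keeps the '.rc'/'.res' extension and appends '.o', while
# ordinary sources just swap their extension for '.o'. The helper below
# replicates that rule stand-alone (strip_dir/output_dir omitted).
def _demo_object_naming():
    def _name(src):
        base, ext = os.path.splitext(os.path.normcase(src))
        return base + ext + '.o' if ext in ('.rc', '.res') else base + '.o'
    assert _name('icon.rc') == 'icon.rc.o'
    assert _name('module.c') == 'module.o'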
# the same as cygwin plus some additional parameters
class Mingw32CCompiler (CygwinCCompiler):
compiler_type = 'mingw32'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CygwinCCompiler.__init__ (self, verbose, dry_run, force)
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# A real mingw32 doesn't need to specify a different entry point,
# but cygwin 2.91.57 in no-cygwin-mode needs it.
if self.gcc_version <= "2.91.57":
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
self.set_executables(compiler='gcc -mno-cygwin -O -Wall',
compiler_so='gcc -mno-cygwin -mdll -O -Wall',
compiler_cxx='g++ -mno-cygwin -O -Wall',
linker_exe='gcc -mno-cygwin',
linker_so='%s -mno-cygwin %s %s'
% (self.linker_dll, shared_option,
entry_point))
# Maybe we should also append -mthreads, but then the finished
# dlls need another dll (mingwm10.dll see Mingw32 docs)
# (-mthreads: Support thread-safe exception handling on `Mingw32')
# no additional libraries needed
self.dll_libraries=[]
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
# __init__ ()
# class Mingw32CCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using an unmodified
# version.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
"""Check if the current Python installation (specifically, pyconfig.h)
appears amenable to building extensions with GCC. Returns a tuple
(status, details), where 'status' is one of the following constants:
CONFIG_H_OK
all is well, go ahead and compile
CONFIG_H_NOTOK
doesn't look good
CONFIG_H_UNCERTAIN
not sure -- unable to read pyconfig.h
'details' is a human-readable string explaining the situation.
Note there are two ways to conclude "OK": either 'sys.version' contains
the string "GCC" (implying that this Python was built with GCC), or the
installed "pyconfig.h" contains the string "__GNUC__".
"""
# XXX since this function also checks sys.version, it's not strictly a
# "pyconfig.h" check -- should probably be renamed...
from distutils import sysconfig
import string
# if sys.version contains GCC then python was compiled with
# GCC, and the pyconfig.h file should be OK
if string.find(sys.version,"GCC") >= 0:
return (CONFIG_H_OK, "sys.version mentions 'GCC'")
fn = sysconfig.get_config_h_filename()
try:
        # It would probably be better to read single lines to search,
        # but we do this only once and it is fast enough.
f = open(fn)
try:
s = f.read()
finally:
f.close()
except IOError, exc:
# if we can't read this file, we cannot say it is wrong
# the compiler will complain later about this file as missing
return (CONFIG_H_UNCERTAIN,
"couldn't read '%s': %s" % (fn, exc.strerror))
else:
# "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
if string.find(s,"__GNUC__") >= 0:
return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
else:
return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
""" Try to find out the versions of gcc, ld and dllwrap.
If not possible it returns None for it.
"""
from distutils.version import LooseVersion
from distutils.spawn import find_executable
import re
gcc_exe = find_executable('gcc')
if gcc_exe:
out = os.popen(gcc_exe + ' -dumpversion','r')
out_string = out.read()
out.close()
result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
if result:
gcc_version = LooseVersion(result.group(1))
else:
gcc_version = None
else:
gcc_version = None
ld_exe = find_executable('ld')
if ld_exe:
out = os.popen(ld_exe + ' -v','r')
out_string = out.read()
out.close()
result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
if result:
ld_version = LooseVersion(result.group(1))
else:
ld_version = None
else:
ld_version = None
dllwrap_exe = find_executable('dllwrap')
if dllwrap_exe:
out = os.popen(dllwrap_exe + ' --version','r')
out_string = out.read()
out.close()
result = re.search(' (\d+\.\d+(\.\d+)*)',out_string)
if result:
dllwrap_version = LooseVersion(result.group(1))
else:
dllwrap_version = None
else:
dllwrap_version = None
return (gcc_version, ld_version, dllwrap_version)
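# Illustrative usage sketch: each element of the tuple is a LooseVersion or
# None, as described in the docstring above, so callers can compare against
# strings like "2.13" the way CygwinCCompiler.__init__ does.
if __name__ == '__main__':
    gcc, ld, dllwrap = get_versions()
    print "gcc=%s ld=%s dllwrap=%s" % (gcc, ld, dllwrap)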
|
gpl-3.0
|
punalpatel/st2
|
st2common/tests/unit/test_db_liveaction.py
|
6
|
6203
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.db.notification import NotificationSchema, NotificationSubSchema
from st2common.persistence.liveaction import LiveAction
from st2common.transport.publishers import PoolPublisher
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.util import date as date_utils
from st2tests import DbTestCase
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
class LiveActionModelTest(DbTestCase):
def test_liveaction_crud_no_notify(self):
created = LiveActionDB()
created.action = 'core.local'
created.description = ''
created.status = 'running'
created.parameters = {}
saved = LiveActionModelTest._save_liveaction(created)
retrieved = LiveAction.get_by_id(saved.id)
self.assertEqual(saved.action, retrieved.action,
                         'Same LiveAction was not returned.')
self.assertEqual(retrieved.notify, None)
# Test update
self.assertTrue(retrieved.end_timestamp is None)
retrieved.end_timestamp = date_utils.get_datetime_utc_now()
updated = LiveAction.add_or_update(retrieved)
self.assertTrue(updated.end_timestamp == retrieved.end_timestamp)
# Test delete
LiveActionModelTest._delete([retrieved])
try:
retrieved = LiveAction.get_by_id(saved.id)
except StackStormDBObjectNotFoundError:
retrieved = None
self.assertIsNone(retrieved, 'managed to retrieve after failure.')
def test_liveaction_create_with_notify_on_complete_only(self):
created = LiveActionDB()
created.action = 'core.local'
created.description = ''
created.status = 'running'
created.parameters = {}
notify_db = NotificationSchema()
notify_sub_schema = NotificationSubSchema()
notify_sub_schema.message = 'Action complete.'
notify_sub_schema.data = {
'foo': 'bar',
'bar': 1,
'baz': {'k1': 'v1'}
}
notify_db.on_complete = notify_sub_schema
created.notify = notify_db
saved = LiveActionModelTest._save_liveaction(created)
retrieved = LiveAction.get_by_id(saved.id)
self.assertEqual(saved.action, retrieved.action,
                         'Same LiveAction was not returned.')
# Assert notify settings saved are right.
self.assertEqual(notify_sub_schema.message, retrieved.notify.on_complete.message)
self.assertDictEqual(notify_sub_schema.data, retrieved.notify.on_complete.data)
self.assertListEqual(notify_sub_schema.routes, retrieved.notify.on_complete.routes)
self.assertEqual(retrieved.notify.on_success, None)
self.assertEqual(retrieved.notify.on_failure, None)
def test_liveaction_create_with_notify_on_success_only(self):
created = LiveActionDB()
created.action = 'core.local'
created.description = ''
created.status = 'running'
created.parameters = {}
notify_db = NotificationSchema()
notify_sub_schema = NotificationSubSchema()
notify_sub_schema.message = 'Action succeeded.'
notify_sub_schema.data = {
'foo': 'bar',
'bar': 1,
'baz': {'k1': 'v1'}
}
notify_db.on_success = notify_sub_schema
created.notify = notify_db
saved = LiveActionModelTest._save_liveaction(created)
retrieved = LiveAction.get_by_id(saved.id)
self.assertEqual(saved.action, retrieved.action,
                         'Same LiveAction was not returned.')
# Assert notify settings saved are right.
self.assertEqual(notify_sub_schema.message,
retrieved.notify.on_success.message)
self.assertDictEqual(notify_sub_schema.data, retrieved.notify.on_success.data)
self.assertListEqual(notify_sub_schema.routes, retrieved.notify.on_success.routes)
self.assertEqual(retrieved.notify.on_failure, None)
self.assertEqual(retrieved.notify.on_complete, None)
def test_liveaction_create_with_notify_both_on_success_and_on_error(self):
created = LiveActionDB()
created.action = 'core.local'
created.description = ''
created.status = 'running'
created.parameters = {}
on_success = NotificationSubSchema(message='Action succeeded.')
on_failure = NotificationSubSchema(message='Action failed.')
created.notify = NotificationSchema(on_success=on_success,
on_failure=on_failure)
saved = LiveActionModelTest._save_liveaction(created)
retrieved = LiveAction.get_by_id(saved.id)
self.assertEqual(saved.action, retrieved.action,
                         'Same LiveAction was not returned.')
# Assert notify settings saved are right.
self.assertEqual(on_success.message, retrieved.notify.on_success.message)
self.assertEqual(on_failure.message, retrieved.notify.on_failure.message)
self.assertEqual(retrieved.notify.on_complete, None)
@staticmethod
def _save_liveaction(liveaction):
return LiveAction.add_or_update(liveaction)
@staticmethod
def _delete(model_objects):
for model_object in model_objects:
model_object.delete()
|
apache-2.0
|
muzena/deluge
|
deluge/plugins/Feeder/deluge/plugins/feeder/core.py
|
8
|
16082
|
#
# core.py
#
# Copyright (C) 2008-2009 Fredrik Eriksson <[email protected]>
# Copyright (C) 2009 David Mohr <[email protected]>
#
# Basic plugin template created by:
# Copyright (C) 2008 Martijn Voncken <[email protected]>
# Copyright (C) 2007-2009 Andrew Resch <[email protected]>
#
# Deluge is free software.
#
# You may redistribute it and/or modify it under the terms of the
# GNU General Public License, as published by the Free Software
# Foundation; either version 3 of the License, or (at your option)
# any later version.
#
# deluge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with deluge. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
import logging
import feedparser # for parsing rss feeds
import threading # for threaded updates
import re # for regular expressions
import string # for string.lower, used when sorting feed and filter names
from twisted.internet.task import LoopingCall
from deluge.plugins.pluginbase import CorePluginBase
import deluge.component as component
import deluge.configmanager
from deluge.core.rpcserver import export
log = logging.getLogger(__name__)
DEFAULT_PREFS = {
"feeds": {},
"filters": {},
"updatetime": 15,
"history": []
}
# Helper classes
class Feed:
"""
    Class for the Feed object (containing feed configurations)
"""
def __init__(self):
self.url = ""
self.cookies = {}
self.updatetime = 15
def get_config(self):
try:
tmp = self.cookies
except Exception, e:
log.debug("Old feed without cookies... updating")
self.cookies = {}
return {'url': self.url, 'updatetime': self.updatetime, 'cookies': self.cookies}
def set_config(self, config):
self.url = config['url']
self.updatetime = config['updatetime']
self.cookies = config['cookies']
class Filter:
"""
Class for the Filter object (containing filter configurations)
"""
def __init__(self):
self.regex = ""
self.feeds = [] #TODO activate filter per feed
self.all_feeds = True
self.active = True
# by default, set the configuration to match
# the per-torrent settings in deluge
def_conf = component.get("Core").get_config()
self.max_download_speed = def_conf['max_download_speed_per_torrent']
self.max_upload_speed = def_conf['max_upload_speed_per_torrent']
self.max_connections = def_conf['max_connections_per_torrent']
self.max_upload_slots = def_conf['max_upload_slots_per_torrent']
self.prioritize_first_last_pieces = def_conf['prioritize_first_last_pieces']
self.auto_managed = def_conf['auto_managed']
self.download_location = def_conf['download_location']
self.stop_at_ratio = def_conf['stop_seed_at_ratio']
self.stop_ratio = def_conf['stop_seed_ratio']
self.remove_at_ratio = def_conf['remove_seed_at_ratio']
def get_config(self):
def_conf = component.get("Core").get_config()
try:
tmp = self.active
except Exception, e:
log.debug("Old filter detected (pre 0.3), updating...")
self.active = True
try:
tmp = self.stop_at_ratio
tmp = self.stop_ratio
tmp = self.remove_at_ratio
except:
log.debug("Old filter detected (pre 0.4), updating...")
self.stop_at_ratio = def_conf['stop_seed_at_ratio']
self.stop_ratio = def_conf['stop_seed_ratio']
self.remove_at_ratio = def_conf['remove_seed_at_ratio']
conf = {
'regex': self.regex,
'feeds': self.feeds,
'all_feeds': self.all_feeds,
'active' : self.active,
'max_download_speed': self.max_download_speed,
'max_upload_speed': self.max_upload_speed,
'max_connections': self.max_connections,
'max_upload_slots': self.max_upload_slots,
'prioritize_first_last_pieces': self.prioritize_first_last_pieces,
'auto_managed': self.auto_managed,
'download_location':self.download_location,
'remove_at_ratio':self.remove_at_ratio,
'stop_ratio': self.stop_ratio,
'stop_at_ratio': self.stop_at_ratio }
return conf
def set_config(self, conf):
self.regex = conf['regex']
self.feeds = conf['feeds']
self.all_feeds = conf['all_feeds']
self.active = conf['active']
self.max_download_speed = int(conf['max_download_speed'])
self.max_upload_speed = int(conf['max_upload_speed'])
self.max_connections = int(conf['max_connections'])
self.max_upload_slots = int(conf['max_upload_slots'])
self.prioritize_first_last_pieces = conf['prioritize_first_last_pieces']
self.auto_managed = conf['auto_managed']
self.download_location = conf['download_location']
self.remove_at_ratio = conf['remove_at_ratio']
self.stop_ratio = float(conf['stop_ratio'])
self.stop_at_ratio = conf['stop_at_ratio']
class Core(CorePluginBase):
def enable(self):
self.config = deluge.configmanager.ConfigManager("feeder.conf", DEFAULT_PREFS)
self.feeds = {}
self.timers = {}
self.history = self.config['history']
self.time = 0
# Setting default timer to configured update time
for feed in self.config['feeds']:
self.timers[feed] = LoopingCall(self.update_feed, feed)
self.timers[feed].start( self.config['feeds'][feed].updatetime * 60)
def disable(self):
self.config['history'] = self.history
self.config.save()
def update(self):
pass
#=================Exported functions==================
@export
def set_config(self, config):
"""sets the config dictionary"""
for key in config.keys():
self.config[key] = config[key]
self.config.save()
####################Configuration Getters##################
@export
def get_config(self):
"""returns the config dictionary"""
return self.config.config
@export
def get_feed_config(self, feedname):
"""Returns configuration for a feed"""
return self.config['feeds'][feedname].get_config()
@export
def get_filter_config(self, filtername):
"""Returns a configuration for a filter"""
return self.config['filters'][filtername].get_config()
####################Information Getters####################
@export
def get_feeds(self):
"""Returns a list of the configured feeds"""
feeds = []
for feedname in self.config['feeds']:
feeds.append(feedname)
feeds.sort(key=string.lower)
return feeds
@export
def get_filters(self):
"""Returns a list of all available filters"""
filters = []
for filter in self.config['filters']:
filters.append(filter)
filters.sort(key=string.lower)
return filters
@export
def get_items(self, feedname):
"""Returns a dictionary with feedname:link"""
try:
items = {}
feed = self.feeds[feedname]
for entry in feed['entries']:
items[entry.title] = entry.link
except Exception, e:
items = {}
log.warning("Feed '%s' not loaded", feedname)
return items
@export
def test_filter(self, regex):
filters = { "to_test":Filter() }
conf = filters["to_test"].get_config()
conf["regex"] = regex
filters["to_test"].set_config(conf)
hits = {}
for feed in self.feeds:
hits.update(self.run_filters(feed, filters, test=True))
return hits
@export
def add_feed(self, config):
"""adds/updates a feed and, for whatever reason, sets the default timeout"""
# save the feedname and remove it from the config
feedname = config['name']
del config['name']
# check if the feed already exists and save config
try:
conf = self.config['feeds'][feedname].get_config()
del self.config['feeds'][feedname]
except Exception, e:
conf = {}
# update configuration
for var in config:
conf[var] = config[var]
# save as default update time
try:
self.config['updatetime'] = config['updatetime']
except Exception, e:
log.warning("updatetime not set when adding feed %s", feedname)
# Create the new feed
newfeed = Feed()
newfeed.set_config(conf)
# Add a timer (with default timer for now, since we can't get ttl just yet)...
self.timers[feedname] = LoopingCall(self.update_feed, feedname)
# Save the new feed
self.config['feeds'].update({feedname: newfeed })
self.config.save()
# Start the timeout, which will also update the new feed
self.timers[feedname].start(newfeed.updatetime * 60)
@export
def remove_feed(self, feedname):
"""Remove a feed"""
if self.feeds.has_key(feedname): # Check if we have the feed saved and remove it
del self.feeds[feedname]
if self.timers.has_key(feedname): # Check if we have a timer for this feed and remove it
self.timers[feedname].stop()
del self.timers[feedname]
if self.config['feeds'].has_key(feedname): # Check if we have the feed in the configuration and remove it
del self.config['feeds'][feedname]
self.config.save()
@export
def add_filter(self, name):
"""Adds a new filter to the configuration"""
if not self.config['filters'].has_key(name): # we don't want to add a filter that already exists
self.config['filters'][name] = Filter()
self.config.save()
@export
def set_filter_config(self, filtername, conf):
"""Changes the options for a filter"""
oldconf = self.config['filters'][filtername].get_config()
for item in conf:
oldconf[item] = conf[item]
self.config['filters'][filtername].set_config(oldconf)
self.config.save()
for feed in self.config['feeds']: # we would like to check if the filter now matches something new
self.run_filters(feed)
@export
def remove_filter(self, name):
"""Removes a filter"""
if self.config['filters'].has_key(name): # Can't remove a filter that doesn't exists
del self.config['filters'][name]
self.config.save()
#=================Internal functions================
def update_feed(self, feedname):
"""Start a thread to update a single feed"""
threading.Thread(
target=self.update_feed_thread,
args=(self.on_feed_updated, feedname)).start()
        # Need to return True so the timer is not destroyed...
return True
def update_feed_thread(self, callback, feedname):
"""updates a feed"""
feed = self.config['feeds'][feedname]
try:
self.feeds[feedname] = feedparser.parse(feed.url)
except Exception, e:
log.warning("Error parsing feed %s: %s", feedname, e)
else:
callback(feedname)
def on_feed_updated(self, feedname):
"""Run stuff when a feed has been updated"""
# Not all feeds contain a ttl value, but if it does
# we would like to obey it
try:
            if self.feeds[feedname].ttl != self.config['feeds'][feedname].updatetime:
log.debug("feed '%s' request a ttl of %s, updating timer", feedname, self.feeds[feedname].ttl)
self.config['feeds'][feedname].updatetime = self.feeds[feedname].ttl
self.timers[feedname].stop()
self.timers[feedname].start(self.config['feeds'][feedname].updatetime * 60)
except Exception, e:
log.debug("feed '%s' has no ttl set, will use default timer", feedname)
# Run filters on the feed
self.run_filters(feedname)
def run_filters(self, feedname, filters={}, test=False):
"""Test all available filters on the given feed"""
if not filters:
filters = self.config['filters']
log.debug("will test filters %s", filters)
hits = {}
# Test every entry...
for entry in self.feeds[feedname]['entries']:
# ...and every filter
for filter in filters:
                # We need to be able to run feeds saved before implementation of activate/deactivate filter (pre 0.3) TODO
try:
if not filters[filter].active:
continue
except:
log.debug("old filter, will assume filter is activated")
if filters[filter].regex == "": # we don't want a empty regex...
log.warning("Filter '%s' has not been configured, ignoring!", filter)
continue
# if the filter isn't supposed to be run on this feed we don't want to run it...
# if filter.all_feeds or self.config['filters'][filter].feeds.has_element(feedname) : # ...apparently has_element doesn't work on arrays... TODO
if self.test_filter(entry, filters[filter].regex):
if test:
hits[entry.title] = entry.link
else:
opts = filters[filter].get_config()
#remove filter options that should not be passed on to the torrent.
del opts['regex']
del opts['feeds']
del opts['all_feeds']
# history patch from Darrell Enns, slightly modified :)
# check history to prevent multiple adds of the same torrent
log.debug("testing %s", entry.link)
if not entry.link in self.history:
self.add_torrent(entry.link, opts, self.feeds[feedname].cookies)
self.history.append(entry.link)
#limit history to 50 entries
if len(self.history)>50:
self.history=self.history[-50:]
log.debug("wrapping history")
else:
log.debug("'%s' is in history, will not download", entry.link)
return hits
def test_filter(self, entry, filter):
"""Tests a filter to a given rss entry"""
f = re.compile(filter, re.IGNORECASE)
if f.search(entry.title) or f.search(entry.link):
log.debug("RSS item '%s' matches filter '%s'", entry.title, filter)
return True
else:
return False
def add_torrent(self, url, torrent_options, headers):
log.debug("Attempting to add torrent %s", url)
component.get("Core").add_torrent_url(url, torrent_options, headers)
|
gpl-3.0
|
40223244/cdb-2
|
static/Brython3.1.1-20150328-091302/Lib/configparser.py
|
692
|
50025
|
"""Configuration file parser.
A configuration file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None, dict_type=_default_dict, allow_no_value=False,
delimiters=('=', ':'), comment_prefixes=('#', ';'),
inline_comment_prefixes=None, strict=True,
empty_lines_in_values=True):
        Create the parser. When `defaults' is given, it is initialized into the
        dictionary of intrinsic defaults. The keys must be strings, the values
must be appropriate for %()s string interpolation.
When `dict_type' is given, it will be used to create the dictionary
objects for the list of sections, for the options within a section, and
for the default values.
When `delimiters' is given, it will be used as the set of substrings
that divide keys from values.
When `comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in empty lines. Comments can be
indented.
When `inline_comment_prefixes' is given, it will be used as the set of
substrings that prefix comments in non-empty lines.
When `strict` is True, the parser won't allow for any section or option
duplicates while reading from a single source (file, string or
dictionary). Default is True.
When `empty_lines_in_values' is False (default: True), each empty line
marks the end of an option. Otherwise, internal empty lines of
a multiline option are kept as part of the value.
When `allow_no_value' is True (default: False), options without
values are accepted; the value presented for these is None.
sections()
Return all the configuration section names, sans DEFAULT.
has_section(section)
Return whether the given section exists.
has_option(section, option)
Return whether the given option exists in the given section.
options(section)
Return list of configuration options for the named section.
read(filenames, encoding=None)
Read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
read_file(f, filename=None)
Read and parse one configuration file, given as a file object.
The filename defaults to f.name; it is only used in error
messages (if f has no `name' attribute, the string `<???>' is used).
read_string(string)
Read configuration from a given string.
read_dict(dictionary)
Read configuration from a dictionary. Keys are section names,
values are dictionaries with keys and values that should be present
in the section. If the used dictionary type preserves order, sections
and their keys will be added in order. Values are automatically
converted to strings.
get(section, option, raw=False, vars=None, fallback=_UNSET)
Return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults. If `option' is a key in
`vars', the value from `vars' is used.
    getint(section, option, raw=False, vars=None, fallback=_UNSET)
        Like get(), but convert value to an integer.
    getfloat(section, option, raw=False, vars=None, fallback=_UNSET)
        Like get(), but convert value to a float.
    getboolean(section, option, raw=False, vars=None, fallback=_UNSET)
Like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section=_UNSET, raw=False, vars=None)
If section is given, return a list of tuples with (name, value) for
each option in the section. Otherwise, return a list of tuples with
(section_name, section_proxy) for each section, including DEFAULTSECT.
remove_section(section)
Remove the given file section and all its options.
remove_option(section, option)
Remove the given option from the given section.
set(section, option, value)
Set the given option.
write(fp, space_around_delimiters=True)
Write the configuration state in .ini format. If
`space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
from collections.abc import MutableMapping
from collections import OrderedDict as _default_dict, ChainMap as _ChainMap
import functools
import io
import itertools
import re
import sys
import warnings
__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError",
"NoOptionError", "InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation in
BaseException.
"""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation in
BaseException.
"""
self.__message = value
# BaseException.message has been deprecated since Python 2.6. To prevent
# DeprecationWarning from popping up over this pre-existing attribute, use
# a new property that takes lookup precedence.
message = property(_get_message, _set_message)
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is repeated in an input source.
Possible repetitions that raise this exception are: multiple creation
using the API or in strict parsers when a section is found more than once
in a single input file, string or dictionary.
"""
def __init__(self, section, source=None, lineno=None):
msg = [repr(section), " already exists"]
if source is not None:
message = ["While reading from ", source]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": section ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Section ")
Error.__init__(self, "".join(msg))
self.section = section
self.source = source
self.lineno = lineno
self.args = (section, source, lineno)
class DuplicateOptionError(Error):
"""Raised by strict parsers when an option is repeated in an input source.
Current implementation raises this exception only when an option is found
more than once in a single file, string or dictionary.
"""
def __init__(self, section, option, source=None, lineno=None):
msg = [repr(option), " in section ", repr(section),
" already exists"]
if source is not None:
message = ["While reading from ", source]
if lineno is not None:
message.append(" [line {0:2d}]".format(lineno))
message.append(": option ")
message.extend(msg)
msg = message
else:
msg.insert(0, "Option ")
Error.__init__(self, "".join(msg))
self.section = section
self.option = option
self.source = source
self.lineno = lineno
self.args = (section, option, source, lineno)
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text contains invalid syntax.
Current implementation raises this exception when the source text into
which substitutions are made does not conform to the required syntax.
"""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, source=None, filename=None):
# Exactly one of `source'/`filename' arguments has to be given.
# `filename' kept for compatibility.
if filename and source:
raise ValueError("Cannot specify both `filename' and `source'. "
"Use `source'.")
elif not filename and not source:
raise ValueError("Required argument `source' not given.")
elif filename:
source = filename
Error.__init__(self, 'Source contains parsing errors: %s' % source)
self.source = source
self.errors = []
self.args = (source, )
@property
def filename(self):
"""Deprecated, use `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
return self.source
@filename.setter
def filename(self, value):
"""Deprecated, user `source'."""
warnings.warn(
"The 'filename' attribute will be removed in future versions. "
"Use 'source' instead.",
DeprecationWarning, stacklevel=2
)
self.source = value
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.source = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
# Used in parser getters to indicate that the default behaviour when a
# specific option is not found is to raise an exception. Created to enable
# `None' as a valid fallback value.
_UNSET = object()
class Interpolation:
"""Dummy interpolation that passes the value through with no changes."""
def before_get(self, parser, section, option, value, defaults):
return value
def before_set(self, parser, section, option, value):
return value
def before_read(self, parser, section, option, value):
return value
def before_write(self, parser, section, option, value):
return value
class BasicInterpolation(Interpolation):
"""Interpolation as implemented in the classic ConfigParser.
The option values can contain format strings which refer to other values in
the same section, or values in the special default section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand. If a user needs to use a bare % in
a configuration file, she can escape it by writing %%. Other % usage
is considered a user error and raises `InterpolationSyntaxError'."""
_KEYCRE = re.compile(r"%\(([^)]+)\)s")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('%%', '') # escaped percent signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = parser.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(parser, option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', "
"found: %r" % (rest,))
class ExtendedInterpolation(Interpolation):
"""Advanced variant of interpolation, supports the syntax used by
`zc.buildout'. Enables interpolation between sections."""
_KEYCRE = re.compile(r"\$\{([^}]+)\}")
def before_get(self, parser, section, option, value, defaults):
L = []
self._interpolate_some(parser, option, L, value, section, defaults, 1)
return ''.join(L)
def before_set(self, parser, section, option, value):
tmp_value = value.replace('$$', '') # escaped dollar signs
tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax
if '$' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
return value
def _interpolate_some(self, parser, option, accum, rest, section, map,
depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("$")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "$":
accum.append("$")
rest = rest[2:]
elif c == "{":
m = self._KEYCRE.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
path = m.group(1).split(':')
rest = rest[m.end():]
sect = section
opt = option
try:
if len(path) == 1:
opt = parser.optionxform(path[0])
v = map[opt]
elif len(path) == 2:
sect = path[0]
opt = parser.optionxform(path[1])
v = parser.get(sect, opt, raw=True)
else:
raise InterpolationSyntaxError(
option, section,
"More than one ':' found: %r" % (rest,))
except (KeyError, NoSectionError, NoOptionError):
raise InterpolationMissingOptionError(
option, section, rest, ":".join(path))
if "$" in v:
self._interpolate_some(parser, opt, accum, v, sect,
dict(parser.items(sect, raw=True)),
depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'$' must be followed by '$' or '{', "
"found: %r" % (rest,))
class LegacyInterpolation(Interpolation):
"""Deprecated interpolation used in old versions of ConfigParser.
Use BasicInterpolation or ExtendedInterpolation instead."""
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def before_get(self, parser, section, option, value, vars):
rawval = value
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
replace = functools.partial(self._interpolation_replace,
parser=parser)
value = self._KEYCRE.sub(replace, value)
try:
value = value % vars
except KeyError as e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
def before_set(self, parser, section, option, value):
return value
@staticmethod
def _interpolation_replace(match, parser):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % parser.optionxform(s)
class RawConfigParser(MutableMapping):
"""ConfigParser that does not do interpolation."""
# Regular expressions for parsing section headers and options
_SECT_TMPL = r"""
\[ # [
(?P<header>[^]]+) # very permissive!
\] # ]
"""
_OPT_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?P<vi>{delim})\s* # any number of space/tab,
# followed by any of the
# allowed delimiters,
# followed by any space/tab
(?P<value>.*)$ # everything up to eol
"""
_OPT_NV_TMPL = r"""
(?P<option>.*?) # very permissive!
\s*(?: # any number of space/tab,
(?P<vi>{delim})\s* # optionally followed by
# any of the allowed
# delimiters, followed by any
# space/tab
(?P<value>.*))?$ # everything up to eol
"""
# Interpolation algorithm to be used if the user does not specify another
_DEFAULT_INTERPOLATION = Interpolation()
# Compiled regular expression for matching sections
SECTCRE = re.compile(_SECT_TMPL, re.VERBOSE)
# Compiled regular expression for matching options with typical separators
OPTCRE = re.compile(_OPT_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching options with optional values
# delimited using typical separators
OPTCRE_NV = re.compile(_OPT_NV_TMPL.format(delim="=|:"), re.VERBOSE)
# Compiled regular expression for matching leading whitespace in a line
NONSPACECRE = re.compile(r"\S")
# Possible boolean values in the configuration.
BOOLEAN_STATES = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False, *, delimiters=('=', ':'),
comment_prefixes=('#', ';'), inline_comment_prefixes=None,
strict=True, empty_lines_in_values=True,
default_section=DEFAULTSECT,
interpolation=_UNSET):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
self._proxies = self._dict()
self._proxies[default_section] = SectionProxy(self, default_section)
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
self._delimiters = tuple(delimiters)
if delimiters == ('=', ':'):
self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
else:
d = "|".join(re.escape(d) for d in delimiters)
if allow_no_value:
self._optcre = re.compile(self._OPT_NV_TMPL.format(delim=d),
re.VERBOSE)
else:
self._optcre = re.compile(self._OPT_TMPL.format(delim=d),
re.VERBOSE)
self._comment_prefixes = tuple(comment_prefixes or ())
self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
self._strict = strict
self._allow_no_value = allow_no_value
self._empty_lines_in_values = empty_lines_in_values
self.default_section=default_section
self._interpolation = interpolation
if self._interpolation is _UNSET:
self._interpolation = self._DEFAULT_INTERPOLATION
if self._interpolation is None:
self._interpolation = Interpolation()
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return list(self._sections.keys())
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
already exists. Raise ValueError if name is DEFAULT.
"""
if section == self.default_section:
raise ValueError('Invalid section name: %r' % section)
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
self._proxies[section] = SectionProxy(self, section)
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
return list(opts.keys())
def read(self, filenames, encoding=None):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, str):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
with open(filename, encoding=encoding) as fp:
self._read(fp, filename)
except IOError:
continue
read_ok.append(filename)
return read_ok
def read_file(self, f, source=None):
"""Like read() but the argument must be a file-like object.
The `f' argument must be iterable, returning one line at a time.
Optional second argument is the `source' specifying the name of the
file being read. If not given, it is taken from f.name. If `f' has no
`name' attribute, `<???>' is used.
"""
if source is None:
try:
source = f.name
except AttributeError:
source = '<???>'
self._read(f, source)
def read_string(self, string, source='<string>'):
"""Read configuration from a given string."""
sfile = io.StringIO(string)
self.read_file(sfile, source)
def read_dict(self, dictionary, source='<dict>'):
"""Read configuration from a dictionary.
Keys are section names, values are dictionaries with keys and values
that should be present in the section. If the used dictionary type
preserves order, sections and their keys will be added in order.
All types held in the dictionary are converted to strings during
reading, including section names, option names and keys.
Optional second argument is the `source' specifying the name of the
dictionary being read.
"""
elements_added = set()
for section, keys in dictionary.items():
section = str(section)
try:
self.add_section(section)
except (DuplicateSectionError, ValueError):
if self._strict and section in elements_added:
raise
elements_added.add(section)
for key, value in keys.items():
key = self.optionxform(str(key))
if value is not None:
value = str(value)
if self._strict and (section, key) in elements_added:
raise DuplicateOptionError(section, key, source)
elements_added.add((section, key))
self.set(section, key, value)
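# --- Illustrative sketch (not part of the original module): note the ---
# --- str() conversion described in the docstring above.              ---
#
#   parser.read_dict({'server': {'port': 8080, 'debug': True}})
#   parser['server']['port']   # '8080' -- values were str()-converted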
def readfp(self, fp, filename=None):
"""Deprecated, use read_file instead."""
warnings.warn(
"This method will be removed in future versions. "
"Use 'parser.read_file()' instead.",
DeprecationWarning, stacklevel=2
)
self.read_file(fp, source=filename)
def get(self, section, option, *, raw=False, vars=None, fallback=_UNSET):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `DEFAULTSECT' in that order.
If the key is not found and `fallback' is provided, it is used as
a fallback value. `None' can be provided as a `fallback' value.
If interpolation is enabled and the optional argument `raw' is False,
all interpolations are expanded in the return values.
Arguments `raw', `vars', and `fallback' are keyword only.
The section DEFAULT is special.
"""
try:
d = self._unify_values(section, vars)
except NoSectionError:
if fallback is _UNSET:
raise
else:
return fallback
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
if fallback is _UNSET:
raise NoOptionError(option, section)
else:
return fallback
if raw or value is None:
return value
else:
return self._interpolation.before_get(self, section, option, value,
d)
def _get(self, section, conv, option, **kwargs):
return conv(self.get(section, option, **kwargs))
def getint(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, int, option, raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def getfloat(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, float, option, raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
def getboolean(self, section, option, *, raw=False, vars=None,
fallback=_UNSET):
try:
return self._get(section, self._convert_to_boolean, option,
raw=raw, vars=vars)
except (NoSectionError, NoOptionError):
if fallback is _UNSET:
raise
else:
return fallback
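# --- Illustrative sketch (not part of the original module): typed ---
# --- getters and the keyword-only `fallback` for missing options. ---
#
#   parser.read_string("[net]\ntimeout = 2.5\nverbose = yes\n")
#   parser.getfloat('net', 'timeout')             # 2.5
#   parser.getboolean('net', 'verbose')           # True (via BOOLEAN_STATES)
#   parser.getint('net', 'retries', fallback=3)   # 3 -- option is missing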
def items(self, section=_UNSET, raw=False, vars=None):
"""Return a list of (name, value) tuples for each option in a section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents override
any pre-existing defaults.
The section DEFAULT is special.
"""
if section is _UNSET:
return super().items()
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
value_getter = lambda option: self._interpolation.before_get(self,
section, option, d[option], d)
if raw:
value_getter = lambda option: d[option]
return [(option, value_getter(option)) for option in d.keys()]
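# --- Illustrative sketch (not part of the original module): assumes an ---
# --- interpolating ConfigParser; `vars` entries take priority and also ---
# --- appear in the result.                                             ---
#
#   parser.read_string("[greet]\nmsg = hello %(name)s\n")
#   parser.items('greet', vars={'name': 'world'})
#   # -> [('msg', 'hello world'), ('name', 'world')]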
def popitem(self):
"""Remove a section from the parser and return it as
a (section_name, section_proxy) tuple. If no section is present, raise
KeyError.
The section DEFAULT is never returned because it cannot be removed.
"""
for key in self.sections():
value = self[key]
del self[key]
return key, value
raise KeyError
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section.
If the specified `section' is None or an empty string, DEFAULT is
assumed. If the specified `section' does not exist, returns False."""
if not section or section == self.default_section:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if value:
value = self._interpolation.before_set(self, section, option,
value)
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp, space_around_delimiters=True):
"""Write an .ini-format representation of the configuration state.
If `space_around_delimiters' is True (the default), delimiters
between keys and values are surrounded by spaces.
"""
if space_around_delimiters:
d = " {} ".format(self._delimiters[0])
else:
d = self._delimiters[0]
if self._defaults:
self._write_section(fp, self.default_section,
self._defaults.items(), d)
for section in self._sections:
self._write_section(fp, section,
self._sections[section].items(), d)
def _write_section(self, fp, section_name, section_items, delimiter):
"""Write a single section to the specified `fp'."""
fp.write("[{}]\n".format(section_name))
for key, value in section_items:
value = self._interpolation.before_write(self, section_name, key,
value)
if value is not None or not self._allow_no_value:
value = delimiter + str(value).replace('\n', '\n\t')
else:
value = ""
fp.write("{}{}\n".format(key, value))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == self.default_section:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
del self._proxies[section]
return existed
def __getitem__(self, key):
if key != self.default_section and not self.has_section(key):
raise KeyError(key)
return self._proxies[key]
def __setitem__(self, key, value):
# To conform with the mapping protocol, overwrites existing values in
# the section.
# XXX this is not atomic if read_dict fails at any point. Then again,
# no update method in configparser is atomic in this implementation.
if key == self.default_section:
self._defaults.clear()
elif key in self._sections:
self._sections[key].clear()
self.read_dict({key: value})
def __delitem__(self, key):
if key == self.default_section:
raise ValueError("Cannot remove the default section.")
if not self.has_section(key):
raise KeyError(key)
self.remove_section(key)
def __contains__(self, key):
return key == self.default_section or self.has_section(key)
def __len__(self):
return len(self._sections) + 1 # the default section
def __iter__(self):
# XXX does it break when underlying container state changed?
return itertools.chain((self.default_section,), self._sections.keys())
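# --- Illustrative sketch (not part of the original module): the ---
# --- mapping-protocol methods defined above in action.          ---
#
#   'server' in parser      # membership via __contains__
#   len(parser)             # number of sections + 1 for DEFAULT
#   list(parser)            # ['DEFAULT', <sections in insertion order>]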
def _read(self, fp, fpname):
"""Parse a sectioned configuration file.
Each section in a configuration file contains a header, indicated by
a name in square brackets (`[]'), plus key/value options, indicated by
`name' and `value' delimited with a specific substring (`=' or `:' by
default).
Values can span multiple lines, as long as they are indented deeper
than the first line of the value. Depending on the parser's mode, blank
lines may be treated as parts of multiline values or ignored.
Configuration files may include comments, prefixed by specific
characters (`#' and `;' by default). Comments may appear on their own
in an otherwise empty line or may be entered in lines holding values or
section names.
"""
elements_added = set()
cursect = None # None, or a dictionary
sectname = None
optname = None
lineno = 0
indent_level = 0
e = None # None, or an exception
for lineno, line in enumerate(fp, start=1):
comment_start = sys.maxsize
# strip inline comments
inline_prefixes = {p: -1 for p in self._inline_comment_prefixes}
while comment_start == sys.maxsize and inline_prefixes:
next_prefixes = {}
for prefix, index in inline_prefixes.items():
index = line.find(prefix, index+1)
if index == -1:
continue
next_prefixes[prefix] = index
if index == 0 or (index > 0 and line[index-1].isspace()):
comment_start = min(comment_start, index)
inline_prefixes = next_prefixes
# strip full line comments
for prefix in self._comment_prefixes:
if line.strip().startswith(prefix):
comment_start = 0
break
if comment_start == sys.maxsize:
comment_start = None
value = line[:comment_start].strip()
if not value:
if self._empty_lines_in_values:
# add empty line to the value, but only if there was no
# comment on the line
if (comment_start is None and
cursect is not None and
optname and
cursect[optname] is not None):
cursect[optname].append('') # newlines added at join
else:
# empty line marks end of value
indent_level = sys.maxsize
continue
# continuation line?
first_nonspace = self.NONSPACECRE.search(line)
cur_indent_level = first_nonspace.start() if first_nonspace else 0
if (cursect is not None and optname and
cur_indent_level > indent_level):
cursect[optname].append(value)
# a section header or option header?
else:
indent_level = cur_indent_level
# is it a section header?
mo = self.SECTCRE.match(value)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
if self._strict and sectname in elements_added:
raise DuplicateSectionError(sectname, fpname,
lineno)
cursect = self._sections[sectname]
elements_added.add(sectname)
elif sectname == self.default_section:
cursect = self._defaults
else:
cursect = self._dict()
self._sections[sectname] = cursect
self._proxies[sectname] = SectionProxy(self, sectname)
elements_added.add(sectname)
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(value)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
if not optname:
e = self._handle_error(e, fpname, lineno, line)
optname = self.optionxform(optname.rstrip())
if (self._strict and
(sectname, optname) in elements_added):
raise DuplicateOptionError(sectname, optname,
fpname, lineno)
elements_added.add((sectname, optname))
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
optval = optval.strip()
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = None
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
e = self._handle_error(e, fpname, lineno, line)
# if any parsing errors occurred, raise an exception
if e:
raise e
self._join_multiline_values()
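# --- Illustrative sketch (not part of the original module): the parsing ---
# --- rules above, i.e. indented continuation lines and comments, with   ---
# --- the default parser settings.                                       ---
#
#   parser.read_string(
#       "[msg]\n"
#       "body = line one\n"
#       "  line two\n"              # indented deeper -> continuation
#       "; full-line comment, ignored\n"
#   )
#   parser['msg']['body']           # 'line one\nline two'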
def _join_multiline_values(self):
defaults = self.default_section, self._defaults
all_sections = itertools.chain((defaults,),
self._sections.items())
for section, options in all_sections:
for name, val in options.items():
if isinstance(val, list):
val = '\n'.join(val).rstrip()
options[name] = self._interpolation.before_read(self,
section,
name, val)
def _handle_error(self, exc, fpname, lineno, line):
if not exc:
exc = ParsingError(fpname)
exc.append(lineno, repr(line))
return exc
def _unify_values(self, section, vars):
"""Create a sequence of lookups with 'vars' taking priority over
        the 'section', which takes priority over the DEFAULTSECT.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != self.default_section:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
if value is not None:
value = str(value)
vardict[self.optionxform(key)] = value
return _ChainMap(vardict, sectiondict, self._defaults)
def _convert_to_boolean(self, value):
"""Return a boolean value translating from other types if necessary.
"""
if value.lower() not in self.BOOLEAN_STATES:
raise ValueError('Not a boolean: %s' % value)
return self.BOOLEAN_STATES[value.lower()]
def _validate_value_types(self, *, section="", option="", value=""):
"""Raises a TypeError for non-string values.
The only legal non-string value if we allow valueless
options is None, so we need to check if the value is a
string if:
- we do not allow valueless options, or
- we allow valueless options but the value is not None
For compatibility reasons this method is not used in classic set()
for RawConfigParsers. It is invoked in every case for mapping protocol
access and in ConfigParser.set().
"""
if not isinstance(section, str):
raise TypeError("section names must be strings")
if not isinstance(option, str):
raise TypeError("option keys must be strings")
if not self._allow_no_value or value:
if not isinstance(value, str):
raise TypeError("option values must be strings")
class ConfigParser(RawConfigParser):
"""ConfigParser implementing interpolation."""
_DEFAULT_INTERPOLATION = BasicInterpolation()
def set(self, section, option, value=None):
"""Set an option. Extends RawConfigParser.set by validating type and
interpolation syntax on the value."""
self._validate_value_types(option=option, value=value)
super().set(section, option, value)
def add_section(self, section):
"""Create a new section in the configuration. Extends
RawConfigParser.add_section by validating if the section name is
a string."""
self._validate_value_types(section=section)
super().add_section(section)
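# --- Illustrative sketch (not part of the original module): the only ---
# --- behavioral difference from RawConfigParser is interpolation.    ---
#
#   raw = RawConfigParser(); raw.read_string("[p]\nx = %(y)s\ny = 1\n")
#   raw.get('p', 'x')       # '%(y)s' -- no interpolation
#   cp = ConfigParser(); cp.read_string("[p]\nx = %(y)s\ny = 1\n")
#   cp.get('p', 'x')        # '1'     -- BasicInterpolation expands it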
class SafeConfigParser(ConfigParser):
"""ConfigParser alias for backwards compatibility purposes."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
warnings.warn(
"The SafeConfigParser class has been renamed to ConfigParser "
"in Python 3.2. This alias will be removed in future versions."
" Use ConfigParser directly instead.",
DeprecationWarning, stacklevel=2
)
class SectionProxy(MutableMapping):
"""A proxy for a single section from a parser."""
def __init__(self, parser, name):
"""Creates a view on a section of the specified `name` in `parser`."""
self._parser = parser
self._name = name
def __repr__(self):
return '<Section: {}>'.format(self._name)
def __getitem__(self, key):
if not self._parser.has_option(self._name, key):
raise KeyError(key)
return self._parser.get(self._name, key)
def __setitem__(self, key, value):
self._parser._validate_value_types(option=key, value=value)
return self._parser.set(self._name, key, value)
def __delitem__(self, key):
if not (self._parser.has_option(self._name, key) and
self._parser.remove_option(self._name, key)):
raise KeyError(key)
def __contains__(self, key):
return self._parser.has_option(self._name, key)
def __len__(self):
return len(self._options())
def __iter__(self):
return self._options().__iter__()
def _options(self):
if self._name != self._parser.default_section:
return self._parser.options(self._name)
else:
return self._parser.defaults()
def get(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.get(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getint(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getint(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getfloat(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getfloat(self._name, option, raw=raw, vars=vars,
fallback=fallback)
def getboolean(self, option, fallback=None, *, raw=False, vars=None):
return self._parser.getboolean(self._name, option, raw=raw, vars=vars,
fallback=fallback)
@property
def parser(self):
# The parser object of the proxy is read-only.
return self._parser
@property
def name(self):
# The name of the section on a proxy is read-only.
return self._name
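# --- Illustrative sketch (not part of the original module): section ---
# --- proxies are live views obtained by indexing the parser.        ---
#
#   section = parser['server']               # a SectionProxy
#   section.getint('port', fallback=8080)
#   section['debug'] = 'yes'                 # writes through to the parser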
|
gpl-3.0
|
praekelt/django-socialregistration
|
socialregistration/models.py
|
1
|
2100
|
from django.db import models
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
class FacebookProfile(models.Model):
user = models.ForeignKey(User)
site = models.ForeignKey(Site, default=Site.objects.get_current)
uid = models.CharField(max_length=255, blank=False, null=False)
oauth_access_token = models.CharField(max_length=255, blank=False, null=False)
def __unicode__(self):
return u'%s: %s' % (self.user, self.uid)
def authenticate(self):
return authenticate(uid=self.uid)
class TwitterProfile(models.Model):
user = models.ForeignKey(User)
site = models.ForeignKey(Site, default=Site.objects.get_current)
twitter_id = models.PositiveIntegerField()
def __unicode__(self):
return u'%s: %s' % (self.user, self.twitter_id)
def authenticate(self):
return authenticate(twitter_id=self.twitter_id)
class OpenIDProfile(models.Model):
user = models.ForeignKey(User)
site = models.ForeignKey(Site, default=Site.objects.get_current)
identity = models.TextField()
def __unicode__(self):
return u'OpenID Profile for %s, via provider %s' % (self.user, self.identity)
def authenticate(self):
return authenticate(identity=self.identity)
class OpenIDStore(models.Model):
site = models.ForeignKey(Site, default=Site.objects.get_current)
server_url = models.CharField(max_length=255)
handle = models.CharField(max_length=255)
secret = models.TextField()
issued = models.IntegerField()
lifetime = models.IntegerField()
assoc_type = models.TextField()
def __unicode__(self):
return u'OpenID Store %s for %s' % (self.server_url, self.site)
class OpenIDNonce(models.Model):
server_url = models.CharField(max_length=255)
timestamp = models.IntegerField()
salt = models.CharField(max_length=255)
date_created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return u'OpenID Nonce for %s' % self.server_url
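# --- Illustrative sketch (not from the original app): `some_user` and ---
# --- the uid are made up, and authenticate() only returns a User if   ---
# --- django-socialregistration's auth backend (defined elsewhere) is  ---
# --- listed in AUTHENTICATION_BACKENDS.                               ---
#
#   profile = FacebookProfile.objects.create(
#       user=some_user, uid='12345', oauth_access_token='token')
#   user = profile.authenticate()    # a User instance, or None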
|
mit
|
flx2015/ns-3-dev-git
|
src/bridge/bindings/modulegen__gcc_ILP32.py
|
30
|
182442
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.bridge', cpp_namespace='::ns3')
return root_module
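# --- Illustrative sketch (not part of this generated file): pybindgen ---
# --- binding scripts like this one are typically driven roughly as    ---
# --- below, emitting the generated C++ wrapper code to stdout.        ---
#
#   root_module = module_init()
#   register_types(root_module)
#   register_methods(root_module)
#   root_module.generate(FileCodeSink(sys.stdout))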
def register_types(module):
root_module = module.get_root()
## address.h (module 'network'): ns3::Address [class]
module.add_class('Address', import_from_module='ns.network')
## address.h (module 'network'): ns3::Address::MaxSize_e [enumeration]
module.add_enum('MaxSize_e', ['MAX_SIZE'], outer_class=root_module['ns3::Address'], import_from_module='ns.network')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper [class]
module.add_class('BridgeHelper')
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
module.add_class('Ipv4Address', import_from_module='ns.network')
## ipv4-address.h (module 'network'): ns3::Ipv4Address [class]
root_module['ns3::Ipv4Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask [class]
module.add_class('Ipv4Mask', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
module.add_class('Ipv6Address', import_from_module='ns.network')
## ipv6-address.h (module 'network'): ns3::Ipv6Address [class]
root_module['ns3::Ipv6Address'].implicitly_converts_to(root_module['ns3::Address'])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix [class]
module.add_class('Ipv6Prefix', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
module.add_class('Mac48Address', import_from_module='ns.network')
## mac48-address.h (module 'network'): ns3::Mac48Address [class]
root_module['ns3::Mac48Address'].implicitly_converts_to(root_module['ns3::Address'])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer [class]
module.add_class('NetDeviceContainer', import_from_module='ns.network')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## object-factory.h (module 'core'): ns3::ObjectFactory [class]
module.add_class('ObjectFactory', import_from_module='ns.core')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## tag-buffer.h (module 'network'): ns3::TagBuffer [class]
module.add_class('TagBuffer', import_from_module='ns.network')
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## channel.h (module 'network'): ns3::Channel [class]
module.add_class('Channel', import_from_module='ns.network', parent=root_module['ns3::Object'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker [class]
module.add_class('Ipv4AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue [class]
module.add_class('Ipv4AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker [class]
module.add_class('Ipv4MaskChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue [class]
module.add_class('Ipv4MaskValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker [class]
module.add_class('Ipv6AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue [class]
module.add_class('Ipv6AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker [class]
module.add_class('Ipv6PrefixChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue [class]
module.add_class('Ipv6PrefixValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker [class]
module.add_class('Mac48AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue [class]
module.add_class('Mac48AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## net-device.h (module 'network'): ns3::NetDevice [class]
module.add_class('NetDevice', import_from_module='ns.network', parent=root_module['ns3::Object'])
## net-device.h (module 'network'): ns3::NetDevice::PacketType [enumeration]
module.add_enum('PacketType', ['PACKET_HOST', 'NS3_PACKET_HOST', 'PACKET_BROADCAST', 'NS3_PACKET_BROADCAST', 'PACKET_MULTICAST', 'NS3_PACKET_MULTICAST', 'PACKET_OTHERHOST', 'NS3_PACKET_OTHERHOST'], outer_class=root_module['ns3::NetDevice'], import_from_module='ns.network')
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## address.h (module 'network'): ns3::AddressChecker [class]
module.add_class('AddressChecker', import_from_module='ns.network', parent=root_module['ns3::AttributeChecker'])
## address.h (module 'network'): ns3::AddressValue [class]
module.add_class('AddressValue', import_from_module='ns.network', parent=root_module['ns3::AttributeValue'])
## bridge-channel.h (module 'bridge'): ns3::BridgeChannel [class]
module.add_class('BridgeChannel', parent=root_module['ns3::Channel'])
## bridge-net-device.h (module 'bridge'): ns3::BridgeNetDevice [class]
module.add_class('BridgeNetDevice', parent=root_module['ns3::NetDevice'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
## Register a nested module for the namespace TracedValueCallback
nested_module = module.add_cpp_namespace('TracedValueCallback')
register_types_ns3_TracedValueCallback(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_types_ns3_TracedValueCallback(module):
root_module = module.get_root()
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *', u'ns3::TracedValueCallback::Time')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) **', u'ns3::TracedValueCallback::Time*')
typehandlers.add_type_alias(u'void ( * ) ( ns3::Time, ns3::Time ) *&', u'ns3::TracedValueCallback::Time&')
def register_methods(root_module):
register_Ns3Address_methods(root_module, root_module['ns3::Address'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3BridgeHelper_methods(root_module, root_module['ns3::BridgeHelper'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3Ipv4Address_methods(root_module, root_module['ns3::Ipv4Address'])
register_Ns3Ipv4Mask_methods(root_module, root_module['ns3::Ipv4Mask'])
register_Ns3Ipv6Address_methods(root_module, root_module['ns3::Ipv6Address'])
register_Ns3Ipv6Prefix_methods(root_module, root_module['ns3::Ipv6Prefix'])
register_Ns3Mac48Address_methods(root_module, root_module['ns3::Mac48Address'])
register_Ns3NetDeviceContainer_methods(root_module, root_module['ns3::NetDeviceContainer'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3TagBuffer_methods(root_module, root_module['ns3::TagBuffer'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3Channel_methods(root_module, root_module['ns3::Channel'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3Ipv4AddressChecker_methods(root_module, root_module['ns3::Ipv4AddressChecker'])
register_Ns3Ipv4AddressValue_methods(root_module, root_module['ns3::Ipv4AddressValue'])
register_Ns3Ipv4MaskChecker_methods(root_module, root_module['ns3::Ipv4MaskChecker'])
register_Ns3Ipv4MaskValue_methods(root_module, root_module['ns3::Ipv4MaskValue'])
register_Ns3Ipv6AddressChecker_methods(root_module, root_module['ns3::Ipv6AddressChecker'])
register_Ns3Ipv6AddressValue_methods(root_module, root_module['ns3::Ipv6AddressValue'])
register_Ns3Ipv6PrefixChecker_methods(root_module, root_module['ns3::Ipv6PrefixChecker'])
register_Ns3Ipv6PrefixValue_methods(root_module, root_module['ns3::Ipv6PrefixValue'])
register_Ns3Mac48AddressChecker_methods(root_module, root_module['ns3::Mac48AddressChecker'])
register_Ns3Mac48AddressValue_methods(root_module, root_module['ns3::Mac48AddressValue'])
register_Ns3NetDevice_methods(root_module, root_module['ns3::NetDevice'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3AddressChecker_methods(root_module, root_module['ns3::AddressChecker'])
register_Ns3AddressValue_methods(root_module, root_module['ns3::AddressValue'])
register_Ns3BridgeChannel_methods(root_module, root_module['ns3::BridgeChannel'])
register_Ns3BridgeNetDevice_methods(root_module, root_module['ns3::BridgeNetDevice'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
def register_Ns3Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## address.h (module 'network'): ns3::Address::Address() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::Address::Address(uint8_t type, uint8_t const * buffer, uint8_t len) [constructor]
cls.add_constructor([param('uint8_t', 'type'), param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): ns3::Address::Address(ns3::Address const & address) [copy constructor]
cls.add_constructor([param('ns3::Address const &', 'address')])
## address.h (module 'network'): bool ns3::Address::CheckCompatible(uint8_t type, uint8_t len) const [member function]
cls.add_method('CheckCompatible',
'bool',
[param('uint8_t', 'type'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyAllFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyAllFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyAllTo(uint8_t * buffer, uint8_t len) const [member function]
cls.add_method('CopyAllTo',
'uint32_t',
[param('uint8_t *', 'buffer'), param('uint8_t', 'len')],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::CopyFrom(uint8_t const * buffer, uint8_t len) [member function]
cls.add_method('CopyFrom',
'uint32_t',
[param('uint8_t const *', 'buffer'), param('uint8_t', 'len')])
## address.h (module 'network'): uint32_t ns3::Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'uint32_t',
[param('uint8_t *', 'buffer')],
is_const=True)
## address.h (module 'network'): void ns3::Address::Deserialize(ns3::TagBuffer buffer) [member function]
cls.add_method('Deserialize',
'void',
[param('ns3::TagBuffer', 'buffer')])
## address.h (module 'network'): uint8_t ns3::Address::GetLength() const [member function]
cls.add_method('GetLength',
'uint8_t',
[],
is_const=True)
## address.h (module 'network'): uint32_t ns3::Address::GetSerializedSize() const [member function]
cls.add_method('GetSerializedSize',
'uint32_t',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsInvalid() const [member function]
cls.add_method('IsInvalid',
'bool',
[],
is_const=True)
## address.h (module 'network'): bool ns3::Address::IsMatchingType(uint8_t type) const [member function]
cls.add_method('IsMatchingType',
'bool',
[param('uint8_t', 'type')],
is_const=True)
## address.h (module 'network'): static uint8_t ns3::Address::Register() [member function]
cls.add_method('Register',
'uint8_t',
[],
is_static=True)
## address.h (module 'network'): void ns3::Address::Serialize(ns3::TagBuffer buffer) const [member function]
cls.add_method('Serialize',
'void',
[param('ns3::TagBuffer', 'buffer')],
is_const=True)
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3BridgeHelper_methods(root_module, cls):
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper::BridgeHelper(ns3::BridgeHelper const & arg0) [copy constructor]
cls.add_constructor([param('ns3::BridgeHelper const &', 'arg0')])
## bridge-helper.h (module 'bridge'): ns3::BridgeHelper::BridgeHelper() [constructor]
cls.add_constructor([])
## bridge-helper.h (module 'bridge'): ns3::NetDeviceContainer ns3::BridgeHelper::Install(ns3::Ptr<ns3::Node> node, ns3::NetDeviceContainer c) [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('ns3::Ptr< ns3::Node >', 'node'), param('ns3::NetDeviceContainer', 'c')])
## bridge-helper.h (module 'bridge'): ns3::NetDeviceContainer ns3::BridgeHelper::Install(std::string nodeName, ns3::NetDeviceContainer c) [member function]
cls.add_method('Install',
'ns3::NetDeviceContainer',
[param('std::string', 'nodeName'), param('ns3::NetDeviceContainer', 'c')])
## bridge-helper.h (module 'bridge'): void ns3::BridgeHelper::SetDeviceAttribute(std::string n1, ns3::AttributeValue const & v1) [member function]
cls.add_method('SetDeviceAttribute',
'void',
[param('std::string', 'n1'), param('ns3::AttributeValue const &', 'v1')])
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
def register_Ns3Ipv4Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(ns3::Ipv4Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(uint32_t address) [constructor]
cls.add_constructor([param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address::Ipv4Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::CombineMask(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('CombineMask',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv4Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv4Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Address::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4Address::GetSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('GetSubnetDirectedBroadcast',
'ns3::Ipv4Address',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Address ns3::Ipv4Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Address',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsEqual(ns3::Ipv4Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Address const &', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsLocalMulticast() const [member function]
cls.add_method('IsLocalMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): static bool ns3::Ipv4Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Address::IsSubnetDirectedBroadcast(ns3::Ipv4Mask const & mask) const [member function]
cls.add_method('IsSubnetDirectedBroadcast',
'bool',
[param('ns3::Ipv4Mask const &', 'mask')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(uint32_t address) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'address')])
## ipv4-address.h (module 'network'): void ns3::Ipv4Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
return
def register_Ns3Ipv4Mask_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(ns3::Ipv4Mask const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(uint32_t mask) [constructor]
cls.add_constructor([param('uint32_t', 'mask')])
## ipv4-address.h (module 'network'): ns3::Ipv4Mask::Ipv4Mask(char const * mask) [constructor]
cls.add_constructor([param('char const *', 'mask')])
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::Get() const [member function]
cls.add_method('Get',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): uint32_t ns3::Ipv4Mask::GetInverse() const [member function]
cls.add_method('GetInverse',
'uint32_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): uint16_t ns3::Ipv4Mask::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint16_t',
[],
is_const=True)
## ipv4-address.h (module 'network'): static ns3::Ipv4Mask ns3::Ipv4Mask::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv4Mask',
[],
is_static=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsEqual(ns3::Ipv4Mask other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv4Mask', 'other')],
is_const=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4Mask::IsMatch(ns3::Ipv4Address a, ns3::Ipv4Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv4Address', 'a'), param('ns3::Ipv4Address', 'b')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4Mask::Set(uint32_t mask) [member function]
cls.add_method('Set',
'void',
[param('uint32_t', 'mask')])
return
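
# Illustrative sketch (not executed, same 'ns.network' assumption as above):
# Ipv4Mask pairs with Ipv4Address for subnet membership tests.
#
#   mask = ns.network.Ipv4Mask('255.255.255.0')      # char const * constructor
#   mask.GetPrefixLength()                           # -> 24
#   mask.IsMatch(ns.network.Ipv4Address('10.1.1.1'),
#                ns.network.Ipv4Address('10.1.1.7')) # same /24 -> True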
def register_Ns3Ipv6Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(char const * address) [constructor]
cls.add_constructor([param('char const *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(uint8_t * address) [constructor]
cls.add_constructor([param('uint8_t *', 'address')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const & addr) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address::Ipv6Address(ns3::Ipv6Address const * addr) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const *', 'addr')])
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6Address::CombinePrefix(ns3::Ipv6Prefix const & prefix) [member function]
cls.add_method('CombinePrefix',
'ns3::Ipv6Address',
[param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Ipv6Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::Deserialize(uint8_t const * buf) [member function]
cls.add_method('Deserialize',
'ns3::Ipv6Address',
[param('uint8_t const *', 'buf')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllHostsMulticast() [member function]
cls.add_method('GetAllHostsMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllNodesMulticast() [member function]
cls.add_method('GetAllNodesMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAllRoutersMulticast() [member function]
cls.add_method('GetAllRoutersMulticast',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetAny() [member function]
cls.add_method('GetAny',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv6Address::GetIpv4MappedAddress() const [member function]
cls.add_method('GetIpv4MappedAddress',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Address',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllHostsMulticast() const [member function]
cls.add_method('IsAllHostsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllNodesMulticast() const [member function]
cls.add_method('IsAllNodesMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAllRoutersMulticast() const [member function]
cls.add_method('IsAllRoutersMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsAny() const [member function]
cls.add_method('IsAny',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsDocumentation() const [member function]
cls.add_method('IsDocumentation',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsEqual(ns3::Ipv6Address const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Address const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsIpv4MappedAddress() const [member function]
cls.add_method('IsIpv4MappedAddress',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocal() const [member function]
cls.add_method('IsLinkLocal',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLinkLocalMulticast() const [member function]
cls.add_method('IsLinkLocalMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsLocalhost() const [member function]
cls.add_method('IsLocalhost',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static bool ns3::Ipv6Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Address::IsSolicitedMulticast() const [member function]
cls.add_method('IsSolicitedMulticast',
'bool',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac16Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac48Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredAddress(ns3::Mac64Address addr, ns3::Ipv6Address prefix) [member function]
cls.add_method('MakeAutoconfiguredAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'addr'), param('ns3::Ipv6Address', 'prefix')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac16Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac16Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac48Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac48Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeAutoconfiguredLinkLocalAddress(ns3::Mac64Address mac) [member function]
cls.add_method('MakeAutoconfiguredLinkLocalAddress',
'ns3::Ipv6Address',
[param('ns3::Mac64Address', 'mac')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeIpv4MappedAddress(ns3::Ipv4Address addr) [member function]
cls.add_method('MakeIpv4MappedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv4Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Address ns3::Ipv6Address::MakeSolicitedAddress(ns3::Ipv6Address addr) [member function]
cls.add_method('MakeSolicitedAddress',
'ns3::Ipv6Address',
[param('ns3::Ipv6Address', 'addr')],
is_static=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Serialize(uint8_t * buf) const [member function]
cls.add_method('Serialize',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(char const * address) [member function]
cls.add_method('Set',
'void',
[param('char const *', 'address')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Address::Set(uint8_t * address) [member function]
cls.add_method('Set',
'void',
[param('uint8_t *', 'address')])
return
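
# Illustrative sketch (not executed, 'ns.network' path assumed): the static
# MakeAutoconfiguredLinkLocalAddress overloads registered above derive an
# IPv6 link-local address from a MAC address.
#
#   mac = ns.network.Mac48Address('00:00:00:00:00:01')
#   lla = ns.network.Ipv6Address.MakeAutoconfiguredLinkLocalAddress(mac)
#   lla.IsLinkLocal()                                # -> True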
def register_Ns3Ipv6Prefix_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t * prefix) [constructor]
cls.add_constructor([param('uint8_t *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(char const * prefix) [constructor]
cls.add_constructor([param('char const *', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(uint8_t prefix) [constructor]
cls.add_constructor([param('uint8_t', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const & prefix) [copy constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'prefix')])
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix::Ipv6Prefix(ns3::Ipv6Prefix const * prefix) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const *', 'prefix')])
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::GetBytes(uint8_t * buf) const [member function]
cls.add_method('GetBytes',
'void',
[param('uint8_t *', 'buf')],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetLoopback() [member function]
cls.add_method('GetLoopback',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetOnes() [member function]
cls.add_method('GetOnes',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): uint8_t ns3::Ipv6Prefix::GetPrefixLength() const [member function]
cls.add_method('GetPrefixLength',
'uint8_t',
[],
is_const=True)
## ipv6-address.h (module 'network'): static ns3::Ipv6Prefix ns3::Ipv6Prefix::GetZero() [member function]
cls.add_method('GetZero',
'ns3::Ipv6Prefix',
[],
is_static=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsEqual(ns3::Ipv6Prefix const & other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ipv6Prefix const &', 'other')],
is_const=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6Prefix::IsMatch(ns3::Ipv6Address a, ns3::Ipv6Address b) const [member function]
cls.add_method('IsMatch',
'bool',
[param('ns3::Ipv6Address', 'a'), param('ns3::Ipv6Address', 'b')],
is_const=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6Prefix::Print(std::ostream & os) const [member function]
cls.add_method('Print',
'void',
[param('std::ostream &', 'os')],
is_const=True)
return
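
# Illustrative sketch (not executed): an Ipv6Prefix built from a bare length
# uses the uint8_t constructor registered above.
#
#   prefix = ns.network.Ipv6Prefix(64)               # /64 prefix
#   prefix.GetPrefixLength()                         # -> 64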
def register_Ns3Mac48Address_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(ns3::Mac48Address const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48Address::Mac48Address(char const * str) [constructor]
cls.add_constructor([param('char const *', 'str')])
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::Allocate() [member function]
cls.add_method('Allocate',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::ConvertFrom(ns3::Address const & address) [member function]
cls.add_method('ConvertFrom',
'ns3::Mac48Address',
[param('ns3::Address const &', 'address')],
is_static=True)
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyFrom(uint8_t const * buffer) [member function]
cls.add_method('CopyFrom',
'void',
[param('uint8_t const *', 'buffer')])
## mac48-address.h (module 'network'): void ns3::Mac48Address::CopyTo(uint8_t * buffer) const [member function]
cls.add_method('CopyTo',
'void',
[param('uint8_t *', 'buffer')],
is_const=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetBroadcast() [member function]
cls.add_method('GetBroadcast',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv4Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv4Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast(ns3::Ipv6Address address) [member function]
cls.add_method('GetMulticast',
'ns3::Mac48Address',
[param('ns3::Ipv6Address', 'address')],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticast6Prefix() [member function]
cls.add_method('GetMulticast6Prefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): static ns3::Mac48Address ns3::Mac48Address::GetMulticastPrefix() [member function]
cls.add_method('GetMulticastPrefix',
'ns3::Mac48Address',
[],
is_static=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): bool ns3::Mac48Address::IsGroup() const [member function]
cls.add_method('IsGroup',
'bool',
[],
is_const=True)
## mac48-address.h (module 'network'): static bool ns3::Mac48Address::IsMatchingType(ns3::Address const & address) [member function]
cls.add_method('IsMatchingType',
'bool',
[param('ns3::Address const &', 'address')],
is_static=True)
return
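
# Illustrative sketch (not executed): Mac48Address.Allocate() hands out
# sequential addresses, and IsMatchingType()/ConvertFrom() bridge to and
# from the generic ns3::Address type.
#
#   a = ns.network.Mac48Address.Allocate()           # static allocator
#   a.IsBroadcast()                                  # -> False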
def register_Ns3NetDeviceContainer_methods(root_module, cls):
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'arg0')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer() [constructor]
cls.add_constructor([])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::Ptr<ns3::NetDevice> dev) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(std::string devName) [constructor]
cls.add_constructor([param('std::string', 'devName')])
## net-device-container.h (module 'network'): ns3::NetDeviceContainer::NetDeviceContainer(ns3::NetDeviceContainer const & a, ns3::NetDeviceContainer const & b) [constructor]
cls.add_constructor([param('ns3::NetDeviceContainer const &', 'a'), param('ns3::NetDeviceContainer const &', 'b')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::NetDeviceContainer other) [member function]
cls.add_method('Add',
'void',
[param('ns3::NetDeviceContainer', 'other')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(ns3::Ptr<ns3::NetDevice> device) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device')])
## net-device-container.h (module 'network'): void ns3::NetDeviceContainer::Add(std::string deviceName) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'deviceName')])
## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
[],
is_const=True)
## net-device-container.h (module 'network'): __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::NetDevice>*,std::vector<ns3::Ptr<ns3::NetDevice>, std::allocator<ns3::Ptr<ns3::NetDevice> > > > ns3::NetDeviceContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::NetDevice > const, std::vector< ns3::Ptr< ns3::NetDevice > > >',
[],
is_const=True)
## net-device-container.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::NetDeviceContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_const=True)
## net-device-container.h (module 'network'): uint32_t ns3::NetDeviceContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
return
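
# Illustrative sketch (not executed): NetDeviceContainer is the usual return
# type of ns-3 device helpers; Get(i)/GetN() registered above support the
# standard indexed loop.
#
#   for i in range(devices.GetN()):                  # 'devices' from a helper
#       dev = devices.Get(i)                         # ns3::Ptr<ns3::NetDevice>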
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
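
# Illustrative sketch (not executed): the attribute accessors registered
# above take an AttributeValue; 'ns.core.StringValue' is an assumption about
# the core bindings, used here only to show the calling convention.
#
#   obj.SetAttribute('Delay', ns.core.StringValue('2ms'))
#   ok = obj.SetAttributeFailSafe('Delay', ns.core.StringValue('2ms'))  # -> bool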
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactory::ObjectFactory(std::string typeId) [constructor]
cls.add_constructor([param('std::string', 'typeId')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h (module 'core'): ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h (module 'core'): void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h (module 'core'): void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
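
# Illustrative sketch (not executed): the ObjectFactory registrations above
# support the usual configure-then-create pattern.
#
#   factory = ns.core.ObjectFactory()                # 'ns.core' path assumed
#   factory.SetTypeId('ns3::DropTailQueue')          # any registered TypeId name
#   obj = factory.Create()                           # -> ns3::Ptr<ns3::Object>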
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
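
# Note: the SimpleRefCount instantiations in this file (this one and the
# DefaultDeleter variants further below) all expose the same minimal surface
# -- constructors plus the static Cleanup() -- because reference counting
# itself happens inside the C++ template, not in Python.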
def register_Ns3TagBuffer_methods(root_module, cls):
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(ns3::TagBuffer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TagBuffer const &', 'arg0')])
## tag-buffer.h (module 'network'): ns3::TagBuffer::TagBuffer(uint8_t * start, uint8_t * end) [constructor]
cls.add_constructor([param('uint8_t *', 'start'), param('uint8_t *', 'end')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::CopyFrom(ns3::TagBuffer o) [member function]
cls.add_method('CopyFrom',
'void',
[param('ns3::TagBuffer', 'o')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Read(uint8_t * buffer, uint32_t size) [member function]
cls.add_method('Read',
'void',
[param('uint8_t *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): double ns3::TagBuffer::ReadDouble() [member function]
cls.add_method('ReadDouble',
'double',
[])
## tag-buffer.h (module 'network'): uint16_t ns3::TagBuffer::ReadU16() [member function]
cls.add_method('ReadU16',
'uint16_t',
[])
## tag-buffer.h (module 'network'): uint32_t ns3::TagBuffer::ReadU32() [member function]
cls.add_method('ReadU32',
'uint32_t',
[])
## tag-buffer.h (module 'network'): uint64_t ns3::TagBuffer::ReadU64() [member function]
cls.add_method('ReadU64',
'uint64_t',
[])
## tag-buffer.h (module 'network'): uint8_t ns3::TagBuffer::ReadU8() [member function]
cls.add_method('ReadU8',
'uint8_t',
[])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::TrimAtEnd(uint32_t trim) [member function]
cls.add_method('TrimAtEnd',
'void',
[param('uint32_t', 'trim')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::Write(uint8_t const * buffer, uint32_t size) [member function]
cls.add_method('Write',
'void',
[param('uint8_t const *', 'buffer'), param('uint32_t', 'size')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteDouble(double v) [member function]
cls.add_method('WriteDouble',
'void',
[param('double', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU16(uint16_t data) [member function]
cls.add_method('WriteU16',
'void',
[param('uint16_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU32(uint32_t data) [member function]
cls.add_method('WriteU32',
'void',
[param('uint32_t', 'data')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU64(uint64_t v) [member function]
cls.add_method('WriteU64',
'void',
[param('uint64_t', 'v')])
## tag-buffer.h (module 'network'): void ns3::TagBuffer::WriteU8(uint8_t v) [member function]
cls.add_method('WriteU8',
'void',
[param('uint8_t', 'v')])
return
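
# Illustrative note (not executed): TagBuffer reads must mirror the writes
# byte-for-byte; a tag that serializes with WriteU32 then WriteDouble must
# deserialize with ReadU32 then ReadDouble in the same order.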
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
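
# Illustrative sketch (not executed): the TypeId lookups registered above
# give runtime access to type metadata, e.g. for attribute introspection.
#
#   tid = ns.core.TypeId.LookupByName('ns3::Object')  # 'ns.core' path assumed
#   for i in range(tid.GetAttributeN()):
#       info = tid.GetAttribute(i)                    # AttributeInformation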
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor]
cls.add_constructor([param('long double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
return
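
# Illustrative note (not executed): the operator registrations above make
# int64x64_t behave like a numeric type in Python, so expressions such as
# int64x64_t(1.5) * int64x64_t(2) multiply, compare, and print as expected.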
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
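
# Illustrative sketch (not executed): the aggregation API registered above
# is typically walked with the HasNext()/Next() iterator pair.
#
#   it = obj.GetAggregateIterator()
#   while it.HasNext():
#       aggregated = it.Next()                       # ns3::Ptr<ns3::Object const>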
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
cls.add_method('As',
'ns3::TimeWithUnit',
[param('ns3::Time::Unit const', 'unit')],
is_const=True)
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
cls.add_method('GetDays',
'double',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
cls.add_method('GetHours',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
cls.add_method('GetMinutes',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
cls.add_method('GetYears',
'double',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
cls.add_method('StaticInit',
'bool',
[],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
return
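# --- Illustrative sketch (editor's note, not generated output) ---
# Every registration function in this file follows the same pybindgen idiom:
# look up the wrapped C++ class on the root module, then declare each member
# with add_constructor()/add_method() using the 'param' helper imported at the
# top of this module. The function below restates one 'ns3::Time' method in
# isolation; it is illustrative only and is never called by main().
def _sketch_time_registration(root_module):
    cls = root_module['ns3::Time']
    # const member function returning double, exactly as registered above
    cls.add_method('GetSeconds', 'double', [], is_const=True)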
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3Channel_methods(root_module, cls):
## channel.h (module 'network'): ns3::Channel::Channel(ns3::Channel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Channel const &', 'arg0')])
## channel.h (module 'network'): ns3::Channel::Channel() [constructor]
cls.add_constructor([])
## channel.h (module 'network'): ns3::Ptr<ns3::NetDevice> ns3::Channel::GetDevice(uint32_t i) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## channel.h (module 'network'): uint32_t ns3::Channel::GetId() const [member function]
cls.add_method('GetId',
'uint32_t',
[],
is_const=True)
## channel.h (module 'network'): uint32_t ns3::Channel::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## channel.h (module 'network'): static ns3::TypeId ns3::Channel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3Ipv4AddressChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressChecker::Ipv4AddressChecker(ns3::Ipv4AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv4AddressValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4AddressValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4AddressValue::Ipv4AddressValue(ns3::Ipv4Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Address const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Address ns3::Ipv4AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Address',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4AddressValue::Set(ns3::Ipv4Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Address const &', 'value')])
return
def register_Ns3Ipv4MaskChecker_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskChecker::Ipv4MaskChecker(ns3::Ipv4MaskChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskChecker const &', 'arg0')])
return
def register_Ns3Ipv4MaskValue_methods(root_module, cls):
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue() [constructor]
cls.add_constructor([])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4MaskValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv4MaskValue const &', 'arg0')])
## ipv4-address.h (module 'network'): ns3::Ipv4MaskValue::Ipv4MaskValue(ns3::Ipv4Mask const & value) [constructor]
cls.add_constructor([param('ns3::Ipv4Mask const &', 'value')])
## ipv4-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv4MaskValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): bool ns3::Ipv4MaskValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv4-address.h (module 'network'): ns3::Ipv4Mask ns3::Ipv4MaskValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv4Mask',
[],
is_const=True)
## ipv4-address.h (module 'network'): std::string ns3::Ipv4MaskValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv4-address.h (module 'network'): void ns3::Ipv4MaskValue::Set(ns3::Ipv4Mask const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv4Mask const &', 'value')])
return
def register_Ns3Ipv6AddressChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressChecker::Ipv6AddressChecker(ns3::Ipv6AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressChecker const &', 'arg0')])
return
def register_Ns3Ipv6AddressValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6AddressValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6AddressValue::Ipv6AddressValue(ns3::Ipv6Address const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Address const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Address ns3::Ipv6AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Address',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6AddressValue::Set(ns3::Ipv6Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Address const &', 'value')])
return
def register_Ns3Ipv6PrefixChecker_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixChecker::Ipv6PrefixChecker(ns3::Ipv6PrefixChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixChecker const &', 'arg0')])
return
def register_Ns3Ipv6PrefixValue_methods(root_module, cls):
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue() [constructor]
cls.add_constructor([])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6PrefixValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Ipv6PrefixValue const &', 'arg0')])
## ipv6-address.h (module 'network'): ns3::Ipv6PrefixValue::Ipv6PrefixValue(ns3::Ipv6Prefix const & value) [constructor]
cls.add_constructor([param('ns3::Ipv6Prefix const &', 'value')])
## ipv6-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Ipv6PrefixValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): bool ns3::Ipv6PrefixValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## ipv6-address.h (module 'network'): ns3::Ipv6Prefix ns3::Ipv6PrefixValue::Get() const [member function]
cls.add_method('Get',
'ns3::Ipv6Prefix',
[],
is_const=True)
## ipv6-address.h (module 'network'): std::string ns3::Ipv6PrefixValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## ipv6-address.h (module 'network'): void ns3::Ipv6PrefixValue::Set(ns3::Ipv6Prefix const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Ipv6Prefix const &', 'value')])
return
def register_Ns3Mac48AddressChecker_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressChecker::Mac48AddressChecker(ns3::Mac48AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressChecker const &', 'arg0')])
return
def register_Ns3Mac48AddressValue_methods(root_module, cls):
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue() [constructor]
cls.add_constructor([])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Mac48AddressValue const &', 'arg0')])
## mac48-address.h (module 'network'): ns3::Mac48AddressValue::Mac48AddressValue(ns3::Mac48Address const & value) [constructor]
cls.add_constructor([param('ns3::Mac48Address const &', 'value')])
## mac48-address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::Mac48AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): bool ns3::Mac48AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## mac48-address.h (module 'network'): ns3::Mac48Address ns3::Mac48AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Mac48Address',
[],
is_const=True)
## mac48-address.h (module 'network'): std::string ns3::Mac48AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## mac48-address.h (module 'network'): void ns3::Mac48AddressValue::Set(ns3::Mac48Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Mac48Address const &', 'value')])
return
def register_Ns3NetDevice_methods(root_module, cls):
## net-device.h (module 'network'): ns3::NetDevice::NetDevice() [constructor]
cls.add_constructor([])
## net-device.h (module 'network'): ns3::NetDevice::NetDevice(ns3::NetDevice const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NetDevice const &', 'arg0')])
## net-device.h (module 'network'): void ns3::NetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Channel> ns3::NetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint32_t ns3::NetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): uint16_t ns3::NetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Address ns3::NetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): ns3::Ptr<ns3::Node> ns3::NetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): static ns3::TypeId ns3::NetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): void ns3::NetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_pure_virtual=True, is_virtual=True)
## net-device.h (module 'network'): bool ns3::NetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h (module 'core'): ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h (module 'core'): ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h (module 'core'): std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h (module 'core'): void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3AddressChecker_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressChecker::AddressChecker() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressChecker::AddressChecker(ns3::AddressChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressChecker const &', 'arg0')])
return
def register_Ns3AddressValue_methods(root_module, cls):
## address.h (module 'network'): ns3::AddressValue::AddressValue() [constructor]
cls.add_constructor([])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::AddressValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AddressValue const &', 'arg0')])
## address.h (module 'network'): ns3::AddressValue::AddressValue(ns3::Address const & value) [constructor]
cls.add_constructor([param('ns3::Address const &', 'value')])
## address.h (module 'network'): ns3::Ptr<ns3::AttributeValue> ns3::AddressValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## address.h (module 'network'): bool ns3::AddressValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## address.h (module 'network'): ns3::Address ns3::AddressValue::Get() const [member function]
cls.add_method('Get',
'ns3::Address',
[],
is_const=True)
## address.h (module 'network'): std::string ns3::AddressValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## address.h (module 'network'): void ns3::AddressValue::Set(ns3::Address const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Address const &', 'value')])
return
def register_Ns3BridgeChannel_methods(root_module, cls):
## bridge-channel.h (module 'bridge'): static ns3::TypeId ns3::BridgeChannel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bridge-channel.h (module 'bridge'): ns3::BridgeChannel::BridgeChannel() [constructor]
cls.add_constructor([])
## bridge-channel.h (module 'bridge'): void ns3::BridgeChannel::AddChannel(ns3::Ptr<ns3::Channel> bridgedChannel) [member function]
cls.add_method('AddChannel',
'void',
[param('ns3::Ptr< ns3::Channel >', 'bridgedChannel')])
## bridge-channel.h (module 'bridge'): uint32_t ns3::BridgeChannel::GetNDevices() const [member function]
cls.add_method('GetNDevices',
'uint32_t',
[],
is_const=True, is_virtual=True)
## bridge-channel.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeChannel::GetDevice(uint32_t i) const [member function]
cls.add_method('GetDevice',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'i')],
is_const=True, is_virtual=True)
return
def register_Ns3BridgeNetDevice_methods(root_module, cls):
## bridge-net-device.h (module 'bridge'): ns3::BridgeNetDevice::BridgeNetDevice() [constructor]
cls.add_constructor([])
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::AddBridgePort(ns3::Ptr<ns3::NetDevice> bridgePort) [member function]
cls.add_method('AddBridgePort',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'bridgePort')])
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::AddLinkChangeCallback(ns3::Callback<void,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> callback) [member function]
cls.add_method('AddLinkChangeCallback',
'void',
[param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetAddress() const [member function]
cls.add_method('GetAddress',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeNetDevice::GetBridgePort(uint32_t n) const [member function]
cls.add_method('GetBridgePort',
'ns3::Ptr< ns3::NetDevice >',
[param('uint32_t', 'n')],
is_const=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetBroadcast() const [member function]
cls.add_method('GetBroadcast',
'ns3::Address',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::Channel> ns3::BridgeNetDevice::GetChannel() const [member function]
cls.add_method('GetChannel',
'ns3::Ptr< ns3::Channel >',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint32_t ns3::BridgeNetDevice::GetIfIndex() const [member function]
cls.add_method('GetIfIndex',
'uint32_t',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint16_t ns3::BridgeNetDevice::GetMtu() const [member function]
cls.add_method('GetMtu',
'uint16_t',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetMulticast(ns3::Ipv4Address multicastGroup) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv4Address', 'multicastGroup')],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): ns3::Address ns3::BridgeNetDevice::GetMulticast(ns3::Ipv6Address addr) const [member function]
cls.add_method('GetMulticast',
'ns3::Address',
[param('ns3::Ipv6Address', 'addr')],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): uint32_t ns3::BridgeNetDevice::GetNBridgePorts() const [member function]
cls.add_method('GetNBridgePorts',
'uint32_t',
[],
is_const=True)
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::Node> ns3::BridgeNetDevice::GetNode() const [member function]
cls.add_method('GetNode',
'ns3::Ptr< ns3::Node >',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): static ns3::TypeId ns3::BridgeNetDevice::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsBridge() const [member function]
cls.add_method('IsBridge',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsBroadcast() const [member function]
cls.add_method('IsBroadcast',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsLinkUp() const [member function]
cls.add_method('IsLinkUp',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsMulticast() const [member function]
cls.add_method('IsMulticast',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::IsPointToPoint() const [member function]
cls.add_method('IsPointToPoint',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::NeedsArp() const [member function]
cls.add_method('NeedsArp',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::Send(ns3::Ptr<ns3::Packet> packet, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('Send',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SendFrom(ns3::Ptr<ns3::Packet> packet, ns3::Address const & source, ns3::Address const & dest, uint16_t protocolNumber) [member function]
cls.add_method('SendFrom',
'bool',
[param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'dest'), param('uint16_t', 'protocolNumber')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetAddress(ns3::Address address) [member function]
cls.add_method('SetAddress',
'void',
[param('ns3::Address', 'address')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetIfIndex(uint32_t const index) [member function]
cls.add_method('SetIfIndex',
'void',
[param('uint32_t const', 'index')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SetMtu(uint16_t const mtu) [member function]
cls.add_method('SetMtu',
'bool',
[param('uint16_t const', 'mtu')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetNode(ns3::Ptr<ns3::Node> node) [member function]
cls.add_method('SetNode',
'void',
[param('ns3::Ptr< ns3::Node >', 'node')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetPromiscReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::Address const&, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetPromiscReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::Address const &, ns3::NetDevice::PacketType, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::SetReceiveCallback(ns3::Callback<bool, ns3::Ptr<ns3::NetDevice>, ns3::Ptr<ns3::Packet const>, unsigned short, ns3::Address const&, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> cb) [member function]
cls.add_method('SetReceiveCallback',
'void',
[param('ns3::Callback< bool, ns3::Ptr< ns3::NetDevice >, ns3::Ptr< ns3::Packet const >, unsigned short, ns3::Address const &, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')],
is_virtual=True)
## bridge-net-device.h (module 'bridge'): bool ns3::BridgeNetDevice::SupportsSendFrom() const [member function]
cls.add_method('SupportsSendFrom',
'bool',
[],
is_const=True, is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ForwardBroadcast(ns3::Ptr<ns3::NetDevice> incomingPort, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address src, ns3::Mac48Address dst) [member function]
cls.add_method('ForwardBroadcast',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'incomingPort'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'src'), param('ns3::Mac48Address', 'dst')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ForwardUnicast(ns3::Ptr<ns3::NetDevice> incomingPort, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Mac48Address src, ns3::Mac48Address dst) [member function]
cls.add_method('ForwardUnicast',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'incomingPort'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Mac48Address', 'src'), param('ns3::Mac48Address', 'dst')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): ns3::Ptr<ns3::NetDevice> ns3::BridgeNetDevice::GetLearnedState(ns3::Mac48Address source) [member function]
cls.add_method('GetLearnedState',
'ns3::Ptr< ns3::NetDevice >',
[param('ns3::Mac48Address', 'source')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::Learn(ns3::Mac48Address source, ns3::Ptr<ns3::NetDevice> port) [member function]
cls.add_method('Learn',
'void',
[param('ns3::Mac48Address', 'source'), param('ns3::Ptr< ns3::NetDevice >', 'port')],
visibility='protected')
## bridge-net-device.h (module 'bridge'): void ns3::BridgeNetDevice::ReceiveFromDevice(ns3::Ptr<ns3::NetDevice> device, ns3::Ptr<const ns3::Packet> packet, uint16_t protocol, ns3::Address const & source, ns3::Address const & destination, ns3::NetDevice::PacketType packetType) [member function]
cls.add_method('ReceiveFromDevice',
'void',
[param('ns3::Ptr< ns3::NetDevice >', 'device'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocol'), param('ns3::Address const &', 'source'), param('ns3::Address const &', 'destination'), param('ns3::NetDevice::PacketType', 'packetType')],
visibility='protected')
return
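# --- Usage sketch (editor's note, not generated output) ---
# Once these bindings are compiled, the bridge classes registered above are
# reachable from Python. A minimal, hedged example; the 'ns.bridge' module
# path is the conventional ns-3 Python layout and is assumed here, not
# defined in this file:
#
#   import ns.bridge
#   bridge = ns.bridge.BridgeNetDevice()    # wraps ns3::BridgeNetDevice
#   ports = bridge.GetNBridgePorts()        # 0 for a freshly created bridge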
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
register_functions_ns3_TracedValueCallback(module.get_submodule('TracedValueCallback'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def register_functions_ns3_TracedValueCallback(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
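# Usage sketch (an assumption; the script filename is hypothetical): running
# this PyBindGen generator prints the C++ binding code on stdout, so a build
# step would typically redirect it to a source file:
#   python ns3_module_core.py > ns3module_bindings.cc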
|
gpl-2.0
|
chrys87/fenrir
|
src/fenrirscreenreader/commands/commands/set_bookmark_1.py
|
1
|
1527
|
#!/bin/python
# -*- coding: utf-8 -*-
# Fenrir TTY screen reader
# By Chrys, Storm Dragon, and contributors.
from fenrirscreenreader.core import debug
class command():
def __init__(self):
self.ID = '1'
def initialize(self, environment):
self.env = environment
self.env['commandBuffer']['bookMarks'][self.ID] = {}
def shutdown(self):
pass
def getDescription(self):
return _('set Bookmark {0}').format(self.ID,)
def run(self):
if not self.env['commandBuffer']['Marks']['1']:
self.env['runtime']['outputManager'].presentText(_("No mark found"), interrupt=True)
return
currApp = self.env['runtime']['applicationManager'].getCurrentApplication()
self.env['commandBuffer']['bookMarks'][self.ID][currApp] = {}
self.env['commandBuffer']['bookMarks'][self.ID][currApp]['1'] = self.env['commandBuffer']['Marks']['1'].copy()
if self.env['commandBuffer']['Marks']['2']:
self.env['commandBuffer']['bookMarks'][self.ID][currApp]['2'] = self.env['commandBuffer']['Marks']['2'].copy()
else:
self.env['commandBuffer']['bookMarks'][self.ID][currApp]['2'] = None
self.env['runtime']['outputManager'].presentText(_('Bookmark {0} set for application {1}').format(self.ID, currApp), interrupt=True)
self.env['commandBuffer']['Marks']['1'] = None
self.env['commandBuffer']['Marks']['2'] = None
def setCallback(self, callback):
pass
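# Rough lifecycle sketch (an assumption inferred from the class above; the
# actual dispatch is performed by fenrir's command manager):
#   cmd = command()
#   cmd.initialize(environment)  # reserves bookMarks['1'] in the command buffer
#   cmd.run()                    # copies Marks '1'/'2' into the bookmark, then clears them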
|
lgpl-3.0
|
tsabi/Odoo-tsabi-fixes
|
addons/analytic/report/analytic_balance.py
|
23
|
7061
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class account_analytic_balance(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_analytic_balance, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
'get_objects': self._get_objects,
'lines_g': self._lines_g,
'move_sum': self._move_sum,
'sum_all': self._sum_all,
'sum_balance': self._sum_balance,
'move_sum_balance': self._move_sum_balance,
})
self.acc_ids = []
self.read_data = []
self.empty_acc = False
        self.acc_data_dict = {}  # maps an account id to the ids of its descendant accounts
        self.acc_sum_list = []  # caches the full list of account ids being summed
def get_children(self, ids):
read_data = self.pool.get('account.analytic.account').read(self.cr, self.uid, ids,['child_ids','code','complete_name','balance'])
for data in read_data:
            if (data['id'] not in self.acc_ids):
                include_empty = True
                if (not self.empty_acc) and data['balance'] == 0.00:
                    include_empty = False
                if include_empty:
self.acc_ids.append(data['id'])
self.read_data.append(data)
if data['child_ids']:
self.get_children(data['child_ids'])
return True
def _get_objects(self, empty_acc):
if self.read_data:
return self.read_data
self.empty_acc = empty_acc
self.read_data = []
self.get_children(self.ids)
return self.read_data
def _lines_g(self, account_id, date1, date2):
account_analytic_obj = self.pool.get('account.analytic.account')
ids = account_analytic_obj.search(self.cr, self.uid,
[('parent_id', 'child_of', [account_id])])
self.cr.execute("SELECT aa.name AS name, aa.code AS code, \
sum(aal.amount) AS balance, sum(aal.unit_amount) AS quantity \
FROM account_analytic_line AS aal, account_account AS aa \
WHERE (aal.general_account_id=aa.id) \
AND (aal.account_id IN %s)\
AND (date>=%s) AND (date<=%s) AND aa.active \
GROUP BY aal.general_account_id, aa.name, aa.code, aal.code \
ORDER BY aal.code", (tuple(ids), date1, date2))
res = self.cr.dictfetchall()
for r in res:
if r['balance'] > 0:
r['debit'] = r['balance']
r['credit'] = 0.0
elif r['balance'] < 0:
r['debit'] = 0.0
r['credit'] = -r['balance']
            else:
                r['debit'] = 0.0
                r['credit'] = 0.0
return res
def _move_sum(self, account_id, date1, date2, option):
if account_id not in self.acc_data_dict:
account_analytic_obj = self.pool.get('account.analytic.account')
ids = account_analytic_obj.search(self.cr, self.uid,[('parent_id', 'child_of', [account_id])])
self.acc_data_dict[account_id] = ids
else:
ids = self.acc_data_dict[account_id]
query_params = (tuple(ids), date1, date2)
if option == "credit":
self.cr.execute("SELECT COALESCE(-sum(amount),0.0) FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount<0",query_params)
elif option == "debit":
self.cr.execute("SELECT COALESCE(sum(amount),0.0) FROM account_analytic_line \
WHERE account_id IN %s\
AND date>=%s AND date<=%s AND amount>0",query_params)
elif option == "quantity":
self.cr.execute("SELECT COALESCE(sum(unit_amount),0.0) FROM account_analytic_line \
WHERE account_id IN %s\
AND date>=%s AND date<=%s",query_params)
return self.cr.fetchone()[0] or 0.0
def _move_sum_balance(self, account_id, date1, date2):
debit = self._move_sum(account_id, date1, date2, 'debit')
credit = self._move_sum(account_id, date1, date2, 'credit')
return (debit-credit)
def _sum_all(self, accounts, date1, date2, option):
account_analytic_obj = self.pool.get('account.analytic.account')
        ids = [account['id'] for account in accounts]
if not ids:
return 0.0
if not self.acc_sum_list:
ids2 = account_analytic_obj.search(self.cr, self.uid,[('parent_id', 'child_of', ids)])
self.acc_sum_list = ids2
else:
ids2 = self.acc_sum_list
query_params = (tuple(ids2), date1, date2)
if option == "debit":
self.cr.execute("SELECT COALESCE(sum(amount),0.0) FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount>0",query_params)
elif option == "credit":
self.cr.execute("SELECT COALESCE(-sum(amount),0.0) FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount<0",query_params)
elif option == "quantity":
self.cr.execute("SELECT COALESCE(sum(unit_amount),0.0) FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s",query_params)
return self.cr.fetchone()[0] or 0.0
def _sum_balance(self, accounts, date1, date2):
debit = self._sum_all(accounts, date1, date2, 'debit') or 0.0
credit = self._sum_all(accounts, date1, date2, 'credit') or 0.0
return (debit-credit)
class report_analyticbalance(osv.AbstractModel):
_name = 'report.account.report_analyticbalance'
_inherit = 'report.abstract_report'
_template = 'analytic.report_analyticbalance'
_wrapped_report_class = account_analytic_balance
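# Usage sketch (hypothetical): the wrapped parser above is resolved through the
# report machinery, e.g. from server-side code:
#   self.pool['report'].get_action(cr, uid, ids,
#       'analytic.report_analyticbalance', context=context)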
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
krishauser/Klampt
|
Python/klampt/vis/visualization.py
|
1
|
170432
|
"""Klamp't visualization routines. See
`vistemplate.py in Klampt-examples <https://github.com/krishauser/Klampt-examples/Python/demos/vistemplate.py>`_
for an example of how to run this module.
OVERVIEW
--------
The visualization module lets you draw most Klamp't objects in a 3D world
using a simple interface. It also lets you customize the GUI using Qt
widgets, OpenGL drawing, and keyboard/mouse intercept routines.
Main features include:
- Simple interface to modify the visualization
- Simple interface to animate and render trajectories
- Simple interface to edit certain Klamp't objects (configurations, points,
transforms)
- Simple interface to draw text, text labels, and plots
- Multi-window, multi-viewport support
- Automatic camera setup
- Unified interface to PyQt, GLUT, IPython, and HTML backends.
- PyQT is the backend with the fullest amount of features.
- GLUT loses resource editing and advanced windowing functionality.
- IPython loses plugins, resource editing, custom drawing, and advanced
windowing functionality.
- HTML loses plugins, resource editing, custom drawing, and advanced
windowing functionality.
The resource editing functionality in the :mod:`klampt.io.resource` module
(which uses the widgets in :mod:`klampt.vis.editors`) use this module as well.
INSTRUCTIONS
-------------------
Basic use of the vis module is fairly straightforward:
0. (optional) Configure the rendering backend.
1. Add things to the visualization scene with ``vis.add(name,thing)``. Worlds,
geometries, points, transforms, trajectories, contact points, and more can
be added in this manner.
2. Modify the appearance of things using modifier calls like
``vis.setColor(name,r,g,b,a)``.
3. Launch windows and/or visualization thread (OpenGL or IPython modes)
4. Continue adding, modifying, and removing things as you desire.
More advanced functions allow you to dynamically launch multiple windows,
capture user input, and embed the visualization into Qt windows.
The default scene manager lets you set up and modify the visualization scene
(Steps 1 and 2). These just mirror the methods in
:class:`VisualizationScene`, which is how the default scene manager is
implemented. See Klampt-examples/Python/demos/vistemplate.py for more
examples.
To capture user interaction and add other functionality, you may create a
:class:`~klampt.vis.glinterface.GLPluginInterface` subclass to add
functionality on top of the default visualization world. To do so, call
``vis.pushPlugin(plugin)``. Note that custom rendering (the ``display()``
method) is only available with the OpenGL rendering backend.
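For instance, a minimal plugin sketch (an illustration, assuming the standard
hooks of :class:`~klampt.vis.glinterface.GLPluginInterface`)::
    from klampt.vis.glinterface import GLPluginInterface
    class KeyEcho(GLPluginInterface):
        def keyboardfunc(self,c,x,y):
            print("pressed",c)  #returning True consumes the event
            return True
    vis.pushPlugin(KeyEcho())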
Only one rendering backend can be chosen during the lifetime of your process,
and each backend has its own quirks with regards to window launching and
configuration. We'll describe the different options below.
OpenGL (PyQt, GLUT)
~~~~~~~~~~~~~~~~~~~~
OpenGL-based visualizations use either PyQt or GLUT to handle windowing and are
used by default. They run in the current process, have the best performance,
and offer the richest set of features.
Quick start
^^^^^^^^^^^^^^^
- To show the visualization and quit when the user closes the window::
vis.run()
- To show the visualization and return when the user closes the window::
vis.dialog()
... do stuff afterwards ...
vis.kill() #cleanup, not strictly needed but good practice
- To show the visualization and run a script alongside it until the user
closes the window (multithreaded mode)::
vis.show()
while vis.shown():
vis.lock()
... do stuff ...
[to exit the loop call vis.show(False)]
vis.unlock()
time.sleep(dt)
... do stuff afterwards ...
vis.kill() #cleanup, not strictly needed but good practice
- To show the visualization and run python commands until the user closes
the window (single-threaded mode)::
def callback():
... do stuff ...
[to exit the loop manually call vis.show(False)]
vis.loop(setup=vis.show,callback=callback)
vis.kill() #cleanup, not strictly needed but good practice
- To run a window with a custom plugin (GLPluginInterface) and terminate on
closure::
vis.run(plugin)
- To show a dialog or parallel window::
vis.setPlugin(plugin)
... then call
vis.dialog()
... or
vis.show()
... do stuff afterwards ...
vis.kill()
- To add a GLPluginInterface that just customizes a few things on top of
the default visualization::
vis.pushPlugin(plugin)
vis.dialog()
vis.popPlugin()
- To run plugins side-by-side in the same window::
vis.setPlugin(plugin1)
vis.addPlugin(plugin2) #this creates a new split-screen
vis.dialog()
... or
vis.show()
... do stuff afterwards ...
vis.kill()
- To run a custom Qt window or dialog containing a visualization window::
vis.setPlugin([desired plugin or None for visualization])
def makeMyUI(qtglwidget):
return MyQtMainWindow(qtglwidget)
vis.customUI(makeMyUI)
vis.dialog() #if you return a QDialog
... or
vis.show() #if you return a QWidget or QMainWindow
... do stuff afterwards ...
vis.kill()
- To launch a second window after the first is closed: just call whatever you
want again. Note: if show was previously called with a plugin and you wish to
revert to the default visualization, you should call setPlugin(None) first to
restore the default.
- To create a separate window with a given plugin::
w1 = vis.createWindow("Window 1") #w1=0
show()
w2 = vis.createWindow("Window 2") #w2=1
vis.setPlugin(plugin)
vis.dialog()
#to restore commands to the original window
vis.setWindow(w1)
while vis.shown():
...
vis.kill()
Implementation Details
^^^^^^^^^^^^^^^^^^^^^^
There are two primary modes of running OpenGL visualizations: multi-threaded
and single-threaded.
- Multi-threaded mode pops up a window using :func:`show`, and the caller can
then continue to interact with the vis module.
IMPORTANT: multi-threaded mode is only supported on some systems (Linux,
Windows using Qt). Due to weird OpenGL and Qt behavior in multi-threaded
programs, if you are using multithreaded mode, you should only interact
with OpenGL and the visualization using the methods in this module.
Custom OpenGL calls can be implemented inside GLPluginInterface plugins
and customDrawFunc.
- Single-threaded mode blocks the calling thread using :func:`loop`. To
interact with the scene, the caller will provide callbacks that can modify
the visualization world, pop up windows etc.
Single-threaded mode is the most compatible, and is the only mode that works
with GLUT and Mac OS.
There are also some convenience functions that will work in both modes, such
as :func:`run`, :func:`spin`, and :func:`dialog`.
.. note::
In multithreaded mode, when changing the data shown by the window (e.g.,
modifying the configurations of robots in a WorldModel) you must call
``vis.lock()`` before accessing the data and then call ``vis.unlock()``
afterwards.
The biggest drawback of single-threaded operation is that you can only start
blocking dialogs at the outer-most level, not inside loop(). So if you have
a GUI that's running in real-time, in a multi-threaded visualization your
code can pop up a dialog (like an editor) and the continue running with the
returned value. There are some workarounds in single-thread mode (providing
a callback to the dialog function) but these are not nearly as convenient.
It is possible to start in single-threaded mode and convert to multi-threaded,
but the converse is not possible.
In OpenGL mode, you can also completely override the scene manager and run your
own OpenGL calls using a subclass of
:class:`~klampt.vis.glinterface.GLPluginInterface`. Here, you will need to
perform all the necessary OpenGL drawing / interaction inside its hooks.
To use this, you should call ``vis.setPlugin(plugin)`` to override the default
visualization behavior before creating your window. See
Klampt-examples/Python/demos/visplugin.py for an example of this in use.
IPython (Jupyter notebook)
~~~~~~~~~~~~~~~~~~~~~~~~~~
IPython visualizations run in a Jupyter notebook in a web browser, using a
WebGL widget to render the content. The Python code communicates with the
browser upon certain calls to update the visualization.
This mode will be enabled by default inside a Jupyter notebook, or if you first
call ``vis.init('IPython')``. In the cell in which you want to show the WebGL
widget, call ``vis.show()``. To show multiple widgets you can create new vis
windows, and use ``setWindow`` to switch between which widget you'd like
subsequent calls to modify.
The WebGL widget is updated automatically upon ``addX``, ``setColor``,
``clear``, and ``hide`` calls, but it can't tell when something changes in the
world, like a robot configuration or object transform. When the state of
something in the world changes, you must manually make a call to
``vis.update()``.
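For example, a live update loop might look like the following (a sketch;
``robot`` and ``path`` are assumed to exist)::
    vis.show()
    for q in path:
        robot.setConfig(q)
        vis.update()
        time.sleep(1.0/30.0)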
.. note::
For optimal performance when calling a lot of scene modifiers, you should
use ``vis.lock() / unlock()`` calls to block off the start and end of
scene modification. Doing so means that the WebGL widget
is only re-rendered at the very end of your calls.
Note the semantics here are slightly different from the normal sense of
locking / unlocking, which is to prevent thread clashes amongst changes to
the underlying object data.
This mode does NOT support plugins, dialogs, or custom draw functions. Also,
certain types of geometries like VolumeGrids are not supported.
Animations are supported, but you will manually have to advance the animations
and call ``vis.update()`` or ``vis.scene().update()`` for each frame.
See :class:`~klampt.vis.ipython.widgets.Playback` for a convenient widget
that handles this somewhat automatically.
HTML
~~~~~~~~~~~~~~~~~~~~
This output mode outputs an HTML + Javascript page that uses WebGL to display
the scene. Unlike other methods, this is not a live visualization that
interacts with your Python script, but rather you create a scene / animation
and then retrieve the HTML afterwards. This mode is appropriate for Google
Colab, for example.
To show the HTML page in an IPython notebook (e.g., Jupyter or Colab),
call ``vis.show()`` after the animation is done. This will show the visualizer
in an IFrame, whose size is the current viewport size.
To use it with a specified time step, use the following::
vis.add("world",world)
dt = 1.0/30.0 #30fps
while not done:
... change the world, add / remove things, etc ...
vis.stepAnimation(dt)
vis.show()
To have it capture a real-time process, use the ``vis.update()`` function
instead, as follows::
vis.add("world",world)
while not done:
... change the world, add / remove things, etc ...
vis.update()
time.sleep(1.0/3.0)
vis.show()
(Don't mix animations and updates, or else the resulting animation will be
strange.)
You may also retrieve raw HTML, either by casting ``vis.nativeWindow()`` to
a str, calling ``vis.nativeWindow().iframe(width,height)``, or
``vis.nativeWindow().page()``. In the first case, the HTML does not contain
the <html> or <body> tags, and takes up the whole screen. The .iframe()
option places the code into an IFrame of a given size, and the page() option
wraps the code into a full HTML page.
For example, to turn a Trajectory ``traj`` into a WebGL animation, use this
code::
vis.add("world",world)
vis.animate(("world",world.robot(0).getName()),traj)
t = 0
while t < traj.endTime():
vis.stepAnimation(dt)
t += dt
vis.show()
To turn a Simulator into a WebGL animation, use this code::
sim = Simulator(world)
vis.add("world",world)
while not done:
... add forces, send commands to the robot, etc...
sim.simulate(dt)
vis.stepAnimation(dt)
vis.show()
WINDOWING API
--------------
- :func:`debug`: a super easy way to visualize Klamp't items.
- :func:`init`: initializes the visualization. The backend(s) to use can be
  configured here.
- :func:`createWindow`: creates a new visualization window and returns an
integer identifier.
- :func:`setWindow`: sets the active window for all subsequent calls. ID 0 is
the default visualization window.
- :func:`getWindow`: gets the active window ID.
- :func:`nativeWindow`: returns the current window object used by the backend.
- :func:`setWindowTitle`: sets the title of the visualization window.
- :func:`getWindowTitle`: returns the title of the visualization window.
- :func:`resizeWindow`: resizes the window. For OpenGL, this can be done
after the window is shown. Otherwise, it must take place before showing.
- :func:`scene`: returns the current :class:`VisualizationScene`
- :func:`setPlugin`: sets the current plugin (a
:class:`~klampt.vis.glinterface.GLPluginInterface` instance). This plugin
will now capture input from the visualization and can override any of the
default behavior of the visualizer. Set plugin=None if you want to return to
the default visualization.
- :func:`pushPlugin`: adds a new plugin (e.g., to capture input) on top of
the old one.
- :func:`splitView`: adds a second scene / viewport to the current
window. If a plugin is provided (a :class:`GLPluginInterface` instance) then
the new view is set to use this plugin.
- :func:`run`: pops up a dialog and then kills the program afterwards.
- :func:`kill`: kills all previously launched visualizations and terminates the
visualization thread. Afterwards, you may not be able to start new windows.
Call this to cleanly quit.
- :func:`multithreaded`: returns true if multithreading is available.
- :func:`loop`: Runs the visualization
thread inline with the main thread. The setup() function is called at the
start, the callback() function is run every time the event thread is idle,
and the cleanup() function is called on termination.
NOTE FOR MAC USERS: having the GUI in a separate thread is not supported on
Mac, so the ``loop`` function must be used rather than ``show``/``spin``.
NOTE FOR GLUT USERS: this may only be run once.
- :func:`dialog`: pops up a dialog box (does not return to calling thread until
closed).
- :func:`show`: shows/hides a visualization window. If not called
from the visualization loop, a new visualization thread is run in parallel
with the calling script.
- :func:`spin`: shows the visualization window for the desired amount
of time before returning, or until the user closes the window.
- :func:`shown`: returns true if the window is shown.
- :func:`lock`: locks the visualization scene for editing. The visualization
will be paused until unlock() is called.
- :func:`unlock` unlocks the visualization world. Must only be called once
after every lock().
- :func:`update`: manually triggers a redraw of the current scene.
- :func:`threadCall`: Call a user-defined function inside the visualization
thread. Advanced users may wish to inject calls that are incompatible with
being run outside the Qt or OpenGL thread.
- :func:`customUI`: launches a user-defined UI window with the OpenGL window
embedded into it (Qt only).
SCENE MODIFICATION API
------------------------
The following methods operate on the current scene (as returned from
:func:`scene`).
Objects accepted by :func:`add` include:
- text (str)
- Vector3 (3-list)
- Matrix3 (:mod:`klampt.math.so3` item)
- RigidTransform (:mod:`klampt.math.se3` item)
- Config (n-list): n must match the number of links of a RobotModel in "world"
- Configs (list of n-lists)
- :class:`Trajectory` and its subclasses
- polylines (use :class:`Trajectory` type)
- :class:`WorldModel`
- :class:`RobotModel`
- :class:`RobotModelLink`
- :class:`RigidObjectModel`
- :class:`TerrainModel`
- :class:`Geometry3D`
- :class:`GeometricPrimitive`
- :class:`PointCloud`
- :class:`TriangleMesh`
- :class:`Simulator`
- :class:`ContactPoint`
- objects from the :mod:`~klampt.model.coordinates` module.
See :func:`setAttribute` for a list of attributes that can be used to customize
an object's appearance. Common attributes include ``color``, ``hide_label``,
``type``, ``position`` (for text) and ``size`` (for points).
In OpenGL modes and IPython mode, many objects can be edited using the
:func:`edit` function. In OpenGL, this will provide a visual editing widget,
while in IPython this will pop up IPython widgets.
If you are modifying the internal data of an object in an external loop
(as opposed to inside a plugin) be sure to call :func:`lock`/:func:`unlock`
before/after doing so to prevent the visualization from accessing the
object's data.
Scene management functions
~~~~~~~~~~~~~~~~~~~~~~~~~~
- :func:`add`: adds an item to the
visualization. name is a unique identifier. If an item with the same name
already exists, it will no longer be shown. Keyword attributes can be
given to customize the appearance of the object (see :func:`setAttribute`.)
- :func:`clear`: clears the visualization world.
- :func:`listItems`: prints out all names of visualization objects in the scene
or under a given object
- :func:`getItemName`: retrieves the name / path of a given object in the
  scene, or returns None if the object doesn't exist.
- :func:`dirty`: marks the given item as dirty and recreates the OpenGL display
lists. You may need to call this if you modify an item's geometry, for
example.
- :func:`remove`: removes an item from the visualization.
- :func:`setItemConfig`: sets the configuration of a named item.
- :func:`getItemConfig`: returns the configuration of a named item.
- :func:`hide`: hides/unhides an item. The item is not removed, it just
becomes invisible.
- :func:`edit`: turns on/off visual editing of some item. Points, transforms,
``coordinates.Point``, ``coordinates.Transform``, ``coordinates.Frame``,
:class:`RobotModel`, and :class:`RigidObjectModel` are currently accepted.
- :func:`hideLabel`: hides/unhides an item's text label.
- :func:`setLabel`: changes an item's text label from its name to a custom
string.
- :func:`setAppearance`: changes the Appearance of an item.
- :func:`revertAppearance`: restores the Appearance of an item
- :func:`setAttribute`: sets an attribute to change an item's appearance.
- :func:`getAttribute`: gets an attribute of an item's appearance.
- :func:`getAttributes`: gets all relevant attributes of an item's appearance.
- :func:`setColor`: changes the color of an item.
- :func:`setDrawFunc`: sets a custom OpenGL drawing function for an item.
Animation functions
~~~~~~~~~~~~~~~~~~~~~~~~~~
- :func:`animate`: Starts an animation on an item. The animation may be a
:class:`Trajectory` or a list of configurations. Works with points, so3
elements, se3 elements, rigid objects, or robots.
- :func:`pauseAnimation`: Turns animation on/off.
- :func:`stepAnimation`: Moves forward the animation time by the given
amount, in seconds.
- :func:`animationTime`: Gets/sets the current animation time
- :func:`setTimeCallback`: sets a function that will return the global time
used by the animation timing, visualization plots, and movie-saving
functions. This must be monotonically non-decreasing.
Text and plots
~~~~~~~~~~~~~~~~
Like other items in the visualization scene, text and plots are referred to by
string identifiers.
Text is usually attached to 2D pixel coordinates, but in OpenGL mode can also
be attached to 3D points. Use the ``position`` attribute to control where the
text is located and the ``size`` attribute to control its size.
Plots are added to the scene and then items are added to the plot. The
configuration of a visualization item is then shown as a live display (OpenGL).
You may also log custom numeric data with :func:`logPlot` and event data using
:func:`logPlotEvent`.
- :func:`addText`: adds text to the visualizer.
- :func:`clearText`: clears all previously added text.
- :func:`addPlot`: creates a new empty plot.
- :func:`addPlotItem`: adds a visualization item to a plot.
- :func:`logPlot`: logs a custom visualization item to a plot
- :func:`logPlotEvent`: logs an event on the plot.
- :func:`hidePlotItem`: hides an item in the plot.
- :func:`setPlotDuration`: sets the plot duration.
- :func:`setPlotRange`: sets the y range of a plot.
- :func:`setPlotPosition`: sets the upper left position of the plot on the screen.
- :func:`setPlotSize`: sets the width and height of the plot.
- :func:`savePlot`: saves a plot to a CSV (extension .csv) or Trajectory
(extension .traj) file.
Global appearance / camera control functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- :func:`getViewport`: Returns the :class:`GLViewport` for the currently active
view.
- :func:`setViewport`: Sets the :class:`GLViewport` for the currently active
scene. (This may also be used to resize windows.)
- :func:`setBackgroundColor`: Sets the background color for the active
view.
- :func:`autoFitCamera`: Automatically fits the camera to all objects in the
visualization. A scale > 1 magnifies the zoom.
- :func:`followCamera`: Sets the camera to follow a target.
- :func:`saveJsonConfig`: Saves the configuration to a JSON object or JSON
file.
- :func:`loadJsonConfig`: Loads the configuration from a JSON object or JSON
file.
- :func:`screenshot`: returns a screenshot of the scene. Can retrieve depth,
as well.
- :func:`screenshotCallback`: sets a callback that will receive a screenshot
of the scene after rendering is done.
Utility functions
~~~~~~~~~~~~~~~~~
- :func:`objectToVisType` Auto-determines a type of object compatible with the
visualizer.
- :func:`autoFitViewport`: Automatically fits a :class:`GLViewport` to see all
the given objects.
NAMING CONVENTION
-----------------
The world, if one exists, should be given the name 'world'. Configurations and
paths are drawn with reference to the first robot in the world.
All items that refer to a name (except add) can either be given a top level
item name (a string) or a sub-item (a sequence of strings, given a path from
the root to the leaf). For example, if you've added a RobotWorld under the
name 'world' containing a robot called 'myRobot', then::
vis.setColor(('world','myRobot'),0,1,0)
will turn the robot green. If 'link5' is the robot's 5th link, then::
vis.setColor(('world','myRobot','link5'),0,0,1)
will turn the 5th link blue.
A shortcut is to use the :func:`getItemName` function on the given robot,
e.g.::
robot = world.robot(0)
vis.setColor(vis.getItemName(robot),0,0,1)
If there's a Simulator instance added to the scene under the name 'sim' or
'simulator', the animation timing for Qt movie saving and HTML animations
will follow the simulation time rather than wall clock time.
"""
import threading
from ..robotsim import *
from ..math import vectorops,so3,se3
from . import gldraw
from . import glinit
from .glinterface import GLPluginInterface
from .glprogram import GLPluginProgram
from . import glcommon
import math
import time
import signal
import weakref
import sys
from ..model import types
from ..model import config
from ..model import coordinates
from ..model.subrobot import SubRobotModel
from ..model.trajectory import *
from ..model.multipath import MultiPath
from ..model.contact import ContactPoint,Hold
from ..model.collide import bb_empty,bb_create,bb_union
import warnings
#the global lock for all visualization calls
_globalLock = threading.RLock()
#the chosen backend
_backend = None
#the _WindowManager instance
_window_manager = None
#the OpenGL module (will be taken from glinit.init())
GL = None
def _isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
elif shell == 'Shell':
if 'google.colab' in sys.modules:
return True
return False # Other type (?)
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
def init(backends=None):
"""Initializes the vis module using some visualization backend. `backends`
can be None, in which case it tries using PyQt, then GLUT, then IPython in
that order. It can also be a string or list of strings from the following
set:
- 'PyQt': uses PyQT + OpenGL
- 'PyQt4' / 'PyQt5': uses a specific version of PyQT
- 'GLUT': uses GLUT + OpenGL
- 'IPython': uses an IPython widget
- 'HTML': outputs an HTML / Javascript widget
"""
global _backend,_window_manager,GL
if backends is None:
if _isnotebook():
if 'google.colab' in sys.modules:
backends = ['HTML']
else:
backends = ['IPython']
else:
backends = ['PyQt','GLUT','HTML']
if isinstance(backends,str):
backends = [backends]
if _backend is not None:
#already initialized
if _backend in backends or _backend[:4] in backends:
if _backend in ['IPython','HTML']:
#force a reset for IPython / HTML
_window_manager.reset()
return _backend
print("vis.init(): Trying to reset from backend",_backend,"to one of",backends)
_window_manager.reset()
_window_manager = None
_backend = None
OpenGLBackends = ['PyQt','PyQt4','PyQt5','GLUT']
order = [[]]
for backend in backends:
if backend in ['IPython','HTML']:
order.append(backend)
order.append([])
else:
order[-1].append(backend)
for trials in order:
if trials == 'IPython':
_backend = 'IPython'
from .backends import vis_ipython
_window_manager = vis_ipython.IPythonWindowManager()
return _backend
elif trials == 'HTML':
_backend = 'HTML'
from .backends import vis_html
_window_manager = vis_html.HTMLWindowManager()
return _backend
elif len(trials)>0:
res = glinit.init(trials)
GL = glinit.GL()
if res is not None:
_backend = glinit.active()
if glinit.active() == 'GLUT':
from .backends import vis_glut
_window_manager = vis_glut.GLUTWindowManager()
warnings.warn("klampt.visualization: QT is not available, falling back to poorer\nGLUT interface. Returning to another GLUT thread will not work\nproperly.")
else:
from .backends import vis_qt
_window_manager = vis_qt.QtWindowManager()
import atexit
atexit.register(kill)
return res
return None
def _init():
global _backend
if _backend is not None:
return
if init() is None:
raise RuntimeError("Unable to initialize visualization")
def debug(*args,**kwargs):
"""A super easy way to visualize Klamp't items.
The argument list can be a list of Klamp't items, and can also include
strings or dicts. If a string precedes an item, then it will be labeled
by the string. If a dict follows an item, the dict will specify
attributes for the item. It can also contain the 'animation' key, in
which case it should contain a Trajectory animating the item.
Keyword arguments may include:
- title: the window title
- animation: if only one item is given, sets a looping animation
- centerCamera: the name of the item that the camera should look at, or
True to center on the whole scene.
- followCamera: the name of the item that the camera will follow if
animated, or None (default).
- dialog: True if a dialog should be shown (default), False if a standard
show() should be used.
- anything else: Treated as named klamp't item.
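    Example (a sketch; ``robot`` and ``traj`` are assumed to exist)::
        vis.debug(robot, {'animation': traj}, title="Robot playback",
            centerCamera=True)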
"""
global _backend
_init()
oldWindow = getWindow()
if oldWindow is None:
oldWindow = 0
myWindow = createWindow()
nextName = None
lastName = None
itemcount = 0
if 'world' in kwargs:
add('world',kwargs['world'])
del kwargs['world']
animationDuration = 0
for i,arg in enumerate(args):
if isinstance(arg,str):
nextName = arg
elif isinstance(arg,dict):
if lastName is None:
warnings.warn("vis.debug(): dict of attributes must follow an object")
continue
for (k,v) in arg.items():
if k == 'animation':
animate(lastName,v)
animationDuration = max(animationDuration,v.endTime())
else:
try:
setAttribute(lastName,k,v)
except Exception:
warnings.warn("vis.debug(): Couldn't set attribute {} of item {}".format(k,lastName))
else:
label = None
if nextName is None:
name = None
if hasattr(arg,'getName'):
try:
name = arg.getName()
except Exception:
pass
if hasattr(arg,'name') and isinstance(arg.name,str):
name = arg.name
if name is None:
try:
                        itemtype = types.objectToTypes(arg)
                        if isinstance(itemtype,list):
                            itemtype = itemtype[0]
                        name = itemtype + '['+str(itemcount)+']'
except ValueError:
name = 'item['+str(itemcount)+']'
else:
name = nextName
label = name
add(name,arg)
itemcount += 1
lastName = name
nextName = None
title = None
doDialog = True
centerCamera = None
followCameraItem = None
animation = None
for k,v in kwargs.items():
if k=='title':
title = v
elif k=='world':
pass
elif k=='dialog':
doDialog = v
elif k=='animation':
animation = v
elif k=='followCamera':
followCameraItem = v
elif k=='centerCamera':
centerCamera = v
else:
add(k,v)
lastName = k
itemcount += 1
if title is not None:
setWindowTitle(title)
else:
setWindowTitle("Klampt debugging: "+','.join(scene().items.keys()))
if animation is not None:
animate(lastName,animation)
if centerCamera is True:
autoFitCamera(rotate=False)
elif centerCamera:
if isinstance(centerCamera,int):
centerCamera = 'item['+str(centerCamera)+']'
elif not isinstance(centerCamera,(str,tuple)):
centerCamera = getItemName(centerCamera)
if centerCamera is None:
warnings.warn("vis.debug(): could not center camera, invalid object name")
else:
vp = getViewport()
try:
autoFitViewport(vp,[scene().getItem(centerCamera)],rotate=False)
setViewport(vp)
except Exception:
warnings.warn("vis.debug(): Centering camera failed")
import traceback
traceback.print_exc()
if followCameraItem is not None:
if followCameraItem is True:
followCamera(lastName,center=True)
else:
if not isinstance(followCameraItem,(str,tuple)):
followCameraItem = getItemName(followCameraItem)
if followCameraItem is None:
warnings.warn("vis.debug(): could not follow camera, invalid object name")
followCamera(followCameraItem,center=True)
if _backend == 'HTML':
#dump out the animation
if animation is not None:
animationDuration = max(animationDuration,animation.endTime())
if animationDuration > 0:
dt = 1.0/30.0
t = 0
while t < animationDuration:
stepAnimation(dt)
t += dt
show()
setWindow(oldWindow)
elif _backend == 'IPython':
#setup a Playback widget from the animation
if animation is not None:
animationDuration = max(animationDuration,animation.endTime())
if animationDuration > 0:
framerate = 30
my_scene = scene()
def advance():
my_scene.stepAnimation(1.0/framerate)
my_scene.update()
def reset():
my_scene.animationTime(0)
from .ipython.widgets import Playback
playback = Playback(nativeWindow(),advance=advance,reset=reset,maxframes=int(animationDuration*framerate),framerate=framerate)
from IPython.display import display
display(playback)
show()
else:
if doDialog:
dialog()
setWindow(oldWindow)
else:
show()
#open ended...
def nativeWindow():
"""Returns the active window data used by the backend. The result will be
a subclass of :class:`~klampt.vis.glprogram.GLPluginProgram` if OpenGL is
    used (PyQt or GLUT) or a :class:`~klampt.vis.ipython.widgets.KlamptWidget`
    if the IPython backend is used.
    """
global _window_manager
if _window_manager is None:
return None
return _window_manager.frontend()
def scene():
"""Returns the active window data used by the backend. The result will be
a subclass of :class:`VisualizationScene`.
"""
global _window_manager
if _window_manager is None:
return None
return _window_manager.scene()
def createWindow(title=None):
"""Creates a new window (and sets it active).
Returns:
int: an identifier of the window (for use with :func:`setWindow`).
"""
global _globalLock,_window_manager
_init()
with _globalLock:
id = _window_manager.createWindow(title)
return id
def setWindow(id):
"""Sets currently active window.
Note:
ID 0 is the default visualization window.
"""
global _globalLock,_window_manager
_init()
with _globalLock:
_window_manager.setWindow(id)
def getWindow():
"""Retrieves ID of currently active window or -1 if no window is active"""
global _window_manager
_init()
return _window_manager.getWindow()
def setPlugin(plugin):
"""Lets the user capture input via a glinterface.GLPluginInterface class.
Set plugin to None to disable plugins and return to the standard
visualization.
Args:
plugin (GLPluginInterface): a plugin that will hereafter capture input
from the visualization and can override any of the default behavior
of the visualizer. Can be set to None if you want to return to the
default visualization.
"""
global _globalLock,_window_manager
_init()
with _globalLock:
_window_manager.setPlugin(plugin)
def pushPlugin(plugin):
"""Adds a new plugin on top of the old one.
Args:
plugin (GLPluginInterface): a plugin that will optionally intercept GUI
callbacks. Unhandled callbacks will be forwarded to the next plugin
on the stack.
"""
global _globalLock,_window_manager
_init()
with _globalLock:
_window_manager.pushPlugin(plugin)
def popPlugin():
"""Reverses a prior pushPlugin() call"""
global _window_manager
_init()
with _globalLock:
_window_manager.popPlugin()
def splitView(plugin=None):
"""Adds a second OpenGL viewport in the same window, governed by the given
plugin.
Args:
plugin (GLPluginInterface): the plugin used for the second viewport.
If None, the new viewport will have the default visualization
plugin.
"""
global _window_manager
_init()
with _globalLock:
_window_manager.splitView(plugin)
def addPlugin(plugin=None):
"""Adds a second OpenGL viewport in the same window, governed by the given
plugin. DEPRECATED: use :func:`splitView` instead.
Args:
plugin (GLPluginInterface): the plugin used for the second viewport.
If None, the new viewport will have the default visualization
plugin.
"""
splitView(plugin)
def run(plugin=None):
"""A blocking call to start a single window and then kill the visualization
once the user closes the window.
Args:
plugin (GLPluginInterface, optional): If given, the plugin used to handle all
rendering and user input. If plugin is None, the default visualization is
used.
Note:
Works in both multi-threaded and single-threaded mode.
"""
global _window_manager
_init()
if plugin is not None:
setPlugin(plugin)
_window_manager.run()
kill()
def multithreaded():
"""Returns true if the current GUI system allows multithreading. Useful for apps
that will work cross-platform with Macs and systems with only GLUT.
"""
global _window_manager
_init()
return _window_manager.multithreaded()
def dialog():
"""A blocking call to start a single dialog window with the current plugin. It is
closed by pressing OK or closing the window."""
global _window_manager
_init()
return _window_manager.dialog()
def setWindowTitle(title):
global _window_manager
_init()
_window_manager.setWindowName(title)
def getWindowTitle():
global _window_manager
return _window_manager.getWindowName()
def resizeWindow(w,h):
"""Resizes the current window. For OpenGL, this can be done after the
window is shown. Otherwise, it must take place before showing."""
global _window_manager
return _window_manager.resizeWindow(w,h)
def kill():
"""This should be called at the end of the calling program to cleanly terminate the
visualization thread"""
global _backend,_window_manager
if _backend is None:
return
_window_manager.kill()
_window_manager = None
_backend = None
def loop(setup=None,callback=None,cleanup=None):
"""Runs the visualization thread inline with the main thread.
The setup() function is called at the start, the callback() function is run
every time the event thread is idle, and the cleanup() function is called
on termination.
NOTE FOR MAC USERS: a multithreaded GUI is not supported on Mac, so the loop()
function must be used rather than "show and wait".
NOTE FOR GLUT USERS: this may only be run once.
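    Example (a sketch)::
        def setup():
            vis.show()
        def callback():
            pass  #poll state, modify the scene, etc.
        vis.loop(setup=setup, callback=callback)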
"""
global _window_manager
_init()
_window_manager.loop(setup,callback,cleanup)
def show(display=True):
"""Shows or hides the current window.
NOTE FOR MAC USERS: due to a lack of support of multithreading on Mac, this
will not work outside of the setup / callback / cleanup functions given in a
call to loop().
"""
global _window_manager
_init()
with _globalLock:
if display:
_window_manager.show()
else:
_window_manager.hide()
def spin(duration):
"""Spin-shows a window for a certain duration or until the window is closed."""
global _window_manager
_init()
_window_manager.spin(duration)
def lock():
"""Begins a locked section. Needs to be called any time you modify a
visualization item outside of the visualization thread. unlock() must be
called to let the visualization thread proceed.
"""
global _window_manager
_window_manager.lock()
def unlock():
"""Ends a locked section acquired by lock()."""
global _window_manager
_window_manager.unlock()
def update():
"""Manually triggers a redraw of the current window."""
global _window_manager
_window_manager.update()
def shown():
"""Returns true if a visualization window is currently shown."""
global _globalLock,_window_manager
_init()
with _globalLock:
res = _window_manager.shown()
return res
def customUI(func):
"""Tells the next created window/dialog to use a custom UI function. Only
available in PyQT mode.
This is used to build custom editors and windows that are compatible with
other UI functionality.
Args:
func (function): a 1-argument function that takes a configured Klamp't
QtWindow as its argument and returns a QDialog, QMainWindow, or
QWidget.
(Could also be used with GLUT, but what would you do with a GLUTWindow?)
"""
global _globalLock,_window_manager
_init()
with _globalLock:
_window_manager.set_custom_ui(func)
def threadCall(func):
"""Call `func` inside the visualization thread. This is useful for some
odd calls that are incompatible with being run outside the Qt or OpenGL
thread.
Possible use cases include performing extra OpenGL draws for camera
simulation.
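    Example (a sketch; the call is executed later, inside the vis thread)::
        vis.threadCall(lambda: print("hello from the vis thread"))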
"""
global _globalLock,_window_manager
with _globalLock:
_window_manager.threadCall(func)
######### CONVENIENCE ALIASES FOR VisualizationScene methods ###########
def addAction(hook,short_text,key=None,description=None):
"""Adds a callback to the window that can be triggered by menu choice or
keyboard. Alias for nativeWindow().addAction().
Args:
hook (function): a python callback function, taking no arguments, called
when the action is triggered.
short_text (str): the text shown in the menu bar.
key (str, optional): a shortcut keyboard command (e.g., can be 'k' or 'Ctrl+k').
description (str, optional): if provided, this is a tooltip that shows up
when the user hovers their mouse over the menu item.
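    Example (a sketch)::
        vis.addAction(lambda: print("triggered"), "Do thing", key='Ctrl+d',
            description="Prints a message when triggered")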
"""
_init()
nativeWindow().addAction(hook,short_text,key,description)
def clear():
"""Clears the visualization world."""
if _backend is None:
return
scene().clear()
def add(name,item,keepAppearance=False,**kwargs):
"""Adds an item to the visualization.
Args:
name (str): a unique identifier. If an item with the same name already
exists, it will no longer be shown.
keepAppearance (bool, optional): if True, then if there was an item that
had the same name, the prior item's appearance will be kept.
kwargs: key-value pairs to be added into the attributes dictionary. e.g.
vis.add("geom",geometry,color=[1,0,0,1]) adds a geometry while setting
its color to red.
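    Example (a sketch)::
        vis.add("target", [0.5, 0.0, 1.0], color=(1, 0, 0, 1), size=10)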
"""
_init()
scene().add(name,item,keepAppearance,**kwargs)
def listItems(name=None,indent=0):
_init()
scene().listItems(name,indent)
def getItemName(object):
"""Retrieves the name / path of a given object in the scene, or returns
    None if the object doesn't exist.
"""
if _backend is None:
return None
return scene().getItemName(object)
def dirty(item_name='all'):
"""Marks the given item as dirty and recreates the OpenGL display lists. You may need
to call this if you modify an item's geometry, for example. If things start disappearing
from your world when you create a new window, you may need to call this too."""
scene().dirty(item_name)
def animate(name,animation,speed=1.0,endBehavior='loop'):
"""Sends an animation to the named object.
Works with points, so3 elements, se3 elements, rigid objects, or robots, and may work
with other objects as well.
Args:
animation: may be a Trajectory or a list of configurations.
speed (float, optional): a modulator on the animation speed. If the animation
is a list of milestones, it is by default run at 1 milestone per second.
endBehavior (str, optional): either 'loop' (animation repeats forever) or 'halt'
(plays once).
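    Example (a sketch; ``traj`` is an assumed Trajectory for 'myRobot')::
        vis.animate(('world', 'myRobot'), traj, speed=0.5, endBehavior='halt')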
"""
scene().animate(name,animation,speed,endBehavior)
def pauseAnimation(paused=True):
"""Pauses or unpauses the animation."""
scene().pauseAnimation(paused)
def stepAnimation(amount):
"""Moves forward the animation time by ``amount``, given in seconds"""
scene().stepAnimation(amount)
def animationTime(newtime=None):
"""Gets/sets the current animation time
If newtime is None (default), this gets the animation time.
If newtime is not None, this sets a new animation time.
"""
return scene().animationTime(newtime)
def setTimeCallback(timefunc=None):
"""Sets a function that will return the window's global time. This
will be used by the animation timing, visualization plots, and movie-saving
functions.
Args:
timefunc (callable): returns a monotonically non-decreasing float.
If None, reverts back to using time.time().
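    Example (a sketch; drives timing from a simulator clock)::
        sim = Simulator(world)
        vis.setTimeCallback(lambda: sim.getTime())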
"""
_init()
scene().setTimeCallback(timefunc)
def remove(name):
"""Removes an item from the visualization"""
return scene().remove(name)
def getItemConfig(name):
"""Returns a configuration of an item from the visualization. Useful for
interacting with edited objects.
Returns:
list: a list of floats describing the item's current configuration. Returns
        None if name doesn't refer to an object."""
return scene().getItemConfig(name)
def setItemConfig(name,value):
"""Sets a configuration of an item from the visualization.
Args:
name (str): the item to set the configuration of.
value (list of floats): the item's configuration. The number of items
depends on the object's type. See the config module for more information.
"""
return scene().setItemConfig(name,value)
def setLabel(name,text):
"""Changes the label of an item in the visualization"""
setAttribute(name,"label",text)
def hideLabel(name,hidden=True):
"""Hides or shows the label of an item in the visualization"""
return scene().hideLabel(name,hidden)
def hide(name,hidden=True):
"""Hides an item in the visualization.
Note: the opposite of hide() is not show(), it's hide(False).
"""
scene().hide(name,hidden)
def edit(name,doedit=True):
"""Turns on/off visual editing of some item.
In OpenGL mode, currently accepts items of type:
- Vector3 (3-list)
- Matrix3 (9-list)
- Config (n-list, where n is the # of robot links)
- RigidTransform (se3 object)
- :class:`~klampt.robotsim.RobotModel`
- :class:`~klampt.robotsim.RigidObjectModel`
- :class:`~klampt.model.coordinates.Point`
- :class:`~klampt.model.coordinates.Transform`
- :class:`~klampt.model.coordinates.Frame`
In IPython mode, currently accepts items of type:
- Vector3 (3-lists)
- Config (n-list, where n is the # of robot links)
- RigidTransform (se3 objects)
- :class:`~klampt.robotsim.RobotModel`
- :class:`~klampt.robotsim.RigidObjectModel`
- :class:`~klampt.model.coordinates.Point`
- :class:`~klampt.model.coordinates.Transform`
- :class:`~klampt.model.coordinates.Frame`
"""
scene().edit(name,doedit)
def setAppearance(name,appearance):
"""Changes the Appearance of an item, for an item that uses the Appearance
item to draw (config, geometry, robots, rigid bodies).
"""
scene().setAppearance(name,appearance)
def setAttribute(name,attr,value):
"""Sets an attribute of an item's appearance.
Args:
name (str): the name of the item
attr (str): the name of the attribute (see below)
value: the value (see below)
Accepted attributes are:
- 'robot': the index of the robot associated with this (default 0)
- 'color': the item's color (r,g,b) or (r,g,b,a)
- 'size': the size of the plot, text, point, ContactPoint, or IKObjective
- 'length': the length of axes in RigidTransform, or normal in ContactPoint
- 'width': the width of axes and trajectory curves
- 'duration': the duration of a plot
- 'pointSize': for a trajectory, the size of points (default None, set to 0
to disable drawing points)
    - 'pointColor': for a trajectory, the color of points (default None)
- 'endeffectors': for a RobotTrajectory, the list of end effectors to plot
(default the last link).
- 'maxConfigs': for a Configs resource, the maximum number of drawn
configurations (default 10)
- 'fancy': for RigidTransform objects, whether the axes are drawn with
boxes or lines (default False)
- 'type': for ambiguous items, like a 3-item list when the robot has 3
links, specifies the type to be used. For example, 'Config' draws the
item as a robot configuration, while 'Vector3' or 'Point' draws it as a
point.
- 'label': a replacement label (str)
- 'hide_label': if True, the label will be hidden
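    Example (a sketch)::
        vis.setAttribute(('world', 'myRobot'), 'color', (0, 1, 0, 1))
        vis.setAttribute('traj', 'width', 3.0)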
"""
scene().setAttribute(name,attr,value)
def getAttribute(name,attr):
"""Gets an attribute of an item's appearance. If not previously set by the
user, the default value will be returned.
Args:
name (str): the name of the item
attr (str): the name of the attribute (see :func:`setAttribute`)
"""
return scene().getAttribute(name,attr)
def getAttributes(name):
"""Gets a dictionary of all relevant attributes of an item's appearance.
If not previously set by the user, default values will be returned.
Args:
name (str): the name of the item
"""
return scene().getAttributes(name)
def revertAppearance(name):
scene().revertAppearance(name)
def setColor(name,r,g,b,a=1.0):
scene().setColor(name,r,g,b,a)
def setDrawFunc(name,func):
"""Sets a custom OpenGL drawing function for an item.
Args:
name (str): the name of the item
func (function or None): a one-argument function draw(data) that takes the item data
as input. Set func to None to revert to default drawing.
"""
scene().setDrawFunc(name,func)
def _getOffsets(object):
if isinstance(object,WorldModel):
res = []
for i in range(object.numRobots()):
res += _getOffsets(object.robot(i))
for i in range(object.numRigidObjects()):
res += _getOffsets(object.rigidObject(i))
return res
elif isinstance(object,RobotModel):
q = object.getConfig()
object.setConfig([0.0]*len(q))
worig = [object.link(i).getTransform()[1] for i in range(object.numLinks())]
object.setConfig(q)
wnew = [object.link(i).getTransform()[1] for i in range(object.numLinks())]
return [vectorops.sub(b,a) for a,b in zip(worig,wnew)]
elif isinstance(object,RobotModelLink):
return [object.getTransform()[1]]
elif isinstance(object,RigidObjectModel):
return [object.getTransform()[1]]
elif isinstance(object,Geometry3D):
return [object.getCurrentTransform()[1]]
elif isinstance(object,VisAppearance):
res = _getOffsets(object.item)
if len(res) != 0: return res
if len(object.subAppearances) == 0:
bb = object.getBounds()
if bb is not None and not bb_empty(bb):
return [vectorops.mul(vectorops.add(bb[0],bb[1]),0.5)]
else:
res = []
for a in object.subAppearances.values():
res += _getOffsets(a)
return res
return []
def _getBounds(object):
if isinstance(object,WorldModel):
res = []
for i in range(object.numRobots()):
res += _getBounds(object.robot(i))
for i in range(object.numRigidObjects()):
res += _getBounds(object.rigidObject(i))
return res
elif isinstance(object,RobotModel):
res = []
for i in range(object.numLinks()):
bb = object.link(i).geometry().getBB()
if bb is not None and not bb_empty(bb):
res += list(bb)
return res
elif isinstance(object,RobotModelLink):
return list(object.geometry().getBB())
elif isinstance(object,RigidObjectModel):
return list(object.geometry().getBB())
elif isinstance(object,Geometry3D):
return list(object.getBB())
elif isinstance(object,VisAppearance):
if len(object.subAppearances) == 0:
if isinstance(object.item,TerrainModel):
return []
bb = object.getBounds()
if bb is not None and not bb_empty(bb):
return list(bb)
else:
res = []
for a in object.subAppearances.values():
res += _getBounds(a)
return res
return []
def autoFitViewport(viewport,objects,zoom=True,rotate=True):
from ..model.sensing import fit_plane_centroid
ofs = sum([_getOffsets(o) for o in objects],[])
pts = sum([_getBounds(o) for o in objects],[])
#print("Bounding box",bb,"center",center)
#raw_input()
#reset
viewport.camera.rot = [0.,0.,0.]
viewport.camera.tgt = [0.,0.,0.]
viewport.camera.dist = 6.0
viewport.clippingplanes = (0.2,20)
if len(ofs) == 0:
return
#print(pts)
#print(ofs)
pts = pts + ofs # just in case
bb = bb_create(*pts)
center = vectorops.mul(vectorops.add(bb[0],bb[1]),0.5)
viewport.camera.tgt = center
if zoom:
radius = max(vectorops.distance(bb[0],center),0.25)
viewport.camera.dist = 1.2*radius / math.tan(math.radians(viewport.fov*0.5))
#default: oblique view
if rotate:
viewport.camera.rot = [0,math.radians(30),math.radians(45)]
#fit a plane to these points
try:
centroid,normal = fit_plane_centroid(ofs)
except Exception as e:
try:
centroid,normal = fit_plane_centroid(pts)
except Exception as e:
warnings.warn("Exception occurred during fitting to points")
import traceback
traceback.print_exc()
            return
if normal[2] > 0:
normal = vectorops.mul(normal,-1)
z,x,y = so3.matrix(so3.inv(so3.canonical(normal)))
roll = 0
yaw = math.atan2(normal[0],normal[1])
pitch = math.atan2(-normal[2],vectorops.norm(normal[0:2]))
viewport.camera.rot = [roll,pitch,yaw]
else:
x = [1,0,0]
y = [0,0,1]
z = [0,1,0]
if zoom:
radius = max([abs(vectorops.dot(x,vectorops.sub(center,pt))) for pt in pts] + [abs(vectorops.dot(y,vectorops.sub(center,pt)))*viewport.w/viewport.h for pt in pts])
radius = max(radius,0.25)
zmin = min([vectorops.dot(z,vectorops.sub(center,pt)) for pt in pts])
zmax = max([vectorops.dot(z,vectorops.sub(center,pt)) for pt in pts])
#orient camera to point along normal direction
viewport.camera.tgt = center
viewport.camera.dist = 1.2*radius / math.tan(math.radians(viewport.fov*0.5))
near,far = viewport.clippingplanes
if viewport.camera.dist + zmin < near:
near = max((viewport.camera.dist + zmin)*0.5, radius*0.1)
if viewport.camera.dist + zmax > far:
far = max((viewport.camera.dist + zmax)*1.5, radius*3)
viewport.clippingplanes = (near,far)
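# Note on the zoom formula above: the camera distance is chosen so that a
# bounding sphere of the given radius spans about 83% (1/1.2) of the vertical
# field of view.  A rough worked example (the code above treats fov as degrees):
#
#   import math
#   fov = 60.0                     # hypothetical vertical field of view
#   radius = 1.0                   # bounding sphere radius
#   dist = 1.2*radius / math.tan(math.radians(fov*0.5))
#   # dist ~= 2.08; the sphere then subtends slightly less than the full fov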
def addText(name,text,position=None,**kwargs):
"""Adds text to the visualizer. You must give an identifier to all pieces
of text, which will be used to access the text as any other vis object.
Args:
name (str): the text's unique identifier.
text (str): the string to be drawn
        position (list, optional): the position of the string. If
            position=None, the text is added to the on-screen "console"
            display. If position has length 2, it is the (x,y) position of
            the upper left corner of the text on the screen. Negative units
            anchor the text to the right or bottom of the window. If
            position has length 3, the text is drawn at those world
            coordinates.
kwargs (optional): optional keywords to give to setAppearance.
To customize the text appearance, you can set the 'color', 'size', and
'position' attributes, either through the keyword arguments, or using
setAttribute(). To refer to this item, use the identifier given in
``name``.
"""
_init()
if position is None:
scene().addText(name,text,**kwargs)
else:
scene().addText(name,text,position=position,**kwargs)
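# Example (hedged usage sketch): text can be placed on-screen or in the world.
#
#   vis.addText("status","Running...")                 # on-screen console
#   vis.addText("label1","corner",position=(10,10))    # screen coords (x,y)
#   vis.addText("label2","origin",position=(0,0,0))    # world coordinates
#   vis.setAttribute("label2","color",(1,0,0,1))       # customize via attributes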
def clearText():
"""Clears all text in the visualization."""
scene().clearText()
def addPlot(name):
"""Creates a new empty plot with the identifier ``name``."""
add(name,VisPlot())
def addPlotItem(name,itemname):
"""Adds a scene item named ``itemname`` to the plot. All of the item's
configuration variables will be plotted by default, see
:func:`hidePlotItem` to turn off drawing some of these channels.
"""
scene().addPlotItem(name,itemname)
def logPlot(name,itemname,value):
"""Logs a custom visualization item to a plot. ``itemname`` can be an
arbitrary identifier; future logPlot calls with this itemname will add
values to the plotted curve.
"""
scene().logPlot(name,itemname,value)
def logPlotEvent(name,eventname,color=None):
"""Logs an event on the plot."""
scene().logPlotEvent(name,eventname,color)
def hidePlotItem(name,itemname,hidden=True):
"""Hides an item in the plot. To hide a particular channel of a given item
pass a pair ``(itemname,channelindex)``.
For example, to hide configurations 0-5 of 'robot', call::
hidePlotItem('plot',('robot',0))
...
hidePlotItem('plot',('robot',5))
"""
scene().hidePlotItem(name,itemname,hidden)
def setPlotDuration(name,time):
"""Sets the plot duration."""
setAttribute(name,'duration',time)
def setPlotRange(name,vmin,vmax):
"""Sets the y range of a plot to [vmin,vmax]."""
setAttribute(name,'range',(vmin,vmax))
def setPlotPosition(name,x,y):
"""Sets the upper left position of the plot on the screen."""
setAttribute(name,'position',(x,y))
def setPlotSize(name,w,h):
"""sets the width and height of the plot, in pixels."""
setAttribute(name,'size',(w,h))
def savePlot(name,fn):
"""Saves a plot to a CSV (extension .csv) or Trajectory (extension .traj) file."""
scene().savePlot(name,fn)
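# Example (hedged sketch of the plotting workflow defined above; assumes a
# scene item named "robot" already exists):
#
#   vis.addPlot("plot")
#   vis.addPlotItem("plot","robot")          # plot all of an item's channels
#   vis.logPlot("plot","error",0.05)         # log a custom scalar channel
#   vis.logPlotEvent("plot","contact")       # mark an event on the time axis
#   vis.hidePlotItem("plot",("robot",0))     # hide one channel of an item
#   vis.setPlotDuration("plot",10.0)
#   vis.setPlotRange("plot",-1.0,1.0)
#   vis.savePlot("plot","log.csv")           # .csv or .traj extensions accepted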
def autoFitCamera(zoom=True,rotate=True,scale=1):
"""Automatically fits the camera to all items in the visualization.
Args:
zoom (bool, optional): zooms the scene to the objects
rotate (bool, optional): rotates the scene to face the objects
scale (float, optional): a scale > 1 magnifies the camera zoom.
"""
print("klampt.vis: auto-fitting camera to scene.")
scene().autoFitCamera(zoom,rotate,scale)
def followCamera(target,translate=True,rotate=False,center=False):
"""Sets the camera to follow a target. The camera starts from its current
location and keeps the target in the same position on screen.
It can operate in the following modes:
- translation (``translate=True, rotate=False``): the camera moves with the
      object. This is the default.
- look-at (``translate=False, rotate=True``): the camera stays in the
current location but rotates to aim toward the object.
- follow (``translate=True, rotate=True``): the camera moves as though it
were fixed to the object.
Args:
target (str, Trajectory, or None): the target that is to be followed.
If this is None, the camera no longer follows anything.
translate (bool, optional): whether the camera should follow using
translation.
rotate (bool, optional): whether the camera should follow using
rotation.
center (bool, optional): whether the camera should first aim toward the
object before following. Default is False.
"""
scene().followCamera(target,translate,rotate,center)
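# Example (hedged sketch): the three follow modes described above, assuming a
# scene item named "robot" exists.
#
#   vis.followCamera("robot")                               # translation follow
#   vis.followCamera("robot",translate=False,rotate=True)   # look-at
#   vis.followCamera("robot",translate=True,rotate=True)    # rigid follow
#   vis.followCamera(None)                                  # stop following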
def getViewport():
"""Returns the :class:`GLViewport` of the current scene"""
return scene().getViewport()
def setViewport(viewport):
"""Sets the current scene to use a given :class:`GLViewport`"""
scene().setViewport(viewport)
def setBackgroundColor(r,g,b,a=1):
"""Sets the background color of the current scene."""
scene().setBackgroundColor(r,g,b,a)
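# Example (hedged sketch): tweak the camera through the GLViewport and set a
# white background.  The camera attribute names follow the usage in
# autoFitViewport above.
#
#   vp = vis.getViewport()
#   vp.camera.dist = 3.0
#   vis.setViewport(vp)
#   vis.setBackgroundColor(1,1,1)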
def saveJsonConfig(fn=None):
"""Saves the visualization options to a JSON object or file.
    If fn is provided, the options are saved to that file. Otherwise, they
    are returned as a JSON-compatible object.
"""
return scene().saveJsonConfig(fn)
def loadJsonConfig(jsonobj_or_fn):
"""Loads the visualization options from a JSON object or file.
    ``jsonobj_or_fn`` can either be a dict (previously obtained from
    saveJsonConfig) or a str giving a filename (previously saved using
    saveJsonConfig).
"""
return scene().loadJsonConfig(jsonobj_or_fn)
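# Example (hedged sketch): round-trip the visualization options.
#
#   cfg = vis.saveJsonConfig()          # returns a JSON-compatible object
#   vis.saveJsonConfig("vis.json")      # ...or saves to a file
#   vis.loadJsonConfig(cfg)             # accepts the object...
#   vis.loadJsonConfig("vis.json")      # ...or the filename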
def screenshot(format='auto',want_depth=False):
"""Returns a screenshot of the scene. Currently only available in OpenGL modes
(PyQt, GLUT).
``format`` describes the desired image format.
- 'auto': equivalent to 'numpy' if numpy is available, 'Image' if PIL is
available, or 'bytes' otherwise.
- 'numpy': the image will be a numpy uint8 array of shape (h,w,3).
- 'Image': the image will be a PIL Image if Python Imaging Library is
available.
- 'bytes': the image will be in the form ``(w,h,bytes)`` where ``bytes`` is a
raw bytes array of length 4*w*h. The buffer follows OpenGL convention with
the start in the lower-left corner.
    Can also provide a depth image if want_depth=True. The format will be a
    (w,h) numpy float array, an 'F' (float) Image, or a (w,h,array) tuple
    where ``array`` contains floats.
In multithreaded mode, this will block until rendering is
complete.
"""
global _window_manager
if _window_manager is None:
return None
return _window_manager.screenshot(format,want_depth)
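# Example (hedged sketch): grab a snapshot once a window is shown (OpenGL
# modes only; in multithreaded mode this blocks until a render completes).
#
#   img = vis.screenshot()                        # numpy (h,w,3) if available
#   both = vis.screenshot('numpy',want_depth=True)
#   # how the depth image is packaged alongside the color image is not pinned
#   # down by the docstring above; treat any unpacking of `both` as an
#   # assumption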
def screenshotCallback(fn,format='auto',want_depth=False):
"""Sets a callback ``fn`` that will receive a screenshot of the scene when
rendering is done.
See :func:`screenshot` for a description of the ``format`` and ``want_depth``
arguments.
Currently only available in OpenGL modes (PyQt, GLUT).
To repeatedly receive screenshots, ``fn`` can call
``vis.screenshotCallback(fn)`` again.
"""
global _window_manager
if _window_manager is None:
return None
return _window_manager.screenshotCallback(fn,format,want_depth)
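# Example (hedged sketch): receive a screenshot per rendered frame by
# re-registering the callback, as the docstring above suggests.  `process` is
# a hypothetical user function.
#
#   def onShot(img):
#       process(img)
#       vis.screenshotCallback(onShot)   # re-register to keep receiving frames
#   vis.screenshotCallback(onShot)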
def objectToVisType(item,world):
"""Returns the default type for the given item in the current world"""
itypes = types.objectToTypes(item,world)
if isinstance(itypes,(list,tuple)):
#ambiguous, still need to figure out what to draw
validtypes = []
for t in itypes:
if t=='Vector3':
validtypes.append(t)
elif t == 'Config':
if world is not None:
match = False
for i in range(world.numRobots()):
if len(item) == world.robot(i).numLinks():
validtypes.append(t)
match = True
break
if not match and len(itypes) == 1:
warnings.warn("Config-like item of length {} doesn't match any of the # links of robots in the world: {}".format(len(item),[world.robot(i).numLinks() for i in range(world.numRobots())]))
elif t=='RigidTransform':
validtypes.append(t)
elif t=='Geometry3D':
validtypes.append(t)
elif t=='Trajectory':
validtypes.append(t)
if len(validtypes) > 1:
warnings.warn("Unable to draw item of ambiguous types {}\n (Try vis.setAttribute(item,'type',desired_type_str) to disambiguate)".format(validtypes))
return
if len(validtypes) == 0:
warnings.warn("Unable to draw any of types {}".format(itypes))
return
return validtypes[0]
return itypes
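# Example (hedged sketch): a 3-element list is ambiguous (Vector3, Config,
# ...), which objectToVisType may warn about.  Disambiguate with the 'type'
# attribute, as the warning itself suggests:
#
#   vis.add("target",[0.5,0.0,1.0])
#   vis.setAttribute("target","type","Vector3")   # draw as a point, not a Config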
#_defaultCompressThreshold = 1e-2
_defaultCompressThreshold = 1e-3
class VisPlotItem:
def __init__(self,itemname,linkitem):
self.name = itemname
self.itemnames = []
self.linkitem = linkitem
self.traces = []
self.hidden = []
self.traceRanges = []
self.luminosity = []
self.compressThreshold = _defaultCompressThreshold
if linkitem is not None:
q = config.getConfig(linkitem.item)
assert q is not None
from collections import deque
self.traces = [deque() for i in range(len(q))]
self.itemnames = config.getConfigNames(linkitem.item)
def customUpdate(self,item,t,v):
for i,itemname in enumerate(self.itemnames):
if item == itemname:
self.updateTrace(i,t,v)
self.traceRanges[i] = (min(self.traceRanges[i][0],v),max(self.traceRanges[i][1],v))
return
else:
from collections import deque
self.itemnames.append(item)
self.traces.append(deque())
i = len(self.itemnames)-1
self.updateTrace(i,t,v)
self.traceRanges[i] = (min(self.traceRanges[i][0],v),max(self.traceRanges[i][1],v))
#raise ValueError("Invalid item specified: "+str(item))
def update(self,t):
if self.linkitem is None:
return
q = config.getConfig(self.linkitem.item)
assert len(self.traces) == len(q)
for i,v in enumerate(q):
self.updateTrace(i,t,v)
self.traceRanges[i] = (min(self.traceRanges[i][0],v),max(self.traceRanges[i][1],v))
def discard(self,tstart):
for t in self.traces:
            if len(t)<=1: continue
while len(t) >= 2:
if t[1][0] < tstart:
t.popleft()
else:
break
def updateTrace(self,i,t,v):
import random
assert i < len(self.traces)
assert i <= len(self.hidden)
assert i <= len(self.luminosity)
while i >= len(self.hidden):
self.hidden.append(False)
while i >= len(self.traceRanges):
self.traceRanges.append((v,v))
if i >= len(self.luminosity):
initialLuminosity = [0.5,0.25,0.75,1.0]
while i >= len(self.luminosity):
if len(self.luminosity)<len(initialLuminosity):
self.luminosity.append(initialLuminosity[len(self.luminosity)])
else:
self.luminosity.append(random.uniform(0,1))
trace = self.traces[i]
#trange = self.traceRanges[i][1] - self.traceRanges[i][0]
if len(trace) > 0 and trace[-1][0] >= t:
tsafe = trace[-1][0]+1e-8
if v != trace[-1][1]:
trace.append((tsafe,v))
return
trace[-1] = (tsafe,v)
return
if self.compressThreshold is None:
trace.append((t,v))
else:
if len(trace) < 2:
trace.append((t,v))
else:
pprev = trace[-2]
prev = trace[-1]
                assert prev[0] > pprev[0],"Added two items with the same time?"
assert t > prev[0]
slope_old = (prev[1]-pprev[1])/(prev[0]-pprev[0])
slope_new = (v-prev[1])/(t-prev[0])
                if (slope_old > 0) != (slope_new > 0) or abs(slope_old-slope_new) > self.compressThreshold:
trace.append((t,v))
else:
#near-linear, just extend along straight line
trace[-1] = (t,v)
class VisPlot:
def __init__(self):
self.items = []
self.colors = []
self.events = dict()
self.eventColors = dict()
self.outfile = None
self.outformat = None
def __del__(self):
self.endSave()
def update(self,t,duration,compressThreshold):
for i in self.items:
i.compressThreshold = compressThreshold
i.update(t)
if self.outfile:
self.dumpCurrent()
self.discard(t-duration)
else:
self.discard(t-60.0)
def discard(self,tmin):
for i in self.items:
i.discard(tmin)
delevents = []
for e,times in self.events.items():
while len(times) > 0 and times[0] < tmin:
times.popleft()
if len(times)==0:
delevents.append(e)
for e in delevents:
del self.events[e]
def addEvent(self,name,t,color=None):
if name in self.events:
self.events[name].append(t)
else:
from collections import deque
self.events[name] = deque([t])
if color is None:
import random
color = (random.uniform(0.01,1),random.uniform(0.01,1),random.uniform(0.01,1))
color = vectorops.mul(color,1.0/max(color))
if color is not None:
self.eventColors[name] = color
if len(color)==3:
self.eventColors[name] += [1.0]
def autoRange(self):
vmin = float('inf')
vmax = -float('inf')
for i in self.items:
for j in range(len(i.traceRanges)):
if not i.hidden[j]:
vmin = min(vmin,i.traceRanges[j][0])
vmax = max(vmax,i.traceRanges[j][1])
if math.isinf(vmin):
return (0.,1.)
if vmax == vmin:
vmax += 1.0
return (float(vmin),float(vmax))
def renderGL(self,window,x,y,w,h,duration,vmin=None,vmax=None):
if GL is None: raise RuntimeError("OpenGL wasn't initialized yet?")
if vmin is None:
vmin,vmax = self.autoRange()
import random
while len(self.colors) < len(self.items):
c = (random.uniform(0.01,1),random.uniform(0.01,1),random.uniform(0.01,1))
c = vectorops.mul(c,1.0/max(c))
self.colors.append(c)
GL.glColor3f(0,0,0)
GL.glBegin(GL.GL_LINE_LOOP)
GL.glVertex2f(x,y)
GL.glVertex2f(x+w,y)
GL.glVertex2f(x+w,y+h)
GL.glVertex2f(x,y+h)
GL.glEnd()
window.draw_text((x-18,y+4),'%.2f'%(vmax,),9)
window.draw_text((x-18,y+h+4),'%.2f'%(vmin,),9)
tmax = 0
for i in self.items:
for trace in i.traces:
if len(trace)==0: continue
tmax = max(tmax,trace[-1][0])
for i,item in enumerate(self.items):
for j,trace in enumerate(item.traces):
if len(trace)==0: continue
labelheight = trace[-1][1]
if len(item.name)==0:
label = item.itemnames[j]
else:
label = str(item.name) + '.' + item.itemnames[j]
labelheight = (labelheight - vmin)/(vmax-vmin)
labelheight = y + h - h*labelheight
GL.glColor3fv(vectorops.mul(self.colors[i],item.luminosity[j]))
window.draw_text((x+w+3,labelheight+4),label,9)
GL.glBegin(GL.GL_LINE_STRIP)
for k in range(len(trace)-1):
if trace[k+1][0] > tmax-duration:
u,v = trace[k]
if trace[k][0] < tmax-duration:
#interpolate so x is at tmax-duration
u2,v2 = trace[k+1]
#u + s(u2-u) = tmax-duration
s = (tmax-duration-u)/(u2-u)
v = v + s*(v2-v)
u = (tmax-duration)
u = (u-(tmax-duration))/duration
v = (v-vmin)/(vmax-vmin)
GL.glVertex2f(x+w*u,y+(1-v)*h)
u,v = trace[-1]
u = (u-(tmax-duration))/duration
v = (v-vmin)/(vmax-vmin)
GL.glVertex2f(x+w*u,y+(1-v)*h)
GL.glEnd()
if len(self.events) > 0:
for e,times in self.events.items():
for t in times:
if t < tmax-duration: continue
labelx = (t - (tmax-duration))/duration
labelx = x + w*labelx
c = self.eventColors[e]
GL.glColor4f(c[0]*0.5,c[1]*0.5,c[2]*0.5,c[3])
window.draw_text((labelx,y+h+12),e,9)
GL.glEnable(GL.GL_BLEND)
GL.glBlendFunc(GL.GL_SRC_ALPHA,GL.GL_ONE_MINUS_SRC_ALPHA)
GL.glBegin(GL.GL_LINES)
for e,times in self.events.items():
for t in times:
if t < tmax-duration: continue
labelx = (t - (tmax-duration))/duration
labelx = x + w*labelx
c = self.eventColors[e]
GL.glColor4f(c[0],c[1],c[2],c[3]*0.5)
GL.glVertex2f(labelx,y)
GL.glVertex2f(labelx,y+h)
GL.glEnd()
GL.glDisable(GL.GL_BLEND)
def beginSave(self,fn):
import os
ext = os.path.splitext(fn)[1]
if ext == '.csv' or ext == '.traj':
self.outformat = ext
else:
raise ValueError("Invalid extension for visualization plot, can only accept .csv or .traj")
self.outfile = open(fn,'w')
if self.outformat == '.csv':
#output a header
self.outfile.write("time")
for i in self.items:
self.outfile.write(",")
fullitemnames = []
if len(i.name) != 0:
name = None
if isinstance(i.name,(list,tuple)):
name = '.'.join(v for v in i.name)
else:
name = i.name
fullitemnames = [name+'.'+itemname for itemname in i.itemnames]
else:
fullitemnames = i.itemnames
self.outfile.write(",".join(fullitemnames))
self.outfile.write("\n")
self.dumpAll()
def endSave(self):
if self.outfile is not None:
self.outfile.close()
def dumpAll(self):
assert self.outfile is not None
if len(self.items) == 0: return
cols = []
mindt = float('inf')
mint = float('inf')
maxt = -float('inf')
for i in self.items:
if len(i.traces) == 0:
continue
for j,trace in enumerate(i.traces):
times,vals = list(zip(*trace))
if isinstance(vals[0],(int,float)):
vals = [[v] for v in vals]
traj = Trajectory(times,vals)
cols.append(traj)
mint = min(mint,traj.times[0])
maxt = max(maxt,traj.times[-1])
for k in range(len(traj.times)-1):
mindt = min(mindt,traj.times[k+1] - traj.times[k])
assert mindt > 0, "For some reason, there is a duplicate time?"
N = int((maxt - mint)/mindt)
dt = (maxt - mint)/N
times = [mint + i*(maxt-mint)/N for i in range(N+1)]
for i in range(N+1):
vals = [col.eval(times[i]) for col in cols]
if self.outformat == '.csv':
self.outfile.write(str(times[i])+',')
self.outfile.write(','.join([str(v[0]) for v in vals]))
self.outfile.write('\n')
else:
self.outfile.write(str(times[i])+'\t')
self.outfile.write(str(len(vals))+' ')
self.outfile.write(' '.join([str(v[0]) for v in vals]))
self.outfile.write('\n')
def dumpCurrent(self):
if len(self.items) == 0: return
assert len(self.items[0].traces) > 0, "Item has no channels?"
assert len(self.items[0].traces[0]) > 0, "Item has no readings yet?"
        t = self.items[0].traces[0][-1][0]
vals = []
for i in self.items:
if len(i.traces) == 0:
continue
for j,trace in enumerate(i.traces):
vals.append(trace[-1][1])
if self.outformat == '.csv':
self.outfile.write(str(t)+',')
self.outfile.write(','.join([str(v) for v in vals]))
self.outfile.write('\n')
else:
self.outfile.write(str(t)+'\t')
self.outfile.write(str(len(vals))+' ')
self.outfile.write(' '.join([str(v) for v in vals]))
self.outfile.write('\n')
def drawTrajectory(traj,width,color,pointSize=None,pointColor=None):
"""Draws a trajectory of points or transforms.
By default draws points along the trajectory. To turn this off, set
pointSize = 0.
"""
if GL is None: raise RuntimeError("OpenGL not initialized?")
if isinstance(traj,list):
if len(traj)==0:
return
if pointSize is None:
pointSize = width+2
if pointColor is None:
pointColor = (color[0]*0.75,color[1]*0.75,color[2]*0.75,color[3])
#R3 trajectory
GL.glDisable(GL.GL_LIGHTING)
GL.glColor4f(*color)
if len(traj) == 1:
GL.glPointSize(max(width,pointSize))
GL.glBegin(GL.GL_POINTS)
GL.glVertex3fv(traj[0])
GL.glEnd()
if len(traj) >= 2 and width > 0:
GL.glLineWidth(width)
GL.glBegin(GL.GL_LINE_STRIP)
for p in traj:
GL.glVertex3fv(p)
GL.glEnd()
GL.glLineWidth(1.0)
if len(traj) >= 2 and pointSize > 0:
GL.glColor4f(*pointColor)
GL.glPointSize(pointSize)
GL.glBegin(GL.GL_POINTS)
for p in traj:
GL.glVertex3fv(p)
GL.glEnd()
elif isinstance(traj,SE3Trajectory):
pointTraj = []
for m in traj.milestones:
pointTraj.append(m[9:12])
drawTrajectory(pointTraj,width,color,pointSize,pointColor)
elif isinstance(traj,SE3HermiteTrajectory):
pointTraj = []
velTraj = []
for m in traj.milestones:
pointTraj.append(m[9:12])
velTraj.append(m[21:24])
drawTrajectory(HermiteTrajectory(traj.times,pointTraj,velTraj),width,color,pointSize,pointColor)
else:
if len(traj.milestones)==0:
return
wp = traj.waypoint(traj.milestones[0])
if len(wp) == 3:
if len(wp) == len(traj.milestones[0]):
drawTrajectory(traj.milestones,width,color,pointSize,pointColor)
else: #discrepancy, must be hermite
if width > 0:
discretized = traj.discretize(traj.duration()/len(traj.milestones)*0.1)
drawTrajectory(discretized.milestones,width,color,0,None)
if pointSize is None or pointSize > 0:
drawTrajectory([traj.waypoint(m) for m in traj.milestones],0,color,pointSize,pointColor)
elif len(wp) == 2:
#R2 trajectory
if len(wp) == len(traj.milestones[0]):
drawTrajectory([v + [0.0] for v in traj.milestones],width,color,pointSize,pointColor)
else: #discrepancy, must be hermite
if width > 0:
discretized = traj.discretize(traj.duration()/len(traj.milestones)*0.1)
drawTrajectory([m + [0.0] for m in discretized.milestones],width,color,0,None)
if pointSize is None or pointSize > 0:
drawTrajectory([traj.waypoint(m) + [0.0] for m in traj.milestones],0,color,pointSize,pointColor)
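# Example (hedged sketch): drawTrajectory is normally invoked by the drawing
# code in this module, but it can also be called from a custom draw function
# with an active OpenGL context:
#
#   pts = [[0,0,0],[0.5,0,0.5],[1,0,0]]
#   def myDraw(data):
#       drawTrajectory(pts,width=2,color=(1,0.5,0,1))
#   vis.add("path",pts)
#   vis.setDrawFunc("path",myDraw)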
def drawRobotTrajectory(traj,robot,ees,width=2,color=(1,0.5,0,1),pointSize=None,pointColor=None):
"""Draws trajectories for the robot's end effectors. Note: no additional discretization is performed,
only the end effector points at the trajectory's milestones are shown. If you want more accurate trajectories,
first call traj.discretize(eps)."""
for i,ee in enumerate(ees):
if ee < 0: ees[i] = robot.numLinks()-1
pointTrajectories = []
for ee in ees:
pointTrajectories.append([])
if isinstance(traj,Trajectory):
traj = traj.milestones
for m in traj:
robot.setConfig(m)
for ee,eetraj in zip(ees,pointTrajectories):
eetraj.append(robot.link(ee).getTransform()[1])
for ptraj in pointTrajectories:
drawTrajectory(ptraj,width,color,pointSize,pointColor)
class _CascadingDict:
"""A hierarchical dictionary structure that can be defined with respect
to a parent dict or _CascadingDict. Items in this dict override the
items in the parent. Deleting an item from this dict does not delete
from the parent.
Be careful when modifying sub-items of top-level keys. You may not know
whether the key accesses the parent or this object::
parent = {'foo':[1,2,3]}
obj = _CascadingDict(parent=parent)
obj['foo'][0] = 'hello' #this actually changes the value of parent['foo']
obj2 = _CascadingDict(parent=parent)
print(obj2['foo']) #prints ['hello',2,3]
obj['foo'] = 4 #modifies the object's key 'foo'
print(obj2['foo']) #still prints ['hello',2,3], since the non-overridden key
#is still pointing to parent
"""
def __init__(self,rhs=None,parent=None):
if rhs is not None:
if isinstance(rhs,_CascadingDict):
self.overrides = rhs
else:
self.overrides = dict(rhs)
else:
self.overrides = dict()
        self.parent = parent
def setParent(self,parent):
self.parent = parent
def __getitem__(self,item):
try:
return self.overrides[item]
except KeyError:
if self.parent is None:
raise
else:
return self.parent[item]
def __setitem__(self,key,value):
self.overrides[key] = value
def __delitem__(self,key):
del self.overrides[key]
def get(self,item,default=None):
try:
return self.__getitem__(item)
except KeyError:
return default
def flatten(self):
"""Returns a normal dict containing all items in this or its parents"""
pdict = {}
if isinstance(self.parent,_CascadingDict):
pdict = self.parent.flatten()
elif isinstance(self.parent,dict):
pdict = self.parent.copy()
for k,v in self.overrides.items():
pdict[k] = v
return pdict
def __str__(self):
return str(self.flatten())
def __repr__(self):
return repr(self.flatten())
def __contains__(self,item):
if item in self.overrides:
return True
if self.parent is None:
return False
return item in self.parent
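# Example (hedged sketch of the override semantics documented above):
#
#   parent = {'color':(0,0,0,1),'size':12}
#   d = _CascadingDict(parent=parent)
#   d['size'] = 14                   # overrides the parent's value
#   assert d['size'] == 14 and d['color'] == (0,0,0,1)
#   assert d.flatten() == {'color':(0,0,0,1),'size':14}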
_default_str_attributes = {'color':[0,0,0,1], 'position':None, 'size':12 }
_default_Trajectory_attributes = { 'robot':0, "width":3, "color":(1,0.5,0,1), "pointSize":None, "pointColor":None }
_default_RobotTrajectory_attributes = { 'robot':0, "width":3, "color":(1,0.5,0,1), "pointSize":None, "pointColor":None , "endeffectors":[-1]}
_default_VisPlot_attributes = {'compress':_defaultCompressThreshold, 'duration':5., 'position':None, 'range':(None,None), 'size':(200,150), 'hide_label':True}
_default_Point_attributes = { "size":5.0, "color":(0,0,0,1) }
_default_Direction_attributes = { "length":0.15, "color":[0,1,1,1] }
_default_Frame_attributes = { "length":0.1, "width":0.01 }
_default_ContactPoint_attributes = { "size":5.0, "length":0.05, "color":[1,0.5,0,1] }
_default_IKObjective_attributes = { "size":5.0, "color":(0,0,0,1), "length":0.1, "width": 0.01, "axis_color":[0.5,0,0.5,1], "axis_width":3.0, "axis_length":0.1 }
_default_Geometry_attributes = { "size":None, "color":None }
_default_RigidTransform_attributes = { "fancy":False, "length":0.1, "width":0.01 }
def _default_attributes(item,type=None):
"""Returns an attribute dictionary for the defaults of a given item.
If the item is ambiguous, you can provide the type argument.
"""
res = {}
if isinstance(item,str):
return _default_str_attributes
elif isinstance(item,(WorldModel,RobotModel)):
pass
elif hasattr(item,'appearance'):
        res['color'] = item.appearance().getColor()
elif isinstance(item,(Trajectory,MultiPath)):
if isinstance(item,RobotTrajectory):
return _default_RobotTrajectory_attributes
else:
return _default_Trajectory_attributes
elif isinstance(item,VisPlot):
return _default_VisPlot_attributes
elif isinstance(item,coordinates.Point):
return _default_Point_attributes
elif isinstance(item,coordinates.Direction):
return _default_Direction_attributes
elif isinstance(item,coordinates.Frame):
return _default_Frame_attributes
elif isinstance(item,coordinates.Transform):
pass
elif isinstance(item,coordinates.Group):
res['hide_label'] = True
elif isinstance(item,ContactPoint):
return _default_ContactPoint_attributes
elif isinstance(item,Hold):
pass
elif isinstance(item,IKObjective):
return _default_IKObjective_attributes
elif isinstance(item,(GeometricPrimitive,TriangleMesh,PointCloud,Geometry3D)):
return _default_Geometry_attributes
else:
if type is not None:
itypes = type
else:
try:
itypes = objectToVisType(item,None)
res["type"]=itypes
except Exception as e:
if hasattr(item,'drawGL'):
#assume it's a SimRobotSensor, Appearance, or SubRobotModel
return
warnings.warn(str(e))
warnings.warn("Unsupported object type {} of type {}".format(item,item.__class__.__name__))
return
if itypes is None:
warnings.warn("Unable to convert item {} to drawable".format(str(item)))
return
elif itypes == 'Config':
pass
elif itypes == 'Configs':
res["maxConfigs"] = min(10,len(item))
elif itypes == 'Vector3':
return _default_Point_attributes
elif itypes == 'RigidTransform':
return _default_RigidTransform_attributes
else:
warnings.warn("klampt.vis: Unable to draw item of type \"%s\""%(str(itypes),))
res['hidden'] = True
res['hide_label'] = True
return res
class VisAppearance:
"""The core class that governs all of the drawing of an object
in the visualization. Can accommodate any drawable Klampt type.
"""
def __init__(self,item,name = None, type=None):
self.name = name
self.useDefaultAppearance = True
self.customAppearance = None
self.customDrawFunc = None
#For group items, this allows you to customize appearance of sub-items
self.subAppearances = {}
self.animation = None
self.animationStartTime = 0
self.animationSpeed = 1.0
self.attributes = _default_attributes(item,type)
if not isinstance(self.attributes,_CascadingDict):
self.attributes = _CascadingDict(self.attributes)
if 'hide_label' not in self.attributes:
self.attributes['hide_label'] = False
if 'hidden' not in self.attributes:
self.attributes['hidden'] = False
self.attributes['label'] = name
#used for Qt text rendering
self.widget = None
#used for visual editing of certain items
self.editor = None
#cached drawing
self.displayCache = [glcommon.CachedGLObject()]
self.displayCache[0].name = name
#temporary configuration of the item
self.drawConfig = None
self.transformChanged = False
self.setItem(item)
def setItem(self,item):
self.item = item
self.subAppearances = {}
#Parse out sub-items which can have their own appearance changed
if isinstance(item,WorldModel):
for i in range(item.numRobots()):
self.subAppearances[("Robot",i)] = VisAppearance(item.robot(i),item.robot(i).getName())
for i in range(item.numRigidObjects()):
self.subAppearances[("RigidObject",i)] = VisAppearance(item.rigidObject(i),item.rigidObject(i).getName())
for i in range(item.numTerrains()):
self.subAppearances[("Terrain",i)] = VisAppearance(item.terrain(i),item.terrain(i).getName())
elif isinstance(item,RobotModel):
for i in range(item.numLinks()):
self.subAppearances[("Link",i)] = VisAppearance(item.link(i),item.link(i).getName())
elif isinstance(item,coordinates.Group):
for n,f in item.frames.items():
self.subAppearances[("Frame",n)] = VisAppearance(f,n)
for n,p in item.points.items():
self.subAppearances[("Point",n)] = VisAppearance(p,n)
for n,d in item.directions.items():
self.subAppearances[("Direction",n)] = VisAppearance(d,n)
for n,g in item.subgroups.items():
self.subAppearances[("Subgroup",n)] = VisAppearance(g,n)
elif isinstance(item,Hold):
if item.ikConstraint is not None:
self.subAppearances["ikConstraint"] = VisAppearance(item.ikConstraint,"ik")
for n,c in enumerate(item.contacts):
self.subAppearances[("contact",n)] = VisAppearance(c,n)
def markChanged(self,config=True,appearance=True):
if appearance:
for c in self.displayCache:
c.markChanged()
for (k,a) in self.subAppearances.items():
a.markChanged(config,appearance)
if config:
self.update_editor(True)
self.transformChanged = True
def destroy(self):
for c in self.displayCache:
c.destroy()
for (k,a) in self.subAppearances.items():
a.destroy()
self.subAppearances = {}
def drawText(self,text,point):
"""Draws the given text at the given point"""
if len(point) != 3:
warnings.warn("drawText INCORRECT POINT SIZE {} {}".format(point,text))
return
if not all(math.isfinite(v) for v in point):
warnings.warn("drawText INVALID POINT {} {}".format(point,text))
return
self.widget.addLabel(text,point[:],[0,0,0])
def updateAnimation(self,t):
"""Updates the configuration, if it's being animated"""
if not self.animation:
if self.drawConfig is not None:
self.markChanged(config=True,appearance=False)
self.drawConfig = None
else:
u = self.animationSpeed*(t-self.animationStartTime)
q = self.animation.eval(u,self.animationEndBehavior)
self.drawConfig = config.getConfig(q)
self.markChanged(config=True,appearance=False)
for n,app in self.subAppearances.items():
app.updateAnimation(t)
def updateTime(self,t):
"""Updates in real time"""
if isinstance(self.item,VisPlot):
compressThreshold = self.attributes['compress']
duration = self.attributes['duration']
for items in self.item.items:
if items.linkitem:
items.linkitem.swapDrawConfig()
self.item.update(t,duration,compressThreshold)
for items in self.item.items:
if items.linkitem:
items.linkitem.swapDrawConfig()
def swapDrawConfig(self):
"""Given self.drawConfig!=None, swaps out the item's curren
configuration with self.drawConfig. Used for animations"""
if self.drawConfig:
try:
newDrawConfig = config.getConfig(self.item)
if len(newDrawConfig) != len(self.drawConfig):
warnings.warn("Incorrect length of draw configuration? {} vs {}".format(len(self.drawConfig),len(newDrawConfig)))
config.setConfig(self.item,self.drawConfig)
self.drawConfig = newDrawConfig
except Exception as e:
warnings.warn("Exception thrown during animation update. Probably have incorrect length of configuration")
import traceback
traceback.print_exc()
self.animation = None
pass
for n,app in self.subAppearances.items():
app.swapDrawConfig()
def clearDisplayLists(self):
if isinstance(self.item,WorldModel):
for r in range(self.item.numRobots()):
for link in range(self.item.robot(r).numLinks()):
self.item.robot(r).link(link).appearance().refresh()
for i in range(self.item.numRigidObjects()):
self.item.rigidObject(i).appearance().refresh()
for i in range(self.item.numTerrains()):
self.item.terrain(i).appearance().refresh()
elif hasattr(self.item,'appearance'):
self.item.appearance().refresh()
elif isinstance(self.item,RobotModel):
for link in range(self.item.numLinks()):
self.item.link(link).appearance().refresh()
for n,o in self.subAppearances.items():
o.clearDisplayLists()
self.markChanged(config=False,appearance=True)
def transparent(self):
"""Returns true if the item is entirely transparent, None if mixed transparency, and False otherwise"""
if len(self.subAppearances)!=0:
anyTransp = False
anyOpaque = False
for n,app in self.subAppearances.items():
if app.transparent():
anyTransp = True
else:
anyOpaque = True
if anyTransp and anyOpaque:
return None
else:
return anyTransp
if hasattr(self.item,'appearance'):
if self.useDefaultAppearance or 'color' not in self.attributes:
if isinstance(self.item,WorldModel):
#corner case: empty world
return False
else:
return self.item.appearance().getColor()[3] < 1.0
try:
return (self.attributes['color'][3] < 1.0)
except:
return False
def getAttributes(self):
if len(self.subAppearances) > 0:
return {}
return self.attributes.flatten()
def drawGL(self,world=None,viewport=None,draw_transparent=None):
"""Draws the specified item in the specified world, with all the
current modifications in attributes.
        If a name or label is given and ``self.attributes['hide_label']``
        is False, then the label is shown.
The drawing passes are controlled by ``draw_transparent`` -- opaque
items should be rasterized before transparent ones.
Args:
world (WorldModel): the world model
viewport (Viewport): the C++ viewport of the current view, which is
compatible with the Klampt C++ Widget class.
draw_transparent (bool or None): If None, then everything is drawn.
If True, then only transparent items are drawn. If False, then
only opaque items are drawn. (This only affects WorldModels)
"""
if self.attributes["hidden"]:
return
if self.customDrawFunc is not None:
self.customDrawFunc(self.item)
return
item = self.item
name = None
if not self.attributes["hide_label"]:
name = self.attributes['label']
#set appearance
if not self.useDefaultAppearance and hasattr(item,'appearance'):
if not hasattr(self,'oldAppearance'):
self.oldAppearance = item.appearance().clone()
if self.customAppearance is not None:
item.appearance().set(self.customAppearance)
elif "color" in self.attributes:
item.appearance().setColor(*self.attributes["color"])
if self.editor is not None:
assert viewport is not None
#some weird logic to determine whether to draw now...
if draw_transparent is None or draw_transparent == self.transparent() or (self.transparent() is None and draw_transparent==False):
#KLUDGE:
#might be a robot, make sure the appearances are all up to date before drawing the editor
for n,app in self.subAppearances.items():
if app.useDefaultAppearance or not hasattr(app.item,'appearance'):
continue
if not hasattr(app,'oldAppearance'):
app.oldAppearance = app.item.appearance().clone()
if app.customAppearance is not None:
app.item.appearance().set(app.customAppearance)
elif "color" in app.attributes:
app.item.appearance().setColor(*app.attributes["color"])
if isinstance(self.editor,RobotPoser) and isinstance(self.item,(list,tuple)) and world is not None:
#KLUDGE: config editor; make sure that the robot in the world model is restored
                    #use the first robot in the world model
robot = world.robot(0)
oldconfig = robot.getConfig()
oldAppearances = []
if "color" in self.attributes:
for i in range(robot.numLinks()):
oldAppearances.append(robot.link(i).appearance().clone())
robot.link(i).appearance().setColor(*self.attributes['color'])
else:
oldconfig = None
self.editor.drawGL(viewport)
if oldconfig is not None:
world.robot(0).setConfig(oldconfig)
if len(oldAppearances) > 0:
for i in range(robot.numLinks()):
robot.link(i).appearance().set(oldAppearances[i])
#Restore sub-appearances
for n,app in self.subAppearances.items():
if app.useDefaultAppearance or not hasattr(app.item,'appearance'):
continue
app.item.appearance().set(app.oldAppearance)
if isinstance(self.editor,RobotPoser):
                #the widget took care of everything, don't continue drawing the item
#revert the robot's actual pose
#revert appearance if necessary
if not self.useDefaultAppearance and hasattr(item,'appearance'):
item.appearance().set(self.oldAppearance)
return
if len(self.subAppearances)!=0:
for n,app in self.subAppearances.items():
if draw_transparent is True:
if not app.transparent():
continue
elif draw_transparent is False:
if app.transparent():
continue
app.widget = self.widget
app.drawGL(world,viewport,draw_transparent)
elif hasattr(item,'drawGL'):
item.drawGL()
elif hasattr(item,'drawWorldGL'):
item.drawWorldGL()
elif isinstance(item,str):
pos = self.attributes["position"]
if pos is not None and len(pos)==3:
col = self.attributes["color"]
self.widget.addLabel(self.item,pos,col)
elif isinstance(item,VisPlot):
pass
elif isinstance(item,Trajectory):
doDraw = False
centroid = None
if len(item.milestones) == 0:
return
robot = (world.robot(self.attributes["robot"]) if world is not None and world.numRobots() > 0 else None)
if robot is not None:
robotConfig = robot.getConfig()
treatAsRobotTrajectory = (item.__class__ == Trajectory and len(item.milestones) > 0 and robot and len(item.milestones[0]) == robot.numLinks())
if isinstance(item,RobotTrajectory) or treatAsRobotTrajectory:
ees = self.attributes.get("endeffectors",[-1])
if world:
doDraw = (len(ees) > 0)
for i,ee in enumerate(ees):
if ee < 0: ees[i] = robot.numLinks()-1
if doDraw:
robot.setConfig(item.milestones[0])
centroid = vectorops.div(vectorops.add(*[robot.link(ee).getTransform()[1] for ee in ees]),len(ees))
elif isinstance(item,(SE3Trajectory,SE3HermiteTrajectory)):
doDraw = True
centroid = item.waypoint(item.milestones[0])[1]
else:
wp = item.waypoint(item.milestones[0])
if len(wp) == 3:
#R3 trajectory
doDraw = True
centroid = wp
elif len(item.waypoint(item.milestones[0])) == 2:
#R2 trajectory
doDraw = True
centroid = wp+[0.0]
else:
#don't know how to interpret this trajectory
pass
if doDraw:
assert len(centroid)==3
def drawRaw():
width = self.attributes["width"]
color = self.attributes["color"]
pointSize = self.attributes["pointSize"]
pointColor = self.attributes["pointColor"]
if isinstance(item,RobotTrajectory) or treatAsRobotTrajectory:
ees = self.attributes.get("endeffectors",[-1])
drawRobotTrajectory(item,robot,ees,width,color,pointSize,pointColor)
else:
drawTrajectory(item,width,color,pointSize,pointColor)
self.displayCache[0].draw(drawRaw)
if name is not None:
self.drawText(name,centroid)
if robot is not None:
robot.setConfig(robotConfig)
elif isinstance(item,MultiPath):
robot = (world.robot(self.attributes["robot"]) if world is not None and world.numRobots() > 0 else None)
if robot is not None and item.numSections() > 0:
if len(item.sections[0].configs[0]) == robot.numLinks():
ees = self.attributes.get("endeffectors",[-1])
centroid = None
if len(ees) > 0:
for i,ee in enumerate(ees):
if ee < 0: ees[i] = robot.numLinks()-1
robot.setConfig(item.sections[0].configs[0])
centroid = vectorops.div(vectorops.add(*[robot.link(ee).getTransform()[1] for ee in ees]),len(ees))
width = self.attributes["width"]
color = self.attributes["color"]
pointSize = self.attributes["pointSize"]
pointColor = self.attributes["pointColor"]
color2 = [1-c for c in color]
color2[3] = color[3]
def drawRaw():
for i,s in enumerate(item.sections):
drawRobotTrajectory(s.configs,robot,ees,width,(color if i%2 == 0 else color2),pointSize,pointColor)
#draw it!
self.displayCache[0].draw(drawRaw)
if name is not None and centroid is not None:
self.drawText(name,centroid)
elif isinstance(item,coordinates.Point):
def drawRaw():
GL.glDisable(GL.GL_LIGHTING)
GL.glEnable(GL.GL_POINT_SMOOTH)
GL.glPointSize(self.attributes["size"])
GL.glColor4f(*self.attributes["color"])
GL.glBegin(GL.GL_POINTS)
GL.glVertex3f(0,0,0)
GL.glEnd()
#write name
GL.glDisable(GL.GL_DEPTH_TEST)
self.displayCache[0].draw(drawRaw,[so3.identity(),item.worldCoordinates()])
GL.glEnable(GL.GL_DEPTH_TEST)
if name is not None:
self.drawText(name,item.worldCoordinates())
elif isinstance(item,coordinates.Direction):
def drawRaw():
GL.glDisable(GL.GL_LIGHTING)
GL.glDisable(GL.GL_DEPTH_TEST)
L = self.attributes["length"]
source = [0,0,0]
GL.glColor4f(*self.attributes["color"])
GL.glBegin(GL.GL_LINES)
GL.glVertex3f(*source)
GL.glVertex3f(*vectorops.mul(item.localCoordinates(),L))
GL.glEnd()
GL.glEnable(GL.GL_DEPTH_TEST)
#write name
self.displayCache[0].draw(drawRaw,item.frame().worldCoordinates(),parameters = item.localCoordinates())
if name is not None:
self.drawText(name,vectorops.add(item.frame().worldCoordinates()[1],item.worldCoordinates()))
elif isinstance(item,coordinates.Frame):
t = item.worldCoordinates()
if item.parent() is not None:
tp = item.parent().worldCoordinates()
else:
tp = se3.identity()
tlocal = item.relativeCoordinates()
def drawRaw():
GL.glDisable(GL.GL_DEPTH_TEST)
GL.glDisable(GL.GL_LIGHTING)
GL.glLineWidth(2.0)
gldraw.xform_widget(tlocal,self.attributes["length"],self.attributes["width"])
GL.glLineWidth(1.0)
#draw curve between frame and parent
if item.parent() is not None:
d = vectorops.norm(tlocal[1])
vlen = d*0.5
v1 = so3.apply(tlocal[0],[-vlen]*3)
v2 = [vlen]*3
#GL.glEnable(GL.GL_BLEND)
#GL.glBlendFunc(GL.GL_SRC_ALPHA,GL.GL_ONE_MINUS_SRC_ALPHA)
#GL.glColor4f(1,1,0,0.5)
GL.glColor3f(1,1,0)
gldraw.hermite_curve(tlocal[1],v1,[0,0,0],v2,0.03*max(0.1,vectorops.norm(tlocal[1])))
#GL.glDisable(GL.GL_BLEND)
GL.glEnable(GL.GL_DEPTH_TEST)
#For some reason, cached drawing is causing OpenGL problems
#when the frame is rapidly changing
self.displayCache[0].draw(drawRaw,transform=tp, parameters = tlocal)
#GL.glPushMatrix()
#GL.glMultMatrixf(sum(zip(*se3.homogeneous(tp)),()))
#drawRaw()
#GL.glPopMatrix()
#write name
if name is not None:
self.drawText(name,t[1])
elif isinstance(item,coordinates.Transform):
#draw curve between frames
t1 = item.source().worldCoordinates()
if item.destination() is not None:
t2 = item.destination().worldCoordinates()
else:
t2 = se3.identity()
d = vectorops.distance(t1[1],t2[1])
vlen = d*0.5
v1 = so3.apply(t1[0],[-vlen]*3)
v2 = so3.apply(t2[0],[vlen]*3)
def drawRaw():
GL.glDisable(GL.GL_DEPTH_TEST)
GL.glDisable(GL.GL_LIGHTING)
GL.glColor3f(1,1,1)
gldraw.hermite_curve(t1[1],v1,t2[1],v2,0.03)
GL.glEnable(GL.GL_DEPTH_TEST)
#write name at curve
self.displayCache[0].draw(drawRaw,transform=None,parameters = (t1,t2))
if name is not None:
self.drawText(name,spline.hermite_eval(t1[1],v1,t2[1],v2,0.5))
elif isinstance(item,coordinates.Group):
pass
elif isinstance(item,ContactPoint):
def drawRaw():
GL.glDisable(GL.GL_LIGHTING)
GL.glEnable(GL.GL_POINT_SMOOTH)
GL.glPointSize(self.attributes["size"])
l = self.attributes["length"]
GL.glColor4f(*self.attributes["color"])
GL.glBegin(GL.GL_POINTS)
GL.glVertex3f(0,0,0)
GL.glEnd()
GL.glBegin(GL.GL_LINES)
GL.glVertex3f(0,0,0)
GL.glVertex3f(l,0,0)
GL.glEnd()
self.displayCache[0].draw(drawRaw,[so3.canonical(item.n),item.x])
elif isinstance(item,Hold):
pass
elif isinstance(item,IKObjective):
if hasattr(item,'robot'):
#need this to be built with a robot element.
#Otherwise, can't determine the correct transforms
robot = item.robot
elif world:
if world is not None and world.numRobots() > 0:
robot = world.robot(self.attributes.get("robot",0))
else:
robot = None
else:
robot = None
if robot is not None:
link = robot.link(item.link())
dest = robot.link(item.destLink()) if item.destLink()>=0 else None
while len(self.displayCache) < 3:
self.displayCache.append(glcommon.CachedGLObject())
self.displayCache[1].name = self.name+" target position"
self.displayCache[2].name = self.name+" curve"
if item.numPosDims() != 0:
lp,wp = item.getPosition()
#set up parameters of connector
p1 = se3.apply(link.getTransform(),lp)
if dest is not None:
p2 = se3.apply(dest.getTransform(),wp)
else:
p2 = wp
d = vectorops.distance(p1,p2)
v1 = [0.0]*3
v2 = [0.0]*3
if item.numRotDims()==3: #full constraint
R = item.getRotation()
def drawRaw():
gldraw.xform_widget(se3.identity(),self.attributes["length"],self.attributes["width"])
t1 = se3.mul(link.getTransform(),(so3.identity(),lp))
                        t2 = (R,wp) if dest is None else se3.mul(dest.getTransform(),(R,wp))
self.displayCache[0].draw(drawRaw,transform=t1)
self.displayCache[1].draw(drawRaw,transform=t2)
vlen = d*0.1
v1 = so3.apply(t1[0],[-vlen]*3)
v2 = so3.apply(t2[0],[vlen]*3)
elif item.numRotDims()==0: #point constraint
def drawRaw():
GL.glDisable(GL.GL_LIGHTING)
GL.glEnable(GL.GL_POINT_SMOOTH)
GL.glPointSize(self.attributes["size"])
GL.glColor4f(*self.attributes["color"])
GL.glBegin(GL.GL_POINTS)
GL.glVertex3f(0,0,0)
GL.glEnd()
self.displayCache[0].draw(drawRaw,transform=(so3.identity(),p1))
self.displayCache[1].draw(drawRaw,transform=(so3.identity(),p2))
#set up the connecting curve
vlen = d*0.5
d = vectorops.sub(p2,p1)
v1 = vectorops.mul(d,0.5)
#curve in the destination
v2 = vectorops.cross((0,0,0.5),d)
else: #hinge constraint
p = [0,0,0]
d = [0,0,0]
def drawRawLine():
GL.glDisable(GL.GL_LIGHTING)
GL.glEnable(GL.GL_POINT_SMOOTH)
GL.glPointSize(self.attributes["size"])
GL.glColor4f(*self.attributes["color"])
GL.glBegin(GL.GL_POINTS)
GL.glVertex3f(*p)
GL.glEnd()
GL.glColor4f(*self.attributes["color"])
GL.glLineWidth(self.attributes["width"])
GL.glBegin(GL.GL_LINES)
GL.glVertex3f(*p)
GL.glVertex3f(*vectorops.madd(p,d,self.attributes["length"]))
GL.glEnd()
GL.glLineWidth(1.0)
ld,wd = item.getRotationAxis()
p = lp
d = ld
self.displayCache[0].draw(drawRawLine,transform=link.getTransform(),parameters=(p,d))
p = wp
d = wd
self.displayCache[1].draw(drawRawLine,transform=dest.getTransform() if dest else se3.identity(),parameters=(p,d))
#set up the connecting curve
d = vectorops.sub(p2,p1)
v1 = vectorops.mul(d,0.5)
#curve in the destination
v2 = vectorops.cross((0,0,0.5),d)
def drawConnection():
GL.glDisable(GL.GL_LIGHTING)
GL.glDisable(GL.GL_DEPTH_TEST)
GL.glColor3f(1,0.5,0)
gldraw.hermite_curve(p1,v1,p2,v2,0.03*max(0.1,vectorops.distance(p1,p2)))
#GL.glBegin(GL.GL_LINES)
#GL.glVertex3f(*p1)
#GL.glVertex3f(*p2)
#GL.glEnd()
GL.glEnable(GL.GL_DEPTH_TEST)
#TEMP for some reason the cached version sometimes gives a GL error
self.displayCache[2].draw(drawConnection,transform=None,parameters = (p1,v1,p2,v2))
#drawConnection()
if name is not None:
self.drawText(name,wp)
else:
wp = link.getTransform()[1]
if item.numRotDims()==3: #full constraint
R = item.getRotation()
def drawRaw():
gldraw.xform_widget(se3.identity(),self.attributes["length"],self.attributes["width"])
self.displayCache[0].draw(drawRaw,transform=link.getTransform())
self.displayCache[1].draw(drawRaw,transform=se3.mul(link.getTransform(),(R,[0,0,0])))
elif item.numRotDims() > 0:
#axis constraint
d = [0,0,0]
def drawRawLine():
GL.glDisable(GL.GL_LIGHTING)
GL.glColor4f(*self.attributes["axis_color"])
GL.glLineWidth(self.attributes["axis_width"])
GL.glBegin(GL.GL_LINES)
GL.glVertex3f(0,0,0)
GL.glVertex3f(*vectorops.mul(d,self.attributes["axis_length"]))
GL.glEnd()
GL.glLineWidth(1.0)
ld,wd = item.getRotationAxis()
d = ld
self.displayCache[0].draw(drawRawLine,transform=link.getTransform(),parameters=d)
d = wd
self.displayCache[1].draw(drawRawLine,transform=(dest.getTransform()[0] if dest else so3.identity(),wp),parameters=d)
else:
#no drawing
pass
if name is not None:
self.drawText(name,wp)
elif isinstance(item,(GeometricPrimitive,TriangleMesh,PointCloud,Geometry3D)):
#this can be tricky if the mesh or point cloud has colors
if not hasattr(self,'appearance'):
self.appearance = Appearance()
self.appearance.setColor(0.5,0.5,0.5,1)
c = self.attributes["color"]
if c is not None:
self.appearance.setColor(*c)
s = self.attributes["size"]
if s:
self.appearance.setPointSize(s)
wp = None
geometry = None
lighting = True
restoreLineWidth = False
if isinstance(self.item,GeometricPrimitive):
if not hasattr(self,'geometry'):
self.geometry = Geometry3D(self.item)
geometry = self.geometry
if self.item.type in ['Point','Segment']:
lighting = False
if self.item.type == 'Segment':
GL.glLineWidth(self.attributes.get('width',3.0))
restoreLineWidth = True
elif isinstance(self.item,PointCloud):
if not hasattr(self,'geometry'):
self.geometry = Geometry3D(self.item)
lighting = False
geometry = self.geometry
elif isinstance(self.item,TriangleMesh):
if not hasattr(self,'geometry'):
self.geometry = Geometry3D(self.item)
geometry = self.geometry
else:
assert isinstance(self.item,Geometry3D)
if self.item.type() == 'GeometricPrimitive':
prim = self.item.getGeometricPrimitive()
if prim.type in ['Point','Segment']:
lighting = False
if prim.type == 'Segment':
GL.glLineWidth(self.attributes.get('width',3.0))
restoreLineWidth = True
elif self.item.type() == 'PointCloud':
lighting = False
geometry = self.item
if lighting:
GL.glEnable(GL.GL_LIGHTING)
else:
GL.glDisable(GL.GL_LIGHTING)
self.appearance.drawWorldGL(geometry)
if restoreLineWidth:
GL.glLineWidth(1.0)
if name is not None:
bmin,bmax = geometry.getBB()
wp = vectorops.mul(vectorops.add(bmin,bmax),0.5)
self.drawText(name,wp)
else:
try:
itypes = self.attributes['type']
except KeyError:
try:
itypes = objectToVisType(item,world)
except Exception as e:
import traceback
traceback.print_exc()
warnings.warn("Unsupported object type {} of type {}".format(item,item.__class__.__name__))
return
if itypes is None:
warnings.warn("Unable to convert item {} to drawable".format(str(item)))
return
elif itypes == 'Config':
rindex = self.attributes.get("robot",0)
if world and rindex < world.numRobots():
robot = world.robot(rindex)
if robot.numLinks() != len(item):
warnings.warn("Unable to draw Config, does not have the same # of DOFs as the robot: %d != %d"%(robot.numLinks(),len(item)))
else:
if not self.useDefaultAppearance:
oldAppearance = [robot.link(i).appearance().clone() for i in range(robot.numLinks())]
for i in range(robot.numLinks()):
if self.customAppearance is not None:
robot.link(i).appearance().set(self.customAppearance)
elif "color" in self.attributes:
robot.link(i).appearance().setColor(*self.attributes["color"])
oldconfig = robot.getConfig()
robot.setConfig(item)
robot.drawGL()
robot.setConfig(oldconfig)
if not self.useDefaultAppearance:
for (i,app) in enumerate(oldAppearance):
robot.link(i).appearance().set(app)
else:
warnings.warn("Unable to draw Config items without a world or robot")
elif itypes == 'Configs':
def drawAsTrajectory():
width = self.attributes.get("width",3.0)
color = self.attributes["color"]
pointSize = self.attributes.get("pointSize",None)
pointColor = self.attributes.get("pointColor",None)
drawTrajectory(Trajectory(list(range(len(item))),item),width,color,pointSize,pointColor)
if len(item) == 0:
pass
elif world and world.numRobots() >= 1:
maxConfigs = self.attributes.get("maxConfigs",min(10,len(item)))
robot = world.robot(self.attributes.get("robot",0))
if robot.numLinks() != len(item[0]):
if len(item[0]) in [2,3]: #interpret as a trajectory
self.displayCache[0].draw(drawAsTrajectory)
centroid = item[0] + [0]*(3-len(item[0]))
if name is not None:
self.drawText(name,centroid)
else:
warnings.warn("Configs items aren't the right size for the robot")
if not self.useDefaultAppearance:
oldAppearance = [robot.link(i).appearance().clone() for i in range(robot.numLinks())]
for i in range(robot.numLinks()):
if self.customAppearance is not None:
robot.link(i).appearance().set(self.customAppearance)
elif "color" in self.attributes:
robot.link(i).appearance().setColor(*self.attributes["color"])
oldconfig = robot.getConfig()
for i in range(maxConfigs):
idx = int(i*len(item))//maxConfigs
robot.setConfig(item[idx])
robot.drawGL()
robot.setConfig(oldconfig)
if not self.useDefaultAppearance:
for (i,app) in enumerate(oldAppearance):
robot.link(i).appearance().set(app)
elif len(item[0]) in [2,3]: #interpret as a trajectory
self.displayCache[0].draw(drawAsTrajectory)
centroid = item[0] + [0]*(3-len(item[0]))
if name is not None:
self.drawText(name,centroid)
else:
warnings.warn("Unable to draw Configs items without a world or robot")
elif itypes == 'Vector3':
def drawRaw():
GL.glDisable(GL.GL_LIGHTING)
GL.glEnable(GL.GL_POINT_SMOOTH)
GL.glPointSize(self.attributes.get("size",5.0))
GL.glColor4f(*self.attributes.get("color",[0,0,0,1]))
GL.glBegin(GL.GL_POINTS)
GL.glVertex3f(0,0,0)
GL.glEnd()
self.displayCache[0].draw(drawRaw,[so3.identity(),item])
if name is not None:
self.drawText(name,item)
elif itypes == 'RigidTransform':
def drawRaw():
fancy = self.attributes.get("fancy",False)
if fancy: GL.glEnable(GL.GL_LIGHTING)
else: GL.glDisable(GL.GL_LIGHTING)
gldraw.xform_widget(se3.identity(),self.attributes.get("length",0.1),self.attributes.get("width",0.01),fancy=fancy)
self.displayCache[0].draw(drawRaw,transform=item)
if name is not None:
self.drawText(name,item[1])
else:
warnings.warn("Unable to draw item of type \"%s\""%(str(itypes),))
#revert appearance
if not self.useDefaultAppearance and hasattr(item,'appearance'):
item.appearance().set(self.oldAppearance)
def getBounds(self):
"""Returns a bounding box (bmin,bmax) or None if it can't be found"""
if len(self.subAppearances)!=0:
bb = bb_create()
for n,app in self.subAppearances.items():
bb = bb_union(bb,app.getBounds())
return bb
item = self.item
if isinstance(item,coordinates.Point):
return [item.worldCoordinates(),item.worldCoordinates()]
elif isinstance(item,coordinates.Direction):
T = item.frame().worldCoordinates()
d = item.localCoordinates()
L = self.attributes.get("length",0.1)
return bb_create(T[1],se3.apply(T,vectorops.mul(d,L)))
elif isinstance(item,coordinates.Frame):
T = item.worldCoordinates()
L = self.attributes.get("length",0.1)
return bb_create(T[1],se3.apply(T,(L,0,0)),se3.apply(T,(0,L,0)),se3.apply(T,(0,0,L)))
elif isinstance(item,ContactPoint):
L = self.attributes.get("length",0.05)
return bb_create(item.x,vectorops.madd(item.x,item.n,L))
elif isinstance(item,WorldModel):
pass
elif hasattr(item,'geometry'):
return item.geometry().getBB()
elif hasattr(item,'getBB'):
return item.getBB()
elif isinstance(item,(str,VisPlot)):
pass
else:
try:
vtype = objectToVisType(item,None)
if 'Vector3' == vtype:
#assumed to be a point
return (item,item)
elif 'RigidTransform' == vtype:
#assumed to be a rigid transform
T = item
L = self.attributes.get("length",0.1)
return bb_create(T[1],se3.apply(T,(L,0,0)),se3.apply(T,(0,L,0)),se3.apply(T,(0,0,L)))
except Exception:
raise
pass
warnings.warn("Empty bound for object {} type {}".format(self.name,self.item.__class__.__name__))
return bb_create()
def getCenter(self):
bb = self.getBounds()
if bb_empty(bb):
item = self.item
if hasattr(item,'getCurrentTransform'):
return item.getCurrentTransform()[1]
elif hasattr(item,'getTransform'):
return item.getTransform()[1]
return [0,0,0]
return vectorops.interpolate(bb[0],bb[1],0.5)
def getTransform(self):
if len(self.subAppearances) != 0:
return (so3.identity(),self.getCenter())
item = self.item
if isinstance(item,coordinates.Frame):
T = item.worldCoordinates()
L = self.attributes.get("length",0.1)
return (T[0],se3.apply(T,(L/2,L/2,L/2)))
elif hasattr(item,'geometry'):
return item.geometry().getCurrentTransform()
elif hasattr(item,'getCurrentTransform'):
return item.getCurrentTransform()
elif hasattr(item,'getTransform'):
return item.getTransform()
else:
try:
vtype = objectToVisType(item,None)
if 'RigidTransform' == vtype:
T = item
L = self.attributes.get("length",0.1)
return (T[0],se3.apply(T,(L/2,L/2,L/2)))
except Exception:
raise
return (so3.identity(),self.getCenter())
def getSubItem(self,path):
if len(path) == 0: return self
for k,v in self.subAppearances.items():
if v.name == path[0]:
try:
return v.getSubItem(path[1:])
except ValueError as e:
raise ValueError("Invalid sub-path specified "+str(path)+" at "+str(e))
raise ValueError("Invalid sub-item specified "+str(path[0]))
def make_editor(self,world=None):
if self.editor is not None:
return
item = self.item
if isinstance(item,coordinates.Point):
res = PointPoser()
res.set(item.worldCoordinates())
res.setAxes(item.frame().worldCoordinates()[0])
elif isinstance(item,coordinates.Direction):
res = PointPoser()
res.set(item.worldCoordinates())
res.setAxes(item.frame().worldCoordinates()[0])
elif isinstance(item,coordinates.Frame):
res = TransformPoser()
res.set(*item.worldCoordinates())
elif isinstance(item,RobotModel):
res = RobotPoser(item)
elif isinstance(item,SubRobotModel):
res = RobotPoser(item._robot)
            res.setActiveDofs(item._links)
elif isinstance(item,RigidObjectModel):
res = ObjectPoser(item)
elif isinstance(item,(list,tuple)):
#determine if it's a rotation, transform, or point
itype = objectToVisType(item,None)
if itype == 'Vector3':
res = PointPoser()
res.set(item)
elif itype == 'Matrix3':
res = TransformPoser()
res.enableRotation(True)
res.enableTranslation(False)
res.set(item)
elif itype == 'RigidTransform':
res = TransformPoser()
res.enableRotation(True)
res.enableTranslation(True)
res.set(*item)
elif itype == 'Config':
if world is not None and world.numRobots() > 0 and world.robot(0).numLinks() == len(item):
#it's a valid configuration
oldconfig = world.robot(0).getConfig()
world.robot(0).setConfig(item)
res = RobotPoser(world.robot(0))
world.robot(0).setConfig(oldconfig)
else:
warnings.warn("VisAppearance.make_editor(): Editor for object of type {} cannot be associated with a robot".format(itype))
return
else:
warnings.warn("VisAppearance.make_editor(): Editor for object of type {} not defined".format(itype))
return
else:
warnings.warn("VisAppearance.make_editor(): Editor for object of type {} not defined".format(item.__class__.__name__))
return
self.editor = res
def update_editor(self,item_to_editor=False):
for (name,item) in self.subAppearances.items():
item.update_editor(item_to_editor)
if self.editor is None:
return
item = self.item
if item_to_editor:
if isinstance(item,coordinates.Point):
self.editor.set(self.item.worldCoordinates())
elif isinstance(item,coordinates.Direction):
self.editor.set(self.item.worldCoordinates())
elif isinstance(item,coordinates.Frame):
self.editor.set(*self.item.worldCoordinates())
elif isinstance(self.item,RobotModel):
self.editor.set(self.item.getConfig())
elif isinstance(self.item,SubRobotModel):
self.editor.set(self.item.tofull(self.item.getConfig()))
elif isinstance(self.item,RigidObjectModel):
self.editor.set(*self.item.getTransform())
elif isinstance(self.item,(list,tuple)):
itype = objectToVisType(self.item,None)
if itype in ('Vector3','Matrix3'):
self.editor.set(self.item)
elif itype == 'RigidTransform':
self.editor.set(*self.item)
elif itype == 'Config':
self.editor.set(self.item)
else:
raise RuntimeError("Uh... unsupported type with an editor?")
else:
if not self.editor.hasFocus():
return
if isinstance(item,coordinates.Point):
self.item._localCoordinates = se3.apply(se3.inv(self.item._frame.worldCoordinates()),self.editor.get())
elif isinstance(item,coordinates.Direction):
self.item._localCoordinates = se3.apply(se3.inv(self.item._frame.worldCoordinates()),self.editor.get())
elif isinstance(item,coordinates.Frame):
self.item._worldCoordinates = self.editor.get()
self.item._relativeCoordinates = se3.mul(se3.inv(self.item.parent().worldCoordinates()),self.editor.get())
#TODO: updating downstream frames?
elif isinstance(self.item,RobotModel):
self.item.setConfig(self.editor.getConditioned(self.item.getConfig()))
elif isinstance(self.item,SubRobotModel):
self.item.setConfig(self.item.fromfull(self.editor.get()))
elif isinstance(self.item,RigidObjectModel):
self.item.setTransform(*self.editor.get())
elif isinstance(self.item,(tuple,list)):
def setList(a,b):
if isinstance(a,(list,tuple)) and isinstance(b,(list,tuple)):
if len(a) == len(b):
for i in range(len(a)):
if not setList(a[i],b[i]):
if isinstance(a,list):
a[i] = b[i]
else:
return False
return True
return False
v = self.editor.get()
if not setList(self.item,v):
self.item = v
elif isinstance(self.item,tuple):
warnings.warn("Edited a tuple... maybe a point or an xform? can't actually edit")
self.item = self.editor.get()
else:
raise RuntimeError("Uh... unsupported type with an editor?")
def remove_editor(self):
self.editor = None
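# Editor round-trip sketch (added for illustration; 'app' stands for a
# hypothetical VisAppearance wrapping a RigidObjectModel):
#   app.make_editor()                        # attaches an ObjectPoser
#   app.update_editor(item_to_editor=True)   # push the item's pose into the poser
#   ...user drags the poser in the GUI...
#   app.update_editor()                      # pull the edited pose back into the item
#   app.remove_editor()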
class VisualizationScene:
"""Holds all of the visualization information for a scene, including
labels, edit status, and animations"""
def __init__(self):
self.items = {}
self.labels = []
self.t = 0
self.timeCallback = None
self.startTime = None
self.animating = True
self.currentAnimationTime = 0
self.doRefresh = False
self.cameraController = None
def getItem(self,item_name):
"""Returns an VisAppearance according to the given name or path"""
if isinstance(item_name,(list,tuple)):
components = item_name
if len(components)==1:
return self.getItem(components[0])
if components[0] not in self.items:
raise ValueError("Invalid top-level item specified: "+str(item_name))
return self.items[components[0]].getSubItem(components[1:])
if item_name in self.items:
return self.items[item_name]
def dirty(self,item_name='all'):
"""Marks an item or everything as dirty, forcing a deep redraw."""
global _globalLock
with _globalLock:
if item_name == 'all':
for (name,itemvis) in self.items.items():
itemvis.markChanged()
else:
self.getItem(item_name).markChanged()
def clear(self):
"""Clears the visualization world"""
global _globalLock
with _globalLock:
for (name,itemvis) in self.items.items():
itemvis.destroy()
self.items = {}
self.currentAnimationTime = 0
self.doRefresh = True
def clearText(self):
"""Clears all text in the visualization."""
global _globalLock
with _globalLock:
del_items = []
for (name,itemvis) in self.items.items():
if isinstance(itemvis.item,str):
itemvis.destroy()
del_items.append(name)
for n in del_items:
del self.items[n]
def listItems(self,root=None,indent=0):
"""Prints out all items in the visualization world."""
if root is None:
for name,value in self.items.items():
self.listItems(value,indent)
else:
if isinstance(root,str):
root = self.getItem(root)
if indent > 0:
print(" "*(indent-1), end=' ')
print(root.name)
for n,v in root.subAppearances.items():
self.listItems(v,indent+2)
def getItemName(self,object):
name = None
if hasattr(object,'getName'):
name = object.getName()
if hasattr(object,'name'):
name = object.name()
if name is not None and name in self.items:
if self.items[name].item is object:
return name
if isinstance(object,RobotModelLink): #a link?
robot = object.robot()
robpath = self.getItemName(robot)
            if robpath:
                if isinstance(robpath, str):
                    robpath = (robpath,)  # normalize a top-level name to a path tuple
                return robpath + (name,)
else:
warnings.warn("Couldnt find link {} under robot".format(name))
def objectPath(app,obj,name):
            if app.item is obj:
return ()
if name is not None and app.name == name:
return ()
for (subitem,subapp) in app.subAppearances.items():
subpath = objectPath(subapp,obj,name)
if subpath is not None:
return (subapp.name,)+subpath
return None
if hasattr(object,'world'):
#look through the world for the object
world = self.items.get('world',None)
if world is not None:
p = objectPath(self.items['world'],object,name)
if p is not None:
return ('world',)+p
else:
warnings.warn("Couldnt find object {} under world".format(name))
for (k,v) in self.items.items():
p = objectPath(v,object,name)
if p is not None:
if len(p)==0: #top level item
return k
return (k,)+p
else:
warnings.warn("Couldnt find object {} under {}".format(name,k))
return None
def add(self,name,item,keepAppearance=False,**kwargs):
"""Adds a named item to the visualization world. If the item already
exists, the appearance information will be reinitialized if keepAppearance=False
(default) or be kept if keepAppearance=True."""
global _globalLock
assert not isinstance(name,(list,tuple)),"Cannot add sub-path items"
with _globalLock:
if keepAppearance and name in self.items:
self.items[name].setItem(item)
else:
#need to erase prior item visualizer
if name in self.items:
self.items[name].destroy()
type = kwargs.get('type',None)
if type is None:
world = self.items.get('world',None)
if world is not None: world = world.item
try:
type = objectToVisType(item,world)
except Exception:
type = None
app = VisAppearance(item,name,type)
self.items[name] = app
item = self.items[name]
for (attr,value) in kwargs.items():
self._setAttribute(item,attr,value)
#self.refresh()
def addText(self,name,text,**kwargs):
self.add(name,text,True,**kwargs)
def animate(self,name,animation,speed=1.0,endBehavior='loop'):
global _globalLock
with _globalLock:
if hasattr(animation,'__iter__'):
#a list of milestones -- loop through them with 1s delay
print("visualization.animate(): Making a Trajectory with unit durations between",len(animation),"milestones")
animation = Trajectory(list(range(len(animation))),animation)
if isinstance(animation,MultiPath):
world = self.items.get('world',None)
if world is not None:
world=world.item
if world.numRobots() > 0:
#discretize multipath
robot = world.robot(0)
animation = animation.getTrajectory(robot,0.1)
else:
animation = animation.getTrajectory()
else:
animation = animation.getTrajectory()
assert isinstance(animation,Trajectory) or animation is None,"Must animate() with a Trajectory object or list of milestones"
item = self.getItem(name)
item.animation = animation
item.animationStartTime = self.currentAnimationTime
item.animationSpeed = speed
item.animationEndBehavior = endBehavior
item.markChanged(config=True,appearance=False)
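    # animate() usage sketch (added; 'scene' and the item name are hypothetical):
    #   scene.animate('robot', [q0, q1, q2], speed=0.5)
    # A bare milestone list becomes Trajectory([0,1,2], [q0,q1,q2]), i.e. unit
    # time between milestones, which animationSpeed then scales.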
def pauseAnimation(self,paused=True):
global _globalLock
with _globalLock:
self.animating = not paused
def stepAnimation(self,amount):
global _globalLock
with _globalLock:
self.animationTime(self.currentAnimationTime + amount)
def animationTime(self,newtime=None):
global _globalLock
if newtime is None:
#query mode
return self.currentAnimationTime
#update mode
with _globalLock:
self.currentAnimationTime = newtime
self.doRefresh = True
for (k,v) in self.items.items():
#do animation updates
v.updateAnimation(self.currentAnimationTime)
return
def remove(self,name):
global _globalLock
with _globalLock:
assert name in self.items,"Can only remove top level objects from visualization, try hide() instead"
item = self.getItem(name)
item.destroy()
del self.items[name]
self.doRefresh = True
def getItemConfig(self,name):
global _globalLock
with _globalLock:
res = config.getConfig(self.getItem(name).item)
return res
def setItemConfig(self,name,value):
global _globalLock
with _globalLock:
item = self.getItem(name)
if isinstance(item.item,(list,tuple)):
#TODO: broadcast value to the shape of item
item.item = value
elif isinstance(item.item,str):
item.item = value
else:
config.setConfig(item.item,value)
if item.editor:
item.update_editor(item_to_editor = True)
item.markChanged(config=True,appearance=False)
self.doRefresh = True
def addLabel(self,text,point,color):
global _globalLock
with _globalLock:
self.labels.append((text,point,color))
def hideLabel(self,name,hidden=True):
global _globalLock
with _globalLock:
item = self.getItem(name)
item.attributes["hide_label"] = hidden
item.markChanged(config=False,appearance=True)
self.doRefresh = True
def hide(self,name,hidden=True):
global _globalLock
with _globalLock:
self.getItem(name).attributes['hidden'] = hidden
self.doRefresh = True
def addPlotItem(self,plotname,itemname):
global _globalLock
with _globalLock:
plot = self.getItem(plotname)
assert plot is not None and isinstance(plot.item,VisPlot),(plotname+" is not a valid plot")
plot = plot.item
for i in plot.items:
assert i.name != itemname,(str(itemname)+" is already in the plot "+plotname)
item = self.getItem(itemname)
assert item is not None,(str(itemname)+" is not a valid item")
plot.items.append(VisPlotItem(itemname,item))
def logPlot(self,plotname,itemname,value):
global _globalLock
with _globalLock:
customIndex = -1
plot = self.getItem(plotname)
assert plot is not None and isinstance(plot.item,VisPlot),(plotname+" is not a valid plot")
compress = plot.attributes['compress']
plot = plot.item
for i,item in enumerate(plot.items):
if len(item.name)==0:
customIndex = i
if customIndex < 0:
customIndex = len(plot.items)
plot.items.append(VisPlotItem('',None))
t = self.t
if self.startTime is not None:
if self.timeCallback is None:
t = time.time() - self.startTime
else:
t = self.timeCallback() - self.startTime
else:
t = 0
plot.items[customIndex].compressThreshold = compress
plot.items[customIndex].customUpdate(itemname,t,value)
def logPlotEvent(self,plotname,eventname,color):
global _globalLock
with _globalLock:
plot = self.getItem(plotname)
assert plot is not None and isinstance(plot.item,VisPlot),(plotname+" is not a valid plot")
t = self.t
if self.startTime is not None:
if self.timeCallback is None:
t = time.time() - self.startTime
else:
t = self.timeCallback() - self.startTime
else:
t = 0
plot.item.addEvent(eventname,t,color)
def hidePlotItem(self,plotname,itemname,hidden=True):
global _globalLock
with _globalLock:
plot = self.getItem(plotname)
assert plot is not None and isinstance(plot.item,VisPlot),plotname+" is not a valid plot"
plot = plot.item
identified = False
if isinstance(itemname,(tuple,list)):
for i in plot.items:
if i.name == itemname[0]:
assert itemname[1] < len(i.hidden),("Invalid component index of item "+str(itemname[0]))
identified = True
                        i.hidden[itemname[1]] = hidden
            else:
                for i in plot.items:
                    if i.name == itemname:
                        identified = True
                        for j in range(len(i.hidden)):
                            i.hidden[j] = hidden
assert identified,("Invalid item "+str(itemname)+" specified in plot "+plotname)
self.doRefresh = True
def savePlot(self,plotname,fn):
global _globalLock
with _globalLock:
plot = self.getItem(plotname)
assert plot is not None and isinstance(plot.item,VisPlot),plotname+" is not a valid plot"
plot = plot.item
if fn is not None:
plot.beginSave(fn)
else:
plot.endSave(fn)
def setAppearance(self,name,appearance):
global _globalLock
with _globalLock:
item = self.getItem(name)
item.useDefaultAppearance = False
item.customAppearance = appearance
item.markChanged(config=False,appearance=True)
self.doRefresh = True
def _setAttribute(self,item,attr,value):
"""Internal use only"""
item.attributes[attr] = value
        if value is None:
            del item.attributes[attr]
if attr=='color':
item.useDefaultAppearance = False
if len(item.subAppearances) > 0 and attr not in ['label','hidden']:
#some attributes don't get inherited
for n,app in item.subAppearances.items():
self._setAttribute(app,attr,value)
if attr=='type':
#modify the parent attributes
item.attributes.setParent(_default_attributes(item.item,type=value))
item.markChanged(config=False,appearance=True)
def setAttribute(self,name,attr,value):
global _globalLock
with _globalLock:
item = self.getItem(name)
if item is None:
raise ValueError("Item "+str(name)+" doesn't exist in scene")
self._setAttribute(item,attr,value)
self.doRefresh = True
def getAttribute(self,name,attr):
global _globalLock
with _globalLock:
item = self.getItem(name)
res = item.attributes[attr]
return res
def getAttributes(self,name):
global _globalLock
with _globalLock:
item = self.getItem(name)
res = item.getAttributes()
return res
def revertAppearance(self,name):
global _globalLock
with _globalLock:
item = self.getItem(name)
item.useDefaultAppearance = True
item.markChanged(config=False,appearance=True)
self.doRefresh = True
def setColor(self,name,r,g,b,a=1.0):
global _globalLock
with _globalLock:
item = self.getItem(name)
self._setAttribute(item,"color",[r,g,b,a])
item.markChanged(config=False,appearance=True)
self.doRefresh = True
def setDrawFunc(self,name,func):
global _globalLock
with _globalLock:
item = self.getItem(name)
item.customDrawFunc = func
self.doRefresh = True
def autoFitCamera(self,zoom=True,rotate=True,scale=1.0):
vp = self.getViewport()
try:
autoFitViewport(vp,list(self.items.values()),zoom=zoom,rotate=rotate)
vp.camera.dist /= scale
self.setViewport(vp)
        except Exception as e:
            warnings.warn("Unable to auto-fit camera: " + str(e))
import traceback
traceback.print_exc()
def followCamera(self,target,translate,rotate,center):
if target is None:
self.cameraController = None
return
vp = self.getViewport()
        if isinstance(target, (str, tuple, list)):
try:
target = self.getItem(target)
except KeyError:
raise ValueError("Invalid item "+str(target))
target_center = target.getCenter()
if center:
if translate:
_camera_translate(vp,target_center)
else:
_camera_lookat(vp,target_center)
if translate and rotate:
self.cameraController = _TrackingCameraController(vp,target)
elif translate:
self.cameraController = _TranslatingCameraController(vp,target)
elif rotate:
self.cameraController = _TargetCameraController(vp,target)
else:
self.cameraController = None
elif isinstance(target,Trajectory):
self.cameraController = _TrajectoryCameraController(vp,target)
elif isinstance(target,SimRobotSensor):
self.cameraController = _SimCamCameraController(vp,target)
else:
raise ValueError("Invalid value for target, must either be str or a Trajectory")
def setTimeCallback(self,cb):
"""Sets a callback in updateTime() to set the current time"""
self.timeCallback = cb
def updateTime(self,t=None):
"""The backend will call this during an idle loop to update the
visualization time. This may also update animations if currently
animating."""
if t is None:
if self.timeCallback is None:
t = time.time()
else:
t = self.timeCallback()
if self.startTime is None:
self.startTime = t
oldt = self.t
self.t = t-self.startTime
if self.t - oldt < 0:
warnings.warn("Time is going negative?")
if self.animating:
if self.t - oldt > 0:
self.stepAnimation(self.t - oldt)
for (k,v) in self.items.items():
#do other updates
v.updateTime(self.t)
def updateCamera(self):
"""Updates the camera, if controlled. The backend should call
this whenever the scene is to be drawn."""
if self.cameraController is not None:
vp = self.cameraController.update(self.currentAnimationTime)
if vp is not None:
self.setViewport(vp)
def edit(self,name,doedit=True):
raise NotImplementedError("Needs to be implemented by subclass")
def getViewport(self):
raise NotImplementedError("Needs to be implemented by subclass")
def setViewport(self,viewport):
raise NotImplementedError("Needs to be implemented by subclass")
def setBackgroundColor(self,r,g,b,a=1):
raise NotImplementedError("Needs to be implemented by subclass")
def renderGL(self,view):
"""Renders the scene in OpenGL"""
vp = view.toViewport()
self.labels = []
world = self.items.get('world',None)
if world is not None: world=world.item
#draw solid items first
delayed = []
for (k,v) in self.items.items():
transparent = v.transparent()
if transparent is not False:
delayed.append(k)
if transparent is True:
continue
v.widget = self
v.swapDrawConfig()
v.drawGL(world,viewport=vp,draw_transparent=False)
v.swapDrawConfig()
#allows garbage collector to delete these objects
v.widget = None
for k in delayed:
v = self.items[k]
v.widget = self
v.swapDrawConfig()
v.drawGL(world,viewport=vp,draw_transparent=True)
v.swapDrawConfig()
#allows garbage collector to delete these objects
v.widget = None
#cluster label points and draw labels
pointTolerance = view.camera.dist*0.03
if pointTolerance > 0:
pointHash = {}
for (text,point,color) in self.labels:
index = tuple([int(x/pointTolerance) for x in point])
try:
pointHash[index][1].append((text,color))
except KeyError:
pointHash[index] = [point,[(text,color)]]
for (p,items) in pointHash.values():
self._renderGLLabelRaw(view,p,*list(zip(*items)))
def renderScreenGL(self,view,window):
cx = 20
cy = 20
GL.glDisable(GL.GL_LIGHTING)
GL.glDisable(GL.GL_DEPTH_TEST)
for (k,v) in self.items.items():
if isinstance(v.item,VisPlot) and not v.attributes['hidden']:
pos = v.attributes['position']
duration = v.attributes['duration']
vrange = v.attributes['range']
w,h = v.attributes['size']
if pos is None:
v.item.renderGL(window,cx,cy,w,h,duration,vrange[0],vrange[1])
cy += h+18
else:
x = pos[0]
y = pos[1]
if x < 0:
x = view.w + x
if y < 0:
y = view.h + y
v.item.renderGL(window,x,y,w,h,duration,vrange[0],vrange[1])
for (k,v) in self.items.items():
if isinstance(v.item,str) and not v.attributes['hidden']:
pos = v.attributes['position']
col = v.attributes['color']
size = v.attributes['size']
if pos is None:
#draw at console
window.draw_text((cx,cy+size),v.item,size,col)
cy += (size*15)/10
elif len(pos)==2:
x = pos[0]
y = pos[1]
if x < 0:
x = view.w + x
if y < 0:
y = view.h + y
window.draw_text((x,y+size),v.item,size,col)
GL.glEnable(GL.GL_DEPTH_TEST)
def _renderGLLabelRaw(self,view,point,textList,colorList):
#assert not self.makingDisplayList,"drawText must be called outside of display list"
assert self.window is not None
invCameraRot = so3.inv(view.camera.matrix()[0])
for i,(text,c) in enumerate(zip(textList,colorList)):
if i+1 < len(textList): text = text+","
projpt = view.project(point,clip=False)
if projpt[2] > view.clippingplanes[0]:
d = float(12)/float(view.w)*projpt[2]*0.7
point = vectorops.add(point,so3.apply(invCameraRot,(0,-d,0)))
GL.glDisable(GL.GL_LIGHTING)
GL.glDisable(GL.GL_DEPTH_TEST)
GL.glColor3f(*c)
self.draw_text(point,text,size=12)
GL.glEnable(GL.GL_DEPTH_TEST)
def clearDisplayLists(self):
for i in self.items.values():
i.clearDisplayLists()
def saveJsonConfig(self,fn=None):
def dumpitem(v):
if len(v.subAppearances) > 0:
items = []
for (k,app) in v.subAppearances.items():
jsapp = dumpitem(app)
if len(jsapp) > 0:
items.append({"name":k,"appearance":jsapp})
return items
else:
return v.attributes.flatten()
out = {}
for (k,v) in self.items.items():
out[k] = dumpitem(v)
if fn is None:
return out
else:
import json
            with open(fn, 'w') as f:
                json.dump(out, f)
return out
def loadJsonConfig(self,jsonobj_or_file):
if isinstance(jsonobj_or_file,str):
import json
            with open(jsonobj_or_file, 'r') as f:
                jsonobj = json.load(f)
else:
jsonobj = jsonobj_or_file
def parseitem(js,app):
if isinstance(js,dict):
for (attr,value) in js.items():
app.attributes[attr] = value
app.markChanged()
elif isinstance(js,list):
for val in js:
if not isinstance(val,dict) or "name" not in val or "appearance" not in val:
warnings.warn("JSON object {} does not contain a valid subappearance".format(js))
name = val["name"]
jsapp = val["appearance"]
if isinstance(name,list):
name = tuple(name)
if name not in app.subAppearances:
warnings.warn("JSON object {} subappearance {} not in visualization".format(js,name))
else:
parseitem(jsapp,app.subAppearances[name])
else:
warnings.warn("JSON object {} does not contain a dict of attributes or list of sub-appearances".format(js))
parsed = set()
for (k,v) in self.items.items():
if k in jsonobj:
parsed.add(k)
parseitem(jsonobj[k],v)
else:
warnings.warn("Visualization object {} not in JSON object".format(k))
for (k,v) in jsonobj.items():
if k not in parsed:
warnings.warn("JSON object {} not in visualization".format(k))
def _camera_translate(vp,tgt):
vp.camera.tgt = tgt
def _camera_lookat(vp,tgt):
T = vp.getTransform()
vp.camera.tgt = tgt
vp.camera.dist = max(vectorops.distance(T[1],tgt),0.1)
#set R to point at target
zdir = vectorops.unit(vectorops.sub(tgt,T[1]))
xdir = vectorops.unit(vectorops.cross(zdir,[0,0,1]))
ydir = vectorops.unit(vectorops.cross(zdir,xdir))
R = xdir + ydir + zdir
vp.camera.set_orientation(R,'xyz')
class _TrackingCameraController:
def __init__(self,vp,target):
self.vp = vp
T = vp.getTransform()
target.swapDrawConfig()
self.viewportToTarget = se3.mul(se3.inv(target.getTransform()),T)
target.swapDrawConfig()
self.target = target
def update(self,t):
self.target.swapDrawConfig()
T = se3.mul(self.target.getTransform(),self.viewportToTarget)
self.target.swapDrawConfig()
self.vp.setTransform(T)
return self.vp
class _TranslatingCameraController:
def __init__(self,vp,target):
self.vp = vp
target.swapDrawConfig()
self.last_target_pos = target.getCenter()
target.swapDrawConfig()
self.target = target
def update(self,t):
self.target.swapDrawConfig()
t = self.target.getCenter()
self.target.swapDrawConfig()
self.vp.camera.tgt = vectorops.add(self.vp.camera.tgt,vectorops.sub(t,self.last_target_pos))
self.last_target_pos = t
return self.vp
class _TargetCameraController:
def __init__(self,vp,target):
self.vp = vp
target.swapDrawConfig()
self.last_target_pos = target.getCenter()
target.swapDrawConfig()
self.target = target
def update(self,t):
self.target.swapDrawConfig()
t = self.target.getCenter()
self.target.swapDrawConfig()
tgt = vectorops.add(self.vp.camera.tgt,vectorops.sub(t,self.last_target_pos))
self.last_target_pos = t
_camera_lookat(self.vp,tgt)
return self.vp
class _TrajectoryCameraController:
def __init__(self,vp,trajectory):
self.vp = vp
if isinstance(trajectory,SE3Trajectory):
pass
elif isinstance(trajectory,SO3Trajectory):
pass
else:
assert isinstance(trajectory,Trajectory)
pass
self.trajectory = trajectory
def update(self,t):
if isinstance(self.trajectory,(SE3Trajectory,SE3HermiteTrajectory)):
T = self.trajectory.eval(t,'loop')
self.vp.setTransform(T)
elif isinstance(self.trajectory,(SO3Trajectory,SO3HermiteTrajectory)):
R = self.trajectory.eval(t,'loop')
self.vp.camera.set_orientation(R,'xyz')
else:
trans = self.trajectory.eval(t,'loop')
T = self.vp.getTransform()
            ofs = vectorops.sub(self.vp.camera.tgt, T[1])  # offset of the look-at target from the camera position (assumed intent; the original call was malformed)
self.vp.camera.tgt = vectorops.add(trans,ofs)
return self.vp
class _SimCamCameraController:
def __init__(self,vp,target):
self.vp = vp
self.target = target
def update(self,t):
from ..model import sensing
T = sensing.get_sensor_xform(self.target,self.target.robot())
self.vp.setTransform(T)
return self.vp
class _WindowManager:
def reset(self):
raise NotImplementedError()
def frontend(self):
raise NotImplementedError()
def createWindow(self,title):
raise NotImplementedError()
def setWindow(self,id):
raise NotImplementedError()
def getWindow(self):
raise NotImplementedError()
def scene(self):
raise NotImplementedError()
def getWindowName(self):
raise NotImplementedError()
def setWindowName(self,name):
raise NotImplementedError()
def resizeWindow(self,w,h):
raise NotImplementedError()
def setPlugin(self,plugin):
raise NotImplementedError()
def pushPlugin(self,plugin):
raise NotImplementedError()
def popPlugin(self):
raise NotImplementedError()
def splitView(self,plugin):
raise NotImplementedError()
def multithreaded(self):
return False
def run(self):
raise NotImplementedError()
def loop(self,setup,callback,cleanup):
raise NotImplementedError()
def spin(self,duration):
raise NotImplementedError()
def show(self):
raise NotImplementedError()
def shown(self):
raise NotImplementedError()
def hide(self):
raise NotImplementedError()
def dialog(self):
raise NotImplementedError()
def lock(self):
global _globalLock
_globalLock.acquire()
def unlock(self):
global _globalLock
_globalLock.release()
def update(self):
pass
def cleanup(self):
pass
def kill(self):
pass
def threadCall(self,func):
func()
def screenshot(self,*args):
raise NotImplementedError()
def screenshotCallback(self,fn,*args):
raise NotImplementedError()
class _ThreadedWindowManager(_WindowManager):
def __init__(self):
#signals to visualization thread
self.quit = False
self.in_vis_loop = False
self.vis_thread_running = False
self.vis_thread = None
self.in_app_thread = False
self.threadcalls = []
def reset(self):
self.kill()
self.in_app_thread = False
self.threadcalls = []
def run_app_thread(self,callback):
raise NotImplementedError()
def multithreaded(self):
        return sys.platform != 'darwin'
def kill(self):
self.quit = True
if self.in_vis_loop:
            #if the thread is running, quit=True just signals to the vis loop to quit.
#otherwise, the program needs to be killed and cleaned up
if not self.vis_thread_running:
#need to clean up Qt resources
self.cleanup()
return
if self.vis_thread_running:
if self.vis_thread is not None:
self.vis_thread.join()
self.vis_thread = None
else:
#should we be tolerant to weird things that happen with kill?
self.vis_thread_running = False
        assert not self.vis_thread_running
self.quit = False
def loop(self,setup,callback,cleanup):
if self.vis_thread_running or self.in_vis_loop:
if setup is not None:
setup()
self.show()
dt = 1.0/30.0
while self.shown():
t0 = time.time()
if callback is not None:
self.lock()
callback()
self.unlock()
t1 = time.time()
time.sleep(max(0,dt-(t1-t0)))
if cleanup is not None:
cleanup()
return
raise RuntimeError("Cannot call loop() after show(), inside dialog(), or inside loop() callbacks")
self.in_vis_loop = True
try:
if setup is not None:
setup()
self.quit = False
self.run_app_thread(callback)
if cleanup is not None:
cleanup()
finally:
self.in_vis_loop = False
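    # loop() usage sketch (added; 'mgr' stands for a concrete backend subclass):
    #   mgr.loop(setup=lambda: mgr.show(),
    #            callback=lambda: None,   # per-frame work, called at roughly 30 Hz
    #            cleanup=None)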
def spin(self,duration):
if self.in_vis_loop:
raise RuntimeError("spin() cannot be used inside loop()")
if self.multithreaded():
#use existing thread
self.show()
t = 0
while t < duration:
if not self.shown(): break
time.sleep(min(0.04,duration-t))
t += 0.04
self.hide()
else:
#use single thread
t0 = time.time()
def timed_break():
t1 = time.time()
if t1 - t0 >= duration:
self.hide()
self.loop(callback=timed_break,setup=lambda:self.show(),cleanup=None)
return
def run(self):
if self.vis_thread_running:
#already multithreaded, can't go back to single thread
self.show()
while self.shown():
time.sleep(0.1)
else:
#run in a single thread
self.loop(setup=None,callback=None,cleanup=None)
def _start_app_thread(self):
signal.signal(signal.SIGINT, signal.SIG_DFL)
        self.vis_thread = threading.Thread(target=self.run_app_thread, args=(None,))  # no per-frame callback in background mode (assumed; the original passed no args to a one-argument method)
        self.vis_thread.daemon = True
self.vis_thread.start()
time.sleep(0.1)
def threadCall(self,func):
self.threadcalls.append(func)
|
bsd-3-clause
|
Thraxis/pymedusa
|
lib/sqlalchemy/ext/horizontal_shard.py
|
55
|
4814
|
# ext/horizontal_shard.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Horizontal sharding support.
Defines a rudimentary 'horizontal sharding' system which allows a Session to
distribute queries and persistence operations across multiple databases.
For a usage example, see the :ref:`examples_sharding` example included in
the source distribution.
"""
from .. import util
from ..orm.session import Session
from ..orm.query import Query
__all__ = ['ShardedSession', 'ShardedQuery']
class ShardedQuery(Query):
def __init__(self, *args, **kwargs):
super(ShardedQuery, self).__init__(*args, **kwargs)
self.id_chooser = self.session.id_chooser
self.query_chooser = self.session.query_chooser
self._shard_id = None
def set_shard(self, shard_id):
"""return a new query, limited to a single shard ID.
all subsequent operations with the returned query will
be against the single shard regardless of other state.
"""
q = self._clone()
q._shard_id = shard_id
return q
def _execute_and_instances(self, context):
def iter_for_shard(shard_id):
context.attributes['shard_id'] = shard_id
result = self._connection_from_session(
mapper=self._mapper_zero(),
shard_id=shard_id).execute(
context.statement,
self._params)
return self.instances(result, context)
if self._shard_id is not None:
return iter_for_shard(self._shard_id)
else:
partial = []
for shard_id in self.query_chooser(self):
partial.extend(iter_for_shard(shard_id))
# if some kind of in memory 'sorting'
# were done, this is where it would happen
return iter(partial)
def get(self, ident, **kwargs):
if self._shard_id is not None:
return super(ShardedQuery, self).get(ident)
else:
ident = util.to_list(ident)
for shard_id in self.id_chooser(self, ident):
o = self.set_shard(shard_id).get(ident, **kwargs)
if o is not None:
return o
else:
return None
class ShardedSession(Session):
def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None,
query_cls=ShardedQuery, **kwargs):
"""Construct a ShardedSession.
:param shard_chooser: A callable which, passed a Mapper, a mapped
instance, and possibly a SQL clause, returns a shard ID. This id
may be based off of the attributes present within the object, or on
some round-robin scheme. If the scheme is based on a selection, it
should set whatever state on the instance to mark it in the future as
participating in that shard.
:param id_chooser: A callable, passed a query and a tuple of identity
values, which should return a list of shard ids where the ID might
reside. The databases will be queried in the order of this listing.
:param query_chooser: For a given Query, returns the list of shard_ids
where the query should be issued. Results from all shards returned
will be combined together into a single listing.
:param shards: A dictionary of string shard names
to :class:`~sqlalchemy.engine.Engine` objects.
"""
super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs)
self.shard_chooser = shard_chooser
self.id_chooser = id_chooser
self.query_chooser = query_chooser
self.__binds = {}
self.connection_callable = self.connection
if shards is not None:
for k in shards:
self.bind_shard(k, shards[k])
def connection(self, mapper=None, instance=None, shard_id=None, **kwargs):
if shard_id is None:
shard_id = self.shard_chooser(mapper, instance)
if self.transaction is not None:
return self.transaction.connection(mapper, shard_id=shard_id)
else:
return self.get_bind(
mapper,
shard_id=shard_id,
instance=instance
).contextual_connect(**kwargs)
def get_bind(self, mapper, shard_id=None,
instance=None, clause=None, **kw):
if shard_id is None:
shard_id = self.shard_chooser(mapper, instance, clause=clause)
return self.__binds[shard_id]
def bind_shard(self, shard_id, bind):
self.__binds[shard_id] = bind
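# Usage sketch (illustration only, not part of the original module; the
# engines, shard ids, and routing rules below are hypothetical):
def _example_sharded_session():
    from sqlalchemy import create_engine
    shards = {
        'even': create_engine('sqlite://'),
        'odd': create_engine('sqlite://'),
    }
    def shard_chooser(mapper, instance, clause=None):
        # route writes by the parity of the instance's id attribute
        return 'odd' if getattr(instance, 'id', 0) % 2 else 'even'
    def id_chooser(query, ident):
        # a primary key could live on either shard; query both in order
        return ['even', 'odd']
    def query_chooser(query):
        # broadcast unqualified queries to every shard
        return ['even', 'odd']
    return ShardedSession(shard_chooser, id_chooser, query_chooser,
                          shards=shards)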
|
gpl-3.0
|
gx1997/chrome-loongson
|
third_party/mesa/MesaLib/src/mapi/glapi/gen/typeexpr.py
|
35
|
6463
|
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <[email protected]>
import string, copy
class type_node:
def __init__(self):
self.pointer = 0 # bool
self.const = 0 # bool
self.signed = 1 # bool
self.integer = 1 # bool
# If elements is set to non-zero, then field is an array.
self.elements = 0
self.name = None
self.size = 0 # type's size in bytes
return
def string(self):
"""Return string representation of this type_node."""
s = ""
if self.pointer:
s = "* "
if self.const:
s += "const "
if not self.pointer:
if self.integer:
if self.signed:
s += "signed "
else:
s += "unsigned "
if self.name:
s += "%s " % (self.name)
return s
class type_table:
def __init__(self):
self.types_by_name = {}
return
def add_type(self, type_expr):
self.types_by_name[ type_expr.get_base_name() ] = type_expr
return
def find_type(self, name):
if name in self.types_by_name:
return self.types_by_name[ name ]
else:
return None
def create_initial_types():
tt = type_table()
basic_types = [
("char", 1, 1),
("short", 2, 1),
("int", 4, 1),
("long", 4, 1),
("float", 4, 0),
("double", 8, 0),
("enum", 4, 1)
]
for (type_name, type_size, integer) in basic_types:
te = type_expression(None)
tn = type_node()
tn.name = type_name
tn.size = type_size
tn.integer = integer
te.expr.append(tn)
tt.add_type( te )
type_expression.built_in_types = tt
return
class type_expression:
built_in_types = None
def __init__(self, type_string, extra_types = None):
self.expr = []
if not type_string:
return
self.original_string = type_string
if not type_expression.built_in_types:
raise RuntimeError("create_initial_types must be called before creating type_expression objects.")
# Replace '*' with ' * ' in type_string. Then, split the string
# into tokens, separated by spaces.
tokens = string.split( string.replace( type_string, "*", " * " ) )
const = 0
t = None
signed = 0
unsigned = 0
for i in tokens:
if i == "const":
if t and t.pointer:
t.const = 1
else:
const = 1
elif i == "signed":
signed = 1
elif i == "unsigned":
unsigned = 1
elif i == "*":
# This is a quirky special-case because of the
# way the C works for types. If 'unsigned' is
# specified all by itself, it is treated the
# same as "unsigned int".
if unsigned:
self.set_base_type( "int", signed, unsigned, const, extra_types )
const = 0
signed = 0
unsigned = 0
if not self.expr:
raise RuntimeError("Invalid type expression (dangling pointer)")
if signed:
raise RuntimeError("Invalid type expression (signed / unsigned applied to pointer)")
t = type_node()
t.pointer = 1
self.expr.append( t )
else:
if self.expr:
raise RuntimeError('Invalid type expression (garbage after pointer qualifier -> "%s")' % (self.original_string))
self.set_base_type( i, signed, unsigned, const, extra_types )
const = 0
signed = 0
unsigned = 0
if signed and unsigned:
raise RuntimeError("Invalid type expression (both signed and unsigned specified)")
if const:
raise RuntimeError("Invalid type expression (dangling const)")
        if unsigned:
            raise RuntimeError("Invalid type expression (dangling unsigned)")
        if signed:
            raise RuntimeError("Invalid type expression (dangling signed)")
return
def set_base_type(self, type_name, signed, unsigned, const, extra_types):
te = type_expression.built_in_types.find_type( type_name )
if not te:
te = extra_types.find_type( type_name )
if not te:
raise RuntimeError('Unknown base type "%s".' % (type_name))
self.expr = copy.deepcopy(te.expr)
t = self.expr[ len(self.expr) - 1 ]
t.const = const
if signed:
t.signed = 1
elif unsigned:
t.signed = 0
def set_base_type_node(self, tn):
self.expr = [tn]
return
def set_elements(self, count):
tn = self.expr[0]
tn.elements = count
return
def string(self):
s = ""
for t in self.expr:
s += t.string()
return s
def get_base_type_node(self):
return self.expr[0]
def get_base_name(self):
if len(self.expr):
return self.expr[0].name
else:
return None
def get_element_size(self):
tn = self.expr[0]
if tn.elements:
return tn.elements * tn.size
else:
return tn.size
def get_element_count(self):
tn = self.expr[0]
return tn.elements
def get_stack_size(self):
tn = self.expr[ len(self.expr) - 1 ]
if tn.elements or tn.pointer:
return 4
elif not tn.integer:
return tn.size
else:
return 4
def is_pointer(self):
tn = self.expr[ len(self.expr) - 1 ]
return tn.pointer
def format_string(self):
tn = self.expr[ len(self.expr) - 1 ]
if tn.pointer:
return "%p"
elif not tn.integer:
return "%f"
else:
return "%d"
if __name__ == '__main__':
types_to_try = [ "int", "int *", "const int *", "int * const", "const int * const", \
"unsigned * const *", \
"float", "const double", "double * const"]
create_initial_types()
for t in types_to_try:
print 'Trying "%s"...' % (t)
te = type_expression( t )
print 'Got "%s" (%u, %u).' % (te.string(), te.get_stack_size(), te.get_element_size())
|
bsd-3-clause
|
bigswitch/tempest
|
tempest/api/compute/flavors/test_flavors_negative.py
|
44
|
1490
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.api_schema.request.compute.v2 import flavors
from tempest import config
from tempest import test
CONF = config.CONF
load_tests = test.NegativeAutoTest.load_tests
@test.SimpleNegativeAutoTest
class FlavorsListWithDetailsNegativeTestJSON(base.BaseV2ComputeTest,
test.NegativeAutoTest):
_service = CONF.compute.catalog_type
_schema = flavors.flavor_list
@test.SimpleNegativeAutoTest
class FlavorDetailsNegativeTestJSON(base.BaseV2ComputeTest,
test.NegativeAutoTest):
_service = CONF.compute.catalog_type
_schema = flavors.flavors_details
@classmethod
def resource_setup(cls):
super(FlavorDetailsNegativeTestJSON, cls).resource_setup()
cls.set_resource("flavor", cls.flavor_ref)
|
apache-2.0
|
ericzundel/pants
|
src/python/pants/backend/codegen/tasks/jaxb_gen.py
|
4
|
3467
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from pants.backend.codegen.targets.jaxb_library import JaxbLibrary
from pants.backend.codegen.tasks.simple_codegen_task import SimpleCodegenTask
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
class JaxbGen(SimpleCodegenTask, NailgunTask):
"""Generates java source files from jaxb schema (.xsd)."""
def __init__(self, *args, **kwargs):
"""
:param context: inherited parameter from Task
:param workdir: inherited parameter from Task
"""
super(JaxbGen, self).__init__(*args, **kwargs)
self.set_distribution(jdk=True)
self.gen_langs = set()
lang = 'java'
if self.context.products.isrequired(lang):
self.gen_langs.add(lang)
def _compile_schema(self, args):
classpath = self.dist.find_libs(['tools.jar'])
java_main = 'com.sun.tools.internal.xjc.Driver'
return self.runjava(classpath=classpath, main=java_main, args=args, workunit_name='xjc')
def synthetic_target_type(self, target):
return JavaLibrary
def is_gentarget(self, target):
return isinstance(target, JaxbLibrary)
def execute_codegen(self, target, target_workdir):
if not isinstance(target, JaxbLibrary):
raise TaskError('Invalid target type "{class_type}" (expected JaxbLibrary)'
.format(class_type=type(target).__name__))
for source in target.sources_relative_to_buildroot():
path_to_xsd = source
output_package = target.package
if output_package is None:
output_package = self._guess_package(source)
output_package = self._correct_package(output_package)
# NB(zundel): The -no-header option keeps it from writing a timestamp, making the
# output non-deterministic. See https://github.com/pantsbuild/pants/issues/1786
args = ['-p', output_package, '-d', target_workdir, '-no-header', path_to_xsd]
result = self._compile_schema(args)
if result != 0:
raise TaskError('xjc ... exited non-zero ({code})'.format(code=result))
  @classmethod
  def _guess_package(cls, path):
    """Guesses the generated java package of a schema from the last com/org/net
    component of its source path (used by execute_codegen when the target
    declares no package).
    """
supported_prefixes = ('com', 'org', 'net',)
package = ''
slash = path.rfind(os.path.sep)
prefix_with_slash = max(path.rfind(os.path.join('', prefix, ''))
for prefix in supported_prefixes)
if prefix_with_slash < 0:
package = path[:slash]
elif prefix_with_slash >= 0:
package = path[prefix_with_slash:slash]
package = package.replace(os.path.sep, ' ')
package = package.strip().replace(' ', '.')
return package
  @classmethod
  def _correct_package(cls, package):
package = package.replace('/', '.')
package = re.sub(r'^\.+', '', package)
package = re.sub(r'\.+$', '', package)
if re.search(r'\.{2,}', package) is not None:
raise ValueError('Package name cannot have consecutive periods! ({})'.format(package))
return package
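# Worked check (added for illustration; the path below is hypothetical and
# assumes a POSIX os.path.sep): _guess_package keys off the last com/org/net
# component of the source path, and _correct_package normalizes slashes and
# stray periods.
def _example_package_guessing():
  assert JaxbGen._guess_package('src/xsd/com/example/schema.xsd') == 'com.example'
  assert JaxbGen._correct_package('com/example/') == 'com.example'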
|
apache-2.0
|
haddocking/pdb-tools
|
pdbtools/pdb_uniqname.py
|
2
|
3668
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2020 João Pedro Rodrigues
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Renames atoms sequentially (C1, C2, O1, ...) for each HETATM residue.
Relies on an element column being present (see pdb_element).
Usage:
python pdb_uniqname.py <pdb file>
Example:
python pdb_uniqname.py 1CTF.pdb
This program is part of the `pdb-tools` suite of utilities and should not be
distributed in isolation. The `pdb-tools` were created to quickly manipulate PDB
files using the terminal, and can be used sequentially, with one tool streaming
data to another. They are based on old FORTRAN77 code that was taking too much
effort to maintain and compile. RIP.
"""
import collections
import os
import sys
__author__ = ["Joao Rodrigues"]
__email__ = ["[email protected]"]
def check_input(args):
"""Checks whether to read from stdin/file.
"""
# Defaults
fh = sys.stdin # file handle
if not len(args):
# Reading from pipe
if sys.stdin.isatty():
sys.stderr.write(__doc__)
sys.exit(1)
elif len(args) == 1:
# Input File
if not os.path.isfile(args[0]):
emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
sys.stderr.write(emsg.format(args[0]))
sys.stderr.write(__doc__)
sys.exit(1)
fh = open(args[0], 'r')
else: # Whatever ...
sys.stderr.write(__doc__)
sys.exit(1)
return fh
def rename_atoms(fhandle):
"""Renames HETATM atoms on each residue based on their element.
"""
prev_res = None
for line_idx, line in enumerate(fhandle):
if line.startswith('HETATM'):
element = line[76:78].strip()
if not element:
emsg = 'ERROR!! No element found in line {}'.format(line_idx)
sys.stderr.write(emsg)
sys.exit(1)
resuid = line[17:27]
if prev_res != resuid:
prev_res = resuid
element_idx = collections.defaultdict(lambda: 1) # i.e. a counter
spacer = ' ' if len(element) == 1 else ''
name = (spacer + element + str(element_idx[element])).ljust(4)
line = line[:12] + name + line[16:]
element_idx[element] += 1
yield line
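# Minimal demonstration (added; the record below is a skeletal fabrication
# carrying only the columns rename_atoms actually reads):
def _example_rename():
    fake = 'HETATM'.ljust(76) + ' C'  # element columns 77-78 hold 'C'
    out = list(rename_atoms([fake, fake]))
    assert out[0][12:16] == ' C1 '  # first carbon of the residue
    assert out[1][12:16] == ' C2 '  # same residue, counter advanced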
def main():
# Check Input
pdbfh = check_input(sys.argv[1:])
# Do the job
new_pdb = rename_atoms(pdbfh)
try:
_buffer = []
_buffer_size = 5000 # write N lines at a time
for lineno, line in enumerate(new_pdb):
if not (lineno % _buffer_size):
sys.stdout.write(''.join(_buffer))
_buffer = []
_buffer.append(line)
sys.stdout.write(''.join(_buffer))
sys.stdout.flush()
except IOError:
# This is here to catch Broken Pipes
# for example to use 'head' or 'tail' without
# the error message showing up
pass
# last line of the script
# We can close it even if it is sys.stdin
pdbfh.close()
sys.exit(0)
if __name__ == '__main__':
main()
|
apache-2.0
|
tanglei528/nova
|
plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py
|
113
|
4858
|
#!/usr/bin/env python
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This script is used to configure iptables, ebtables, and arptables rules on
XenServer hosts.
"""
import os
import sys
# This is written to Python 2.4, since that is what is available on XenServer
import simplejson as json
import novalib # noqa
def main(dom_id, command, only_this_vif=None):
xsls = novalib.execute_get_output('/usr/bin/xenstore-ls',
'/local/domain/%s/vm-data/networking' % dom_id)
macs = [line.split("=")[0].strip() for line in xsls.splitlines()]
for mac in macs:
xsread = novalib.execute_get_output('/usr/bin/xenstore-read',
'/local/domain/%s/vm-data/networking/%s' %
(dom_id, mac))
data = json.loads(xsread)
for ip in data['ips']:
if data["label"] == "public":
vif = "vif%s.0" % dom_id
else:
vif = "vif%s.1" % dom_id
if (only_this_vif is None) or (vif == only_this_vif):
params = dict(IP=ip['ip'], VIF=vif, MAC=data['mac'])
apply_ebtables_rules(command, params)
apply_arptables_rules(command, params)
apply_iptables_rules(command, params)
# A note about adding rules:
# Whenever we add any rule to iptables, arptables or ebtables we first
# delete the same rule to ensure the rule only exists once.
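# Sketch of the idiom (hypothetical rule body, for illustration only):
#   iptables('-D', 'FORWARD', <rule...>)  # drop any existing copy; a miss is harmless
#   iptables('-A', 'FORWARD', <rule...>)  # append exactly one fresh copy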
def apply_iptables_rules(command, params):
iptables = lambda *rule: novalib.execute('/sbin/iptables', *rule)
iptables('-D', 'FORWARD', '-m', 'physdev',
'--physdev-in', params['VIF'],
'-s', params['IP'],
'-j', 'ACCEPT')
if command == 'online':
iptables('-A', 'FORWARD', '-m', 'physdev',
'--physdev-in', params['VIF'],
'-s', params['IP'],
'-j', 'ACCEPT')
def apply_arptables_rules(command, params):
arptables = lambda *rule: novalib.execute('/sbin/arptables', *rule)
arptables('-D', 'FORWARD', '--opcode', 'Request',
'--in-interface', params['VIF'],
'--source-ip', params['IP'],
'--source-mac', params['MAC'],
'-j', 'ACCEPT')
arptables('-D', 'FORWARD', '--opcode', 'Reply',
'--in-interface', params['VIF'],
'--source-ip', params['IP'],
'--source-mac', params['MAC'],
'-j', 'ACCEPT')
    if command == 'online':
        # mirror the '-D' rule above; per the note, adds must match deletes exactly
        arptables('-A', 'FORWARD', '--opcode', 'Request',
                  '--in-interface', params['VIF'],
                  '--source-ip', params['IP'],
                  '--source-mac', params['MAC'],
                  '-j', 'ACCEPT')
arptables('-A', 'FORWARD', '--opcode', 'Reply',
'--in-interface', params['VIF'],
'--source-ip', params['IP'],
'--source-mac', params['MAC'],
'-j', 'ACCEPT')
def apply_ebtables_rules(command, params):
ebtables = lambda *rule: novalib.execute("/sbin/ebtables", *rule)
ebtables('-D', 'FORWARD', '-p', '0806', '-o', params['VIF'],
'--arp-ip-dst', params['IP'],
'-j', 'ACCEPT')
ebtables('-D', 'FORWARD', '-p', '0800', '-o', params['VIF'],
'--ip-dst', params['IP'],
'-j', 'ACCEPT')
if command == 'online':
ebtables('-A', 'FORWARD', '-p', '0806',
'-o', params['VIF'],
'--arp-ip-dst', params['IP'],
'-j', 'ACCEPT')
ebtables('-A', 'FORWARD', '-p', '0800',
'-o', params['VIF'],
'--ip-dst', params['IP'],
'-j', 'ACCEPT')
ebtables('-D', 'FORWARD', '-s', '!', params['MAC'],
'-i', params['VIF'], '-j', 'DROP')
if command == 'online':
ebtables('-I', 'FORWARD', '1', '-s', '!', params['MAC'],
'-i', params['VIF'], '-j', 'DROP')
if __name__ == "__main__":
if len(sys.argv) < 3:
print ("usage: %s dom_id online|offline [vif]" %
os.path.basename(sys.argv[0]))
sys.exit(1)
else:
dom_id, command = sys.argv[1:3]
        vif = sys.argv[3] if len(sys.argv) == 4 else None
main(dom_id, command, vif)
|
apache-2.0
|
rex-xxx/mt6572_x201
|
external/webkit/Tools/Scripts/webkitpy/tool/steps/promptforbugortitle.py
|
147
|
2253
|
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.tool.steps.abstractstep import AbstractStep
class PromptForBugOrTitle(AbstractStep):
def run(self, state):
        # No need to prompt if we already have the bug_id.
if state.get("bug_id"):
return
user_response = self._tool.user.prompt("Please enter a bug number or a title for a new bug:\n")
# If the user responds with a number, we assume it's bug number.
# Otherwise we assume it's a bug subject.
try:
state["bug_id"] = int(user_response)
        except (ValueError, TypeError):
state["bug_title"] = user_response
# FIXME: This is kind of a lame description.
state["bug_description"] = user_response
|
gpl-2.0
|
meisamhe/GPLshared
|
Programming/MPI — AMath 483 583, Spring 2013 1.0 documentation_files/generating_a_b_sqrt2_improved.py
|
1
|
1541
|
import sys
import random
import math
from generating_a_b_sqrt2 import generate_first_k_a_b_sqrt2 as golden
from generating_a_b_sqrt2 import ABSqrt2
# @include
def generate_first_k_a_b_sqrt2(k):
# Will store the first k numbers of the form a + b sqrt(2).
result = [ABSqrt2(0, 0)]
i = j = 0
for _ in range(1, k):
result_i_plus_1 = ABSqrt2(result[i].a + 1, result[i].b)
result_j_plus_sqrt2 = ABSqrt2(result[j].a, result[j].b + 1)
result.append(min(result_i_plus_1, result_j_plus_sqrt2))
if result_i_plus_1.val == result[-1].val:
i += 1
if result_j_plus_sqrt2.val == result[-1].val:
j += 1
return result
# @exclude
def simple_test():
ans = generate_first_k_a_b_sqrt2(8)
assert 0.0 == ans[0].val
assert 1.0 == ans[1].val
assert math.sqrt(2.0) == ans[2].val
assert 2.0 == ans[3].val
assert 1.0 + math.sqrt(2.0) == ans[4].val
assert 2.0 * math.sqrt(2.0) == ans[5].val
assert 3.0 == ans[6].val
assert 2.0 + math.sqrt(2.0) == ans[7].val
def main():
simple_test()
for _ in range(1000):
k = int(sys.argv[1]) if len(sys.argv) == 2 else random.randint(1, 10000)
ans = generate_first_k_a_b_sqrt2(k)
assert len(ans) == k
for i, a in enumerate(ans):
print(a.a, a.b, a.val)
if i > 0:
assert a.val >= ans[i - 1].val
gold_res = golden(k)
assert all(ans[i].val == gold_res[i].val for i in range(k))
if __name__ == '__main__':
main()
|
gpl-3.0
|
vicky2135/lucious
|
oscar/lib/python2.7/site-packages/unidecode/x003.py
|
246
|
3875
|
data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
'[?]', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'a', # 0x63
'e', # 0x64
'i', # 0x65
'o', # 0x66
'u', # 0x67
'c', # 0x68
'd', # 0x69
'h', # 0x6a
'm', # 0x6b
'r', # 0x6c
't', # 0x6d
'v', # 0x6e
'x', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'\'', # 0x74
',', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'?', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'[?]', # 0x82
'[?]', # 0x83
'', # 0x84
'', # 0x85
'A', # 0x86
';', # 0x87
'E', # 0x88
'E', # 0x89
'I', # 0x8a
'[?]', # 0x8b
'O', # 0x8c
'[?]', # 0x8d
'U', # 0x8e
'O', # 0x8f
'I', # 0x90
'A', # 0x91
'B', # 0x92
'G', # 0x93
'D', # 0x94
'E', # 0x95
'Z', # 0x96
'E', # 0x97
'Th', # 0x98
'I', # 0x99
'K', # 0x9a
'L', # 0x9b
'M', # 0x9c
'N', # 0x9d
'Ks', # 0x9e
'O', # 0x9f
'P', # 0xa0
'R', # 0xa1
'[?]', # 0xa2
'S', # 0xa3
'T', # 0xa4
'U', # 0xa5
'Ph', # 0xa6
'Kh', # 0xa7
'Ps', # 0xa8
'O', # 0xa9
'I', # 0xaa
'U', # 0xab
'a', # 0xac
'e', # 0xad
'e', # 0xae
'i', # 0xaf
'u', # 0xb0
'a', # 0xb1
'b', # 0xb2
'g', # 0xb3
'd', # 0xb4
'e', # 0xb5
'z', # 0xb6
'e', # 0xb7
'th', # 0xb8
'i', # 0xb9
'k', # 0xba
'l', # 0xbb
'm', # 0xbc
'n', # 0xbd
'x', # 0xbe
'o', # 0xbf
'p', # 0xc0
'r', # 0xc1
's', # 0xc2
's', # 0xc3
't', # 0xc4
'u', # 0xc5
'ph', # 0xc6
'kh', # 0xc7
'ps', # 0xc8
'o', # 0xc9
'i', # 0xca
'u', # 0xcb
'o', # 0xcc
'u', # 0xcd
'o', # 0xce
'[?]', # 0xcf
'b', # 0xd0
'th', # 0xd1
'U', # 0xd2
'U', # 0xd3
'U', # 0xd4
'ph', # 0xd5
'p', # 0xd6
'&', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'St', # 0xda
'st', # 0xdb
'W', # 0xdc
'w', # 0xdd
'Q', # 0xde
'q', # 0xdf
'Sp', # 0xe0
'sp', # 0xe1
'Sh', # 0xe2
'sh', # 0xe3
'F', # 0xe4
'f', # 0xe5
'Kh', # 0xe6
'kh', # 0xe7
'H', # 0xe8
'h', # 0xe9
'G', # 0xea
'g', # 0xeb
'CH', # 0xec
'ch', # 0xed
'Ti', # 0xee
'ti', # 0xef
'k', # 0xf0
'r', # 0xf1
'c', # 0xf2
'j', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
bsd-3-clause
|
varunagrawal/azure-services
|
varunagrawal/site-packages/django/forms/extras/widgets.py
|
88
|
5164
|
"""
Extra HTML Widget classes
"""
import datetime
import re
from django.forms.widgets import Widget, Select
from django.utils import datetime_safe
from django.utils.dates import MONTHS
from django.utils.safestring import mark_safe
from django.utils.formats import get_format
from django.conf import settings
__all__ = ('SelectDateWidget',)
RE_DATE = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')
def _parse_date_fmt():
fmt = get_format('DATE_FORMAT')
escaped = False
output = []
for char in fmt:
if escaped:
escaped = False
elif char == '\\':
escaped = True
elif char in 'Yy':
output.append('year')
#if not self.first_select: self.first_select = 'year'
elif char in 'bEFMmNn':
output.append('month')
#if not self.first_select: self.first_select = 'month'
elif char in 'dj':
output.append('day')
#if not self.first_select: self.first_select = 'day'
return output
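# Worked example (added): with get_format('DATE_FORMAT') == 'N j, Y',
# _parse_date_fmt() returns ['month', 'day', 'year'], so render() emits the
# month <select> first.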
class SelectDateWidget(Widget):
"""
A Widget that splits date input into three <select> boxes.
This also serves as an example of a Widget that has more than one HTML
element and hence implements value_from_datadict.
"""
none_value = (0, '---')
month_field = '%s_month'
day_field = '%s_day'
year_field = '%s_year'
def __init__(self, attrs=None, years=None, required=True):
# years is an optional list/tuple of years to use in the "year" select box.
self.attrs = attrs or {}
self.required = required
if years:
self.years = years
else:
this_year = datetime.date.today().year
self.years = range(this_year, this_year+10)
def render(self, name, value, attrs=None):
try:
year_val, month_val, day_val = value.year, value.month, value.day
except AttributeError:
year_val = month_val = day_val = None
if isinstance(value, basestring):
if settings.USE_L10N:
try:
input_format = get_format('DATE_INPUT_FORMATS')[0]
v = datetime.datetime.strptime(value, input_format)
year_val, month_val, day_val = v.year, v.month, v.day
except ValueError:
pass
else:
match = RE_DATE.match(value)
if match:
year_val, month_val, day_val = [int(v) for v in match.groups()]
choices = [(i, i) for i in self.years]
year_html = self.create_select(name, self.year_field, value, year_val, choices)
choices = MONTHS.items()
month_html = self.create_select(name, self.month_field, value, month_val, choices)
choices = [(i, i) for i in range(1, 32)]
day_html = self.create_select(name, self.day_field, value, day_val, choices)
output = []
for field in _parse_date_fmt():
if field == 'year':
output.append(year_html)
elif field == 'month':
output.append(month_html)
elif field == 'day':
output.append(day_html)
return mark_safe(u'\n'.join(output))
def id_for_label(self, id_):
first_select = None
field_list = _parse_date_fmt()
if field_list:
first_select = field_list[0]
if first_select is not None:
return '%s_%s' % (id_, first_select)
else:
return '%s_month' % id_
def value_from_datadict(self, data, files, name):
y = data.get(self.year_field % name)
m = data.get(self.month_field % name)
d = data.get(self.day_field % name)
if y == m == d == "0":
return None
if y and m and d:
if settings.USE_L10N:
input_format = get_format('DATE_INPUT_FORMATS')[0]
try:
date_value = datetime.date(int(y), int(m), int(d))
except ValueError:
return '%s-%s-%s' % (y, m, d)
else:
date_value = datetime_safe.new_date(date_value)
return date_value.strftime(input_format)
else:
return '%s-%s-%s' % (y, m, d)
return data.get(name, None)
def create_select(self, name, field, value, val, choices):
if 'id' in self.attrs:
id_ = self.attrs['id']
else:
id_ = 'id_%s' % name
if not (self.required and val):
choices.insert(0, self.none_value)
local_attrs = self.build_attrs(id=field % id_)
s = Select(choices=choices)
select_html = s.render(field % name, val, local_attrs)
return select_html
def _has_changed(self, initial, data):
try:
input_format = get_format('DATE_INPUT_FORMATS')[0]
data = datetime_safe.datetime.strptime(data, input_format).date()
except (TypeError, ValueError):
pass
return super(SelectDateWidget, self)._has_changed(initial, data)
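A usage sketch for the widget above; it assumes the module's historical import path (django.forms.extras.widgets, valid through Django 1.8) and a hypothetical form name:

from django import forms
from django.forms.extras.widgets import SelectDateWidget

class BirthdayForm(forms.Form):
    # Renders three <select> boxes named birthday_year, birthday_month
    # and birthday_day, in DATE_FORMAT order.
    birthday = forms.DateField(widget=SelectDateWidget(years=range(1950, 2011)))

# On submit, value_from_datadict() reassembles the three POSTed values into
# a single 'YYYY-M-D' string (or a localized date string when USE_L10N is
# on), which DateField then cleans into a datetime.date.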
|
gpl-2.0
|
ellio167/lammps
|
tools/python/pizza/cfg.py
|
11
|
6036
|
# Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, [email protected], Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# cfg tool
oneline = "Convert LAMMPS snapshots to AtomEye CFG format"
docstr = """
c = cfg(d) d = object containing atom coords (dump, data)
c.one() write all snapshots to tmp.cfg
c.one("new") write all snapshots to new.cfg
c.many() write snapshots to tmp0000.cfg, tmp0001.cfg, etc
c.many("new") write snapshots to new0000.cfg, new0001.cfg, etc
c.single(N) write snapshot for timestep N to tmp.cfg
c.single(N,"file") write snapshot for timestep N to file.cfg
"""
# History
# 11/06, Aidan Thompson (SNL): original version
# ToDo list
# should decide if dump is scaled or not, since CFG prints in scaled coords
# this creates a simple AtomEye CFG format
# there is a more complex format we could write out,
# which allows extra atom info, e.g. values to base atom coloring on
# should also decide how to dump a triclinic box, since AtomEye accepts one
# Variables
# data = data file to read from
# Imports and external programs
import sys
# Class definition
class cfg:
# --------------------------------------------------------------------
def __init__(self,data):
self.data = data
# --------------------------------------------------------------------
def one(self,*args):
if len(args) == 0: file = "tmp.cfg"
elif args[0][-4:] == ".cfg": file = args[0]
else: file = args[0] + ".cfg"
f = open(file,"w")
n = flag = 0
while 1:
which,time,flag = self.data.iterator(flag)
if flag == -1: break
time,box,atoms,bonds,tris,lines = self.data.viz(which)
xlen = box[3]-box[0]
ylen = box[4]-box[1]
zlen = box[5]-box[2]
print >>f,"Number of particles = %d " % len(atoms)
print >>f,"# Timestep %d \n#\nA = 1.0 Angstrom" % time
print >>f,"H0(1,1) = %20.10f A " % xlen
print >>f,"H0(1,2) = 0.0 A "
print >>f,"H0(1,3) = 0.0 A "
print >>f,"H0(2,1) = 0.0 A "
print >>f,"H0(2,2) = %20.10f A " % ylen
print >>f,"H0(2,3) = 0.0 A "
print >>f,"H0(3,1) = 0.0 A "
print >>f,"H0(3,2) = 0.0 A "
print >>f,"H0(3,3) = %20.10f A " % zlen
print >>f,"#"
for atom in atoms:
itype = int(atom[1])
xfrac = (atom[2]-box[0])/xlen
yfrac = (atom[3]-box[1])/ylen
zfrac = (atom[4]-box[2])/zlen
# print >>f,"1.0 %d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f " % (itype,xfrac,yfrac,zfrac,atom[5],atom[6],atom[7])
print >>f,"1.0 %d %15.10f %15.10f %15.10f 0.0 0.0 0.0 " % (itype,xfrac,yfrac,zfrac)
print time,
sys.stdout.flush()
n += 1
f.close()
print "\nwrote %d snapshots to %s in CFG format" % (n,file)
# --------------------------------------------------------------------
def many(self,*args):
if len(args) == 0: root = "tmp"
else: root = args[0]
n = flag = 0
while 1:
which,time,flag = self.data.iterator(flag)
if flag == -1: break
time,box,atoms,bonds,tris,lines = self.data.viz(which)
file = "%s%04d.cfg" % (root, n)
f = open(file,"w")
xlen = box[3]-box[0]
ylen = box[4]-box[1]
zlen = box[5]-box[2]
print >>f,"Number of particles = %d " % len(atoms)
print >>f,"# Timestep %d \n#\nA = 1.0 Angstrom" % time
print >>f,"H0(1,1) = %20.10f A " % xlen
print >>f,"H0(1,2) = 0.0 A "
print >>f,"H0(1,3) = 0.0 A "
print >>f,"H0(2,1) = 0.0 A "
print >>f,"H0(2,2) = %20.10f A " % ylen
print >>f,"H0(2,3) = 0.0 A "
print >>f,"H0(3,1) = 0.0 A "
print >>f,"H0(3,2) = 0.0 A "
print >>f,"H0(3,3) = %20.10f A " % zlen
print >>f,"#"
for atom in atoms:
itype = int(atom[1])
xfrac = (atom[2]-box[0])/xlen
yfrac = (atom[3]-box[1])/ylen
zfrac = (atom[4]-box[2])/zlen
# print >>f,"1.0 %d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f " % (itype,xfrac,yfrac,zfrac,atom[5],atom[6],atom[7])
print >>f,"1.0 %d %15.10f %15.10f %15.10f 0.0 0.0 0.0 " % (itype,xfrac,yfrac,zfrac)
print time,
sys.stdout.flush()
f.close()
n += 1
print "\nwrote %s snapshots in CFG format" % n
# --------------------------------------------------------------------
def single(self,time,*args):
if len(args) == 0: file = "tmp.cfg"
elif args[0][-4:] == ".cfg": file = args[0]
else: file = args[0] + ".cfg"
which = self.data.findtime(time)
time,box,atoms,bonds,tris,lines = self.data.viz(which)
f = open(file,"w")
xlen = box[3]-box[0]
ylen = box[4]-box[1]
zlen = box[5]-box[2]
print >>f,"Number of particles = %d " % len(atoms)
print >>f,"# Timestep %d \n#\nA = 1.0 Angstrom" % time
print >>f,"H0(1,1) = %20.10f A " % xlen
print >>f,"H0(1,2) = 0.0 A "
print >>f,"H0(1,3) = 0.0 A "
print >>f,"H0(2,1) = 0.0 A "
print >>f,"H0(2,2) = %20.10f A " % ylen
print >>f,"H0(2,3) = 0.0 A "
print >>f,"H0(3,1) = 0.0 A "
print >>f,"H0(3,2) = 0.0 A "
print >>f,"H0(3,3) = %20.10f A " % zlen
print >>f,"#"
for atom in atoms:
itype = int(atom[1])
xfrac = (atom[2]-box[0])/xlen
yfrac = (atom[3]-box[1])/ylen
zfrac = (atom[4]-box[2])/zlen
# print >>f,"1.0 %d %15.10f %15.10f %15.10f %15.10f %15.10f %15.10f " % (itype,xfrac,yfrac,zfrac,atom[5],atom[6],atom[7])
print >>f,"1.0 %d %15.10f %15.10f %15.10f 0.0 0.0 0.0 " % (itype,xfrac,yfrac,zfrac)
f.close()
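A usage sketch mirroring the docstring above; it assumes Pizza.py's dump tool is importable as shown and that a LAMMPS dump file named dump.atoms exists:

from dump import dump
from cfg import cfg

d = dump("dump.atoms")    # snapshots with atom coords
c = cfg(d)
c.one("all")              # every snapshot appended to all.cfg
c.many("frame")           # frame0000.cfg, frame0001.cfg, ...
c.single(1000, "final")   # timestep 1000 only -> final.cfg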
|
gpl-2.0
|