import json
from datetime import datetime, timedelta
from decimal import Decimal
from django.conf import settings
from django.contrib.messages.storage.fallback import FallbackStorage
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.test.client import RequestFactory
import mock
from babel import numbers
from nose.tools import eq_, ok_
from pyquery import PyQuery as pq
from slumber import exceptions
import mkt
import mkt.site.tests
from mkt.abuse.models import AbuseReport
from mkt.access.models import Group, GroupUser
from mkt.constants.payments import (FAILED, PENDING, PROVIDER_BANGO,
PROVIDER_REFERENCE,
SOLITUDE_REFUND_STATUSES)
from mkt.developers.models import (ActivityLog, AddonPaymentAccount,
PaymentAccount, SolitudeSeller)
from mkt.developers.providers import get_provider
from mkt.developers.tests.test_views_payments import (setup_payment_account,
TEST_PACKAGE_ID)
from mkt.lookup.views import (_transaction_summary, app_summary,
transaction_refund, user_delete, user_summary)
from mkt.prices.models import AddonPaymentData, Refund
from mkt.purchase.models import Contribution
from mkt.reviewers.models import QUEUE_TARAKO
from mkt.site.fixtures import fixture
from mkt.site.tests import (ESTestCase, req_factory_factory, TestCase,
user_factory)
from mkt.site.utils import app_factory, file_factory, version_factory
from mkt.tags.models import Tag
from mkt.users.models import UserProfile
from mkt.webapps.models import AddonUser, Webapp
from mkt.websites.utils import website_factory
class SummaryTest(TestCase):
def add_payment_accounts(self, providers, app=None):
if not app:
app = self.app
user = self.user
seller = SolitudeSeller.objects.create(user=user, uuid='seller-uid')
for provider in providers:
uri = 'seller-{p}'.format(p=provider)
payment = PaymentAccount.objects.create(
user=user, solitude_seller=seller,
provider=provider,
seller_uri=uri, uri=uri,
agreed_tos=True, account_id='not-important')
AddonPaymentAccount.objects.create(
addon=app,
product_uri='product-{p}'.format(p=provider),
account_uri=payment.uri,
payment_account=payment
)
app.save()
def verify_bango_portal(self, app, response):
bango = pq(response.content)('[data-provider-name=bango]')
heading = pq('dt', bango).text()
assert 'Bango' in heading, heading
assert unicode(app.name) in heading, heading
eq_(pq('dd a', bango).attr('href'),
get_provider(name='bango').get_portal_url(app.app_slug))
@mock.patch.object(settings, 'TASK_USER_ID', 999)
class TestAcctSummary(SummaryTest):
fixtures = fixture('user_support_staff', 'user_999', 'webapp_337141',
'user_operator')
def setUp(self):
super(TestAcctSummary, self).setUp()
self.user = UserProfile.objects.get(email='[email protected]')
self.steamcube = Webapp.objects.get(pk=337141)
self.otherapp = app_factory(app_slug='otherapp')
self.reg_user = UserProfile.objects.get(email='[email protected]')
self.summary_url = reverse('lookup.user_summary', args=[self.user.pk])
self.login(UserProfile.objects.get(email='[email protected]'))
def buy_stuff(self, contrib_type):
for i in range(3):
if i == 1:
curr = 'GBR'
else:
curr = 'USD'
amount = Decimal('2.00')
Contribution.objects.create(addon=self.steamcube,
type=contrib_type,
currency=curr,
amount=amount,
user_id=self.user.pk)
def summary(self, expected_status=200):
res = self.client.get(self.summary_url)
eq_(res.status_code, expected_status)
return res
def payment_data(self):
return {'full_name': 'Ed Peabody Jr.',
'business_name': 'Mr. Peabody',
'phone': '(1) 773-111-2222',
'address_one': '1111 W Leland Ave',
'address_two': 'Apt 1W',
'city': 'Chicago',
'post_code': '60640',
'country': 'USA',
'state': 'Illinois'}
def test_home_auth(self):
self.client.logout()
res = self.client.get(reverse('lookup.home'))
self.assertLoginRedirects(res, reverse('lookup.home'))
def test_summary_auth(self):
self.client.logout()
res = self.client.get(self.summary_url)
self.assertLoginRedirects(res, self.summary_url)
def test_home(self):
res = self.client.get(reverse('lookup.home'))
self.assertNoFormErrors(res)
eq_(res.status_code, 200)
def test_basic_summary(self):
res = self.summary()
eq_(res.context['account'].pk, self.user.pk)
@mock.patch.object(settings, 'PAYMENT_PROVIDERS', ['bango', 'reference'])
def test_multiple_payment_accounts(self):
app = self.steamcube
self.add_payment_accounts([PROVIDER_BANGO, PROVIDER_REFERENCE],
app=app)
res = self.summary()
self.verify_bango_portal(app, res)
def test_app_counts(self):
self.buy_stuff(mkt.CONTRIB_PURCHASE)
sm = self.summary().context['app_summary']
eq_(sm['app_total'], 3)
eq_(sm['app_amount']['USD'], Decimal('4.0'))
eq_(sm['app_amount']['GBR'], Decimal('2.0'))
def test_requested_refunds(self):
contrib = Contribution.objects.create(type=mkt.CONTRIB_PURCHASE,
user_id=self.user.pk,
addon=self.steamcube,
currency='USD',
amount='0.99')
Refund.objects.create(contribution=contrib, user=self.user)
res = self.summary()
eq_(res.context['refund_summary']['requested'], 1)
eq_(res.context['refund_summary']['approved'], 0)
def test_approved_refunds(self):
contrib = Contribution.objects.create(type=mkt.CONTRIB_PURCHASE,
user_id=self.user.pk,
addon=self.steamcube,
currency='USD',
amount='0.99')
Refund.objects.create(contribution=contrib,
status=mkt.REFUND_APPROVED_INSTANT,
user=self.user)
res = self.summary()
eq_(res.context['refund_summary']['requested'], 1)
eq_(res.context['refund_summary']['approved'], 1)
def test_app_created(self):
res = self.summary()
# Number of apps/add-ons belonging to this user.
eq_(len(res.context['user_addons']), 1)
def test_payment_data(self):
payment_data = self.payment_data()
AddonPaymentData.objects.create(addon=self.steamcube,
**payment_data)
res = self.summary()
pd = res.context['payment_data'][0]
for key, value in payment_data.iteritems():
eq_(pd[key], value)
def test_no_payment_data(self):
res = self.summary()
eq_(len(res.context['payment_data']), 0)
def test_no_duplicate_payment_data(self):
role = AddonUser.objects.create(user=self.user,
addon=self.otherapp,
role=mkt.AUTHOR_ROLE_DEV)
self.otherapp.addonuser_set.add(role)
payment_data = self.payment_data()
AddonPaymentData.objects.create(addon=self.steamcube,
**payment_data)
AddonPaymentData.objects.create(addon=self.otherapp,
**payment_data)
res = self.summary()
eq_(len(res.context['payment_data']), 1)
pd = res.context['payment_data'][0]
for key, value in payment_data.iteritems():
eq_(pd[key], value)
def test_operator_app_lookup_only(self):
GroupUser.objects.create(
group=Group.objects.get(name='Operators'),
user=UserProfile.objects.get(email='[email protected]'))
res = self.client.get(reverse('lookup.home'))
doc = pq(res.content)
eq_(doc('#app-search-form select').length, 0)
def test_delete_user(self):
staff = UserProfile.objects.get(email='[email protected]')
req = req_factory_factory(
reverse('lookup.user_delete', args=[self.user.id]), user=staff,
post=True, data={'delete_reason': 'basketball reasons'})
r = user_delete(req, self.user.id)
self.assert3xx(r, reverse('lookup.user_summary', args=[self.user.id]))
# Test data.
assert UserProfile.objects.get(id=self.user.id).deleted
eq_(staff, ActivityLog.objects.for_user(self.user).filter(
action=mkt.LOG.DELETE_USER_LOOKUP.id)[0].user)
# Test frontend.
req = req_factory_factory(
reverse('lookup.user_summary', args=[self.user.id]), user=staff)
r = user_summary(req, self.user.id)
eq_(r.status_code, 200)
doc = pq(r.content)
eq_(doc('#delete-user dd:eq(1)').text(), 'basketball reasons')
class TestBangoRedirect(TestCase):
fixtures = fixture('user_support_staff', 'user_999', 'webapp_337141',
'user_operator')
def setUp(self):
super(TestBangoRedirect, self).setUp()
self.user = UserProfile.objects.get(email='[email protected]')
self.steamcube = Webapp.objects.get(pk=337141)
self.otherapp = app_factory(app_slug='otherapp')
self.reg_user = UserProfile.objects.get(email='[email protected]')
self.summary_url = reverse('lookup.user_summary', args=[self.user.pk])
self.login(UserProfile.objects.get(email='[email protected]'))
self.steamcube.update(premium_type=mkt.ADDON_PREMIUM)
self.account = setup_payment_account(self.steamcube, self.user)
self.portal_url = reverse(
'lookup.bango_portal_from_package',
args=[self.account.payment_account.account_id])
self.authentication_token = u'D0A44686-D4A3-4B2F-9BEB-5E4975E35192'
@mock.patch('mkt.developers.views_payments.client.api')
def test_bango_portal_redirect(self, api):
api.bango.login.post.return_value = {
'person_id': 600925,
'email_address': u'[email protected]',
'authentication_token': self.authentication_token,
}
res = self.client.get(self.portal_url)
eq_(res.status_code, 302)
eq_(api.bango.login.post.call_args[0][0]['packageId'],
int(TEST_PACKAGE_ID))
redirect_url = res['Location']
assert self.authentication_token in redirect_url, redirect_url
assert 'emailAddress=admin%40place.com' in redirect_url, redirect_url
@mock.patch('mkt.developers.views_payments.client.api')
def test_bango_portal_redirect_api_error(self, api):
message = 'Something went wrong.'
error = {'__all__': [message]}
api.bango.login.post.side_effect = exceptions.HttpClientError(
content=error)
res = self.client.get(self.portal_url, follow=True)
eq_(res.redirect_chain, [('http://testserver/lookup/', 302)])
ok_(message in [msg.message for msg in res.context['messages']][0])
@mock.patch('mkt.developers.views_payments.client.api')
def test_bango_portal_redirect_role_error(self, api):
self.login(self.user)
res = self.client.get(self.portal_url)
eq_(res.status_code, 403)
class SearchTestMixin(object):
def search(self, expect_objects=True, **data):
res = self.client.get(self.url, data)
eq_(res.status_code, 200)
data = json.loads(res.content)
if expect_objects:
assert len(data['objects']), 'should be more than 0 objects'
return data
def test_auth_required(self):
self.client.logout()
res = self.client.get(self.url)
self.assertLoginRedirects(res, self.url)
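# For illustration only (shape inferred from the verify_result() helpers in
# the concrete search test classes below): a lookup search response is assumed
# to be JSON of roughly the form
#     {"objects": [{"id": 1, "name": "...", "url": "/lookup/.../summary"}]}
# so expect_objects=True in SearchTestMixin.search() merely asserts that at
# least one match came back.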
class TestAcctSearch(TestCase, SearchTestMixin):
fixtures = fixture('user_10482', 'user_support_staff', 'user_operator')
def setUp(self):
super(TestAcctSearch, self).setUp()
self.url = reverse('lookup.user_search')
self.user = UserProfile.objects.get(email='[email protected]')
self.login(UserProfile.objects.get(email='[email protected]'))
def verify_result(self, data):
eq_(data['objects'][0]['fxa_uid'], self.user.fxa_uid)
eq_(data['objects'][0]['display_name'], self.user.display_name)
eq_(data['objects'][0]['email'], self.user.email)
eq_(data['objects'][0]['id'], self.user.pk)
eq_(data['objects'][0]['url'], reverse('lookup.user_summary',
args=[self.user.pk]))
def test_by_fxa_uid(self):
self.user.update(fxa_uid='fake-fxa-uid')
data = self.search(q='fake-fxa-uid')
self.verify_result(data)
def test_by_display_name(self):
self.user.update(display_name='Kumar McMillan')
data = self.search(q='mcmill')
self.verify_result(data)
def test_by_id(self):
data = self.search(q=self.user.pk)
self.verify_result(data)
def test_by_email(self):
self.user.update(email='[email protected]')
data = self.search(q='fonzi')
self.verify_result(data)
@mock.patch('mkt.constants.lookup.SEARCH_LIMIT', 2)
@mock.patch('mkt.constants.lookup.MAX_RESULTS', 3)
def test_all_results(self):
for x in range(4):
name = 'chr' + str(x)
user_factory(email=name)
# Test not at search limit.
data = self.search(q='clouserw')
eq_(len(data['objects']), 1)
# Test search limit.
data = self.search(q='chr')
eq_(len(data['objects']), 2)
        # Test maximum search results.
data = self.search(q='chr', limit='max')
eq_(len(data['objects']), 3)
class TestTransactionSearch(TestCase):
fixtures = fixture('user_support_staff', 'user_999', 'user_operator')
def setUp(self):
self.uuid = 45
self.url = reverse('lookup.transaction_search')
self.login('[email protected]')
def test_redirect(self):
r = self.client.get(self.url, {'q': self.uuid})
self.assert3xx(r, reverse('lookup.transaction_summary',
args=[self.uuid]))
def test_no_perm(self):
self.login('[email protected]')
r = self.client.get(self.url, {'q': self.uuid})
eq_(r.status_code, 403)
self.login('[email protected]')
r = self.client.get(self.url, {'q': self.uuid})
eq_(r.status_code, 403)
class TestTransactionSummary(TestCase):
fixtures = fixture('user_support_staff', 'user_999', 'user_operator')
def setUp(self):
self.uuid = 'some:uuid'
self.transaction_id = 'some:tr'
self.seller_uuid = 456
self.related_tx_uuid = 789
self.user = UserProfile.objects.get(pk=999)
self.app = app_factory()
self.contrib = Contribution.objects.create(
addon=self.app, uuid=self.uuid, user=self.user,
transaction_id=self.transaction_id)
self.url = reverse('lookup.transaction_summary', args=[self.uuid])
self.login('[email protected]')
@mock.patch.object(settings, 'TASK_USER_ID', 999)
def create_test_refund(self):
refund_contrib = Contribution.objects.create(
addon=self.app, related=self.contrib, type=mkt.CONTRIB_REFUND,
transaction_id='testtransactionid', user=self.user)
refund_contrib.enqueue_refund(mkt.REFUND_PENDING, self.user)
@mock.patch('mkt.lookup.views.client')
def test_transaction_summary(self, solitude):
data = _transaction_summary(self.uuid)
eq_(data['is_refundable'], False)
eq_(data['contrib'].pk, self.contrib.pk)
@mock.patch('mkt.lookup.views.client')
def test_refund_status(self, solitude):
solitude.api.bango.refund.get_object_or_404.return_value = (
{'status': PENDING})
solitude.api.generic.transaction.get_object_or_404.return_value = (
{'uid_support': 'foo', 'provider': 2})
self.create_test_refund()
data = _transaction_summary(self.uuid)
eq_(data['support'], 'foo')
eq_(data['refund_status'], SOLITUDE_REFUND_STATUSES[PENDING])
@mock.patch('mkt.lookup.views.client')
def test_bango_transaction_status(self, solitude):
solitude.api.generic.transaction.get_object_or_404.return_value = (
{'uid_support': 'foo', 'provider': 1,
'seller': '/generic/seller/1/'})
self.create_test_refund()
data = _transaction_summary(self.uuid)
ok_(data['package_id'])
@mock.patch('mkt.lookup.views.client')
def test_transaction_status(self, solitude):
solitude.api.generic.transaction.get_object_or_404.return_value = (
{'uid_support': 'foo', 'provider': 2})
self.create_test_refund()
data = _transaction_summary(self.uuid)
eq_(data['support'], 'foo')
eq_(data['provider'], 'reference')
@mock.patch('mkt.lookup.views.client')
def test_transaction_fails(self, solitude):
solitude.api.generic.transaction.get_object_or_404.side_effect = (
ObjectDoesNotExist)
self.create_test_refund()
data = _transaction_summary(self.uuid)
eq_(data['support'], None)
eq_(data['lookup']['transaction'], False)
@mock.patch('mkt.lookup.views.client')
def test_is_refundable(self, solitude):
solitude.api.bango.refund.get_object_or_404.return_value = (
{'status': PENDING})
self.contrib.update(type=mkt.CONTRIB_PURCHASE)
data = _transaction_summary(self.uuid)
eq_(data['contrib'].pk, self.contrib.pk)
eq_(data['is_refundable'], True)
self.create_test_refund()
data = _transaction_summary(self.uuid)
eq_(data['is_refundable'], False)
@mock.patch('mkt.lookup.views.client')
def test_200(self, solitude):
r = self.client.get(self.url)
eq_(r.status_code, 200)
def test_no_perm_403(self):
self.login('[email protected]')
r = self.client.get(self.url)
eq_(r.status_code, 403)
self.login('[email protected]')
r = self.client.get(self.url)
eq_(r.status_code, 403)
def test_no_transaction_404(self):
r = self.client.get(reverse('lookup.transaction_summary', args=[999]))
eq_(r.status_code, 404)
@mock.patch.object(settings, 'TASK_USER_ID', 999)
class TestTransactionRefund(TestCase):
fixtures = fixture('user_support_staff', 'user_999')
def setUp(self):
self.uuid = 'paymentuuid'
self.refund_uuid = 'refunduuid'
self.summary_url = reverse('lookup.transaction_summary',
args=[self.uuid])
self.url = reverse('lookup.transaction_refund', args=[self.uuid])
self.app = app_factory()
self.user = UserProfile.objects.get(email='[email protected]')
AddonUser.objects.create(addon=self.app, user=self.user)
self.req = self.request({'refund_reason': 'text'})
self.contrib = Contribution.objects.create(
addon=self.app, user=self.user, uuid=self.uuid,
type=mkt.CONTRIB_PURCHASE, amount=1, transaction_id='123')
# Fix Django 1.4 RequestFactory bug with MessageMiddleware.
setattr(self.req, 'session', 'session')
messages = FallbackStorage(self.req)
setattr(self.req, '_messages', messages)
self.login(self.req.user)
def bango_ret(self, status):
return {
'status': status,
'transaction': 'transaction_uri',
'uuid': 'some:uid'
}
def request(self, data):
req = RequestFactory().post(self.url, data)
req.user = UserProfile.objects.get(email='[email protected]')
req.groups = req.user.groups.all()
return req
def refund_tx_ret(self):
return {'uuid': self.refund_uuid}
@mock.patch('mkt.lookup.views.client')
def test_fake_refund_ignored(self, client):
req = self.request({'refund_reason': 'text', 'fake': 'OK'})
with self.settings(BANGO_FAKE_REFUNDS=False):
transaction_refund(req, self.uuid)
client.api.bango.refund.post.assert_called_with(
{'uuid': '123', 'manual': False})
@mock.patch('mkt.lookup.views.client')
def test_manual_refund(self, client):
req = self.request({'refund_reason': 'text', 'manual': True})
transaction_refund(req, self.uuid)
client.api.bango.refund.post.assert_called_with(
{'uuid': '123', 'manual': True})
@mock.patch('mkt.lookup.views.client')
def test_fake_refund(self, client):
req = self.request({'refund_reason': 'text', 'fake': 'OK'})
with self.settings(BANGO_FAKE_REFUNDS=True):
transaction_refund(req, self.uuid)
client.api.bango.refund.post.assert_called_with({
'fake_response_status': {'responseCode': 'OK'},
'uuid': '123', 'manual': False})
@mock.patch('mkt.lookup.views.client')
def test_refund_success(self, solitude):
solitude.api.bango.refund.post.return_value = self.bango_ret(PENDING)
solitude.get.return_value = self.refund_tx_ret()
# Do refund.
res = transaction_refund(self.req, self.uuid)
refund = Refund.objects.filter(contribution__addon=self.app)
refund_contribs = self.contrib.get_refund_contribs()
# Check Refund created.
assert refund.exists()
eq_(refund[0].status, mkt.REFUND_PENDING)
assert self.req.POST['refund_reason'] in refund[0].refund_reason
# Check refund Contribution created.
eq_(refund_contribs.exists(), True)
eq_(refund_contribs[0].refund, refund[0])
eq_(refund_contribs[0].related, self.contrib)
eq_(refund_contribs[0].amount, -self.contrib.amount)
eq_(refund_contribs[0].transaction_id, 'some:uid')
self.assert3xx(res, self.summary_url)
@mock.patch('mkt.lookup.views.client')
def test_refund_failed(self, solitude):
solitude.api.bango.refund.post.return_value = self.bango_ret(FAILED)
res = transaction_refund(self.req, self.uuid)
# Check no refund Contributions created.
assert not self.contrib.get_refund_contribs().exists()
self.assert3xx(res, self.summary_url)
def test_cant_refund(self):
self.contrib.update(type=mkt.CONTRIB_PENDING)
resp = self.client.post(self.url, {'refund_reason': 'text'})
eq_(resp.status_code, 404)
@mock.patch('mkt.lookup.views.client')
def test_already_refunded(self, solitude):
solitude.api.bango.refund.post.return_value = self.bango_ret(PENDING)
solitude.get.return_value = self.refund_tx_ret()
res = transaction_refund(self.req, self.uuid)
refund_count = Contribution.objects.all().count()
# Check no refund Contributions created.
res = self.client.post(self.url, {'refund_reason': 'text'})
assert refund_count == Contribution.objects.all().count()
self.assert3xx(res, reverse('lookup.transaction_summary',
args=[self.uuid]))
@mock.patch('mkt.lookup.views.client')
def test_refund_slumber_error(self, solitude):
for exception in (exceptions.HttpClientError,
exceptions.HttpServerError):
solitude.api.bango.refund.post.side_effect = exception
res = transaction_refund(self.req, self.uuid)
# Check no refund Contributions created.
assert not self.contrib.get_refund_contribs().exists()
self.assert3xx(res, self.summary_url)
@mock.patch('mkt.lookup.views.client')
def test_redirect(self, solitude):
solitude.api.bango.refund.post.return_value = self.bango_ret(PENDING)
solitude.get.return_value = self.refund_tx_ret()
res = self.client.post(self.url, {'refund_reason': 'text'})
self.assert3xx(res, reverse('lookup.transaction_summary',
args=[self.uuid]))
@mock.patch('mkt.lookup.views.client')
def test_403_reg_user(self, solitude):
solitude.api.bango.refund.post.return_value = self.bango_ret(PENDING)
solitude.get.return_value = self.refund_tx_ret()
self.login(self.user)
res = self.client.post(self.url, {'refund_reason': 'text'})
eq_(res.status_code, 403)
class TestAppSearch(ESTestCase, SearchTestMixin):
fixtures = fixture('user_support_staff', 'user_999', 'webapp_337141',
'user_operator')
def setUp(self):
super(TestAppSearch, self).setUp()
self.url = reverse('lookup.app_search')
self.app = Webapp.objects.get(pk=337141)
self.login('[email protected]')
def search(self, *args, **kwargs):
if 'lang' not in kwargs:
kwargs.update({'lang': 'en-US'})
return super(TestAppSearch, self).search(*args, **kwargs)
def verify_result(self, data):
eq_(data['objects'][0]['name'], self.app.name.localized_string)
eq_(data['objects'][0]['id'], self.app.pk)
eq_(data['objects'][0]['url'], reverse('lookup.app_summary',
args=[self.app.pk]))
eq_(data['objects'][0]['app_slug'], self.app.app_slug)
eq_(data['objects'][0]['status'],
mkt.STATUS_CHOICES_API_v2[self.app.status])
def test_auth_required(self):
self.client.logout()
res = self.client.get(self.url)
eq_(res.status_code, 403)
def test_operator(self):
self.login('[email protected]')
res = self.client.get(self.url, {'q': self.app.pk})
eq_(res.status_code, 200)
def test_by_name_part(self):
self.app.name = 'This is Steamcube'
self.app.save()
self.refresh('webapp')
data = self.search(q='steamcube')
self.verify_result(data)
def test_by_name_unreviewed(self):
# Just the same as the above test, but with an unreviewed app.
self.app.status = mkt.STATUS_PENDING
self.test_by_name_part()
def test_by_deleted_app(self):
self.app.delete()
self.refresh('webapp')
data = self.search(q='something')
self.verify_result(data)
def test_multiword(self):
self.app.name = 'Firefox Marketplace'
self.app.save()
self.refresh('webapp')
data = self.search(q='Firefox Marketplace')
self.verify_result(data)
def test_by_stem_name(self):
self.app.name = 'Instigated'
self.app.save()
self.refresh('webapp')
data = self.search(q='instigate')
self.verify_result(data)
def test_by_guid(self):
self.app.update(guid='1ab2c3d4-1234-5678-ab12-c34defa5b678')
self.refresh('webapp')
data = self.search(q=self.app.guid)
self.verify_result(data)
def test_by_id(self):
data = self.search(q=self.app.pk)
self.verify_result(data)
@mock.patch('mkt.lookup.views.AppLookupSearchView.paginate_by', 2)
@mock.patch('mkt.lookup.views.AppLookupSearchView.max_paginate_by', 3)
def test_all_results(self):
for x in range(4):
app_factory(name='chr' + str(x))
self.refresh('webapp')
# Test search limit.
data = self.search(q='chr')
eq_(len(data['objects']), 2)
        # Test maximum search results.
data = self.search(q='chr', limit='max')
eq_(len(data['objects']), 3)
def test_statuses(self):
for status, api_status in mkt.STATUS_CHOICES_API_v2.items():
self.app.update(status=status)
self.refresh('webapp')
data = self.search(q='something')
eq_(data['objects'][0]['status'], api_status)
def test_disabled(self):
"""We override the status for disabled apps to be 'disabled'."""
self.app.update(disabled_by_user=True)
self.refresh('webapp')
data = self.search(q=self.app.app_slug)
eq_(data['objects'][0]['status'], 'disabled')
class AppSummaryTest(SummaryTest):
fixtures = fixture('prices', 'webapp_337141', 'user_support_staff')
def _setUp(self):
self.app = Webapp.objects.get(pk=337141)
self.url = reverse('lookup.app_summary',
args=[self.app.pk])
self.user = UserProfile.objects.get(email='[email protected]')
self.login('[email protected]')
def summary(self, expected_status=200):
res = self.client.get(self.url)
eq_(res.status_code, expected_status)
return res
@mock.patch('mkt.webapps.models.Webapp.get_cached_manifest', mock.Mock)
class TestAppSummary(AppSummaryTest):
fixtures = fixture('prices', 'user_admin', 'user_support_staff',
'webapp_337141', 'user_operator')
def setUp(self):
super(TestAppSummary, self).setUp()
self._setUp()
def test_app_deleted(self):
self.app.delete()
self.summary()
def test_packaged_app_deleted(self):
self.app.update(is_packaged=True)
ver = version_factory(addon=self.app)
file_factory(version=ver)
self.app.delete()
self.summary()
def test_authors(self):
user = UserProfile.objects.get(email='[email protected]')
res = self.summary()
eq_(res.context['authors'][0].display_name, user.display_name)
def test_status(self):
res = self.summary()
assert 'Published' in pq(res.content)('.column-b dd').eq(5).text()
def test_disabled(self):
self.app.update(disabled_by_user=True)
res = self.summary()
text = pq(res.content)('.column-b dd').eq(5).text()
assert 'Published' not in text
assert 'disabled by user' in text
def test_tarako_enabled(self):
tag = Tag(tag_text='tarako')
tag.save_tag(self.app)
res = self.summary()
text = 'Tarako enabled'
assert text in pq(res.content)('.column-b dd').eq(6).text()
def test_tarako_disabled_not_pending(self):
res = self.summary()
texta = 'Tarako not enabled |'
textb = 'Review not requested'
assert texta in pq(res.content)('.column-b dd').eq(6).text()
assert textb in pq(res.content)('.column-b dd').eq(6).text()
def test_tarako_review_pending(self):
self.app.additionalreview_set.create(queue=QUEUE_TARAKO)
res = self.summary()
texta = 'Tarako not enabled |'
textb = 'Review pending'
assert texta in pq(res.content)('.column-b dd').eq(6).text()
assert textb in pq(res.content)('.column-b dd').eq(6).text()
def test_visible_authors(self):
AddonUser.objects.all().delete()
for role in (mkt.AUTHOR_ROLE_DEV,
mkt.AUTHOR_ROLE_OWNER,
mkt.AUTHOR_ROLE_VIEWER,
mkt.AUTHOR_ROLE_SUPPORT):
role_name = unicode(mkt.AUTHOR_CHOICES_NAMES[role])
user = user_factory(display_name=role_name)
role = AddonUser.objects.create(user=user,
addon=self.app,
role=role)
self.app.addonuser_set.add(role)
res = self.summary()
eq_(sorted([u.display_name for u in res.context['authors']]),
[unicode(mkt.AUTHOR_CHOICES_NAMES[mkt.AUTHOR_ROLE_DEV]),
unicode(mkt.AUTHOR_CHOICES_NAMES[mkt.AUTHOR_ROLE_OWNER])])
def test_details(self):
res = self.summary()
eq_(res.context['app'].manifest_url, self.app.manifest_url)
eq_(res.context['app'].premium_type, mkt.ADDON_FREE)
eq_(res.context['price'], None)
def test_price(self):
self.make_premium(self.app)
res = self.summary()
eq_(res.context['price'], self.app.premium.price)
def test_abuse_reports(self):
for i in range(2):
AbuseReport.objects.create(addon=self.app,
ip_address='10.0.0.1',
message='spam and porn everywhere')
res = self.summary()
eq_(res.context['abuse_reports'], 2)
def test_permissions(self):
manifest = json.dumps({
'permissions': {
'geolocation': {
'description': 'Required to know where you are.'
}
}
})
self.app.latest_version.manifest_json.update(manifest=manifest)
res = self.summary()
eq_(res.context['permissions'], json.loads(manifest)['permissions'])
def test_version_history_non_packaged(self):
res = self.summary()
eq_(pq(res.content)('section.version-history').length, 0)
def test_version_history_packaged(self):
self.app.update(is_packaged=True)
self.version = self.app.current_version
self.file = self.version.all_files[0]
self.file.update(filename='mozball.zip')
res = self.summary()
eq_(pq(res.content)('section.version-history').length, 1)
assert 'mozball.zip' in pq(res.content)(
'section.version-history a.download').attr('href')
def test_edit_link_staff(self):
res = self.summary()
eq_(pq(res.content)('.shortcuts li').length, 4)
eq_(pq(res.content)('.shortcuts li').eq(3).text(), 'Edit Listing')
def test_operator_200(self):
self.login('[email protected]')
res = self.client.get(self.url)
eq_(res.status_code, 200)
def test_priority_button_available(self):
res = self.summary()
eq_(pq(res.content)('section.column-b button.button').attr('name'),
'prioritize')
eq_(pq(res.content)('section.column-b button.button').text(),
'Prioritize Review?')
def test_priority_button_already_prioritized(self):
self.app.update(priority_review=True)
res = self.summary()
eq_(pq(res.content)('section.column-b button.button,disabled')
.attr('name'), 'prioritize')
eq_(pq(res.content)('section.column-b button.button,disabled').text(),
'Review Prioritized')
def test_priority_button_works(self):
staff = UserProfile.objects.get(email='[email protected]')
req = req_factory_factory(self.url, post=True, user=staff,
data={'prioritize': 'true'})
app_summary(req, self.app.id)
self.app.reload()
eq_(self.app.priority_review, True)
@mock.patch.object(settings, 'PAYMENT_PROVIDERS', ['bango', 'reference'])
def test_multiple_payment_accounts(self):
self.add_payment_accounts([PROVIDER_BANGO, PROVIDER_REFERENCE])
res = self.summary()
self.verify_bango_portal(self.app, res)
class TestAppSummaryPurchases(AppSummaryTest):
def setUp(self):
super(TestAppSummaryPurchases, self).setUp()
self._setUp()
def assert_totals(self, data):
eq_(data['total'], 6)
six_bucks = numbers.format_currency(6, 'USD',
locale=numbers.LC_NUMERIC)
three_euro = numbers.format_currency(3, 'EUR',
locale=numbers.LC_NUMERIC)
eq_(set(data['amounts']), set([six_bucks, three_euro]))
eq_(len(data['amounts']), 2)
def assert_empty(self, data):
eq_(data['total'], 0)
eq_(sorted(data['amounts']), [])
def purchase(self, created=None, typ=mkt.CONTRIB_PURCHASE):
for curr, amount in (('USD', '2.00'), ('EUR', '1.00')):
for i in range(3):
c = Contribution.objects.create(addon=self.app,
user=self.user,
amount=Decimal(amount),
currency=curr,
type=typ)
if created:
c.update(created=created)
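    # With the defaults above, purchase() creates six contributions per call:
    # three of USD 2.00 (USD 6.00 in total) and three of EUR 1.00 (EUR 3.00 in
    # total), which is exactly what assert_totals() checks.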
def test_24_hr(self):
self.purchase()
res = self.summary()
self.assert_totals(res.context['purchases']['last_24_hours'])
def test_ignore_older_than_24_hr(self):
self.purchase(created=datetime.now() - timedelta(days=1,
minutes=1))
res = self.summary()
self.assert_empty(res.context['purchases']['last_24_hours'])
def test_7_days(self):
self.purchase(created=datetime.now() - timedelta(days=6,
minutes=55))
res = self.summary()
self.assert_totals(res.context['purchases']['last_7_days'])
def test_ignore_older_than_7_days(self):
self.purchase(created=datetime.now() - timedelta(days=7,
minutes=1))
res = self.summary()
self.assert_empty(res.context['purchases']['last_7_days'])
def test_alltime(self):
self.purchase(created=datetime.now() - timedelta(days=31))
res = self.summary()
self.assert_totals(res.context['purchases']['alltime'])
def test_ignore_non_purchases(self):
for typ in [mkt.CONTRIB_REFUND,
mkt.CONTRIB_CHARGEBACK,
mkt.CONTRIB_PENDING]:
self.purchase(typ=typ)
res = self.summary()
self.assert_empty(res.context['purchases']['alltime'])
class TestAppSummaryRefunds(AppSummaryTest):
fixtures = AppSummaryTest.fixtures + fixture('user_999', 'user_admin')
def setUp(self):
super(TestAppSummaryRefunds, self).setUp()
self._setUp()
self.user = UserProfile.objects.get(email='[email protected]')
self.contrib1 = self.purchase()
self.contrib2 = self.purchase()
self.contrib3 = self.purchase()
self.contrib4 = self.purchase()
def purchase(self):
return Contribution.objects.create(addon=self.app,
user=self.user,
amount=Decimal('0.99'),
currency='USD',
paykey='AP-1235',
type=mkt.CONTRIB_PURCHASE)
def refund(self, refunds):
for contrib, status in refunds:
Refund.objects.create(contribution=contrib,
status=status,
user=self.user)
def test_requested(self):
self.refund(((self.contrib1, mkt.REFUND_APPROVED),
(self.contrib2, mkt.REFUND_APPROVED),
(self.contrib3, mkt.REFUND_DECLINED),
(self.contrib4, mkt.REFUND_DECLINED)))
res = self.summary()
eq_(res.context['refunds']['requested'], 2)
eq_(res.context['refunds']['percent_of_purchases'], '50.0%')
def test_no_refunds(self):
res = self.summary()
eq_(res.context['refunds']['requested'], 0)
eq_(res.context['refunds']['percent_of_purchases'], '0.0%')
eq_(res.context['refunds']['auto-approved'], 0)
eq_(res.context['refunds']['approved'], 0)
eq_(res.context['refunds']['rejected'], 0)
def test_auto_approved(self):
self.refund(((self.contrib1, mkt.REFUND_APPROVED),
(self.contrib2, mkt.REFUND_APPROVED_INSTANT)))
res = self.summary()
eq_(res.context['refunds']['auto-approved'], 1)
def test_approved(self):
self.refund(((self.contrib1, mkt.REFUND_APPROVED),
(self.contrib2, mkt.REFUND_DECLINED)))
res = self.summary()
eq_(res.context['refunds']['approved'], 1)
def test_rejected(self):
self.refund(((self.contrib1, mkt.REFUND_APPROVED),
(self.contrib2, mkt.REFUND_DECLINED),
(self.contrib3, mkt.REFUND_FAILED)))
res = self.summary()
eq_(res.context['refunds']['rejected'], 2)
class TestPurchases(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141', 'users')
def setUp(self):
self.app = Webapp.objects.get(pk=337141)
self.reviewer = UserProfile.objects.get(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
self.url = reverse('lookup.user_purchases', args=[self.user.pk])
def test_not_allowed(self):
self.client.logout()
self.assertLoginRequired(self.client.get(self.url))
def test_not_even_mine(self):
self.login(self.user)
eq_(self.client.get(self.url).status_code, 403)
def test_access(self):
self.login(self.reviewer)
res = self.client.get(self.url)
eq_(res.status_code, 200)
eq_(pq(res.content)('p.notice').length, 1)
def test_purchase_shows_up(self):
Contribution.objects.create(user=self.user, addon=self.app,
amount=1, type=mkt.CONTRIB_PURCHASE)
self.login(self.reviewer)
res = self.client.get(self.url)
eq_(res.status_code, 200)
doc = pq(res.content)
eq_(doc('div.product-lookup-list a').attr('href'),
self.app.get_detail_url())
def test_no_support_link(self):
for type_ in [mkt.CONTRIB_PURCHASE]:
Contribution.objects.create(user=self.user, addon=self.app,
amount=1, type=type_)
self.login(self.reviewer)
res = self.client.get(self.url)
eq_(res.status_code, 200)
doc = pq(res.content)
eq_(len(doc('.item a.request-support')), 0)
class TestActivity(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141', 'users')
def setUp(self):
self.app = Webapp.objects.get(pk=337141)
self.reviewer = UserProfile.objects.get(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
self.url = reverse('lookup.user_activity', args=[self.user.pk])
def test_not_allowed(self):
self.client.logout()
self.assertLoginRequired(self.client.get(self.url))
def test_not_even_mine(self):
self.login(self.user)
eq_(self.client.get(self.url).status_code, 403)
def test_access(self):
self.login(self.reviewer)
res = self.client.get(self.url)
eq_(res.status_code, 200)
eq_(len(pq(res.content)('.simple-log div')), 0)
def test_log(self):
self.login(self.reviewer)
self.client.get(self.url)
log_item = ActivityLog.objects.get(action=mkt.LOG.ADMIN_VIEWED_LOG.id)
eq_(len(log_item.arguments), 1)
eq_(log_item.arguments[0].id, self.reviewer.id)
eq_(log_item.user, self.user)
def test_display(self):
mkt.log(mkt.LOG.PURCHASE_ADDON, self.app, user=self.user)
mkt.log(mkt.LOG.ADMIN_USER_EDITED, self.user, 'spite', user=self.user)
self.login(self.reviewer)
res = self.client.get(self.url)
eq_(res.status_code, 200)
doc = pq(res.content)
assert 'purchased' in doc('li.item').eq(0).text()
assert 'edited' in doc('li.item').eq(1).text()
class TestAppActivity(mkt.site.tests.TestCase):
fixtures = fixture('webapp_337141', 'users')
def setUp(self):
self.app = Webapp.objects.get(pk=337141)
self.reviewer = UserProfile.objects.get(email='[email protected]')
self.user = UserProfile.objects.get(email='[email protected]')
self.url = reverse('lookup.app_activity', args=[self.app.pk])
def test_not_allowed(self):
self.client.logout()
self.assertLoginRequired(self.client.get(self.url))
def test_not_even_mine(self):
self.login(self.user)
eq_(self.client.get(self.url).status_code, 403)
def test_access(self):
self.login(self.reviewer)
res = self.client.get(self.url)
eq_(res.status_code, 200)
def test_logs(self):
# Admin log.
mkt.log(mkt.LOG.COMMENT_VERSION, self.app, self.app.current_version,
user=self.user)
# Regular log.
mkt.log(mkt.LOG.MANIFEST_UPDATED, self.app, user=self.user)
self.login(self.reviewer)
res = self.client.get(self.url)
doc = pq(res.content)
assert 'manifest updated' in doc('li.item').eq(0).text()
assert 'Comment on' in doc('li.item').eq(1).text()
class TestWebsiteSearch(ESTestCase, SearchTestMixin):
fixtures = fixture('user_support_staff', 'user_999')
def setUp(self):
super(TestWebsiteSearch, self).setUp()
self.url = reverse('lookup.website_search')
self.website = website_factory()
self.refresh('website')
self.login('[email protected]')
def search(self, *args, **kwargs):
if 'lang' not in kwargs:
kwargs.update({'lang': 'en-US'})
return super(TestWebsiteSearch, self).search(*args, **kwargs)
def verify_result(self, data):
eq_(data['objects'][0]['id'], self.website.pk)
eq_(data['objects'][0]['name'], self.website.name.localized_string)
eq_(data['objects'][0]['url'], reverse('lookup.website_summary',
args=[self.website.pk]))
def test_auth_required(self):
self.client.logout()
res = self.client.get(self.url)
eq_(res.status_code, 403)
def test_by_name(self):
data = self.search(q=self.website.name.localized_string)
self.verify_result(data)
def test_by_id(self):
data = self.search(q=self.website.pk)
self.verify_result(data)
# -*- coding: utf-8 -*-
import datetime
import unittest
import uuid
from voluptuous import Schema, MultipleInvalid, Required, Invalid
from zombase import validation
class TestUUID(unittest.TestCase):
def test_uuid(self):
self.assertTrue(validation.is_valid_uuid(uuid.uuid4()))
self.assertTrue(validation.is_valid_uuid(str(uuid.uuid4())))
self.assertFalse(validation.is_valid_uuid('bla'))
class TestMail(unittest.TestCase):
def test_mail(self):
schema = Schema(validation.Mail())
schema('[email protected]')
def test_other_mail(self):
schema = Schema(validation.Mail())
schema('[email protected]')
schema('[email protected]')
schema('[email protected]')
def test_lower_mail(self):
schema = Schema(validation.Mail())
schema_lower = Schema(validation.Mail(lower=True))
self.assertEqual(schema('[email protected]'), '[email protected]')
self.assertEqual(schema_lower('[email protected]'), '[email protected]')
def test_invalid_mail(self):
schema = Schema(validation.Mail())
with self.assertRaises(MultipleInvalid):
schema('a [email protected]')
class TestFloatable(unittest.TestCase):
def test_simple_floatable(self):
schema = Schema(validation.Floatable())
self.assertEqual(schema('1.12'), 1.12)
self.assertEqual(schema(1.12), 1.12)
def test_empty_to_none_floatable(self):
schema = Schema(validation.Floatable(empty_to_none=True))
self.assertEqual(schema(''), None)
def test_uncasted_floatable(self):
schema = Schema(validation.Floatable(cast=False))
self.assertEqual(schema('3.0'), '3.0')
def test_invalid_floatable(self):
schema = Schema(validation.Floatable())
with self.assertRaises(MultipleInvalid):
schema('3.a')
with self.assertRaises(MultipleInvalid):
schema(None)
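# The tests above pin down the Floatable contract. As a reading aid only, a
# voluptuous-style validator with the assumed behaviour might look like the
# hypothetical sketch below (it is not zombase's implementation; it reuses the
# Invalid import from the top of this module).
def _floatable_sketch(cast=True, empty_to_none=False):
    def validator(value):
        # Optionally map the empty string to None.
        if empty_to_none and value == '':
            return None
        try:
            as_float = float(value)
        except (TypeError, ValueError):
            # voluptuous wraps this in MultipleInvalid at the Schema level.
            raise Invalid('expected a float-able value, got %r' % (value,))
        # With cast=False the value is validated but returned unchanged.
        return as_float if cast else value
    return validator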
class TestIntegeable(unittest.TestCase):
def test_simple_integeable(self):
schema = Schema(validation.Integeable())
self.assertEqual(schema('1'), 1)
def test_empty_to_none_integeable(self):
schema = Schema(validation.Integeable(empty_to_none=True))
self.assertEqual(schema(''), None)
def test_uncasted_integeable(self):
schema = Schema(validation.Integeable(cast=False))
self.assertEqual(schema('3'), '3')
def test_invalid_integeable(self):
schema = Schema(validation.Integeable())
with self.assertRaises(MultipleInvalid):
schema('a')
def test_invalid_integeable_but_floatable(self):
schema = Schema(validation.Integeable())
with self.assertRaises(MultipleInvalid):
schema('3.2')
with self.assertRaises(MultipleInvalid):
schema(3.2)
with self.assertRaises(MultipleInvalid):
schema(None)
class TestDateable(unittest.TestCase):
def test_simple_no_cast(self):
schema = Schema(validation.Dateable())
self.assertEqual(
schema(datetime.date(2015, 11, 13)), datetime.date(2015, 11, 13))
def test_simple_cast(self):
schema = Schema(validation.Dateable())
self.assertEqual(schema('2015-11-13'), datetime.date(2015, 11, 13))
def test_cast_w_format(self):
schema = Schema(validation.Dateable(format='%Y%m%d'))
self.assertEqual(schema('20151113'), datetime.date(2015, 11, 13))
def test_nocast_w_format(self):
schema = Schema(validation.Dateable(cast=False, format='%Y%m%d'))
value = '20151113'
nocast = schema('20151113')
self.assertEqual(nocast, value)
    def test_wrong_format(self):
schema = Schema(validation.Dateable())
with self.assertRaises(Invalid):
schema('20151113')
class TestChoice(unittest.TestCase):
def test_choice(self):
schema = Schema(validation.Choice(['a', 'b']))
schema('a')
def test_wrong_choice(self):
schema = Schema(validation.Choice(['a', 'b']))
with self.assertRaises(MultipleInvalid):
schema('c')
def test_wrong_choice_in_dict(self):
# The error message system is different in a dict.
schema = Schema({
'bla': validation.Choice(['a', 'b']),
})
with self.assertRaises(MultipleInvalid):
schema({'bla': 'c'})
class TestAdaptDict(unittest.TestCase):
input_dict = {
'to_keep': 'dummy',
'to_remove': 'dummy',
'to_make_required': 'dummy'
}
def test_keep(self):
output_dict = validation.adapt_dict(self.input_dict, keep=['to_keep'])
self.assertEqual(output_dict, {'to_keep': 'dummy'})
def test_remove(self):
output_dict = validation.adapt_dict(
self.input_dict, remove=['to_remove'])
self.assertEqual(output_dict, {'to_keep': 'dummy',
'to_make_required': 'dummy'})
def test_make_required(self):
output_dict = validation.adapt_dict(
self.input_dict, make_required=['to_make_required'])
def the_assert(output_dict):
for key in output_dict:
if (str(key) == 'to_make_required' and
not isinstance(key, Required)):
return False
elif (str(key) != 'to_make_required' and
isinstance(key, Required)):
return False
return True
self.assertTrue(the_assert(output_dict))
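# As a reading aid only: the behaviour exercised by TestAdaptDict suggests
# that adapt_dict copies the schema dict, keeps or removes the named keys, and
# wraps the keys listed in make_required in voluptuous' Required marker
# (imported at the top of this module). A hypothetical sketch, not zombase's
# implementation:
def _adapt_dict_sketch(schema_dict, keep=None, remove=None, make_required=None):
    adapted = dict(schema_dict)
    if keep is not None:
        adapted = dict((k, v) for k, v in adapted.items() if str(k) in keep)
    for key in (remove or ()):
        adapted.pop(key, None)
    for key in (make_required or ()):
        if key in adapted:
            # Replace the plain key with a Required marker for the same name.
            adapted[Required(key)] = adapted.pop(key)
    return adapted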
class TestSchemaDictNone(unittest.TestCase):
schema_dict = {
Required('id'): int,
'name': str,
'value': int,
'target': int,
}
def test_wrong_init(self):
with self.assertRaises(ValueError):
validation.SchemaDictNone(['a', 'b'])
def test_basic_schema(self):
schema = validation.SchemaDictNone(self.schema_dict)
data = {'id': 2, 'name': 'bla', 'value': None}
new_data = schema(data)
self.assertEqual(new_data['value'], None)
self.assertEqual(new_data['name'], 'bla')
def test_schema_with_not_none(self):
schema = validation.SchemaDictNone(
self.schema_dict, not_none=('name',))
data = {'id': 2, 'value': None, 'name': None}
with self.assertRaises(MultipleInvalid):
schema(data)
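# As a reading aid only: the two tests above suggest that SchemaDictNone takes
# a dict schema, lets every value also be None, and forbids None for the keys
# named in not_none. A hypothetical equivalent built from plain voluptuous
# pieces (an assumption, not zombase's implementation):
def _schema_dict_none_sketch(schema_dict, not_none=()):
    from voluptuous import Any
    if not isinstance(schema_dict, dict):
        raise ValueError('SchemaDictNone expects a dict schema')
    wrapped = {}
    for key, validator in schema_dict.items():
        # Keys listed in not_none keep their original validator; all other
        # values may additionally be None.
        wrapped[key] = validator if str(key) in not_none else Any(None, validator)
    return Schema(wrapped)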
# Copyright (c) 2005 Divmod, Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the Twisted plugin system.
"""
from __future__ import absolute_import, division
import sys, errno, os, time
import compileall
import functools
from zope.interface import Interface
from twisted.trial import unittest
from twisted.python.compat import _PY3
from twisted.python.log import textFromEventDict, addObserver, removeObserver
from twisted.python.filepath import FilePath
from twisted import plugin
if _PY3:
from importlib import invalidate_caches as invalidateImportCaches
else:
def invalidateImportCaches():
"""
On python 2, import caches don't need to be invalidated.
"""
class ITestPlugin(Interface):
"""
A plugin for use by the plugin system's unit tests.
Do not use this.
"""
class ITestPlugin2(Interface):
"""
See L{ITestPlugin}.
"""
class PluginTests(unittest.TestCase):
"""
Tests which verify the behavior of the current, active Twisted plugins
directory.
"""
def setUp(self):
"""
Save C{sys.path} and C{sys.modules}, and create a package for tests.
"""
self.originalPath = sys.path[:]
self.savedModules = sys.modules.copy()
self.root = FilePath(self.mktemp())
self.root.createDirectory()
self.package = self.root.child('mypackage')
self.package.createDirectory()
self.package.child('__init__.py').setContent(b"")
FilePath(__file__).sibling('plugin_basic.py'
).copyTo(self.package.child('testplugin.py'))
self.originalPlugin = "testplugin"
sys.path.insert(0, self.root.path)
import mypackage
self.module = mypackage
def tearDown(self):
"""
Restore C{sys.path} and C{sys.modules} to their original values.
"""
sys.path[:] = self.originalPath
sys.modules.clear()
sys.modules.update(self.savedModules)
def _unimportPythonModule(self, module, deleteSource=False):
modulePath = module.__name__.split('.')
packageName = '.'.join(modulePath[:-1])
moduleName = modulePath[-1]
delattr(sys.modules[packageName], moduleName)
del sys.modules[module.__name__]
for ext in ['c', 'o'] + (deleteSource and [''] or []):
try:
os.remove(module.__file__ + ext)
except OSError as ose:
if ose.errno != errno.ENOENT:
raise
def _clearCache(self):
"""
        Remove the plugin package's B{dropin.cache} file.
"""
self.package.child('dropin.cache').remove()
def _withCacheness(meth):
"""
        This is a paranoid test wrapper that calls C{meth} twice, clears the
        cache, and then calls it twice more. It is meant to ensure that the
        plugin system behaves correctly no matter what state the cache is in.
"""
@functools.wraps(meth)
def wrapped(self):
meth(self)
meth(self)
self._clearCache()
meth(self)
meth(self)
return wrapped
def test_cache(self):
"""
        Check that the cache returned by L{plugin.getCache} holds the plugin
        B{testplugin}, and that this plugin has the properties we expect: it
        provides L{ITestPlugin}, has the right name and description, and can
        be loaded successfully.
"""
cache = plugin.getCache(self.module)
dropin = cache[self.originalPlugin]
self.assertEqual(dropin.moduleName,
'mypackage.%s' % (self.originalPlugin,))
self.assertIn("I'm a test drop-in.", dropin.description)
# Note, not the preferred way to get a plugin by its interface.
p1 = [p for p in dropin.plugins if ITestPlugin in p.provided][0]
self.assertIdentical(p1.dropin, dropin)
self.assertEqual(p1.name, "TestPlugin")
        # Check that the content of the description comes from the plugin
        # module docstring.
self.assertEqual(
p1.description.strip(),
"A plugin used solely for testing purposes.")
self.assertEqual(p1.provided, [ITestPlugin, plugin.IPlugin])
realPlugin = p1.load()
# The plugin should match the class present in sys.modules
self.assertIdentical(
realPlugin,
sys.modules['mypackage.%s' % (self.originalPlugin,)].TestPlugin)
        # And it should also match if we import it in the classic way.
import mypackage.testplugin as tp
self.assertIdentical(realPlugin, tp.TestPlugin)
test_cache = _withCacheness(test_cache)
def test_cacheRepr(self):
"""
L{CachedPlugin} has a helpful C{repr} which contains relevant
information about it.
"""
cachedDropin = plugin.getCache(self.module)[self.originalPlugin]
cachedPlugin = list(p for p in cachedDropin.plugins
if p.name == 'TestPlugin')[0]
self.assertEqual(
repr(cachedPlugin),
"<CachedPlugin 'TestPlugin'/'mypackage.testplugin' "
"(provides 'ITestPlugin, IPlugin')>"
)
def test_plugins(self):
"""
L{plugin.getPlugins} should return the list of plugins matching the
specified interface (here, L{ITestPlugin2}), and these plugins
should be instances of classes with a C{test} method, to be sure
        L{plugin.getPlugins} loads classes correctly.
"""
plugins = list(plugin.getPlugins(ITestPlugin2, self.module))
self.assertEqual(len(plugins), 2)
names = ['AnotherTestPlugin', 'ThirdTestPlugin']
for p in plugins:
names.remove(p.__name__)
p.test()
test_plugins = _withCacheness(test_plugins)
def test_detectNewFiles(self):
"""
Check that L{plugin.getPlugins} is able to detect plugins added at
runtime.
"""
FilePath(__file__).sibling('plugin_extra1.py'
).copyTo(self.package.child('pluginextra.py'))
try:
# Check that the current situation is clean
self.failIfIn('mypackage.pluginextra', sys.modules)
self.failIf(hasattr(sys.modules['mypackage'], 'pluginextra'),
"mypackage still has pluginextra module")
plgs = list(plugin.getPlugins(ITestPlugin, self.module))
# We should find 2 plugins: the one in testplugin, and the one in
# pluginextra
self.assertEqual(len(plgs), 2)
names = ['TestPlugin', 'FourthTestPlugin']
for p in plgs:
names.remove(p.__name__)
p.test1()
finally:
self._unimportPythonModule(
sys.modules['mypackage.pluginextra'],
True)
test_detectNewFiles = _withCacheness(test_detectNewFiles)
def test_detectFilesChanged(self):
"""
        Check that if the content of a plugin changes, L{plugin.getPlugins} is
        able to detect the newly added plugins.
"""
FilePath(__file__).sibling('plugin_extra1.py'
).copyTo(self.package.child('pluginextra.py'))
try:
plgs = list(plugin.getPlugins(ITestPlugin, self.module))
# Sanity check
self.assertEqual(len(plgs), 2)
FilePath(__file__).sibling('plugin_extra2.py'
).copyTo(self.package.child('pluginextra.py'))
# Fake out Python.
self._unimportPythonModule(sys.modules['mypackage.pluginextra'])
# Make sure additions are noticed
plgs = list(plugin.getPlugins(ITestPlugin, self.module))
self.assertEqual(len(plgs), 3)
names = ['TestPlugin', 'FourthTestPlugin', 'FifthTestPlugin']
for p in plgs:
names.remove(p.__name__)
p.test1()
finally:
self._unimportPythonModule(
sys.modules['mypackage.pluginextra'],
True)
test_detectFilesChanged = _withCacheness(test_detectFilesChanged)
def test_detectFilesRemoved(self):
"""
Check that when a dropin file is removed, L{plugin.getPlugins} doesn't
return it anymore.
"""
FilePath(__file__).sibling('plugin_extra1.py'
).copyTo(self.package.child('pluginextra.py'))
try:
# Generate a cache with pluginextra in it.
list(plugin.getPlugins(ITestPlugin, self.module))
finally:
self._unimportPythonModule(
sys.modules['mypackage.pluginextra'],
True)
plgs = list(plugin.getPlugins(ITestPlugin, self.module))
self.assertEqual(1, len(plgs))
test_detectFilesRemoved = _withCacheness(test_detectFilesRemoved)
def test_nonexistentPathEntry(self):
"""
Test that getCache skips over any entries in a plugin package's
C{__path__} which do not exist.
"""
path = self.mktemp()
self.failIf(os.path.exists(path))
# Add the test directory to the plugins path
self.module.__path__.append(path)
try:
plgs = list(plugin.getPlugins(ITestPlugin, self.module))
self.assertEqual(len(plgs), 1)
finally:
self.module.__path__.remove(path)
test_nonexistentPathEntry = _withCacheness(test_nonexistentPathEntry)
def test_nonDirectoryChildEntry(self):
"""
Test that getCache skips over any entries in a plugin package's
C{__path__} which refer to children of paths which are not directories.
"""
path = FilePath(self.mktemp())
self.failIf(path.exists())
path.touch()
child = path.child("test_package").path
self.module.__path__.append(child)
try:
plgs = list(plugin.getPlugins(ITestPlugin, self.module))
self.assertEqual(len(plgs), 1)
finally:
self.module.__path__.remove(child)
test_nonDirectoryChildEntry = _withCacheness(test_nonDirectoryChildEntry)
def test_deployedMode(self):
"""
        The C{dropin.cache} file may not be writable: the cache should still
        be usable, but an error should be logged to show that the cache
        couldn't be updated.
"""
# Generate the cache
plugin.getCache(self.module)
cachepath = self.package.child('dropin.cache')
# Add a new plugin
FilePath(__file__).sibling('plugin_extra1.py'
).copyTo(self.package.child('pluginextra.py'))
invalidateImportCaches()
os.chmod(self.package.path, 0o500)
        # Change the permissions of dropin.cache too, for Windows.
os.chmod(cachepath.path, 0o400)
self.addCleanup(os.chmod, self.package.path, 0o700)
self.addCleanup(os.chmod, cachepath.path, 0o700)
# Start observing log events to see the warning
events = []
addObserver(events.append)
self.addCleanup(removeObserver, events.append)
cache = plugin.getCache(self.module)
# The new plugin should be reported
self.assertIn('pluginextra', cache)
self.assertIn(self.originalPlugin, cache)
# Make sure something was logged about the cache.
expected = "Unable to write to plugin cache %s: error number %d" % (
cachepath.path, errno.EPERM)
for event in events:
if expected in textFromEventDict(event):
break
else:
self.fail(
"Did not observe unwriteable cache warning in log "
"events: %r" % (events,))
# This is something like the Twisted plugins file.
pluginInitFile = b"""
from twisted.plugin import pluginPackagePaths
__path__.extend(pluginPackagePaths(__name__))
__all__ = []
"""
def pluginFileContents(name):
return (
"from zope.interface import provider\n"
"from twisted.plugin import IPlugin\n"
"from twisted.test.test_plugin import ITestPlugin\n"
"\n"
"@provider(IPlugin, ITestPlugin)\n"
"class {0}(object):\n"
" pass\n"
).format(name).encode('ascii')
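# For reference, pluginFileContents('dev') returns bytes equivalent to the
# following module source:
#
#     from zope.interface import provider
#     from twisted.plugin import IPlugin
#     from twisted.test.test_plugin import ITestPlugin
#
#     @provider(IPlugin, ITestPlugin)
#     class dev(object):
#         pass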
def _createPluginDummy(entrypath, pluginContent, real, pluginModule):
"""
Create a plugindummy package.
"""
entrypath.createDirectory()
pkg = entrypath.child('plugindummy')
pkg.createDirectory()
if real:
pkg.child('__init__.py').setContent(b'')
plugs = pkg.child('plugins')
plugs.createDirectory()
if real:
plugs.child('__init__.py').setContent(pluginInitFile)
plugs.child(pluginModule + '.py').setContent(pluginContent)
return plugs
class DeveloperSetupTests(unittest.TestCase):
"""
These tests verify things about the plugin system without actually
interacting with the deployed 'twisted.plugins' package, instead creating a
temporary package.
"""
def setUp(self):
"""
        Create a complex environment with multiple entries on sys.path, akin
        to that of a developer who has a development (trunk) checkout of
        Twisted, a system-installed version of Twisted (for their operating
        system's tools), and a project which provides Twisted plugins.
"""
self.savedPath = sys.path[:]
self.savedModules = sys.modules.copy()
self.fakeRoot = FilePath(self.mktemp())
self.fakeRoot.createDirectory()
self.systemPath = self.fakeRoot.child('system_path')
self.devPath = self.fakeRoot.child('development_path')
self.appPath = self.fakeRoot.child('application_path')
self.systemPackage = _createPluginDummy(
self.systemPath, pluginFileContents('system'),
True, 'plugindummy_builtin')
self.devPackage = _createPluginDummy(
self.devPath, pluginFileContents('dev'),
True, 'plugindummy_builtin')
self.appPackage = _createPluginDummy(
self.appPath, pluginFileContents('app'),
False, 'plugindummy_app')
# Now we're going to do the system installation.
sys.path.extend([x.path for x in [self.systemPath,
self.appPath]])
# Run all the way through the plugins list to cause the
# L{plugin.getPlugins} generator to write cache files for the system
# installation.
self.getAllPlugins()
self.sysplug = self.systemPath.child('plugindummy').child('plugins')
self.syscache = self.sysplug.child('dropin.cache')
# Make sure there's a nice big difference in modification times so that
# we won't re-build the system cache.
now = time.time()
os.utime(
self.sysplug.child('plugindummy_builtin.py').path,
(now - 5000,) * 2)
os.utime(self.syscache.path, (now - 2000,) * 2)
# For extra realism, let's make sure that the system path is no longer
# writable.
self.lockSystem()
self.resetEnvironment()
def lockSystem(self):
"""
Lock the system directories, as if they were unwritable by this user.
"""
os.chmod(self.sysplug.path, 0o555)
os.chmod(self.syscache.path, 0o555)
def unlockSystem(self):
"""
Unlock the system directories, as if they were writable by this user.
"""
os.chmod(self.sysplug.path, 0o777)
os.chmod(self.syscache.path, 0o777)
def getAllPlugins(self):
"""
Get all the plugins loadable from our dummy package, and return their
short names.
"""
# Import the module we just added to our path. (Local scope because
# this package doesn't exist outside of this test.)
import plugindummy.plugins
x = list(plugin.getPlugins(ITestPlugin, plugindummy.plugins))
return [plug.__name__ for plug in x]
def resetEnvironment(self):
"""
Change the environment to what it should be just as the test is
starting.
"""
self.unsetEnvironment()
sys.path.extend([x.path for x in [self.devPath,
self.systemPath,
self.appPath]])
def unsetEnvironment(self):
"""
Change the Python environment back to what it was before the test was
started.
"""
invalidateImportCaches()
sys.modules.clear()
sys.modules.update(self.savedModules)
sys.path[:] = self.savedPath
def tearDown(self):
"""
Reset the Python environment to what it was before this test ran, and
restore permissions on files which were marked read-only so that the
directory may be cleanly cleaned up.
"""
self.unsetEnvironment()
# Normally we wouldn't "clean up" the filesystem like this (leaving
# things for post-test inspection), but if we left the permissions the
# way they were, we'd be leaving files around that the buildbots
# couldn't delete, and that would be bad.
self.unlockSystem()
def test_developmentPluginAvailability(self):
"""
Plugins added in the development path should be loadable, even when
the (now non-importable) system path contains its own idea of the
        list of plugins for a package. Conversely, plugins added in the
system path should not be available.
"""
# Run 3 times: uncached, cached, and then cached again to make sure we
# didn't overwrite / corrupt the cache on the cached try.
for x in range(3):
names = self.getAllPlugins()
names.sort()
self.assertEqual(names, ['app', 'dev'])
def test_freshPyReplacesStalePyc(self):
"""
Verify that if a stale .pyc file on the PYTHONPATH is replaced by a
fresh .py file, the plugins in the new .py are picked up rather than
the stale .pyc, even if the .pyc is still around.
"""
mypath = self.appPackage.child("stale.py")
mypath.setContent(pluginFileContents('one'))
# Make it super stale
x = time.time() - 1000
os.utime(mypath.path, (x, x))
pyc = mypath.sibling('stale.pyc')
# compile it
if _PY3:
# On python 3, don't use the __pycache__ directory; the intention
# of scanning for .pyc files is for configurations where you want
# to intentionally include them, which means we _don't_ scan for
# them inside cache directories.
extra = dict(legacy=True)
else:
# On python 2 this option doesn't exist.
extra = dict()
compileall.compile_dir(self.appPackage.path, quiet=1, **extra)
os.utime(pyc.path, (x, x))
# Eliminate the other option.
mypath.remove()
# Make sure it's the .pyc path getting cached.
self.resetEnvironment()
# Sanity check.
self.assertIn('one', self.getAllPlugins())
        self.assertNotIn('two', self.getAllPlugins())
self.resetEnvironment()
mypath.setContent(pluginFileContents('two'))
        self.assertNotIn('one', self.getAllPlugins())
self.assertIn('two', self.getAllPlugins())
def test_newPluginsOnReadOnlyPath(self):
"""
Verify that a failure to write the dropin.cache file on a read-only
path will not affect the list of plugins returned.
Note: this test should pass on both Linux and Windows, but may not
provide useful coverage on Windows due to the different meaning of
"read-only directory".
"""
self.unlockSystem()
self.sysplug.child('newstuff.py').setContent(pluginFileContents('one'))
self.lockSystem()
# Take the developer path out, so that the system plugins are actually
# examined.
sys.path.remove(self.devPath.path)
# Start observing log events to see the warning
events = []
addObserver(events.append)
self.addCleanup(removeObserver, events.append)
self.assertIn('one', self.getAllPlugins())
# Make sure something was logged about the cache.
expected = "Unable to write to plugin cache %s: error number %d" % (
self.syscache.path, errno.EPERM)
for event in events:
if expected in textFromEventDict(event):
break
else:
self.fail(
"Did not observe unwriteable cache warning in log "
"events: %r" % (events,))
class AdjacentPackageTests(unittest.TestCase):
"""
Tests for the behavior of the plugin system when there are multiple
installed copies of the package containing the plugins being loaded.
"""
def setUp(self):
"""
Save the elements of C{sys.path} and the items of C{sys.modules}.
"""
self.originalPath = sys.path[:]
self.savedModules = sys.modules.copy()
def tearDown(self):
"""
Restore C{sys.path} and C{sys.modules} to their original values.
"""
sys.path[:] = self.originalPath
sys.modules.clear()
sys.modules.update(self.savedModules)
def createDummyPackage(self, root, name, pluginName):
"""
Create a directory containing a Python package named I{dummy} with a
I{plugins} subpackage.
@type root: L{FilePath}
@param root: The directory in which to create the hierarchy.
@type name: C{str}
@param name: The name of the directory to create which will contain
the package.
@type pluginName: C{str}
@param pluginName: The name of a module to create in the
I{dummy.plugins} package.
@rtype: L{FilePath}
@return: The directory which was created to contain the I{dummy}
package.
"""
directory = root.child(name)
package = directory.child('dummy')
package.makedirs()
package.child('__init__.py').setContent(b'')
plugins = package.child('plugins')
plugins.makedirs()
plugins.child('__init__.py').setContent(pluginInitFile)
pluginModule = plugins.child(pluginName + '.py')
pluginModule.setContent(pluginFileContents(name))
return directory
def test_hiddenPackageSamePluginModuleNameObscured(self):
"""
Only plugins from the first package in sys.path should be returned by
getPlugins in the case where there are two Python packages by the same
name installed, each with a plugin module by a single name.
"""
root = FilePath(self.mktemp())
root.makedirs()
firstDirectory = self.createDummyPackage(root, 'first', 'someplugin')
secondDirectory = self.createDummyPackage(root, 'second', 'someplugin')
sys.path.append(firstDirectory.path)
sys.path.append(secondDirectory.path)
import dummy.plugins
plugins = list(plugin.getPlugins(ITestPlugin, dummy.plugins))
self.assertEqual(['first'], [p.__name__ for p in plugins])
def test_hiddenPackageDifferentPluginModuleNameObscured(self):
"""
Plugins from the first package in sys.path should be returned by
getPlugins in the case where there are two Python packages by the same
name installed, each with a plugin module by a different name.
"""
root = FilePath(self.mktemp())
root.makedirs()
firstDirectory = self.createDummyPackage(root, 'first', 'thisplugin')
secondDirectory = self.createDummyPackage(root, 'second', 'thatplugin')
sys.path.append(firstDirectory.path)
sys.path.append(secondDirectory.path)
import dummy.plugins
plugins = list(plugin.getPlugins(ITestPlugin, dummy.plugins))
self.assertEqual(['first'], [p.__name__ for p in plugins])
class PackagePathTests(unittest.TestCase):
"""
Tests for L{plugin.pluginPackagePaths} which constructs search paths for
plugin packages.
"""
def setUp(self):
"""
Save the elements of C{sys.path}.
"""
self.originalPath = sys.path[:]
def tearDown(self):
"""
Restore C{sys.path} to its original value.
"""
sys.path[:] = self.originalPath
def test_pluginDirectories(self):
"""
L{plugin.pluginPackagePaths} should return a list containing each
directory in C{sys.path} with a suffix based on the supplied package
name.
"""
foo = FilePath('foo')
bar = FilePath('bar')
sys.path = [foo.path, bar.path]
self.assertEqual(
plugin.pluginPackagePaths('dummy.plugins'),
[foo.child('dummy').child('plugins').path,
bar.child('dummy').child('plugins').path])
def test_pluginPackagesExcluded(self):
"""
L{plugin.pluginPackagePaths} should exclude directories which are
Python packages. The only allowed plugin package (the only one
associated with a I{dummy} package which Python will allow to be
imported) will already be known to the caller of
L{plugin.pluginPackagePaths} and will most commonly already be in
the C{__path__} they are about to mutate.
"""
root = FilePath(self.mktemp())
foo = root.child('foo').child('dummy').child('plugins')
foo.makedirs()
foo.child('__init__.py').setContent(b'')
sys.path = [root.child('foo').path, root.child('bar').path]
self.assertEqual(
plugin.pluginPackagePaths('dummy.plugins'),
[root.child('bar').child('dummy').child('plugins').path])
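# Illustrative sketch (an assumption, not taken from the original tests): a
# module that plugin.getPlugins() discovers typically exposes top-level objects
# providing both twisted.plugin.IPlugin and the queried interface, roughly:
#
#     from zope.interface import provider
#     from twisted.plugin import IPlugin
#
#     @provider(IPlugin, ITestPlugin)   # ITestPlugin: the interface queried above
#     class one(object):
#         pass
#
# getPlugins(ITestPlugin, package) then yields every such object it can import
# from the package's plugin search path (see the pluginPackagePaths tests above).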
|
|
import numpy as np
from nolearn_utils.iterators import get_random_idx
from nolearn_utils.image_utils import im_affine_transform
from scipy.ndimage import find_objects
def get_pair_idx(y, same_p=0.5):
n = len(y)
# labels = np.unique(y)
left_idxes = np.zeros(n, dtype=int)
right_idxes = np.zeros(n, dtype=int)
left_labels = np.zeros(n, dtype=y.dtype)
right_labels = np.zeros(n, dtype=y.dtype)
for i in range(n):
is_same = np.random.random() < same_p
        # Sample from the true distribution instead of the unique labels
        # so that the paired dataset has a similar distribution too
left_label = np.random.choice(y)
if is_same:
right_label = left_label
else:
right_label = np.random.choice(y)
left_idxes[i] = np.random.choice(np.where(y == left_label)[0])
        # TODO: it is possible that the left and right of a pair are the
        # exact same image
right_idxes[i] = np.random.choice(np.where(y == right_label)[0])
left_labels[i] = left_label
right_labels[i] = right_label
return (left_idxes, right_idxes), (left_labels, right_labels)
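# Illustrative usage sketch (not part of the original module): get_pair_idx
# returns index and label arrays of the same length as y, which can be used to
# assemble a paired ("siamese"-style) dataset directly, e.g.:
#
#     y = np.array([0, 0, 1, 1, 2, 2])
#     (left_idx, right_idx), (left_lbl, right_lbl) = get_pair_idx(y, same_p=0.5)
#     X_left, X_right = X[left_idx], X[right_idx]
#     is_same = (left_lbl == right_lbl).astype(np.float32)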
class RandomFlipBatchIteratorMixin(object):
"""
    Randomly flip the images horizontally and/or vertically.
    Also flip the bounding box (y) accordingly.
"""
def __init__(self, flip_horizontal_p=0.5, flip_vertical_p=0.5, *args, **kwargs):
super(RandomFlipBatchIteratorMixin, self).__init__(*args, **kwargs)
self.flip_horizontal_p = flip_horizontal_p
self.flip_vertical_p = flip_vertical_p
def transform(self, Xb, yb):
Xb, yb = super(RandomFlipBatchIteratorMixin, self).transform(Xb, yb)
Xb_flipped = Xb.copy()
yb_flipped = yb.copy()
if self.flip_horizontal_p > 0:
horizontal_flip_idx = get_random_idx(Xb, self.flip_horizontal_p)
Xb_flipped[horizontal_flip_idx] = Xb_flipped[horizontal_flip_idx, :, :, ::-1]
yb_flipped[horizontal_flip_idx, 0] = 1 - yb_flipped[horizontal_flip_idx, 0] - yb_flipped[horizontal_flip_idx, 2]
if self.flip_vertical_p > 0:
vertical_flip_idx = get_random_idx(Xb, self.flip_vertical_p)
Xb_flipped[vertical_flip_idx] = Xb_flipped[vertical_flip_idx, :, ::-1, :]
yb_flipped[vertical_flip_idx, 1] = 1 - yb_flipped[vertical_flip_idx, 1] - yb_flipped[vertical_flip_idx, 3]
return Xb_flipped, yb_flipped
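# Explanatory note (not original code): yb rows are assumed to be bounding
# boxes normalised to [0, 1] in the order (left, top, width, height).
# Mirroring an image horizontally maps a box with left edge l and width w to a
# new left edge of 1 - l - w; a vertical flip maps the top edge t to 1 - t - h.
# That is what the index-0 / index-1 updates above compute, e.g. a box with
# l=0.1, w=0.3 flips to l=0.6.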
class PairBatchIteratorMixin(object):
    """
    Yield batches as (left, right) pairs of samples built with get_pair_idx.
    pair_stack controls the output layout: 'hstack', 'oddeven', or any other
    value to yield the (Xb_left, Xb_right), (yb_left, yb_right) tuples as-is.
    """
    def __init__(self, pair_same_p=0.5, pair_stack=True, *args, **kwargs):
        super(PairBatchIteratorMixin, self).__init__(*args, **kwargs)
        self.pair_same_p = pair_same_p
        self.pair_stack = pair_stack
def __iter__(self):
n_samples = self.X.shape[0]
bs = self.batch_size
n_batches = (n_samples + bs - 1) // bs
(left_idxes, right_idxes), (left_labels, right_labels) = get_pair_idx(self.y, same_p=self.pair_same_p)
for i in range(n_batches):
sl = slice(i * bs, (i + 1) * bs)
Xb_left = self.X[left_idxes[sl]]
Xb_right = self.X[right_idxes[sl]]
if self.y is not None:
yb_left = self.y[left_idxes[sl]]
yb_right = self.y[right_idxes[sl]]
else:
yb_left = None
yb_right = None
Xb_left, yb_left = self.transform(Xb_left, yb_left)
Xb_right, yb_right = self.transform(Xb_right, yb_right)
if self.pair_stack == 'hstack':
yield np.hstack([Xb_left, Xb_right]), np.vstack([yb_left, yb_right]).T
elif self.pair_stack == 'oddeven':
                yield (np.hstack([Xb_left, Xb_right]).reshape(
                           -1, Xb_left.shape[1],
                           Xb_left.shape[2], Xb_left.shape[3]),
                       np.vstack([yb_left, yb_right]).T.ravel())
else:
yield (Xb_left, Xb_right), (yb_left, yb_right)
class AffineTransformBBoxBatchIteratorMixin(object):
"""
Apply affine transform (scale, translate and rotation)
with a random chance
"""
def __init__(self, affine_p,
affine_scale_choices=[1.], affine_translation_choices=[0.],
affine_rotation_choices=[0.], affine_shear_choices=[0.],
affine_transform_bbox=False,
*args, **kwargs):
super(AffineTransformBBoxBatchIteratorMixin,
self).__init__(*args, **kwargs)
self.affine_p = affine_p
self.affine_scale_choices = affine_scale_choices
self.affine_translation_choices = affine_translation_choices
self.affine_rotation_choices = affine_rotation_choices
self.affine_shear_choices = affine_shear_choices
if self.verbose:
print('Random transform probability: %.2f' % self.affine_p)
print('Rotation choices', self.affine_rotation_choices)
print('Scale choices', self.affine_scale_choices)
print('Translation choices', self.affine_translation_choices)
print('Shear choices', self.affine_shear_choices)
def transform(self, Xb, yb):
Xb, yb = super(AffineTransformBBoxBatchIteratorMixin,
self).transform(Xb, yb)
        # Skip if affine_p is 0. Setting affine_p to 0 may be useful for
        # quickly disabling affine transformation.
if self.affine_p == 0:
return Xb, yb
image_height = Xb.shape[2]
image_width = Xb.shape[3]
assert image_height == image_width
idx = get_random_idx(Xb, self.affine_p)
Xb_transformed = Xb.copy()
yb_transformed = yb.copy()
for i in idx:
scale = np.random.choice(self.affine_scale_choices)
rotation = np.random.choice(self.affine_rotation_choices)
shear = np.random.choice(self.affine_shear_choices)
translation_y = np.random.choice(self.affine_translation_choices)
translation_x = np.random.choice(self.affine_translation_choices)
transform_kwargs = dict(
scale=scale, rotation=rotation,
shear=shear,
translation_y=translation_y,
translation_x=translation_x
)
img_transformed = im_affine_transform(
Xb[i], **transform_kwargs)
bbox_transformed = get_transformed_bbox(
yb[i] * image_width, image_width, image_height, **transform_kwargs)
Xb_transformed[i] = img_transformed
yb_transformed[i] = np.array(bbox_transformed).astype(np.float32) / image_width
return Xb_transformed, yb_transformed
def get_transformed_bbox(bbox, image_width, image_height, **kwargs):
    # bbox is (left, top, width, height) in pixel units; round to int so the
    # values can be used as array slice indices below.
    l, t, w, h = [int(round(v)) for v in bbox]
    r = l + w
    b = t + h
    y_heatmap = np.zeros((image_height, image_width), dtype=bool)
    y_heatmap[t:b, l:r] = True
y_heatmap = im_affine_transform(y_heatmap[np.newaxis, ...], **kwargs)
y_heatmap = y_heatmap[0].astype(bool)
dets = find_objects(y_heatmap)
if len(dets) == 1:
t = dets[0][0].start
b = dets[0][0].stop
l = dets[0][1].start
r = dets[0][1].stop
w = r - l
h = b - t
else:
l, t, w, h = 0, 0, 0, 0
return l, t, w, h
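# Illustrative sanity check (an assumption, not in the original module): with
# an identity transform the bbox should come back essentially unchanged, since
# the box is rasterised into a heatmap and re-measured with
# scipy.ndimage.find_objects:
#
#     l, t, w, h = get_transformed_bbox(
#         (10, 20, 30, 40), image_width=96, image_height=96,
#         scale=1.0, rotation=0.0, shear=0.0,
#         translation_y=0.0, translation_x=0.0)
#     # expect (l, t, w, h) to be approximately (10, 20, 30, 40)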
class AffineTransformPtsBatchIteratorMixin(object):
"""
Apply affine transform (scale, translate and rotation)
with a random chance
"""
def __init__(self, affine_p,
affine_scale_choices=[1.], affine_translation_choices=[0.],
affine_rotation_choices=[0.], affine_shear_choices=[0.],
affine_transform_bbox=False,
*args, **kwargs):
super(AffineTransformPtsBatchIteratorMixin,
self).__init__(*args, **kwargs)
self.affine_p = affine_p
self.affine_scale_choices = affine_scale_choices
self.affine_translation_choices = affine_translation_choices
self.affine_rotation_choices = affine_rotation_choices
self.affine_shear_choices = affine_shear_choices
if self.verbose:
print('Random transform probability: %.2f' % self.affine_p)
print('Rotation choices', self.affine_rotation_choices)
print('Scale choices', self.affine_scale_choices)
print('Translation choices', self.affine_translation_choices)
print('Shear choices', self.affine_shear_choices)
def transform(self, Xb, yb):
Xb, yb = super(AffineTransformPtsBatchIteratorMixin,
self).transform(Xb, yb)
        # Skip if affine_p is 0. Setting affine_p to 0 may be useful for
        # quickly disabling affine transformation.
if self.affine_p == 0:
return Xb, yb
image_height = Xb.shape[2]
image_width = Xb.shape[3]
assert image_height == image_width
idx = get_random_idx(Xb, self.affine_p)
Xb_transformed = Xb.copy()
yb_transformed = yb.copy()
for i in idx:
scale = np.random.choice(self.affine_scale_choices)
rotation = np.random.choice(self.affine_rotation_choices)
shear = np.random.choice(self.affine_shear_choices)
translation_y = np.random.choice(self.affine_translation_choices)
translation_x = np.random.choice(self.affine_translation_choices)
transform_kwargs = dict(
scale=scale, rotation=rotation,
shear=shear,
translation_y=translation_y,
translation_x=translation_x,
return_tform=True
)
img_transformed, tform = im_affine_transform(
Xb[i], **transform_kwargs
)
Xb_transformed[i] = img_transformed
pts = yb_transformed[i].reshape(-1, 2) * image_height
pts = tform.inverse(pts).ravel()
yb_transformed[i] = pts / image_height
return Xb_transformed, yb_transformed
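# Illustrative composition sketch (assumptions, not part of the original
# module): these mixins are written to be stacked onto a base batch iterator
# via multiple inheritance, with the mixins listed first so that their
# transform() hooks chain through super().  The base class is expected to
# store self.X / self.y, handle batch_size and expose a verbose flag; the
# class names and parameters below are assumed for demonstration only:
#
#     class TrainIterator(AffineTransformBBoxBatchIteratorMixin,
#                         RandomFlipBatchIteratorMixin,
#                         BaseBatchIterator):   # hypothetical base iterator
#         pass
#
#     train_iterator = TrainIterator(
#         batch_size=32, verbose=True,
#         affine_p=0.5, affine_scale_choices=[0.9, 1.0, 1.1],
#         affine_rotation_choices=[-10, 0, 10],
#         flip_horizontal_p=0.5, flip_vertical_p=0.0)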
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import time
from django.utils.translation import ugettext as _
from desktop.lib.django_util import JsonResponse
from libsentry.api import get_api
from libsentry.sentry_site import get_sentry_server_admin_groups
from hadoop.cluster import get_defaultfs
from beeswax.api import autocomplete
LOG = logging.getLogger(__name__)
def fetch_hive_path(request):
path = request.GET['path']
database = None
table = None
column = None
if path:
database = path
if '/' in path:
database, table = path.split('/', 1)
if '.' in table:
table, column = table.split('.', 1)
resp = autocomplete(request, database, table, column)
if database and request.GET['doas'] != request.user.username:
request.GET = request.GET.copy()
request.GET['doas'] = request.GET['doas']
resp = autocomplete(request, database, table, column)
return resp
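# Illustrative note (not original code): the 'path' parameter is expected as
# "<database>[/<table>[.<column>]]", e.g. "default/customers.id" is parsed into
# database='default', table='customers', column='id', while "default" alone
# selects only the database.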
def list_sentry_roles_by_group(request):
result = {'status': -1, 'message': 'Error'}
try:
if request.POST.get('groupName'):
groupName = request.POST['groupName']
else:
      # Admins can see everything, others only the groups they belong to
groupName = None if request.user.groups.filter(name__in=get_sentry_server_admin_groups()).exists() else '*'
roles = get_api(request.user).list_sentry_roles_by_group(groupName)
result['roles'] = sorted(roles, key=lambda role: role['name'])
result['message'] = ''
result['status'] = 0
except Exception, e:
LOG.exception("could not retrieve roles")
if "couldn't be retrieved." in str(e):
result['roles'] = []
result['status'] = 0
else:
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def list_sentry_privileges_by_role(request):
result = {'status': -1, 'message': 'Error'}
try:
roleName = request.POST['roleName']
sentry_privileges = get_api(request.user).list_sentry_privileges_by_role(roleName)
result['sentry_privileges'] = sorted(sentry_privileges, key=lambda privilege: '%s.%s.%s.%s' % (privilege['server'], privilege['database'], privilege['table'], privilege['URI']))
result['message'] = ''
result['status'] = 0
except Exception, e:
LOG.exception("could not list sentry privileges")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def _to_sentry_privilege(privilege):
return {
'privilegeScope': privilege['privilegeScope'],
'serverName': privilege['serverName'],
'dbName': privilege['dbName'],
'tableName': privilege['tableName'],
'columnName': privilege['columnName'],
'URI': _massage_uri(privilege['URI'].encode('utf-8')),
'action': privilege['action'],
'createTime': privilege['timestamp'],
'grantOption': 1 if privilege['grantOption'] else 0,
}
def _hive_add_privileges(user, role, privileges):
api = get_api(user)
_privileges = []
for privilege in privileges:
if privilege['status'] not in ('deleted',):
api.alter_sentry_role_grant_privilege(role['name'], _to_sentry_privilege(privilege))
# Mocked until Sentry API returns the info. Not used currently as we refresh the whole role.
_privileges.append({
'timestamp': int(time.time()),
'database': privilege.get('dbName'),
'action': privilege.get('action'),
'scope': privilege.get('privilegeScope'),
'table': privilege.get('tableName'),
'column': privilege.get('columnName'),
'URI': privilege.get('URI').encode('utf-8'),
'server': privilege.get('serverName'),
'grantOption': privilege.get('grantOption') == 1
})
return _privileges
def _massage_uri(uri):
if uri:
if uri.startswith('hdfs:///'):
uri = uri.replace('hdfs://', get_defaultfs())
elif uri.startswith('/'):
uri = get_defaultfs() + uri
return uri
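# Illustrative note (not original code): _massage_uri fully qualifies bare or
# scheme-less HDFS URIs.  With a default FS of "hdfs://nn.example.com:8020"
# (hostname assumed for the example):
#     "/data/dir"        -> "hdfs://nn.example.com:8020/data/dir"
#     "hdfs:///data/dir" -> "hdfs://nn.example.com:8020/data/dir"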
def _get_splitted_path(path):
parts = path.split('.')
db, table, column = '', '', ''
if len(parts) >= 1:
db = parts[0]
if len(parts) >= 2:
table = parts[1]
if len(parts) >= 3:
column = parts[2]
return db, table, column
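# Illustrative note (not original code): paths handled by _get_splitted_path
# are dot-separated, e.g.
#     _get_splitted_path('default.customers.id') -> ('default', 'customers', 'id')
#     _get_splitted_path('default')              -> ('default', '', '')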
def _drop_sentry_privilege(user, role, authorizable):
return get_api(user).alter_sentry_role_revoke_privilege(role['name'], _to_sentry_privilege(authorizable))
def create_role(request):
result = {'status': -1, 'message': 'Error'}
try:
role = json.loads(request.POST['role'])
api = get_api(request.user)
api.create_sentry_role(role['name'])
privileges = [privilege for privilege in role['privileges'] if privilege['status'] not in ('deleted', 'alreadydeleted')]
result['privileges'] = _hive_add_privileges(request.user, role, privileges)
api.alter_sentry_role_add_groups(role['name'], role['groups'])
result['role'] = {"name": role['name'], "groups": role['groups']}
result['message'] = _('Role created!')
result['status'] = 0
except Exception, e:
LOG.exception("could not create role")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def update_role_groups(request):
result = {'status': -1, 'message': 'Error'}
try:
role = json.loads(request.POST['role'])
new_groups = set(role['groups']) - set(role['originalGroups'])
deleted_groups = set(role['originalGroups']) - set(role['groups'])
api = get_api(request.user)
if new_groups:
api.alter_sentry_role_add_groups(role['name'], new_groups)
if deleted_groups:
api.alter_sentry_role_delete_groups(role['name'], deleted_groups)
result['message'] = ''
result['status'] = 0
except Exception, e:
LOG.exception("could not update role groups")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def save_privileges(request):
result = {'status': -1, 'message': 'Error'}
try:
role = json.loads(request.POST['role'])
new_privileges = [privilege for privilege in role['privilegesChanged'] if privilege['status'] == 'new']
result['privileges'] = _hive_add_privileges(request.user, role, new_privileges)
deleted_privileges = [privilege for privilege in role['privilegesChanged'] if privilege['status'] == 'deleted']
for privilege in deleted_privileges:
_drop_sentry_privilege(request.user, role, privilege)
modified_privileges = [privilege for privilege in role['privilegesChanged'] if privilege['status'] == 'modified']
old_privileges_ids = [privilege['id'] for privilege in modified_privileges]
_hive_add_privileges(request.user, role, modified_privileges)
for privilege in role['originalPrivileges']:
if privilege['id'] in old_privileges_ids:
_drop_sentry_privilege(request.user, role, privilege)
result['message'] = _('Privileges updated')
result['status'] = 0
except Exception, e:
LOG.exception("could not save privileges")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def grant_privilege(request):
result = {'status': -1, 'message': 'Error'}
try:
roleName = json.loads(request.POST['roleName'])
privilege = json.loads(request.POST['privilege'])
result['privileges'] = _hive_add_privileges(request.user, {'name': roleName}, [privilege])
result['message'] = _('Privilege granted successfully to %s.') % roleName
result['status'] = 0
except Exception, e:
LOG.exception("could not grant privileges")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def create_sentry_role(request):
result = {'status': -1, 'message': 'Error'}
try:
roleName = request.POST['roleName']
get_api(request.user).create_sentry_role(roleName)
result['message'] = _('Role and privileges created.')
result['status'] = 0
except Exception, e:
LOG.exception("could not create role")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def drop_sentry_role(request):
result = {'status': -1, 'message': 'Error'}
try:
roleName = request.POST['roleName']
get_api(request.user).drop_sentry_role(roleName)
result['message'] = _('Role and privileges deleted.')
result['status'] = 0
except Exception, e:
LOG.exception("could not drop role")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def list_sentry_privileges_by_authorizable(request):
result = {'status': -1, 'message': 'Error'}
try:
groups = [request.POST['groupName']] if request.POST.get('groupName') else None
authorizableSet = [json.loads(request.POST['authorizableHierarchy'])]
_privileges = []
for authorizable, roles in get_api(request.user).list_sentry_privileges_by_authorizable(authorizableSet=authorizableSet, groups=groups):
for role, privileges in roles.iteritems():
for privilege in privileges:
privilege['roleName'] = role
_privileges.append(privilege)
result['privileges'] = sorted(_privileges, key=lambda privilege: privilege['roleName'])
result['message'] = ''
result['status'] = 0
except Exception, e:
LOG.exception("could not list privileges by authorizable")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def bulk_delete_privileges(request):
result = {'status': -1, 'message': 'Error'}
try:
checkedPaths = json.loads(request.POST['checkedPaths'])
authorizableHierarchy = json.loads(request.POST['authorizableHierarchy'])
for path in [path['path'] for path in checkedPaths]:
db, table, column = _get_splitted_path(path)
authorizableHierarchy.update({
'db': db,
'table': table,
'column': column,
})
get_api(request.user).drop_sentry_privileges(authorizableHierarchy)
result['message'] = _('Privileges deleted.')
result['status'] = 0
except Exception, e:
LOG.exception("could not bulk delete privileges")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def bulk_add_privileges(request):
result = {'status': -1, 'message': 'Error'}
try:
privileges = json.loads(request.POST['privileges'])
checkedPaths = json.loads(request.POST['checkedPaths'])
authorizableHierarchy = json.loads(request.POST['authorizableHierarchy'])
privileges = [privilege for privilege in privileges if privilege['status'] == '']
for path in [path['path'] for path in checkedPaths]:
db, table, column = _get_splitted_path(path)
privilegeScope = 'COLUMN' if column else 'TABLE' if table else 'DATABASE' if db else 'SERVER'
authorizableHierarchy.update({
'db': db,
'table': table,
'column': column,
})
for privilege in privileges:
privilege['dbName'] = db
privilege['tableName'] = table
privilege['columnName'] = column
privilege['privilegeScope'] = privilegeScope
_hive_add_privileges(request.user, {'name': privilege['roleName']}, [privilege])
result['message'] = _('Privileges added.')
result['status'] = 0
except Exception, e:
LOG.exception("could not bulk add privileges")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def rename_sentry_privilege(request):
result = {'status': -1, 'message': 'Error'}
try:
oldAuthorizable = json.loads(request.POST['oldAuthorizable'])
newAuthorizable = json.loads(request.POST['newAuthorizable'])
get_api(request.user).rename_sentry_privilege(oldAuthorizable, newAuthorizable)
result['message'] = _('Privilege deleted.')
result['status'] = 0
except Exception, e:
LOG.exception("could not rename privilege")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
def list_sentry_privileges_for_provider(request):
result = {'status': -1, 'message': 'Error'}
try:
groups = json.loads(request.POST['groups'])
roleSet = json.loads(request.POST['roleSet'])
authorizableHierarchy = json.loads(request.POST['authorizableHierarchy'])
sentry_privileges = get_api(request.user).list_sentry_privileges_for_provider(groups=groups, roleSet=roleSet, authorizableHierarchy=authorizableHierarchy)
result['sentry_privileges'] = sentry_privileges
result['message'] = ''
result['status'] = 0
except Exception, e:
LOG.exception("could not list privileges for provider")
result['message'] = unicode(str(e), "utf8")
return JsonResponse(result)
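# Illustrative sketch (assumptions, not part of the original module): the
# privilege dicts posted by the UI and consumed by _to_sentry_privilege /
# _hive_add_privileges carry at least the keys below; the values shown are
# made up for demonstration:
#
#     {
#         'status': 'new',            # '', 'new', 'modified', 'deleted', ...
#         'privilegeScope': 'TABLE',
#         'serverName': 'server1',
#         'dbName': 'default',
#         'tableName': 'customers',
#         'columnName': '',
#         'URI': '',
#         'action': 'SELECT',
#         'timestamp': 1400000000,
#         'grantOption': False,
#         'roleName': 'analyst',      # used by grant/bulk-add endpoints
#     }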
|
|
from base64 import b64decode
from bson import ObjectId
import simplejson as json
from eve.tests import TestBase
from eve.tests.utils import DummyEvent
from eve.tests.test_settings import MONGO_DBNAME
from eve import STATUS_OK, LAST_UPDATED, DATE_CREATED, ISSUES, STATUS, ETAG
from eve.methods.post import post, post_internal
from io import BytesIO
from werkzeug.datastructures import MultiDict
class TestPost(TestBase):
def test_unknown_resource(self):
_, status = self.post(self.unknown_resource_url, data={})
self.assert404(status)
def test_readonly_resource(self):
_, status = self.post(self.readonly_resource_url, data={})
self.assert405(status)
def test_post_to_item_endpoint(self):
_, status = self.post(self.item_id_url, data={})
self.assert405(status)
def test_validation_error(self):
r, status = self.post(self.known_resource_url, data={"ref": "123"})
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'ref': 'min length is 25'})
r, status = self.post(self.known_resource_url, data={"prog": 123})
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'ref': 'required'})
def test_post_bulk_insert_on_disabled_bulk(self):
r, status = self.post(
self.disabled_bulk_url,
data=[{'string_field': '123'}, {'string_field': '123'}])
self.assert400(status)
def test_post_empty_bulk_insert(self):
r, status = self.post(self.empty_resource_url, data=[])
self.assert400(status)
def test_post_empty_resource(self):
data = []
for _ in range(10):
data.append({"inv_number": self.random_string(10)})
r, status = self.post(self.empty_resource_url, data=data)
self.assert201(status)
self.assertPostResponse(r)
def test_post_string(self):
test_field = 'ref'
test_value = "1234567890123456789054321"
data = {test_field: test_value}
self.assertPostItem(data, test_field, test_value)
def test_post_duplicate_key(self):
data = {'ref': '1234567890123456789054321'}
r = self.perform_post(data)
id_field = self.domain[self.known_resource]['id_field']
item_id = r[id_field]
data = {'ref': '0123456789012345678901234', id_field: item_id}
r, status = self.post(self.known_resource_url, data=data)
self.assertEqual(status, 409)
def test_post_integer(self):
del(self.domain['contacts']['schema']['ref']['required'])
test_field = 'prog'
test_value = 1
data = {test_field: test_value}
self.assertPostItem(data, test_field, test_value)
def test_post_list_as_array(self):
del(self.domain['contacts']['schema']['ref']['required'])
test_field = "role"
test_value = ["vendor", "client"]
data = {test_field: test_value}
self.assertPostItem(data, test_field, test_value)
def test_post_rows(self):
del(self.domain['contacts']['schema']['ref']['required'])
test_field = "rows"
test_value = [
{'sku': 'AT1234', 'price': 99},
{'sku': 'XF9876', 'price': 9999}
]
data = {test_field: test_value}
self.assertPostItem(data, test_field, test_value)
def test_post_list(self):
del(self.domain['contacts']['schema']['ref']['required'])
test_field = "alist"
test_value = ["a_string", 99]
data = {test_field: test_value}
self.assertPostItem(data, test_field, test_value)
def test_post_integer_zero(self):
del(self.domain['contacts']['schema']['ref']['required'])
test_field = "aninteger"
test_value = 0
data = {test_field: test_value}
self.assertPostItem(data, test_field, test_value)
def test_post_float_zero(self):
del(self.domain['contacts']['schema']['ref']['required'])
test_field = "afloat"
test_value = 0.0
data = {test_field: test_value}
self.assertPostItem(data, test_field, test_value)
def test_post_dict(self):
del(self.domain['contacts']['schema']['ref']['required'])
test_field = "location"
test_value = {'address': 'an address', 'city': 'a city'}
data = {test_field: test_value}
self.assertPostItem(data, test_field, test_value)
def test_post_datetime(self):
del(self.domain['contacts']['schema']['ref']['required'])
test_field = "born"
test_value = "Tue, 06 Nov 2012 10:33:31 GMT"
data = {test_field: test_value}
self.assertPostItem(data, test_field, test_value)
def test_post_objectid(self):
del(self.domain['contacts']['schema']['ref']['required'])
test_field = 'tid'
test_value = "50656e4538345b39dd0414f0"
data = {test_field: test_value}
self.assertPostItem(data, test_field, test_value)
def test_post_null_objectid(self):
# verify that #341 is fixed.
del(self.domain['contacts']['schema']['ref']['required'])
test_field = 'tid'
test_value = None
data = {test_field: test_value}
self.assertPostItem(data, test_field, test_value)
def test_post_default_value(self):
test_field = 'title'
test_value = "Mr."
data = {'ref': '9234567890123456789054321'}
self.assertPostItem(data, test_field, test_value)
def test_post_default_value_none(self):
        # default values that evaluate as falsy (0, '', False) were ignored
        # prior to 0.1.1
title = self.domain['contacts']['schema']['title']
title['default'] = ''
self.app.set_defaults()
data = {"ref": "UUUUUUUUUUUUUUUUUUUUUUUUU"}
self.assertPostItem(data, 'title', '')
title['type'] = 'integer'
title['default'] = 0
self.app.set_defaults()
data = {"ref": "TTTTTTTTTTTTTTTTTTTTTTTTT"}
self.assertPostItem(data, 'title', 0)
title['type'] = 'boolean'
title['default'] = False
self.app.set_defaults()
data = {"ref": "QQQQQQQQQQQQQQQQQQQQQQQQQ"}
self.assertPostItem(data, 'title', False)
def test_multi_post_valid(self):
data = [
{"ref": "9234567890123456789054321"},
{"ref": "5432112345678901234567890", "role": ["agent"]},
]
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
results = r['_items']
self.assertEqual(results[0]['_status'], 'OK')
self.assertEqual(results[1]['_status'], 'OK')
with self.app.test_request_context():
contacts = self.app.data.driver.db['contacts']
r = contacts.find({"ref": "9234567890123456789054321"}).count()
self.assertTrue(r == 1)
r = contacts.find({"ref": "5432112345678901234567890"}).count()
self.assertTrue(r == 1)
def test_multi_post_invalid(self):
data = [
{"ref": "9234567890123456789054321"},
{"prog": 9999},
{"ref": "5432112345678901234567890", "role": ["agent"]},
{"ref": self.item_ref},
{"ref": "9234567890123456789054321", "tid": "12345678"},
]
r, status = self.post(self.known_resource_url, data=data)
self.assertValidationErrorStatus(status)
results = r['_items']
self.assertEqual(results[0]['_status'], 'OK')
self.assertEqual(results[2]['_status'], 'OK')
self.assertValidationError(results[1], {'ref': 'required'})
self.assertValidationError(results[3], {'ref': 'unique'})
self.assertValidationError(results[4], {'tid': 'ObjectId'})
id_field = self.domain[self.known_resource]['id_field']
self.assertTrue(id_field not in results[0])
self.assertTrue(id_field not in results[1])
self.assertTrue(id_field not in results[2])
self.assertTrue(id_field not in results[3])
with self.app.test_request_context():
contacts = self.app.data.driver.db['contacts']
r = contacts.find({"prog": 9999}).count()
self.assertTrue(r == 0)
r = contacts.find({"ref": "9234567890123456789054321"}).count()
self.assertTrue(r == 0)
def test_post_x_www_form_urlencoded(self):
test_field = "ref"
test_value = "1234567890123456789054321"
data = {test_field: test_value}
r, status = self.parse_response(self.test_client.post(
self.known_resource_url, data=data))
self.assert201(status)
self.assertTrue('OK' in r[STATUS])
self.assertPostResponse(r)
def test_post_x_www_form_urlencoded_number_serialization(self):
del(self.domain['contacts']['schema']['ref']['required'])
test_field = "anumber"
test_value = 34
data = {test_field: test_value}
r, status = self.parse_response(self.test_client.post(
self.known_resource_url, data=data))
self.assert201(status)
self.assertTrue('OK' in r[STATUS])
self.assertPostResponse(r)
def test_post_auto_collapse_multiple_keys(self):
self.app.config['AUTO_COLLAPSE_MULTI_KEYS'] = True
self.app.register_resource('test_res', {
'schema': {
'list_field': {
'type': 'list',
'schema': {
'type': 'string'
}
}
}
})
data = MultiDict([("list_field", "value1"),
("list_field", "value2")])
resp = self.test_client.post(
'/test_res/', data=data,
content_type='application/x-www-form-urlencoded')
r, status = self.parse_response(resp)
self.assert201(status)
resp = self.test_client.post('/test_res/', data=data,
content_type='multipart/form-data')
r, status = self.parse_response(resp)
self.assert201(status)
def test_post_auto_collapse_media_list(self):
self.app.config['AUTO_COLLAPSE_MULTI_KEYS'] = True
self.app.register_resource('test_res', {
'schema': {
'list_field': {
'type': 'list',
'schema': {
'type': 'media'
}
}
}
})
# Create a document
data = MultiDict([('list_field',
(BytesIO(b'file_content1'), 'test1.txt')),
('list_field',
(BytesIO(b'file_content2'), 'test2.txt'))])
resp = self.test_client.post('/test_res/', data=data,
content_type='multipart/form-data')
r, status = self.parse_response(resp)
self.assert201(status)
# check that the files were created
_db = self.connection[MONGO_DBNAME]
id_field = self.domain['test_res']['id_field']
obj = _db.test_res.find_one({id_field: ObjectId(r[id_field])})
media_ids = obj['list_field']
self.assertEqual(len(media_ids), 2)
with self.app.test_request_context():
for i in [0, 1]:
self.assertTrue(
self.app.media.exists(media_ids[i], 'test_res'))
# GET the document and check the file content is correct
r, status = self.parse_response(
self.test_client.get('/test_res/%s' % r[id_field]))
files = r['list_field']
self.assertEqual(b64decode(files[0]), b'file_content1')
self.assertEqual(b64decode(files[1]), b'file_content2')
# DELETE the document
resp = self.test_client.delete('/test_res/%s' % r['_id'],
headers={'If-Match': r['_etag']})
r, status = self.parse_response(resp)
self.assert204(status)
# Check files were deleted
with self.app.test_request_context():
for i in [0, 1]:
self.assertFalse(
self.app.media.exists(media_ids[i], 'test_res'))
def test_post_auto_create_lists(self):
self.app.config['AUTO_CREATE_LISTS'] = True
self.app.register_resource('test_res', {
'schema': {
'list_field': {
'type': 'list',
'schema': {
'type': 'string'
}
}
}
})
data = MultiDict([("list_field", "value1")])
resp = self.test_client.post(
'/test_res/', data=data,
content_type='application/x-www-form-urlencoded')
r, status = self.parse_response(resp)
self.assert201(status)
def test_post_referential_integrity(self):
data = {"person": self.unknown_item_id}
r, status = self.post('/invoices/', data=data)
self.assertValidationErrorStatus(status)
expected = ("value '%s' must exist in resource '%s', field '%s'" %
(self.unknown_item_id, 'contacts',
self.domain['contacts']['id_field']))
self.assertValidationError(r, {'person': expected})
data = {"person": self.item_id}
r, status = self.post('/invoices/', data=data)
self.assert201(status)
self.assertPostResponse(r)
def test_dbref_post_referential_integrity(self):
data = {"persondbref": {"$col": "contacts",
"$id": self.unknown_item_id}}
r, status = self.post('/invoices/', data=data)
self.assertValidationErrorStatus(status)
expected = ("value '%s' must exist in resource '%s', field '%s'" %
(self.unknown_item_id, 'contacts',
self.domain['contacts']['id_field']))
self.assertValidationError(r, {'persondbref': expected})
data = {"persondbref": {"$col": "contacts", "$id": self.item_id}}
r, status = self.post('/invoices/', data=data)
self.assert201(status)
self.assertPostResponse(r)
def test_post_referential_integrity_list(self):
data = {"invoicing_contacts": [self.item_id, self.unknown_item_id]}
r, status = self.post('/invoices/', data=data)
self.assertValidationErrorStatus(status)
expected = ("value '%s' must exist in resource '%s', field '%s'" %
(self.unknown_item_id, 'contacts',
self.domain['contacts']['id_field']))
self.assertValidationError(r, {'invoicing_contacts': expected})
data = {"invoicing_contacts": [self.item_id, self.item_id]}
r, status = self.post('/invoices/', data=data)
self.assert201(status)
self.assertPostResponse(r)
def test_post_allow_unknown(self):
del(self.domain['contacts']['schema']['ref']['required'])
data = {"unknown": "unknown"}
r, status = self.post(self.known_resource_url, data=data)
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'unknown': 'unknown'})
# since resource settings are only set at app startup we set
# those that influence the 'allow_unknown' property by hand (so we
# don't have to re-initialize the whole app.)
settings = self.app.config['DOMAIN'][self.known_resource]
settings['allow_unknown'] = True
settings['datasource']['projection'] = None
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
self.assertPostResponse(r)
# test that the unknown field is also returned with subsequent get
# requests
id = r[self.domain[self.known_resource]['id_field']]
r = self.test_client.get('%s/%s' % (self.known_resource_url, id))
r_data = json.loads(r.get_data())
self.assertTrue('unknown' in r_data)
self.assertEqual('unknown', r_data['unknown'])
def test_post_with_content_type_charset(self):
test_field = 'ref'
test_value = "1234567890123456789054321"
data = {test_field: test_value}
r, status = self.post(self.known_resource_url, data=data,
content_type='application/json; charset=utf-8')
self.assert201(status)
self.assertPostResponse(r)
def test_post_with_extra_response_fields(self):
self.domain['contacts']['extra_response_fields'] = ['ref', 'notreally']
test_field = 'ref'
test_value = "1234567890123456789054321"
data = {test_field: test_value}
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
self.assertTrue('ref' in r and 'notreally' not in r)
def test_post_with_excluded_response_fields(self):
data = {
'email': '[email protected]',
'password': 'password'
}
r, status = self.post('login', data=data)
self.assert201(status)
login_id = r[self.domain['login']['id_field']]
r = self.test_client.get('%s/%s' % ('login', login_id))
r_data = json.loads(r.get_data())
self.assertTrue('password' not in r_data)
self.assertTrue('email' in r_data)
def test_post_write_concern(self):
        # should get a 500 since there's no replicaset on the mongod test
        # instance
self.domain['contacts']['mongo_write_concern'] = {'w': 2}
test_field = 'ref'
test_value = "1234567890123456789054321"
data = {test_field: test_value}
_, status = self.post(self.known_resource_url, data=data)
self.assert500(status)
# 0 and 1 are the only valid values for 'w' on our mongod instance
self.domain['contacts']['mongo_write_concern'] = {'w': 0}
test_value = "1234567890123456789054329"
data = {test_field: test_value}
_, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
def test_post_with_get_override(self):
# a GET request with POST override turns into a POST request.
test_field = 'ref'
test_value = "1234567890123456789054321"
data = json.dumps({test_field: test_value})
headers = [('X-HTTP-Method-Override', 'POST'),
('Content-Type', 'application/json')]
r = self.test_client.get(self.known_resource_url, data=data,
headers=headers)
self.assert201(r.status_code)
self.assertPostResponse(json.loads(r.get_data()))
def test_post_list_of_objectid(self):
objectid = '50656e4538345b39dd0414f0'
del(self.domain['contacts']['schema']['ref']['required'])
data = {'id_list': ['%s' % objectid]}
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
r, status = self.get(self.known_resource, '?where={"id_list": '
'{"$in": ["%s"]}}' % objectid)
self.assert200(status)
        self.assertEqual(len(r['_items']), 1)
self.assertTrue('%s' % objectid in r['_items'][0]['id_list'])
def test_post_nested_dict_objectid(self):
objectid = '50656e4538345b39dd0414f0'
del(self.domain['contacts']['schema']['ref']['required'])
data = {'id_list_of_dict': [{'id': '%s' % objectid}]}
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
r, status = self.get(self.known_resource,
'?where={"id_list_of_dict.id": ' '"%s"}'
% objectid)
        self.assertEqual(len(r['_items']), 1)
self.assertTrue('%s' % objectid in
r['_items'][0]['id_list_of_dict'][0]['id'])
def test_post_valueschema_with_objectid(self):
del(self.domain['contacts']['schema']['ref']['required'])
data = {'dict_valueschema': {'id': {'challenge':
'50656e4538345b39dd0414f0'}}}
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
def test_post_list_fixed_len(self):
objectid = '50656e4538345b39dd0414f0'
del(self.domain['contacts']['schema']['ref']['required'])
data = {'id_list_fixed_len': ['%s' % objectid]}
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
r, status = self.get(self.known_resource,
'?where={"id_list_fixed_len": '
'{"$in": ["%s"]}}' % objectid)
self.assert200(status)
        self.assertEqual(len(r['_items']), 1)
self.assertTrue('%s' % objectid in r['_items'][0]['id_list_fixed_len'])
def test_custom_issues(self):
self.app.config['ISSUES'] = 'errors'
r, status = self.post(self.known_resource_url, data={"ref": "123"})
self.assertValidationErrorStatus(status)
self.assertTrue('errors' in r and ISSUES not in r)
def test_custom_status(self):
self.app.config['STATUS'] = 'report'
r, status = self.post(self.known_resource_url, data={"ref": "123"})
self.assertValidationErrorStatus(status)
self.assertTrue('report' in r and STATUS not in r)
def test_custom_etag_update_date(self):
self.app.config['ETAG'] = '_myetag'
r, status = self.post(self.known_resource_url,
data={"ref": "1234567890123456789054321"})
self.assert201(status)
self.assertTrue('_myetag' in r and ETAG not in r)
def test_custom_date_updated(self):
self.app.config['LAST_UPDATED'] = '_update_date'
r, status = self.post(self.known_resource_url,
data={"ref": "1234567890123456789054321"})
self.assert201(status)
self.assertTrue('_update_date' in r and LAST_UPDATED not in r)
def test_subresource(self):
response, status = self.post('users/%s/invoices' %
self.item_id, data={})
self.assert201(status)
self.assertPostResponse(response)
invoice_id = response.get(self.domain['peopleinvoices']['id_field'])
response, status = self.get('users/%s/invoices/%s' %
(self.item_id, invoice_id))
self.assert200(status)
self.assertEqual(response.get('person'), self.item_id)
def test_subresource_required_ref(self):
response, status = self.post('users/%s/required_invoices' %
self.item_id, data={})
self.assert201(status)
self.assertPostResponse(response)
invoice_id = response.get(self.domain['required_invoices']['id_field'])
response, status = self.get('users/%s/required_invoices/%s' %
(self.item_id, invoice_id))
self.assert200(status)
self.assertEqual(response.get('person'), self.item_id)
def test_post_ifmatch_disabled(self):
# if IF_MATCH is disabled, then we get no etag in the payload.
self.app.config['IF_MATCH'] = False
test_field = 'ref'
test_value = "1234567890123456789054321"
data = {test_field: test_value}
r, status = self.post(self.known_resource_url, data=data)
self.assertTrue(ETAG not in r)
def test_post_custom_idfield(self):
# Test that we can post a document with a custom id_field.
id_field = 'sku'
product = {id_field: 'FOO', 'title': 'Foobar'}
r, status = self.post('products', data=product)
self.assert201(status)
self.assertTrue(id_field in r)
self.assertItemLink(r['_links'], r[id_field])
def test_post_with_relation_to_custom_idfield(self):
# Test that we can post a document that relates to a resource with a
# custom id_field.
id_field = 'sku'
db = self.connection[MONGO_DBNAME]
existing_product = db.products.find_one()
product = {
id_field: 'BAR',
'title': 'Foobar',
'parent_product': existing_product[id_field]
}
r, status = self.post('products', data=product)
self.assert201(status)
self.assertTrue(id_field in r)
self.assertItemLink(r['_links'], r[id_field])
r, status = self.get('products', item='BAR')
self.assertEqual(r['parent_product'], existing_product[id_field])
def test_post_bandwidth_saver(self):
data = {'inv_number': self.random_string(10)}
# bandwidth_saver is on by default
self.assertTrue(self.app.config['BANDWIDTH_SAVER'])
r, status = self.post(self.empty_resource_url, data=data)
self.assert201(status)
self.assertPostResponse(r)
self.assertFalse('inv_number' in r)
etag = r[self.app.config['ETAG']]
r, status = self.get(
self.empty_resource, '',
r[self.domain[self.empty_resource]['id_field']])
self.assertEqual(etag, r[self.app.config['ETAG']])
# test return all fields (bandwidth_saver off)
self.app.config['BANDWIDTH_SAVER'] = False
r, status = self.post(self.empty_resource_url, data=data)
self.assert201(status)
self.assertPostResponse(r)
self.assertTrue('inv_number' in r)
etag = r[self.app.config['ETAG']]
r, status = self.get(
self.empty_resource, '',
r[self.domain[self.empty_resource]['id_field']])
self.assertEqual(etag, r[self.app.config['ETAG']])
def test_post_alternative_payload(self):
payl = {"ref": "5432112345678901234567890", "role": ["agent"]}
with self.app.test_request_context(self.known_resource_url):
r, _, _, status, _ = post(self.known_resource, payl=payl)
self.assert201(status)
self.assertPostResponse(r)
def test_post_dependency_fields_with_default(self):
# test that default values are resolved before validation. See #353.
del(self.domain['contacts']['schema']['ref']['required'])
test_field = 'dependency_field2'
test_value = 'a value'
data = {test_field: test_value}
self.assertPostItem(data, test_field, test_value)
def test_post_dependency_required_fields(self):
del(self.domain['contacts']['schema']['ref']['required'])
schema = self.domain['contacts']['schema']
schema['dependency_field3']['required'] = True
r, status = self.post(self.known_resource_url, data={})
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'dependency_field3': 'required'})
        # the required field's dependency value matches the dependency field's
        # default value; validation still fails since the required field itself
        # is still missing. See #665.
schema['dependency_field3']['dependencies'] = {'dependency_field1':
'default'}
r, status = self.post(self.known_resource_url, data={})
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'dependency_field3': 'required'})
r, status = self.post(self.known_resource_url,
data={'dependency_field3': 'hello'})
self.assert201(status)
def test_post_dependency_fields_with_values(self):
# test that dependencies values are validated correctly. See #547.
del(self.domain['contacts']['schema']['ref']['required'])
schema = {
'field1': {
'required': False,
'default': 'one'
},
'field2': {
'required': True,
'dependencies': {'field1': ['one', 'two']}
}
}
settings = {
'RESOURCE_METHODS': ['GET', 'POST', 'DELETE'],
'ITEM_METHODS': ['GET', 'PATCH', 'PUT', 'DELETE'],
'schema': schema
}
self.app.register_resource('posts', settings)
data = {"field1": "three", "field2": 7}
r, s = self.post('posts', data=data)
self.assert422(s)
data = {"field2": 7}
r, s = self.post('posts', data=data)
self.assert201(s)
data = {"field1": "one", "field2": 7}
r, s = self.post('posts', data=data)
self.assert201(s)
data = {"field1": "two", "field2": 7}
r, s = self.post('posts', data=data)
self.assert201(s)
def test_post_dependency_fields_with_subdocuments(self):
# test that dependencies with sub-document fields are properly
# validated. See #706.
del(self.domain['contacts']['schema']['ref']['required'])
schema = {
'field1': {
'type': 'dict',
'schema': {
'address': {'type': 'string'}
}
},
'field2': {
'dependencies': {'field1.address': ['one', 'two']}
}
}
settings = {
'RESOURCE_METHODS': ['GET', 'POST', 'DELETE'],
'ITEM_METHODS': ['GET', 'PATCH', 'PUT', 'DELETE'],
'schema': schema
}
self.app.register_resource('endpoint', settings)
data = {"field1": {"address": "three"}, "field2": 7}
r, s = self.post('endpoint', data=data)
self.assert422(s)
data = {"field1": {"address": "one"}, "field2": 7}
r, s = self.post('endpoint', data=data)
self.assert201(s)
data = {"field1": {"address": "two"}, "field2": 7}
r, s = self.post('endpoint', data=data)
self.assert201(s)
def test_post_readonly_field_with_default(self):
# test that a read only field with a 'default' setting is correctly
# validated now that we resolve field values before validation.
del(self.domain['contacts']['schema']['ref']['required'])
test_field = 'read_only_field'
# thou shalt not pass.
test_value = 'a random value'
data = {test_field: test_value}
r, status = self.post(self.known_resource_url, data=data)
self.assertValidationErrorStatus(status)
# this will not pass even if value matches 'default' setting.
        # (hey, it's still a read-only field so you can't reset it)
test_value = 'default'
data = {test_field: test_value}
r, status = self.post(self.known_resource_url, data=data)
self.assertValidationErrorStatus(status)
def test_post_readonly_in_dict(self):
# Test that a post with a readonly field inside a dict is properly
        # validated (even if it has a default value)
del(self.domain['contacts']['schema']['ref']['required'])
test_field = 'dict_with_read_only'
test_value = {'read_only_in_dict': 'default'}
data = {test_field: test_value}
r, status = self.post(self.known_resource_url, data=data)
self.assertValidationErrorStatus(status)
def test_post_valueschema_dict(self):
""" make sure Cerberus#48 is fixed """
del(self.domain['contacts']['schema']['ref']['required'])
r, status = self.post(self.known_resource_url,
data={"valueschema_dict": {"k1": "1"}})
self.assertValidationErrorStatus(status)
issues = r[ISSUES]
self.assertTrue('valueschema_dict' in issues)
self.assertEqual(issues['valueschema_dict'],
{'k1': 'must be of integer type'})
r, status = self.post(self.known_resource_url,
data={"valueschema_dict": {"k1": 1}})
self.assert201(status)
def test_post_propertyschema_dict(self):
del(self.domain['contacts']['schema']['ref']['required'])
r, status = self.post(self.known_resource_url,
data={"propertyschema_dict": {"aaa": 1}})
self.assert201(status)
r, status = self.post(self.known_resource_url,
data={"propertyschema_dict": {"AAA": "1"}})
self.assertValidationErrorStatus(status)
issues = r[ISSUES]
self.assertTrue('propertyschema_dict' in issues)
self.assertEqual(issues['propertyschema_dict'],
'propertyschema_dict')
def test_post_internal(self):
# test that post_internal is available and working properly.
test_field = 'ref'
test_value = "1234567890123456789054321"
payload = {test_field: test_value}
with self.app.test_request_context(self.known_resource_url):
r, _, _, status, _ = post_internal(self.known_resource,
payl=payload)
self.assert201(status)
def test_post_internal_skip_validation(self):
# test that when skip_validation is active everything behaves as
# expected. Also make sure that #726 is fixed.
test_field = 'ref'
test_value = "1234567890123456789054321"
payload = {test_field: test_value}
with self.app.test_request_context(self.known_resource_url):
r, _, _, status, _ = post_internal(self.known_resource,
payl=payload,
skip_validation=True)
self.assert201(status)
def test_post_nested(self):
del(self.domain['contacts']['schema']['ref']['required'])
data = {'location.city': 'a nested city',
'location.address': 'a nested address'}
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
values = self.compare_post_with_get(
r[self.domain[self.known_resource]['id_field']],
['location']).pop()
self.assertEqual(values['city'], 'a nested city')
self.assertEqual(values['address'], 'a nested address')
def test_post_error_as_list(self):
del(self.domain['contacts']['schema']['ref']['required'])
self.app.config['VALIDATION_ERROR_AS_LIST'] = True
data = {'unknown_field': 'a value'}
r, status = self.post(self.known_resource_url, data=data)
self.assert422(status)
error = r[ISSUES]['unknown_field']
self.assertTrue(isinstance(error, list))
def test_id_field_included_with_document(self):
# since v0.6 we also allow the id field to be included with the POSTed
# document
id_field = self.domain[self.known_resource]['id_field']
id = '55b2340538345bd048100ffe'
data = {"ref": "1234567890123456789054321", id_field: id}
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
self.assertPostResponse(r)
self.assertEqual(r['_id'], id)
def test_post_type_coercion(self):
schema = self.domain[self.known_resource]['schema']
schema['aninteger']['coerce'] = lambda string: int(float(string))
data = {'ref': '1234567890123456789054321', 'aninteger': '42.3'}
self.assertPostItem(data, 'aninteger', 42)
def test_post_location_header_hateoas_on(self):
self.app.config['HATEOAS'] = True
data = json.dumps({'ref': '1234567890123456789054321'})
headers = [('Content-Type', 'application/json')]
r = self.test_client.post(self.known_resource_url, data=data,
headers=headers)
self.assertTrue('Location' in r.headers)
self.assertTrue(self.known_resource_url in r.headers['Location'])
def test_post_location_header_hateoas_off(self):
self.app.config['HATEOAS'] = False
data = json.dumps({'ref': '1234567890123456789054321'})
headers = [('Content-Type', 'application/json')]
r = self.test_client.post(self.known_resource_url, data=data,
headers=headers)
self.assertTrue('Location' in r.headers)
self.assertTrue(self.known_resource_url in r.headers['Location'])
def perform_post(self, data, valid_items=[0]):
r, status = self.post(self.known_resource_url, data=data)
self.assert201(status)
self.assertPostResponse(r, valid_items)
return r
def assertPostItem(self, data, test_field, test_value):
r = self.perform_post(data)
item_id = r[self.domain[self.known_resource]['id_field']]
item_etag = r[ETAG]
db_value = self.compare_post_with_get(item_id, [test_field, ETAG])
self.assertTrue(db_value[0] == test_value)
self.assertTrue(db_value[1] == item_etag)
def assertPostResponse(self, response, valid_items=[0], resource=None):
if '_items' in response:
results = response['_items']
else:
results = [response]
id_field = self.domain[resource or self.known_resource]['id_field']
for i in valid_items:
item = results[i]
self.assertTrue(STATUS in item)
self.assertTrue(STATUS_OK in item[STATUS])
self.assertFalse(ISSUES in item)
self.assertTrue(id_field in item)
self.assertTrue(LAST_UPDATED in item)
self.assertTrue('_links' in item)
self.assertItemLink(item['_links'], item[id_field])
self.assertTrue(ETAG in item)
def compare_post_with_get(self, item_id, fields):
raw_r = self.test_client.get("%s/%s" % (self.known_resource_url,
item_id))
item, status = self.parse_response(raw_r)
id_field = self.domain[self.known_resource]['id_field']
self.assert200(status)
self.assertTrue(id_field in item)
self.assertTrue(item[id_field] == item_id)
self.assertTrue(DATE_CREATED in item)
self.assertTrue(LAST_UPDATED in item)
self.assertEqual(item[DATE_CREATED], item[LAST_UPDATED])
if isinstance(fields, list):
return [item[field] for field in fields]
else:
return item[fields]
def post(self, url, data, headers=[], content_type='application/json'):
headers.append(('Content-Type', content_type))
r = self.test_client.post(url, data=json.dumps(data), headers=headers)
return self.parse_response(r)
class TestEvents(TestBase):
new_contact_id = "0123456789012345678901234"
def test_on_pre_POST(self):
devent = DummyEvent(self.before_insert)
self.app.on_pre_POST += devent
self.post()
self.assertFalse(devent.called is None)
def test_on_pre_POST_contacts(self):
devent = DummyEvent(self.before_insert)
self.app.on_pre_POST_contacts += devent
self.post()
self.assertFalse(devent.called is None)
def test_on_post_POST(self):
devent = DummyEvent(self.after_insert)
self.app.on_post_POST += devent
self.post()
self.assertEqual(devent.called[0], self.known_resource)
def test_on_POST_post_resource(self):
devent = DummyEvent(self.after_insert)
self.app.on_post_POST_contacts += devent
self.post()
self.assertFalse(devent.called is None)
def test_on_insert(self):
devent = DummyEvent(self.before_insert, True)
self.app.on_insert += devent
self.post()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(self.new_contact_id, devent.called[1][0]['ref'])
def test_on_insert_contacts(self):
devent = DummyEvent(self.before_insert, True)
self.app.on_insert_contacts += devent
self.post()
self.assertEqual(self.new_contact_id, devent.called[0][0]['ref'])
def test_on_inserted(self):
devent = DummyEvent(self.after_insert, True)
self.app.on_inserted += devent
self.post()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(self.new_contact_id, devent.called[1][0]['ref'])
def test_on_inserted_contacts(self):
devent = DummyEvent(self.after_insert, True)
self.app.on_inserted_contacts += devent
self.post()
self.assertEqual(self.new_contact_id, devent.called[0][0]['ref'])
def post(self):
headers = [('Content-Type', 'application/json')]
data = json.dumps({"ref": self.new_contact_id})
self.test_client.post(
self.known_resource_url, data=data, headers=headers)
def before_insert(self):
db = self.connection[MONGO_DBNAME]
return db.contacts.find_one({"ref": self.new_contact_id}) is None
def after_insert(self):
return not self.before_insert()
|
|
"""Provide functionality for TTS."""
from __future__ import annotations
import asyncio
import functools as ft
import hashlib
import io
import logging
import mimetypes
import os
import re
from typing import cast
from aiohttp import web
import mutagen
from mutagen.id3 import ID3, TextFrame as ID3Text
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
DOMAIN as DOMAIN_MP,
MEDIA_TYPE_MUSIC,
SERVICE_PLAY_MEDIA,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DESCRIPTION,
CONF_NAME,
CONF_PLATFORM,
HTTP_BAD_REQUEST,
HTTP_NOT_FOUND,
PLATFORM_FORMAT,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.network import get_url
from homeassistant.helpers.service import async_set_service_schema
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.loader import async_get_integration
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.util.yaml import load_yaml
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
ATTR_CACHE = "cache"
ATTR_LANGUAGE = "language"
ATTR_MESSAGE = "message"
ATTR_OPTIONS = "options"
ATTR_PLATFORM = "platform"
BASE_URL_KEY = "tts_base_url"
CONF_BASE_URL = "base_url"
CONF_CACHE = "cache"
CONF_CACHE_DIR = "cache_dir"
CONF_LANG = "language"
CONF_SERVICE_NAME = "service_name"
CONF_TIME_MEMORY = "time_memory"
CONF_FIELDS = "fields"
DEFAULT_CACHE = True
DEFAULT_CACHE_DIR = "tts"
DEFAULT_TIME_MEMORY = 300
DOMAIN = "tts"
MEM_CACHE_FILENAME = "filename"
MEM_CACHE_VOICE = "voice"
SERVICE_CLEAR_CACHE = "clear_cache"
SERVICE_SAY = "say"
_RE_VOICE_FILE = re.compile(r"([a-f0-9]{40})_([^_]+)_([^_]+)_([a-z_]+)\.[a-z0-9]{3,4}")
KEY_PATTERN = "{0}_{1}_{2}_{3}"
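# Illustrative sketch (hypothetical hash): a cached voice file name is the
# KEY_PATTERN pieces joined by "_" plus an extension, and must match
# _RE_VOICE_FILE, e.g.
#   "<40-char sha1 of the message>_en-us_-_demo.mp3"
# where "en-us" is the language (underscores replaced by dashes), "-" is the
# options slot when no provider options were given, and "demo" is the engine.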
def _deprecated_platform(value):
"""Validate if platform is deprecated."""
if value == "google":
raise vol.Invalid(
"google tts service has been renamed to google_translate,"
" please update your configuration."
)
return value
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PLATFORM): vol.All(cv.string, _deprecated_platform),
vol.Optional(CONF_CACHE, default=DEFAULT_CACHE): cv.boolean,
vol.Optional(CONF_CACHE_DIR, default=DEFAULT_CACHE_DIR): cv.string,
vol.Optional(CONF_TIME_MEMORY, default=DEFAULT_TIME_MEMORY): vol.All(
vol.Coerce(int), vol.Range(min=60, max=57600)
),
vol.Optional(CONF_BASE_URL): cv.string,
vol.Optional(CONF_SERVICE_NAME): cv.string,
}
)
PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema)
SCHEMA_SERVICE_SAY = vol.Schema(
{
vol.Required(ATTR_MESSAGE): cv.string,
vol.Optional(ATTR_CACHE): cv.boolean,
vol.Required(ATTR_ENTITY_ID): cv.comp_entity_ids,
vol.Optional(ATTR_LANGUAGE): cv.string,
vol.Optional(ATTR_OPTIONS): dict,
}
)
SCHEMA_SERVICE_CLEAR_CACHE = vol.Schema({})
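# Illustrative service-call sketch (hypothetical platform and entity names);
# the service is registered below as "<platform>_say" unless CONF_SERVICE_NAME
# overrides it:
#   service: tts.demo_say
#   data:
#     entity_id: media_player.living_room
#     message: "Hello from TTS"
#     language: "en_US"
#     cache: true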
async def async_setup(hass, config):
"""Set up TTS."""
tts = SpeechManager(hass)
try:
conf = config[DOMAIN][0] if config.get(DOMAIN, []) else {}
use_cache = conf.get(CONF_CACHE, DEFAULT_CACHE)
cache_dir = conf.get(CONF_CACHE_DIR, DEFAULT_CACHE_DIR)
time_memory = conf.get(CONF_TIME_MEMORY, DEFAULT_TIME_MEMORY)
base_url = conf.get(CONF_BASE_URL)
hass.data[BASE_URL_KEY] = base_url
await tts.async_init_cache(use_cache, cache_dir, time_memory, base_url)
except (HomeAssistantError, KeyError):
_LOGGER.exception("Error on cache init")
return False
hass.http.register_view(TextToSpeechView(tts))
hass.http.register_view(TextToSpeechUrlView(tts))
# Load service descriptions from tts/services.yaml
integration = await async_get_integration(hass, DOMAIN)
services_yaml = integration.file_path / "services.yaml"
services_dict = cast(
dict, await hass.async_add_executor_job(load_yaml, str(services_yaml))
)
async def async_setup_platform(p_type, p_config=None, discovery_info=None):
"""Set up a TTS platform."""
if p_config is None:
p_config = {}
platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)
if platform is None:
return
try:
if hasattr(platform, "async_get_engine"):
provider = await platform.async_get_engine(
hass, p_config, discovery_info
)
else:
provider = await hass.async_add_executor_job(
platform.get_engine, hass, p_config, discovery_info
)
if provider is None:
_LOGGER.error("Error setting up platform %s", p_type)
return
tts.async_register_engine(p_type, provider, p_config)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error setting up platform: %s", p_type)
return
async def async_say_handle(service):
"""Service handle for say."""
entity_ids = service.data[ATTR_ENTITY_ID]
message = service.data.get(ATTR_MESSAGE)
cache = service.data.get(ATTR_CACHE)
language = service.data.get(ATTR_LANGUAGE)
options = service.data.get(ATTR_OPTIONS)
try:
url = await tts.async_get_url_path(
p_type, message, cache=cache, language=language, options=options
)
except HomeAssistantError as err:
_LOGGER.error("Error on init TTS: %s", err)
return
base = tts.base_url or get_url(hass)
url = base + url
data = {
ATTR_MEDIA_CONTENT_ID: url,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
ATTR_ENTITY_ID: entity_ids,
}
await hass.services.async_call(
DOMAIN_MP,
SERVICE_PLAY_MEDIA,
data,
blocking=True,
context=service.context,
)
service_name = p_config.get(CONF_SERVICE_NAME, f"{p_type}_{SERVICE_SAY}")
hass.services.async_register(
DOMAIN, service_name, async_say_handle, schema=SCHEMA_SERVICE_SAY
)
# Register the service description
service_desc = {
CONF_NAME: f"Say an TTS message with {p_type}",
CONF_DESCRIPTION: f"Say something using text-to-speech on a media player with {p_type}.",
CONF_FIELDS: services_dict[SERVICE_SAY][CONF_FIELDS],
}
async_set_service_schema(hass, DOMAIN, service_name, service_desc)
setup_tasks = [
asyncio.create_task(async_setup_platform(p_type, p_config))
for p_type, p_config in config_per_platform(config, DOMAIN)
]
if setup_tasks:
await asyncio.wait(setup_tasks)
async def async_platform_discovered(platform, info):
"""Handle for discovered platform."""
await async_setup_platform(platform, discovery_info=info)
discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
async def async_clear_cache_handle(service):
"""Handle clear cache service call."""
await tts.async_clear_cache()
hass.services.async_register(
DOMAIN,
SERVICE_CLEAR_CACHE,
async_clear_cache_handle,
schema=SCHEMA_SERVICE_CLEAR_CACHE,
)
return True
def _hash_options(options: dict) -> str:
"""Hashes an options dictionary."""
opts_hash = hashlib.blake2s(digest_size=5)
for key, value in sorted(options.items()):
opts_hash.update(str(key).encode())
opts_hash.update(str(value).encode())
return opts_hash.hexdigest()
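# Minimal sketch of the behaviour (hypothetical option values): because items
# are hashed in sorted key order, dict ordering does not change the result:
#   _hash_options({"voice": "a", "speed": 1}) == _hash_options({"speed": 1, "voice": "a"})
# The digest is 10 hex characters (blake2s with digest_size=5).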
class SpeechManager:
"""Representation of a speech store."""
def __init__(self, hass):
"""Initialize a speech store."""
self.hass = hass
self.providers = {}
self.use_cache = DEFAULT_CACHE
self.cache_dir = DEFAULT_CACHE_DIR
self.time_memory = DEFAULT_TIME_MEMORY
self.base_url = None
self.file_cache = {}
self.mem_cache = {}
async def async_init_cache(self, use_cache, cache_dir, time_memory, base_url):
"""Init config folder and load file cache."""
self.use_cache = use_cache
self.time_memory = time_memory
self.base_url = base_url
try:
self.cache_dir = await self.hass.async_add_executor_job(
_init_tts_cache_dir, self.hass, cache_dir
)
except OSError as err:
raise HomeAssistantError(f"Can't init cache dir {err}") from err
try:
cache_files = await self.hass.async_add_executor_job(
_get_cache_files, self.cache_dir
)
except OSError as err:
raise HomeAssistantError(f"Can't read cache dir {err}") from err
if cache_files:
self.file_cache.update(cache_files)
async def async_clear_cache(self):
"""Read file cache and delete files."""
self.mem_cache = {}
def remove_files():
"""Remove files from filesystem."""
for filename in self.file_cache.values():
try:
os.remove(os.path.join(self.cache_dir, filename))
except OSError as err:
_LOGGER.warning("Can't remove cache file '%s': %s", filename, err)
await self.hass.async_add_executor_job(remove_files)
self.file_cache = {}
@callback
def async_register_engine(self, engine, provider, config):
"""Register a TTS provider."""
provider.hass = self.hass
if provider.name is None:
provider.name = engine
self.providers[engine] = provider
self.hass.config.components.add(
PLATFORM_FORMAT.format(domain=engine, platform=DOMAIN)
)
async def async_get_url_path(
self, engine, message, cache=None, language=None, options=None
):
"""Get URL for play message.
This method is a coroutine.
"""
provider = self.providers[engine]
msg_hash = hashlib.sha1(bytes(message, "utf-8")).hexdigest()
use_cache = cache if cache is not None else self.use_cache
# Languages
language = language or provider.default_language
if language is None or language not in provider.supported_languages:
raise HomeAssistantError(f"Not supported language {language}")
# Options
if provider.default_options and options:
merged_options = provider.default_options.copy()
merged_options.update(options)
options = merged_options
options = options or provider.default_options
if options is not None:
invalid_opts = [
opt_name
for opt_name in options.keys()
if opt_name not in (provider.supported_options or [])
]
if invalid_opts:
raise HomeAssistantError(f"Invalid options found: {invalid_opts}")
options_key = _hash_options(options)
else:
options_key = "-"
key = KEY_PATTERN.format(
msg_hash, language.replace("_", "-"), options_key, engine
).lower()
# Is speech already in memory
if key in self.mem_cache:
filename = self.mem_cache[key][MEM_CACHE_FILENAME]
# Is file store in file cache
elif use_cache and key in self.file_cache:
filename = self.file_cache[key]
self.hass.async_create_task(self.async_file_to_mem(key))
# Load speech from provider into memory
else:
filename = await self.async_get_tts_audio(
engine, key, message, use_cache, language, options
)
return f"/api/tts_proxy/{filename}"
async def async_get_tts_audio(self, engine, key, message, cache, language, options):
"""Receive TTS and store for view in cache.
This method is a coroutine.
"""
provider = self.providers[engine]
extension, data = await provider.async_get_tts_audio(message, language, options)
if data is None or extension is None:
raise HomeAssistantError(f"No TTS from {engine} for '{message}'")
# Create file infos
filename = f"{key}.{extension}".lower()
# Validate filename
if not _RE_VOICE_FILE.match(filename):
raise HomeAssistantError(
f"TTS filename '{filename}' from {engine} is invalid!"
)
# Save to memory
data = self.write_tags(filename, data, provider, message, language, options)
self._async_store_to_memcache(key, filename, data)
if cache:
self.hass.async_create_task(self.async_save_tts_audio(key, filename, data))
return filename
async def async_save_tts_audio(self, key, filename, data):
"""Store voice data to file and file_cache.
This method is a coroutine.
"""
voice_file = os.path.join(self.cache_dir, filename)
def save_speech():
"""Store speech to filesystem."""
with open(voice_file, "wb") as speech:
speech.write(data)
try:
await self.hass.async_add_executor_job(save_speech)
self.file_cache[key] = filename
except OSError as err:
_LOGGER.error("Can't write %s: %s", filename, err)
async def async_file_to_mem(self, key):
"""Load voice from file cache into memory.
This method is a coroutine.
"""
filename = self.file_cache.get(key)
if not filename:
raise HomeAssistantError(f"Key {key} not in file cache!")
voice_file = os.path.join(self.cache_dir, filename)
def load_speech():
"""Load a speech from filesystem."""
with open(voice_file, "rb") as speech:
return speech.read()
try:
data = await self.hass.async_add_executor_job(load_speech)
except OSError as err:
del self.file_cache[key]
raise HomeAssistantError(f"Can't read {voice_file}") from err
self._async_store_to_memcache(key, filename, data)
@callback
def _async_store_to_memcache(self, key, filename, data):
"""Store data to memcache and set timer to remove it."""
self.mem_cache[key] = {MEM_CACHE_FILENAME: filename, MEM_CACHE_VOICE: data}
@callback
def async_remove_from_mem():
"""Cleanup memcache."""
self.mem_cache.pop(key, None)
self.hass.loop.call_later(self.time_memory, async_remove_from_mem)
async def async_read_tts(self, filename):
"""Read a voice file and return binary.
This method is a coroutine.
"""
record = _RE_VOICE_FILE.match(filename.lower())
if not record:
raise HomeAssistantError("Wrong tts file format!")
key = KEY_PATTERN.format(
record.group(1), record.group(2), record.group(3), record.group(4)
)
if key not in self.mem_cache:
if key not in self.file_cache:
raise HomeAssistantError(f"{key} not in cache!")
await self.async_file_to_mem(key)
content, _ = mimetypes.guess_type(filename)
return content, self.mem_cache[key][MEM_CACHE_VOICE]
@staticmethod
def write_tags(filename, data, provider, message, language, options):
"""Write ID3 tags to file.
Async friendly.
"""
data_bytes = io.BytesIO(data)
data_bytes.name = filename
data_bytes.seek(0)
album = provider.name
artist = language
if options is not None and options.get("voice") is not None:
artist = options.get("voice")
try:
tts_file = mutagen.File(data_bytes)
if tts_file is not None:
if not tts_file.tags:
tts_file.add_tags()
if isinstance(tts_file.tags, ID3):
tts_file["artist"] = ID3Text(encoding=3, text=artist)
tts_file["album"] = ID3Text(encoding=3, text=album)
tts_file["title"] = ID3Text(encoding=3, text=message)
else:
tts_file["artist"] = artist
tts_file["album"] = album
tts_file["title"] = message
tts_file.save(data_bytes)
except mutagen.MutagenError as err:
_LOGGER.error("ID3 tag error: %s", err)
return data_bytes.getvalue()
class Provider:
"""Represent a single TTS provider."""
hass: HomeAssistantType | None = None
name: str | None = None
@property
def default_language(self):
"""Return the default language."""
return None
@property
def supported_languages(self):
"""Return a list of supported languages."""
return None
@property
def supported_options(self):
"""Return a list of supported options like voice, emotionen."""
return None
@property
def default_options(self):
"""Return a dict include default options."""
return None
def get_tts_audio(self, message, language, options=None):
"""Load tts audio file from provider."""
raise NotImplementedError()
async def async_get_tts_audio(self, message, language, options=None):
"""Load tts audio file from provider.
Return a tuple of file extension and data as bytes.
"""
return await self.hass.async_add_executor_job(
ft.partial(self.get_tts_audio, message, language, options=options)
)
def _init_tts_cache_dir(hass, cache_dir):
"""Init cache folder."""
if not os.path.isabs(cache_dir):
cache_dir = hass.config.path(cache_dir)
if not os.path.isdir(cache_dir):
_LOGGER.info("Create cache dir %s", cache_dir)
os.mkdir(cache_dir)
return cache_dir
def _get_cache_files(cache_dir):
"""Return a dict of given engine files."""
cache = {}
folder_data = os.listdir(cache_dir)
for file_data in folder_data:
record = _RE_VOICE_FILE.match(file_data)
if record:
key = KEY_PATTERN.format(
record.group(1), record.group(2), record.group(3), record.group(4)
)
cache[key.lower()] = file_data.lower()
return cache
class TextToSpeechUrlView(HomeAssistantView):
"""TTS view to get a url to a generated speech file."""
requires_auth = True
url = "/api/tts_get_url"
name = "api:tts:geturl"
def __init__(self, tts):
"""Initialize a tts view."""
self.tts = tts
async def post(self, request: web.Request) -> web.Response:
"""Generate speech and provide url."""
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON specified", HTTP_BAD_REQUEST)
if not data.get(ATTR_PLATFORM) or not data.get(ATTR_MESSAGE):
return self.json_message(
"Must specify platform and message", HTTP_BAD_REQUEST
)
p_type = data[ATTR_PLATFORM]
message = data[ATTR_MESSAGE]
cache = data.get(ATTR_CACHE)
language = data.get(ATTR_LANGUAGE)
options = data.get(ATTR_OPTIONS)
try:
path = await self.tts.async_get_url_path(
p_type, message, cache=cache, language=language, options=options
)
except HomeAssistantError as err:
_LOGGER.error("Error on init tts: %s", err)
return self.json({"error": err}, HTTP_BAD_REQUEST)
base = self.tts.base_url or get_url(self.tts.hass)
url = base + path
return self.json({"url": url, "path": path})
class TextToSpeechView(HomeAssistantView):
"""TTS view to serve a speech audio."""
requires_auth = False
url = "/api/tts_proxy/{filename}"
name = "api:tts_speech"
def __init__(self, tts):
"""Initialize a tts view."""
self.tts = tts
async def get(self, request: web.Request, filename: str) -> web.Response:
"""Start a get request."""
try:
content, data = await self.tts.async_read_tts(filename)
except HomeAssistantError as err:
_LOGGER.error("Error on load tts: %s", err)
return web.Response(status=HTTP_NOT_FOUND)
return web.Response(body=data, content_type=content)
def get_base_url(hass):
"""Get base URL."""
return hass.data[BASE_URL_KEY] or get_url(hass)
|
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Implementation to execute GL Intermediate Representation (GLIR)
"""
import os
import sys
import re
import json
import weakref
from distutils.version import LooseVersion
import numpy as np
from . import gl
from ..ext.six import string_types
from ..util import logger
# TODO: expose these via an extension space in .gl?
_internalformats = [
gl.Enum('GL_RED', 6403),
gl.Enum('GL_R', 8194),
gl.Enum('GL_R8', 33321),
gl.Enum('GL_R16', 33322),
gl.Enum('GL_R16F', 33325),
gl.Enum('GL_R32F', 33326),
gl.Enum('GL_RG', 33319),
gl.Enum('GL_RG8', 33323),
gl.Enum('GL_RG16', 33324),
gl.Enum('GL_RG16F', 33327),
gl.Enum('GL_RG32F', 33328),
gl.Enum('GL_RGB', 6407),
gl.Enum('GL_RGB8', 32849),
gl.Enum('GL_RGB16', 32852),
gl.Enum('GL_RGB16F', 34843),
gl.Enum('GL_RGB32F', 34837),
gl.Enum('GL_RGBA', 6408),
gl.Enum('GL_RGBA8', 32856),
gl.Enum('GL_RGBA16', 32859),
gl.Enum('GL_RGBA16F', 34842),
gl.Enum('GL_RGBA32F', 34836)
]
_internalformats = dict([(enum.name, enum) for enum in _internalformats])
# Value to mark a glir object that was just deleted. So we can safely
# ignore it (and not raise an error that the object could not be found).
# This can happen e.g. if A is created, A is bound to B and then A gets
# deleted. The commands may get executed in order: A gets created, A
# gets deleted, A gets bound to B.
JUST_DELETED = 'JUST_DELETED'
def as_enum(enum):
""" Turn a possibly string enum into an integer enum.
"""
if isinstance(enum, string_types):
try:
enum = getattr(gl, 'GL_' + enum.upper())
except AttributeError:
try:
enum = _internalformats['GL_' + enum.upper()]
except KeyError:
raise ValueError('Could not find int value for enum %r' % enum)
return enum
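# Minimal usage sketch (actual int values depend on the gl backend in use):
#   as_enum('TRIANGLES')  -> gl.GL_TRIANGLES
#   as_enum('r16f')       -> looked up in _internalformats if gloo.gl lacks it
#   as_enum(gl.GL_FLOAT)  -> returned unchanged (already an int enum)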
class _GlirQueueShare(object):
"""This class contains the actual queues of GLIR commands that are
collected until a context becomes available to execute the commands.
Instances of this class are further wrapped by GlirQueue to allow the
underlying queues to be transparently merged when GL objects become
associated.
The motivation for this design is that it allows most glir commands to be
added directly to their final queue (the same one used by the context),
which reduces the effort required at draw time to determine the complete
set of GL commands to be issued.
At the same time, all GLObjects begin with their own local queue to allow
commands to be queued at any time, even if the GLObject has
not been associated yet. This works as expected even for complex topologies
of GL objects, when some queues may only be joined at the last possible
moment.
"""
def __init__(self, queue):
self._commands = [] # local commands
self._verbose = False
# queues that have been merged with this one
self._associations = weakref.WeakKeyDictionary({queue: None})
def command(self, *args):
""" Send a command. See the command spec at:
https://github.com/vispy/vispy/wiki/Spec.-Gloo-IR
"""
self._commands.append(args)
def set_verbose(self, verbose):
""" Set verbose or not. If True, the GLIR commands are printed
right before they get parsed. If a string is given, use it as
a filter.
"""
self._verbose = verbose
def show(self, filter=None):
""" Print the list of commands currently in the queue. If filter is
given, print only commands that match the filter.
"""
for command in self._commands:
if command[0] is None: # or command[1] in self._invalid_objects:
continue  # Skip nil commands
if filter and command[0] != filter:
continue
t = []
for e in command:
if isinstance(e, np.ndarray):
t.append('array %s' % str(e.shape))
elif isinstance(e, str):
s = e.strip()
if len(s) > 20:
s = s[:18] + '... %i lines' % (e.count('\n')+1)
t.append(s)
else:
t.append(e)
print(tuple(t))
def clear(self):
""" Pop the whole queue (and associated queues) and return a
list of commands.
"""
commands = self._commands
self._commands = []
return commands
def flush(self, parser):
""" Flush all current commands to the GLIR interpreter.
"""
if self._verbose:
show = self._verbose if isinstance(self._verbose, str) else None
self.show(show)
parser.parse(self._filter(self.clear(), parser))
def _filter(self, commands, parser):
""" Filter DATA/SIZE commands that are overridden by a
SIZE command.
"""
resized = set()
commands2 = []
for command in reversed(commands):
if command[0] == 'SHADERS':
convert = parser.convert_shaders()
if convert:
shaders = self._convert_shaders(convert, command[2:])
command = command[:2] + shaders
elif command[1] in resized:
if command[0] in ('SIZE', 'DATA'):
continue # remove this command
elif command[0] == 'SIZE':
resized.add(command[1])
commands2.append(command)
return list(reversed(commands2))
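# Illustrative sketch (hypothetical object id 1): scanning in reverse means a
# DATA or SIZE command issued *before* a later SIZE on the same object is
# dropped, e.g.
#   [('DATA', 1, ...), ('SIZE', 1, 100), ('DATA', 1, ...)]
#   -> [('SIZE', 1, 100), ('DATA', 1, ...)]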
def _convert_shaders(self, convert, shaders):
return convert_shaders(convert, shaders)
class GlirQueue(object):
""" Representation of a queue of GLIR commands
One instance of this class is attached to each context object, and
to each gloo object. Internally, commands are stored in a shared queue
object that may be swapped out and merged with other queues when
``associate()`` is called.
Upon drawing (i.e. `Program.draw()`) and framebuffer switching, the
commands in the queue are pushed to a parser, which is stored at
context.shared. The parser can interpret the commands in Python,
send them to a browser, etc.
"""
def __init__(self):
# We do not actually queue any commands here, but on a shared queue
# object that may be joined with others as queues are associated.
self._shared = _GlirQueueShare(self)
def command(self, *args):
""" Send a command. See the command spec at:
https://github.com/vispy/vispy/wiki/Spec.-Gloo-IR
"""
self._shared.command(*args)
def set_verbose(self, verbose):
""" Set verbose or not. If True, the GLIR commands are printed
right before they get parsed. If a string is given, use it as
a filter.
"""
self._shared.set_verbose(verbose)
def clear(self):
""" Pop the whole queue (and associated queues) and return a
list of commands.
"""
return self._shared.clear()
def associate(self, queue):
"""Merge this queue with another.
Both queues will use a shared command list and either one can be used
to fill or flush the shared queue.
"""
assert isinstance(queue, GlirQueue)
if queue._shared is self._shared:
return
# merge commands
self._shared._commands.extend(queue.clear())
self._shared._verbose |= queue._shared._verbose
self._shared._associations[queue] = None
# update queue and all related queues to use the same _shared object
for ch in queue._shared._associations:
ch._shared = self._shared
self._shared._associations[ch] = None
queue._shared = self._shared
def flush(self, parser):
""" Flush all current commands to the GLIR interpreter.
"""
self._shared.flush(parser)
def convert_shaders(convert, shaders):
""" Modify shading code so that we can write code once
and make it run "everywhere".
"""
# New version of the shaders
out = []
if convert == 'es2':
for isfragment, shader in enumerate(shaders):
has_version = False
has_prec_float = False
has_prec_int = False
lines = []
# Iterate over lines
for line in shader.lstrip().splitlines():
if line.startswith('#version'):
has_version = True
continue
if line.startswith('precision '):
has_prec_float = has_prec_float or 'float' in line
has_prec_int = has_prec_int or 'int' in line
lines.append(line.rstrip())
# Write
# BUG: fails on WebGL (Chrome)
# if True:
# lines.insert(has_version, '#line 0')
if not has_prec_float:
lines.insert(has_version, 'precision highp float;')
if not has_prec_int:
lines.insert(has_version, 'precision highp int;')
# BUG: fails on WebGL (Chrome)
# if not has_version:
# lines.insert(has_version, '#version 100')
out.append('\n'.join(lines))
elif convert == 'desktop':
for isfragment, shader in enumerate(shaders):
has_version = False
lines = []
# Iterate over lines
for line in shader.lstrip().splitlines():
has_version = has_version or line.startswith('#version')
if line.startswith('precision '):
line = ''
for prec in (' highp ', ' mediump ', ' lowp '):
line = line.replace(prec, ' ')
lines.append(line.rstrip())
# Write
if not has_version:
lines.insert(0, '#version 120\n')
out.append('\n'.join(lines))
else:
raise ValueError('Cannot convert shaders to %r.' % convert)
return tuple(out)
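# Illustrative sketch (hypothetical one-line shaders): converting to 'es2'
# strips any '#version' line and prepends default precision qualifiers, so
#   convert_shaders('es2', ('void main() {}', 'void main() {}'))
# yields shaders that start with
#   'precision highp int;\nprecision highp float;\n...'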
def as_es2_command(command):
""" Modify a desktop command so it works on es2.
"""
if command[0] == 'FUNC':
return (command[0], re.sub(r'^gl([A-Z])',
lambda m: m.group(1).lower(), command[1])) + command[2:]
if command[0] == 'SHADERS':
return command[:2] + convert_shaders('es2', command[2:])
if command[0] == 'UNIFORM':
return command[:-1] + (command[-1].tolist(),)
return command
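# Illustrative sketch (hypothetical command tuple): the desktop-style function
# name is mapped to its es2 spelling and the arguments are kept as-is:
#   ('FUNC', 'glEnable', 3042)  ->  ('FUNC', 'enable', 3042)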
class BaseGlirParser(object):
""" Base clas for GLIR parsers that can be attached to a GLIR queue.
"""
def __init__(self):
self.capabilities = dict(
gl_version='Unknown',
max_texture_size=None,
)
def is_remote(self):
""" Whether the code is executed remotely. i.e. gloo.gl cannot
be used.
"""
raise NotImplementedError()
def convert_shaders(self):
""" Whether to convert shading code. Valid values are 'es2' and
'desktop'. If None, the shaders are not modified.
"""
raise NotImplementedError()
def parse(self, commands):
""" Parse the GLIR commands. Or sent them away.
"""
raise NotImplementedError()
class GlirParser(BaseGlirParser):
""" A class for interpreting GLIR commands using gloo.gl
We make use of relatively light GLIR objects that are instantiated
on CREATE commands. These objects are stored by their id in a
dictionary so that commands like ACTIVATE and DATA can easily
be executed on the corresponding objects.
"""
def __init__(self):
super(GlirParser, self).__init__()
self._objects = {}
self._invalid_objects = set()
self._classmap = {'Program': GlirProgram,
'VertexBuffer': GlirVertexBuffer,
'IndexBuffer': GlirIndexBuffer,
'Texture1D': GlirTexture1D,
'Texture2D': GlirTexture2D,
'Texture3D': GlirTexture3D,
'RenderBuffer': GlirRenderBuffer,
'FrameBuffer': GlirFrameBuffer,
}
# We keep a dict that the GLIR objects use for storing
# per-context information. This dict is cleared each time
# that the context is made current. This seems necessary for
# when two Canvases share a context.
self.env = {}
def is_remote(self):
return False
def convert_shaders(self):
if '.es' in gl.current_backend.__name__:
return 'es2'
else:
return 'desktop'
def _parse(self, command):
""" Parse a single command.
"""
cmd, id_, args = command[0], command[1], command[2:]
if cmd == 'CURRENT':
# This context is made current
self.env.clear()
self._gl_initialize()
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0)
elif cmd == 'FUNC':
# GL function call
args = [as_enum(a) for a in args]
try:
getattr(gl, id_)(*args)
except AttributeError:
logger.warning('Invalid gl command: %r' % id_)
elif cmd == 'CREATE':
# Creating an object
if args[0] is not None:
klass = self._classmap[args[0]]
self._objects[id_] = klass(self, id_)
else:
self._invalid_objects.add(id_)
elif cmd == 'DELETE':
# Deleting an object
ob = self._objects.get(id_, None)
if ob is not None:
self._objects[id_] = JUST_DELETED
ob.delete()
else:
# Doing something to an object
ob = self._objects.get(id_, None)
if ob == JUST_DELETED:
return
if ob is None:
if id_ not in self._invalid_objects:
raise RuntimeError('Cannot %s object %i because it '
'does not exist' % (cmd, id_))
return
# Triage over command. Order of commands is set so most
# common ones occur first.
if cmd == 'DRAW': # Program
ob.draw(*args)
elif cmd == 'TEXTURE': # Program
ob.set_texture(*args)
elif cmd == 'UNIFORM': # Program
ob.set_uniform(*args)
elif cmd == 'ATTRIBUTE': # Program
ob.set_attribute(*args)
elif cmd == 'DATA': # VertexBuffer, IndexBuffer, Texture
ob.set_data(*args)
elif cmd == 'SIZE': # VertexBuffer, IndexBuffer,
ob.set_size(*args) # Texture[1D, 2D, 3D], RenderBuffer
elif cmd == 'ATTACH': # FrameBuffer
ob.attach(*args)
elif cmd == 'FRAMEBUFFER': # FrameBuffer
ob.set_framebuffer(*args)
elif cmd == 'SHADERS': # Program
ob.set_shaders(*args)
elif cmd == 'WRAPPING': # Texture1D, Texture2D, Texture3D
ob.set_wrapping(*args)
elif cmd == 'INTERPOLATION': # Texture1D, Texture2D, Texture3D
ob.set_interpolation(*args)
else:
logger.warning('Invalid GLIR command %r' % cmd)
def parse(self, commands):
""" Parse a list of commands.
"""
# Get rid of dummy objects that represented deleted objects in
# the last parsing round.
to_delete = []
for id_, val in self._objects.items():
if val == JUST_DELETED:
to_delete.append(id_)
for id_ in to_delete:
self._objects.pop(id_)
for command in commands:
self._parse(command)
def get_object(self, id_):
""" Get the object with the given id or None if it does not exist.
"""
return self._objects.get(id_, None)
def _gl_initialize(self):
""" Deal with compatibility; desktop does not have sprites
enabled by default. ES has.
"""
if '.es' in gl.current_backend.__name__:
pass # ES2: no action required
else:
# Desktop, enable sprites
GL_VERTEX_PROGRAM_POINT_SIZE = 34370
GL_POINT_SPRITE = 34913
gl.glEnable(GL_VERTEX_PROGRAM_POINT_SIZE)
gl.glEnable(GL_POINT_SPRITE)
if self.capabilities['max_texture_size'] is None: # only do once
self.capabilities['gl_version'] = gl.glGetParameter(gl.GL_VERSION)
self.capabilities['max_texture_size'] = \
gl.glGetParameter(gl.GL_MAX_TEXTURE_SIZE)
this_version = self.capabilities['gl_version'].split(' ')[0]
this_version = LooseVersion(this_version)
if this_version < '2.1':
if os.getenv('VISPY_IGNORE_OLD_VERSION', '').lower() != 'true':
logger.warning('OpenGL version 2.1 or higher recommended, '
'got %s. Some functionality may fail.'
% self.capabilities['gl_version'])
def glir_logger(parser_cls, file_or_filename):
from ..util.logs import NumPyJSONEncoder
class cls(parser_cls):
def __init__(self, *args, **kwargs):
parser_cls.__init__(self, *args, **kwargs)
if isinstance(file_or_filename, string_types):
self._file = open(file_or_filename, 'w')
else:
self._file = file_or_filename
self._file.write('[]')
self._empty = True
def _parse(self, command):
parser_cls._parse(self, command)
self._file.seek(self._file.tell() - 1)
if self._empty:
self._empty = False
else:
self._file.write(',\n')
json.dump(as_es2_command(command),
self._file, cls=NumPyJSONEncoder)
self._file.write(']')
return cls
## GLIR objects
class GlirObject(object):
def __init__(self, parser, id_):
self._parser = parser
self._id = id_
self._handle = -1 # Must be set by subclass in create()
self.create()
@property
def handle(self):
return self._handle
@property
def id(self):
return self._id
def __repr__(self):
return '<%s %i at 0x%x>' % (self.__class__.__name__, self.id, id(self))
class GlirProgram(GlirObject):
UTYPEMAP = {
'float': 'glUniform1fv',
'vec2': 'glUniform2fv',
'vec3': 'glUniform3fv',
'vec4': 'glUniform4fv',
'int': 'glUniform1iv',
'ivec2': 'glUniform2iv',
'ivec3': 'glUniform3iv',
'ivec4': 'glUniform4iv',
'bool': 'glUniform1iv',
'bvec2': 'glUniform2iv',
'bvec3': 'glUniform3iv',
'bvec4': 'glUniform4iv',
'mat2': 'glUniformMatrix2fv',
'mat3': 'glUniformMatrix3fv',
'mat4': 'glUniformMatrix4fv',
'sampler1D': 'glUniform1i',
'sampler2D': 'glUniform1i',
'sampler3D': 'glUniform1i',
}
ATYPEMAP = {
'float': 'glVertexAttrib1f',
'vec2': 'glVertexAttrib2f',
'vec3': 'glVertexAttrib3f',
'vec4': 'glVertexAttrib4f',
}
ATYPEINFO = {
'float': (1, gl.GL_FLOAT, np.float32),
'vec2': (2, gl.GL_FLOAT, np.float32),
'vec3': (3, gl.GL_FLOAT, np.float32),
'vec4': (4, gl.GL_FLOAT, np.float32),
'int': (1, gl.GL_INT, np.int32),
}
def create(self):
self._handle = gl.glCreateProgram()
self._validated = False
self._linked = False
# Keeping track of uniforms/attributes
self._handles = {} # cache with handles to attributes/uniforms
self._unset_variables = set()
# Store samplers and buffers that are bound to uniforms/attributes
self._samplers = {} # name -> (tex-target, tex-handle, unit)
self._attributes = {} # name -> (vbo-handle, attr-handle, func, args)
self._known_invalid = set() # variables that we know are invalid
def delete(self):
gl.glDeleteProgram(self._handle)
def activate(self):
""" Avoid overhead in calling glUseProgram with same arg.
Warning: this will break if glUseProgram is used somewhere else.
Per context we keep track of one current program.
"""
if self._handle != self._parser.env.get('current_program', False):
self._parser.env['current_program'] = self._handle
gl.glUseProgram(self._handle)
def deactivate(self):
""" Avoid overhead in calling glUseProgram with same arg.
Warning: this will break if glUseProgram is used somewhere else.
Per context we keep track of one current program.
"""
if self._parser.env.get('current_program', 0) != 0:
self._parser.env['current_program'] = 0
gl.glUseProgram(0)
def set_shaders(self, vert, frag):
""" This function takes care of setting the shading code and
compiling+linking it into a working program object that is ready
to use.
"""
self._linked = False
# Create temporary shader objects
vert_handle = gl.glCreateShader(gl.GL_VERTEX_SHADER)
frag_handle = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)
# For both vertex and fragment shader: set source, compile, check
for code, handle, type_ in [(vert, vert_handle, 'vertex'),
(frag, frag_handle, 'fragment')]:
gl.glShaderSource(handle, code)
gl.glCompileShader(handle)
status = gl.glGetShaderParameter(handle, gl.GL_COMPILE_STATUS)
if not status:
errors = gl.glGetShaderInfoLog(handle)
errormsg = self._get_error(code, errors, 4)
raise RuntimeError("Shader compilation error in %s:\n%s" %
(type_ + ' shader', errormsg))
# Attach shaders
gl.glAttachShader(self._handle, vert_handle)
gl.glAttachShader(self._handle, frag_handle)
# Link the program and check
gl.glLinkProgram(self._handle)
if not gl.glGetProgramParameter(self._handle, gl.GL_LINK_STATUS):
raise RuntimeError('Program linking error:\n%s'
% gl.glGetProgramInfoLog(self._handle))
# Now we can remove the shaders. We no longer need them and it
# frees up precious GPU memory:
# http://gamedev.stackexchange.com/questions/47910
gl.glDetachShader(self._handle, vert_handle)
gl.glDetachShader(self._handle, frag_handle)
gl.glDeleteShader(vert_handle)
gl.glDeleteShader(frag_handle)
# Now we know what variables will be used by the program
self._unset_variables = self._get_active_attributes_and_uniforms()
self._handles = {}
self._known_invalid = set()
self._linked = True
def _get_active_attributes_and_uniforms(self):
""" Retrieve active attributes and uniforms to be able to check that
all uniforms/attributes are set by the user.
Other GLIR implementations may omit this.
"""
# This matches a name of the form "name[size]" (= array)
regex = re.compile(r"(?P<name>\w+)\s*(\[(?P<size>\d+)\])\s*")
# Get how many active attributes and uniforms there are
cu = gl.glGetProgramParameter(self._handle, gl.GL_ACTIVE_UNIFORMS)
ca = gl.glGetProgramParameter(self.handle, gl.GL_ACTIVE_ATTRIBUTES)
# Get info on each one
attributes = []
uniforms = []
for container, count, func in [(attributes, ca, gl.glGetActiveAttrib),
(uniforms, cu, gl.glGetActiveUniform)]:
for i in range(count):
name, size, gtype = func(self._handle, i)
m = regex.match(name) # Check if xxx[0] instead of xx
if m:
name = m.group('name')
for i in range(size):
container.append(('%s[%d]' % (name, i), gtype))
else:
container.append((name, gtype))
#return attributes, uniforms
return set([v[0] for v in attributes] + [v[0] for v in uniforms])
def _parse_error(self, error):
""" Parses a single GLSL error and extracts the linenr and description
Other GLIR implementations may omit this.
"""
error = str(error)
# Nvidia
# 0(7): error C1008: undefined variable "MV"
m = re.match(r'(\d+)\((\d+)\)\s*:\s(.*)', error)
if m:
return int(m.group(2)), m.group(3)
# ATI / Intel
# ERROR: 0:131: '{' : syntax error parse error
m = re.match(r'ERROR:\s(\d+):(\d+):\s(.*)', error)
if m:
return int(m.group(2)), m.group(3)
# Nouveau
# 0:28(16): error: syntax error, unexpected ')', expecting '('
m = re.match(r'(\d+):(\d+)\((\d+)\):\s(.*)', error)
if m:
return int(m.group(2)), m.group(4)
# Other ...
return None, error
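# Illustrative sketch: the Nvidia-style message quoted above,
#   0(7): error C1008: undefined variable "MV"
# parses to (7, 'error C1008: undefined variable "MV"').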
def _get_error(self, code, errors, indentation=0):
"""Get error and show the faulty line + some context
Other GLIR implementations may omit this.
"""
# Init
results = []
lines = None
if code is not None:
lines = [line.strip() for line in code.split('\n')]
for error in errors.split('\n'):
# Strip; skip empty lines
error = error.strip()
if not error:
continue
# Separate line number from description (if we can)
linenr, error = self._parse_error(error)
if None in (linenr, lines):
results.append('%s' % error)
else:
results.append('on line %i: %s' % (linenr, error))
if linenr > 0 and linenr < len(lines):
results.append(' %s' % lines[linenr - 1])
# Add indentation and return
results = [' ' * indentation + r for r in results]
return '\n'.join(results)
def set_texture(self, name, value):
""" Set a texture sampler. Value is the id of the texture to link.
"""
if not self._linked:
raise RuntimeError('Cannot set uniform when program has no code')
# Get handle for the uniform, first try cache
handle = self._handles.get(name, -1)
if handle < 0:
if name in self._known_invalid:
return
handle = gl.glGetUniformLocation(self._handle, name)
self._unset_variables.discard(name) # Mark as set
self._handles[name] = handle # Store in cache
if handle < 0:
self._known_invalid.add(name)
logger.info('Variable %s is not an active uniform' % name)
return
# Program needs to be active in order to set uniforms
self.activate()
if True:
# Sampler: the value is the id of the texture
tex = self._parser.get_object(value)
if tex == JUST_DELETED:
return
if tex is None:
raise RuntimeError('Could not find texture with id %i' % value)
unit = len(self._samplers)
if name in self._samplers:
unit = self._samplers[name][-1] # Use existing unit
self._samplers[name] = tex._target, tex.handle, unit
gl.glUniform1i(handle, unit)
def set_uniform(self, name, type_, value):
""" Set a uniform value. Value is assumed to have been checked.
"""
if not self._linked:
raise RuntimeError('Cannot set uniform when program has no code')
# Get handle for the uniform, first try cache
handle = self._handles.get(name, -1)
count = 1
if handle < 0:
if name in self._known_invalid:
return
handle = gl.glGetUniformLocation(self._handle, name)
self._unset_variables.discard(name) # Mark as set
# if we set a uniform_array, mark all as set
if not type_.startswith('mat'):
count = value.nbytes // (4 * self.ATYPEINFO[type_][0])
if count > 1:
for ii in range(count):
if '%s[%s]' % (name, ii) in self._unset_variables:
self._unset_variables.discard('%s[%s]' % (name, ii))
self._handles[name] = handle # Store in cache
if handle < 0:
self._known_invalid.add(name)
logger.info('Variable %s is not an active uniform' % name)
return
# Look up function to call
funcname = self.UTYPEMAP[type_]
func = getattr(gl, funcname)
# Program needs to be active in order to set uniforms
self.activate()
# Triage depending on type
if type_.startswith('mat'):
# Value is matrix, these gl funcs have alternative signature
transpose = False # OpenGL ES 2.0 does not support transpose
func(handle, 1, transpose, value)
else:
# Regular uniform
func(handle, count, value)
def set_attribute(self, name, type_, value):
""" Set an attribute value. Value is assumed to have been checked.
"""
if not self._linked:
raise RuntimeError('Cannot set attribute when program has no code')
# Get handle for the attribute, first try cache
handle = self._handles.get(name, -1)
if handle < 0:
if name in self._known_invalid:
return
handle = gl.glGetAttribLocation(self._handle, name)
self._unset_variables.discard(name) # Mark as set
self._handles[name] = handle # Store in cache
if handle < 0:
self._known_invalid.add(name)
if value[0] != 0 and value[2] > 0: # VBO with offset
return # Probably an unused element in a structured VBO
logger.info('Variable %s is not an active attribute' % name)
return
# Program needs to be active in order to set uniforms
self.activate()
# Triage depending on VBO or tuple data
if value[0] == 0:
# Look up function call
funcname = self.ATYPEMAP[type_]
func = getattr(gl, funcname)
# Set data
self._attributes[name] = 0, handle, func, value[1:]
else:
# Get meta data
vbo_id, stride, offset = value
size, gtype, dtype = self.ATYPEINFO[type_]
# Get associated VBO
vbo = self._parser.get_object(vbo_id)
if vbo == JUST_DELETED:
return
if vbo is None:
raise RuntimeError('Could not find VBO with id %i' % vbo_id)
# Set data
func = gl.glVertexAttribPointer
args = size, gtype, gl.GL_FALSE, stride, offset
self._attributes[name] = vbo.handle, handle, func, args
def _pre_draw(self):
self.activate()
# Activate textures
for tex_target, tex_handle, unit in self._samplers.values():
gl.glActiveTexture(gl.GL_TEXTURE0 + unit)
gl.glBindTexture(tex_target, tex_handle)
# Activate attributes
for vbo_handle, attr_handle, func, args in self._attributes.values():
if vbo_handle:
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, vbo_handle)
gl.glEnableVertexAttribArray(attr_handle)
func(attr_handle, *args)
else:
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)
gl.glDisableVertexAttribArray(attr_handle)
func(attr_handle, *args)
# Validate. We need to validate after textures units get assigned
if not self._validated:
self._validated = True
self._validate()
def _validate(self):
# Validate ourselves
if self._unset_variables:
logger.info('Program has unset variables: %r' %
self._unset_variables)
# Validate via OpenGL
gl.glValidateProgram(self._handle)
if not gl.glGetProgramParameter(self._handle,
gl.GL_VALIDATE_STATUS):
raise RuntimeError('Program validation error:\n%s'
% gl.glGetProgramInfoLog(self._handle))
def _post_draw(self):
# No need to deactivate each texture/buffer, just set to 0
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)
gl.glBindTexture(gl.GL_TEXTURE_2D, 0)
if USE_TEX_3D:
gl.glBindTexture(GL_TEXTURE_3D, 0)
gl.glBindTexture(GL_TEXTURE_1D, 0)
# Deactivate program - should not be necessary. In single-program
# apps it would not even make sense.
# self.deactivate()
def draw(self, mode, selection):
""" Draw program in given mode, with given selection (IndexBuffer or
first, count).
"""
if not self._linked:
raise RuntimeError('Cannot draw program if code has not been set')
# Init
gl.check_error('Check before draw')
mode = as_enum(mode)
# Draw
if len(selection) == 3:
# Selection based on indices
id_, gtype, count = selection
if count:
self._pre_draw()
ibuf = self._parser.get_object(id_)
ibuf.activate()
gl.glDrawElements(mode, count, as_enum(gtype), None)
ibuf.deactivate()
else:
# Selection based on start and count
first, count = selection
if count:
self._pre_draw()
gl.glDrawArrays(mode, first, count)
# Wrap up
gl.check_error('Check after draw')
self._post_draw()
class GlirBuffer(GlirObject):
_target = None
_usage = gl.GL_DYNAMIC_DRAW # STATIC_DRAW, STREAM_DRAW or DYNAMIC_DRAW
def create(self):
self._handle = gl.glCreateBuffer()
self._buffer_size = 0
self._bufferSubDataOk = False
def delete(self):
gl.glDeleteBuffer(self._handle)
def activate(self):
gl.glBindBuffer(self._target, self._handle)
def deactivate(self):
gl.glBindBuffer(self._target, 0)
def set_size(self, nbytes): # in bytes
if nbytes != self._buffer_size:
self.activate()
gl.glBufferData(self._target, nbytes, self._usage)
self._buffer_size = nbytes
def set_data(self, offset, data):
self.activate()
nbytes = data.nbytes
# Determine whether to check errors to try handling the ATI bug
check_ati_bug = ((not self._bufferSubDataOk) and
(gl.current_backend is gl.gl2) and
sys.platform.startswith('win'))
# flush any pending errors
if check_ati_bug:
gl.check_error('periodic check')
try:
gl.glBufferSubData(self._target, offset, data)
if check_ati_bug:
gl.check_error('glBufferSubData')
self._bufferSubDataOk = True # glBufferSubData seems to work
except Exception:
# This might be due to a driver error (seen on ATI), issue #64.
# We try to detect this, and if we can use glBufferData instead
if offset == 0 and nbytes == self._buffer_size:
gl.glBufferData(self._target, data, self._usage)
logger.debug("Using glBufferData instead of " +
"glBufferSubData (known ATI bug).")
else:
raise
class GlirVertexBuffer(GlirBuffer):
_target = gl.GL_ARRAY_BUFFER
class GlirIndexBuffer(GlirBuffer):
_target = gl.GL_ELEMENT_ARRAY_BUFFER
class GlirTexture(GlirObject):
_target = None
_types = {
np.dtype(np.int8): gl.GL_BYTE,
np.dtype(np.uint8): gl.GL_UNSIGNED_BYTE,
np.dtype(np.int16): gl.GL_SHORT,
np.dtype(np.uint16): gl.GL_UNSIGNED_SHORT,
np.dtype(np.int32): gl.GL_INT,
np.dtype(np.uint32): gl.GL_UNSIGNED_INT,
# np.dtype(np.float16) : gl.GL_HALF_FLOAT,
np.dtype(np.float32): gl.GL_FLOAT,
# np.dtype(np.float64) : gl.GL_DOUBLE
}
def create(self):
self._handle = gl.glCreateTexture()
self._shape_formats = 0 # To make setting size cheap
def delete(self):
gl.glDeleteTexture(self._handle)
def activate(self):
gl.glBindTexture(self._target, self._handle)
def deactivate(self):
gl.glBindTexture(self._target, 0)
# Taken from pygly
def _get_alignment(self, width):
"""Determines a textures byte alignment.
If the width isn't a power of 2
we need to adjust the byte alignment of the image.
The image height is unimportant
www.opengl.org/wiki/Common_Mistakes#Texture_upload_and_pixel_reads
"""
# we know the alignment is appropriate
# if we can divide the width by the
# alignment cleanly
# valid alignments are 1,2,4 and 8
# put 4 first, since it's the default
alignments = [4, 8, 2, 1]
for alignment in alignments:
if width % alignment == 0:
return alignment
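# Illustrative sketch: a 64-byte row keeps the default alignment of 4,
# a 30-byte row falls back to 2, and a 7-byte row falls through to 1.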
def set_wrapping(self, wrapping):
self.activate()
wrapping = [as_enum(w) for w in wrapping]
if len(wrapping) == 3:
GL_TEXTURE_WRAP_R = 32882
gl.glTexParameterf(self._target, GL_TEXTURE_WRAP_R, wrapping[0])
if len(wrapping) >= 2:
gl.glTexParameterf(self._target,
gl.GL_TEXTURE_WRAP_S, wrapping[-2])
gl.glTexParameterf(self._target, gl.GL_TEXTURE_WRAP_T, wrapping[-1])
def set_interpolation(self, min, mag):
self.activate()
min, mag = as_enum(min), as_enum(mag)
gl.glTexParameterf(self._target, gl.GL_TEXTURE_MIN_FILTER, min)
gl.glTexParameterf(self._target, gl.GL_TEXTURE_MAG_FILTER, mag)
# These should be auto-generated in _constants.py, but that doesn't seem
# to be happening. TODO: figure out why the C parser (in createglapi.py)
# is not extracting these constants.
# found the constant value at:
# http://docs.factorcode.org/content/word-GL_TEXTURE_1D,opengl.gl.html
# http://docs.factorcode.org/content/word-GL_SAMPLER_1D%2Copengl.gl.html
GL_SAMPLER_1D = gl.Enum('GL_SAMPLER_1D', 35677)
GL_TEXTURE_1D = gl.Enum('GL_TEXTURE_1D', 3552)
class GlirTexture1D(GlirTexture):
_target = GL_TEXTURE_1D
def set_size(self, shape, format, internalformat):
format = as_enum(format)
if internalformat is not None:
internalformat = as_enum(internalformat)
else:
internalformat = format
# Shape is width
if (shape, format, internalformat) != self._shape_formats:
self.activate()
self._shape_formats = shape, format, internalformat
glTexImage1D(self._target, 0, internalformat, format,
gl.GL_BYTE, shape[:1])
def set_data(self, offset, data):
self.activate()
shape, format, internalformat = self._shape_formats
x = offset[0]
# Get gtype
gtype = self._types.get(np.dtype(data.dtype), None)
if gtype is None:
raise ValueError("Type %r not allowed for texture" % data.dtype)
# Set alignment (width is nbytes_per_pixel * npixels_per_line)
alignment = self._get_alignment(data.shape[-1])
if alignment != 4:
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, alignment)
# Upload
glTexSubImage1D(self._target, 0, x, format, gtype, data)
# Set alignment back
if alignment != 4:
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 4)
class GlirTexture2D(GlirTexture):
_target = gl.GL_TEXTURE_2D
def set_size(self, shape, format, internalformat):
# Shape is height, width
format = as_enum(format)
internalformat = format if internalformat is None \
else as_enum(internalformat)
if (shape, format, internalformat) != self._shape_formats:
self._shape_formats = shape, format, internalformat
self.activate()
gl.glTexImage2D(self._target, 0, internalformat, format,
gl.GL_UNSIGNED_BYTE, shape[:2])
def set_data(self, offset, data):
self.activate()
shape, format, internalformat = self._shape_formats
y, x = offset
# Get gtype
gtype = self._types.get(np.dtype(data.dtype), None)
if gtype is None:
raise ValueError("Type %r not allowed for texture" % data.dtype)
# Set alignment (width is nbytes_per_pixel * npixels_per_line)
alignment = self._get_alignment(data.shape[-2]*data.shape[-1])
if alignment != 4:
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, alignment)
# Upload
gl.glTexSubImage2D(self._target, 0, x, y, format, gtype, data)
# Set alignment back
if alignment != 4:
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 4)
GL_SAMPLER_3D = gl.Enum('GL_SAMPLER_3D', 35679)
GL_TEXTURE_3D = gl.Enum('GL_TEXTURE_3D', 32879)
USE_TEX_3D = False
def _check_pyopengl_3D():
"""Helper to ensure users have OpenGL for 3D texture support (for now)"""
global USE_TEX_3D
USE_TEX_3D = True
try:
import OpenGL.GL as _gl
except ImportError:
raise ImportError('PyOpenGL is required for 3D texture support')
return _gl
def glTexImage3D(target, level, internalformat, format, type, pixels):
# Import from PyOpenGL
_gl = _check_pyopengl_3D()
border = 0
assert isinstance(pixels, (tuple, list)) # the only way we use this now
depth, height, width = pixels
_gl.glTexImage3D(target, level, internalformat,
width, height, depth, border, format, type, None)
def glTexImage1D(target, level, internalformat, format, type, pixels):
# Import from PyOpenGL
_gl = _check_pyopengl_3D()
border = 0
assert isinstance(pixels, (tuple, list)) # the only way we use this now
# pixels will be a tuple of the form (width, )
# we only need the first argument
width = pixels[0]
_gl.glTexImage1D(target, level, internalformat,
width, border, format, type, None)
def glTexSubImage1D(target, level, xoffset,
format, type, pixels):
# Import from PyOpenGL
_gl = _check_pyopengl_3D()
width = pixels.shape[:1]
# width will be a tuple of the form (w, )
# we need to take the first element (integer)
_gl.glTexSubImage1D(target, level, xoffset,
width[0], format, type, pixels)
def glTexSubImage3D(target, level, xoffset, yoffset, zoffset,
format, type, pixels):
# Import from PyOpenGL
_gl = _check_pyopengl_3D()
depth, height, width = pixels.shape[:3]
_gl.glTexSubImage3D(target, level, xoffset, yoffset, zoffset,
width, height, depth, format, type, pixels)
class GlirTexture3D(GlirTexture):
_target = GL_TEXTURE_3D
def set_size(self, shape, format, internalformat):
format = as_enum(format)
if internalformat is not None:
internalformat = as_enum(internalformat)
else:
internalformat = format
# Shape is depth, height, width
if (shape, format, internalformat) != self._shape_formats:
self.activate()
self._shape_formats = shape, format, internalformat
glTexImage3D(self._target, 0, internalformat, format,
gl.GL_BYTE, shape[:3])
def set_data(self, offset, data):
self.activate()
shape, format, internalformat = self._shape_formats
z, y, x = offset
# Get gtype
gtype = self._types.get(np.dtype(data.dtype), None)
if gtype is None:
raise ValueError("Type not allowed for texture")
# Set alignment (width is nbytes_per_pixel * npixels_per_line)
alignment = self._get_alignment(data.shape[-3] *
data.shape[-2] * data.shape[-1])
if alignment != 4:
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, alignment)
# Upload
glTexSubImage3D(self._target, 0, x, y, z, format, gtype, data)
# Set alignment back
if alignment != 4:
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 4)
class GlirRenderBuffer(GlirObject):
def create(self):
self._handle = gl.glCreateRenderbuffer()
self._shape_format = 0 # To make setting size cheap
def delete(self):
gl.glDeleteRenderbuffer(self._handle)
def activate(self):
gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, self._handle)
def deactivate(self):
gl.glBindRenderbuffer(gl.GL_RENDERBUFFER, 0)
def set_size(self, shape, format):
if isinstance(format, string_types):
format = GlirFrameBuffer._formats[format][1]
if (shape, format) != self._shape_format:
self._shape_format = shape, format
self.activate()
gl.glRenderbufferStorage(gl.GL_RENDERBUFFER, format,
shape[1], shape[0])
class GlirFrameBuffer(GlirObject):
# todo: on ES 2.0 -> gl.gl_RGBA4
_formats = {'color': (gl.GL_COLOR_ATTACHMENT0, gl.GL_RGBA),
'depth': (gl.GL_DEPTH_ATTACHMENT, gl.GL_DEPTH_COMPONENT16),
'stencil': (gl.GL_STENCIL_ATTACHMENT, gl.GL_STENCIL_INDEX8)}
def create(self):
#self._parser._fb_stack = [0] # To keep track of active FB
self._handle = gl.glCreateFramebuffer()
self._validated = False
def delete(self):
gl.glDeleteFramebuffer(self._handle)
def set_framebuffer(self, yes):
if yes:
self.activate()
if not self._validated:
self._validated = True
self._validate()
else:
self.deactivate()
def activate(self):
stack = self._parser.env.setdefault('fb_stack', [0])
if stack[-1] != self._handle:
stack.append(self._handle)
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self._handle)
def deactivate(self):
stack = self._parser.env.setdefault('fb_stack', [0])
while self._handle in stack:
stack.remove(self._handle)
gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, stack[-1])
def attach(self, attachment, buffer_id):
attachment = GlirFrameBuffer._formats[attachment][0]
self.activate()
if buffer_id == 0:
gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, attachment,
gl.GL_RENDERBUFFER, 0)
else:
buffer = self._parser.get_object(buffer_id)
if buffer == JUST_DELETED:
return
if buffer is None:
raise ValueError("Unknown buffer with id %i for attachement" %
buffer_id)
elif isinstance(buffer, GlirRenderBuffer):
buffer.activate()
gl.glFramebufferRenderbuffer(gl.GL_FRAMEBUFFER, attachment,
gl.GL_RENDERBUFFER, buffer.handle)
buffer.deactivate()
elif isinstance(buffer, GlirTexture2D):
buffer.activate()
# INFO: 0 is for mipmap level 0 (default) of the texture
gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, attachment,
gl.GL_TEXTURE_2D, buffer.handle, 0)
buffer.deactivate()
else:
raise ValueError("Invalid attachment: %s" % type(buffer))
self._validated = False
self.deactivate()
def _validate(self):
res = gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER)
if res == gl.GL_FRAMEBUFFER_COMPLETE:
return
_bad_map = {
0: 'Target not equal to GL_FRAMEBUFFER',
gl.GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
'FrameBuffer attachments are incomplete.',
gl.GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:
'No valid attachments in the FrameBuffer.',
gl.GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS:
'attachments do not have the same width and height.',
# gl.GL_FRAMEBUFFER_INCOMPLETE_FORMATS: \ # not in es 2.0
# 'Internal format of attachment is not renderable.'
gl.GL_FRAMEBUFFER_UNSUPPORTED:
'Combination of internal formats used by attachments is '
'not supported.',
}
raise RuntimeError(_bad_map.get(res, 'Unknown framebuffer error: %r.'
% res))
|
|
"""Guess the MIME type of a file.
This module defines two useful functions:
guess_type(url, strict=1) -- guess the MIME type and encoding of a URL.
guess_extension(type, strict=1) -- guess the extension for a given MIME type.
It also contains the following, for tuning the behavior:
Data:
knownfiles -- list of files to parse
inited -- flag set when init() has been called
suffix_map -- dictionary mapping suffixes to suffixes
encodings_map -- dictionary mapping suffixes to encodings
types_map -- dictionary mapping suffixes to types
Functions:
init([files]) -- parse a list of files, default knownfiles (on Windows, the
default values are taken from the registry)
read_mime_types(file) -- parse one file, return a dictionary or None
"""
import os
import sys
import posixpath
import urllib
try:
import _winreg
except ImportError:
_winreg = None
__all__ = [
"guess_type","guess_extension","guess_all_extensions",
"add_type","read_mime_types","init"
]
knownfiles = [
"/etc/mime.types",
"/etc/httpd/mime.types", # Mac OS X
"/etc/httpd/conf/mime.types", # Apache
"/etc/apache/mime.types", # Apache 1
"/etc/apache2/mime.types", # Apache 2
"/usr/local/etc/httpd/conf/mime.types",
"/usr/local/lib/netscape/mime.types",
"/usr/local/etc/httpd/conf/mime.types", # Apache 1.2
"/usr/local/etc/mime.types", # Apache 1.3
]
inited = False
_db = None
class MimeTypes:
"""MIME-types datastore.
This datastore can handle information from mime.types-style files
and supports basic determination of MIME type from a filename or
URL, and can guess a reasonable extension given a MIME type.
"""
def __init__(self, filenames=(), strict=True):
if not inited:
init()
self.encodings_map = encodings_map.copy()
self.suffix_map = suffix_map.copy()
self.types_map = ({}, {}) # dict for (non-strict, strict)
self.types_map_inv = ({}, {})
for (ext, type) in types_map.items():
self.add_type(type, ext, True)
for (ext, type) in common_types.items():
self.add_type(type, ext, False)
for name in filenames:
self.read(name, strict)
def add_type(self, type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
self.types_map[strict][ext] = type
exts = self.types_map_inv[strict].setdefault(type, [])
if ext not in exts:
exts.append(ext)
def guess_type(self, url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if
the type can't be guessed (no or unknown suffix) or a string
of the form type/subtype, usable for a MIME Content-type
header; and encoding is None for no encoding or the name of
the program used to encode (e.g. compress or gzip). The
mappings are table driven. Encoding suffixes are case
sensitive; type suffixes are first tried case sensitive, then
case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all
mapped to '.tar.gz'. (This is table-driven too, using the
dictionary suffix_map.)
Optional `strict' argument when False adds a bunch of commonly found,
but non-standard types.
"""
scheme, url = urllib.splittype(url)
if scheme == 'data':
# syntax of data URLs:
# dataurl := "data:" [ mediatype ] [ ";base64" ] "," data
# mediatype := [ type "/" subtype ] *( ";" parameter )
# data := *urlchar
# parameter := attribute "=" value
# type/subtype defaults to "text/plain"
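            # e.g. "data:image/png;base64,iVBORw..." -> ("image/png", None)
            #      "data:,Hello%20World"             -> ("text/plain", None)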
comma = url.find(',')
if comma < 0:
# bad data URL
return None, None
semi = url.find(';', 0, comma)
if semi >= 0:
type = url[:semi]
else:
type = url[:comma]
if '=' in type or '/' not in type:
type = 'text/plain'
return type, None # never compressed, so encoding is None
base, ext = posixpath.splitext(url)
while ext in self.suffix_map:
base, ext = posixpath.splitext(base + self.suffix_map[ext])
if ext in self.encodings_map:
encoding = self.encodings_map[ext]
base, ext = posixpath.splitext(base)
else:
encoding = None
types_map = self.types_map[True]
if ext in types_map:
return types_map[ext], encoding
elif ext.lower() in types_map:
return types_map[ext.lower()], encoding
elif strict:
return None, encoding
types_map = self.types_map[False]
if ext in types_map:
return types_map[ext], encoding
elif ext.lower() in types_map:
return types_map[ext.lower()], encoding
else:
return None, encoding
def guess_all_extensions(self, type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data stream,
but would be mapped to the MIME type `type' by guess_type().
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
type = type.lower()
extensions = self.types_map_inv[True].get(type, [])
if not strict:
for ext in self.types_map_inv[False].get(type, []):
if ext not in extensions:
extensions.append(ext)
return extensions
def guess_extension(self, type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension,
including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
extensions = self.guess_all_extensions(type, strict)
if not extensions:
return None
return extensions[0]
def read(self, filename, strict=True):
"""
Read a single mime.types-format file, specified by pathname.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
with open(filename) as fp:
self.readfp(fp, strict)
def readfp(self, fp, strict=True):
"""
Read a single mime.types-format file.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
while 1:
line = fp.readline()
if not line:
break
words = line.split()
for i in range(len(words)):
if words[i][0] == '#':
del words[i:]
break
if not words:
continue
type, suffixes = words[0], words[1:]
for suff in suffixes:
self.add_type(type, '.' + suff, strict)
def read_windows_registry(self, strict=True):
"""
Load the MIME types database from Windows registry.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
# Windows only
if not _winreg:
return
def enum_types(mimedb):
i = 0
while True:
try:
ctype = _winreg.EnumKey(mimedb, i)
except EnvironmentError:
break
try:
ctype = ctype.encode(default_encoding) # omit in 3.x!
except UnicodeEncodeError:
pass
else:
yield ctype
i += 1
default_encoding = sys.getdefaultencoding()
with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT,
r'MIME\Database\Content Type') as mimedb:
for ctype in enum_types(mimedb):
try:
with _winreg.OpenKey(mimedb, ctype) as key:
suffix, datatype = _winreg.QueryValueEx(key,
'Extension')
except EnvironmentError:
continue
if datatype != _winreg.REG_SZ:
continue
try:
suffix = suffix.encode(default_encoding) # omit in 3.x!
except UnicodeEncodeError:
continue
self.add_type(ctype, suffix, strict)
def guess_type(url, strict=True):
"""Guess the type of a file based on its URL.
Return value is a tuple (type, encoding) where type is None if the
type can't be guessed (no or unknown suffix) or a string of the
form type/subtype, usable for a MIME Content-type header; and
encoding is None for no encoding or the name of the program used
to encode (e.g. compress or gzip). The mappings are table
driven. Encoding suffixes are case sensitive; type suffixes are
first tried case sensitive, then case insensitive.
The suffixes .tgz, .taz and .tz (case sensitive!) are all mapped
to ".tar.gz". (This is table-driven too, using the dictionary
suffix_map).
Optional `strict' argument when false adds a bunch of commonly found, but
non-standard types.
"""
if _db is None:
init()
return _db.guess_type(url, strict)
def guess_all_extensions(type, strict=True):
"""Guess the extensions for a file based on its MIME type.
Return value is a list of strings giving the possible filename
extensions, including the leading dot ('.'). The extension is not
guaranteed to have been associated with any particular data
stream, but would be mapped to the MIME type `type' by
guess_type(). If no extension can be guessed for `type', None
is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_all_extensions(type, strict)
def guess_extension(type, strict=True):
"""Guess the extension for a file based on its MIME type.
Return value is a string giving a filename extension, including the
leading dot ('.'). The extension is not guaranteed to have been
associated with any particular data stream, but would be mapped to the
MIME type `type' by guess_type(). If no extension can be guessed for
`type', None is returned.
Optional `strict' argument when false adds a bunch of commonly found,
but non-standard types.
"""
if _db is None:
init()
return _db.guess_extension(type, strict)
def add_type(type, ext, strict=True):
"""Add a mapping between a type and an extension.
When the extension is already known, the new
type will replace the old one. When the type
is already known the extension will be added
to the list of known extensions.
If strict is true, information will be added to
list of standard types, else to the list of non-standard
types.
"""
if _db is None:
init()
return _db.add_type(type, ext, strict)
def init(files=None):
global suffix_map, types_map, encodings_map, common_types
global inited, _db
inited = True # so that MimeTypes.__init__() doesn't call us again
db = MimeTypes()
if files is None:
if _winreg:
db.read_windows_registry()
files = knownfiles
for file in files:
if os.path.isfile(file):
db.read(file)
encodings_map = db.encodings_map
suffix_map = db.suffix_map
types_map = db.types_map[True]
common_types = db.types_map[False]
# Make the DB a global variable now that it is fully initialized
_db = db
def read_mime_types(file):
try:
f = open(file)
except IOError:
return None
db = MimeTypes()
db.readfp(f, True)
return db.types_map[True]
def _default_mime_types():
global suffix_map
global encodings_map
global types_map
global common_types
suffix_map = {
'.tgz': '.tar.gz',
'.taz': '.tar.gz',
'.tz': '.tar.gz',
'.tbz2': '.tar.bz2',
}
encodings_map = {
'.gz': 'gzip',
'.Z': 'compress',
'.bz2': 'bzip2',
}
# Before adding new types, make sure they are either registered with IANA,
# at http://www.isi.edu/in-notes/iana/assignments/media-types
# or extensions, i.e. using the x- prefix
# If you add to these, please keep them sorted!
types_map = {
'.a' : 'application/octet-stream',
'.ai' : 'application/postscript',
'.aif' : 'audio/x-aiff',
'.aifc' : 'audio/x-aiff',
'.aiff' : 'audio/x-aiff',
'.au' : 'audio/basic',
'.avi' : 'video/x-msvideo',
'.bat' : 'text/plain',
'.bcpio' : 'application/x-bcpio',
'.bin' : 'application/octet-stream',
'.bmp' : 'image/x-ms-bmp',
'.c' : 'text/plain',
# Duplicates :(
'.cdf' : 'application/x-cdf',
'.cdf' : 'application/x-netcdf',
'.cpio' : 'application/x-cpio',
'.csh' : 'application/x-csh',
'.css' : 'text/css',
'.dll' : 'application/octet-stream',
'.doc' : 'application/msword',
'.dot' : 'application/msword',
'.dvi' : 'application/x-dvi',
'.eml' : 'message/rfc822',
'.eps' : 'application/postscript',
'.etx' : 'text/x-setext',
'.exe' : 'application/octet-stream',
'.gif' : 'image/gif',
'.gtar' : 'application/x-gtar',
'.h' : 'text/plain',
'.hdf' : 'application/x-hdf',
'.htm' : 'text/html',
'.html' : 'text/html',
'.ief' : 'image/ief',
'.jpe' : 'image/jpeg',
'.jpeg' : 'image/jpeg',
'.jpg' : 'image/jpeg',
'.js' : 'application/x-javascript',
'.ksh' : 'text/plain',
'.latex' : 'application/x-latex',
'.m1v' : 'video/mpeg',
'.man' : 'application/x-troff-man',
'.me' : 'application/x-troff-me',
'.mht' : 'message/rfc822',
'.mhtml' : 'message/rfc822',
'.mif' : 'application/x-mif',
'.mov' : 'video/quicktime',
'.movie' : 'video/x-sgi-movie',
'.mp2' : 'audio/mpeg',
'.mp3' : 'audio/mpeg',
'.mp4' : 'video/mp4',
'.mpa' : 'video/mpeg',
'.mpe' : 'video/mpeg',
'.mpeg' : 'video/mpeg',
'.mpg' : 'video/mpeg',
'.ms' : 'application/x-troff-ms',
'.nc' : 'application/x-netcdf',
'.nws' : 'message/rfc822',
'.o' : 'application/octet-stream',
'.obj' : 'application/octet-stream',
'.oda' : 'application/oda',
'.p12' : 'application/x-pkcs12',
'.p7c' : 'application/pkcs7-mime',
'.pbm' : 'image/x-portable-bitmap',
'.pdf' : 'application/pdf',
'.pfx' : 'application/x-pkcs12',
'.pgm' : 'image/x-portable-graymap',
'.pl' : 'text/plain',
'.png' : 'image/png',
'.pnm' : 'image/x-portable-anymap',
'.pot' : 'application/vnd.ms-powerpoint',
'.ppa' : 'application/vnd.ms-powerpoint',
'.ppm' : 'image/x-portable-pixmap',
'.pps' : 'application/vnd.ms-powerpoint',
'.ppt' : 'application/vnd.ms-powerpoint',
'.ps' : 'application/postscript',
'.pwz' : 'application/vnd.ms-powerpoint',
'.py' : 'text/x-python',
'.pyc' : 'application/x-python-code',
'.pyo' : 'application/x-python-code',
'.qt' : 'video/quicktime',
'.ra' : 'audio/x-pn-realaudio',
'.ram' : 'application/x-pn-realaudio',
'.ras' : 'image/x-cmu-raster',
'.rdf' : 'application/xml',
'.rgb' : 'image/x-rgb',
'.roff' : 'application/x-troff',
'.rtx' : 'text/richtext',
'.sgm' : 'text/x-sgml',
'.sgml' : 'text/x-sgml',
'.sh' : 'application/x-sh',
'.shar' : 'application/x-shar',
'.snd' : 'audio/basic',
'.so' : 'application/octet-stream',
'.src' : 'application/x-wais-source',
'.sv4cpio': 'application/x-sv4cpio',
'.sv4crc' : 'application/x-sv4crc',
'.swf' : 'application/x-shockwave-flash',
'.t' : 'application/x-troff',
'.tar' : 'application/x-tar',
'.tcl' : 'application/x-tcl',
'.tex' : 'application/x-tex',
'.texi' : 'application/x-texinfo',
'.texinfo': 'application/x-texinfo',
'.tif' : 'image/tiff',
'.tiff' : 'image/tiff',
'.tr' : 'application/x-troff',
'.tsv' : 'text/tab-separated-values',
'.txt' : 'text/plain',
'.ustar' : 'application/x-ustar',
'.vcf' : 'text/x-vcard',
'.wav' : 'audio/x-wav',
'.wiz' : 'application/msword',
'.wsdl' : 'application/xml',
'.xbm' : 'image/x-xbitmap',
'.xlb' : 'application/vnd.ms-excel',
# Duplicates :(
'.xls' : 'application/excel',
'.xls' : 'application/vnd.ms-excel',
'.xml' : 'text/xml',
'.xpdl' : 'application/xml',
'.xpm' : 'image/x-xpixmap',
'.xsl' : 'application/xml',
'.xwd' : 'image/x-xwindowdump',
'.zip' : 'application/zip',
}
# These are non-standard types, commonly found in the wild. They will
# only match if strict=0 flag is given to the API methods.
# Please sort these too
common_types = {
'.jpg' : 'image/jpg',
'.mid' : 'audio/midi',
'.midi': 'audio/midi',
'.pct' : 'image/pict',
'.pic' : 'image/pict',
'.pict': 'image/pict',
'.rtf' : 'application/rtf',
'.xul' : 'text/xul'
}
_default_mime_types()
if __name__ == '__main__':
import getopt
USAGE = """\
Usage: mimetypes.py [options] type
Options:
--help / -h -- print this message and exit
--lenient / -l -- additionally search some common, but non-standard
types.
--extension / -e -- guess extension instead of type
More than one type argument may be given.
"""
def usage(code, msg=''):
print USAGE
if msg: print msg
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hle',
['help', 'lenient', 'extension'])
except getopt.error, msg:
usage(1, msg)
strict = 1
extension = 0
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-l', '--lenient'):
strict = 0
elif opt in ('-e', '--extension'):
extension = 1
for gtype in args:
if extension:
guess = guess_extension(gtype, strict)
if not guess: print "I don't know anything about type", gtype
else: print guess
else:
guess, encoding = guess_type(gtype, strict)
if not guess: print "I don't know anything about type", gtype
else: print 'type:', guess, 'encoding:', encoding
|
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
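    # Minimal JSON-RPC 1.1 client used by the miner below. Usage sketch
    # (host, port and credentials here are placeholders, not defaults):
    #     rpc = BitcoinRPC('127.0.0.1', 8332, 'rpcuser', 'rpcpass')
    #     print rpc.getblockcount()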
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
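    # Swap the byte order of a 32-bit word, e.g. 0x12345678 -> 0x78563412.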
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
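    # Reverse the byte order within each 32-bit word of a byte string.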
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
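    # Reverse the order of the 32-bit words in a byte string.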
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
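        # The nonce occupies bytes 76..80 of the 80-byte block header, i.e.
        # hex characters 152..160 of the getwork 'data' string.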
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
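        # Scale the nonce range so that one work() pass takes roughly
        # settings['scantime'] seconds at the measured hash rate.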
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 56377
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for Management Interface used by iLO modules."""
import mock
import six
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as conductor_utils
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import inspect as ilo_inspect
from ironic.drivers.modules.ilo import power as ilo_power
from ironic import objects
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_ilo_info()
class IloInspectTestCase(db_base.DbTestCase):
def setUp(self):
super(IloInspectTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='fake_ilo', driver_info=INFO_DICT)
def test_get_properties(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
properties = ilo_common.REQUIRED_PROPERTIES.copy()
properties.update(ilo_common.SNMP_PROPERTIES)
properties.update(ilo_common.SNMP_OPTIONAL_PROPERTIES)
self.assertEqual(properties,
task.driver.inspect.get_properties())
@mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test_validate(self, driver_info_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.inspect.validate(task)
driver_info_mock.assert_called_once_with(task.node)
@mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
autospec=True)
@mock.patch.object(ilo_inspect, '_create_ports_if_not_exist',
spec_set=True, autospec=True)
@mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_inspect_essential_ok(self, get_ilo_object_mock,
power_mock,
get_essential_mock,
create_port_mock,
get_capabilities_mock):
ilo_object_mock = get_ilo_object_mock.return_value
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
capabilities = ''
result = {'properties': properties, 'macs': macs}
get_essential_mock.return_value = result
get_capabilities_mock.return_value = capabilities
power_mock.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.inspect.inspect_hardware(task)
self.assertEqual(properties, task.node.properties)
power_mock.assert_called_once_with(mock.ANY, task)
get_essential_mock.assert_called_once_with(task.node,
ilo_object_mock)
get_capabilities_mock.assert_called_once_with(task.node,
ilo_object_mock)
create_port_mock.assert_called_once_with(task, macs)
@mock.patch.object(ilo_inspect.LOG, 'warning',
spec_set=True, autospec=True)
@mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
autospec=True)
@mock.patch.object(ilo_inspect, '_create_ports_if_not_exist',
spec_set=True, autospec=True)
@mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_inspect_essential_ok_local_gb_zero(self, get_ilo_object_mock,
power_mock,
get_essential_mock,
create_port_mock,
get_capabilities_mock,
log_mock):
ilo_object_mock = get_ilo_object_mock.return_value
properties = {'memory_mb': '512', 'local_gb': 0,
'cpus': '1', 'cpu_arch': 'x86_64'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
capabilities = ''
result = {'properties': properties, 'macs': macs}
get_essential_mock.return_value = result
get_capabilities_mock.return_value = capabilities
power_mock.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
properties = task.node.properties
properties['local_gb'] = 10
task.node.properties = properties
task.node.save()
expected_properties = {'memory_mb': '512', 'local_gb': 10,
'cpus': '1', 'cpu_arch': 'x86_64'}
task.driver.inspect.inspect_hardware(task)
self.assertEqual(expected_properties, task.node.properties)
power_mock.assert_called_once_with(mock.ANY, task)
get_essential_mock.assert_called_once_with(task.node,
ilo_object_mock)
self.assertTrue(log_mock.called)
get_capabilities_mock.assert_called_once_with(task.node,
ilo_object_mock)
create_port_mock.assert_called_once_with(task, macs)
@mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
autospec=True)
@mock.patch.object(ilo_inspect, '_create_ports_if_not_exist',
spec_set=True, autospec=True)
@mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
autospec=True)
@mock.patch.object(conductor_utils, 'node_power_action', spec_set=True,
autospec=True)
@mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_inspect_essential_ok_power_off(self, get_ilo_object_mock,
power_mock,
set_power_mock,
get_essential_mock,
create_port_mock,
get_capabilities_mock):
ilo_object_mock = get_ilo_object_mock.return_value
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
capabilities = ''
result = {'properties': properties, 'macs': macs}
get_essential_mock.return_value = result
get_capabilities_mock.return_value = capabilities
power_mock.return_value = states.POWER_OFF
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.inspect.inspect_hardware(task)
self.assertEqual(properties, task.node.properties)
power_mock.assert_called_once_with(mock.ANY, task)
set_power_mock.assert_any_call(task, states.POWER_ON)
get_essential_mock.assert_called_once_with(task.node,
ilo_object_mock)
get_capabilities_mock.assert_called_once_with(task.node,
ilo_object_mock)
create_port_mock.assert_called_once_with(task, macs)
@mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
autospec=True)
@mock.patch.object(ilo_inspect, '_create_ports_if_not_exist',
spec_set=True, autospec=True)
@mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_inspect_essential_capabilities_ok(self, get_ilo_object_mock,
power_mock,
get_essential_mock,
create_port_mock,
get_capabilities_mock):
ilo_object_mock = get_ilo_object_mock.return_value
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
capability_str = 'sriov_enabled:true'
capabilities = {'sriov_enabled': 'true'}
result = {'properties': properties, 'macs': macs}
get_essential_mock.return_value = result
get_capabilities_mock.return_value = capabilities
power_mock.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.inspect.inspect_hardware(task)
expected_properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64',
'capabilities': capability_str}
self.assertEqual(expected_properties, task.node.properties)
power_mock.assert_called_once_with(mock.ANY, task)
get_essential_mock.assert_called_once_with(task.node,
ilo_object_mock)
get_capabilities_mock.assert_called_once_with(task.node,
ilo_object_mock)
create_port_mock.assert_called_once_with(task, macs)
@mock.patch.object(ilo_inspect, '_get_capabilities', spec_set=True,
autospec=True)
@mock.patch.object(ilo_inspect, '_create_ports_if_not_exist',
spec_set=True, autospec=True)
@mock.patch.object(ilo_inspect, '_get_essential_properties', spec_set=True,
autospec=True)
@mock.patch.object(ilo_power.IloPower, 'get_power_state', spec_set=True,
autospec=True)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_inspect_essential_capabilities_exist_ok(self, get_ilo_object_mock,
power_mock,
get_essential_mock,
create_port_mock,
get_capabilities_mock):
ilo_object_mock = get_ilo_object_mock.return_value
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64',
'somekey': 'somevalue'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
result = {'properties': properties, 'macs': macs}
capabilities = {'sriov_enabled': 'true'}
get_essential_mock.return_value = result
get_capabilities_mock.return_value = capabilities
power_mock.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.properties = {'capabilities': 'boot_mode:uefi'}
expected_capabilities = ('sriov_enabled:true,'
'boot_mode:uefi')
set1 = set(expected_capabilities.split(','))
task.driver.inspect.inspect_hardware(task)
end_capabilities = task.node.properties['capabilities']
set2 = set(end_capabilities.split(','))
self.assertEqual(set1, set2)
expected_properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64',
'capabilities': end_capabilities}
power_mock.assert_called_once_with(mock.ANY, task)
self.assertEqual(task.node.properties, expected_properties)
get_essential_mock.assert_called_once_with(task.node,
ilo_object_mock)
get_capabilities_mock.assert_called_once_with(task.node,
ilo_object_mock)
create_port_mock.assert_called_once_with(task, macs)
class TestInspectPrivateMethods(db_base.DbTestCase):
def setUp(self):
super(TestInspectPrivateMethods, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='fake_ilo', driver_info=INFO_DICT)
@mock.patch.object(ilo_inspect.LOG, 'info', spec_set=True, autospec=True)
@mock.patch.object(objects, 'Port', spec_set=True, autospec=True)
def test__create_ports_if_not_exist(self, port_mock, log_mock):
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
node_id = self.node.id
port_dict1 = {'address': 'aa:aa:aa:aa:aa:aa', 'node_id': node_id}
port_dict2 = {'address': 'bb:bb:bb:bb:bb:bb', 'node_id': node_id}
port_obj1, port_obj2 = mock.MagicMock(), mock.MagicMock()
port_mock.side_effect = [port_obj1, port_obj2]
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ilo_inspect._create_ports_if_not_exist(task, macs)
self.assertTrue(log_mock.called)
expected_calls = [mock.call(task.context, **port_dict1),
mock.call(task.context, **port_dict2)]
port_mock.assert_has_calls(expected_calls, any_order=True)
port_obj1.create.assert_called_once_with()
port_obj2.create.assert_called_once_with()
@mock.patch.object(ilo_inspect.LOG, 'warning',
spec_set=True, autospec=True)
@mock.patch.object(objects.Port, 'create', spec_set=True, autospec=True)
def test__create_ports_if_not_exist_mac_exception(self,
create_mock,
log_mock):
create_mock.side_effect = exception.MACAlreadyExists('f')
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ilo_inspect._create_ports_if_not_exist(task, macs)
self.assertEqual(2, log_mock.call_count)
def test__get_essential_properties_ok(self):
ilo_mock = mock.MagicMock(spec=['get_essential_properties'])
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
result = {'properties': properties, 'macs': macs}
ilo_mock.get_essential_properties.return_value = result
actual_result = ilo_inspect._get_essential_properties(self.node,
ilo_mock)
self.assertEqual(result, actual_result)
def test__get_essential_properties_fail(self):
ilo_mock = mock.MagicMock(
spec=['get_additional_capabilities', 'get_essential_properties'])
# Missing key: cpu_arch
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa', 'Port 2': 'bb:bb:bb:bb:bb:bb'}
result = {'properties': properties, 'macs': macs}
ilo_mock.get_essential_properties.return_value = result
result = self.assertRaises(exception.HardwareInspectionFailure,
ilo_inspect._get_essential_properties,
self.node,
ilo_mock)
self.assertEqual(
six.text_type(result),
("Failed to inspect hardware. Reason: Server didn't return the "
"key(s): cpu_arch"))
def test__get_essential_properties_fail_invalid_format(self):
ilo_mock = mock.MagicMock(
spec=['get_additional_capabilities', 'get_essential_properties'])
# Not a dict
properties = ['memory_mb', '512', 'local_gb', '10',
'cpus', '1']
macs = ['aa:aa:aa:aa:aa:aa', 'bb:bb:bb:bb:bb:bb']
capabilities = ''
result = {'properties': properties, 'macs': macs}
ilo_mock.get_essential_properties.return_value = result
ilo_mock.get_additional_capabilities.return_value = capabilities
self.assertRaises(exception.HardwareInspectionFailure,
ilo_inspect._get_essential_properties,
self.node, ilo_mock)
def test__get_essential_properties_fail_mac_invalid_format(self):
ilo_mock = mock.MagicMock(spec=['get_essential_properties'])
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64'}
# Not a dict
macs = 'aa:aa:aa:aa:aa:aa'
result = {'properties': properties, 'macs': macs}
ilo_mock.get_essential_properties.return_value = result
self.assertRaises(exception.HardwareInspectionFailure,
ilo_inspect._get_essential_properties,
self.node, ilo_mock)
def test__get_essential_properties_hardware_port_empty(self):
ilo_mock = mock.MagicMock(
spec=['get_additional_capabilities', 'get_essential_properties'])
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64'}
# Not a dictionary
macs = None
result = {'properties': properties, 'macs': macs}
capabilities = ''
ilo_mock.get_essential_properties.return_value = result
ilo_mock.get_additional_capabilities.return_value = capabilities
self.assertRaises(exception.HardwareInspectionFailure,
ilo_inspect._get_essential_properties,
self.node, ilo_mock)
def test__get_essential_properties_hardware_port_not_dict(self):
ilo_mock = mock.MagicMock(spec=['get_essential_properties'])
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1', 'cpu_arch': 'x86_64'}
# Not a dict
macs = 'aa:bb:cc:dd:ee:ff'
result = {'properties': properties, 'macs': macs}
ilo_mock.get_essential_properties.return_value = result
result = self.assertRaises(
exception.HardwareInspectionFailure,
ilo_inspect._get_essential_properties, self.node, ilo_mock)
@mock.patch.object(utils, 'get_updated_capabilities', spec_set=True,
autospec=True)
def test__get_capabilities_ok(self, capability_mock):
ilo_mock = mock.MagicMock(spec=['get_server_capabilities'])
capabilities = {'ilo_firmware_version': 'xyz'}
ilo_mock.get_server_capabilities.return_value = capabilities
cap = ilo_inspect._get_capabilities(self.node, ilo_mock)
self.assertEqual(cap, capabilities)
def test__validate_ok(self):
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '2', 'cpu_arch': 'x86_arch'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa'}
data = {'properties': properties, 'macs': macs}
valid_keys = ilo_inspect.IloInspect.ESSENTIAL_PROPERTIES
ilo_inspect._validate(self.node, data)
self.assertEqual(sorted(set(properties)), sorted(valid_keys))
def test__validate_essential_keys_fail_missing_key(self):
properties = {'memory_mb': '512', 'local_gb': '10',
'cpus': '1'}
macs = {'Port 1': 'aa:aa:aa:aa:aa:aa'}
data = {'properties': properties, 'macs': macs}
self.assertRaises(exception.HardwareInspectionFailure,
ilo_inspect._validate, self.node, data)
def test___create_supported_capabilities_dict(self):
capabilities = {}
expected = {}
for key in ilo_inspect.CAPABILITIES_KEYS:
capabilities.update({key: 'true'})
expected.update({key: 'true'})
capabilities.update({'unknown_property': 'true'})
cap = ilo_inspect._create_supported_capabilities_dict(capabilities)
self.assertEqual(expected, cap)
def test___create_supported_capabilities_dict_excluded_capability(self):
capabilities = {}
expected = {}
for key in ilo_inspect.CAPABILITIES_KEYS - {'has_ssd'}:
capabilities.update({key: 'true'})
expected.update({key: 'true'})
cap = ilo_inspect._create_supported_capabilities_dict(capabilities)
self.assertEqual(expected, cap)
|
|
from __future__ import absolute_import, division, print_function
import textwrap
from distutils.version import LooseVersion
from collections import Iterator
import sys
import traceback
from contextlib import contextmanager
import warnings
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core.common import is_datetime64tz_dtype
import toolz
from ..core import get_deps
from ..async import get_sync
PANDAS_VERSION = LooseVersion(pd.__version__)
if PANDAS_VERSION >= '0.19.0':
PANDAS_ge_0190 = True
from pandas.api.types import is_categorical_dtype, is_scalar # noqa
else:
PANDAS_ge_0190 = False
from pandas.core.common import is_categorical_dtype # noqa
is_scalar = pd.lib.isscalar # noqa
def shard_df_on_index(df, divisions):
""" Shard a DataFrame by ranges on its index
Examples
--------
>>> df = pd.DataFrame({'a': [0, 10, 20, 30, 40], 'b': [5, 4 ,3, 2, 1]})
>>> df
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
>>> shards = list(shard_df_on_index(df, [2, 4]))
>>> shards[0]
a b
0 0 5
1 10 4
>>> shards[1]
a b
2 20 3
3 30 2
>>> shards[2]
a b
4 40 1
>>> list(shard_df_on_index(df, []))[0] # empty case
a b
0 0 5
1 10 4
2 20 3
3 30 2
4 40 1
"""
if isinstance(divisions, Iterator):
divisions = list(divisions)
if not len(divisions):
yield df
else:
divisions = np.array(divisions)
df = df.sort_index()
index = df.index
if is_categorical_dtype(index):
index = index.as_ordered()
indices = index.searchsorted(divisions)
yield df.iloc[:indices[0]]
for i in range(len(indices) - 1):
yield df.iloc[indices[i]: indices[i + 1]]
yield df.iloc[indices[-1]:]
def unique(divisions):
""" Polymorphic unique function
>>> list(unique([1, 2, 3, 1, 2, 3]))
[1, 2, 3]
>>> unique(np.array([1, 2, 3, 1, 2, 3]))
array([1, 2, 3])
>>> unique(pd.Categorical(['Alice', 'Bob', 'Alice'], ordered=False))
[Alice, Bob]
Categories (2, object): [Alice, Bob]
"""
if isinstance(divisions, np.ndarray):
return np.unique(divisions)
if isinstance(divisions, pd.Categorical):
return pd.Categorical.from_codes(np.unique(divisions.codes),
divisions.categories,
divisions.ordered)
if isinstance(divisions, (tuple, list, Iterator)):
return tuple(toolz.unique(divisions))
raise NotImplementedError()
_META_TYPES = "meta : pd.DataFrame, pd.Series, dict, iterable, tuple, optional"
_META_DESCRIPTION = """\
An empty ``pd.DataFrame`` or ``pd.Series`` that matches the dtypes and
column names of the output. This metadata is necessary for many algorithms
in dask dataframe to work. For ease of use, some alternative inputs are
also available. Instead of a ``DataFrame``, a ``dict`` of ``{name: dtype}``
or iterable of ``(name, dtype)`` can be provided. Instead of a series, a
tuple of ``(name, dtype)`` can be used. If not provided, dask will try to
infer the metadata. This may lead to unexpected results, so providing
``meta`` is recommended. For more information, see
``dask.dataframe.utils.make_meta``.
"""
def insert_meta_param_description(*args, **kwargs):
"""Replace `$META` in docstring with param description.
If pad keyword is provided, will pad description by that number of
spaces (default is 8)."""
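    # Usage sketch (hypothetical function; `$META` in its docstring is
    # replaced by the parameter description above):
    #     @insert_meta_param_description(pad=12)
    #     def map_partitions(func, *args, **kwargs):
    #         """Apply ``func`` to every partition.
    #
    #         Parameters
    #         ----------
    #         $META
    #         """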
if not args:
return lambda f: insert_meta_param_description(f, **kwargs)
f = args[0]
if f.__doc__:
indent = " " * kwargs.get('pad', 8)
body = textwrap.wrap(_META_DESCRIPTION, initial_indent=indent,
subsequent_indent=indent, width=78)
descr = '{0}\n{1}'.format(_META_TYPES, '\n'.join(body))
f.__doc__ = f.__doc__.replace('$META', descr)
return f
@contextmanager
def raise_on_meta_error(funcname=None):
"""Reraise errors in this block to show metadata inference failure.
Parameters
----------
funcname : str, optional
If provided, will be added to the error message to indicate the
name of the method that failed.
"""
try:
yield
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = ''.join(traceback.format_tb(exc_traceback))
msg = ("Metadata inference failed{0}.\n\n"
"Original error is below:\n"
"------------------------\n"
"{1}\n\n"
"Traceback:\n"
"---------\n"
"{2}"
).format(" in `{0}`".format(funcname) if funcname else "",
repr(e), tb)
raise ValueError(msg)
def make_meta(x, index=None):
"""Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
>>> make_meta([('a', 'i8'), ('b', 'O')])
Empty DataFrame
Columns: [a, b]
Index: []
>>> make_meta(('a', 'f8'))
Series([], Name: a, dtype: float64)
>>> make_meta('i8')
1
"""
if hasattr(x, '_meta'):
return x._meta
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.iloc[0:0]
elif isinstance(x, pd.Index):
return x[0:0]
index = index if index is None else index[0:0]
if isinstance(x, dict):
return pd.DataFrame({c: pd.Series([], dtype=d)
for (c, d) in x.items()},
index=index)
elif isinstance(x, tuple) and len(x) == 2:
return pd.Series([], dtype=x[1], name=x[0], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError("Expected iterable of tuples of (name, dtype), "
"got {0}".format(x))
return pd.DataFrame({c: pd.Series([], dtype=d) for (c, d) in x},
columns=[c for c, d in x], index=index)
elif not hasattr(x, 'dtype') and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
# because it is implictly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except:
# Continue on to next check
pass
if is_pd_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
def _nonempty_index(idx):
typ = type(idx)
if typ is pd.RangeIndex:
return pd.RangeIndex(2, name=idx.name)
elif typ in (pd.Int64Index, pd.Float64Index):
return typ([1, 2], name=idx.name)
elif typ is pd.Index:
return pd.Index(['a', 'b'], name=idx.name)
elif typ is pd.DatetimeIndex:
start = '1970-01-01'
data = [start, start] if idx.freq is None else None
return pd.DatetimeIndex(data, start=start, periods=2, freq=idx.freq,
tz=idx.tz, name=idx.name)
elif typ is pd.PeriodIndex:
return pd.PeriodIndex(start='1970-01-01', periods=2, freq=idx.freq,
name=idx.name)
elif typ is pd.TimedeltaIndex:
start = np.timedelta64(1, 'D')
data = [start, start] if idx.freq is None else None
return pd.TimedeltaIndex(data, start=start, periods=2, freq=idx.freq,
name=idx.name)
elif typ is pd.CategoricalIndex:
element = idx.categories[0]
return pd.CategoricalIndex([element, element],
categories=idx.categories,
ordered=idx.ordered, name=idx.name)
elif typ is pd.MultiIndex:
levels = [_nonempty_index(i) for i in idx.levels]
labels = [[0, 0] for i in idx.levels]
return pd.MultiIndex(levels=levels, labels=labels, names=idx.names)
raise TypeError("Don't know how to handle index of "
"type {0}".format(type(idx).__name__))
_simple_fake_mapping = {
'b': np.bool_(True),
'V': np.void(b' '),
'M': np.datetime64('1970-01-01'),
'm': np.timedelta64(1),
'S': np.str_('foo'),
'a': np.str_('foo'),
'U': np.unicode_('foo'),
'O': 'foo'
}
def _scalar_from_dtype(dtype):
if dtype.kind in ('i', 'f', 'u'):
return dtype.type(1)
elif dtype.kind == 'c':
return dtype.type(complex(1, 0))
elif dtype.kind in _simple_fake_mapping:
o = _simple_fake_mapping[dtype.kind]
return o.astype(dtype) if dtype.kind in ('m', 'M') else o
else:
raise TypeError("Can't handle dtype: {0}".format(dtype))
def _nonempty_scalar(x):
if isinstance(x, (pd.Timestamp, pd.Timedelta, pd.Period)):
return x
elif np.isscalar(x):
dtype = x.dtype if hasattr(x, 'dtype') else np.dtype(type(x))
return _scalar_from_dtype(dtype)
else:
raise TypeError("Can't handle meta of type "
"'{0}'".format(type(x).__name__))
def is_pd_scalar(x):
"""Whether the object is a scalar type"""
return (np.isscalar(x) or isinstance(x, (pd.Timestamp, pd.Timedelta,
pd.Period)))
def _nonempty_series(s, idx):
dtype = s.dtype
if is_datetime64tz_dtype(dtype):
entry = pd.Timestamp('1970-01-01', tz=dtype.tz)
data = [entry, entry]
elif is_categorical_dtype(dtype):
entry = s.cat.categories[0]
data = pd.Categorical([entry, entry],
categories=s.cat.categories,
ordered=s.cat.ordered)
else:
entry = _scalar_from_dtype(dtype)
data = np.array([entry, entry], dtype=dtype)
return pd.Series(data, name=s.name, index=idx)
def meta_nonempty(x):
"""Create a nonempty pandas object from the given metadata.
Returns a pandas DataFrame, Series, or Index that contains two rows
of fake data.
"""
if isinstance(x, pd.Index):
return _nonempty_index(x)
elif isinstance(x, pd.Series):
idx = _nonempty_index(x.index)
return _nonempty_series(x, idx)
elif isinstance(x, pd.DataFrame):
idx = _nonempty_index(x.index)
data = {i: _nonempty_series(x.iloc[:, i], idx)
for i, c in enumerate(x.columns)}
res = pd.DataFrame(data, index=idx,
columns=np.arange(len(x.columns)))
res.columns = x.columns
return res
elif is_pd_scalar(x):
return _nonempty_scalar(x)
else:
raise TypeError("Expected Index, Series, DataFrame, or scalar, "
"got {0}".format(type(x).__name__))
###############################################################
# Testing
###############################################################
def _check_dask(dsk, check_names=True, check_dtypes=True):
import dask.dataframe as dd
if hasattr(dsk, 'dask'):
result = dsk.compute(get=get_sync)
if isinstance(dsk, dd.Index):
assert isinstance(result, pd.Index), type(result)
if check_names:
assert dsk.name == result.name
# cache
assert isinstance(dsk._meta, pd.Index), type(dsk._meta)
if check_names:
assert dsk._meta.name == result.name
if check_dtypes:
assert_dask_dtypes(dsk, result)
elif isinstance(dsk, dd.Series):
assert isinstance(result, pd.Series), type(result)
if check_names:
assert dsk.name == result.name, (dsk.name, result.name)
# cache
assert isinstance(dsk._meta, pd.Series), type(dsk._meta)
if check_names:
assert dsk._meta.name == result.name
if check_dtypes:
assert_dask_dtypes(dsk, result)
elif isinstance(dsk, dd.DataFrame):
assert isinstance(result, pd.DataFrame), type(result)
assert isinstance(dsk.columns, pd.Index), type(dsk.columns)
if check_names:
tm.assert_index_equal(dsk.columns, result.columns)
# cache
assert isinstance(dsk._meta, pd.DataFrame), type(dsk._meta)
if check_names:
tm.assert_index_equal(dsk._meta.columns, result.columns)
if check_dtypes:
assert_dask_dtypes(dsk, result)
elif isinstance(dsk, dd.core.Scalar):
assert (np.isscalar(result) or
isinstance(result, (pd.Timestamp, pd.Timedelta)))
if check_dtypes:
assert_dask_dtypes(dsk, result)
else:
msg = 'Unsupported dask instance {0} found'.format(type(dsk))
raise AssertionError(msg)
return result
return dsk
def _maybe_sort(a):
# sort by value, then index
try:
if isinstance(a, pd.DataFrame):
a = a.sort_values(by=a.columns.tolist())
else:
a = a.sort_values()
except (TypeError, IndexError, ValueError):
pass
return a.sort_index()
def assert_eq(a, b, check_names=True, check_dtypes=True,
check_divisions=True, check_index=True, **kwargs):
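    # Compute any dask arguments down to concrete pandas objects (checking
    # names, dtypes and cached metadata on the way) and compare the results;
    # DataFrames and Series are sorted by value and index before comparing.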
if check_divisions:
assert_divisions(a)
assert_divisions(b)
assert_sane_keynames(a)
assert_sane_keynames(b)
a = _check_dask(a, check_names=check_names, check_dtypes=check_dtypes)
b = _check_dask(b, check_names=check_names, check_dtypes=check_dtypes)
if not check_index:
a = a.reset_index(drop=True)
b = b.reset_index(drop=True)
if isinstance(a, pd.DataFrame):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_frame_equal(a, b, **kwargs)
elif isinstance(a, pd.Series):
a = _maybe_sort(a)
b = _maybe_sort(b)
tm.assert_series_equal(a, b, check_names=check_names, **kwargs)
elif isinstance(a, pd.Index):
tm.assert_index_equal(a, b, **kwargs)
else:
if a == b:
return True
else:
if np.isnan(a):
assert np.isnan(b)
else:
assert np.allclose(a, b)
return True
def eq(*args, **kwargs):
    warnings.warn('eq is deprecated. Use assert_eq instead', UserWarning)
assert_eq(*args, **kwargs)
def assert_dask_graph(dask, label):
if hasattr(dask, 'dask'):
dask = dask.dask
assert isinstance(dask, dict)
for k in dask:
if isinstance(k, tuple):
k = k[0]
if k.startswith(label):
return True
else:
msg = "given dask graph doesn't contan label: {0}"
raise AssertionError(msg.format(label))
def assert_divisions(ddf):
if not hasattr(ddf, 'divisions'):
return
if not hasattr(ddf, 'index'):
return
if not ddf.known_divisions:
return
index = lambda x: x if isinstance(x, pd.Index) else x.index
results = get_sync(ddf.dask, ddf._keys())
for i, df in enumerate(results[:-1]):
if len(df):
assert index(df).min() >= ddf.divisions[i]
assert index(df).max() < ddf.divisions[i + 1]
if len(results[-1]):
assert index(results[-1]).min() >= ddf.divisions[-2]
assert index(results[-1]).max() <= ddf.divisions[-1]
def assert_sane_keynames(ddf):
if not hasattr(ddf, 'dask'):
return
for k in ddf.dask.keys():
while isinstance(k, tuple):
k = k[0]
assert isinstance(k, (str, bytes))
assert len(k) < 100
assert ' ' not in k
if sys.version_info[0] >= 3:
assert k.split('-')[0].isidentifier()
def assert_dask_dtypes(ddf, res, numeric_equal=True):
"""Check that the dask metadata matches the result.
If `numeric_equal`, integer and floating dtypes compare equal. This is
useful due to the implicit conversion of integer to floating upon
encountering missingness, which is hard to infer statically."""
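    # e.g. with numeric_equal=True an int64 meta and a float64 result are
    # treated as matching dtypes.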
eq_types = {'O', 'S', 'U', 'a'} # treat object and strings alike
if numeric_equal:
eq_types.update(('i', 'f'))
if isinstance(res, pd.DataFrame):
for col, a, b in pd.concat([ddf._meta.dtypes, res.dtypes],
axis=1).itertuples():
assert (a.kind in eq_types and b.kind in eq_types) or (a == b)
elif isinstance(res, (pd.Series, pd.Index)):
a = ddf._meta.dtype
b = res.dtype
assert (a.kind in eq_types and b.kind in eq_types) or (a == b)
else:
if hasattr(ddf._meta, 'dtype'):
a = ddf._meta.dtype
if not hasattr(res, 'dtype'):
assert np.isscalar(res)
b = np.dtype(type(res))
else:
b = res.dtype
assert (a.kind in eq_types and b.kind in eq_types) or (a == b)
else:
assert type(ddf._meta) == type(res)
def assert_max_deps(x, n, eq=True):
dependencies, dependents = get_deps(x.dask)
if eq:
assert max(map(len, dependencies.values())) == n
else:
assert max(map(len, dependencies.values())) <= n
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" @todo docstring me """
# @todo break into phases:
# 1. Download, report bad urls
# 2. Check hashes, report bad hashes
# 3. Unzip, report by extract_dirs
from __future__ import (
absolute_import,
division,
print_function,
# unicode_literals
)
import datetime
import email.utils
import glob
import hashlib
import io
import logging
import json
import re
import os
# import pprint
# @todo implement progressbar
import shutil
import ssl
import stat
import subprocess
import sys
# import time
# import urllib2
# import zipfile
from six.moves.urllib.parse import urlsplit, urlunsplit # pylint: disable=import-error
import jsoncomment
import urllib3
import urllib3.contrib.pyopenssl
import certifi
import requests
DOWNLOADER = "urllib3"
if DOWNLOADER == "urllib3":
urllib3.contrib.pyopenssl.inject_into_urllib3()
# UA = "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36"
# SF_UA = "Scoop/1.0 (+http://scoop.sh/) (Windows NT 6.1; WOW64)"
UA = "Scoop/1.0 (+http://scoop.sh/) (Windows NT 6.1; WOW64)"
# UAS = {'sourceforge.net': SF_UA}
UAS = {}
NO_REFERRERS = ["sourceforge.net"]
temp_drive = os.environ.get("TEMP_DRIVE")
if not temp_drive:
temp_drive = "l:"
TMP_DIR = "%s/tmp" % temp_drive
# https://stackoverflow.com/a/4829285/1432614
# pylint: disable=W0613 # Unused argument 'func' (unused-argument)
def on_rm_error(func, path, exc_info):
"""@todo docstring me"""
# path contains the path of the file that couldn't be removed
# let's just assume that it's read-only and unlink it.
os.chmod(path, stat.S_IWRITE)
return os.unlink(path)
class CheckURLs:
"""@todo docstring me"""
def __init__(self):
"""@todo docstring me"""
self.check_https = True
self.check_hash = True
self.check_exists = True
self.dir = ""
self.file = ""
self.basename = ""
self.data = ""
self.logger = None
self.tmp_file = ""
self.tmp_dir = ""
self.zip_file = ""
self.zip_dir = ""
self.head_file = ""
self.head_values = {}
def is_https(self, url):
"""@todo docstring me"""
scheme = self.get_scheme(url).lower()
return scheme == "https"
def is_http_or_https(self, url):
"""@todo docstring me"""
scheme = self.get_scheme(url).lower()
return re.search(r"^(https|http)$", scheme, re.I) is not None
@staticmethod
def get_scheme(url):
"""@todo docstring me"""
parts = list(urlsplit(url))
if parts:
return parts[0]
logging.warning("Cannot split %s", url)
return None
@staticmethod
def get_host(url):
"""@todo docstring me"""
parts = list(urlsplit(url))
if len(parts) > 1:
return parts[1]
logging.warning("Cannot split %s", url)
return None
def is_sourceforge(self, url):
"""@todo docstring me"""
host = self.get_host(url)
return re.search(r"sourceforge\.net$", host, re.I) is not None
def get_ua(self, url):
"""@todo docstring me"""
host = self.get_host(url)
if not host:
logging.warning("Cannot split %s", url)
return UA
for regex in UAS:
if re.search(re.escape(regex), host, re.I):
return UAS[regex]
return UA
def change_scheme(self, url, new_scheme="https"):
"""@todo docstring me"""
if not self.is_http_or_https(url):
return url
parts = list(urlsplit(url))
if not parts:
logging.warning("Cannot split %s", url)
return url
if parts[0] == new_scheme:
return url
parts[0] = new_scheme
return urlunsplit(parts)
@staticmethod
def get_referer(url):
"""@todo docstring me"""
parts = list(urlsplit(url))
if len(parts) < 2:
logging.warning("Cannot split %s", url)
return url
for referer in NO_REFERRERS:
if re.search(re.escape(referer), parts[1], re.I):
return ""
m = re.search(r"(.*/)[^/]+$", parts[2])
if m:
base = m.group(1)
else:
base = "/"
return urlunsplit([parts[0], parts[1], base, "", ""])
def get_filenames(self, url, key):
"""@todo docstring me"""
INVALID_FILE_CHARS = '<>"|?*:/\\%'
m = re.search(r"/([^/]+)/?$", url)
if not m:
logging.warning("%s: no / in url: %s", key, url)
return False
self.tmp_dir = os.path.join(TMP_DIR, "~", self.basename)
file = m.group(1)
for c in INVALID_FILE_CHARS:
file = file.replace(c, "-")
self.tmp_file = os.path.join(self.tmp_dir, file)
self.head_file = os.path.join(self.tmp_dir, "." + file)
(basename, _) = os.path.splitext(file)
if basename == file:
self.zip_dir = ""
self.zip_file = ""
return True
# if re.search('\.zip', extension, re.I):
self.zip_dir = os.path.join(self.tmp_dir, basename)
self.zip_file = self.tmp_file
# logging.info('self.zip_dir="%s" self.zip_file="%s"', self.zip_dir, self.zip_file)
# else:
# self.zip_dir = ''
# self.zip_file = ''
return True
@staticmethod
def rmtree(sdir):
"""@todo docstring me"""
def _on_rm_error(func, path, exc_info):
"""@todo docstring me"""
logging.error("path=%s", path)
# path contains the path of the file that couldn't be removed
# let's just assume that it's read-only and unlink it.
os.chmod(path, stat.S_IWRITE)
return os.unlink(path)
# https://stackoverflow.com/a/4829285/1432614
return shutil.rmtree(sdir, onerror=_on_rm_error)
def save(self, url, data, key):
"""@todo docstring me"""
if re.search(r"(autoupdate|checkver|github|homepage|license)", key, re.I):
return False
try:
if os.path.exists(self.tmp_dir):
self.rmtree(self.tmp_dir)
if not os.path.exists(self.tmp_dir):
os.makedirs(self.tmp_dir)
logging.debug("%s: Saving %s bytes to %s", key, len(data), self.tmp_file)
self.save_headers()
with io.open(self.tmp_file, "wb") as f:
f.write(data)
if "epoch" in self.head_values:
os.utime(self.tmp_file, (self.head_values["epoch"], self.head_values["epoch"]))
except Exception as e:
logging.exception(e)
return False
return True
def save_headers(self):
"""@todo docstring me"""
if not os.path.exists(self.tmp_dir):
os.makedirs(self.tmp_dir)
# logging.debug("Saving %s", self.head_file)
jsons = json.dumps(self.head_values, sort_keys=True, indent=4, separators=(",", ": "))
with open(self.head_file, "w") as f:
f.write(jsons)
def download(self, url, headers):
"""@todo docstring me"""
status = None
data = None
if DOWNLOADER == "urllib3":
# retries = urllib3.util.retry.Retry(connect=1, read=1)
http = urllib3.PoolManager(
# retries=retries,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=certifi.where(),
)
r = http.request("HEAD", url, headers=headers)
self.head_values = {}
h = r.getheaders()
            for k, v in h.items():
self.head_values[k] = v
# logging.debug(self.head_values)
last_modified = r.getheader("Last-Modified")
# logging.debug('last_modified=%s', last_modified)
etag = r.getheader("ETag")
# logging.debug('etag=%s', etag)
if last_modified or etag:
epoch = 0
if last_modified:
# https://stackoverflow.com/a/1472336/1432614
dt = datetime.datetime(*email.utils.parsedate(last_modified)[:6])
# logging.debug('dt=%s', dt)
# https://stackoverflow.com/a/11743262/1432614
epoch = (dt - datetime.datetime(1970, 1, 1)).total_seconds()
epoch = int(epoch)
# logging.debug('epoch=%s', epoch)
self.head_values["epoch"] = epoch
if os.path.isfile(self.head_file):
with open(self.head_file) as f:
old_values = json.load(f)
if "epoch" in old_values:
if old_values["epoch"] == epoch:
self.save_headers()
status = 304 # not modified
if os.path.isfile(self.tmp_file):
with open(self.tmp_file, "rb") as f:
data = f.read()
return (status, data)
if "etag" in old_values:
if old_values["ETag"] == etag:
self.save_headers()
status = 304 # not modified
if os.path.isfile(self.tmp_file):
with open(self.tmp_file, "rb") as f:
data = f.read()
return (status, data)
self.save_headers()
r = http.request("GET", url, headers=headers)
status = r.status
data = r.data
if DOWNLOADER == "requests":
r = requests.get(url, headers=headers)
status = r.status_code
data = r.content
# if DOWNLOADER == 'urllib2':
# request = urllib2.Request(url, headers=headers)
# data = urllib2.urlopen(request).read()
# status = request.getcode()
return (status, data)
def unzip(self, url, data, key):
"""@todo docstring me"""
if not self.zip_file:
return True
if not os.path.exists(self.zip_file):
return True
logging.debug("%s: Unzipping %s to %s", key, self.zip_file, self.zip_dir)
if os.path.exists(self.zip_dir):
self.rmtree(self.zip_dir)
if not os.path.exists(self.zip_dir):
# logging.debug("Creating directory '%s'", self.zip_dir)
os.makedirs(self.zip_dir)
cmd = '7z x -bb0 -y -o"%s" "%s">NUL' % (self.zip_dir, self.zip_file)
logging.debug(cmd)
os.system(cmd)
return True
# try:
# z = zipfile.ZipFile(self.zip_file, 'r')
# # https://stackoverflow.com/a/9813471/1432614
# for f in z.infolist():
# name, date_time = f.filename, f.date_time
# # logging.debug("name='%s'", name)
# name = os.path.join(self.zip_dir, name)
# if not os.path.exists(os.path.dirname(name)):
# # logging.debug("Creating directory '%s'", os.path.dirname(name))
# os.makedirs(os.path.dirname(name))
# # logging.debug("Creating '%s'", name)
# z.extract(f, self.zip_dir)
# # with open(name, 'w') as outFile:
# # outFile.write(z.open(f).read())
# date_time = time.mktime(date_time + (0, 0, -1))
# if os.path.exists(name):
# # logging.debug("Setting time")
# os.utime(name, (date_time, date_time))
# else:
# pass
# # logging.debug("Cannot set time as file not found: %s", name)
# # z.extractall(self.zip_dir)
# except Exception as e:
# logging.exception(e)
# finally:
# z.close()
# return True
def get(self, url, key="", whine=True):
"""@todo docstring me"""
ssl_errors = ["MaxRetryError", "SSLError"]
if re.search(r"(autoupdate|checkver|github|homepage|license)", key, re.I):
return False
if not self.is_http_or_https(url):
logging.debug("%s %s: %s", key, "not http or https", url)
return False
try:
logging.debug("%s: Retrieving %s", key, url)
ua = self.get_ua(url)
headers = {"User-Agent": ua}
referer = self.get_referer(url)
if referer:
headers["Referer"] = referer
self.get_filenames(url, key)
(status, data) = self.download(url, headers)
if status == 304:
logging.debug("%s: Status %s: %s", key, status, url)
return data
if status < 200 or status > 299:
if whine:
logging.error("%s: Error %s: %s", key, status, url)
return False
logging.debug("%s: Status %s: %s", key, status, url)
self.save(url, data, key)
self.unzip(url, data, key)
return data
except Exception as exc:
reason = ""
# pylint: disable=E1101 # Instance of 'Exception' has no 'reason' member (no-member)
if hasattr(exc, "reason"):
reason = exc.reason
elif hasattr(exc, "code"):
reason = exc.code
if type(exc).__name__ in ssl_errors:
logging.debug("%s: Exception %s: %s (%s)", key, type(exc).__name__, reason, url)
return False
logging.error("%s: Exception %s: %s (%s)", key, type(exc).__name__, reason, url)
logging.exception(exc)
return False
def check_url(self, url, key, _hash="", desc=""):
"""@todo docstring me"""
hashmap = {
32: "md5",
40: "sha1",
64: "sha256",
128: "sha512",
}
if desc:
key += "." + desc
logging.debug("%s: %s (%s)", key, url, _hash)
if not _hash and self.is_https(url) and not self.check_exists:
return False
if self.check_https and not self.is_https(url):
new_url = self.change_scheme(url)
else:
new_url = url
content = False
if self.check_exists:
retry = self.is_https(new_url)
else:
retry = new_url != url and _hash
content = self.get(new_url, key, not retry)
if retry and not content:
if self.check_exists:
new_url = self.change_scheme(url, "http")
else:
new_url = url
content = self.get(new_url, key)
if not content:
logging.debug("%s: No content for %s", key, new_url)
return False
if self.check_hash and _hash:
logging.debug("%s: Verifying hash %s", key, _hash)
m = re.search(r":([^:]+)$", _hash)
if m:
_hash = m.group(1).strip()
hashlen = len(_hash)
if hashlen not in hashmap:
logging.error("%s: Unknown hash type %s: %s", key, hashlen, _hash)
else:
h = hashlib.new(hashmap[hashlen])
h.update(content)
chash = h.hexdigest().lower()
if chash == _hash.lower():
logging.debug("%s: Hashes match: %s", key, chash)
else:
output = subprocess.check_output(["file", self.tmp_file])
if re.search(r"html", output, re.I) is None:
logging.error("%s: Found %s, expected %s (%s)", key, chash, _hash, url)
for line in output.splitlines():
line = line.split()
if line:
logging.error(line)
self.data = re.sub(_hash, chash, self.data)
if new_url == url:
return ""
old_data = self.data
logging.error("%s: Changing\n%s to\n%s", key, url, new_url)
self.data = re.sub(re.escape(url), new_url, self.data)
if self.data != old_data:
logging.debug("%s: Returning %s", key, self.get_scheme(new_url))
return self.get_scheme(new_url)
logging.debug('%s: Returning ""', key)
return ""
def check_urls(self, url_or_list, key, _hash="", desc=""):
"""@todo docstring me"""
# if desc:
# key += '.' + desc
if isinstance(url_or_list, list):
schemes = []
for index, url in enumerate(url_or_list):
hash_value = ""
if isinstance(_hash, list):
if len(_hash) > index:
hash_value = _hash[index]
schemes.append(self.check_url(url, key, hash_value, desc))
return schemes
return self.check_url(url_or_list, key, _hash, desc)
def process(self, j, key, _hash="", desc=""):
"""@todo docstring me"""
if key not in j:
return False
if isinstance(j[key], dict):
if "url" not in j[key]:
return False
if not _hash and self.check_hash and "hash" in j[key]:
_hash = j[key]["hash"]
return self.check_urls(j[key]["url"], key, _hash, desc)
return self.check_urls(j[key], key, _hash, desc)
def _fix_scheme(self, url, key, scheme="https", desc=""):
"""@todo docstring me"""
if desc:
key += "." + desc
if isinstance(scheme, list):
logging.info("_fix_scheme: scheme=%s", ",".join(scheme))
scheme = scheme[0]
logging.info("_fix_scheme: scheme=%s", scheme)
new_url = self.change_scheme(url, scheme)
old_data = self.data
if new_url != url:
self.data = re.sub(re.escape(url), new_url, self.data)
if self.data != old_data:
logging.debug("%s: Changing %s to %s", key, url, new_url)
return self.data != old_data
def _fix_schemes(self, url_or_list, key, scheme="https", desc=""):
"""@todo docstring me"""
# if desc:
# key += '.' + desc
if isinstance(url_or_list, list):
updated = False
for index, url in enumerate(url_or_list):
if isinstance(scheme, list):
logging.info("_fix_schemes: scheme=%s", ",".join(scheme))
if index < len(scheme):
scheme = scheme[index]
else:
scheme = scheme[0]
logging.info("_fix_schemes: scheme=%s", scheme)
updated |= self._fix_scheme(url, key, scheme, desc)
return updated
# logging.debug('scheme=%s', scheme)
return self._fix_scheme(url_or_list, key, scheme, desc)
def fix_schemes(self, j, key, scheme="https", desc=""):
"""@todo docstring me"""
if key not in j:
return False
if isinstance(j[key], dict):
if "url" not in j[key]:
return False
logging.info("fix_schemes: scheme=%s", scheme)
return self._fix_schemes(j[key]["url"], key, scheme, desc)
logging.info("fix_schemes: scheme=%s", scheme)
return self._fix_schemes(j[key], key, scheme, desc)
@staticmethod
def schemes_changed(schemes):
"""@todo docstring me"""
if isinstance(schemes, list):
for scheme in schemes:
if scheme:
return True
return False
return schemes
def run(self):
"""@todo docstring me"""
if len(sys.argv) >= 3:
filespec = sys.argv[2]
else:
filespec = "*.json"
if len(sys.argv) >= 2:
dir_name = sys.argv[1]
else:
dir_name = "."
self.dir = dir_name
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
logger2 = logging.getLogger("urllib3")
logger2.setLevel(logging.CRITICAL)
parser = jsoncomment.JsonComment(json)
if not os.path.isdir(TMP_DIR):
os.makedirs(TMP_DIR)
mask = dir_name + "/" + filespec
logging.info("==> Processing dir %s", mask)
for file in glob.glob(mask):
self.file = os.path.basename(file)
self.basename = os.path.splitext(self.file)[0]
logging.info("--> Processing file %s", file)
with io.open(file, "r", encoding="utf-8") as f:
self.data = f.read()
orig_data = self.data
j = parser.loads(self.data)
_hash = ""
if self.check_hash and "hash" in j:
_hash = j["hash"]
scheme = self.process(j, "homepage")
scheme = self.process(j, "license")
scheme = self.process(j, "url", _hash)
if self.schemes_changed(scheme):
logging.info("run: url: scheme=%s", scheme)
self.fix_schemes(j, "autoupdate", scheme)
scheme = self.process(j, "checkver")
if "checkver" in j:
if isinstance(j["checkver"], dict):
scheme = self.process(j["checkver"], "github")
if "architecture" in j:
scheme = self.process(j["architecture"], "32bit", "", "architecture")
if self.schemes_changed(scheme):
logging.info("run: architecture.32bit: scheme=%s", scheme)
if "autoupdate" in j:
if "architecture" in j["autoupdate"]:
self.fix_schemes(j["autoupdate"]["architecture"], "32bit", scheme, "autoupdate.architecture")
scheme = self.process(j["architecture"], "64bit", "", "architecture")
if self.schemes_changed(scheme):
logging.info("run: architecture.64bit: scheme=%s", scheme)
if "autoupdate" in j:
if "architecture" in j["autoupdate"]:
self.fix_schemes(j["autoupdate"]["architecture"], "64bit", scheme, "autoupdate.architecture")
if self.data != orig_data:
logging.info("Updating %s", file)
if os.path.isfile(file + ".bak"):
os.remove(file + ".bak")
os.rename(file, file + ".bak")
with io.open(file, "w", encoding="utf-8") as f:
f.write(self.data)
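# Typical invocation (a sketch inferred from run() above; the script file name
# below is illustrative only):
#   python check_urls.py <bucket-dir> [filespec]
# e.g. check every manifest under ./bucket:
#   python check_urls.py bucket "*.json"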
if __name__ == "__main__":
    checker = CheckURLs()
    checker.run()
    sys.exit(0)
|
|
from pyramid_handlers import action
from pyramid.response import Response
import pyramid.httpexceptions as exc
from sqlalchemy import or_
import datetime as dt
import json
from webapp.libapp.models.calendar import Calendar
from webapp.libapp.models.event import Event
from webapp.libapp.models.calendar_permission import CalendarPermission
from webapp.libapp.models.user_account import UserAccount
from webapp.libapp.helpers.helper import isInt
class CalendarController(object):
__autoexpose__ = None
renderer_path = 'modules/calendar/templates/'
weekdays = {
0: 'Monday',
1: 'Tuesday',
2: 'Wednesday',
3: 'Thursday',
4: 'Friday',
5: 'Saturday',
6: 'Sunday',
}
months = {
1: "January",
2: "February",
3: "March",
4: "April",
5: "May",
6: "June",
7: "July",
8: "August",
9: "September",
10: "October",
11: "November",
12: "December",
}
def __init__(self, request):
self.request = request
@action(renderer=renderer_path + 'indexView.jinja2', permission='view_acl')
def index(self):
        guest_calendars = [row.calendar_id for row in
                           self.request.db.query(CalendarPermission.calendar_id)
                           .filter_by(user_id=self.request.user.user_id).all()]
if len(guest_calendars) > 0:
calendars_set = self.request.db.query(Calendar)\
.filter(or_(
Calendar.owner == self.request.user.user_id,
Calendar.calendar_id.in_(guest_calendars)))\
.order_by(Calendar.calendar_id)\
.all()
else:
calendars_set = self.request.db.query(Calendar)\
.filter(or_(
Calendar.owner == self.request.user.user_id))\
.order_by(Calendar.calendar_id)\
.all()
return {
'calendars_set': calendars_set,
}
@action(renderer=renderer_path + 'editCalendarView.jinja2', permission='view_acl')
def editCalendar(self):
action = self.request.params.get('action')
if action and action == 'edit':
calendar = self.request.db.query(Calendar).filter_by(calendar_id=self.request.params.get('calendar_id')).first()
if not CalendarPermission.check(self.request.db, calendar.calendar_id, self.request.user.user_id, 'edit'):
return exc.HTTPForbidden()
from webapp.libapp.helpers.helper import hash_random
self.request.session['edit_calendar_{}'.format(calendar.calendar_id)] = (hash_random(calendar.calendar_id))
users = self.request.db.query(UserAccount).filter(UserAccount.user_id != calendar.owner).all()
user_permissions = self.request.db.query(CalendarPermission).filter_by(calendar_id=calendar.calendar_id).all()
def check_permission(user_id, perm_type):
for perm in user_permissions:
if perm.user_id == user_id and perm.perm_type == perm_type:
return True
return False
return {
'calendar': calendar,
'unique_id': self.request.session['edit_calendar_{}'.format(calendar.calendar_id)],
'users': users,
'user_permissions': user_permissions,
'check_permission': check_permission
}
elif self.request.method == 'POST':
calendar_params = {
'name': self.request.params.get('name'),
'color': self.request.params.get('color')
}
session = self.request.db
try:
if self.request.params.get('calendar_id'):
calendar_id = self.request.params.get('calendar_id')
if not CalendarPermission.check(self.request.db, calendar_id, self.request.user.user_id, 'edit'):
return exc.HTTPForbidden()
calendar = session.query(Calendar).filter_by(calendar_id=calendar_id).first()
if self.request.session['edit_calendar_{}'.format(calendar_id)] == (self.request.params.get('unique_id')):
calendar.name = calendar_params['name']
calendar.color = calendar_params['color']
e_perms = self.request.params.getall('edit-permission')
v_perms = self.request.params.getall('view-permission')
self.request.db.query(CalendarPermission).filter_by(calendar_id=calendar.calendar_id).delete()
def add_perm(perms, type_, sess):
for user_id in perms:
cp = CalendarPermission(
calendar_id=calendar.calendar_id,
user_id=user_id,
perm_type=type_
)
sess.add(cp)
add_perm(v_perms, 'view', self.request.db)
add_perm(e_perms, 'edit', self.request.db)
session.commit()
message = '{} has been edited'.format(calendar.name)
else:
calendar = session.query(Calendar).filter_by(name=calendar_params['name']).first()
if calendar:
return Response(json.dumps({
'error': 1,
                            'message': '{} already exists'.format(calendar.name)
}))
else:
calendar = Calendar(
owner=self.request.user.user_id,
name=calendar_params['name'],
color=calendar_params['color']
)
Calendar.insert(calendar)
                        message = '{} has been created'.format(calendar.name)
return Response(json.dumps({
'error': 0,
'message': message,
}))
except Exception:
session.rollback()
raise
finally:
session.close()
else:
return {}
@action(renderer=renderer_path + 'calendar_component.jinja2', permission='view_acl')
def getCalendar(self):
calendar_id = self.request.params.get('calendar_id')
if not CalendarPermission.check(self.request.db, calendar_id, self.request.user.user_id, 'view'):
return exc.HTTPForbidden()
if 'date[year]' in self.request.params:
year = int(self.request.params.get('date[year]'))
month = int(self.request.params.get('date[month]'))
            if month <= 0:
                year -= 1
                month = 12
            elif month >= 13:
                year += 1
                month = 1
day = dt.datetime(year, month, 1)
else:
day = dt.datetime.today()
date_list = self.getDateList(day)
naive_events = self.getNaiveEvents(calendar_id, date_list)
events_list = self.orderEvents(date_list, naive_events)
calendar = self.request.db.query(Calendar).filter_by(calendar_id=calendar_id).first()
return {
'weekdays': self.weekdays,
'months': self.months,
'selected_month': day.month,
'today_year': day.year,
'calendar_id': calendar_id,
'date_list': date_list,
'events_list': events_list,
'calendar_color': calendar.color,
'edit_permission': CalendarPermission.check(self.request.db, calendar_id, self.request.user.user_id, 'edit')
}
def getDateList(self, day):
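        """Return a 42-day (six-week) grid of dates, starting from the Monday
        on or before the first day of the month containing ``day``."""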
year = day.year
month = day.month
first_calendar_day = dt.date(year, month, 1) - dt.timedelta(dt.date(year, month, 1).weekday())
date_list = [first_calendar_day + dt.timedelta(days=x) for x in range(0, 42)]
return date_list
def getNaiveEvents(self, calendar_id, date_list):
events = self.request.db.query(Event).filter(
Event.calendar_id == calendar_id,
Event.start_date >= date_list[0],
Event.end_date <= date_list[-1]).all()
self.request.db.expunge_all()
events = sorted(
events,
key=lambda x: (x.event_type == 'all_day',
x.event_type == 'normal',
x.end_date - x.start_date,
x.event_id),
reverse=True
)
events = sorted(events, key=lambda x: (x.start_date))
for event in events:
event.over_day = event.end_date - event.start_date - dt.timedelta(days=1) > dt.timedelta(days=0)
naive_events = []
for event in events:
tmp_event = event.naive_time(self.request.user.timezone_offset) if event.event_type == 'normal' else event
naive_events.append(tmp_event)
return naive_events
def orderEvents(self, date_list, naive_events):
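        """Bucket the events into the 42-day grid: for each date in date_list,
        collect the events whose start/end range covers that date."""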
events_list = []
for i in range(42):
iter_date = date_list[i]
events_list.append([])
for event in naive_events:
if iter_date >= event.start_date.date() and iter_date <= event.end_date.date():
events_list[i].append(event)
return events_list
@action(renderer=renderer_path + 'editEventView.jinja2', permission='view_acl')
def editEvent(self):
action = self.request.params.get('action')
if action and action == 'edit':
event = self.request.db.query(Event).filter_by(event_id=self.request.params.get('event_id')).first()
self.request.db.expunge(event)
naive_event = event.naive_time(self.request.user.timezone_offset)
            guest_events = [row.owner for row in
                            self.request.db.query(Calendar.owner).join(Event)
                            .filter(Event.parent == event.event_id).all()]
if len(guest_events) > 0:
guests = self.request.db.query(UserAccount)\
.filter(
UserAccount.user_id.notin_(guest_events),
UserAccount.user_id != self.request.user.user_id)\
.all()
else:
guests = self.request.db.query(UserAccount)\
.filter(
UserAccount.user_id != self.request.user.user_id)\
.all()
guest_rsvp = self.request.db.query(Event.rsvp)\
.join(Calendar).join(UserAccount)\
.add_columns(UserAccount.email, UserAccount.user_id)
if event.parent is not None:
guest_rsvp = guest_rsvp.filter(or_(
Event.parent == event.parent,
Event.event_id == event.event_id,
Event.event_id == event.parent))
else:
guest_rsvp = guest_rsvp.filter(or_(
Event.parent == event.event_id,
Event.event_id == event.event_id))
            guest_rsvp = guest_rsvp.all()
return {
'event': naive_event,
'selected_timezone': event.timezone_offset,
'guests': guests,
                'guest_rsvp': guest_rsvp,
'edit_permission': CalendarPermission.check(self.request.db, event.calendar_id, self.request.user.user_id, 'edit')
}
elif self.request.method == 'POST':
if not CalendarPermission.check(self.request.db, self.request.params.get('event_calendar_id'), self.request.user.user_id, 'edit'):
return exc.HTTPForbidden()
if self.request.params.get('all_day', 'normal') == 'normal':
user_timezone = self.request.user.timezone_offset
s_date = dt.datetime.strptime(
'{} {}'.format(self.request.params.get('from_date'),
self.request.params.get('from_time')),
'%d-%m-%Y %H:%M'
) - dt.timedelta(hours=user_timezone)
e_date = dt.datetime.strptime(
'{} {}'.format(self.request.params.get('to_date'),
self.request.params.get('to_time')),
'%d-%m-%Y %H:%M'
) - dt.timedelta(hours=user_timezone)
else:
s_date = dt.datetime.strptime(
self.request.params.get('from_date'), '%d-%m-%Y')
e_date = dt.datetime.strptime(
self.request.params.get('to_date'), '%d-%m-%Y')
if s_date > e_date:
return Response(json.dumps({
'error': 1,
'message': 'starting date cannot be later than ending'
}))
new_event = Event(
event_type=self.request.params.get('all_day', 'normal'),
name=self.request.params.get('name'),
start_date=s_date,
end_date=e_date,
description=self.request.params.get('description'),
calendar_id=self.request.params.get('event_calendar_id'),
timezone_offset=self.request.params.get('timezone_offset'),
)
session = self.request.db
if self.request.params.get('event_id'):
new_event.event_id = self.request.params.get('event_id')
old_event = self.request.db.query(Event.rsvp, Event.parent)\
.filter_by(event_id=self.request.params.get('event_id'))\
.first()
new_event.rsvp = old_event.rsvp
new_event.parent = old_event.parent
session.merge(new_event)
else:
session.add(new_event)
session.commit()
return Response(json.dumps({
'error': 0,
'message': 'Event has been added'
}))
else:
calendar_id = self.request.params.get('calendar_id')
return {
'calendar_id': calendar_id,
'selected_timezone': self.request.user.timezone_offset,
'edit_permission': CalendarPermission.check(self.request.db, calendar_id, self.request.user.user_id, 'edit')
}
@action(permission='view_acl')
def addGuests(self):
invites = self.request.params.getall('invites[]')
event_id = self.request.params.get('event_id')
event = self.request.db.query(Event).filter_by(event_id=event_id).first()
self.request.db.expunge(event)
if isInt(event.parent):
event = self.request.db.query(Event).filter_by(event_id=event.parent).first()
self.request.db.expunge(event)
new_events_id = []
for invite in invites:
            guest_calendars = [row.calendar_id for row in
                               self.request.db.query(Calendar.calendar_id)
                               .filter(Calendar.owner == invite).all()]
if len(guest_calendars) > 0:
event_test = self.request.db.query(Event).filter(Event.parent == event_id, Event.calendar_id.in_(guest_calendars)).first()
if not event_test:
guest_event = Event(
parent=event.event_id,
rsvp='unknown',
calendar_id=guest_calendars[0],
event_type=event.event_type,
name=event.name,
start_date=event.start_date,
end_date=event.end_date,
description=event.description,
timezone_offset=event.timezone_offset,
)
self.request.db.add(guest_event)
new_events_id.append(guest_event)
self.request.db.commit()
new_events_id = [x.event_id for x in new_events_id]
new_guests = self.request.db.query(Event.rsvp)\
.join(Calendar)\
.join(UserAccount)\
.add_columns(UserAccount.email, UserAccount.user_id)\
.filter(
Event.event_id.in_(new_events_id), )\
.all()
new_guests_list = [{
'email': x.email,
'rsvp': x.rsvp,
'user_id': x.user_id} for x in new_guests]
return Response(json.dumps({
'error': 0,
'new_guests': new_guests_list
}))
@action(permission='view_acl')
def response(self):
response = self.request.params.get('response')
event_id = self.request.params.get('event_id')
event = self.request.db.query(Event).filter_by(event_id=event_id).first()
event.rsvp = response
self.request.db.commit()
return Response(json.dumps({
'error': 0,
'user_id': self.request.user.user_id
}))
|
|
# -*- coding: utf-8 -*-
'''
Manage Perl modules using CPAN
.. versionadded:: 2015.5.0
'''
from __future__ import absolute_import
# Import python libs
import os
import os.path
import logging
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
# Don't shadow built-ins.
__func_alias__ = {
'list_': 'list'
}
def __virtual__():
'''
Only work on supported POSIX-like systems
'''
if salt.utils.which('cpan'):
return True
return (False, 'Unable to locate cpan. Make sure it is installed and in the PATH.')
def install(module):
'''
Install a Perl module from CPAN
CLI Example:
.. code-block:: bash
salt '*' cpan.install Template::Alloy
'''
ret = {
'old': None,
'new': None,
}
old_info = show(module)
cmd = 'cpan -i {0}'.format(module)
out = __salt__['cmd.run'](cmd)
    log.debug(out)
if "don't know what it is" in out:
ret['error'] = 'CPAN cannot identify this package'
return ret
new_info = show(module)
ret['old'] = old_info.get('installed version', None)
ret['new'] = new_info['installed version']
return ret
def remove(module, details=False):
'''
Attempt to remove a Perl module that was installed from CPAN. Because the
``cpan`` command doesn't actually support "uninstall"-like functionality,
this function will attempt to do what it can, with what it has from CPAN.
Until this function is declared stable, USE AT YOUR OWN RISK!
CLI Example:
.. code-block:: bash
salt '*' cpan.remove Old::Package
'''
ret = {
'old': None,
'new': None,
}
info = show(module)
if 'error' in info:
return {
'error': info['error']
}
version = info.get('installed version', None)
if version is None:
return ret
ret['old'] = version
if 'cpan build dirs' not in info:
return {
'error': 'No CPAN data available to use for uninstalling'
}
mod_pathfile = module.replace('::', '/') + '.pm'
ins_path = info['installed file'].replace(mod_pathfile, '')
files = []
for build_dir in info['cpan build dirs']:
contents = os.listdir(build_dir)
if 'MANIFEST' not in contents:
continue
mfile = os.path.join(build_dir, 'MANIFEST')
with salt.utils.fopen(mfile, 'r') as fh_:
for line in fh_.readlines():
if line.startswith('lib/'):
files.append(line.replace('lib/', ins_path).strip())
rm_details = {}
for file_ in files:
if file_ in rm_details:
continue
log.trace('Removing {0}'.format(file_))
if __salt__['file.remove'](file_):
rm_details[file_] = 'removed'
else:
rm_details[file_] = 'unable to remove'
if details:
ret['details'] = rm_details
return ret
def list_():
'''
List installed Perl modules, and the version installed
CLI Example:
.. code-block:: bash
salt '*' cpan.list
'''
ret = {}
cmd = 'cpan -l'
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
comps = line.split()
ret[comps[0]] = comps[1]
return ret
def show(module):
'''
Show information about a specific Perl module
CLI Example:
.. code-block:: bash
salt '*' cpan.show Template::Alloy
'''
ret = {}
ret['name'] = module
# This section parses out details from CPAN, if possible
cmd = 'cpan -D {0}'.format(module)
out = __salt__['cmd.run'](cmd).splitlines()
mode = 'skip'
info = []
for line in out:
if line.startswith('-------------'):
mode = 'parse'
continue
if mode == 'skip':
continue
info.append(line)
if len(info) == 6:
# If the module is not installed, we'll be short a line
info.insert(2, '')
if len(info) < 6:
# This must not be a real package
ret['error'] = 'This package does not seem to exist'
return ret
ret['description'] = info[0].strip()
ret['cpan file'] = info[1].strip()
if info[2].strip():
ret['installed file'] = info[2].strip()
else:
ret['installed file'] = None
comps = info[3].split(':')
if len(comps) > 1:
ret['installed version'] = comps[1].strip()
if 'installed version' not in ret or not ret['installed version']:
ret['installed version'] = None
comps = info[4].split(':')
comps = comps[1].split()
ret['cpan version'] = comps[0].strip()
ret['author name'] = info[5].strip()
ret['author email'] = info[6].strip()
# Check and see if there are cpan build directories
config = show_config()
build_dir = config.get('build_dir', None)
if build_dir is not None:
ret['cpan build dirs'] = []
builds = os.listdir(build_dir)
pfile = module.replace('::', '-')
for file_ in builds:
if file_.startswith(pfile):
ret['cpan build dirs'].append(os.path.join(build_dir, file_))
return ret
def show_config():
'''
Return a dict of CPAN configuration values
CLI Example:
.. code-block:: bash
salt '*' cpan.show_config
'''
ret = {}
cmd = 'cpan -J'
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
if '=>' not in line:
# TODO: Some options take up multiple lines, so this doesn't always work
continue
comps = line.split('=>')
key = comps[0].replace("'", '').strip()
val = comps[1].replace("',", '').replace("'", '').strip()
ret[key] = val
return ret
|
|
"""Utils for training neural networks.
"""
import os
import Image
from time import time
from datetime import datetime
from copy import deepcopy
import cPickle
import numpy
import skimage.transform
from skimage import color
from matplotlib import pyplot  # used by Crop.show() below
import theano
import theano.tensor as T
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from anna.layers import layers
from anna.datasets import supervised_dataset
def load_checkpoint(model, checkpoint_path):
all_parameters = model.all_save_parameters_symbol
f = open(checkpoint_path, 'rb')
checkpoint = cPickle.load(f)
f.close()
[model_param.set_value(checkpoint_param)
for model_param, checkpoint_param in zip(all_parameters, checkpoint)]
def save_checkpoint(model, checkpoint_directory_name):
all_parameters = model.all_save_parameters_symbol
checkpoint = [param.get_value() for param in all_parameters]
tt = datetime.now()
time_string = tt.strftime('%mm-%dd-%Hh-%Mm-%Ss')
checkpoint_name = '%s-%s.pkl' % (model.name, time_string)
# print(model.path)
checkpoint_path = os.path.join(model.path, checkpoint_directory_name,
checkpoint_name)
print 'Saving model checkpoint to: %s' % checkpoint_path
f = open(checkpoint_path, 'wb')
cPickle.dump(checkpoint, f)
f.close()
def rescale(data):
data = data / 2.0 * 255.0
data[data > 255.0] = 255.0
return data
def color_augment_image(data):
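    """Randomly jitter an RGB image (channels-first) in HSV space: saturation
    and value get a random power/scale/shift, hue gets a small random shift,
    and the values are clipped to [0, 1] before converting back to RGB."""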
image = data.transpose(1, 2, 0)
hsv = color.rgb2hsv(image)
# Contrast 2
s_factor1 = numpy.random.uniform(0.25, 4)
s_factor2 = numpy.random.uniform(0.7, 1.4)
s_factor3 = numpy.random.uniform(-0.1, 0.1)
hsv[:, :, 1] = (hsv[:, :, 1] ** s_factor1) * s_factor2 + s_factor3
v_factor1 = numpy.random.uniform(0.25, 4)
v_factor2 = numpy.random.uniform(0.7, 1.4)
v_factor3 = numpy.random.uniform(-0.1, 0.1)
hsv[:, :, 2] = (hsv[:, :, 2] ** v_factor1) * v_factor2 + v_factor3
# Color
h_factor = numpy.random.uniform(-0.1, 0.1)
hsv[:, :, 0] = hsv[:, :, 0] + h_factor
hsv[hsv < 0] = 0.0
hsv[hsv > 1] = 1.0
rgb = color.hsv2rgb(hsv)
data_out = rgb.transpose(2, 0, 1)
return data_out
def gray_augment_image(data):
image = data.transpose(1, 2, 0)
v_factor1 = numpy.random.uniform(0.25, 4)
v_factor2 = numpy.random.uniform(0.7, 1.4)
v_factor3 = numpy.random.uniform(-0.1, 0.1)
# print '(v1, v2, v3) = (%f, %f, %f)' % (v_factor1, v_factor2, v_factor3)
image = (image ** v_factor1) * v_factor2 + v_factor3
# image[image < 0] = 0.0
# image[image > 1] = 1.0
# Rescale to [0, 1] range
image_min = image.min()
image -= image_min
image_max = image.max()
image /= image_max
data_out = image.transpose(2, 0, 1)
return data_out
class ReconVisualizer(object):
def __init__(self, model, batch, steps=2000):
self.model = model
self.batch = deepcopy(batch)
self.count = 0
self.steps = steps
self.save_path = os.path.join(self.model.path, 'recon')
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
def run(self):
if self.count % self.steps == 0:
self._save()
self.count += 1
def _save(self):
tt = datetime.now()
time_string = tt.strftime('%mm-%dd-%Hh-%Mm-%Ss')
sub_path = os.path.join(self.save_path, time_string)
if not os.path.exists(sub_path):
os.makedirs(sub_path)
prediction = self.model.prediction(self.batch)
for i in range(128):
image = self.batch[:, :, :, i]
image = image.transpose(1, 2, 0)
recon = numpy.array(prediction[:, :, :, i])
recon = recon.transpose(1, 2, 0)
image_array = numpy.uint8(rescale(numpy.hstack((image, recon))))
to_save = Image.fromarray(image_array)
filename = 'recon-%02d.jpeg' % i
filepath = os.path.join(sub_path, filename)
to_save.save(filepath)
class NormReconVisualizer(object):
def __init__(self, model, batch, steps=2000):
self.model = model
self.batch = deepcopy(batch)
self.count = 0
self.steps = steps
self.save_path = os.path.join(self.model.path, 'recon')
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
def run(self):
if self.count % self.steps == 0:
self._save()
self.count += 1
def _save(self):
tt = datetime.now()
time_string = tt.strftime('%mm-%dd-%Hh-%Mm-%Ss')
sub_path = os.path.join(self.save_path, time_string)
if not os.path.exists(sub_path):
os.makedirs(sub_path)
prediction = self.model.prediction(self.batch)
for i in range(128):
image = deepcopy(self.batch[:, :, :, i])
image = image.transpose(1, 2, 0)
image_min = image.min()
image -= image_min
image_max = image.max()
image /= image_max
image *= 255
recon = numpy.array(prediction[:, :, :, i])
recon = recon.transpose(1, 2, 0)
recon2 = deepcopy(recon) * 1.0
recon_mask = (numpy.sum(recon == 0.0, axis=2) < 3)
# recon_mask = 255*(numpy.tile(recon_mask[:, :,None],(1,1,3)))
recon_mask = 255 * (numpy.tile(recon_mask[:, :, None],
(1, 1, image.shape[2])))
recon -= image_min
recon /= image_max
recon *= 255
recon2 -= recon2.min()
recon2 /= recon2.max()
recon2 *= 255
image_array = numpy.uint8(numpy.hstack((image, recon, recon2,
recon_mask)))
to_save = Image.fromarray(image_array)
filename = 'recon-%02d.jpeg' % i
filepath = os.path.join(sub_path, filename)
to_save.save(filepath)
class NormReconVisualizerGrayscale(object):
def __init__(self, model, batch, steps=2000):
self.model = model
self.batch = deepcopy(batch)
self.count = 0
self.steps = steps
self.save_path = os.path.join(self.model.path, 'recon')
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
def run(self):
if self.count % self.steps == 0:
self._save()
self.count += 1
def _save(self):
tt = datetime.now()
time_string = tt.strftime('%mm-%dd-%Hh-%Mm-%Ss')
sub_path = os.path.join(self.save_path, time_string)
if not os.path.exists(sub_path):
os.makedirs(sub_path)
prediction = self.model.prediction(self.batch)
for i in range(self.batch.shape[3]):
image = deepcopy(self.batch[:, :, :, i])
image = image.transpose(1, 2, 0)
image_min = image.min()
image -= image_min
image_max = image.max()
image /= image_max
image *= 255
recon = numpy.array(prediction[:, :, :, i])
recon = recon.transpose(1, 2, 0)
recon2 = deepcopy(recon) * 1.0
recon_mask = (numpy.sum(recon == 0.0, axis=2) < 3)
recon_mask = 255 * (numpy.tile(recon_mask[:, :, None],
(1, 1, image.shape[2])))
recon -= image_min
recon /= image_max
recon *= 255
recon2 -= recon2.min()
recon2 /= recon2.max()
recon2 *= 255
image_array = numpy.uint8(numpy.hstack((image, recon, recon2,
recon_mask)))
# Needed for grayscale images. If color, has no effect.
image_array = numpy.tile(image_array, (1, 1, 3))
to_save = Image.fromarray(image_array)
filename = 'recon-%02d.jpeg' % i
filepath = os.path.join(sub_path, filename)
to_save.save(filepath)
class FilterVisualizer(object):
def __init__(self, model, steps=2000):
self.model = model
self.count = 0
self.steps = steps
self.save_path = os.path.join(self.model.path, 'filters')
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
def run(self):
if self.count % self.steps == 0:
self._save()
self.count += 1
def _save(self):
tt = datetime.now()
time_string = tt.strftime('%mm-%dd-%Hh-%Mm-%Ss')
W = self.model.conv1.W.get_value()
W = W.transpose(1, 2, 0, 3)
row_list = []
img_list = []
k = 0
rows = W.shape[3] / 16
bar = 0.5 * numpy.ones((W.shape[0], 1, 3))
for i in range(rows):
row_list.append(bar)
for j in range(16):
W0 = W[:, :, :, k]
W0 -= W0.min()
W0 /= W0.max()
# W0[:, :,0] -= W0[:, :,0].min()
# W0[:, :,0] /= W0[:, :,0].max()
# W0[:, :,1] -= W0[:, :,1].min()
# W0[:, :,1] /= W0[:, :,1].max()
# W0[:, :,2] -= W0[:, :,2].min()
# W0[:, :,2] /= W0[:, :,2].max()
row_list.append(W0)
row_list.append(bar)
k += 1
row_image = numpy.hstack(row_list)
row_list = []
bar_h = 0.5 * numpy.ones((1, row_image.shape[1], 3))
img_list.append(bar_h)
img_list.append(row_image)
img_list.append(bar_h)
img_image = numpy.vstack(img_list)
to_save = Image.fromarray(numpy.uint8(255 * img_image))
filename = 'filters_' + time_string + '.png'
filepath = os.path.join(self.save_path, filename)
to_save.save(filepath)
class FilterVisualizerGrayscale(object):
def __init__(self, model, steps=2000):
self.model = model
self.count = 0
self.steps = steps
self.save_path = os.path.join(self.model.path, 'filters')
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
def run(self):
if self.count % self.steps == 0:
self._save()
self.count += 1
def _save(self):
tt = datetime.now()
time_string = tt.strftime('%mm-%dd-%Hh-%Mm-%Ss')
W = self.model.conv1.W.get_value()
W = W.transpose(1, 2, 0, 3)
row_list = []
img_list = []
k = 0
rows = W.shape[3] / 16
bar = 0.5 * numpy.ones((W.shape[0], 1, 3))
for i in range(rows):
row_list.append(bar)
for j in range(16):
W0 = W[:, :, :, k]
W0 -= W0.min()
W0 /= W0.max()
W0 = numpy.tile(W0, (1, 1, 3))
# W0[:, :,0] -= W0[:, :,0].min()
# W0[:, :,0] /= W0[:, :,0].max()
# W0[:, :,1] -= W0[:, :,1].min()
# W0[:, :,1] /= W0[:, :,1].max()
# W0[:, :,2] -= W0[:, :,2].min()
# W0[:, :,2] /= W0[:, :,2].max()
row_list.append(W0)
row_list.append(bar)
k += 1
row_image = numpy.hstack(row_list)
row_list = []
bar_h = 0.5 * numpy.ones((1, row_image.shape[1], 3))
img_list.append(bar_h)
img_list.append(row_image)
img_list.append(bar_h)
img_image = numpy.vstack(img_list)
to_save = Image.fromarray(numpy.uint8(255 * img_image))
filename = 'filters_' + time_string + '.png'
filepath = os.path.join(self.save_path, filename)
to_save.save(filepath)
class Monitor(object):
errors = []
times = []
big_errors = []
big_times = []
def __init__(self, model,
step_number=0,
best=1,
short_steps=10,
long_steps=50,
save_steps=2000,
test_steps=50,
checkpoint_directory='checkpoints'):
self.step_number = step_number
self.best = best
self.short_steps = short_steps
self.long_steps = long_steps
self.save_steps = save_steps
self.model = model
self.test = False
self.test_steps = test_steps
self.checkpoint_directory = checkpoint_directory
# Check if model.path exists, if not create it
# (with a checkpoint folder)
if model.path and not os.path.exists(
os.path.join(model.path, self.checkpoint_directory)):
os.makedirs(os.path.join(model.path, self.checkpoint_directory))
def start(self):
self.tic = time()
def stop_test(self, error):
if self.test:
self.toc = time()
_time = self.toc - self.tic
print '&%d, test error: %.5f, time: %.2f' % (self.step_number,
error, _time)
def stop(self, error):
self.toc = time()
_time = self.toc - self.tic
self.errors.append(error)
self.times.append(_time)
self.big_errors.append(error)
self.big_times.append(_time)
if self.step_number % self.test_steps == 0:
self.test = True
else:
self.test = False
if self.step_number % self.save_steps == 0:
save_checkpoint(self.model, self.checkpoint_directory)
if self.step_number % self.long_steps == 0:
mean_error = numpy.mean(self.big_errors)
mean_time = numpy.mean(self.big_times)
print '*%d, train error: %.5f, time: %.2f' % (self.step_number,
mean_error,
mean_time)
self.big_errors = []
self.big_times = []
if self.step_number % self.short_steps == 0:
mean_error = numpy.mean(self.errors)
mean_time = numpy.mean(self.times)
print '%d, train error: %.5f, time: %.2f' % (self.step_number,
mean_error, mean_time)
self.errors = []
self.times = []
self.step_number += 1
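# Note: Normer2/Normer3 below implement a form of local contrast normalization:
# each pixel has the local mean over a filter_size x filter_size window
# subtracted and is divided by the local standard deviation. Normer2 does the
# averaging with the cuda-convnet FilterActs kernel, Normer3 with cuDNN.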
class Normer2(object):
def __init__(self, filter_size=7, num_channels=3):
# magic numbers that make things work for stl10
self.filter_size = filter_size
self.pad = self.filter_size / 2 # -1
self.num_channels = num_channels
self.num_filters = 16
input = T.ftensor4(name='input')
filter = T.ftensor4(name='filter')
gpu_input = gpu_contiguous(input)
gpu_filter = gpu_contiguous(filter)
self.conv_func = theano.function([input, filter],
FilterActs(pad=self.pad)(gpu_input,
gpu_filter))
n = self.num_channels * self.filter_size * self.filter_size
self.w = numpy.float32(numpy.ones((self.num_channels, self.filter_size,
self.filter_size,
self.num_filters))) / n
def run(self, x_batch):
mean_batch = self.conv_func(x_batch, self.w)
mean_batch = numpy.tile(numpy.array(
mean_batch[0, :, :, :])[None, :, :],
(self.num_channels, 1, 1, 1))
diff_batch = x_batch - mean_batch
std_batch = self.conv_func(diff_batch ** 2, self.w)
std_batch = numpy.tile(numpy.array(std_batch[0, :, :, :])[None, :, :],
(self.num_channels, 1, 1, 1))
        norm_batch = diff_batch / (numpy.array(std_batch) ** 0.5)
return norm_batch
class Normer3(object):
def __init__(self, filter_size=7, num_channels=3):
self.filter_size = filter_size
self.pad = self.filter_size / 2
self.num_channels = num_channels
n = self.filter_size*self.filter_size*self.num_channels
self.w = numpy.float32(numpy.ones(
(1, self.num_channels, self.filter_size,
self.filter_size))) / n
input = T.ftensor4(name='input')
filter = T.ftensor4(name='filter')
gpu_input = gpu_contiguous(input)
gpu_filter = gpu_contiguous(filter)
self.conv_func = theano.function([input, filter],
theano.sandbox.cuda.dnn.dnn_conv(
gpu_input,
gpu_filter,
border_mode=(self.pad, self.pad)))
def run(self, x_batch):
mean_batch = self.conv_func(x_batch, self.w)
mean_batch = numpy.tile(mean_batch, (1, self.num_channels, 1, 1))
diff_batch = x_batch - mean_batch
std_batch = self.conv_func((diff_batch)**2, self.w)
std_batch = numpy.tile(std_batch, (1, self.num_channels, 1, 1))
        norm_batch = diff_batch / (numpy.array(std_batch) ** 0.5)
return norm_batch
class PatchGrabber(object):
def __init__(self, num_patches, patch_size, num_channels=3):
self.num_patches = num_patches
self.patch_size = patch_size
self.num_channels = num_channels
def run(self, x_batch):
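        """Extract ``num_patches`` random square patches of side ``patch_size``
        from randomly chosen images in ``x_batch`` (channels, height, width,
        batch) and stack them along the last axis."""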
image_size = x_batch.shape[1]
batch_size = x_batch.shape[-1]
patches = numpy.zeros((self.num_channels, self.patch_size,
self.patch_size, self.num_patches),
dtype=numpy.float32)
for i_patch in range(self.num_patches):
x_start = numpy.random.randint(image_size - self.patch_size)
y_start = numpy.random.randint(image_size - self.patch_size)
image_id = numpy.random.randint(batch_size)
x_slice = slice(x_start, x_start + self.patch_size)
y_slice = slice(y_start, y_start + self.patch_size)
patch = x_batch[:, x_slice, y_slice, image_id]
patches[:, :, :, i_patch] = patch
return patches
class WeightVisualizer(object):
def __init__(self, model, model_layer, layer_name, steps=2000):
self.model_layer = model_layer
self.name = layer_name
self.path = model.path
self.count = 0
self.steps = steps
def run(self):
if self.count % self.steps == 0:
self._save()
self.count += 1
def _save(self):
tt = datetime.now()
time_string = tt.strftime('%mm-%dd-%Hh-%Mm-%Ss')
W = self.model_layer.W.get_value()
W -= W.min()
W /= W.max()
input_filters, width, height, output_filters = W.shape
tall_bar = numpy.zeros((height, 1))
output_filter = 0
row_list = []
image_list = []
for output_filter in range(output_filters):
row_list.append(tall_bar)
for input_filter in range(input_filters):
temp = W[input_filter, :, :, output_filter]
row_list.append(temp)
row_list.append(tall_bar)
row_image = numpy.hstack(row_list)
row_list = []
long_bar = numpy.zeros((1, row_image.shape[1]))
image_list.append(long_bar)
image_list.append(row_image)
image_list.append(long_bar)
image_image = numpy.vstack(image_list)
to_save = Image.fromarray(numpy.uint8(255 * image_image))
filename = os.path.join(self.path, '%s-%s.png' % (self.name,
time_string))
to_save.save(filename)
def set_parameters_from_unsupervised_model(model, checkpoint):
f = open(checkpoint, 'rb')
checkpoint_params = cPickle.load(f)
f.close()
checkpoint_params_flipped = checkpoint_params[::-1]
model_params = model.all_save_parameters_symbol
model_params_flipped = model_params[::-1]
for i in range(len(checkpoint_params_flipped)):
if (list(checkpoint_params_flipped[i].shape) !=
list(model_params_flipped[i].shape.eval())):
raise Exception('Size mismatch!')
model_params_flipped[i].set_value(checkpoint_params_flipped[i])
class Evaluator(object):
def __init__(self, model, data_container, checkpoint,
preprocessor_module_list):
self.model = model
self.data_container = data_container
self.checkpoint = checkpoint
self.preprocessor = Preprocessor(preprocessor_module_list)
self.batch_size = model.batch
# Load parameters from checkpoint
load_checkpoint(self.model, self.checkpoint)
self._switch_off_dropout_flags()
def run(self):
predictions = self._get_predictions()
# Compute accuracy
accuracy = (100.0 * numpy.sum(predictions == self.data_container.y)
) / len(self.data_container.y)
return accuracy
def set_checkpoint(self, checkpoint):
self.checkpoint = checkpoint
load_checkpoint(self.model, self.checkpoint)
self._switch_off_dropout_flags()
def set_preprocessor(self, preprocessor_module_list):
        self.preprocessor = Preprocessor(preprocessor_module_list)
def _switch_off_dropout_flags(self):
# Switch off dropout flag (if present) in every layer
all_layers = layers.all_layers(self.model.output)
for layer in all_layers:
if hasattr(layer, 'dropout'):
layer.dropout = 0.0
# Re-compile the model
self.model._compile()
def _get_iterator(self):
dataset = supervised_dataset.SupervisedDataset(self.data_container.X,
self.data_container.y)
iterator = dataset.iterator(mode='sequential',
batch_size=self.batch_size)
return iterator
def _get_predictions(self):
iterator = self._get_iterator()
# Compute accuracy on each training batch
predictions = []
for x_batch, y_batch in iterator:
x_batch = self.preprocessor.run(x_batch)
batch_pred = self.model.prediction(x_batch)
batch_pred = numpy.argmax(batch_pred, axis=1)
predictions.append(batch_pred)
# Classify last training batch
num_samples, num_channels, height, width = self.data_container.X.shape
last_batch_start_ind = numpy.floor(num_samples /
self.batch_size) * self.batch_size
last_batch_start_ind = int(last_batch_start_ind)
last_batch = self.data_container.X[last_batch_start_ind:, :, :, :]
dummy_batch = numpy.zeros((self.batch_size, num_channels, height,
width),
dtype=numpy.float32)
dummy_batch[0:last_batch.shape[0], :, :, :] = last_batch
dummy_batch = self.preprocessor.run(dummy_batch)
batch_pred = self.model.prediction(dummy_batch)
batch_pred = batch_pred[0:last_batch.shape[0], :]
batch_pred = numpy.argmax(batch_pred, axis=1)
predictions.append(batch_pred)
# Get all predictions
predictions = numpy.hstack(predictions)
# print predictions.shape
return predictions
class Preprocessor(object):
def __init__(self, module_list):
self.module_list = module_list
def run(self, batch):
for module in self.module_list:
batch = module.run(batch)
return batch
class DataAugmenter(object):
def __init__(self, amount_pad, window_shape,
flip=True,
color_on=False,
gray_on=False):
self.amount_pad = amount_pad
self.window_shape = window_shape
self.flip = flip
self.color_on = color_on
self.gray_on = gray_on
if len(window_shape) != 2:
raise ValueError("window_shape should be length 2")
def run(self, x_batch):
pad_seq = ((0, 0), (self.amount_pad, self.amount_pad),
(self.amount_pad, self.amount_pad), (0, 0))
x_batch_pad = numpy.pad(x_batch, pad_seq, mode='constant')
x_batch_pad_aug = self._random_window_and_flip(x_batch_pad)
if self.color_on:
x_batch_out = self._color_augment(x_batch_pad_aug)
elif self.gray_on:
x_batch_out = self._gray_augment(x_batch_pad_aug)
else:
x_batch_out = x_batch_pad_aug
return x_batch_out
def _random_window_and_flip(self, x_batch_pad):
num_channels, _, _, num_images = x_batch_pad.shape
crop_batch_shape = (num_channels, self.window_shape[0],
self.window_shape[1], num_images)
x_batch_crop = numpy.empty(crop_batch_shape,
dtype=x_batch_pad.dtype)
for i in range(num_images):
sample = x_batch_pad[:, :, :, i]
if self.flip:
flip_rv = numpy.random.randint(0, 2)
if flip_rv == 1:
sample = sample[:, :, ::-1]
width_start = numpy.random.randint(0, self.amount_pad)
height_start = numpy.random.randint(0, self.amount_pad)
sample = sample[:,
height_start:(height_start+self.window_shape[0]),
width_start:(width_start+self.window_shape[1])]
x_batch_crop[:, :, :, i] = sample
return x_batch_crop
def _color_augment(self, x_batch):
out_batch = numpy.zeros(x_batch.shape, dtype=x_batch.dtype)
__, __, __, num_samples = x_batch.shape
for i in range(num_samples):
out_batch[:, :, :, i] = color_augment_image(x_batch[:, :, :, i])
out_batch *= 2
return out_batch
def _gray_augment(self, x_batch):
out_batch = numpy.zeros(x_batch.shape, dtype=x_batch.dtype)
__, __, __, num_samples = x_batch.shape
for i in range(num_samples):
out_batch[:, :, :, i] = gray_augment_image(x_batch[:, :, :, i])
out_batch *= 2
return out_batch
class Crop(object):
def __init__(self, input_size, output_size):
self.input_size = input_size
self.output_size = output_size
# Get input keypoints (only need center)
input_width, input_height = self.input_size
self.input_center = numpy.array([input_width / 2, input_height / 2, 1])
# Get output keypoints
output_width, output_height = self.output_size
self.corner_1 = numpy.array([0, 0, 1])
self.corner_2 = numpy.array([0, output_height, 1])
self.corner_3 = numpy.array([output_width, 0, 1])
self.corner_4 = numpy.array([output_width, output_height, 1])
self.center = numpy.array([output_width / 2, output_height / 2, 1])
self.transform = skimage.transform.AffineTransform(scale=(1.0, 1.0))
def get(self, image):
"""Takes an image as an ndarray, and returns a cropped image as an
ndarray of dtype float32"""
num_channels = image.shape[0]
current_corner_1 = numpy.dot(self.transform.params, self.corner_1)
current_corner_2 = numpy.dot(self.transform.params, self.corner_2)
current_corner_3 = numpy.dot(self.transform.params, self.corner_3)
current_corner_4 = numpy.dot(self.transform.params, self.corner_4)
current_center = numpy.dot(self.transform.params, self.center)
matrix = self.transform.params
output = numpy.empty(
(num_channels, self.output_size[0], self.output_size[1]),
dtype=numpy.float32)
for channel in range(num_channels):
output[channel, :, :] = skimage.transform._warps_cy._warp_fast(
image=image[channel, :, :],
H=matrix,
output_shape=self.output_size)
return numpy.float32(output)
def scale(self, scale):
self.transform += skimage.transform.AffineTransform(
scale=(scale, scale))
def rotate(self, angle):
self.transform += skimage.transform.AffineTransform(
rotation=numpy.deg2rad(angle))
def translate(self, x, y):
self.transform += skimage.transform.AffineTransform(translation=(x, y))
def centered(self):
current_center = numpy.dot(self.transform.params, self.center)
shift = self.input_center - current_center
self.transform += skimage.transform.AffineTransform(
translation=shift[0:2])
def show(self, image):
current_corner_1 = numpy.dot(self.transform.params, self.corner_1)
current_corner_2 = numpy.dot(self.transform.params, self.corner_2)
current_corner_3 = numpy.dot(self.transform.params, self.corner_3)
current_corner_4 = numpy.dot(self.transform.params, self.corner_4)
current_center = numpy.dot(self.transform.params, self.center)
pyplot.imshow(image)
pyplot.plot(current_corner_1[0], current_corner_1[1], 'r.')
pyplot.plot(current_corner_2[0], current_corner_2[1], 'r.')
pyplot.plot(current_corner_3[0], current_corner_3[1], 'r.')
pyplot.plot(current_corner_4[0], current_corner_4[1], 'r.')
pyplot.plot(current_center[0], current_center[1], 'b.')
pyplot.show()
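# Hypothetical usage sketch (not part of the original API): crop a random
# channels-first 96x96 image down to 64x64 through a slightly rotated and
# scaled window centered on the input; the random data is illustrative only.
def _example_crop():
    image = numpy.random.rand(3, 96, 96).astype(numpy.float32)
    crop = Crop(input_size=(96, 96), output_size=(64, 64))
    crop.rotate(5.0)   # degrees
    crop.scale(1.2)
    crop.centered()    # center the output window on the input image
    patch = crop.get(image)
    return patch.shape  # expected: (3, 64, 64)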
class DataAugmenter2(object):
def __init__(self, crop_shape, flip=True, scale=True, rotate=True,
color_on=False, gray_on=False, kernel='cudnn'):
""""""
self.crop_shape = crop_shape
self.flip = flip
self.scale = scale
self.rotate = rotate
self.color_on = color_on
self.gray_on = gray_on
self.kernel = kernel
if len(crop_shape) != 2:
raise ValueError("window_shape should be length 2")
if kernel != 'cudnn' and kernel != 'cuda_convnet':
raise ValueError("kernel must be cudnn or cuda_convnet")
def run(self, batch):
"""Applies random crops to each image in a batch.
Args:
batch: 4D ndarray with shape (batch_size, channels, width, height)
Returns:
batch_out: 4D ndarray with shape (batch_size, channels,
crop_shape[0], crop_shape[1])
"""
if self.kernel == 'cuda_convnet':
# Transpose to cudnn batch shape (to be switch back later)
batch = batch.transpose(3, 0, 1, 2)
batch_size, channels, width, height = batch.shape
out_shape = (batch_size, channels,
self.crop_shape[0], self.crop_shape[1])
batch_out = numpy.empty(out_shape, dtype=numpy.float32)
for sample_index in range(batch_size):
sample = batch[sample_index, :, :, :]
if self.rotate:
angle = (numpy.random.rand() - 0.5) * 10
else:
angle = 0.0
if self.scale:
scale = numpy.random.rand() * 0.7 + 0.7
else:
scale = 1.0
diff = (width-scale*self.crop_shape[0])
translation_x = numpy.random.rand() * diff - diff / 2
translation_y = numpy.random.rand() * diff - diff / 2
if self.flip:
flip_rv = numpy.random.randint(0, 2)
if flip_rv == 1:
sample = sample[:, :, ::-1]
crop = Crop((width, height), self.crop_shape)
crop.rotate(angle)
crop.scale(scale)
crop.centered()
crop.translate(translation_x, translation_y)
output = crop.get(sample)
batch_out[sample_index, :, :, :] = output
if self.color_on:
x_batch_out = self._color_augment(batch_out)
elif self.gray_on:
x_batch_out = self._gray_augment(batch_out)
else:
x_batch_out = batch_out
if self.kernel == 'cuda_convnet':
x_batch_out = x_batch_out.transpose(1, 2, 3, 0)
return x_batch_out
def _color_augment(self, x_batch):
out_batch = numpy.zeros(x_batch.shape, dtype=x_batch.dtype)
num_samples, __, __, __ = x_batch.shape
for i in range(num_samples):
out_batch[i, :, :, :] = color_augment_image(x_batch[i, :, :, :])
out_batch *= 2
return out_batch
def _gray_augment(self, x_batch):
out_batch = numpy.zeros(x_batch.shape, dtype=x_batch.dtype)
num_samples, __, __, __ = x_batch.shape
for i in range(num_samples):
out_batch[i, :, :, :] = gray_augment_image(x_batch[i, :, :, :])
out_batch *= 2
return out_batch
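# Hypothetical usage sketch (not part of the original API): augment a batch in
# the cudnn layout (batch, channels, height, width) with random crops, flips,
# rotations and scalings; the random data is illustrative only.
def _example_augment_batch():
    batch = numpy.random.rand(8, 3, 96, 96).astype(numpy.float32)
    augmenter = DataAugmenter2(crop_shape=(64, 64), flip=True, scale=True,
                               rotate=True, kernel='cudnn')
    return augmenter.run(batch).shape  # expected: (8, 3, 64, 64)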
class Annealer(object):
def __init__(self, model, steps_per_epoch, func=None):
self.model = model
self.steps_per_epoch = steps_per_epoch
if func is None:
self.func = self.exp_decay
else:
self.func = func
self.step_count = 0
self.epoch_count = 0
self.init_learning_rate = self.model.learning_rate_symbol.get_value()
self.current_learning_rate = self.init_learning_rate
def run(self):
self.step_count += 1
if (self.step_count % self.steps_per_epoch) == 0:
self.epoch_count += 1
# Compute new learning rate
new_learning_rate = self.func(self.init_learning_rate,
self.epoch_count)
# Set model's learning rate to new learning rate
self.set_learning_rate(self.model, new_learning_rate)
def set_learning_rate(self, model, new_learning_rate):
print 'Learning rate before: ', model.learning_rate_symbol.get_value()
model.learning_rate_symbol.set_value(numpy.float32(new_learning_rate))
self.current_learning_rate = new_learning_rate
print 'Learning rate now: ', model.learning_rate_symbol.get_value()
def get_current_learning_rate(self):
return self.current_learning_rate
def exp_decay(self, init_learning_rate, epoch_count):
return init_learning_rate * (0.1)**(epoch_count)
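# Illustrative only (not part of the original module): the default exp_decay
# schedule divides the initial learning rate by 10 after every completed epoch.
def _example_exp_decay_schedule():
    init_learning_rate = 0.01
    # Same formula as Annealer.exp_decay(init_learning_rate, epoch_count);
    # approximately [0.01, 0.001, 0.0001, 1e-05] for epochs 0 through 3.
    return [init_learning_rate * (0.1) ** epoch_count for epoch_count in range(4)]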
|
|
#!/usr/bin/env python3
import sys
import os
import shutil
import unittest
import mvtools_test_fixture
import git_test_fixture
import create_and_write_file
import path_utils
import git_wrapper
import git_lib
import collect_git_patch
import apply_git_patch
class ApplyGitPatchTest(unittest.TestCase):
def setUp(self):
v, r = self.delegate_setUp()
if not v:
self.tearDown()
self.fail(r)
def delegate_setUp(self):
v, r = mvtools_test_fixture.makeAndGetTestFolder("apply_git_patch_test")
if not v:
return v, r
        self.test_base_dir = r[0] # base test folder, shared with the other test cases
        self.test_dir = r[1] # test folder, specific to this test case (nested one level inside self.test_base_dir)
self.nonexistent = path_utils.concat_path(self.test_dir, "nonexistent")
self.nonrepo = path_utils.concat_path(self.test_dir, "nonrepo")
os.mkdir(self.nonrepo)
# storage path
self.storage_path = path_utils.concat_path(self.test_dir, "storage_path")
os.mkdir(self.storage_path)
# first repo
self.first_repo = path_utils.concat_path(self.test_dir, "first")
v, r = git_wrapper.init(self.test_dir, "first", False)
if not v:
return v, r
self.first_file1 = path_utils.concat_path(self.first_repo, "file1.txt")
v, r = git_test_fixture.git_createAndCommit(self.first_repo, "file1.txt", "first-file1-content", "first-file1-msg")
if not v:
return v, r
self.first_file2 = path_utils.concat_path(self.first_repo, "file2.txt")
v, r = git_test_fixture.git_createAndCommit(self.first_repo, "file2.txt", "first-file2-content", "first-file2-msg")
if not v:
return v, r
self.first_file3 = path_utils.concat_path(self.first_repo, "file3.txt")
v, r = git_test_fixture.git_createAndCommit(self.first_repo, "file3.txt", "first-file3-content", "first-file3-msg")
if not v:
return v, r
# second repo - clone of first
self.second_repo = path_utils.concat_path(self.test_dir, "second")
v, r = git_wrapper.clone(self.first_repo, self.second_repo, "origin")
if not v:
return v, r
return True, ""
def tearDown(self):
shutil.rmtree(self.test_base_dir)
def testApplyGitPatchBasicChecks(self):
v, r = apply_git_patch.apply_git_patch(self.nonexistent, [], [], [], [])
self.assertFalse(v)
v, r = apply_git_patch.apply_git_patch(self.nonrepo, [], [], [], [])
self.assertFalse(v)
def testApplyGitApplyUnversionedFail(self):
first_sub = path_utils.concat_path(self.first_repo, "sub")
os.mkdir(first_sub)
self.assertTrue(os.path.exists(first_sub))
second_sub = path_utils.concat_path(self.second_repo, "sub")
os.mkdir(second_sub)
self.assertTrue(os.path.exists(second_sub))
first_sub_file4 = path_utils.concat_path(first_sub, "file4.txt")
self.assertTrue(create_and_write_file.create_file_contents(first_sub_file4, "first, sub, file4, contents"))
second_sub_file4 = path_utils.concat_path(second_sub, "file4.txt")
self.assertTrue(create_and_write_file.create_file_contents(second_sub_file4, "second, sub, file4, contents"))
first_file5 = path_utils.concat_path(self.first_repo, "file5.txt")
self.assertTrue(create_and_write_file.create_file_contents(first_file5, "first, file5, contents"))
second_file5 = path_utils.concat_path(self.second_repo, "file5.txt")
self.assertFalse( os.path.exists(second_file5) )
v, r = collect_git_patch.collect_git_patch_unversioned(self.first_repo, self.storage_path, "include", [], [])
self.assertTrue(v)
generated_patches = r
unversioned_param = []
unversioned_base = path_utils.concat_path(self.storage_path, self.first_repo, "unversioned")
for gp in generated_patches:
unversioned_param.append( (unversioned_base, gp) )
v, r = apply_git_patch.apply_git_patch_unversioned(self.second_repo, unversioned_param)
self.assertFalse(v)
def testApplyGitApplyUnversioned(self):
first_sub = path_utils.concat_path(self.first_repo, "sub")
os.mkdir(first_sub)
self.assertTrue(os.path.exists(first_sub))
second_sub = path_utils.concat_path(self.second_repo, "sub")
self.assertFalse(os.path.exists(second_sub))
first_sub_file4 = path_utils.concat_path(first_sub, "file4.txt")
self.assertTrue(create_and_write_file.create_file_contents(first_sub_file4, "first, sub, file4, contents"))
second_sub_file4 = path_utils.concat_path(second_sub, "file4.txt")
self.assertFalse(os.path.exists(second_sub_file4))
first_file5 = path_utils.concat_path(self.first_repo, "file5.txt")
self.assertTrue(create_and_write_file.create_file_contents(first_file5, "first, file5, contents"))
second_file5 = path_utils.concat_path(self.second_repo, "file5.txt")
self.assertFalse( os.path.exists(second_file5) )
v, r = collect_git_patch.collect_git_patch_unversioned(self.first_repo, self.storage_path, "include", [], [])
self.assertTrue(v)
generated_patches = r
unversioned_param = []
unversioned_base = path_utils.concat_path(self.storage_path, self.first_repo, "unversioned")
for gp in generated_patches:
unversioned_param.append( (unversioned_base, gp) )
v, r = apply_git_patch.apply_git_patch_unversioned(self.second_repo, unversioned_param)
self.assertTrue(v)
self.assertTrue(os.path.exists(second_sub))
self.assertTrue(os.path.exists(second_sub_file4))
self.assertTrue( os.path.exists(second_file5) )
def testApplyGitPatchHead(self):
second_file1 = path_utils.concat_path(self.second_repo, "file1.txt")
self.assertTrue(os.path.exists(second_file1))
with open(self.first_file1, "a") as f:
f.write("more stuff")
v, r = collect_git_patch.collect_git_patch_head(self.first_repo, self.storage_path, "include", [], [])
self.assertTrue(v)
generated_patches = [r]
v, r = apply_git_patch.apply_git_patch_head(self.second_repo, generated_patches)
self.assertTrue(v)
contents = ""
with open(second_file1, "r") as f:
contents = f.read()
self.assertTrue("more stuff" in contents)
def testApplyGitPatchStaged(self):
second_file1 = path_utils.concat_path(self.second_repo, "file1.txt")
self.assertTrue(os.path.exists(second_file1))
with open(self.first_file1, "a") as f:
f.write("more stuff")
v, r = git_wrapper.stage(self.first_repo)
self.assertTrue(v)
v, r = collect_git_patch.collect_git_patch_staged(self.first_repo, self.storage_path, "include", [], [])
self.assertTrue(v)
generated_patches = [r]
v, r = apply_git_patch.apply_git_patch_staged(self.second_repo, generated_patches)
self.assertTrue(v)
contents = ""
with open(second_file1, "r") as f:
contents = f.read()
self.assertTrue("more stuff" in contents)
v, r = git_lib.get_staged_files(self.second_repo)
self.assertTrue(v)
sf = r
self.assertTrue(sf is not None)
self.assertTrue(second_file1 in sf)
def testApplyGitPatchStash(self):
with open(self.first_file1, "a") as f:
f.write("more stuff")
v, r = git_lib.get_stash_list(self.first_repo)
self.assertTrue(v)
self.assertEqual(len(r), 0)
v, r = git_wrapper.stash(self.first_repo)
self.assertTrue(v)
v, r = git_lib.get_stash_list(self.first_repo)
self.assertTrue(v)
self.assertEqual(len(r), 1)
v, r = git_lib.get_stash_list(self.second_repo)
self.assertTrue(v)
self.assertEqual(len(r), 0)
v, r = collect_git_patch.collect_git_patch_stash(self.first_repo, self.storage_path, -1)
self.assertTrue(v)
generated_patches = r
v, r = apply_git_patch.apply_git_patch_stash(self.second_repo, generated_patches)
self.assertTrue(v)
v, r = git_lib.get_stash_list(self.second_repo)
self.assertTrue(v)
self.assertEqual(len(r), 1)
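# Illustrative sketch (not one of the test cases): the collect/apply flow the
# tests above exercise for unversioned files, condensed into one helper. The
# (base, patch) tuple format and the storage layout mirror the calls in
# testApplyGitApplyUnversioned; treat the exact arguments as assumptions.
def _example_replicate_unversioned(source_repo, target_repo, storage_path):
    v, generated_patches = collect_git_patch.collect_git_patch_unversioned(
        source_repo, storage_path, "include", [], [])
    if not v:
        return v, generated_patches
    unversioned_base = path_utils.concat_path(
        storage_path, source_repo, "unversioned")
    unversioned_param = [(unversioned_base, gp) for gp in generated_patches]
    return apply_git_patch.apply_git_patch_unversioned(
        target_repo, unversioned_param)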
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
from page_sets import key_mobile_sites_pages
def _IssueMarkerAndScroll(action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage()
def _CreatePageClassWithSmoothInteractions(page_cls):
class DerivedSmoothPage(page_cls): # pylint: disable=W0232
def RunPageInteractions(self, action_runner):
_IssueMarkerAndScroll(action_runner)
return DerivedSmoothPage
class KeyMobileSitesSmoothPage(page_module.Page):
def __init__(self, url, page_set, name='', labels=None,
action_on_load_complete=False):
super(KeyMobileSitesSmoothPage, self).__init__(
url=url, page_set=page_set, name=name,
credentials_path='data/credentials.json', labels=labels,
shared_page_state_class=shared_page_state.SharedMobilePageState)
self.archive_data_file = 'data/key_mobile_sites.json'
self.action_on_load_complete = action_on_load_complete
def RunPageInteractions(self, action_runner):
if self.action_on_load_complete:
action_runner.WaitForJavaScriptCondition(
'document.readyState == "complete"', 30)
_IssueMarkerAndScroll(action_runner)
class LinkedInSmoothPage(key_mobile_sites_pages.LinkedInPage):
def __init__(self, page_set):
super(LinkedInSmoothPage, self).__init__(page_set=page_set)
# Linkedin has expensive shader compilation so it can benefit from shader
# cache from reload.
def RunNavigateSteps(self, action_runner):
super(LinkedInSmoothPage, self).RunNavigateSteps(action_runner)
action_runner.ScrollPage()
action_runner.ReloadPage()
super(LinkedInSmoothPage, self).RunNavigateSteps(action_runner)
class WowwikiSmoothPage(KeyMobileSitesSmoothPage):
"""Why: Mobile wiki."""
def __init__(self, page_set):
super(WowwikiSmoothPage, self).__init__(
url='http://www.wowwiki.com/World_of_Warcraft:_Mists_of_Pandaria',
page_set=page_set)
# Wowwiki has expensive shader compilation so it can benefit from shader
# cache from reload.
def RunNavigateSteps(self, action_runner):
super(WowwikiSmoothPage, self).RunNavigateSteps(action_runner)
action_runner.ScrollPage()
action_runner.ReloadPage()
super(WowwikiSmoothPage, self).RunNavigateSteps(action_runner)
class GmailSmoothPage(key_mobile_sites_pages.GmailPage):
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollElement(element_function=(
'document.getElementById("views").childNodes[1].firstChild'))
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollElement(element_function=(
'document.getElementById("views").childNodes[1].firstChild'))
class GroupClonedSmoothPage(key_mobile_sites_pages.GroupClonedPage):
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage(
distance_expr='''
Math.max(0, 1250 + document.getElementById("element-19")
.contentDocument
.getElementById("element-22")
.getBoundingClientRect().top);''',
use_touch=True)
class GroupClonedListImagesPage(
key_mobile_sites_pages.GroupClonedListImagesPage):
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollPage(
distance_expr='''
Math.max(0, 1250 +
document.getElementById("element-5")
.getBoundingClientRect().top);''',
use_touch=True)
class GoogleNewsMobile2SmoothPage(
key_mobile_sites_pages.GoogleNewsMobile2Page):
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollElement(
element_function='document.getElementById(":5")',
distance_expr='''
Math.max(0, 2500 +
document.getElementById(':h').getBoundingClientRect().top)''',
use_touch=True)
class AmazonNicolasCageSmoothPage(
key_mobile_sites_pages.AmazonNicolasCagePage):
def RunPageInteractions(self, action_runner):
with action_runner.CreateGestureInteraction('ScrollAction'):
action_runner.ScrollElement(
selector='#search',
distance_expr='document.body.scrollHeight - window.innerHeight')
class KeyMobileSitesSmoothPageSet(story.StorySet):
""" Key mobile sites with smooth interactions. """
def __init__(self):
super(KeyMobileSitesSmoothPageSet, self).__init__(
archive_data_file='data/key_mobile_sites_smooth.json',
cloud_storage_bucket=story.PARTNER_BUCKET)
# Add pages with predefined classes that contain custom navigation logic.
predefined_page_classes = [
key_mobile_sites_pages.CapitolVolkswagenPage,
key_mobile_sites_pages.TheVergeArticlePage,
key_mobile_sites_pages.CnnArticlePage,
key_mobile_sites_pages.FacebookPage,
key_mobile_sites_pages.YoutubeMobilePage,
key_mobile_sites_pages.YahooAnswersPage,
key_mobile_sites_pages.GoogleNewsMobilePage,
]
for page_class in predefined_page_classes:
self.AddUserStory(
_CreatePageClassWithSmoothInteractions(page_class)(self))
self.AddUserStory(
_CreatePageClassWithSmoothInteractions(LinkedInSmoothPage)(self))
self.AddUserStory(WowwikiSmoothPage(self))
# Add pages with custom page interaction logic.
# Page behaves non-deterministically, replaced with test version for now.
# self.AddUserStory(GroupClonedSmoothPage(self))
# mean_input_event_latency cannot be tracked correctly for
# GroupClonedListImagesPage.
# See crbug.com/409086.
# self.AddUserStory(GroupClonedListImagesSmoothPage(self))
self.AddUserStory(GoogleNewsMobile2SmoothPage(self))
self.AddUserStory(AmazonNicolasCageSmoothPage(self))
# Add pages with custom labels.
# Why: Top news site.
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://nytimes.com/', page_set=self, labels=['fastpath']))
# Why: Image-heavy site.
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://cuteoverload.com', page_set=self, labels=['fastpath']))
# Why: #11 (Alexa global), google property; some blogger layouts
# have infinite scroll but more interesting.
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://googlewebmastercentral.blogspot.com/',
page_set=self, name='Blogger'))
    # Why: #18 (Alexa global), picked an interesting post
self.AddUserStory(KeyMobileSitesSmoothPage(
# pylint: disable=line-too-long
url='http://en.blog.wordpress.com/2012/09/04/freshly-pressed-editors-picks-for-august-2012/',
page_set=self,
name='Wordpress'))
# Why: #6 (Alexa) most visited worldwide, picked an interesting page
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://en.wikipedia.org/wiki/Wikipedia',
page_set=self,
name='Wikipedia (1 tab)'))
# Why: Wikipedia page with a delayed scroll start
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://en.wikipedia.org/wiki/Wikipedia',
page_set=self,
name='Wikipedia (1 tab) - delayed scroll start',
action_on_load_complete=True))
# Why: #8 (Alexa global), picked an interesting page
# Forbidden (Rate Limit Exceeded)
# self.AddUserStory(KeyMobileSitesSmoothPage(
# url='http://twitter.com/katyperry', page_set=self, name='Twitter'))
    # Why: #37 (Alexa global)
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://pinterest.com',
page_set=self,
name='Pinterest'))
# Why: #1 sports.
    # Fails often; crbug.com/249722
# self.AddUserStory(KeyMobileSitesSmoothPage(
# url='http://espn.go.com', page_set=self, name='ESPN'))
# Why: crbug.com/231413
# Doesn't scroll; crbug.com/249736
# self.AddUserStory(KeyMobileSitesSmoothPage(
# url='http://forecast.io', page_set=self))
# Why: crbug.com/169827
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://slashdot.org/', page_set=self, labels=['fastpath']))
    # Why: #5 Alexa news
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://www.reddit.com/r/programming/comments/1g96ve',
page_set=self, labels=['fastpath']))
    # Why: Problematic use of fixed position elements
self.AddUserStory(KeyMobileSitesSmoothPage(
url='http://www.boingboing.net', page_set=self, labels=['fastpath']))
# Add simple pages with no custom navigation logic or labels.
urls_list = [
# Why: Social; top Google property; Public profile; infinite scrolls.
# pylint: disable=line-too-long
'https://plus.google.com/app/basic/110031535020051778989/posts?source=apppromo',
# Why: crbug.com/242544
('http://www.androidpolice.com/2012/10/03/rumor-evidence-mounts-that-an-'
'lg-optimus-g-nexus-is-coming-along-with-a-nexus-phone-certification-'
'program/'),
# Why: crbug.com/149958
'http://gsp.ro',
# Why: Top tech blog
'http://theverge.com',
# Why: Top tech site
'http://digg.com',
# Why: Top Google property; a Google tab is often open
'https://www.google.com/#hl=en&q=barack+obama',
# Why: #1 news worldwide (Alexa global)
'http://news.yahoo.com',
# Why: #2 news worldwide
'http://www.cnn.com',
# Why: #1 commerce website by time spent by users in US
'http://shop.mobileweb.ebay.com/searchresults?kw=viking+helmet',
# Why: #1 Alexa recreation
# pylint: disable=line-too-long
'http://www.booking.com/searchresults.html?src=searchresults&latitude=65.0500&longitude=25.4667',
# Why: Top tech blog
'http://techcrunch.com',
# Why: #6 Alexa sports
'http://mlb.com/',
# Why: #14 Alexa California
'http://www.sfgate.com/',
# Why: Non-latin character set
'http://worldjournal.com/',
# Why: #15 Alexa news
'http://online.wsj.com/home-page',
# Why: Image-heavy mobile site
'http://www.deviantart.com/',
# Why: Top search engine
('http://www.baidu.com/s?wd=barack+obama&rsv_bp=0&rsv_spt=3&rsv_sug3=9&'
'rsv_sug=0&rsv_sug4=3824&rsv_sug1=3&inputT=4920'),
# Why: Top search engine
'http://www.bing.com/search?q=sloths',
# Why: Good example of poor initial scrolling
'http://ftw.usatoday.com/2014/05/spelling-bee-rules-shenanigans'
]
for url in urls_list:
self.AddUserStory(KeyMobileSitesSmoothPage(url, self))
|
|
# Copyright (C) 2011, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import logging
from webkitpy.common.memoized import memoized
_log = logging.getLogger(__name__)
# FIXME: Should this function be somewhere more general?
def _invert_dictionary(dictionary):
inverted_dictionary = {}
for key, value in dictionary.items():
if inverted_dictionary.get(value):
inverted_dictionary[value].append(key)
else:
inverted_dictionary[value] = [key]
return inverted_dictionary
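# Illustrative only (not part of the original module): _invert_dictionary groups
# keys that share a value, e.g. directories whose baselines have the same SHA-1.
def _example_invert_dictionary_usage():
    inverted = _invert_dictionary({'mac': 'abc1', 'win': 'abc1', 'linux': 'def2'})
    assert sorted(inverted['abc1']) == ['mac', 'win']
    assert inverted['def2'] == ['linux']
    return inverted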
class BaselineOptimizer(object):
ROOT_LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
def __init__(self, host, port_names, skip_scm_commands):
self._filesystem = host.filesystem
self._port_factory = host.port_factory
self._skip_scm_commands = skip_scm_commands
self._files_to_delete = []
self._files_to_add = []
self._scm = host.scm()
self._port_names = port_names
# Only used by unittests.
self.new_results_by_directory = []
def _baseline_root(self, port, baseline_name):
virtual_suite = port.lookup_virtual_suite(baseline_name)
if virtual_suite:
return self._filesystem.join(self.ROOT_LAYOUT_TESTS_DIRECTORY, virtual_suite.name)
return self.ROOT_LAYOUT_TESTS_DIRECTORY
def _baseline_search_path(self, port, baseline_name):
virtual_suite = port.lookup_virtual_suite(baseline_name)
if virtual_suite:
return port.virtual_baseline_search_path(baseline_name)
return port.baseline_search_path()
@memoized
def _relative_baseline_search_paths(self, port_name, baseline_name):
port = self._port_factory.get(port_name)
relative_paths = [self._filesystem.relpath(path, port.webkit_base()) for path in self._baseline_search_path(port, baseline_name)]
return relative_paths + [self._baseline_root(port, baseline_name)]
def _join_directory(self, directory, baseline_name):
# This code is complicated because both the directory name and the baseline_name have the virtual
# test suite in the name and the virtual baseline name is not a strict superset of the non-virtual name.
# For example, virtual/gpu/fast/canvas/foo-expected.png corresponds to fast/canvas/foo-expected.png and
# the baseline directories are like platform/mac/virtual/gpu/fast/canvas. So, to get the path
        # to the baseline in the platform directory, we need to append just foo-expected.png to the directory.
virtual_suite = self._port_factory.get().lookup_virtual_suite(baseline_name)
if virtual_suite:
baseline_name_without_virtual = baseline_name[len(virtual_suite.name) + 1:]
else:
baseline_name_without_virtual = baseline_name
return self._filesystem.join(self._scm.checkout_root, directory, baseline_name_without_virtual)
def read_results_by_directory(self, baseline_name):
results_by_directory = {}
directories = reduce(set.union, map(set, [self._relative_baseline_search_paths(port_name, baseline_name) for port_name in self._port_names]))
for directory in directories:
path = self._join_directory(directory, baseline_name)
if self._filesystem.exists(path):
results_by_directory[directory] = self._filesystem.sha1(path)
return results_by_directory
def _results_by_port_name(self, results_by_directory, baseline_name):
results_by_port_name = {}
for port_name in self._port_names:
for directory in self._relative_baseline_search_paths(port_name, baseline_name):
if directory in results_by_directory:
results_by_port_name[port_name] = results_by_directory[directory]
break
return results_by_port_name
@memoized
def _directories_immediately_preceding_root(self, baseline_name):
directories = set()
for port_name in self._port_names:
port = self._port_factory.get(port_name)
directory = self._filesystem.relpath(self._baseline_search_path(port, baseline_name)[-1], port.webkit_base())
directories.add(directory)
return directories
def _optimize_result_for_root(self, new_results_by_directory, baseline_name):
# The root directory (i.e. LayoutTests) is the only one that doesn't correspond
# to a specific platform. As such, it's the only one where the baseline in fallback directories
# immediately before it can be promoted up, i.e. if win and mac
# have the same baseline, then it can be promoted up to be the LayoutTests baseline.
# All other baselines can only be removed if they're redundant with a baseline earlier
        # in the fallback order. They can never be promoted up.
directories_immediately_preceding_root = self._directories_immediately_preceding_root(baseline_name)
shared_result = None
root_baseline_unused = False
for directory in directories_immediately_preceding_root:
this_result = new_results_by_directory.get(directory)
# If any of these directories don't have a baseline, there's no optimization we can do.
if not this_result:
return
if not shared_result:
shared_result = this_result
elif shared_result != this_result:
root_baseline_unused = True
baseline_root = self._baseline_root(self._port_factory.get(), baseline_name)
# The root baseline is unused if all the directories immediately preceding the root
# have a baseline, but have different baselines, so the baselines can't be promoted up.
if root_baseline_unused:
if baseline_root in new_results_by_directory:
del new_results_by_directory[baseline_root]
return
new_results_by_directory[baseline_root] = shared_result
for directory in directories_immediately_preceding_root:
del new_results_by_directory[directory]
def _find_optimal_result_placement(self, baseline_name):
results_by_directory = self.read_results_by_directory(baseline_name)
results_by_port_name = self._results_by_port_name(results_by_directory, baseline_name)
port_names_by_result = _invert_dictionary(results_by_port_name)
new_results_by_directory = self._remove_redundant_results(results_by_directory, results_by_port_name, port_names_by_result, baseline_name)
self._optimize_result_for_root(new_results_by_directory, baseline_name)
return results_by_directory, new_results_by_directory
def _remove_redundant_results(self, results_by_directory, results_by_port_name, port_names_by_result, baseline_name):
new_results_by_directory = copy.copy(results_by_directory)
for port_name in self._port_names:
current_result = results_by_port_name.get(port_name)
# This happens if we're missing baselines for a port.
if not current_result:
                continue
fallback_path = self._relative_baseline_search_paths(port_name, baseline_name)
current_index, current_directory = self._find_in_fallbackpath(fallback_path, current_result, new_results_by_directory)
for index in range(current_index + 1, len(fallback_path)):
new_directory = fallback_path[index]
if not new_directory in new_results_by_directory:
# No result for this baseline in this directory.
continue
elif new_results_by_directory[new_directory] == current_result:
                    # The result for new_directory is redundant with the result earlier in the fallback order.
if current_directory in new_results_by_directory:
del new_results_by_directory[current_directory]
else:
# The new_directory contains a different result, so stop trying to push results up.
break
return new_results_by_directory
def _find_in_fallbackpath(self, fallback_path, current_result, results_by_directory):
for index, directory in enumerate(fallback_path):
if directory in results_by_directory and (results_by_directory[directory] == current_result):
return index, directory
assert False, "result %s not found in fallback_path %s, %s" % (current_result, fallback_path, results_by_directory)
def _platform(self, filename):
platform_dir = self.ROOT_LAYOUT_TESTS_DIRECTORY + self._filesystem.sep + 'platform' + self._filesystem.sep
if filename.startswith(platform_dir):
return filename.replace(platform_dir, '').split(self._filesystem.sep)[0]
platform_dir = self._filesystem.join(self._scm.checkout_root, platform_dir)
if filename.startswith(platform_dir):
return filename.replace(platform_dir, '').split(self._filesystem.sep)[0]
return '(generic)'
def _move_baselines(self, baseline_name, results_by_directory, new_results_by_directory):
data_for_result = {}
for directory, result in results_by_directory.items():
if not result in data_for_result:
source = self._join_directory(directory, baseline_name)
data_for_result[result] = self._filesystem.read_binary_file(source)
scm_files = []
fs_files = []
for directory, result in results_by_directory.items():
if new_results_by_directory.get(directory) != result:
file_name = self._join_directory(directory, baseline_name)
if self._scm.exists(file_name):
scm_files.append(file_name)
else:
fs_files.append(file_name)
if scm_files or fs_files:
if scm_files:
_log.debug(" Deleting (SCM):")
for platform_dir in sorted(self._platform(filename) for filename in scm_files):
_log.debug(" " + platform_dir)
if self._skip_scm_commands:
self._files_to_delete.extend(scm_files)
else:
self._scm.delete_list(scm_files)
if fs_files:
_log.debug(" Deleting (file system):")
for platform_dir in sorted(self._platform(filename) for filename in fs_files):
_log.debug(" " + platform_dir)
for filename in fs_files:
self._filesystem.remove(filename)
else:
_log.debug(" (Nothing to delete)")
file_names = []
for directory, result in new_results_by_directory.items():
if results_by_directory.get(directory) != result:
destination = self._join_directory(directory, baseline_name)
self._filesystem.maybe_make_directory(self._filesystem.split(destination)[0])
self._filesystem.write_binary_file(destination, data_for_result[result])
file_names.append(destination)
if file_names:
_log.debug(" Adding:")
for platform_dir in sorted(self._platform(filename) for filename in file_names):
_log.debug(" " + platform_dir)
if self._skip_scm_commands:
# Have adds win over deletes.
self._files_to_delete = list(set(self._files_to_delete) - set(file_names))
self._files_to_add.extend(file_names)
else:
self._scm.add_list(file_names)
else:
_log.debug(" (Nothing to add)")
def write_by_directory(self, results_by_directory, writer, indent):
for path in sorted(results_by_directory):
writer("%s%s: %s" % (indent, self._platform(path), results_by_directory[path][0:6]))
def _optimize_subtree(self, baseline_name):
basename = self._filesystem.basename(baseline_name)
results_by_directory, new_results_by_directory = self._find_optimal_result_placement(baseline_name)
if new_results_by_directory == results_by_directory:
if new_results_by_directory:
_log.debug(" %s: (already optimal)" % basename)
self.write_by_directory(results_by_directory, _log.debug, " ")
else:
_log.debug(" %s: (no baselines found)" % basename)
# This is just used for unittests. Intentionally set it to the old data if we don't modify anything.
self.new_results_by_directory.append(results_by_directory)
return True
if self._results_by_port_name(results_by_directory, baseline_name) != self._results_by_port_name(new_results_by_directory, baseline_name):
# This really should never happen. Just a sanity check to make sure the script fails in the case of bugs
# instead of committing incorrect baselines.
_log.error(" %s: optimization failed" % basename)
self.write_by_directory(results_by_directory, _log.warning, " ")
return False
_log.debug(" %s:" % basename)
_log.debug(" Before: ")
self.write_by_directory(results_by_directory, _log.debug, " ")
_log.debug(" After: ")
self.write_by_directory(new_results_by_directory, _log.debug, " ")
self._move_baselines(baseline_name, results_by_directory, new_results_by_directory)
return True
def _optimize_virtual_root(self, baseline_name, non_virtual_baseline_name):
default_port = self._port_factory.get()
virtual_root_expected_baseline_path = self._filesystem.join(default_port.layout_tests_dir(), baseline_name)
if not self._filesystem.exists(virtual_root_expected_baseline_path):
return
root_sha1 = self._filesystem.sha1(virtual_root_expected_baseline_path)
results_by_directory = self.read_results_by_directory(non_virtual_baseline_name)
# See if all the immediate predecessors of the virtual root have the same expected result.
for port_name in self._port_names:
directories = self._relative_baseline_search_paths(port_name, non_virtual_baseline_name)
for directory in directories:
if directory not in results_by_directory:
continue
if results_by_directory[directory] != root_sha1:
return
break
_log.debug("Deleting redundant virtual root expected result.")
if self._skip_scm_commands and virtual_root_expected_baseline_path in self._files_to_add:
self._files_to_add.remove(virtual_root_expected_baseline_path)
if self._scm.exists(virtual_root_expected_baseline_path):
_log.debug(" Deleting (SCM): " + virtual_root_expected_baseline_path)
if self._skip_scm_commands:
self._files_to_delete.append(virtual_root_expected_baseline_path)
else:
self._scm.delete(virtual_root_expected_baseline_path)
else:
_log.debug(" Deleting (file system): " + virtual_root_expected_baseline_path)
self._filesystem.remove(virtual_root_expected_baseline_path)
def optimize(self, baseline_name):
# The virtual fallback path is the same as the non-virtual one tacked on to the bottom of the non-virtual path.
# See https://docs.google.com/a/chromium.org/drawings/d/1eGdsIKzJ2dxDDBbUaIABrN4aMLD1bqJTfyxNGZsTdmg/edit for
# a visual representation of this.
#
# So, we can optimize the virtual path, then the virtual root and then the regular path.
_log.debug("Optimizing regular fallback path.")
result = self._optimize_subtree(baseline_name)
non_virtual_baseline_name = self._port_factory.get().lookup_virtual_test_base(baseline_name)
if not non_virtual_baseline_name:
return result, self._files_to_delete, self._files_to_add
self._optimize_virtual_root(baseline_name, non_virtual_baseline_name)
_log.debug("Optimizing non-virtual fallback path.")
result |= self._optimize_subtree(non_virtual_baseline_name)
return result, self._files_to_delete, self._files_to_add
|
|
from rpython.flowspace.model import (Constant, Variable, SpaceOperation,
mkentrymap)
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.translator.unsimplify import insert_empty_block, split_block
def fold_op_list(operations, constants, exit_early=False, exc_catch=False):
newops = []
folded_count = 0
for spaceop in operations:
vargsmodif = False
vargs = []
args = []
for v in spaceop.args:
if isinstance(v, Constant):
args.append(v.value)
elif v in constants:
v = constants[v]
vargsmodif = True
args.append(v.value)
vargs.append(v)
try:
op = getattr(llop, spaceop.opname)
except AttributeError:
pass
else:
if not op.sideeffects and len(args) == len(vargs):
RESTYPE = spaceop.result.concretetype
try:
result = op(RESTYPE, *args)
except TypeError:
pass
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass # turn off reporting these as warnings: useless
#log.WARNING('constant-folding %r:' % (spaceop,))
#log.WARNING(' %s: %s' % (e.__class__.__name__, e))
else:
# success in folding this space operation
if spaceop.opname in fixup_op_result:
result = fixup_op_result[spaceop.opname](result)
constants[spaceop.result] = Constant(result, RESTYPE)
folded_count += 1
continue
# failed to fold an operation, exit early if requested
if exit_early:
return folded_count
else:
if vargsmodif:
if (spaceop.opname == 'indirect_call'
and isinstance(vargs[0], Constant)):
spaceop = SpaceOperation('direct_call', vargs[:-1],
spaceop.result)
else:
spaceop = SpaceOperation(spaceop.opname, vargs,
spaceop.result)
newops.append(spaceop)
# end
if exit_early:
return folded_count
else:
return newops
def constant_fold_block(block):
constants = {}
block.operations = fold_op_list(block.operations, constants,
exc_catch=block.canraise)
if constants:
if block.exitswitch in constants:
switch = constants[block.exitswitch].value
remaining_exits = [link for link in block.exits
if link.llexitcase == switch]
if not remaining_exits:
assert block.exits[-1].exitcase == 'default'
remaining_exits = [block.exits[-1]]
assert len(remaining_exits) == 1
remaining_exits[0].exitcase = None
remaining_exits[0].llexitcase = None
block.exitswitch = None
block.recloseblock(*remaining_exits)
for link in block.exits:
link.args = [constants.get(v, v) for v in link.args]
def fixup_solid(p):
# Operations returning pointers to inlined parts of a constant object
# have to be tweaked so that the inlined part keeps the whole object alive.
# XXX This is done with a hack. (See test_keepalive_const_*())
container = p._obj
assert isinstance(container, lltype._parentable)
container._keepparent = container._parentstructure()
# Instead of 'p', return a solid pointer, to keep the inlined part
# itself alive.
return container._as_ptr()
fixup_op_result = {
"getsubstruct": fixup_solid,
"getarraysubstruct": fixup_solid,
"direct_fieldptr": fixup_solid,
"direct_arrayitems": fixup_solid,
}
def complete_constants(link, constants):
# 'constants' maps some Variables of 'block' to Constants.
# Some input args of 'block' may be absent from 'constants'
# and must be fixed in the link to be passed directly from
# 'link.prevblock' instead of via 'block'.
for v1, v2 in zip(link.args, link.target.inputargs):
if v2 in constants:
assert constants[v2] is v1
else:
constants[v2] = v1
def rewire_link_for_known_exitswitch(link1, llexitvalue):
# For the case where link1.target contains only a switch, rewire link1
# to go directly to the correct exit based on a constant switch value.
# This is a situation that occurs typically after inlining; see
# test_fold_exitswitch_along_one_path.
block = link1.target
if block.exits[-1].exitcase == "default":
defaultexit = block.exits[-1]
nondefaultexits = block.exits[:-1]
else:
defaultexit = None
nondefaultexits = block.exits
for nextlink in nondefaultexits:
if nextlink.llexitcase == llexitvalue:
break # found -- the result is in 'nextlink'
else:
if defaultexit is None:
return # exit case not found! just ignore the problem here
nextlink = defaultexit
blockmapping = dict(zip(block.inputargs, link1.args))
newargs = []
for v in nextlink.args:
if isinstance(v, Variable):
v = blockmapping[v]
newargs.append(v)
link1.target = nextlink.target
link1.args = newargs
def prepare_constant_fold_link(link, constants, splitblocks):
block = link.target
if not block.operations:
# when the target block has no operation, there is nothing we can do
# except trying to fold an exitswitch
if block.exitswitch is not None and block.exitswitch in constants:
llexitvalue = constants[block.exitswitch].value
rewire_link_for_known_exitswitch(link, llexitvalue)
return
folded_count = fold_op_list(block.operations, constants, exit_early=True)
n = len(block.operations)
if block.canraise:
n -= 1
# is the next, non-folded operation an indirect_call?
if folded_count < n:
nextop = block.operations[folded_count]
if nextop.opname == 'indirect_call' and nextop.args[0] in constants:
# indirect_call -> direct_call
callargs = [constants[nextop.args[0]]]
constants1 = constants.copy()
complete_constants(link, constants1)
for v in nextop.args[1:-1]:
callargs.append(constants1.get(v, v))
v_result = Variable(nextop.result)
v_result.concretetype = nextop.result.concretetype
constants[nextop.result] = v_result
callop = SpaceOperation('direct_call', callargs, v_result)
newblock = insert_empty_block(link, [callop])
[link] = newblock.exits
assert link.target is block
folded_count += 1
if folded_count > 0:
splits = splitblocks.setdefault(block, [])
splits.append((folded_count, link, constants))
def rewire_links(splitblocks, graph):
for block, splits in splitblocks.items():
# A splitting position is given by how many operations were
# folded with the knowledge of an incoming link's constant.
# Various incoming links may cause various splitting positions.
# We split the block gradually, starting from the end.
splits.sort()
splits.reverse()
for position, link, constants in splits:
assert link.target is block
if position == len(block.operations) and block.exitswitch is None:
# a split here would leave nothing in the 2nd part, so
# directly rewire the links
assert len(block.exits) == 1
splitlink = block.exits[0]
else:
# split the block at the given position
splitlink = split_block(block, position)
assert list(block.exits) == [splitlink]
assert link.target is block
assert splitlink.prevblock is block
complete_constants(link, constants)
args = [constants.get(v, v) for v in splitlink.args]
link.args = args
link.target = splitlink.target
def constant_diffuse(graph):
count = 0
# after 'exitswitch vexit', replace 'vexit' with the corresponding constant
# if it also appears on the outgoing links
for block in graph.iterblocks():
vexit = block.exitswitch
if isinstance(vexit, Variable):
for link in block.exits:
if vexit in link.args and link.exitcase != 'default':
remap = {vexit: Constant(link.llexitcase,
vexit.concretetype)}
link.args = [remap.get(v, v) for v in link.args]
count += 1
    # If the same constants appear at the same positions in all links into a
    # block, remove them from the links, remove the corresponding input
    # variables, and introduce equivalent same_as operations at the beginning
    # of the block; then try to fold the block further.
for block, links in mkentrymap(graph).iteritems():
if block is graph.startblock:
continue
if block.exits == ():
continue
firstlink = links[0]
rest = links[1:]
diffuse = []
for i, c in enumerate(firstlink.args):
if not isinstance(c, Constant):
continue
for lnk in rest:
if lnk.args[i] != c:
break
else:
diffuse.append((i, c))
diffuse.reverse()
same_as = []
for i, c in diffuse:
for lnk in links:
del lnk.args[i]
v = block.inputargs.pop(i)
same_as.append(SpaceOperation('same_as', [c], v))
count += 1
block.operations = same_as + block.operations
if same_as:
constant_fold_block(block)
return count
def constant_fold_graph(graph):
# first fold inside the blocks
for block in graph.iterblocks():
if block.operations:
constant_fold_block(block)
# then fold along the links - a fixpoint process, because new links
# with new constants show up, even though we can probably prove that
# a single iteration is enough under some conditions, like the graph
# is in a join_blocks() form.
while 1:
diffused = constant_diffuse(graph)
splitblocks = {}
for link in list(graph.iterlinks()):
constants = {}
for v1, v2 in zip(link.args, link.target.inputargs):
if isinstance(v1, Constant):
constants[v2] = v1
if constants:
prepare_constant_fold_link(link, constants, splitblocks)
if splitblocks:
rewire_links(splitblocks, graph)
if not diffused and not splitblocks:
break # finished
|
|
from __future__ import unicode_literals
import logging
from functools import update_wrapper
from django import http
from django.core.exceptions import ImproperlyConfigured
from django.template.response import TemplateResponse
from django.utils.decorators import classonlymethod
from django.utils import six
logger = logging.getLogger('django.request')
class ContextMixin(object):
"""
A default context mixin that passes the keyword arguments received by
get_context_data as the template context.
"""
def add_context(self):
"""Convenience method; may be overridden to add context by returning a dictionary."""
return {}
def get_context_data(self, **kwargs):
if 'view' not in kwargs:
kwargs['view'] = self
context = self.add_context()
if context:
kwargs.update(context)
return kwargs
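# Illustrative only (not part of the original module): a mixin user overrides
# add_context() instead of get_context_data(); the returned dict is merged into
# the keyword arguments that become the template context.
class _ExampleContextMixinUsage(ContextMixin):
    def add_context(self):
        return {'greeting': 'hello'}
    # _ExampleContextMixinUsage().get_context_data() now contains 'greeting'
    # alongside the 'view' key added by ContextMixin.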
class View(object):
"""
Intentionally simple parent class for all views. Only implements
dispatch-by-method and simple sanity checking.
"""
http_method_names = ['get', 'post', 'put', 'delete', 'head', 'options', 'trace']
def __init__(self, **kwargs):
"""
Constructor. Called in the URLconf; can contain helpful extra
keyword arguments, and other things.
"""
# Go through keyword arguments, and either save their values to our
# instance, or raise an error.
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
@classonlymethod
def as_view(cls, **initkwargs):
"""
Main entry point for a request-response process.
"""
# sanitize keyword arguments
for key in initkwargs:
if key in cls.http_method_names:
raise TypeError("You tried to pass in the %s method name as a "
"keyword argument to %s(). Don't do that."
% (key, cls.__name__))
if not hasattr(cls, key):
raise TypeError("%s() received an invalid keyword %r. as_view "
"only accepts arguments that are already "
"attributes of the class." % (cls.__name__, key))
def view(request, *args, **kwargs):
self = cls(**initkwargs)
if hasattr(self, 'get') and not hasattr(self, 'head'):
self.head = self.get
self.request = request
self.user = request.user
self.args = args
self.kwargs = kwargs
return self.dispatch(request, *args, **kwargs)
# take name and docstring from class
update_wrapper(view, cls, updated=())
# and possible attributes set by decorators
# like csrf_exempt from dispatch
update_wrapper(view, cls.dispatch, assigned=())
return view
def initsetup(self):
pass
def dispatch(self, request, *args, **kwargs):
# Try to dispatch to the right method; if a method doesn't exist,
# defer to the error handler. Also defer to the error handler if the
# request method isn't on the approved list.
if request.method.lower() in self.http_method_names:
handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
else:
handler = self.http_method_not_allowed
self.initsetup()
return handler(request, *args, **kwargs)
def http_method_not_allowed(self, request, *args, **kwargs):
logger.warning('Method Not Allowed (%s): %s', request.method, request.path,
extra={
'status_code': 405,
'request': self.request
}
)
return http.HttpResponseNotAllowed(self._allowed_methods())
def options(self, request, *args, **kwargs):
"""
Handles responding to requests for the OPTIONS HTTP verb.
"""
response = http.HttpResponse()
response['Allow'] = ', '.join(self._allowed_methods())
response['Content-Length'] = '0'
return response
def _allowed_methods(self):
return [m.upper() for m in self.http_method_names if hasattr(self, m)]
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
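# Illustrative only (not part of the original module): the smallest useful View
# subclass. as_view() builds the callable that a URLconf points at, and
# dispatch() routes GET requests to get().
class _ExamplePingView(View):
    def get(self, request, *args, **kwargs):
        return http.HttpResponse('pong')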
class TemplateResponseMixin(object):
"""
A mixin that can be used to render a template.
"""
template_name = None
response_class = TemplateResponse
def render_to_response(self, context, **response_kwargs):
"""
Returns a response, using the `response_class` for this
view, with a template rendered with the given context.
If any keyword arguments are provided, they will be
passed to the constructor of the response class.
"""
return self.response_class(
request = self.request,
template = self.get_template_names(),
context = context,
**response_kwargs
)
def get_template_names(self):
"""
Returns a list of template names to be used for the request. Must return
a list. May not be called if render_to_response is overridden.
"""
if self.template_name is None:
raise ImproperlyConfigured(
"TemplateResponseMixin requires either a definition of "
"'template_name' or an implementation of 'get_template_names()'")
else:
return [self.template_name]
def get(self, request, *args, **kwargs):
from detail import BaseDetailView
from edit import FormView, FormSetView, ModelFormSetView, CreateView, UpdateView
from list import ListView
args = [request] + list(args)
context = dict()
update = context.update
        if isinstance(self, BaseDetailView):
            update(self.detail_get(*args, **kwargs))
        if isinstance(self, FormView):
            update(self.form_get(*args, **kwargs))
        if isinstance(self, (FormSetView, ModelFormSetView)):
            update(self.formset_get(*args, **kwargs))
        if isinstance(self, CreateView):
            update(self.create_get(*args, **kwargs))
        if isinstance(self, UpdateView):
            update(self.update_get(*args, **kwargs))
        if isinstance(self, ListView):
            update(self.list_get(*args, **kwargs))
update(self.get_context_data(**kwargs))
return self.render_to_response(context)
class TemplateView(TemplateResponseMixin, ContextMixin, View):
"""
A view that renders a template. This view will also pass into the context
any keyword arguments passed by the url conf.
"""
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
class RedirectView(View):
"""
A view that provides a redirect on any GET request.
"""
permanent = True
url = None
query_string = False
def get_redirect_url(self, **kwargs):
"""
        Return the URL to redirect to. Keyword arguments from the
URL pattern match generating the redirect request
are provided as kwargs to this method.
"""
if self.url:
url = self.url % kwargs
args = self.request.META.get('QUERY_STRING', '')
if args and self.query_string:
url = "%s?%s" % (url, args)
return url
else:
return None
def get(self, request, *args, **kwargs):
url = self.get_redirect_url(**kwargs)
if url:
if self.permanent:
return http.HttpResponsePermanentRedirect(url)
else:
return http.HttpResponseRedirect(url)
else:
logger.warning('Gone: %s', self.request.path,
extra={
'status_code': 410,
'request': self.request
})
return http.HttpResponseGone()
def head(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def options(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def put(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
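# Illustrative only (not part of the original module): get_redirect_url()
# interpolates keyword arguments captured by the URL pattern into `url`, and
# appends the query string when query_string is True. The URL pattern named
# below is hypothetical.
class _ExampleArticleRedirectView(RedirectView):
    permanent = False
    query_string = True
    url = '/articles/%(slug)s/'
    # A request for 'old-articles/foo/?page=2' matching
    # r'^old-articles/(?P<slug>[\w-]+)/$' would redirect to '/articles/foo/?page=2'.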
|
|
from datetime import datetime
from decimal import Decimal
from django.contrib.auth.models import User
from django.core.files.images import ImageFile
from django.db import IntegrityError
from model_mommy import mommy
from .base import RecipeTestCase
from recipes.models import Kind, Ingredient, Recipe, WeightUnit
class IngredientTest(RecipeTestCase):
def test_creates_a_basic_ingredient(self):
Ingredient.objects.create(
user=self.user,
name='Sand',
price=Decimal('1.99'),
kind=Kind.Addition,
weight_unit=WeightUnit.Kg,
)
ingredient = Ingredient.objects.get(pk=1)
self.assertEqual(ingredient.user, self.user)
self.assertEqual(str(ingredient), 'Sand')
self.assertEqual(ingredient.name, 'Sand')
self.assertEqual(ingredient.price, Decimal('1.99'))
self.assertEqual(ingredient.kind, Kind.Addition)
self.assertIsInstance(ingredient.created, datetime)
self.assertIsInstance(ingredient.updated, datetime)
self.assertEqual(ingredient.weight_unit, WeightUnit.Kg)
def test_cannot_create_ingredients_with_same_name(self):
Ingredient.objects.create(
user=self.user,
name='Sand',
price=Decimal('1.99'),
kind=Kind.Addition,
weight_unit=WeightUnit.Kg,
)
with self.assertRaises(IntegrityError):
Ingredient.objects.create(
user=self.user,
name='Sand',
price=Decimal('1.99'),
)
def test_can_create_ingredients_with_same_name_but_different_users(self):
Ingredient.objects.create(
user=self.user,
name='Sand',
price=Decimal('1.99'),
kind=Kind.Addition,
weight_unit=WeightUnit.Kg,
)
Ingredient.objects.create(
user=mommy.make(User),
name='Sand',
price=Decimal('1.99'),
kind=Kind.Addition,
weight_unit=WeightUnit.Kg,
)
class RecipeTest(RecipeTestCase):
def test_creates_a_basic_recipe(self):
Recipe.objects.create(
user=self.user,
name='Interesting Yellow'
)
recipe = Recipe.objects.get(pk=1)
self.assertEqual(recipe.name, 'Interesting Yellow')
self.assertEqual(recipe.user, self.user)
self.assertIsInstance(recipe.created, datetime)
self.assertIsInstance(recipe.updated, datetime)
def test_can_have_description_field(self):
Recipe.objects.create(
user=self.user,
name='Interesting Yellow',
description='Some description',
)
recipe = Recipe.objects.get(pk=1)
self.assertEqual(recipe.description, 'Some description')
def test_can_have_image_field(self):
with self.fixture('django.gif') as f:
Recipe.objects.create(
user=self.user,
name='Interesting Yellow',
image=ImageFile(f),
)
recipe = Recipe.objects.get(pk=1)
self.assertIn('django', recipe.image.name)
self.assertIn('gif', recipe.image.name)
def test_can_load_image(self):
with self.fixture('django.gif') as f:
Recipe.objects.create(
user=self.user,
name='Interesting Yellow',
image=ImageFile(f, 'fixtures/django.gif'),
)
recipe = Recipe.objects.get(pk=1)
response = self.client.get(recipe.image.url)
self.assertEqual(response.status_code, 200)
def test_contains_ingredients_in_certain_percentages(self):
ingredient1 = self.create_ingredient(
price=Decimal('1.23'), weight_unit=WeightUnit.g)
ingredient2 = self.create_ingredient(
price=Decimal('2.34'), weight_unit=WeightUnit.Kg)
recipe = Recipe.objects.create(
user=self.user,
name='Interesting Yellow'
)
recipe.add_part(ingredient1, percentage=Decimal('20'))
recipe.add_part(ingredient2, percentage=Decimal('30'))
parts = recipe.parts
self.assertEqual(parts[0].ingredient, ingredient1)
self.assertEqual(parts[1].ingredient, ingredient2)
self.assertEqual(parts[0].percentage, Decimal('20'))
self.assertEqual(parts[1].percentage, Decimal('30'))
self.assertEqual(
parts[0].relative_price,
Decimal('1.23') * Decimal('1000') * Decimal('20') / Decimal('100'))
self.assertEqual(
parts[1].relative_price,
Decimal('2.34') * Decimal('1') * Decimal('30') / Decimal('100'))
def test_calculates_price_based_on_ingredients(self):
ingredient1 = self.create_ingredient(
price=Decimal('1.23'), weight_unit=WeightUnit.g)
ingredient2 = self.create_ingredient(
price=Decimal('2.34'), weight_unit=WeightUnit.Kg)
recipe = Recipe.objects.create(
user=self.user,
name='Interesting Yellow'
)
recipe.add_part(ingredient1, percentage=Decimal('20'))
recipe.add_part(ingredient2, percentage=Decimal('30'))
self.assertEqual(recipe.price, (
Decimal('1.23') * Decimal('1000') * Decimal('20') +
Decimal('2.34') * Decimal('1') * Decimal('30')
) / (Decimal('20') + Decimal('30')))
def test_uses_correct_multiplication_for_price(self):
"""Just a sanity check test."""
ingredient1 = self.create_ingredient(
price=Decimal('0.05'), weight_unit=WeightUnit.g)
ingredient2 = self.create_ingredient(
price=Decimal('50.00'), weight_unit=WeightUnit.Kg)
recipe = Recipe.objects.create(
user=self.user,
name='Interesting Yellow'
)
recipe.add_part(ingredient1, percentage=Decimal('50'))
recipe.add_part(ingredient2, percentage=Decimal('50'))
self.assertEqual(recipe.price, Decimal('50.00'))
def test_calculates_price_for_no_parts(self):
recipe = Recipe.objects.create(
user=self.user,
name='Null Recipe',
)
self.assertEqual(recipe.price, Decimal('0'))
def test_copies_a_recipe(self):
ingredient1 = self.create_ingredient(
price=Decimal('1.23'), weight_unit=WeightUnit.g)
ingredient2 = self.create_ingredient(
price=Decimal('2.34'), weight_unit=WeightUnit.Kg)
recipe1 = Recipe.objects.create(
user=self.user,
name='Interesting Yellow'
)
recipe1.add_part(ingredient1, percentage=Decimal('20'))
recipe1.add_part(ingredient2, percentage=Decimal('30'))
id1 = recipe1.pk
price1 = recipe1.price
name1 = recipe1.name
recipe2 = recipe1.clone()
recipe2 = Recipe.objects.get(pk=recipe2.pk)
self.assertNotEqual(recipe2.pk, id1)
self.assertEqual(recipe2.price, price1)
self.assertEqual(recipe2.price, recipe1.price)
self.assertEqual(recipe2.name, '{} (Copy of {})'.format(name1, id1))
class KindTest(RecipeTestCase):
def test_converts_to_pretty_name(self):
self.assertEqual(str(Kind.Base), 'Base')
self.assertEqual(str(Kind.Addition), 'Addition')
class WeightUnitTest(RecipeTestCase):
def test_gets_weighted_in_for_equal_units(self):
self.assertEqual(
WeightUnit.Kg.weighted_in(WeightUnit.Kg), Decimal('1'))
self.assertEqual(
WeightUnit.g.weighted_in(WeightUnit.g), Decimal('1'))
def test_gets_kg_weighted_in_g(self):
self.assertEqual(
WeightUnit.Kg.weighted_in(WeightUnit.g), Decimal('1000'))
def test_gets_g_weighted_in_kg(self):
self.assertEqual(
WeightUnit.g.weighted_in(WeightUnit.Kg), Decimal('0.001'))
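# Illustrative only (not one of the test cases): the price formula asserted in
# test_calculates_price_based_on_ingredients, worked through with plain Decimal
# arithmetic. Prices appear to be normalised to a per-Kg figure (g to Kg
# multiplies by 1000) and weighted by each part's percentage.
def _example_recipe_price_arithmetic():
    part1 = Decimal('1.23') * Decimal('1000') * Decimal('20')  # 1.23 per g at 20%
    part2 = Decimal('2.34') * Decimal('1') * Decimal('30')     # 2.34 per Kg at 30%
    price = (part1 + part2) / (Decimal('20') + Decimal('30'))
    assert price == Decimal('493.404')
    return price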
|
|
"""
dj-stripe Card Model Tests.
"""
from copy import deepcopy
from unittest.mock import ANY, patch
import pytest
import stripe
from django.contrib.auth import get_user_model
from django.test import TestCase
from stripe.error import InvalidRequestError
from djstripe import enums
from djstripe.exceptions import StripeObjectManipulationException
from djstripe.models import Account, Card, Customer
from . import (
FAKE_CARD,
FAKE_CARD_III,
FAKE_CARD_IV,
FAKE_CUSTOM_ACCOUNT,
FAKE_CUSTOMER,
FAKE_STANDARD_ACCOUNT,
IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
AssertStripeFksMixin,
)
pytestmark = pytest.mark.django_db
class TestStrCard:
@pytest.mark.parametrize(
"fake_stripe_data, has_account, has_customer",
[
(deepcopy(FAKE_CARD), False, True),
(deepcopy(FAKE_CARD_IV), True, False),
],
)
def test__str__(self, fake_stripe_data, has_account, has_customer, monkeypatch):
def mock_customer_get(*args, **kwargs):
data = deepcopy(FAKE_CUSTOMER)
data["default_source"] = None
data["sources"] = []
return data
def mock_account_get(*args, **kwargs):
return deepcopy(FAKE_CUSTOM_ACCOUNT)
# monkeypatch stripe.Account.retrieve and stripe.Customer.retrieve calls to return
# the desired json response.
monkeypatch.setattr(stripe.Account, "retrieve", mock_account_get)
monkeypatch.setattr(stripe.Customer, "retrieve", mock_customer_get)
card = Card.sync_from_stripe_data(fake_stripe_data)
default = False
if has_account:
account = Account.objects.filter(id=fake_stripe_data["account"]).first()
default = fake_stripe_data["default_for_currency"]
assert (
f"{enums.CardBrand.humanize(fake_stripe_data['brand'])} {account.default_currency} {'Default' if default else ''} {fake_stripe_data['last4']}"
== str(card)
)
if has_customer:
customer = Customer.objects.filter(id=fake_stripe_data["customer"]).first()
default_source = customer.default_source
default_payment_method = customer.default_payment_method
if (
default_payment_method
and fake_stripe_data["id"] == default_payment_method.id
) or (default_source and fake_stripe_data["id"] == default_source.id):
# current card is the default payment method or source
default = True
assert (
f"{enums.CardBrand.humanize(fake_stripe_data['brand'])} {fake_stripe_data['last4']} {'Default' if default else ''} Expires {fake_stripe_data['exp_month']} {fake_stripe_data['exp_year']}"
== str(card)
)
class CardTest(AssertStripeFksMixin, TestCase):
def setUp(self):
# create a Standard Stripe Account
self.standard_account = FAKE_STANDARD_ACCOUNT.create()
# create a Custom Stripe Account
self.custom_account = FAKE_CUSTOM_ACCOUNT.create()
user = get_user_model().objects.create_user(
username="testuser", email="[email protected]"
)
fake_empty_customer = deepcopy(FAKE_CUSTOMER)
fake_empty_customer["default_source"] = None
fake_empty_customer["sources"] = []
self.customer = fake_empty_customer.create_for_user(user)
def test_attach_objects_hook_without_customer(self):
FAKE_CARD_DICT = deepcopy(FAKE_CARD)
FAKE_CARD_DICT["customer"] = None
card = Card.sync_from_stripe_data(FAKE_CARD_DICT)
self.assertEqual(card.customer, None)
def test_attach_objects_hook_without_account(self):
card = Card.sync_from_stripe_data(FAKE_CARD)
self.assertEqual(card.account, None)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
@patch(
"stripe.Account.retrieve_external_account",
return_value=deepcopy(FAKE_CARD),
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
@patch(
"stripe.Customer.retrieve_source",
return_value=deepcopy(FAKE_CARD),
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
def test_api_retrieve_by_customer_equals_retrieval_by_account(
self,
customer_retrieve_source_mock,
account_retrieve_external_account_mock,
customer_retrieve_mock,
):
# deepcopy the CardDict object
FAKE_CARD_DICT = deepcopy(FAKE_CARD)
card = Card.sync_from_stripe_data(deepcopy(FAKE_CARD_DICT))
card_by_customer = card.api_retrieve()
# Add account
FAKE_CARD_DICT["account"] = FAKE_CUSTOM_ACCOUNT["id"]
FAKE_CARD_DICT["customer"] = None
card = Card.sync_from_stripe_data(FAKE_CARD_DICT)
card_by_account = card.api_retrieve()
# assert the same card object gets retrieved
self.assertCountEqual(card_by_customer, card_by_account)
def test_create_card_finds_customer_with_account_absent(self):
card = Card.sync_from_stripe_data(FAKE_CARD)
self.assertEqual(self.customer, card.customer)
self.assertEqual(
card.get_stripe_dashboard_url(), self.customer.get_stripe_dashboard_url()
)
self.assert_fks(
card,
expected_blank_fks={
"djstripe.Card.account",
"djstripe.BankAccount.account",
"djstripe.Customer.coupon",
"djstripe.Customer.default_payment_method",
"djstripe.Customer.default_source",
},
)
def test_create_card_finds_customer_with_account_present(self):
# deepcopy the CardDict object
FAKE_CARD_DICT = deepcopy(FAKE_CARD)
# Add account
FAKE_CARD_DICT["account"] = self.standard_account.id
card = Card.sync_from_stripe_data(FAKE_CARD_DICT)
self.assertEqual(self.customer, card.customer)
self.assertEqual(self.standard_account, card.account)
self.assertEqual(
card.get_stripe_dashboard_url(),
self.customer.get_stripe_dashboard_url(),
)
self.assert_fks(
card,
expected_blank_fks={
"djstripe.BankAccount.account",
"djstripe.Customer.coupon",
"djstripe.Customer.default_payment_method",
"djstripe.Customer.default_source",
},
)
def test_create_card_finds_account_with_customer_absent(self):
# deepcopy the CardDict object
FAKE_CARD_DICT = deepcopy(FAKE_CARD)
# Add account and remove customer
FAKE_CARD_DICT["account"] = self.standard_account.id
FAKE_CARD_DICT["customer"] = None
card = Card.sync_from_stripe_data(FAKE_CARD_DICT)
self.assertEqual(self.standard_account, card.account)
self.assertEqual(
card.get_stripe_dashboard_url(),
f"https://dashboard.stripe.com/{card.account.id}/settings/payouts",
)
self.assert_fks(
card,
expected_blank_fks={
"djstripe.Card.customer",
"djstripe.BankAccount.account",
"djstripe.Customer.coupon",
"djstripe.Customer.default_payment_method",
"djstripe.Customer.default_source",
},
)
@patch("stripe.Token.create", autospec=True)
def test_card_create_token(self, token_create_mock):
card = {"number": "4242", "exp_month": 5, "exp_year": 2012, "cvc": 445}
Card.create_token(**card)
token_create_mock.assert_called_with(api_key=ANY, card=card)
def test_api_call_no_customer_and_no_account(self):
exception_message = (
"Cards must be manipulated through either a Stripe Connected Account or a customer. "
"Pass a Customer or an Account object into this call."
)
with self.assertRaisesMessage(
StripeObjectManipulationException, exception_message
):
Card._api_create()
with self.assertRaisesMessage(
StripeObjectManipulationException, exception_message
):
Card.api_list()
def test_api_call_bad_customer(self):
exception_message = (
"Cards must be manipulated through a Customer. "
"Pass a Customer object into this call."
)
with self.assertRaisesMessage(
StripeObjectManipulationException, exception_message
):
Card._api_create(customer="fish")
with self.assertRaisesMessage(
StripeObjectManipulationException, exception_message
):
Card.api_list(customer="fish")
def test_api_call_bad_account(self):
exception_message = (
"Cards must be manipulated through a Stripe Connected Account. "
"Pass an Account object into this call."
)
with self.assertRaisesMessage(
StripeObjectManipulationException, exception_message
):
Card._api_create(account="fish")
with self.assertRaisesMessage(
StripeObjectManipulationException, exception_message
):
Card.api_list(account="fish")
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test__api_create_with_account_absent(self, customer_retrieve_mock):
stripe_card = Card._api_create(customer=self.customer, source=FAKE_CARD["id"])
self.assertEqual(FAKE_CARD, stripe_card)
@patch(
"stripe.Account.retrieve",
return_value=deepcopy(FAKE_CUSTOM_ACCOUNT),
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
def test__api_create_with_customer_absent(self, account_retrieve_mock):
stripe_card = Card._api_create(
account=self.custom_account, source=FAKE_CARD_IV["id"]
)
self.assertEqual(FAKE_CARD_IV, stripe_card)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
@patch(
"stripe.Account.retrieve",
return_value=deepcopy(FAKE_CUSTOM_ACCOUNT),
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
def test__api_create_with_customer_and_account(
self, account_retrieve_mock, customer_retrieve_mock
):
FAKE_CARD_DICT = deepcopy(FAKE_CARD)
FAKE_CARD_DICT["account"] = FAKE_CUSTOM_ACCOUNT["id"]
stripe_card = Card._api_create(
account=self.custom_account,
customer=self.customer,
source=FAKE_CARD_DICT["id"],
)
self.assertEqual(FAKE_CARD, stripe_card)
@patch(
"stripe.Customer.delete_source",
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
@patch("stripe.Card.retrieve", return_value=deepcopy(FAKE_CARD), autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
@patch(
"stripe.Customer.retrieve_source",
return_value=deepcopy(FAKE_CARD),
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
def test_remove_card_by_customer(
self,
customer_retrieve_source_mock,
customer_retrieve_mock,
card_retrieve_mock,
card_delete_mock,
):
stripe_card = Card._api_create(customer=self.customer, source=FAKE_CARD["id"])
Card.sync_from_stripe_data(stripe_card)
self.assertEqual(1, self.customer.legacy_cards.count())
# remove card
card = self.customer.legacy_cards.all()[0]
card.remove()
self.assertEqual(0, self.customer.legacy_cards.count())
api_key = card.default_api_key
stripe_account = card._get_stripe_account_id(api_key)
card_delete_mock.assert_called_once_with(
self.customer.id, card.id, api_key=api_key, stripe_account=stripe_account
)
@patch(
"stripe.Account.delete_external_account",
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
@patch(
"stripe.Account.retrieve",
return_value=deepcopy(FAKE_CUSTOM_ACCOUNT),
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
def test_remove_card_by_account(self, account_retrieve_mock, card_delete_mock):
stripe_card = Card._api_create(
account=self.custom_account, source=FAKE_CARD_IV["id"]
)
card = Card.sync_from_stripe_data(stripe_card)
self.assertEqual(1, Card.objects.filter(id=stripe_card["id"]).count())
# remove card
card.remove()
self.assertEqual(0, Card.objects.filter(id=stripe_card["id"]).count())
api_key = card.default_api_key
stripe_account = card._get_stripe_account_id(api_key)
card_delete_mock.assert_called_once_with(
self.custom_account.id,
card.id,
api_key=api_key,
stripe_account=stripe_account,
)
@patch(
"stripe.Account.delete_external_account",
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
@patch(
"stripe.Account.retrieve",
return_value=deepcopy(FAKE_CUSTOM_ACCOUNT),
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
def test_remove_already_deleted_card_by_account(
self, account_retrieve_mock, card_delete_mock
):
stripe_card = Card._api_create(
account=self.custom_account, source=FAKE_CARD_IV["id"]
)
card = Card.sync_from_stripe_data(stripe_card)
self.assertEqual(1, Card.objects.filter(id=stripe_card["id"]).count())
# remove card
card.remove()
self.assertEqual(0, Card.objects.filter(id=stripe_card["id"]).count())
# remove card again
count, _ = Card.objects.filter(id=stripe_card["id"]).delete()
self.assertEqual(0, count)
api_key = card.default_api_key
stripe_account = card._get_stripe_account_id(api_key)
card_delete_mock.assert_called_once_with(
self.custom_account.id,
card.id,
api_key=api_key,
stripe_account=stripe_account,
)
@patch(
"stripe.Customer.delete_source",
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
@patch(
"stripe.Customer.retrieve_source",
return_value=deepcopy(FAKE_CARD),
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
def test_remove_already_deleted_card(
self,
customer_retrieve_source_mock,
customer_retrieve_mock,
card_delete_mock,
):
stripe_card = Card._api_create(customer=self.customer, source=FAKE_CARD["id"])
Card.sync_from_stripe_data(stripe_card)
self.assertEqual(self.customer.legacy_cards.count(), 1)
card_object = self.customer.legacy_cards.first()
Card.objects.filter(id=stripe_card["id"]).delete()
self.assertEqual(self.customer.legacy_cards.count(), 0)
card_object.remove()
self.assertEqual(self.customer.legacy_cards.count(), 0)
@patch("djstripe.models.Card._api_delete", autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_remove_no_such_source(self, customer_retrieve_mock, card_delete_mock):
stripe_card = Card._api_create(customer=self.customer, source=FAKE_CARD["id"])
Card.sync_from_stripe_data(stripe_card)
card_delete_mock.side_effect = InvalidRequestError("No such source:", "blah")
self.assertEqual(1, self.customer.legacy_cards.count())
card = self.customer.legacy_cards.all()[0]
card.remove()
self.assertEqual(0, self.customer.legacy_cards.count())
self.assertTrue(card_delete_mock.called)
@patch("djstripe.models.Card._api_delete", autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_remove_no_such_customer(self, customer_retrieve_mock, card_delete_mock):
stripe_card = Card._api_create(customer=self.customer, source=FAKE_CARD["id"])
Card.sync_from_stripe_data(stripe_card)
card_delete_mock.side_effect = InvalidRequestError("No such customer:", "blah")
self.assertEqual(1, self.customer.legacy_cards.count())
card = self.customer.legacy_cards.all()[0]
card.remove()
self.assertEqual(0, self.customer.legacy_cards.count())
self.assertTrue(card_delete_mock.called)
@patch("djstripe.models.Card._api_delete", autospec=True)
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_remove_unexpected_exception(
self, customer_retrieve_mock, card_delete_mock
):
stripe_card = Card._api_create(customer=self.customer, source=FAKE_CARD["id"])
Card.sync_from_stripe_data(stripe_card)
card_delete_mock.side_effect = InvalidRequestError(
"Unexpected Exception", "blah"
)
self.assertEqual(1, self.customer.legacy_cards.count())
card = self.customer.legacy_cards.all()[0]
with self.assertRaisesMessage(InvalidRequestError, "Unexpected Exception"):
card.remove()
@patch(
"stripe.Customer.retrieve", return_value=deepcopy(FAKE_CUSTOMER), autospec=True
)
def test_api_list(self, customer_retrieve_mock):
card_list = Card.api_list(customer=self.customer)
self.assertCountEqual([FAKE_CARD, FAKE_CARD_III], [i for i in card_list])
|
|
# -*- coding: utf-8 -*-
#
# Copyright Adam Pritchard 2020
# MIT License : https://adampritchard.mit-license.org/
#
"""
Functions and classes to be used for accessing the data stored in Google Sheets.
Google is inconsistent with terminology for "sheets" stuff. We're going to use
"sheet" or "spreadsheet" to mean the whole document/file and "worksheet" to mean the
tabs of data in a spreadsheet.
Note that nothing here is terribly robust. For example, sheet rows are gathered and then
modified or deleted based on the row number recorded at the time of extraction; if another
deletion happens in between, the numbers will be off and the wrong rows will be modified.
This is okay for our very small user base with very limited middle-of-the-night deletion
occurring, but isn't good enough for a real application.
"""
from __future__ import annotations
from typing import Tuple, Callable, Optional, Union, List
import itertools
import json
import logging
from googleapiclient.discovery import build
from googleapiclient.discovery_cache.base import Cache
from google.oauth2 import service_account
import config
# from https://github.com/googleapis/google-api-python-client/issues/325#issuecomment-274349841
class MemoryCache(Cache):
_CACHE = {}
def get(self, url):
return MemoryCache._CACHE.get(url)
def set(self, url, content):
MemoryCache._CACHE[url] = content
class Row(object):
"""Represents a row from a sheet.
    Which properties are filled in depends on whether the Row was constructed in code or
retrieved from a sheet. It makes no sense for `dict` or `sheet` to not be set, but
`num` or `headings` could be unset.
`num` is 1-based (although this shouldn't matter to external callers).
"""
    def __init__(self, dct: Optional[dict] = None, sheet: config.Spreadsheet = None, num: int = 0, headings: List[str] = None):
        # Avoid a shared mutable default: each Row gets its own dict.
        self.dict = dct if dct is not None else {}
self.sheet = sheet
self.num = num # 0 is invalid
self.headings = headings
@staticmethod
def find(sheet: config.Spreadsheet, matcher: Callable[[dict], bool]) -> Optional[Row]:
"""Find the (first) matching row in the given sheet.
"""
match = find_rows(sheet, matcher, 1)
if not match:
return None
return match[0]
def _to_tuple(self):
"""Convert the dict of data into a tuple, appropriate for API operations.
"""
return _row_dict_to_tuple(
self.sheet.spreadsheet_id,
self.sheet.worksheet_title,
self.dict,
self.headings)
def append(self):
"""Append the current row to the given sheet.
WARNING: If you directly construct a list of new Rows -- with no `headings` set --
and then `append()` them in a loop, you'll be incurring two network operations
        each -- one to fetch headings, and one to append. We don't do that right now, but
        one day we might need a more efficient batch operation.
"""
_add_row(
self.sheet.spreadsheet_id, self.sheet.worksheet_title,
self._to_tuple())
def update(self):
"""Update the current row in the sheet.
"""
if self.num <= 0:
# We need to find the location of the row to update
match_row = Row.find(
self.sheet,
lambda d: d[self.sheet.id_field().name] == self.dict[self.sheet.id_field().name])
if not match_row:
raise Exception('could not find own row to update')
self.num = match_row.num
update_rows(self.sheet, [self])
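# A minimal usage sketch for the Row class above. The 'email' and 'newsletter'
# column names are hypothetical; `sheet` is any configured config.Spreadsheet.
def _example_update_newsletter_flag(sheet: config.Spreadsheet, email: str, opted_in: bool):
    row = Row.find(sheet, lambda d: d.get('email') == email)
    if row is None:
        return
    row.dict['newsletter'] = opted_in
    row.update()  # row.num is already set because the row came from Row.find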
_service_account_info = json.load(open(config.SERVICE_ACCOUNT_CREDS_JSON_FILE_PATH))
def _sheets_service():
"""Get the Google Sheets service.
"""
# Using the "default" (derived from App Engine env) credentials doesn't seem to work.
# It results in the error "Request had insufficient authentication scopes."
credentials = service_account.Credentials.from_service_account_info(_service_account_info)
return build('sheets', 'v4', credentials=credentials, cache=MemoryCache()).spreadsheets()
def _drive_service():
"""Get the Google Drive service.
"""
credentials = service_account.Credentials.from_service_account_info(_service_account_info)
return build('drive', 'v3', credentials=credentials, cache=MemoryCache())
def _add_row(spreadsheet_id: str, worksheet_title: str, row_values: List):
"""Add a row to the given sheet.
"""
body = {
'values': [row_values]
}
ss = _sheets_service()
ss.values().append(spreadsheetId=spreadsheet_id,
range=worksheet_title,
body=body,
insertDataOption='INSERT_ROWS',
valueInputOption='USER_ENTERED').execute()
def update_rows(sheet: config.Spreadsheet, rows: List[Row]):
"""Update all of the given rows in the sheet.
    Note that the `num` property of the rows must be populated (so these row objects
    should have been retrieved from the sheet).
"""
if not rows:
return
body = { 'valueInputOption': 'USER_ENTERED', 'data': [] }
for r in rows:
if r.num <= 0:
raise ValueError('row.num not populated')
elif r.num == 1:
# This is an attempt to overwrite the headings. Disallow.
msg = f'sheetdata.update_rows: attempt to overwrite headings prevented; {r}'
logging.error(msg)
raise ValueError(msg)
logging.debug('sheetdata.update_rows: %s::%d', type(sheet.fields), r.num)
body['data'].append({
'range': f'A{r.num}',
'majorDimension': 'ROWS',
'values': [r._to_tuple()],
})
ss = _sheets_service()
ss.values().batchUpdate(spreadsheetId=sheet.spreadsheet_id, body=body).execute()
def delete_rows(sheet: config.Spreadsheet, row_nums: List[int]):
"""Deletes rows at the given numbers from the sheet.
Note that row numbers are 1-based.
"""
if not row_nums:
return
# To account for rows shifting as they're deleted, we have to do it from the bottom up
row_nums.sort(reverse=True)
body = { 'requests': [] }
for n in row_nums:
row_idx = n - 1 # deleteDimension uses 0-based rows
body['requests'].append({
'deleteDimension': {
'range': {
'dimension': 'ROWS',
'sheetId': sheet.worksheet_id,
'startIndex': row_idx,
'endIndex': row_idx+1
}
}
})
ss = _sheets_service()
ss.batchUpdate(spreadsheetId=sheet.spreadsheet_id, body=body).execute()
def _get_sheet_data(spreadsheet_id: str, worksheet_title: str, row_num_start: int = None, row_num_end: int = None) -> List[List]:
"""Get data in the sheet, bounded by the given start and end (which are 1-based and inclusive).
If the start and end are None, the entire sheet will be retrieved (including headings).
"""
rng = worksheet_title
if row_num_start:
rng += f'!{row_num_start}'
if row_num_end:
if not row_num_start:
rng += f'!1'
rng += f':{row_num_end}'
ss = _sheets_service()
result = ss.values().get(spreadsheetId=spreadsheet_id,
range=rng,
dateTimeRenderOption='FORMATTED_STRING',
majorDimension='ROWS',
valueRenderOption='UNFORMATTED_VALUE').execute()
if not result.get('values'):
# This can happen if the spreadsheet is empty
        logging.error('_get_sheet_data: no values present')
return []
return result['values']
def find_rows(sheet: config.Spreadsheet, matcher: Callable[[dict], bool], max_matches: int = None) -> List[Row]:
"""Find matching rows in the sheet. The number of rows returned will be up to `max_matches`,
    unless it is None, in which case all matches will be returned.
If matcher is None, all rows will be returned.
"""
tuples = _get_sheet_data(sheet.spreadsheet_id, sheet.worksheet_title)
if len(tuples) == 0:
# There aren't any headings in the spreadsheet. Game over.
msg = f'spreadsheet is missing headings: {sheet.spreadsheet_id}::{sheet.worksheet_title}'
logging.critical(msg)
raise Exception(msg)
headings = tuples[0]
matches = []
for i in range(1, len(tuples)):
row_num = i + 1 # 1-based
t = tuples[i]
row_dict = _row_tuple_to_dict(sheet.spreadsheet_id, sheet.worksheet_title, t, headings)
if not matcher or matcher(row_dict):
matches.append(Row(row_dict, sheet=sheet, num=row_num, headings=headings))
if max_matches and len(matches) >= max_matches:
break
logging.debug(f'sheetdata.find_rows: {type(sheet.fields)}: matches len is {len(matches)} of {max_matches}')
return matches
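# A hedged sketch combining find_rows and delete_rows. The 'expired' column is
# hypothetical; `sheet` is any configured config.Spreadsheet.
def _example_purge_expired_rows(sheet: config.Spreadsheet):
    stale = find_rows(sheet, lambda d: d.get('expired') == 'yes')
    # delete_rows sorts the row numbers descending itself, so order doesn't matter here.
    delete_rows(sheet, [row.num for row in stale])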
def copy_drive_file(file_id: str, new_title: str, new_description: str):
"""Copy a Google Drive file, with a new title and description.
"""
drive = _drive_service()
# Make the copy
request_body = { 'name': new_title, 'description': new_description }
new_file_info = drive.files().copy(fileId=file_id, body=request_body).execute()
# The service account will be the owner of the new file, so we need to transfer it to
# the owner of the original file.
orig_file_info = drive.files().get(fileId=file_id, fields="owners").execute()
orig_owner_permission_id = orig_file_info['owners'][0]['permissionId']
drive.permissions().update(
fileId=new_file_info['id'],
permissionId=orig_owner_permission_id,
transferOwnership=True,
body={'role': 'owner'}).execute()
def get_first_sheet_properties(spreadsheet_id: str) -> dict:
"""Returns the properties dict of the first sheet in the spreadsheet.
This includes 'title', for use in A1 range notation, and 'id'.
Throws exception if not found.
"""
ss = _sheets_service()
result = ss.get(spreadsheetId=spreadsheet_id).execute()
return result['sheets'][0]['properties']
def _get_sheet_headings(spreadsheet_id: str, worksheet_title: str) -> List:
"""Get the headings from the given sheet.
"""
return _get_sheet_data(spreadsheet_id, worksheet_title, 1, 1)[0]
def _row_dict_to_tuple(spreadsheet_id: str, worksheet_title: str, row_dict: dict, headings: List) -> List:
"""Convert a dict with a row of sheet data into a tuple suitable for API operations.
    If `headings` is None, it will be fetched, at the cost of an additional network operation.
"""
if not headings:
headings = _get_sheet_headings(spreadsheet_id, worksheet_title)
return [row_dict.get(h) for h in headings]
def _row_tuple_to_dict(spreadsheet_id: str, worksheet_title: str, row_tuple: List, headings: List) -> dict:
"""Convert a tuple of sheet data into a dict.
    If `headings` is None, it will be fetched, at the cost of an additional network operation.
"""
if not headings:
headings = _get_sheet_headings(spreadsheet_id, worksheet_title)
return dict(itertools.zip_longest(headings, row_tuple))
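# Hedged round-trip sketch for the two conversion helpers above. The spreadsheet id and
# worksheet title are placeholders; passing explicit headings avoids the network fetch.
def _example_heading_round_trip():
    headings = ['name', 'age']
    as_tuple = _row_dict_to_tuple('sheet-id', 'Sheet1', {'name': 'Ada', 'age': 42}, headings)
    assert as_tuple == ['Ada', 42]
    assert _row_tuple_to_dict('sheet-id', 'Sheet1', as_tuple, headings) == {'name': 'Ada', 'age': 42}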
|
|
"""
gargoyle.models
~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
from django.conf import settings
from django.db import models
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from jsonfield import JSONField
from .constants import DISABLED, EXCLUDE, FEATURE, GLOBAL, INCLUDE, INHERIT, SELECTIVE
class Switch(models.Model):
"""
Stores information on all switches. Generally handled through an instance of ``ModelDict``,
which is registered under the global ``gargoyle`` namespace.
    ``value`` is keyed by condition-set namespace, and then by field name:
>>> {
>>> namespace: {
>>> id: [[INCLUDE, 0, 50], [INCLUDE, 'string']] // 50% of users
>>> }
>>> }
"""
STATUS_CHOICES = (
(DISABLED, 'Disabled'),
(SELECTIVE, 'Selective'),
(GLOBAL, 'Global'),
(INHERIT, 'Inherit'),
)
STATUS_LABELS = {
INHERIT: 'Inherit from parent',
GLOBAL: 'Active for everyone',
SELECTIVE: 'Active for conditions',
DISABLED: 'Disabled for everyone',
}
key = models.CharField(max_length=64, primary_key=True)
value = JSONField()
label = models.CharField(max_length=64, null=True)
date_created = models.DateTimeField(default=now)
date_modified = models.DateTimeField(auto_now=True)
description = models.TextField(null=True)
status = models.PositiveSmallIntegerField(default=DISABLED, choices=STATUS_CHOICES)
class Meta:
app_label = 'gargoyle'
permissions = (
("can_view", "Can view"),
)
verbose_name = _('switch')
verbose_name_plural = _('switches')
def __init__(self, *args, **kwargs):
if (
kwargs and
hasattr(settings, 'GARGOYLE_SWITCH_DEFAULTS') and
'key' in kwargs and
'status' not in kwargs
):
key = kwargs['key']
switch_default = settings.GARGOYLE_SWITCH_DEFAULTS.get(key)
if switch_default is not None:
is_active = switch_default.get('is_active')
if is_active is True:
kwargs['status'] = GLOBAL
elif is_active is False:
kwargs['status'] = DISABLED
elif switch_default.get('initial_status') in self.STATUS_LABELS:
kwargs['status'] = switch_default['initial_status']
if not kwargs.get('label'):
kwargs['label'] = switch_default.get('label')
if not kwargs.get('description'):
kwargs['description'] = switch_default.get('description')
return super(Switch, self).__init__(*args, **kwargs)
def __unicode__(self):
return u"%s=%s" % (self.key, self.value)
def to_dict(self, manager):
data = {
'key': self.key,
'status': self.status,
'statusLabel': self.get_status_label(),
'label': self.label or self.key.title(),
'description': self.description,
'date_modified': self.date_modified,
'date_created': self.date_created,
'conditions': [],
}
last = None
for condition_set_id, group, field, value, exclude, condition_type in self.get_active_conditions(manager):
if not last or last['id'] != condition_set_id:
if last:
data['conditions'].append(last)
last = {
'id': condition_set_id,
'label': group,
'conditions': [],
}
last['conditions'].append((field.name, value, field.display(value), exclude, condition_type))
if last:
data['conditions'].append(last)
return data
def add_condition(self, manager, condition_set, field_name, condition,
exclude=False, commit=True, condition_type=FEATURE):
"""
Adds a new condition and registers it in the global ``gargoyle`` switch manager.
If ``commit`` is ``False``, the data will not be written to the database.
>>> switch = gargoyle['my_switch']
>>> condition_set_id = condition_set.get_id()
        >>> switch.add_condition(gargoyle, condition_set_id, 'percent', '0-50', exclude=False)
"""
condition_set = manager.get_condition_set_by_id(condition_set)
assert isinstance(condition, six.string_types), 'conditions must be strings'
namespace = condition_set.get_namespace()
if namespace not in self.value:
self.value[namespace] = {}
if field_name not in self.value[namespace]:
self.value[namespace][field_name] = []
if condition not in self.value[namespace][field_name]:
self.value[namespace][field_name].append((
exclude and EXCLUDE or INCLUDE,
condition,
condition_type,
))
if commit:
self.save()
def remove_condition(self, manager, condition_set, field_name, condition, commit=True):
"""
Removes a condition and updates the global ``gargoyle`` switch manager.
If ``commit`` is ``False``, the data will not be written to the database.
>>> switch = gargoyle['my_switch']
>>> condition_set_id = condition_set.get_id()
        >>> switch.remove_condition(gargoyle, condition_set_id, 'percent', '0-50')
"""
condition_set = manager.get_condition_set_by_id(condition_set)
namespace = condition_set.get_namespace()
if namespace not in self.value:
return
if field_name not in self.value[namespace]:
return
self.value[namespace][field_name] = [c for c in self.value[namespace][field_name] if c[1] != condition]
if not self.value[namespace][field_name]:
del self.value[namespace][field_name]
if not self.value[namespace]:
del self.value[namespace]
if commit:
self.save()
def clear_conditions(self, manager, condition_set, field_name=None, commit=True):
"""
Clears conditions given a set of parameters.
If ``commit`` is ``False``, the data will not be written to the database.
Clear all conditions given a ConditionSet, and a field name:
>>> switch = gargoyle['my_switch']
>>> condition_set_id = condition_set.get_id()
        >>> switch.clear_conditions(gargoyle, condition_set_id, 'percent')
You can also clear all conditions given a ConditionSet:
>>> switch = gargoyle['my_switch']
>>> condition_set_id = condition_set.get_id()
        >>> switch.clear_conditions(gargoyle, condition_set_id)
"""
condition_set = manager.get_condition_set_by_id(condition_set)
namespace = condition_set.get_namespace()
if namespace not in self.value:
return
if not field_name:
del self.value[namespace]
elif field_name not in self.value[namespace]:
return
else:
del self.value[namespace][field_name]
if commit:
self.save()
def get_active_conditions(self, manager):
"""
        Returns a generator which yields one tuple per active condition, grouped by condition set.
        >>> for set_id, group, field, value, exclude, condition_type in switch.get_active_conditions(gargoyle):
        >>>     print("%s: %s = %s (exclude: %s)" % (group, field.label, value, exclude))
"""
for condition_set in sorted(manager.get_condition_sets(), key=lambda x: x.get_group_label()):
ns = condition_set.get_namespace()
condition_set_id = condition_set.get_id()
if ns in self.value:
group = condition_set.get_group_label()
for name, field in six.iteritems(condition_set.fields):
for value in self.value[ns].get(name, []):
try:
excludes = value[0] == EXCLUDE
data = value[1]
condition_type = value[2] if len(value) > 2 else FEATURE
yield condition_set_id, group, field, data, excludes, condition_type
except TypeError:
continue
def get_status_label(self):
if self.status == SELECTIVE and not self.value:
status = GLOBAL
else:
status = self.status
return self.STATUS_LABELS[status]
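# A minimal sketch (not from the gargoyle docs) of how GARGOYLE_SWITCH_DEFAULTS
# feeds Switch.__init__ above. The switch keys and labels are illustrative only.
def _example_switch_defaults():
    from django.test import override_settings
    defaults = {
        'new_checkout': {'is_active': True, 'label': 'New checkout flow'},
        'beta_search': {'initial_status': SELECTIVE, 'description': 'Search beta'},
    }
    with override_settings(GARGOYLE_SWITCH_DEFAULTS=defaults):
        # is_active=True maps to GLOBAL; a recognised initial_status is used as-is.
        assert Switch(key='new_checkout').status == GLOBAL
        assert Switch(key='beta_search').status == SELECTIVE
        assert Switch(key='beta_search').description == 'Search beta'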
|
|
## @package checkpoint
# Module caffe2.python.checkpoint
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
from caffe2.python import core, context
from caffe2.python.net_builder import ops
from caffe2.python.task import Node, Task, TaskGroup, TaskOutput, WorkspaceType
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# The name of the special net that is used to store all the blob names in the
# workspace.
__BLOB_NAMES_NET__ = 'get_blob_list'
@context.define_context()
class Job(object):
"""
A Job defines three TaskGroups: the `init_group`, the `epoch_group` and the
`exit_group` which will be run by a JobRunner.
The `init_group` will be run only once at startup. Its role is to
initialize globally persistent blobs such as model weights, accumulators
and data file lists.
The `epoch_group` will be run in a loop after init_group. The loop will
exit when any of the stop signals added with `add_stop_signal` is True
at the end of an epoch.
    The `exit_group` will be run only once at the very end of the job, when one
    of the stopping criteria for `epoch_group` has been met. The role of this group
    is to save the results of training at the end of the job.
Jobs are context-driven, so that Tasks can be added to the active Job
without having to explicitly pass the job object around.
Example of usage:
def build_reader(partitions):
with Job.current().init_group:
reader = HiveReader(init_reader, ..., partitions)
Task(step=init_reader)
with Job.current().epoch_group:
limited_reader = ReaderWithLimit(reader, num_iter=10000)
data_queue = pipe(limited_reader, num_threads=8)
Job.current().add_stop_signal(limited_reader.data_finished())
return data_queue
def build_hogwild_trainer(reader, model):
with Job.current().init_group:
Task(step=model.param_init_net)
with Job.current().epoch_group:
pipe(reader, processor=model, num_threads=8)
with Job.current().exit_group:
Task(step=model.save_model_net)
with Job() as job:
reader = build_reader(partitions)
model = build_model(params)
build_hogwild_trainer(reader, model)
"""
def __init__(self,
init_group=None, epoch_group=None,
exit_group=None, stop_signals=None,
nodes_to_checkpoint=None):
self.init_group = init_group or TaskGroup(
workspace_type=WorkspaceType.GLOBAL)
self.epoch_group = epoch_group or TaskGroup()
self.exit_group = exit_group or TaskGroup()
self.stop_signals = stop_signals or []
self._nodes_to_checkpoint = nodes_to_checkpoint
def nodes_to_checkpoint(self):
if self._nodes_to_checkpoint:
return self._nodes_to_checkpoint
else:
return self.init_group.used_nodes()
def compile(self, session_class):
return Job(
init_group=session_class.compile(self.init_group),
epoch_group=session_class.compile(self.epoch_group),
exit_group=session_class.compile(self.exit_group),
stop_signals=self.stop_signals,
nodes_to_checkpoint=self.nodes_to_checkpoint())
def __enter__(self):
self.epoch_group.__enter__()
return self
def __exit__(self, *args):
self.epoch_group.__exit__()
def add_stop_signal(self, output):
if isinstance(output, core.BlobReference):
t = Task(outputs=[output], group=self.epoch_group)
output = t.outputs()[0]
assert isinstance(output, TaskOutput)
self.stop_signals.append(output)
class CheckpointManager(object):
"""
Controls saving and loading of workspaces on every epoch boundary of a job.
If a CheckpointManager instance is passed to JobRunner, then JobRunner will
call `init`, `read` and `save` at different moments in between epoch runs.
"""
def __init__(self, db, db_type):
self._db = db
self._db_type = db_type
# make sure these blobs are the first in the checkpoint file.
self._net = core.Net('!!checkpoint_mngr')
self._blob_names = self._net.AddExternalInput('blob_names')
self._names_output = None
def init(self, nodes=None, retrieve_from_epoch=None):
"""
Build a Task that will be run once after the job's `init_group` is run.
This task will determine which blobs need to be checkpointed.
If retrieve_from_epoch is not None, then the checkpoint metadata is
retrieved from a previously saved checkpoint.
"""
assert nodes is None or len(nodes) == 1, (
'CheckpointManager only supports single node.')
with Task(outputs=[self._blob_names]) as task:
if retrieve_from_epoch is None:
ops.GetAllBlobNames(
[],
self._blob_names,
include_shared=False)
else:
ops.Load(
[], self._blob_names,
db=self._db_name(retrieve_from_epoch),
db_type=self._db_type,
absolute_path=True)
self._names_output = task.outputs()[0]
return task
def blob_list(self):
assert self._names_output
return self._names_output.fetch().tolist()
def _db_name(self, epoch):
return '%s.%06d' % (self._db, epoch)
def load(self, epoch):
"""
Build a Task that will be run by JobRunner when the job is to be
resumed from a given epoch. This task will run a Load op that will
load and deserialize all relevant blobs from a persistent storage.
"""
logger.info('Load from %s' % self._db_name(epoch))
with Task() as task:
ops.Load(
[],
self.blob_list(),
db=self._db_name(epoch),
db_type=self._db_type,
absolute_path=True)
return task
def load_blobs_from_checkpoint(self, blob_names, epoch):
"""
Builds a Task that loads only the necessary blobs from a checkpoint of
the given epoch. The necessary blobs are given in the blob_names
argument.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: The checkpoint epoch to load from.
Returns:
A Task which loads the specified blobs from the checkpoint of the
given epoch.
"""
logger.info('Load from %s' % self._db_name(epoch))
with Task() as task:
ops.Load(
[],
blob_names,
db=self._db_name(epoch),
db_type=self._db_type,
absolute_path=True,
allow_incomplete=True)
return task
def check_db_exists(self, epoch):
logger.info('Check existence of %s' % self._db_name(epoch))
with Task() as task:
existence = ops.Const(False)
ops.DBExists(
[],
[existence],
db_name=self._db_name(epoch),
db_type=self._db_type,
absolute_path=True)
task.add_output(existence)
return task
def save(self, epoch):
"""
Build a Task that is run once after `init_group` and after each
        epoch is run. This will execute a Save op to serialize and persist the
        blobs present in the global workspace.
"""
logger.info('Save to %s' % self._db_name(epoch))
with Task() as task:
ops.Save(
self.blob_list(), [], db=self._db_name(epoch),
db_type=self._db_type, absolute_path=True)
return task
class MultiNodeCheckpointManager(object):
"""
    Coordinates checkpointing across multiple nodes.
Each of `init`, `load` and `save` will build TaskGroups which will
trigger checkpointing on each of the nodes involved in a distributed job.
"""
def __init__(
self, db_prefix, db_type, node_manager_class=CheckpointManager):
self._node_manager_class = node_manager_class
self._node_managers = None
self._db_prefix = db_prefix
self._db_type = db_type
def _task_group(self, func, *args, **kw):
assert self._node_managers is not None, 'init must be called first.'
with TaskGroup(WorkspaceType.GLOBAL) as task_group:
for node, manager in self._node_managers:
with Node(node):
func(manager, *args, **kw)
return task_group
def init(self, nodes, retrieve_from_epoch=None):
if self._node_managers is not None:
assert [node for node, _ in self._node_managers] == nodes
return
self._node_managers = []
for node in nodes:
with Node(node):
manager = self._node_manager_class(
db=os.path.join(self._db_prefix, node),
db_type=self._db_type)
self._node_managers.append((node, manager))
return self._task_group(
self._node_manager_class.init,
nodes=[node],
retrieve_from_epoch=retrieve_from_epoch)
def load(self, epoch):
return self._task_group(self._node_manager_class.load, epoch)
def load_blobs_locally(self, nodes, blob_names, epoch, session):
"""Loads the necessary blobs from the checkpoints to the current node.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: An integer. The checkpoint epoch to load from.
session: A Session object to execute the Load ops.
"""
if self._node_managers is not None:
assert [node for node, _ in self._node_managers] == nodes
else:
self._node_managers = []
for node in nodes:
with Node(node):
manager = self._node_manager_class(
db=os.path.join(self._db_prefix, node),
db_type=self._db_type)
self._node_managers.append((node, manager))
assert self._node_managers is not None, 'must initialize node managers'
for _, manager in self._node_managers:
existence_task = manager.check_db_exists(epoch)
session.run(existence_task)
existence = existence_task.outputs()[0].fetch()
if not existence:
logger.info('DB %s does not exist!' % manager._db_name(epoch))
return False
load_task = manager.load_blobs_from_checkpoint(blob_names, epoch)
session.run(load_task)
logger.info('Successfully loaded from checkpoints.')
return True
def save(self, epoch):
return self._task_group(self._node_manager_class.save, epoch)
class JobRunner(object):
"""
Implement the runtime logic for jobs with checkpointing at the level of
epoch. Can be used to run either single-host or distributed jobs. Job
runner is a callable to be called once from the client, passing a Session
as argument. This call will block until the Job execution is complete.
If a checkpoint_manager is passed, checkpoints will be taken after
initialization and after each epoch execution. If, in addition,
`resume_from_epoch` is an epoch number, the corresponding checkpoint will
be loaded and job execution will continue from the given epoch. In
this case, the job's init_group will not be run.
Refer to checkpoint_test.py for an example.
"""
def __init__(self, job, checkpoint_manager=None, resume_from_epoch=None):
self.resume_from_epoch = resume_from_epoch
self.checkpoint = checkpoint_manager
self.job = job
def __call__(self, client):
from_scratch = self.resume_from_epoch is None
if from_scratch:
client.run(self.job.init_group)
if self.checkpoint:
logger.info('Preparing checkpoint ...')
client.run(self.checkpoint.init(
self.job.nodes_to_checkpoint(),
retrieve_from_epoch=self.resume_from_epoch))
if from_scratch:
logger.info('Saving first checkpoint ...')
client.run(self.checkpoint.save(0))
logger.info('First checkpoint saved.')
else:
logger.info('Loading checkpoint for epoch {} ...'.format(
self.resume_from_epoch))
client.run(self.checkpoint.load(self.resume_from_epoch))
logger.info('Checkpoint loaded.')
epoch = 1 if from_scratch else self.resume_from_epoch + 1
while True:
logger.info('Starting epoch %d.' % epoch)
client.run(self.job.epoch_group)
logger.info('Ran epoch %d.' % epoch)
stop_signals = [o.fetch() for o in self.job.stop_signals]
if self.checkpoint:
logger.info('Saving checkpoint ...')
client.run(self.checkpoint.save(epoch))
logger.info('Checkpoint saved.')
if any(stop_signals):
logger.info('Stopping.')
break
epoch += 1
client.run(self.job.exit_group)
return epoch
def load_blobs_from_checkpoints(self, blob_names, epoch, session):
"""Loads the necessary blobs from the checkpoints.
Checkpoints store the snapshots of the workspace in each node.
Sometimes we only need to load a subset of the blobs from the
checkpoints. One common scenario is to load only the model blobs from
        the checkpoints for evaluation purposes. Given the names of the necessary
blobs, this function goes over all the checkpoints of all the nodes, but
only loads the blobs specified in the blob_names to the current
workspace.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: An integer. The checkpoint epoch to load from.
session: A Session object to execute the load ops.
Raises:
ValueError: When the checkpoint manager is invalid.
"""
if not self.checkpoint:
raise ValueError('Checkpoint manager is None')
logger.info('Loading checkpoint for epoch {} ...'.format(epoch))
return self.checkpoint.load_blobs_locally(self.job.nodes_to_checkpoint(),
blob_names, epoch, session)
def epoch_limiter(num_epochs):
"""
Creates a task that will output True when a given
number of epochs has finished.
"""
with Job.current().init_group:
init_net = core.Net('epoch_counter_init')
counter = init_net.CreateCounter([], init_count=num_epochs - 1)
Task(step=init_net)
epoch_net = core.Net('epoch_countdown')
finished = epoch_net.CountDown(counter)
output = Task(step=epoch_net, outputs=finished).outputs()[0]
Job.current().add_stop_signal(output)
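# A hedged end-to-end sketch of the pieces above (single node), mirroring the usage
# pattern in the Job docstring. The checkpoint path and db_type are illustrative, and
# `session` is assumed to be an already constructed caffe2 Session that can run this
# job's task groups.
def _example_run_job_with_checkpoints(session, num_epochs=10):
    with Job() as job:
        # Real jobs would also populate init_group/epoch_group with model tasks here.
        epoch_limiter(num_epochs)
    checkpoint = CheckpointManager(db='/tmp/example_checkpoints', db_type='leveldb')
    runner = JobRunner(job, checkpoint_manager=checkpoint)
    return runner(session)  # blocks until a stop signal fires, returns the last epoch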
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2016 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
from re import compile, escape, MULTILINE
from ..util import tagre
from ..scraper import _BasicScraper, _ParserScraper
from ..helpers import regexNamer, bounceStarter, indirectStarter
from .common import _WordPressScraper, xpath_class, WP_LATEST_SEARCH
class AbstruseGoose(_BasicScraper):
url = 'http://abstrusegoose.com/'
rurl = escape(url)
starter = bounceStarter
stripUrl = url + '%s'
firstStripUrl = stripUrl % '1'
imageSearch = compile(tagre('img', 'src',
r'(http://abstrusegoose\.com/strips/[^<>"]+)'))
prevSearch = compile(tagre('a', 'href', r'(%s\d+)' % rurl) +
r'« Previous')
nextSearch = compile(tagre('a', 'href', r'(%s\d+)' % rurl) +
r'Next »')
help = 'Index format: n (unpadded)'
textSearch = compile(tagre("img", "title", r'([^"]+)'))
def namer(self, image_url, page_url):
index = int(page_url.rstrip('/').split('/')[-1])
name = image_url.split('/')[-1].split('.')[0]
return 'c%03d-%s' % (index, name)
class AbsurdNotions(_BasicScraper):
baseUrl = 'http://www.absurdnotions.org/'
url = baseUrl + 'page129.html'
stripUrl = baseUrl + 'page%s.html'
firstStripUrl = stripUrl % '1'
imageSearch = compile(tagre('img', 'src', r'(an[^"]+)'))
multipleImagesPerStrip = True
prevSearch = compile(tagre('a', 'href', r'([^"]+)') +
tagre('img', 'src', 'nprev\.gif'))
help = 'Index format: n (unpadded)'
class AcademyVale(_BasicScraper):
url = 'http://www.imagerie.com/vale/'
stripUrl = url + 'avarch.cgi?%s'
firstStripUrl = stripUrl % '001'
imageSearch = compile(tagre('img', 'src', r'(avale\d{4}-\d{2}\.gif)'))
prevSearch = compile(tagre('a', 'href', r'(avarch[^">]+)', quote="") +
tagre('img', 'src', 'AVNavBack\.gif'))
help = 'Index format: nnn'
class Achewood(_BasicScraper):
url = 'http://www.achewood.com/'
stripUrl = url + 'index.php?date=%s'
firstStripUrl = stripUrl % '00000000'
imageSearch = compile(tagre("img", "src", r'(/comic\.php\?date=\d+)'))
prevSearch = compile(tagre("a", "href", r'(index\.php\?date=\d+)',
after="Previous"))
help = 'Index format: mmddyyyy'
namer = regexNamer(compile(r'date=(\d+)'))
class AfterStrife(_WordPressScraper):
baseUrl = 'http://afterstrife.com/'
stripUrl = baseUrl + '?p=%s'
url = stripUrl % '262'
firstStripUrl = stripUrl % '1'
prevSearch = '//a[%s]' % xpath_class('navi-prev')
help = 'Index format: nnn'
endOfLife = True
class AGirlAndHerFed(_BasicScraper):
url = 'http://www.agirlandherfed.com/'
stripUrl = url + '1.%s.html'
firstStripUrl = stripUrl % '1'
imageSearch = compile(tagre("img", "src", r'(img/strip/[^"]+\.jpg)'))
prevSearch = compile(r'<a href="([^"]+)">[^>]+Back')
help = 'Index format: nnn'
class AhoiPolloi(_ParserScraper):
url = 'https://ahoipolloi.blogger.de/'
stripUrl = url + '?day=%s'
firstStripUrl = stripUrl % '20060306'
multipleImagesPerStrip = True
lang = 'de'
imageSearch = '//img[contains(@src, "/static/antville/ahoipolloi/")]'
prevSearch = '//a[contains(@href, "/?day=")]'
help = 'Index format: yyyymmdd'
class AhoyEarth(_WordPressScraper):
url = 'http://www.ahoyearth.com/'
prevSearch = '//a[%s]' % xpath_class('navi-prev')
class AirForceBlues(_WordPressScraper):
url = 'http://farvatoons.com/'
firstStripUrl = url + 'comic/in-texas-there-are-texans/'
class ALessonIsLearned(_BasicScraper):
url = 'http://www.alessonislearned.com/'
prevSearch = compile(tagre("a", "href", r"(index\.php\?comic=\d+)",
quote="'") + r"[^>]+previous")
stripUrl = url + 'index.php?comic=%s'
firstStripUrl = stripUrl % '1'
imageSearch = compile(tagre("img", "src", r"(cmx/lesson\d+\.[a-z]+)"))
help = 'Index format: nnn'
class Alice(_WordPressScraper):
url = 'http://www.alicecomics.com/'
latestSearch = '//a[text()="Latest Alice!"]'
starter = indirectStarter
class AlienLovesPredator(_BasicScraper):
url = 'http://alienlovespredator.com/'
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '2004/10/12/unavoidable-delay'
imageSearch = compile(tagre("img", "src", r'([^"]+)',
after='border="1" alt="" width="750"'))
prevSearch = compile(tagre("a", "href", r'([^"]+)', after="prev"))
help = 'Index format: yyyy/mm/dd/name'
class AlienShores(_WordPressScraper):
url = 'http://alienshores.com/alienshores_band/'
firstStripUrl = url + 'AScomic/updated-cover/'
class AllTheGrowingThings(_BasicScraper):
url = 'http://growingthings.typodmary.com/'
rurl = escape(url)
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '2009/04/21/all-the-growing-things'
imageSearch = compile(tagre("img", "src", r'(%sfiles/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%s[^"]+)' % rurl, after="prev"))
help = 'Index format: yyyy/mm/dd/strip-name'
class AlphaLuna(_BasicScraper):
url = 'http://www.alphaluna.net/'
stripUrl = url + 'issue-%s/'
firstStripUrl = stripUrl % '1/cover'
imageSearch = compile(tagre("a", "href",
r'[^"]*/(?:issue-|support/upcoming)[^"]+') +
tagre("img", "src", r'([^"]*/PAGINAS/[^"]+)'))
prevSearch = compile(tagre("a", "href", r'([^"]+)') +
tagre("img", "alt", "Prev"))
help = 'Index format: issue/page (e.g. 4/05)'
class AlphaLunaSpanish(AlphaLuna):
name = 'AlphaLuna/Spanish'
lang = 'es'
url = 'http://alphaluna.net/spanish/'
stripUrl = url + 'issue-%s/'
firstStripUrl = stripUrl % '1/portada'
class Altermeta(_BasicScraper):
url = 'http://altermeta.net/'
rurl = escape(url)
stripUrl = url + 'archive.php?comic=%s'
firstStripUrl = stripUrl % '0'
imageSearch = compile(r'<img src="(comics/[^"]+)" />')
prevSearch = compile(r'<a href="([^"]+)"><img src="%stemplate/default/images/sasha/back\.png' % rurl)
help = 'Index format: n (unpadded)'
class AltermetaOld(Altermeta):
url = Altermeta.url + 'oldarchive/index.php'
stripUrl = Altermeta.url + 'oldarchive/archive.php?comic=%s'
firstStripUrl = stripUrl % '0'
prevSearch = compile(r'<a href="([^"]+)">Back')
class AmazingSuperPowers(_BasicScraper):
url = 'http://www.amazingsuperpowers.com/'
rurl = escape(url)
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '2007/09/heredity'
imageSearch = compile(tagre("img", "src", r'(%scomics/[^"]+)' % rurl))
prevSearch = compile(tagre("a", "href", r'(%s[^"]+)' % rurl, after="prev"))
help = 'Index format: yyyy/mm/name'
def shouldSkipUrl(self, url, data):
"""Skip pages without images."""
return url in (
# video
self.stripUrl % '2013/05/orbital-deathray-kickstarter',
)
class Amya(_WordPressScraper):
url = 'http://www.amyachronicles.com/'
class Angband(_BasicScraper):
url = 'http://angband.calamarain.net/'
stripUrl = url + 'view.php?date=%s'
firstStripUrl = stripUrl % '2005-12-30'
imageSearch = compile(tagre("img", "src", r'(comics/Scroll[^"]+)'))
prevSearch = compile(tagre("a", "href", r'(view\.php\?date\=[^"]+)') +
"Previous")
help = 'Index format: yyyy-mm-dd'
class Angels2200(_BasicScraper):
url = 'http://www.janahoffmann.com/angels/'
stripUrl = url + '%s'
imageSearch = compile(tagre("img", "src", r"(http://www\.janahoffmann\.com/angels/comics/[^']+)", quote="'"))
prevSearch = compile(tagre("a", "href", r'([^"]+)') + "« Previous")
help = 'Index format: yyyy/mm/dd/part-<n>-comic-<n>'
class Annyseed(_ParserScraper):
baseUrl = 'http://www.mirrorwoodcomics.com/'
url = baseUrl + 'AnnyseedLatest.htm'
stripUrl = baseUrl + 'Annyseed%s.htm'
imageSearch = '//div/img[contains(@src, "Annyseed")]'
prevSearch = '//a[img[@name="Previousbtn"]]'
help = 'Index format: nnn'
class AoiHouse(_ParserScraper):
url = 'http://www.aoihouse.net/'
imageSearch = '//div[@id="comic"]/a[2]/img'
prevSearch = '//a[@id="cndprev"]'
class AppleGeeks(_BasicScraper):
url = 'http://www.applegeeks.com/'
stripUrl = url + 'comics/viewcomic.php?issue=%s'
firstStripUrl = stripUrl % '1'
imageSearch = compile(tagre("img", "src", r'((?:/comics/)?issue\d+\.jpg)'))
prevSearch = compile(r'<div class="caption">Previous Comic</div>\s*<p><a href="([^"]+)">', MULTILINE)
help = 'Index format: n (unpadded)'
class ARedTailsDream(_BasicScraper):
baseUrl = 'http://www.minnasundberg.fi/'
stripUrl = baseUrl + 'comic/page%s.php'
firstStripUrl = stripUrl % '00'
url = baseUrl + 'comic/recent.php'
imageSearch = compile(tagre('img', 'src', r'(chapter.+?/eng[^"]*)'))
prevSearch = compile(tagre('a', 'href', r'(page\d+\.php)') +
tagre("img", "src", r'.*?aprev.*?'))
help = 'Index format: nn'
class Ashes(_WordPressScraper):
url = 'http://www.flowerlarkstudios.com/comic/prologue/10232009/'
firstStripUrl = url
latestSearch = WP_LATEST_SEARCH
starter = indirectStarter
class ASofterWorld(_ParserScraper):
url = 'http://www.asofterworld.com/'
stripUrl = url + 'index.php?id=%s'
firstStripUrl = stripUrl % '1'
imageSearch = '//div[@id="comicimg"]//img'
prevSearch = '//div[@id="previous"]/a'
help = 'Index format: n (unpadded)'
class AstronomyPOTD(_ParserScraper):
baseUrl = 'http://apod.nasa.gov/apod/'
url = baseUrl + 'astropix.html'
starter = bounceStarter
stripUrl = baseUrl + 'ap%s.html'
firstStripUrl = stripUrl % '061012'
imageSearch = '//a/img'
multipleImagesPerStrip = True
prevSearch = '//a[text()="<"]'
nextSearch = '//a[text()=">"]'
help = 'Index format: yymmdd'
def shouldSkipUrl(self, url, data):
"""Skip pages without images."""
return data.xpath('//iframe') # videos
def namer(self, image_url, page_url):
return '%s-%s' % (page_url.split('/')[-1].split('.')[0][2:],
image_url.split('/')[-1].split('.')[0])
class AxeCop(_WordPressScraper):
url = 'http://axecop.com/comic/season-two/'
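# A hedged template (not a real comic) showing the minimal attributes the scrapers
# above rely on. The leading underscore follows the convention used by the base
# classes for names that should not be collected as real scrapers.
class _ExampleScraper(_BasicScraper):
    url = 'http://comic.example.invalid/'
    stripUrl = url + '?id=%s'
    firstStripUrl = stripUrl % '1'
    imageSearch = compile(tagre("img", "src", r'(/comics/[^"]+)'))
    prevSearch = compile(tagre("a", "href", r'(\?id=\d+)') + r'Previous')
    help = 'Index format: n (unpadded)'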
|
|
# pylint: disable-msg=E1101,W0612
import numpy as np
import pytest
import pandas as pd
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
class TestSparseSeriesIndexing(object):
def setup_method(self, method):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
self.sparse = self.orig.to_sparse()
def test_getitem(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6],
fill_value=0, name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
def test_getitem_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[2] == 0
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_ellipsis(self):
# GH 9467
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
tm.assert_sp_series_equal(s[...], s)
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
tm.assert_sp_series_equal(s[...], s)
def test_getitem_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse[:2],
orig[:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[4:2],
orig[4:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[::2],
orig[::2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[-5:],
orig[-5:].to_sparse(fill_value=0))
def test_loc(self):
orig = self.orig
sparse = self.sparse
assert sparse.loc[0] == 1
assert np.isnan(sparse.loc[1])
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.reindex([1, 3, 4, 5])
exp = orig.reindex([1, 3, 4, 5]).to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
assert np.isnan(result[-1])
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list('ABCDE'))
sparse = orig.to_sparse()
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_loc_slice_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc['C':],
orig.loc['C':].to_sparse(fill_value=0))
def test_loc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc[2:],
orig.loc[2:].to_sparse(fill_value=0))
def test_iloc(self):
orig = self.orig
sparse = self.sparse
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[2])
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
result = sparse.iloc[[1, -2, -4]]
exp = orig.iloc[[1, -2, -4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
with pytest.raises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[1])
assert sparse.iloc[4] == 0
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_iloc_slice(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_iloc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.iloc[2:],
orig.iloc[2:].to_sparse(fill_value=0))
def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
assert sparse.at[0] == orig.at[0]
assert np.isnan(sparse.at[1])
assert np.isnan(sparse.at[2])
assert sparse.at[3] == orig.at[3]
assert np.isnan(sparse.at[4])
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('abcde'))
sparse = orig.to_sparse()
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert np.isnan(sparse.at['c'])
assert sparse.at['d'] == orig.at['d']
assert np.isnan(sparse.at['e'])
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('abcde'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert sparse.at['c'] == orig.at['c']
assert sparse.at['d'] == orig.at['d']
assert sparse.at['e'] == orig.at['e']
def test_iat(self):
orig = self.orig
sparse = self.sparse
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert np.isnan(sparse.iat[2])
assert sparse.iat[3] == orig.iat[3]
assert np.isnan(sparse.iat[4])
assert np.isnan(sparse.iat[-1])
assert sparse.iat[-5] == orig.iat[-5]
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert sparse.iat[2] == orig.iat[2]
assert sparse.iat[3] == orig.iat[3]
assert sparse.iat[4] == orig.iat[4]
assert sparse.iat[-1] == orig.iat[-1]
assert sparse.iat[-5] == orig.iat[-5]
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
assert s.get(0) == 1
assert np.isnan(s.get(1))
assert s.get(5) is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'))
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'),
fill_value=0)
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
def test_take(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_series_equal(sparse.take([0, 1, 3]),
orig.take([0, 1, 3]).to_sparse())
tm.assert_sp_series_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse(fill_value=0))
exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
# all missing & fill_value
res = sparse.reindex(['B', 'E', 'C'])
exp = orig.reindex(['B', 'E', 'C']).to_sparse()
tm.assert_sp_series_equal(res, exp)
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
def test_fill_value_reindex(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# includes missing and fill_value
res = sparse.reindex(['A', 'B', 'C'])
exp = orig.reindex(['A', 'B', 'C']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all missing
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all fill_value
orig = pd.Series([0., 0., 0., 0., 0.],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
def test_fill_value_reindex_coerces_float_int(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_reindex_fill_value(self):
floats = pd.Series([1., 2., 3.]).to_sparse()
result = floats.reindex([1, 2, 3], fill_value=0)
expected = pd.Series([2., 3., 0], index=[1, 2, 3]).to_sparse()
tm.assert_sp_series_equal(result, expected)
def test_reindex_nearest(self):
s = pd.Series(np.arange(10, dtype='float64')).to_sparse()
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = pd.Series(np.around(target), target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = pd.Series([0, 1, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method='nearest',
tolerance=[0.3, 0.01, 0.4, 3])
expected = pd.Series([0, np.nan, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
@pytest.mark.parametrize("kind", ["integer", "block"])
@pytest.mark.parametrize("fill", [True, False, np.nan])
def tests_indexing_with_sparse(self, kind, fill):
# see gh-13985
arr = pd.SparseArray([1, 2, 3], kind=kind)
indexer = pd.SparseArray([True, False, True],
fill_value=fill,
dtype=bool)
expected = arr[indexer]
result = pd.SparseArray([1, 3], kind=kind)
tm.assert_sp_array_equal(result, expected)
s = pd.SparseSeries(arr, index=["a", "b", "c"], dtype=np.float64)
expected = pd.SparseSeries([1, 3], index=["a", "c"], kind=kind,
dtype=SparseDtype(np.float64, s.fill_value))
tm.assert_sp_series_equal(s[indexer], expected)
tm.assert_sp_series_equal(s.loc[indexer], expected)
tm.assert_sp_series_equal(s.iloc[indexer], expected)
indexer = pd.SparseSeries(indexer, index=["a", "b", "c"])
tm.assert_sp_series_equal(s[indexer], expected)
tm.assert_sp_series_equal(s.loc[indexer], expected)
msg = ("iLocation based boolean indexing cannot "
"use an indexable as a mask")
with pytest.raises(ValueError, match=msg):
s.iloc[indexer]
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
def setup_method(self, method):
# MultiIndex with duplicated values
idx = pd.MultiIndex.from_tuples([('A', 0), ('A', 1), ('B', 0),
('C', 0), ('C', 1)])
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=idx)
self.sparse = self.orig.to_sparse()
def test_getitem_multi(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == orig[0]
assert np.isnan(sparse[1])
assert sparse[3] == orig[3]
tm.assert_sp_series_equal(sparse['A'], orig['A'].to_sparse())
tm.assert_sp_series_equal(sparse['B'], orig['B'].to_sparse())
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_multi_tuple(self):
orig = self.orig
sparse = self.sparse
assert sparse['C', 0] == orig['C', 0]
assert np.isnan(sparse['A', 1])
assert np.isnan(sparse['B', 0])
def test_getitems_slice_multi(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[2:], orig[2:].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B':], orig.loc['B':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['C':], orig.loc['C':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['A':'B'],
orig.loc['A':'B'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:'B'], orig.loc[:'B'].to_sparse())
def test_loc(self):
# needs to be overridden to use different labels
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc['A'],
orig.loc['A'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B'],
orig.loc['B'].to_sparse())
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# single element list (GH 15447)
result = sparse.loc[['A']]
exp = orig.loc[['A']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_multi_tuple(self):
orig = self.orig
sparse = self.sparse
assert sparse.loc['C', 0] == orig.loc['C', 0]
assert np.isnan(sparse.loc['A', 1])
assert np.isnan(sparse.loc['B', 0])
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc['A':], orig.loc['A':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B':], orig.loc['B':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['C':], orig.loc['C':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['A':'B'],
orig.loc['A':'B'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:'B'], orig.loc[:'B'].to_sparse())
def test_reindex(self):
# GH 15447
orig = self.orig
sparse = self.sparse
res = sparse.reindex([('A', 0), ('C', 1)])
exp = orig.reindex([('A', 0), ('C', 1)]).to_sparse()
tm.assert_sp_series_equal(res, exp)
# On specific level:
res = sparse.reindex(['A', 'C', 'B'], level=0)
exp = orig.reindex(['A', 'C', 'B'], level=0).to_sparse()
tm.assert_sp_series_equal(res, exp)
# single element list (GH 15447)
res = sparse.reindex(['A'], level=0)
exp = orig.reindex(['A'], level=0).to_sparse()
tm.assert_sp_series_equal(res, exp)
with pytest.raises(TypeError):
# Incomplete keys are not accepted for reindexing:
sparse.reindex(['A', 'C'])
# "copy" argument:
res = sparse.reindex(sparse.index, copy=True)
exp = orig.reindex(orig.index, copy=True).to_sparse()
tm.assert_sp_series_equal(res, exp)
assert sparse is not res
class TestSparseDataFrameIndexing(object):
def test_getitem(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse['x'], orig['x'].to_sparse())
tm.assert_sp_frame_equal(sparse[['x']], orig[['x']].to_sparse())
tm.assert_sp_frame_equal(sparse[['z', 'x']],
orig[['z', 'x']].to_sparse())
tm.assert_sp_frame_equal(sparse[[True, False, True, True]],
orig[[True, False, True, True]].to_sparse())
tm.assert_sp_frame_equal(sparse.iloc[[1, 2]],
orig.iloc[[1, 2]].to_sparse())
def test_getitem_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
result = sparse[['z']]
expected = orig[['z']].to_sparse(fill_value=0)
tm.assert_sp_frame_equal(result, expected, check_fill_value=False)
tm.assert_sp_series_equal(sparse['y'],
orig['y'].to_sparse(fill_value=0))
exp = orig[['x']].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[['x']], exp)
exp = orig[['z', 'x']].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[['z', 'x']], exp)
indexer = [True, False, True, True]
exp = orig[indexer].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[indexer], exp)
exp = orig.iloc[[1, 2]].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.iloc[[1, 2]], exp)
def test_loc(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
assert sparse.loc[0, 'x'] == 1
assert np.isnan(sparse.loc[1, 'z'])
assert sparse.loc[2, 'z'] == 4
# have to specify `kind='integer'`, since we construct a
# new SparseArray here; its default kind is 'integer',
# whereas SparseSeries defaults to 'block'
tm.assert_sp_series_equal(sparse.loc[0],
orig.loc[0].to_sparse(kind='integer'))
tm.assert_sp_series_equal(sparse.loc[1],
orig.loc[1].to_sparse(kind='integer'))
tm.assert_sp_series_equal(sparse.loc[2, :],
orig.loc[2, :].to_sparse(kind='integer'))
tm.assert_sp_series_equal(sparse.loc[2, :],
orig.loc[2, :].to_sparse(kind='integer'))
tm.assert_sp_series_equal(sparse.loc[:, 'y'],
orig.loc[:, 'y'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'y'],
orig.loc[:, 'y'].to_sparse())
result = sparse.loc[[1, 2]]
exp = orig.loc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[1, 2], :]
exp = orig.loc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ['x', 'z']]
exp = orig.loc[:, ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[0, 2], ['x', 'z']]
exp = orig.loc[[0, 2], ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# exceeds the bounds
result = sparse.reindex([1, 3, 4, 5])
exp = orig.reindex([1, 3, 4, 5]).to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_index(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
index=list('abc'), columns=list('xyz'))
sparse = orig.to_sparse()
assert sparse.loc['a', 'x'] == 1
assert np.isnan(sparse.loc['b', 'z'])
assert sparse.loc['c', 'z'] == 4
tm.assert_sp_series_equal(sparse.loc['a'],
orig.loc['a'].to_sparse(kind='integer'))
tm.assert_sp_series_equal(sparse.loc['b'],
orig.loc['b'].to_sparse(kind='integer'))
tm.assert_sp_series_equal(sparse.loc['b', :],
orig.loc['b', :].to_sparse(kind='integer'))
tm.assert_sp_series_equal(sparse.loc['b', :],
orig.loc['b', :].to_sparse(kind='integer'))
tm.assert_sp_series_equal(sparse.loc[:, 'z'],
orig.loc[:, 'z'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'z'],
orig.loc[:, 'z'].to_sparse())
result = sparse.loc[['a', 'b']]
exp = orig.loc[['a', 'b']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[['a', 'b'], :]
exp = orig.loc[['a', 'b'], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ['x', 'z']]
exp = orig.loc[:, ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[['c', 'a'], ['x', 'z']]
exp = orig.loc[['c', 'a'], ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_slice(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_iloc(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]])
sparse = orig.to_sparse()
assert sparse.iloc[1, 1] == 3
assert np.isnan(sparse.iloc[2, 0])
tm.assert_sp_series_equal(sparse.iloc[0],
orig.loc[0].to_sparse(kind='integer'))
tm.assert_sp_series_equal(sparse.iloc[1],
orig.loc[1].to_sparse(kind='integer'))
tm.assert_sp_series_equal(sparse.iloc[2, :],
orig.iloc[2, :].to_sparse(kind='integer'))
tm.assert_sp_series_equal(sparse.iloc[2, :],
orig.iloc[2, :].to_sparse(kind='integer'))
tm.assert_sp_series_equal(sparse.iloc[:, 1],
orig.iloc[:, 1].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[:, 1],
orig.iloc[:, 1].to_sparse())
result = sparse.iloc[[1, 2]]
exp = orig.iloc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[1, 2], :]
exp = orig.iloc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[:, [1, 0]]
exp = orig.iloc[:, [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[2], [1, 0]]
exp = orig.iloc[[2], [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
with pytest.raises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_slice(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_at(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
assert sparse.at['A', 'x'] == orig.at['A', 'x']
assert np.isnan(sparse.at['B', 'z'])
assert np.isnan(sparse.at['C', 'y'])
assert sparse.at['D', 'x'] == orig.at['D', 'x']
def test_at_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.at['A', 'x'] == orig.at['A', 'x']
assert np.isnan(sparse.at['B', 'z'])
assert np.isnan(sparse.at['C', 'y'])
assert sparse.at['D', 'x'] == orig.at['D', 'x']
def test_iat(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
assert sparse.iat[0, 0] == orig.iat[0, 0]
assert np.isnan(sparse.iat[1, 2])
assert np.isnan(sparse.iat[2, 1])
assert sparse.iat[2, 0] == orig.iat[2, 0]
assert np.isnan(sparse.iat[-1, -2])
assert sparse.iat[-1, -1] == orig.iat[-1, -1]
def test_iat_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.iat[0, 0] == orig.iat[0, 0]
assert np.isnan(sparse.iat[1, 2])
assert np.isnan(sparse.iat[2, 1])
assert sparse.iat[2, 0] == orig.iat[2, 0]
assert np.isnan(sparse.iat[-1, -2])
assert sparse.iat[-1, -1] == orig.iat[-1, -1]
def test_take(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([0, 1]),
orig.take([0, 1]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
exp = orig.take([0]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0]), exp)
exp = orig.take([0, 1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0, 1]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse()
tm.assert_sp_frame_equal(res, exp)
orig = pd.DataFrame([[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse()
tm.assert_sp_frame_equal(res, exp)
def test_reindex_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all missing
orig = pd.DataFrame([[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all fill_value
orig = pd.DataFrame([[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
index=list('ABCD'), columns=list('xyz'),
dtype=np.int)
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
class TestMultitype(object):
def setup_method(self, method):
self.cols = ['string', 'int', 'float', 'object']
self.string_series = pd.SparseSeries(['a', 'b', 'c'])
self.int_series = pd.SparseSeries([1, 2, 3])
self.float_series = pd.SparseSeries([1.1, 1.2, 1.3])
self.object_series = pd.SparseSeries([[], {}, set()])
self.sdf = pd.SparseDataFrame({
'string': self.string_series,
'int': self.int_series,
'float': self.float_series,
'object': self.object_series,
})
self.sdf = self.sdf[self.cols]
self.ss = pd.SparseSeries(['a', 1, 1.1, []], index=self.cols)
def test_frame_basic_dtypes(self):
for _, row in self.sdf.iterrows():
assert row.dtype == SparseDtype(object)
tm.assert_sp_series_equal(self.sdf['string'], self.string_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['int'], self.int_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['float'], self.float_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['object'], self.object_series,
check_names=False)
def test_frame_indexing_single(self):
tm.assert_sp_series_equal(self.sdf.iloc[0],
pd.SparseSeries(['a', 1, 1.1, []],
index=self.cols),
check_names=False)
tm.assert_sp_series_equal(self.sdf.iloc[1],
pd.SparseSeries(['b', 2, 1.2, {}],
index=self.cols),
check_names=False)
tm.assert_sp_series_equal(self.sdf.iloc[2],
pd.SparseSeries(['c', 3, 1.3, set()],
index=self.cols),
check_names=False)
def test_frame_indexing_multiple(self):
tm.assert_sp_frame_equal(self.sdf, self.sdf[:])
tm.assert_sp_frame_equal(self.sdf, self.sdf.loc[:])
tm.assert_sp_frame_equal(self.sdf.iloc[[1, 2]],
pd.SparseDataFrame({
'string': self.string_series.iloc[[1, 2]],
'int': self.int_series.iloc[[1, 2]],
'float': self.float_series.iloc[[1, 2]],
'object': self.object_series.iloc[[1, 2]]
}, index=[1, 2])[self.cols])
tm.assert_sp_frame_equal(self.sdf[['int', 'string']],
pd.SparseDataFrame({
'int': self.int_series,
'string': self.string_series,
}))
def test_series_indexing_single(self):
for i, idx in enumerate(self.cols):
assert self.ss.iloc[i] == self.ss[idx]
tm.assert_class_equal(self.ss.iloc[i], self.ss[idx],
obj="series index")
assert self.ss['string'] == 'a'
assert self.ss['int'] == 1
assert self.ss['float'] == 1.1
assert self.ss['object'] == []
def test_series_indexing_multiple(self):
tm.assert_sp_series_equal(self.ss.loc[['string', 'int']],
pd.SparseSeries(['a', 1],
index=['string', 'int']))
tm.assert_sp_series_equal(self.ss.loc[['string', 'object']],
pd.SparseSeries(['a', []],
index=['string', 'object']))
|
|
"""Config flow for UniFi.
Provides user initiated configuration flow.
Discovery of controllers hosted on UDM and UDM Pro devices through SSDP.
Reauthentication when issues with credentials are reported.
Configuration of options through options flow.
"""
import socket
from urllib.parse import urlparse
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import format_mac
from .const import (
CONF_ALLOW_BANDWIDTH_SENSORS,
CONF_ALLOW_UPTIME_SENSORS,
CONF_BLOCK_CLIENT,
CONF_CONTROLLER,
CONF_DETECTION_TIME,
CONF_DPI_RESTRICTIONS,
CONF_IGNORE_WIRED_BUG,
CONF_POE_CLIENTS,
CONF_SITE_ID,
CONF_SSID_FILTER,
CONF_TRACK_CLIENTS,
CONF_TRACK_DEVICES,
CONF_TRACK_WIRED_CLIENTS,
DEFAULT_DPI_RESTRICTIONS,
DEFAULT_POE_CLIENTS,
DOMAIN as UNIFI_DOMAIN,
)
from .controller import get_controller
from .errors import AuthenticationRequired, CannotConnect
DEFAULT_PORT = 443
DEFAULT_SITE_ID = "default"
DEFAULT_VERIFY_SSL = False
MODEL_PORTS = {
"UniFi Dream Machine": 443,
"UniFi Dream Machine Pro": 443,
}
class UnifiFlowHandler(config_entries.ConfigFlow, domain=UNIFI_DOMAIN):
"""Handle a UniFi config flow."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return UnifiOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the UniFi flow."""
self.config = {}
self.site_ids = {}
self.site_names = {}
self.reauth_config_entry = None
self.reauth_schema = {}
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
self.config = {
CONF_HOST: user_input[CONF_HOST],
CONF_USERNAME: user_input[CONF_USERNAME],
CONF_PASSWORD: user_input[CONF_PASSWORD],
CONF_PORT: user_input.get(CONF_PORT),
CONF_VERIFY_SSL: user_input.get(CONF_VERIFY_SSL),
CONF_SITE_ID: DEFAULT_SITE_ID,
}
try:
controller = await get_controller(
self.hass,
host=self.config[CONF_HOST],
username=self.config[CONF_USERNAME],
password=self.config[CONF_PASSWORD],
port=self.config[CONF_PORT],
site=self.config[CONF_SITE_ID],
verify_ssl=self.config[CONF_VERIFY_SSL],
)
sites = await controller.sites()
except AuthenticationRequired:
errors["base"] = "faulty_credentials"
except CannotConnect:
errors["base"] = "service_unavailable"
else:
self.site_ids = {site["_id"]: site["name"] for site in sites.values()}
self.site_names = {site["_id"]: site["desc"] for site in sites.values()}
if (
self.reauth_config_entry
and self.reauth_config_entry.unique_id in self.site_names
):
return await self.async_step_site(
{CONF_SITE_ID: self.reauth_config_entry.unique_id}
)
return await self.async_step_site()
if not (host := self.config.get(CONF_HOST, "")) and await async_discover_unifi(
self.hass
):
host = "unifi"
data = self.reauth_schema or {
vol.Required(CONF_HOST, default=host): str,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Optional(
CONF_PORT, default=self.config.get(CONF_PORT, DEFAULT_PORT)
): int,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): bool,
}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(data),
errors=errors,
)
async def async_step_site(self, user_input=None):
"""Select site to control."""
errors = {}
if user_input is not None:
unique_id = user_input[CONF_SITE_ID]
self.config[CONF_SITE_ID] = self.site_ids[unique_id]
# Backwards compatible config
self.config[CONF_CONTROLLER] = self.config.copy()
config_entry = await self.async_set_unique_id(unique_id)
abort_reason = "configuration_updated"
if self.reauth_config_entry:
config_entry = self.reauth_config_entry
abort_reason = "reauth_successful"
if config_entry:
controller = self.hass.data.get(UNIFI_DOMAIN, {}).get(
config_entry.entry_id
)
if controller and controller.available:
return self.async_abort(reason="already_configured")
self.hass.config_entries.async_update_entry(
config_entry, data=self.config
)
await self.hass.config_entries.async_reload(config_entry.entry_id)
return self.async_abort(reason=abort_reason)
site_nice_name = self.site_names[unique_id]
return self.async_create_entry(title=site_nice_name, data=self.config)
if len(self.site_names) == 1:
return await self.async_step_site(
{CONF_SITE_ID: next(iter(self.site_names))}
)
return self.async_show_form(
step_id="site",
data_schema=vol.Schema(
{vol.Required(CONF_SITE_ID): vol.In(self.site_names)}
),
errors=errors,
)
async def async_step_reauth(self, data: dict):
"""Trigger a reauthentication flow."""
config_entry = self.hass.config_entries.async_get_entry(
self.context["entry_id"]
)
self.reauth_config_entry = config_entry
self.context["title_placeholders"] = {
CONF_HOST: config_entry.data[CONF_HOST],
CONF_SITE_ID: config_entry.title,
}
self.reauth_schema = {
vol.Required(CONF_HOST, default=config_entry.data[CONF_HOST]): str,
vol.Required(CONF_USERNAME, default=config_entry.data[CONF_USERNAME]): str,
vol.Required(CONF_PASSWORD): str,
vol.Required(CONF_PORT, default=config_entry.data[CONF_PORT]): int,
vol.Required(
CONF_VERIFY_SSL, default=config_entry.data[CONF_VERIFY_SSL]
): bool,
}
return await self.async_step_user()
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered UniFi device."""
parsed_url = urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION])
model_description = discovery_info[ssdp.ATTR_UPNP_MODEL_DESCRIPTION]
mac_address = format_mac(discovery_info[ssdp.ATTR_UPNP_SERIAL])
self.config = {
CONF_HOST: parsed_url.hostname,
}
self._async_abort_entries_match({CONF_HOST: self.config[CONF_HOST]})
await self.async_set_unique_id(mac_address)
self._abort_if_unique_id_configured(updates=self.config)
self.context["title_placeholders"] = {
CONF_HOST: self.config[CONF_HOST],
CONF_SITE_ID: DEFAULT_SITE_ID,
}
if (port := MODEL_PORTS.get(model_description)) is not None:
self.config[CONF_PORT] = port
return await self.async_step_user()
class UnifiOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle Unifi options."""
def __init__(self, config_entry):
"""Initialize UniFi options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
self.controller = None
async def async_step_init(self, user_input=None):
"""Manage the UniFi options."""
self.controller = self.hass.data[UNIFI_DOMAIN][self.config_entry.entry_id]
self.options[CONF_BLOCK_CLIENT] = self.controller.option_block_clients
if self.show_advanced_options:
return await self.async_step_device_tracker()
return await self.async_step_simple_options()
async def async_step_simple_options(self, user_input=None):
"""For users without advanced settings enabled."""
if user_input is not None:
self.options.update(user_input)
return await self._update_options()
clients_to_block = {}
for client in self.controller.api.clients.values():
clients_to_block[
client.mac
] = f"{client.name or client.hostname} ({client.mac})"
return self.async_show_form(
step_id="simple_options",
data_schema=vol.Schema(
{
vol.Optional(
CONF_TRACK_CLIENTS,
default=self.controller.option_track_clients,
): bool,
vol.Optional(
CONF_TRACK_DEVICES,
default=self.controller.option_track_devices,
): bool,
vol.Optional(
CONF_BLOCK_CLIENT, default=self.options[CONF_BLOCK_CLIENT]
): cv.multi_select(clients_to_block),
}
),
last_step=True,
)
async def async_step_device_tracker(self, user_input=None):
"""Manage the device tracker options."""
if user_input is not None:
self.options.update(user_input)
return await self.async_step_client_control()
ssids = (
set(self.controller.api.wlans)
| {
f"{wlan.name}{wlan.name_combine_suffix}"
for wlan in self.controller.api.wlans.values()
if not wlan.name_combine_enabled
}
| {
wlan["name"]
for ap in self.controller.api.devices.values()
for wlan in ap.wlan_overrides
if "name" in wlan
}
)
ssid_filter = {ssid: ssid for ssid in sorted(ssids)}
return self.async_show_form(
step_id="device_tracker",
data_schema=vol.Schema(
{
vol.Optional(
CONF_TRACK_CLIENTS,
default=self.controller.option_track_clients,
): bool,
vol.Optional(
CONF_TRACK_WIRED_CLIENTS,
default=self.controller.option_track_wired_clients,
): bool,
vol.Optional(
CONF_TRACK_DEVICES,
default=self.controller.option_track_devices,
): bool,
vol.Optional(
CONF_SSID_FILTER, default=self.controller.option_ssid_filter
): cv.multi_select(ssid_filter),
vol.Optional(
CONF_DETECTION_TIME,
default=int(
self.controller.option_detection_time.total_seconds()
),
): int,
vol.Optional(
CONF_IGNORE_WIRED_BUG,
default=self.controller.option_ignore_wired_bug,
): bool,
}
),
last_step=False,
)
async def async_step_client_control(self, user_input=None):
"""Manage configuration of network access controlled clients."""
errors = {}
if user_input is not None:
self.options.update(user_input)
return await self.async_step_statistics_sensors()
clients_to_block = {}
for client in self.controller.api.clients.values():
clients_to_block[
client.mac
] = f"{client.name or client.hostname} ({client.mac})"
return self.async_show_form(
step_id="client_control",
data_schema=vol.Schema(
{
vol.Optional(
CONF_BLOCK_CLIENT, default=self.options[CONF_BLOCK_CLIENT]
): cv.multi_select(clients_to_block),
vol.Optional(
CONF_POE_CLIENTS,
default=self.options.get(CONF_POE_CLIENTS, DEFAULT_POE_CLIENTS),
): bool,
vol.Optional(
CONF_DPI_RESTRICTIONS,
default=self.options.get(
CONF_DPI_RESTRICTIONS, DEFAULT_DPI_RESTRICTIONS
),
): bool,
}
),
errors=errors,
last_step=False,
)
async def async_step_statistics_sensors(self, user_input=None):
"""Manage the statistics sensors options."""
if user_input is not None:
self.options.update(user_input)
return await self._update_options()
return self.async_show_form(
step_id="statistics_sensors",
data_schema=vol.Schema(
{
vol.Optional(
CONF_ALLOW_BANDWIDTH_SENSORS,
default=self.controller.option_allow_bandwidth_sensors,
): bool,
vol.Optional(
CONF_ALLOW_UPTIME_SENSORS,
default=self.controller.option_allow_uptime_sensors,
): bool,
}
),
last_step=True,
)
async def _update_options(self):
"""Update config entry options."""
return self.async_create_entry(title="", data=self.options)
async def async_discover_unifi(hass):
"""Discover UniFi address."""
try:
return await hass.async_add_executor_job(socket.gethostbyname, "unifi")
except socket.gaierror:
return None
|
|
# encoding: utf-8
"""
Prefiltering components.
Prefilters transform user input before it is exec'd by Python. These
transforms are used to implement additional syntax such as !ls and %magic.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from keyword import iskeyword
import re
from IPython.core.autocall import IPyAutocall
from traitlets.config.configurable import Configurable
from IPython.core.inputsplitter import (
ESC_MAGIC,
ESC_QUOTE,
ESC_QUOTE2,
ESC_PAREN,
)
from IPython.core.macro import Macro
from IPython.core.splitinput import LineInfo
from traitlets import (
List, Integer, Unicode, Bool, Instance, CRegExp
)
#-----------------------------------------------------------------------------
# Global utilities, errors and constants
#-----------------------------------------------------------------------------
class PrefilterError(Exception):
pass
# RegExp to identify potential function names
re_fun_name = re.compile(r'[a-zA-Z_]([a-zA-Z0-9_.]*) *$')
# RegExp to exclude strings with this start from autocalling. In
# particular, all binary operators should be excluded, so that if foo is
# callable, foo OP bar doesn't become foo(OP bar), which is invalid. The
# characters '!=()' don't need to be checked for, as the checkPythonChars
# routine explicitly does so, to catch direct calls and rebindings of
# existing names.
# Warning: the '-' HAS TO BE AT THE END of the first group, otherwise
# it affects the rest of the group in square brackets.
re_exclude_auto = re.compile(r'^[,&^\|\*/\+-]'
r'|^is |^not |^in |^and |^or ')
# Also try to catch methods on items in lists/tuples/dicts: off
# (experimental). For this to work, the line_split regexp would need
# to be modified so it wouldn't break things at '['. That line is
# nasty enough that I shouldn't change it until I can test it _well_.
#self.re_fun_name = re.compile (r'[a-zA-Z_]([a-zA-Z0-9_.\[\]]*) ?$')
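# A small, self-contained illustration (not used by IPython itself) of what
# the two regexps above accept and reject. The sample strings are made up;
# note that re_exclude_auto is applied to the *rest* of the line after the
# initial name, so for `foo + bar` it sees `+ bar` and blocks the rewrite.
def _autocall_regexp_examples():
    assert re_fun_name.match('foo.bar ') is not None   # looks like a callable name
    assert re_fun_name.match('3abc') is None            # not a valid identifier start
    assert re_exclude_auto.match('+ bar') is not None   # binary operator: excluded
    assert re_exclude_auto.match('is bar') is not None  # keyword operator: excluded
    assert re_exclude_auto.match('bar') is None          # plain argument: allowed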
# Handler Check Utilities
def is_shadowed(identifier, ip):
"""Is the given identifier defined in one of the namespaces which shadow
the alias and magic namespaces? Note that an identifier is different
from an ifun, because it cannot contain a '.' character.
# This is much safer than calling ofind, which can change state
return (identifier in ip.user_ns \
or identifier in ip.user_global_ns \
or identifier in ip.ns_table['builtin']\
or iskeyword(identifier))
#-----------------------------------------------------------------------------
# Main Prefilter manager
#-----------------------------------------------------------------------------
class PrefilterManager(Configurable):
"""Main prefilter component.
The IPython prefilter is run on all user input before it is run. The
prefilter consumes lines of input and produces transformed lines of
input.
The implementation consists of two phases:
1. Transformers
2. Checkers and handlers
Over time, we plan on deprecating the checkers and handlers and doing
everything in the transformers.
The transformers are instances of :class:`PrefilterTransformer` and have
a single method :meth:`transform` that takes a line and returns a
transformed line. The transformation can be accomplished using any
tool, but our current ones use regular expressions for speed.
After all the transformers have been run, the line is fed to the checkers,
which are instances of :class:`PrefilterChecker`. The line is passed to
the :meth:`check` method, which either returns `None` or a
:class:`PrefilterHandler` instance. If `None` is returned, the other
checkers are tried. If a :class:`PrefilterHandler` instance is returned,
the line is passed to the :meth:`handle` method of the returned
handler and no further checkers are tried.
Both transformers and checkers have a `priority` attribute, that determines
the order in which they are called. Smaller priorities are tried first.
Both transformers and checkers also have an `enabled` attribute, which is
a boolean that determines if the instance is used.
Users or developers can change the priority or enabled attribute of
transformers or checkers, but they must call the :meth:`sort_checkers`
or :meth:`sort_transformers` method after changing the priority.
"""
multi_line_specials = Bool(True).tag(config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
def __init__(self, shell=None, **kwargs):
super(PrefilterManager, self).__init__(shell=shell, **kwargs)
self.shell = shell
self.init_transformers()
self.init_handlers()
self.init_checkers()
#-------------------------------------------------------------------------
# API for managing transformers
#-------------------------------------------------------------------------
def init_transformers(self):
"""Create the default transformers."""
self._transformers = []
for transformer_cls in _default_transformers:
transformer_cls(
shell=self.shell, prefilter_manager=self, parent=self
)
def sort_transformers(self):
"""Sort the transformers by priority.
This must be called after the priority of a transformer is changed.
The :meth:`register_transformer` method calls this automatically.
"""
self._transformers.sort(key=lambda x: x.priority)
@property
def transformers(self):
"""Return a list of checkers, sorted by priority."""
return self._transformers
def register_transformer(self, transformer):
"""Register a transformer instance."""
if transformer not in self._transformers:
self._transformers.append(transformer)
self.sort_transformers()
def unregister_transformer(self, transformer):
"""Unregister a transformer instance."""
if transformer in self._transformers:
self._transformers.remove(transformer)
#-------------------------------------------------------------------------
# API for managing checkers
#-------------------------------------------------------------------------
def init_checkers(self):
"""Create the default checkers."""
self._checkers = []
for checker in _default_checkers:
checker(
shell=self.shell, prefilter_manager=self, parent=self
)
def sort_checkers(self):
"""Sort the checkers by priority.
This must be called after the priority of a checker is changed.
The :meth:`register_checker` method calls this automatically.
"""
self._checkers.sort(key=lambda x: x.priority)
@property
def checkers(self):
"""Return a list of checkers, sorted by priority."""
return self._checkers
def register_checker(self, checker):
"""Register a checker instance."""
if checker not in self._checkers:
self._checkers.append(checker)
self.sort_checkers()
def unregister_checker(self, checker):
"""Unregister a checker instance."""
if checker in self._checkers:
self._checkers.remove(checker)
#-------------------------------------------------------------------------
# API for managing handlers
#-------------------------------------------------------------------------
def init_handlers(self):
"""Create the default handlers."""
self._handlers = {}
self._esc_handlers = {}
for handler in _default_handlers:
handler(
shell=self.shell, prefilter_manager=self, parent=self
)
@property
def handlers(self):
"""Return a dict of all the handlers."""
return self._handlers
def register_handler(self, name, handler, esc_strings):
"""Register a handler instance by name with esc_strings."""
self._handlers[name] = handler
for esc_str in esc_strings:
self._esc_handlers[esc_str] = handler
def unregister_handler(self, name, handler, esc_strings):
"""Unregister a handler instance by name with esc_strings."""
try:
del self._handlers[name]
except KeyError:
pass
for esc_str in esc_strings:
h = self._esc_handlers.get(esc_str)
if h is handler:
del self._esc_handlers[esc_str]
def get_handler_by_name(self, name):
"""Get a handler by its name."""
return self._handlers.get(name)
def get_handler_by_esc(self, esc_str):
"""Get a handler by its escape string."""
return self._esc_handlers.get(esc_str)
#-------------------------------------------------------------------------
# Main prefiltering API
#-------------------------------------------------------------------------
def prefilter_line_info(self, line_info):
"""Prefilter a line that has been converted to a LineInfo object.
This implements the checker/handler part of the prefilter pipe.
"""
# print "prefilter_line_info: ", line_info
handler = self.find_handler(line_info)
return handler.handle(line_info)
def find_handler(self, line_info):
"""Find a handler for the line_info by trying checkers."""
for checker in self.checkers:
if checker.enabled:
handler = checker.check(line_info)
if handler:
return handler
return self.get_handler_by_name('normal')
def transform_line(self, line, continue_prompt):
"""Calls the enabled transformers in order of increasing priority."""
for transformer in self.transformers:
if transformer.enabled:
line = transformer.transform(line, continue_prompt)
return line
def prefilter_line(self, line, continue_prompt=False):
"""Prefilter a single input line as text.
This method prefilters a single line of text by calling the
transformers and then the checkers/handlers.
"""
# print "prefilter_line: ", line, continue_prompt
# All handlers *must* return a value, even if it's blank ('').
# save the line away in case we crash, so the post-mortem handler can
# record it
self.shell._last_input_line = line
if not line:
# Return immediately on purely empty lines, so that if the user
# previously typed some whitespace that started a continuation
# prompt, he can break out of that loop with just an empty line.
# This is how the default python prompt works.
return ''
# At this point, we invoke our transformers.
if not continue_prompt or (continue_prompt and self.multi_line_specials):
line = self.transform_line(line, continue_prompt)
# Now we compute line_info for the checkers and handlers
line_info = LineInfo(line, continue_prompt)
# the input history needs to track even empty lines
stripped = line.strip()
normal_handler = self.get_handler_by_name('normal')
if not stripped:
return normal_handler.handle(line_info)
# special handlers are only allowed for single line statements
if continue_prompt and not self.multi_line_specials:
return normal_handler.handle(line_info)
prefiltered = self.prefilter_line_info(line_info)
# print "prefiltered line: %r" % prefiltered
return prefiltered
def prefilter_lines(self, lines, continue_prompt=False):
"""Prefilter multiple input lines of text.
This is the main entry point for prefiltering multiple lines of
input. This simply calls :meth:`prefilter_line` for each line of
input.
This covers cases where there are multiple lines in the user entry,
which is the case when the user goes back to a multiline history
entry and presses enter.
"""
llines = lines.rstrip('\n').split('\n')
# We can get multiple lines in one shot, where multiline input 'blends'
# into one line, in cases like recalling from the readline history
# buffer. We need to make sure that in such cases, we correctly
# communicate downstream which line is first and which are continuation
# ones.
if len(llines) > 1:
out = '\n'.join([self.prefilter_line(line, lnum>0)
for lnum, line in enumerate(llines) ])
else:
out = self.prefilter_line(llines[0], continue_prompt)
return out
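# Minimal standalone sketch of the checker dispatch documented in the
# PrefilterManager docstring: checkers are tried in order of increasing
# priority, the first one returning a handler wins, and everything else falls
# through to a "normal" fallback. The Toy* names are hypothetical and only
# mirror the shape of find_handler()/prefilter_line_info(); they are not part
# of IPython's API.
def _checker_dispatch_sketch(line):
    class ToyHandler(object):
        def handle(self, text):
            return 'handled: %s' % text

    class ToyChecker(object):
        def __init__(self, priority, prefix):
            self.priority = priority
            self.prefix = prefix
            self.enabled = True

        def check(self, text):
            # Like PrefilterChecker.check: return a handler or None.
            return ToyHandler() if text.startswith(self.prefix) else None

    checkers = sorted([ToyChecker(700, '%'), ToyChecker(300, '!')],
                      key=lambda c: c.priority)  # smaller priorities first
    for checker in checkers:
        if checker.enabled:
            handler = checker.check(line)
            if handler:
                return handler.handle(line)
    return line  # no checker matched: behave like the 'normal' handler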
#-----------------------------------------------------------------------------
# Prefilter transformers
#-----------------------------------------------------------------------------
class PrefilterTransformer(Configurable):
"""Transform a line of user input."""
priority = Integer(100).tag(config=True)
# Transformers don't currently use shell or prefilter_manager, but as we
# move away from checkers and handlers, they will need them.
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
enabled = Bool(True).tag(config=True)
def __init__(self, shell=None, prefilter_manager=None, **kwargs):
super(PrefilterTransformer, self).__init__(
shell=shell, prefilter_manager=prefilter_manager, **kwargs
)
self.prefilter_manager.register_transformer(self)
def transform(self, line, continue_prompt):
"""Transform a line, returning the new one."""
return None
def __repr__(self):
return "<%s(priority=%r, enabled=%r)>" % (
self.__class__.__name__, self.priority, self.enabled)
#-----------------------------------------------------------------------------
# Prefilter checkers
#-----------------------------------------------------------------------------
class PrefilterChecker(Configurable):
"""Inspect an input line and return a handler for that line."""
priority = Integer(100).tag(config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
enabled = Bool(True).tag(config=True)
def __init__(self, shell=None, prefilter_manager=None, **kwargs):
super(PrefilterChecker, self).__init__(
shell=shell, prefilter_manager=prefilter_manager, **kwargs
)
self.prefilter_manager.register_checker(self)
def check(self, line_info):
"""Inspect line_info and return a handler instance or None."""
return None
def __repr__(self):
return "<%s(priority=%r, enabled=%r)>" % (
self.__class__.__name__, self.priority, self.enabled)
class EmacsChecker(PrefilterChecker):
priority = Integer(100).tag(config=True)
enabled = Bool(False).tag(config=True)
def check(self, line_info):
"Emacs ipython-mode tags certain input lines."
if line_info.line.endswith('# PYTHON-MODE'):
return self.prefilter_manager.get_handler_by_name('emacs')
else:
return None
class MacroChecker(PrefilterChecker):
priority = Integer(250).tag(config=True)
def check(self, line_info):
obj = self.shell.user_ns.get(line_info.ifun)
if isinstance(obj, Macro):
return self.prefilter_manager.get_handler_by_name('macro')
else:
return None
class IPyAutocallChecker(PrefilterChecker):
priority = Integer(300).tag(config=True)
def check(self, line_info):
"Instances of IPyAutocall in user_ns get autocalled immediately"
obj = self.shell.user_ns.get(line_info.ifun, None)
if isinstance(obj, IPyAutocall):
obj.set_ip(self.shell)
return self.prefilter_manager.get_handler_by_name('auto')
else:
return None
class AssignmentChecker(PrefilterChecker):
priority = Integer(600).tag(config=True)
def check(self, line_info):
"""Check to see if user is assigning to a var for the first time, in
which case we want to avoid any sort of automagic / autocall games.
This allows users to assign to either alias or magic names true python
variables (the magic/alias systems always take second seat to true
python code). E.g. ls='hi', or ls,that=1,2"""
if line_info.the_rest:
if line_info.the_rest[0] in '=,':
return self.prefilter_manager.get_handler_by_name('normal')
else:
return None
class AutoMagicChecker(PrefilterChecker):
priority = Integer(700).tag(config=True)
def check(self, line_info):
"""If the ifun is magic, and automagic is on, run it. Note: normal,
non-auto magic would already have been triggered via '%' in
check_esc_chars. This just checks for automagic. Also, before
triggering the magic handler, make sure that there is nothing in the
user namespace which could shadow it."""
if not self.shell.automagic or not self.shell.find_magic(line_info.ifun):
return None
# We have a likely magic method. Make sure we should actually call it.
if line_info.continue_prompt and not self.prefilter_manager.multi_line_specials:
return None
head = line_info.ifun.split('.',1)[0]
if is_shadowed(head, self.shell):
return None
return self.prefilter_manager.get_handler_by_name('magic')
class PythonOpsChecker(PrefilterChecker):
priority = Integer(900).tag(config=True)
def check(self, line_info):
"""If the 'rest' of the line begins with a function call or pretty much
any python operator, we should simply execute the line (regardless of
whether or not there's a possible autocall expansion). This avoids
spurious (and very confusing) getattr() accesses."""
if line_info.the_rest and line_info.the_rest[0] in '!=()<>,+*/%^&|':
return self.prefilter_manager.get_handler_by_name('normal')
else:
return None
class AutocallChecker(PrefilterChecker):
priority = Integer(1000).tag(config=True)
function_name_regexp = CRegExp(re_fun_name,
help="RegExp to identify potential function names."
).tag(config=True)
exclude_regexp = CRegExp(re_exclude_auto,
help="RegExp to exclude strings with this start from autocalling."
).tag(config=True)
def check(self, line_info):
"Check if the initial word/function is callable and autocall is on."
if not self.shell.autocall:
return None
oinfo = line_info.ofind(self.shell) # This can mutate state via getattr
if not oinfo['found']:
return None
ignored_funs = ['b', 'f', 'r', 'u', 'br', 'rb', 'fr', 'rf']
ifun = line_info.ifun
line = line_info.line
if ifun.lower() in ignored_funs and (line.startswith(ifun + "'") or line.startswith(ifun + '"')):
return None
if callable(oinfo['obj']) \
and (not self.exclude_regexp.match(line_info.the_rest)) \
and self.function_name_regexp.match(line_info.ifun):
return self.prefilter_manager.get_handler_by_name('auto')
else:
return None
#-----------------------------------------------------------------------------
# Prefilter handlers
#-----------------------------------------------------------------------------
class PrefilterHandler(Configurable):
handler_name = Unicode('normal')
esc_strings = List([])
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC', allow_none=True)
prefilter_manager = Instance('IPython.core.prefilter.PrefilterManager', allow_none=True)
def __init__(self, shell=None, prefilter_manager=None, **kwargs):
super(PrefilterHandler, self).__init__(
shell=shell, prefilter_manager=prefilter_manager, **kwargs
)
self.prefilter_manager.register_handler(
self.handler_name,
self,
self.esc_strings
)
def handle(self, line_info):
# print "normal: ", line_info
"""Handle normal input lines. Use as a template for handlers."""
# With autoindent on, we need some way to exit the input loop, and I
# don't want to force the user to have to backspace all the way to
# clear the line. The rule will be in this case, that either two
# lines of pure whitespace in a row, or a line of pure whitespace but
# of a size different to the indent level, will exit the input loop.
line = line_info.line
continue_prompt = line_info.continue_prompt
if (continue_prompt and
self.shell.autoindent and
line.isspace() and
0 < abs(len(line) - self.shell.indent_current_nsp) <= 2):
line = ''
return line
def __str__(self):
return "<%s(name=%s)>" % (self.__class__.__name__, self.handler_name)
class MacroHandler(PrefilterHandler):
handler_name = Unicode("macro")
def handle(self, line_info):
obj = self.shell.user_ns.get(line_info.ifun)
pre_space = line_info.pre_whitespace
line_sep = "\n" + pre_space
return pre_space + line_sep.join(obj.value.splitlines())
class MagicHandler(PrefilterHandler):
handler_name = Unicode('magic')
esc_strings = List([ESC_MAGIC])
def handle(self, line_info):
"""Execute magic functions."""
ifun = line_info.ifun
the_rest = line_info.the_rest
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
t_arg_s = ifun + " " + the_rest
t_magic_name, _, t_magic_arg_s = t_arg_s.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
cmd = '%sget_ipython().run_line_magic(%r, %r)' % (line_info.pre_whitespace, t_magic_name, t_magic_arg_s)
return cmd
class AutoHandler(PrefilterHandler):
handler_name = Unicode('auto')
esc_strings = List([ESC_PAREN, ESC_QUOTE, ESC_QUOTE2])
def handle(self, line_info):
"""Handle lines which can be auto-executed, quoting if requested."""
line = line_info.line
ifun = line_info.ifun
the_rest = line_info.the_rest
esc = line_info.esc
continue_prompt = line_info.continue_prompt
obj = line_info.ofind(self.shell)['obj']
# This should only be active for single-line input!
if continue_prompt:
return line
force_auto = isinstance(obj, IPyAutocall)
# User objects sometimes raise exceptions on attribute access other
# than AttributeError (we've seen it in the past), so it's safest to be
# ultra-conservative here and catch all.
try:
auto_rewrite = obj.rewrite
except Exception:
auto_rewrite = True
if esc == ESC_QUOTE:
# Auto-quote splitting on whitespace
newcmd = '%s("%s")' % (ifun,'", "'.join(the_rest.split()) )
elif esc == ESC_QUOTE2:
# Auto-quote whole string
newcmd = '%s("%s")' % (ifun,the_rest)
elif esc == ESC_PAREN:
newcmd = '%s(%s)' % (ifun,",".join(the_rest.split()))
else:
# Auto-paren.
if force_auto:
# Don't rewrite if it is already a call.
do_rewrite = not the_rest.startswith('(')
else:
if not the_rest:
# We only apply it to argument-less calls if the autocall
# parameter is set to 2.
do_rewrite = (self.shell.autocall >= 2)
elif the_rest.startswith('[') and hasattr(obj, '__getitem__'):
# Don't autocall in this case: item access for an object
# which is BOTH callable and implements __getitem__.
do_rewrite = False
else:
do_rewrite = True
# Figure out the rewritten command
if do_rewrite:
if the_rest.endswith(';'):
newcmd = '%s(%s);' % (ifun.rstrip(),the_rest[:-1])
else:
newcmd = '%s(%s)' % (ifun.rstrip(), the_rest)
else:
normal_handler = self.prefilter_manager.get_handler_by_name('normal')
return normal_handler.handle(line_info)
# Display the rewritten call
if auto_rewrite:
self.shell.auto_rewrite_input(newcmd)
return newcmd
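# For reference, the rewrites produced by the branches above for a callable
# named `foo` (a hypothetical example; the escape characters are the ones
# defined in IPython.core.inputsplitter):
#   /foo a b   (ESC_PAREN)   ->  foo(a,b)
#   ,foo a b   (ESC_QUOTE)   ->  foo("a", "b")
#   ;foo a b   (ESC_QUOTE2)  ->  foo("a b")
#   foo a, b   (plain autocall, auto-paren branch)  ->  foo(a, b)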
class EmacsHandler(PrefilterHandler):
handler_name = Unicode('emacs')
esc_strings = List([])
def handle(self, line_info):
"""Handle input lines marked by python-mode."""
# Currently, nothing is done. Later more functionality can be added
# here if needed.
# The input cache shouldn't be updated
return line_info.line
#-----------------------------------------------------------------------------
# Defaults
#-----------------------------------------------------------------------------
_default_transformers = [
]
_default_checkers = [
EmacsChecker,
MacroChecker,
IPyAutocallChecker,
AssignmentChecker,
AutoMagicChecker,
PythonOpsChecker,
AutocallChecker
]
_default_handlers = [
PrefilterHandler,
MacroHandler,
MagicHandler,
AutoHandler,
EmacsHandler
]
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""``PTransforms`` that implement Google Cloud Data Loss Prevention
functionality.
"""
from __future__ import absolute_import
import logging
from google.cloud import dlp_v2
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.transforms import DoFn
from apache_beam.transforms import ParDo
from apache_beam.transforms import PTransform
from apache_beam.utils.annotations import experimental
__all__ = ['MaskDetectedDetails', 'InspectForDetails']
_LOGGER = logging.getLogger(__name__)
@experimental()
class MaskDetectedDetails(PTransform):
"""Scrubs sensitive information detected in text.
The ``PTransform`` returns a ``PCollection`` of ``str``
Example usage::
pipeline | MaskDetectedDetails(project='example-gcp-project',
deidentification_config={
            'info_type_transformations': {
'transformations': [{
'primitive_transformation': {
'character_mask_config': {
'masking_character': '#'
}
}
}]
}
}, inspection_config={'info_types': [{'name': 'EMAIL_ADDRESS'}]})
"""
def __init__(
self,
project=None,
deidentification_template_name=None,
deidentification_config=None,
inspection_template_name=None,
inspection_config=None,
timeout=None):
"""Initializes a :class:`MaskDetectedDetails` transform.
Args:
project: Optional. GCP project name in which inspection will be performed
deidentification_template_name (str): Either this or
`deidentification_config` required. Name of
deidentification template to be used on detected sensitive information
instances in text.
deidentification_config
(``Union[dict, google.cloud.dlp_v2.types.DeidentifyConfig]``):
Configuration for the de-identification of the content item.
        If both template name and config are supplied,
        config takes precedence.
inspection_template_name (str): This or `inspection_config` required.
Name of inspection template to be used
to detect sensitive data in text.
inspection_config
(``Union[dict, google.cloud.dlp_v2.types.InspectConfig]``):
Configuration for the inspector used to detect sensitive data in text.
If both template name and config are supplied,
config takes precedence.
timeout (float): Optional. The amount of time, in seconds, to wait for
the request to complete.
"""
self.config = {}
self.project = project
self.timeout = timeout
if deidentification_template_name is not None \
and deidentification_config is not None:
raise ValueError(
'Both deidentification_template_name and '
'deidentification_config were specified.'
' Please specify only one of these.')
elif deidentification_template_name is None \
and deidentification_config is None:
raise ValueError(
'deidentification_template_name or '
'deidentification_config must be specified.')
elif deidentification_template_name is not None:
self.config['deidentify_template_name'] = deidentification_template_name
else:
self.config['deidentify_config'] = deidentification_config
if inspection_config is None and inspection_template_name is None:
raise ValueError(
'inspection_template_name or inspection_config must be specified')
if inspection_template_name is not None:
self.config['inspect_template_name'] = inspection_template_name
if inspection_config is not None:
self.config['inspect_config'] = inspection_config
def expand(self, pcoll):
if self.project is None:
self.project = pcoll.pipeline.options.view_as(GoogleCloudOptions).project
if self.project is None:
raise ValueError(
'GCP project name needs to be specified in "project" pipeline option')
return (
pcoll
| ParDo(_DeidentifyFn(self.config, self.timeout, self.project)))
@experimental()
class InspectForDetails(PTransform):
"""Inspects input text for sensitive information.
  The ``PTransform`` returns a ``PCollection`` of
``List[google.cloud.dlp_v2.proto.dlp_pb2.Finding]``
Example usage::
pipeline | InspectForDetails(project='example-gcp-project',
inspection_config={'info_types': [{'name': 'EMAIL_ADDRESS'}]})
"""
def __init__(
self,
project=None,
inspection_template_name=None,
inspection_config=None,
timeout=None):
"""Initializes a :class:`InspectForDetails` transform.
Args:
project: Optional. GCP project name in which inspection will be performed
inspection_template_name (str): This or `inspection_config` required.
Name of inspection template to be used
to detect sensitive data in text.
inspection_config
(``Union[dict, google.cloud.dlp_v2.types.InspectConfig]``):
Configuration for the inspector used to detect sensitive data in text.
If both template name and config are supplied,
config takes precedence.
timeout (float): Optional. The amount of time, in seconds, to wait for
the request to complete.
"""
self.timeout = timeout
self.config = {}
self.project = project
if inspection_config is None and inspection_template_name is None:
raise ValueError(
'inspection_template_name or inspection_config must be specified')
if inspection_template_name is not None:
self.config['inspect_template_name'] = inspection_template_name
if inspection_config is not None:
self.config['inspect_config'] = inspection_config
def expand(self, pcoll):
if self.project is None:
self.project = pcoll.pipeline.options.view_as(GoogleCloudOptions).project
if self.project is None:
raise ValueError(
'GCP project name needs to be specified in "project" pipeline option')
return pcoll | ParDo(_InspectFn(self.config, self.timeout, self.project))
class _DeidentifyFn(DoFn):
def __init__(self, config=None, timeout=None, project=None, client=None):
self.config = config
self.timeout = timeout
self.client = client
self.project = project
self.params = {}
def setup(self):
if self.client is None:
self.client = dlp_v2.DlpServiceClient()
self.params = {
'timeout': self.timeout,
'parent': self.client.project_path(self.project)
}
self.params.update(self.config)
def process(self, element, **kwargs):
operation = self.client.deidentify_content(
item={"value": element}, **self.params)
yield operation.item.value
class _InspectFn(DoFn):
def __init__(self, config=None, timeout=None, project=None):
self.config = config
self.timeout = timeout
self.client = None
self.project = project
self.params = {}
def setup(self):
if self.client is None:
self.client = dlp_v2.DlpServiceClient()
self.params = {
'timeout': self.timeout,
"parent": self.client.project_path(self.project)
}
self.params.update(self.config)
def process(self, element, **kwargs):
operation = self.client.inspect_content(
item={"value": element}, **self.params)
hits = [x for x in operation.result.findings]
yield hits
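# Minimal end-to-end sketch (not part of the original module), kept as a
# comment so importing this file has no side effects. It assumes a GCP project
# named 'example-gcp-project' with the DLP API enabled and valid credentials:
#
#   import apache_beam as beam
#
#   with beam.Pipeline() as p:
#       masked = (
#           p
#           | 'CreateMaskInput' >> beam.Create(['contact me at [email protected]'])
#           | MaskDetectedDetails(
#               project='example-gcp-project',
#               deidentification_config={
#                   'info_type_transformations': {
#                       'transformations': [{
#                           'primitive_transformation': {
#                               'character_mask_config': {'masking_character': '#'}
#                           }
#                       }]
#                   }
#               },
#               inspection_config={'info_types': [{'name': 'EMAIL_ADDRESS'}]}))
#       findings = (
#           p
#           | 'CreateInspectInput' >> beam.Create(['contact me at [email protected]'])
#           | InspectForDetails(
#               project='example-gcp-project',
#               inspection_config={'info_types': [{'name': 'EMAIL_ADDRESS'}]}))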
|
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from hashlib import sha1
from pants.backend.android.targets.android_library import AndroidLibrary
from pants.backend.android.targets.android_resources import AndroidResources
from pants.backend.core.tasks.task import Task
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.base.address import Address
from pants.base.build_environment import get_buildroot
from pants.base.fingerprint_strategy import DefaultFingerprintStrategy
from pants.fs.archive import ZIP
class AndroidLibraryFingerprintStrategy(DefaultFingerprintStrategy):
def compute_fingerprint(self, target):
"""AndroidLibrary targets need to be re-unpacked if any of the imported jars have changed."""
# TODO(mateor) Create a utility function to add a block of fingerprints to a hasher with caller
# handing in list of items of the same type and a function to extract a fingerprint from each.
if isinstance(target, AndroidLibrary):
hasher = sha1()
for jar_import in sorted(target.imported_jars, key=lambda t: t.id):
hasher.update(jar_import.cache_key())
hasher.update(target.payload.fingerprint())
return hasher.hexdigest()
return None
class UnpackLibraries(Task):
"""Unpack AndroidDependency artifacts, including .jar and .aar libraries.
The UnpackLibraries task unpacks artifacts imported by AndroidLibraries, as .aar or .jar files,
through a 'libraries' attribute. The .aar files may contain components which require creation
of some synthetic targets, as well as a classes.jar. The classes.jar is packaged into a
  JarDependency target and sent to javac compilation. All jar files are then unpacked;
android_binaries repack the class files of all the android_libraries in their transitive
dependencies into a dex file.
All archives are unpacked only once, regardless of differing include/exclude patterns or how many
targets depend upon it. All targets that depend on a particular artifact will be passed the
unpack_libraries product, which is a directory containing the entire source of the unpacked jars.
These sources are filtered against the AndroidLibrary's include/exclude patterns during the
creation of the dex file.
"""
class MissingElementException(Exception):
"""Raised if an unpacked file or directory unexpectedly does not exist."""
class UnexpectedArchiveType(Exception):
"""Raised if an archive has an extension that is not explicitly handled by this class."""
@classmethod
def prepare(cls, options, round_manager):
super(UnpackLibraries, cls).prepare(options, round_manager)
round_manager.require_data('ivy_imports')
@classmethod
def product_types(cls):
return ['unpacked_libraries']
@staticmethod
def is_library(target):
"""Return True for AndroidLibrary targets."""
# TODO(mateor) add AndroidBinary support. If include/exclude patterns aren't needed, an
# android_binary should be able to simply declare an android_dependency as a dep.
return isinstance(target, AndroidLibrary)
def __init__(self, *args, **kwargs):
super(UnpackLibraries, self).__init__(*args, **kwargs)
self._created_targets = {}
self._unpacked_archives = set()
def create_classes_jar_target(self, target, archive, jar_file):
"""Create a JarLibrary target containing the jar_file as a JarDependency.
    :param AndroidLibrary target: The new JarLibrary will be derived from this AndroidLibrary.
:param string archive: Archive name as fetched by ivy, e.g. 'org.pantsbuild.example-1.0.aar'.
:param string jar_file: Full path of the classes.jar contained within unpacked aar files.
:return: new_target.
:rtype::class:`pants.backend.jvm.targets.java_library.JarLibrary`
"""
# TODO(mateor) add another JarDependency for every jar under 'libs'.
    # Try to parse the revision number. This is just to satisfy the spec; the rev is part of 'archive'.
archive_version = os.path.splitext(archive)[0].rpartition('-')[-1]
jar_url = 'file://{0}'.format(jar_file)
jar_dep = JarDependency(org=target.id, name=archive, rev=archive_version, url=jar_url)
address = Address(self.workdir, '{}-classes.jar'.format(archive))
new_target = self.context.add_new_target(address, JarLibrary, jars=[jar_dep],
derived_from=target)
return new_target
def create_resource_target(self, target, archive, manifest, resource_dir):
"""Create an AndroidResources target.
:param AndroidLibrary target: AndroidLibrary that the new AndroidResources target derives from.
:param string archive: Archive name as fetched by ivy, e.g. 'org.pantsbuild.example-1.0.aar'.
:param string resource_dir: Full path of the res directory contained within aar files.
:return: new_target.
:rtype::class:`pants.backend.android.targets.AndroidResources`
"""
address = Address(self.workdir, '{}-resources'.format(archive))
new_target = self.context.add_new_target(address, AndroidResources,
manifest=manifest, resource_dir=resource_dir,
derived_from=target)
return new_target
def create_android_library_target(self, target, archive, unpacked_aar_location):
"""Create an AndroidLibrary target.
The aar files are unpacked and the contents used to create a new AndroidLibrary target.
:param AndroidLibrary target: AndroidLibrary that the new AndroidLibrary target derives from.
:param string archive: An archive name as fetched by ivy, e.g. 'org.pantsbuild.example-1.0.aar'.
:param string unpacked_aar_location: Full path of dir holding contents of an unpacked aar file.
:return: new_target
:rtype::class:`pants.backend.android.targets.AndroidLibrary`
"""
# The following three elements of an aar file have names mandated by the aar spec:
# http://tools.android.com/tech-docs/new-build-system/aar-format
# They are said to be mandatory although in practice that assumption only holds for manifest.
manifest = os.path.join(unpacked_aar_location, 'AndroidManifest.xml')
jar_file = os.path.join(unpacked_aar_location, 'classes.jar')
resource_dir = os.path.join(unpacked_aar_location, 'res')
# Sanity check to make sure all .aar files we expect to be unpacked are actually unpacked.
if not os.path.isfile(manifest):
raise self.MissingElementException("An AndroidManifest.xml is expected in every unpacked "
".aar file but none was found in the {} archive "
"for the {} target".format(archive, target))
# Depending on the contents of the unpacked aar file, create the dependencies.
deps = []
if os.path.isdir(resource_dir):
deps.append(self.create_resource_target(target, archive, manifest, resource_dir))
if os.path.isfile(jar_file):
deps.append(self.create_classes_jar_target(target, archive, jar_file))
address = Address(self.workdir, '{}-android_library'.format(archive))
new_target = self.context.add_new_target(address, AndroidLibrary,
manifest=manifest,
include_patterns=target.include_patterns,
exclude_patterns=target.exclude_patterns,
dependencies=deps,
derived_from=target)
return new_target
def _unpack_artifacts(self, imports):
# Unpack the aar and jar library artifacts. If the aar files have a jar in the contents,
# unpack that jar as well.
for archive_path in imports:
for archive in imports[archive_path]:
jar_outdir = self.unpacked_jar_location(archive)
if archive.endswith('.jar'):
jar_file = os.path.join(archive_path, archive)
elif archive.endswith('.aar'):
unpacked_aar_destination = self.unpacked_aar_location(archive)
jar_file = os.path.join(unpacked_aar_destination, 'classes.jar')
# Unpack .aar files.
if archive not in self._unpacked_archives:
ZIP.extract(os.path.join(archive_path, archive), unpacked_aar_destination)
self._unpacked_archives.update([archive])
# Create an .aar/classes.jar signature for self._unpacked_archives.
archive = os.path.join(archive, 'classes.jar')
else:
raise self.UnexpectedArchiveType('Android dependencies can be .aar or .jar '
'archives (was: {})'.format(archive))
# Unpack the jar files.
if archive not in self._unpacked_archives and os.path.isfile(jar_file):
ZIP.extract(jar_file, jar_outdir)
self._unpacked_archives.update([archive])
def _create_target(self, target, imports):
# Create a target for the components of an unpacked .aar file.
for archives in imports.values():
for archive in archives:
# The contents of the unpacked aar file must be made into an AndroidLibrary target.
if archive.endswith('.aar'):
if archive not in self._created_targets:
unpacked_location = self.unpacked_aar_location(archive)
if not os.path.isdir(unpacked_location):
raise self.MissingElementException('{}: Expected to unpack {} at {} but did not!'
.format(target, archive, unpacked_location))
new_target = self.create_android_library_target(target, archive, unpacked_location)
self._created_targets[archive] = new_target
target.inject_dependency(self._created_targets[archive].address)
# The unpacked_libraries product is a dir containing the full unpacked source. The files
# that match the include/exclude patterns are calculated during DxCompile.
unpacked_products = self.context.products.get('unpacked_libraries')
unpacked_products.add(target, get_buildroot()).append(self.unpacked_jar_location(archive))
def execute(self):
ivy_imports = self.context.products.get('ivy_imports')
library_targets = self.context.targets(predicate=self.is_library)
targets_to_unpack = []
with self.invalidated(library_targets,
fingerprint_strategy=AndroidLibraryFingerprintStrategy(),
invalidate_dependents=True) as invalidation_check:
if invalidation_check.invalid_vts:
targets_to_unpack.extend([vt.target for vt in invalidation_check.invalid_vts])
for target in targets_to_unpack:
imports = ivy_imports.get(target)
if imports:
self._unpack_artifacts(imports)
# Create the new targets from the contents of unpacked aar files.
for target in library_targets:
imports = ivy_imports.get(target)
if imports:
self._create_target(target, imports)
def unpacked_jar_location(self, archive):
"""Location for unpacked jar files, whether imported as-is or found inside an aar file."""
return os.path.join(self.workdir, 'explode-jars', archive)
def unpacked_aar_location(self, archive):
"""Output location for unpacking .aar archives."""
return os.path.join(self.workdir, archive)
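# Illustrative sketch (not part of the original task): for an ivy-fetched
# archive 'org.pantsbuild.example-1.0.aar', the helpers above resolve to
# (paths relative to this task's workdir):
#
#   unpacked_aar_location('org.pantsbuild.example-1.0.aar')
#       -> <workdir>/org.pantsbuild.example-1.0.aar
#   unpacked_jar_location('org.pantsbuild.example-1.0.aar/classes.jar')
#       -> <workdir>/explode-jars/org.pantsbuild.example-1.0.aar/classes.jar
#
# The second form matches the synthetic '<archive>/classes.jar' name that
# _unpack_artifacts() records in self._unpacked_archives for .aar imports.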
|
|
#!/usr/bin/env python
# Copyright 2015 VMware, Inc. All rights reserved. -- VMware Confidential
"""
Base script which takes care of all VECS/VMCA/DIR_CLI operations.
"""
__author__ = 'Charudath Gopal ([email protected])'
__copyright__ = 'Copyright 2015, VMware Inc.'
__version__ = 1.0
import subprocess
import logging
import os
import os.path
import getpass
import tempfile
from cis.defaults import def_by_os, get_component_home_dir, get_cis_log_dir
from cis.exceptions import *
from certificateManagerUtils import *
from utils import *
isLinux = os.name == 'posix'
if not isLinux:
import pywintypes
import win32service as w32s
import win32serviceutil as w32su
__SERVICE_CTL_PREFIX = '"' + os.getenv('VMWARE_CIS_HOME',"C:\\Program Files\\VMware\\vCenter Server") +'\\bin\\service-control.bat' + '"'
else:
__SERVICE_CTL_PREFIX = 'service-control'
global password
def cli_path(cli_name):
"""
    This function returns the install path of the provided cli
:param cli_name: Name of cli
:return: Install directory of cli
"""
if cli_name == 'certool':
component_dir = get_component_home_dir(def_by_os('vmca', 'vmcad'))
else:
component_dir = get_component_home_dir(def_by_os('vmafd', 'vmafdd'))
cli_rel_path = def_by_os('bin/%s', '%s.exe') % cli_name
return os.path.join(component_dir, cli_rel_path)
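# Illustrative sketch (not part of the original script): on a Linux appliance
# this typically resolves to paths such as
#   cli_path('vecs-cli') -> <vmafd component home>/bin/vecs-cli
#   cli_path('certool')  -> <vmca component home>/bin/certool
# while on Windows the '<name>.exe' form is used instead; the component home
# itself comes from cis.defaults.get_component_home_dir().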
def start_service(service_name):
"""
This function starts the given service using service-control
:param service_name: Name of service to be started
"""
cmd = __SERVICE_CTL_PREFIX + " --start " + service_name
try:
execute_command(cmd)
except InvokeCommandException as e:
msg = 'Error while starting service : {0}'.format(service_name)
log_error_msg(msg)
e.appendErrorStack(msg)
raise e
def stop_services(service):
"""
This function stops all or non-core services using service-control
    :param service: If service is 'all', all services will be stopped
"""
cmd = __SERVICE_CTL_PREFIX + ' --stop --ignore '
if 'all' in service:
cmd = cmd + ' --all'
try:
execute_command(cmd)
except InvokeCommandException as e:
msg = 'Error while stopping services'
log_error_msg(msg)
e.appendErrorStack(msg)
raise e
logging.info('{0} services stopped successfully.'.format(service))
def start_services(service):
"""
This function starts all or non-core services using service-control
    :param service: If service is 'all', all services will be started
"""
cmd = __SERVICE_CTL_PREFIX + ' --start '
if 'all' in service:
cmd = cmd + ' --all'
try:
execute_command(cmd)
except InvokeCommandException as e:
msg = 'Error while starting services, please see log for more details'
log_error_msg(msg)
e.appendErrorStack(msg)
raise e
logging.info('{0} services started successfully.'.format(service))
def vmafd_machine_id():
"""
This function returns the machine ID of the local machine
:return: machine ID
"""
cli = cli_path('vmafd-cli')
cmd = [cli, 'get-machine-id', '--server-name', 'localhost']
logging.info('Running command : ' + str(cmd))
try:
(code, result, err) = run_command(cmd, None, True)
logging.info('Output : \n {0}'.format(str(result)))
except InvokeCommandException as e:
msg = 'Error in retrieving machine id of local machine.'
e.appendErrorStack(msg)
log_error_msg(msg)
raise e
return result.strip()
def execute_command(cmd, quiet=False):
"""
This function is responsible for executing command
:param cmd: Command to be executed
    :param quiet: Flag to turn off logging for this command
:return:
"""
if '--password' in cmd:
        # Do not print the password
tmp = list(cmd)
tmp[len(tmp)-1] = '*****'
logging.info('Running command : ' + str(tmp))
else:
logging.info("Running command :- " + str(cmd))
    # The service-control command logs to the console even though logs are redirected to a file,
    # so send its output to the default log file instead of the console.
if 'service-control' in cmd:
quiet = True
out_file = open(get_log_file(), 'a')
logging.info('please see service-control.log for service status')
p = subprocess.Popen(cmd, stdout=open(os.devnull, 'w'), stderr=out_file, shell=True)
(output, err) = p.communicate()
code = p.wait()
else:
(code, output, err) = run_command(cmd, None, True)
logging.info("Command output :- \n {0}".format(str(output)))
if isinstance(code, int):
if code == 0:
pass
else:
msg = (str(output))
if quiet:
logging.error(msg)
else:
log_error_msg(msg)
raise InvokeCommandException(msg)
else:
for status in code:
if status == 0:
pass
else:
msg = (str(output))
if quiet:
logging.error(msg)
else:
log_error_msg(msg)
raise InvokeCommandException(msg)
logging.info("Command executed successfully")
def read_and_validate_password():
"""
    This function reads the SSO password from the user and authenticates it; the password
    is then used for subsequent certificate operations
"""
global password
dir_cli = DirCliOps()
log_info_msg('Please provide valid SSO password to perform certificate operations.')
password = getpass.getpass()
result = authenticate_password(dir_cli)
for i in reversed(range(1, 3)):
if result:
logging.info('Authentication successful')
return
else:
log_info_msg('Incorrect Password! Try Again! ({0} attempt/s left)'.format(i))
password = getpass.getpass()
result = authenticate_password(dir_cli)
if result:
logging.info('Authentication successful')
return
log_info_msg(Constants.PASSWORD_ATTEMPTS_ERROR)
exit(1)
def authenticate_password(dir_cli):
"""
Function to authenticate SSO password
:param dir_cli:
:return:
"""
try:
if password.strip() == '':
logging.info('password should not be empty')
return False
dir_cli.get_services_list()
return True
except Exception as e:
return False
class VecsOps():
"""
    This class implements functions that are used to perform VECS operations
"""
def __init__(self):
self._cli = cli_path('vecs-cli')
self._management_node = False
self._infra_node = False
dir_cli = DirCliOps()
services = dir_cli.get_services_list()
if not check_file_exists(get_root_cert_dir() + Constants.ROOT_CERT):
self._management_node = True
self._solution_user_stores = []
stores = self.list_stores()
if 'vpxd' not in stores:
self._infra_node = True
        if Constants.BACKUP_STORE not in stores:
self.store_create(Constants.BACKUP_STORE)
logging.info('Successfully created BACKUP_STORE in VECS')
for store in stores:
for service in services:
if (store in service) and (store not in self._solution_user_stores):
self._solution_user_stores.append(store)
def is_solution_user_store(self, store):
"""
        Function to check whether the given store is a solution user store
        :param store: name of the store
        :return: 'True' if the given store is a solution user store
"""
return store in self._solution_user_stores
def entry_delete(self, store_name, alias):
"""
Function to delete entry in the VECS store
:param store_name: Name of the store
        :param alias: entry alias which needs to be deleted
"""
cmd = [self._cli, 'entry', 'delete',
'-y',
'--store', store_name,
'--alias', alias]
try:
execute_command(cmd)
except InvokeCommandException as e:
msg = 'Error in deleting entry {0} from VECS Store {1}.'.format(alias, store_name)
e.appendErrorStack(msg)
raise e
def store_create(self, store_name):
"""
Function to create store in VECS
:param store_name: Name of the store to be created
"""
cmd = [self._cli, 'store', 'create',
'--name', store_name]
try:
execute_command(cmd)
except InvokeCommandException as e:
msg = 'Error in creating store {0} in vecs.'.format(store_name)
e.appendErrorStack(msg)
raise e
def entry_create(self, store_name, alias, cert_path, private_key_path):
"""
Function to create a new entry in the VECS store
:param store_name: Name of the store where entry needs to be created
:param alias: Alias name for new entry
:param cert_path: certificate file path
:param private_key_path: private key file path
"""
cmd = [self._cli, 'entry', 'create',
'--store', store_name,
'--alias', alias,
'--cert', cert_path]
if len(private_key_path) > 0:
cmd.append('--key')
cmd.append(private_key_path)
try:
execute_command(cmd)
except InvokeCommandException as e:
msg = 'Error in creating a new entry for {0} in VECS Store {1}.'.format(alias, store_name)
e.appendErrorStack(msg)
raise e
def get_cert_file(self, store_name, alias, outFile, quiet=False):
"""
Function to backup cert to a file
:param store_name: Name of the store where certificate resides
:param alias: Alias name of certificate
:param outFile: Certificate output file
:param quiet: Flag to mute logging
"""
cmd = [self._cli, 'entry', 'getcert',
'--store', store_name,
'--alias', alias,
'--output', outFile]
try:
execute_command(cmd, quiet)
except InvokeCommandException as e:
msg = 'Error while creating backup cert file for ' + store_name
e.appendErrorStack(msg)
raise e
def get_key_file(self, store_name, alias, outFile, quiet=False):
"""
Function to backup key to a file
:param store_name: Name of the store where certificate resides
:param alias: Alias name of certificate
:param outFile: Certificate output file
:param quiet: Flag to mute logging
"""
cmd = [self._cli, 'entry', 'getkey',
'--store', store_name,
'--alias', alias,
'--output', outFile]
try:
execute_command(cmd, quiet)
except InvokeCommandException as e:
msg = 'Error while creating backup key file for ' + store_name
e.appendErrorStack(msg)
raise e
def list_stores(self):
"""
        Function to list all VECS stores
:return: Returns available stores
"""
cmd = [self._cli, 'store', 'list']
logging.info('Running command : ' + str(cmd))
try:
(code, result, err) = run_command(cmd, None, True)
logging.info('Output :\n' + str(result))
except InvokeCommandException as e:
msg = 'Error in generating list of VECS store instances.'
log_error_msg(msg)
e.appendErrorStack(msg)
logging.error("Output : " + result)
logging.error("StdErr : " + err)
raise e
return result.splitlines()
def list_entries(self, store_name):
"""
Function to list the entries in the VECS store
        :param store_name: Name of the store whose entries need to be listed
:return: Returns entries from store
"""
cmd = [self._cli, 'entry', 'list', '--store', store_name]
logging.info('Running command : ' + str(cmd))
try:
(code, result, err) = run_command(cmd, None, True)
logging.info('Output :\n' + str(result))
except InvokeCommandException as e:
            msg = 'Error in listing entries in VECS Store {0}.'.format(store_name)
log_error_msg(msg)
e.appendErrorStack(msg)
logging.error("Output : " + result)
logging.error("StdErr : " + err)
raise e
# Just return the aliases
lines = [l for l in result.splitlines() if l.startswith('Alias')]
aliases = [l.split('\t')[1] for l in lines]
return aliases
    def get_cert(self, store, alias):
"""
Function to getcert from VECS store
:param store: Name of store
:param alias: Alias name of the entry
:return: Returns certificate
"""
cmd = [self._cli, 'entry', 'getcert', '--text',
'--store',store,
'--alias', alias]
logging.info('Running command : ' + str(cmd))
try:
(code, result, err) = run_command(cmd, None, True)
logging.info('Output :\n' + str(result))
except InvokeCommandException as e:
msg = 'Error in getting cert.'
log_error_msg(msg)
e.appendErrorStack(msg)
logging.error("Output : " + result)
logging.error("StdErr : " + err)
raise e
return result.strip()
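# Illustrative usage sketch (not part of the original script); store and alias
# values shown are hypothetical examples:
#
#   vecs = VecsOps()                      # needs the global `password` to be set,
#                                         # since __init__ queries dir-cli services
#   stores = vecs.list_stores()           # e.g. ['MACHINE_SSL_CERT', ...]
#   aliases = vecs.list_entries(stores[0])
#   pem = vecs.get_cert(stores[0], aliases[0])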
class DirCliOps():
"""
    This class implements functions that are used to perform DIR-CLI operations
"""
def __init__(self):
self._cli = cli_path('dir-cli')
def update_lotus(self, service, cert, ignoreError=False):
"""
        This function updates the lotus service with the given cert
:param service: Service name of solution user
:param cert: certificate to be used by update command
:param ignoreError: Flag to suppress error in case of revert/rollback operation
"""
logging.info("Update Lotus with the new Solution User Cert using dir-cli for : " + service)
logging.info("Do a service update to update the account with a new cert in Lotus...")
cmd = [self._cli, 'service', 'update',
'--cert', cert,
'--name', service.strip(),
'--password', password]
try:
execute_command(cmd, ignoreError)
except InvokeCommandException as e:
if ignoreError:
msg = 'Ignoring dir-cli update command error thrown while rollback operation'
logging.warning(msg)
return
msg = 'Error while updating lotus for service : ' + service
e.appendErrorStack(msg)
raise e
def trusted_cert_publish(self, cert_path):
"""
Function to publish certificate using dir-cli
:param cert_path: certificate file which needs to be published
"""
cmd = [self._cli, 'trustedcert', 'publish',
'--cert', cert_path,
'--password', password]
try:
execute_command(cmd)
except InvokeCommandException as e:
msg = 'Error while publishing cert using dir-cli.'
e.appendErrorStack(msg)
raise e
def get_services_list(self):
"""
Function to get available services list from lotus
:return: Returns services list from lotus
"""
cmd = [self._cli, 'service', 'list', '--password', password]
        # Do not print the password
tmp = list(cmd)
tmp[len(tmp)-1] = '*****'
logging.info('Running command : ' + str(tmp))
try:
(code, result, err) = run_command(cmd, None, True)
logging.info("Output : \n" + str(result))
if result.strip() == '':
raise InvokeCommandException('Failed to get service list using dir-cli')
except InvokeCommandException as e:
msg = 'Error while getting service account name using dir-cli'
e.appendErrorStack(msg)
logging.error("Output : " + str(result))
logging.error("Error ({0} : {1})".format(str(code), str(err)))
raise e
return result.splitlines()
def get_service_name_for_solution_user(self, solution_user):
"""
Function to parse and return service name for a given solution user
:param solution_user: Solution user name whose service name is required
:return: Returns service name from lotus
"""
machine_id = vmafd_machine_id()
for line in self.get_services_list():
if (solution_user + "-" + machine_id) in line:
return line.rstrip()[3:]
return ''
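# Illustrative sketch (not part of the original script): dir-cli prints the
# service list as numbered lines, e.g. '3. vpxd-<machine-id>', so for
# solution_user == 'vpxd' the leading 'N. ' prefix (three characters) is
# stripped by line.rstrip()[3:] and 'vpxd-<machine-id>' is returned. The exact
# account naming is environment specific; treat this purely as an example.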
class VmcaOps():
"""
    This class implements functions that are used to perform VMCA operations
"""
def __init__(self):
self._cli = cli_path('certool')
def generate_cert(self, service_acc_name, server):
"""
Function to generate certificate for given service account
:param service_acc_name: Name of the store
:param server: Provide PSC/Infra IP in case of distributed env else 'localhost'
"""
# Service account name would be same as VECS store name.
logging.info("Generating cert for store : " + service_acc_name)
logging.info("Generating key pair...")
# Generate Private Key and Public Keys First
cmd = [self._cli,
'--genkey',
'--privkey=' + get_cert_dir() + service_acc_name + Constants.KEY_EXT,
'--pubkey=' + get_cert_dir() + service_acc_name + Constants.PUB_EXT,
'--server=' + server]
try:
execute_command(cmd)
except InvokeCommandException as e:
msg = 'Error in generating Private and Public Keys.'
e.appendErrorStack(msg)
raise e
logging.info("Generating cert...")
cmd = [self._cli,
'--server=' + server,
'--gencert',
'--privkey=' + get_cert_dir() + service_acc_name + Constants.KEY_EXT,
'--cert=' + get_cert_dir() + service_acc_name + Constants.CERT_EXT,
'--config=' + get_dest_config_file_loc()]
try:
execute_command(cmd)
except InvokeCommandException as e:
msg = 'Error in generating cert for store {0}'.format(service_acc_name)
e.appendErrorStack(msg)
raise e
def generate_solution_user_cert(self, service_acc_name, server):
"""
Function to generate solution user certificate
:param service_acc_name: Name of the store
:param server: Provide PSC/Infra IP in case of distributed env else 'localhost'
"""
# Service account name would be same as VECS store name.
logging.info("Generating solution user cert for : " + service_acc_name)
logging.info("Generating key pair...")
# Generate Private Key and Public Keys First
cmd = [self._cli,
'--genkey',
'--privkey=' + get_cert_dir() + service_acc_name + Constants.KEY_EXT,
'--pubkey=' + get_cert_dir() + service_acc_name + Constants.PUB_EXT,
'--server=' + server]
try:
execute_command(cmd)
except InvokeCommandException as e:
msg = 'Error in generating Private and Public Keys.'
e.appendErrorStack(msg)
raise e
logging.info("Generating cert...")
cmd = [self._cli,
'--server=' + server,
'--genCIScert',
'--privkey=' + get_cert_dir() + service_acc_name + Constants.KEY_EXT,
'--cert=' + get_cert_dir() + service_acc_name + Constants.CERT_EXT,
'--Name=' + service_acc_name]
try:
execute_command(cmd)
except InvokeCommandException as e:
msg = 'Error in generating cert for store {0}'.format(service_acc_name)
e.appendErrorStack(msg)
raise e
def get_root_ca(self, server):
"""
Function to get root signing certificate
:param server: Provide PSC/Infra IP in case of distributed env else 'localhost'
:return: Returns root signing certificate as text
"""
cmd = [self._cli, '--getrootca', '--server', server]
logging.info('Running command : ' + str(cmd))
try:
(code, result, err) = run_command(cmd, None, True)
logging.info("Output : \n" + str(result))
if result.strip() == '':
raise InvokeCommandException('Failed to get RootCA')
except InvokeCommandException as e:
msg = 'Error while getting root certificate using certool getRootCa command'
log_error_msg(msg)
e.appendErrorStack(msg)
logging.error("Output : " + str(result))
logging.error("Error ({0} : {1})".format(str(code), str(err)))
raise e
return result
def selfca(self, server):
"""
Function to regenerate Root signing certificate using VMCA
:param server: Provide PSC/Infra IP in case of distributed env else 'localhost'
"""
cmd = [self._cli, '--selfca',
'--config', get_dest_config_file_loc(),
'--server', server]
try:
execute_command(cmd)
except InvokeCommandException as e:
msg = 'Error while generating root cert using selfca command.'
e.appendErrorStack(msg)
raise e
def rootca(self, cert_path, key_path, server):
"""
Function to publish custom certificate as Root signing certificate
:param cert_path: Custom certificate file path
        :param key_path: Custom key file path
        :param server: Provide PSC/Infra IP in case of distributed env else 'localhost'
"""
cmd = [self._cli, '--rootca',
'--cert', cert_path,
'--privkey', key_path,
'--server', server]
try:
execute_command(cmd)
except InvokeCommandException as e:
msg = 'Error while performing certool rootca command'
e.appendErrorStack(msg)
raise e
def generateCSR(self, cert_path, key_output_path, csr_output_path, server):
"""
Function to generate CSR
:param cert_path: certificate file path
        :param key_output_path: output key path
        :param csr_output_path: output csr path
        :param server: Provide PSC/Infra IP in case of distributed env else 'localhost'
"""
if not os.path.isfile(cert_path):
            raise FileNotFoundError('Cannot find certificate file')
pubKeyTempPath = os.path.join(tempfile.gettempdir(), 'pubkey.pub')
logging.info("Generating key ")
cmd = [self._cli, '--genkey',
'--privkey', key_output_path,
'--pubkey' , pubKeyTempPath]
logging.info('Running command: '+ str(cmd))
try:
result = invoke_command(cmd)
except InvokeCommandException as e:
msg = 'Error in generating Private Key'
e.appendErrorStack(msg)
raise e
os.remove(pubKeyTempPath)
cmd = [self._cli, '--gencsrfromcert',
'--privkey', key_output_path,
'--cert',cert_path,
'--csrfile', csr_output_path]
logging.info('Running command: ' + str(cmd))
try:
result = invoke_command(cmd)
except InvokeCommandException as e:
msg = 'Error in generating CSR'
e.appendErrorStack(msg)
raise e
|
|
from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from django.db.models import Q
from django.core.cache import cache
from django.core.exceptions import ValidationError
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from friendship.exceptions import AlreadyExistsError
from friendship.signals import friendship_request_created, \
friendship_request_rejected, friendship_request_canceled, \
friendship_request_viewed, friendship_request_accepted, \
friendship_removed, follower_created, follower_removed, \
followee_created, followee_removed, following_created, following_removed
AUTH_USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
CACHE_TYPES = {
'friends': 'f-%d',
'followers': 'fo-%d',
'following': 'fl-%d',
'requests': 'fr-%d',
'sent_requests': 'sfr-%d',
'unread_requests': 'fru-%d',
'unread_request_count': 'fruc-%d',
'read_requests': 'frr-%d',
'rejected_requests': 'frj-%d',
'unrejected_requests': 'frur-%d',
'unrejected_request_count': 'frurc-%d',
}
BUST_CACHES = {
'friends': ['friends'],
'followers': ['followers'],
'following': ['following'],
'requests': [
'requests',
'unread_requests',
'unread_request_count',
'read_requests',
'rejected_requests',
'unrejected_requests',
'unrejected_request_count',
],
'sent_requests': ['sent_requests'],
}
def cache_key(type, user_pk):
"""
Build the cache key for a particular type of cached value
"""
return CACHE_TYPES[type] % user_pk
def bust_cache(type, user_pk):
"""
Bust our cache for a given type, can bust multiple caches
"""
bust_keys = BUST_CACHES[type]
keys = [CACHE_TYPES[k] % user_pk for k in bust_keys]
cache.delete_many(keys)
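# Illustrative example (not part of the original module): with the templates in
# CACHE_TYPES above,
#   cache_key('friends', 42)   -> 'f-42'
#   bust_cache('requests', 42) -> deletes 'fr-42', 'fru-42', 'fruc-42', 'frr-42',
#                                 'frj-42', 'frur-42' and 'frurc-42' in a single
#                                 cache.delete_many() call.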
@python_2_unicode_compatible
class FriendshipRequest(models.Model):
""" Model to represent friendship requests """
from_user = models.ForeignKey(AUTH_USER_MODEL, related_name='friendship_requests_sent')
to_user = models.ForeignKey(AUTH_USER_MODEL, related_name='friendship_requests_received')
message = models.TextField(_('Message'), blank=True)
created = models.DateTimeField(default=timezone.now)
rejected = models.DateTimeField(blank=True, null=True)
viewed = models.DateTimeField(blank=True, null=True)
class Meta:
verbose_name = _('Friendship Request')
verbose_name_plural = _('Friendship Requests')
unique_together = ('from_user', 'to_user')
def __str__(self):
return "User #%d friendship requested #%d" % (self.from_user_id, self.to_user_id)
def accept(self):
""" Accept this friendship request """
relation1 = Friend.objects.create(
from_user=self.from_user,
to_user=self.to_user
)
relation2 = Friend.objects.create(
from_user=self.to_user,
to_user=self.from_user
)
friendship_request_accepted.send(
sender=self,
from_user=self.from_user,
to_user=self.to_user
)
self.delete()
# Delete any reverse requests
FriendshipRequest.objects.filter(
from_user=self.to_user,
to_user=self.from_user
).delete()
# Bust requests cache - request is deleted
bust_cache('requests', self.to_user.pk)
bust_cache('sent_requests', self.from_user.pk)
# Bust reverse requests cache - reverse request might be deleted
bust_cache('requests', self.from_user.pk)
bust_cache('sent_requests', self.to_user.pk)
# Bust friends cache - new friends added
bust_cache('friends', self.to_user.pk)
bust_cache('friends', self.from_user.pk)
return True
def reject(self):
""" reject this friendship request """
self.rejected = timezone.now()
self.save()
friendship_request_rejected.send(sender=self)
bust_cache('requests', self.to_user.pk)
def cancel(self):
""" cancel this friendship request """
self.delete()
friendship_request_canceled.send(sender=self)
bust_cache('requests', self.to_user.pk)
bust_cache('sent_requests', self.from_user.pk)
return True
def mark_viewed(self):
self.viewed = timezone.now()
friendship_request_viewed.send(sender=self)
self.save()
bust_cache('requests', self.to_user.pk)
return True
class FriendshipManager(models.Manager):
""" Friendship manager """
def friends(self, user):
""" Return a list of all friends """
key = cache_key('friends', user.pk)
friends = cache.get(key)
if friends is None:
qs = Friend.objects.select_related('from_user', 'to_user').filter(to_user=user).all()
friends = [u.from_user for u in qs]
cache.set(key, friends)
return friends
def requests(self, user):
""" Return a list of friendship requests """
key = cache_key('requests', user.pk)
requests = cache.get(key)
if requests is None:
qs = FriendshipRequest.objects.select_related('from_user', 'to_user').filter(
to_user=user).all()
requests = list(qs)
cache.set(key, requests)
return requests
def sent_requests(self, user):
""" Return a list of friendship requests from user """
key = cache_key('sent_requests', user.pk)
requests = cache.get(key)
if requests is None:
qs = FriendshipRequest.objects.select_related('from_user', 'to_user').filter(
from_user=user).all()
requests = list(qs)
cache.set(key, requests)
return requests
def unread_requests(self, user):
""" Return a list of unread friendship requests """
key = cache_key('unread_requests', user.pk)
unread_requests = cache.get(key)
if unread_requests is None:
qs = FriendshipRequest.objects.select_related('from_user', 'to_user').filter(
to_user=user,
viewed__isnull=True).all()
unread_requests = list(qs)
cache.set(key, unread_requests)
return unread_requests
def unread_request_count(self, user):
""" Return a count of unread friendship requests """
key = cache_key('unread_request_count', user.pk)
count = cache.get(key)
if count is None:
count = FriendshipRequest.objects.select_related('from_user', 'to_user').filter(
to_user=user,
viewed__isnull=True).count()
cache.set(key, count)
return count
def read_requests(self, user):
""" Return a list of read friendship requests """
key = cache_key('read_requests', user.pk)
read_requests = cache.get(key)
if read_requests is None:
qs = FriendshipRequest.objects.select_related('from_user', 'to_user').filter(
to_user=user,
viewed__isnull=False).all()
read_requests = list(qs)
cache.set(key, read_requests)
return read_requests
def rejected_requests(self, user):
""" Return a list of rejected friendship requests """
key = cache_key('rejected_requests', user.pk)
rejected_requests = cache.get(key)
if rejected_requests is None:
qs = FriendshipRequest.objects.select_related('from_user', 'to_user').filter(
to_user=user,
rejected__isnull=False).all()
rejected_requests = list(qs)
cache.set(key, rejected_requests)
return rejected_requests
def unrejected_requests(self, user):
""" All requests that haven't been rejected """
key = cache_key('unrejected_requests', user.pk)
unrejected_requests = cache.get(key)
if unrejected_requests is None:
qs = FriendshipRequest.objects.select_related('from_user', 'to_user').filter(
to_user=user,
rejected__isnull=True).all()
unrejected_requests = list(qs)
cache.set(key, unrejected_requests)
return unrejected_requests
def unrejected_request_count(self, user):
""" Return a count of unrejected friendship requests """
key = cache_key('unrejected_request_count', user.pk)
count = cache.get(key)
if count is None:
count = FriendshipRequest.objects.select_related('from_user', 'to_user').filter(
to_user=user,
rejected__isnull=True).count()
cache.set(key, count)
return count
def add_friend(self, from_user, to_user, message=None):
""" Create a friendship request """
if from_user == to_user:
raise ValidationError("Users cannot be friends with themselves")
if message is None:
message = ''
request, created = FriendshipRequest.objects.get_or_create(
from_user=from_user,
to_user=to_user,
message=message,
)
if created is False:
raise AlreadyExistsError("Friendship already requested")
bust_cache('requests', to_user.pk)
bust_cache('sent_requests', from_user.pk)
friendship_request_created.send(sender=request)
return request
def remove_friend(self, to_user, from_user):
""" Destroy a friendship relationship """
try:
qs = Friend.objects.filter(
Q(to_user=to_user, from_user=from_user) |
Q(to_user=from_user, from_user=to_user)
).distinct().all()
if qs:
friendship_removed.send(
sender=qs[0],
from_user=from_user,
to_user=to_user
)
qs.delete()
bust_cache('friends', to_user.pk)
bust_cache('friends', from_user.pk)
return True
else:
return False
except Friend.DoesNotExist:
return False
def are_friends(self, user1, user2):
""" Are these two users friends? """
friends1 = cache.get(cache_key('friends', user1.pk))
friends2 = cache.get(cache_key('friends', user2.pk))
if friends1 and user2 in friends1:
return True
elif friends2 and user1 in friends2:
return True
else:
try:
Friend.objects.get(to_user=user1, from_user=user2)
return True
except Friend.DoesNotExist:
return False
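# Illustrative usage sketch (not part of the original module); `alice` and
# `bob` stand for hypothetical user instances:
#
#   req = Friend.objects.add_friend(alice, bob, message='Hi!')  # FriendshipRequest
#   req.accept()                                 # creates Friend rows both ways
#   Friend.objects.are_friends(alice, bob)       # True
#   Friend.objects.remove_friend(alice, bob)     # removes both directions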
@python_2_unicode_compatible
class Friend(models.Model):
""" Model to represent Friendships """
to_user = models.ForeignKey(AUTH_USER_MODEL, related_name='friends')
from_user = models.ForeignKey(AUTH_USER_MODEL, related_name='_unused_friend_relation')
created = models.DateTimeField(default=timezone.now)
objects = FriendshipManager()
class Meta:
verbose_name = _('Friend')
verbose_name_plural = _('Friends')
unique_together = ('from_user', 'to_user')
def __str__(self):
return "User #%d is friends with #%d" % (self.to_user_id, self.from_user_id)
def save(self, *args, **kwargs):
# Ensure users can't be friends with themselves
if self.to_user == self.from_user:
raise ValidationError("Users cannot be friends with themselves.")
super(Friend, self).save(*args, **kwargs)
class FollowingManager(models.Manager):
""" Following manager """
def followers(self, user):
""" Return a list of all followers """
key = cache_key('followers', user.pk)
followers = cache.get(key)
if followers is None:
qs = Follow.objects.filter(followee=user).all()
followers = [u.follower for u in qs]
cache.set(key, followers)
return followers
def following(self, user):
""" Return a list of all users the given user follows """
key = cache_key('following', user.pk)
following = cache.get(key)
if following is None:
qs = Follow.objects.filter(follower=user).all()
following = [u.followee for u in qs]
cache.set(key, following)
return following
def add_follower(self, follower, followee):
""" Create 'follower' follows 'followee' relationship """
if follower == followee:
raise ValidationError("Users cannot follow themselves")
relation, created = Follow.objects.get_or_create(follower=follower, followee=followee)
if created is False:
raise AlreadyExistsError("User '%s' already follows '%s'" % (follower, followee))
follower_created.send(sender=self, follower=follower)
followee_created.send(sender=self, followee=followee)
following_created.send(sender=self, following=relation)
bust_cache('followers', followee.pk)
bust_cache('following', follower.pk)
return relation
def remove_follower(self, follower, followee):
""" Remove 'follower' follows 'followee' relationship """
try:
rel = Follow.objects.get(follower=follower, followee=followee)
follower_removed.send(sender=rel, follower=rel.follower)
followee_removed.send(sender=rel, followee=rel.followee)
following_removed.send(sender=rel, following=rel)
rel.delete()
bust_cache('followers', followee.pk)
bust_cache('following', follower.pk)
return True
except Follow.DoesNotExist:
return False
def follows(self, follower, followee):
""" Does follower follow followee? Smartly uses caches if exists """
followers = cache.get(cache_key('following', follower.pk))
following = cache.get(cache_key('followers', followee.pk))
if followers and followee in followers:
return True
elif following and follower in following:
return True
else:
try:
Follow.objects.get(follower=follower, followee=followee)
return True
except Follow.DoesNotExist:
return False
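# Illustrative usage sketch (not part of the original module); `alice` and
# `bob` stand for hypothetical user instances:
#
#   Follow.objects.add_follower(alice, bob)    # alice now follows bob
#   Follow.objects.follows(alice, bob)         # True
#   Follow.objects.followers(bob)              # [alice, ...]
#   Follow.objects.remove_follower(alice, bob)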
@python_2_unicode_compatible
class Follow(models.Model):
""" Model to represent Following relationships """
follower = models.ForeignKey(AUTH_USER_MODEL, related_name='following')
followee = models.ForeignKey(AUTH_USER_MODEL, related_name='followers')
created = models.DateTimeField(default=timezone.now)
objects = FollowingManager()
class Meta:
verbose_name = _('Following Relationship')
verbose_name_plural = _('Following Relationships')
unique_together = ('follower', 'followee')
def __str__(self):
return "User #%d follows #%d" % (self.follower_id, self.followee_id)
def save(self, *args, **kwargs):
        # Ensure users can't follow themselves
if self.follower == self.followee:
raise ValidationError("Users cannot follow themselves.")
super(Follow, self).save(*args, **kwargs)
|
|
#!/usr/bin/env python3
from unittest import TestCase
from tests.slackwrapper_mock import SlackWrapperMock
import unittest
from util.loghandler import log, logging
from server.botserver import BotServer
from bottypes.invalid_command import InvalidCommand
class BotBaseTest(TestCase):
def setUp(self):
self.botserver = BotServer()
self.botserver.config = {
"bot_name": "unittest_bot",
"api_key": "unittest_apikey",
"send_help_as_dm": "1",
"admin_users": [
"admin_user"
],
"auto_invite": [],
"wolfram_app_id": "wolfram_dummyapi"
}
self.botserver.slack_wrapper = SlackWrapperMock("testapikey")
self.botserver.init_bot_data()
# replace set_config_option to avoid overwriting original bot configuration.
self.botserver.set_config_option = self.set_config_option_mock
def set_config_option_mock(self, option, value):
if option in self.botserver.config:
self.botserver.config[option] = value
else:
raise InvalidCommand("The specified configuration option doesn't exist: {}".format(option))
def create_slack_wrapper_mock(self, api_key):
return SlackWrapperMock(api_key)
def exec_command(self, msg, exec_user="normal_user", channel="UNITTESTCHANNELID"):
"""Simulate execution of the specified message as the specified user in the test environment."""
testmsg = [{'type': 'message', 'user': exec_user, 'text': msg, 'client_msg_id': '738e4beb-d50e-42a4-a60e-3fafd4bd71da',
'team': 'UNITTESTTEAMID', 'channel': channel, 'event_ts': '1549715670.002000', 'ts': '1549715670.002000'}]
self.botserver.handle_message(testmsg)
def exec_reaction(self, reaction, exec_user="normal_user"):
"""Simulate execution of the specified reaction as the specified user in the test environment."""
testmsg = [{'type': 'reaction_added', 'user': exec_user, 'item': {'type': 'message', 'channel': 'UNITTESTCHANNELID', 'ts': '1549117537.000500'},
'reaction': reaction, 'item_user': 'UNITTESTUSERID', 'event_ts': '1549715822.000800', 'ts': '1549715822.000800'}]
self.botserver.handle_message(testmsg)
def check_for_response_available(self):
return len(self.botserver.slack_wrapper.message_list) > 0
def check_for_response(self, expected_result):
""" Check if the simulated slack responses contain an expected result. """
for msg in self.botserver.slack_wrapper.message_list:
print(msg.message)
if expected_result in msg.message:
return True
return False
class TestSyscallsHandler(BotBaseTest):
def test_available(self):
self.exec_command("!syscalls available")
self.assertTrue(self.check_for_response("Available architectures"),
msg="Available architectures didn't respond correct.")
def test_show_x86_execve(self):
self.exec_command("!syscalls show x86 execve")
self.assertTrue(self.check_for_response("execve"), msg="Didn't receive execve syscall from bot")
self.assertTrue(self.check_for_response("0x0b"),
msg="Didn't receive correct execve syscall no for x86 from bot")
def test_show_amd64_execve(self):
self.exec_command("!syscalls show x64 execve")
self.assertTrue(self.check_for_response("execve"), msg="Didn't receive execve syscall from bot")
self.assertTrue(self.check_for_response("0x3b"),
msg="Didn't receive correct execve syscall no for x64 from bot")
def test_syscall_not_found(self):
self.exec_command("!syscalls show x64 notexist")
self.assertTrue(self.check_for_response("Specified syscall not found"),
msg="Bot didn't respond with expected response on non-existing syscall")
class TestBotHandler(BotBaseTest):
def test_ping(self):
self.exec_command("!bot ping")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertTrue(self.check_for_response("Pong!"), msg="Ping command didn't reply with pong.")
def test_intro(self):
self.exec_command("!bot intro")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response(
"Unknown handler or command"), msg="Intro didn't execute properly.")
def test_version(self):
self.exec_command("!bot version")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response(
"Unknown handler or command"), msg="Version didn't execute properly.")
class TestAdminHandler(BotBaseTest):
def test_show_admins(self):
self.exec_command("!admin show_admins", "admin_user")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="ShowAdmins didn't execute properly.")
self.assertTrue(self.check_for_response("Administrators"),
msg="ShowAdmins didn't reply with expected result.")
def test_add_admin(self):
self.exec_command("!admin add_admin test", "admin_user")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response(
"Unknown handler or command"), msg="AddAdmin didn't execute properly.")
def test_remove_admin(self):
self.exec_command("!admin remove_admin test", "admin_user")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="RemoveAdmin didn't execute properly.")
def test_as(self):
self.exec_command("!admin as @unittest_user1 addchallenge test pwn", "admin_user")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response(
"Unknown handler or command"), msg="As didn't execute properly.")
class TestChallengeHandler(BotBaseTest):
def test_addctf_name_too_long(self):
ctf_name = "unittest_{}".format("A"*50)
self.exec_command("!ctf addctf {} unittest_ctf".format(ctf_name))
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertTrue(self.check_for_response("CTF name must be <= {} characters.".format(40)),
msg="Challenge handler didn't respond with expected result for name_too_long.")
def test_addctf_success(self):
self.exec_command("!ctf addctf test_ctf test_ctf")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertTrue(self.check_for_response("Created channel #test_ctf"),
msg="Challenge handler failed on creating ctf channel.")
def test_addchallenge(self):
self.exec_command("!ctf addchall testchall pwn")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="AddChallenge command didn't execute properly.")
def test_addtag(self):
self.exec_command("!ctf tag laff lawl lull")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="AddChallenge command didn't execute properly.")
def test_removetag(self):
self.exec_command("!ctf tag laff lawl lull")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="AddChallenge command didn't execute properly.")
def test_workon(self):
self.exec_command("!ctf workon test_challenge")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="Workon command didn't execute properly.")
def test_status(self):
self.exec_command("!ctf status")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertTrue(self.check_for_response("Current CTFs"),
msg="Staus command didn't return the correct response")
self.assertTrue(self.check_for_response("Finished CTFs"),
msg="Staus command didn't return the correct response")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="Status command didn't execute properly.")
def test_solve(self):
self.exec_command("!ctf solve testchall")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="Solve command didn't execute properly.")
def test_solve_support(self):
self.exec_command("!ctf solve testchall supporter")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="Solve with supporter didn't execute properly.")
def test_rename_challenge_name(self):
self.exec_command("!ctf renamechallenge testchall test1")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="RenameChallenge didn't execute properly.")
def test_renamectf(self):
self.exec_command("!ctf renamectf testctf test2")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="RenameCTF didn't execute properly.")
def test_reload(self):
self.exec_command("!ctf reload", "admin_user")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertTrue(self.check_for_response(
"Updating CTFs and challenges"), msg="Reload didn't execute properly.")
def test_addcreds(self):
self.exec_command("!ctf addcreds user pw url")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="RenameCTF didn't execute properly.")
def test_endctf(self):
self.exec_command("!ctf endctf", "admin_user")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response(
"Unknown handler or command"), msg="EndCTF didn't execute properly.")
def test_showcreds(self):
self.exec_command("!ctf showcreds")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="RenameCTF didn't execute properly.")
def test_unsolve(self):
self.exec_command("!ctf unsolve testchall")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="RenameCTF didn't execute properly.")
def test_removechallenge(self):
self.exec_command("!ctf removechallenge testchall", "admin_user")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="RenameCTF didn't execute properly.")
def test_roll(self):
self.exec_command("!ctf roll")
self.assertTrue(self.check_for_response_available(),
msg="Bot didn't react on unit test. Check for possible exceptions.")
self.assertFalse(self.check_for_response("Unknown handler or command"),
msg="RenameCTF didn't execute properly.")
def run_tests():
# borrowed from gef test suite (https://github.com/hugsy/gef/blob/dev/tests/runtests.py)
test_instances = [
TestSyscallsHandler,
TestBotHandler,
TestAdminHandler,
TestChallengeHandler
]
# don't show bot debug messages for running tests
log.setLevel(logging.ERROR)
runner = unittest.TextTestRunner(verbosity=3)
total_failures = 0
for test in [unittest.TestLoader().loadTestsFromTestCase(x) for x in test_instances]:
res = runner.run(test)
total_failures += len(res.errors) + len(res.failures)
return total_failures
if __name__ == "__main__":
run_tests()
|
|
# -*- coding: ascii -*-
import sys, os, os.path
import unittest, doctest
try:
    import cPickle as pickle
except ImportError:
    import pickle
from datetime import datetime, time, timedelta, tzinfo
import warnings
if __name__ == '__main__':
# Only munge path if invoked as a script. Testrunners should have setup
# the paths already
sys.path.insert(0, os.path.abspath(os.path.join(os.pardir, os.pardir)))
import pytz
from pytz import reference
from pytz.tzfile import _byte_string
from pytz.tzinfo import DstTzInfo, StaticTzInfo
# I test for expected version to ensure the correct version of pytz is
# actually being tested.
EXPECTED_VERSION='2013d'
fmt = '%Y-%m-%d %H:%M:%S %Z%z'
NOTIME = timedelta(0)
# GMT is a tzinfo.StaticTzInfo--the class we primarily want to test--while
# UTC is the reference implementation. They both have the same timezone meaning.
UTC = pytz.timezone('UTC')
GMT = pytz.timezone('GMT')
assert isinstance(GMT, StaticTzInfo), 'GMT is no longer a StaticTzInfo'
def prettydt(dt):
"""datetime as a string using a known format.
We don't use strftime as it doesn't handle years earlier than 1900
per http://bugs.python.org/issue1777412
"""
if dt.utcoffset() >= timedelta(0):
offset = '+%s' % (dt.utcoffset(),)
else:
offset = '-%s' % (-1 * dt.utcoffset(),)
return '%04d-%02d-%02d %02d:%02d:%02d %s %s' % (
dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.tzname(), offset)
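# Illustrative sketch (not part of the original suite): prettydt() formats
# dates that strftime() rejects on older Pythons, such as years before 1900.
def _example_prettydt():
    dt = datetime(1850, 1, 1, 12, 0, 0, tzinfo=GMT)
    return prettydt(dt)  # '1850-01-01 12:00:00 GMT +0:00:00'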
try:
    unicode
except NameError:
    # Python 3.x doesn't have unicode(), making writing code
    # for Python 2.3 and Python 3.x a pain.
    unicode = str
class BasicTest(unittest.TestCase):
def testVersion(self):
# Ensuring the correct version of pytz has been loaded
self.assertEqual(EXPECTED_VERSION, pytz.__version__,
'Incorrect pytz version loaded. Import path is stuffed '
'or this test needs updating. (Wanted %s, got %s)'
% (EXPECTED_VERSION, pytz.__version__)
)
def testGMT(self):
now = datetime.now(tz=GMT)
self.assertTrue(now.utcoffset() == NOTIME)
self.assertTrue(now.dst() == NOTIME)
self.assertTrue(now.timetuple() == now.utctimetuple())
self.assertTrue(now==now.replace(tzinfo=UTC))
def testReferenceUTC(self):
now = datetime.now(tz=UTC)
self.assertTrue(now.utcoffset() == NOTIME)
self.assertTrue(now.dst() == NOTIME)
self.assertTrue(now.timetuple() == now.utctimetuple())
def testUnknownOffsets(self):
# This tzinfo behavior is required to make
# datetime.time.{utcoffset, dst, tzname} work as documented.
dst_tz = pytz.timezone('US/Eastern')
# This information is not known when we don't have a date,
# so return None per API.
self.assertTrue(dst_tz.utcoffset(None) is None)
self.assertTrue(dst_tz.dst(None) is None)
# We don't know the abbreviation, but this is still a valid
# tzname per the Python documentation.
self.assertEqual(dst_tz.tzname(None), 'US/Eastern')
def clearCache(self):
pytz._tzinfo_cache.clear()
def testUnicodeTimezone(self):
# We need to ensure that cold lookups work for both Unicode
# and traditional strings, and that the desired singleton is
# returned.
self.clearCache()
eastern = pytz.timezone(str('US/Eastern'))
self.assertTrue(eastern is pytz.timezone('US/Eastern'))
self.clearCache()
eastern = pytz.timezone('US/Eastern')
self.assertTrue(eastern is pytz.timezone(str('US/Eastern')))
class PicklingTest(unittest.TestCase):
def _roundtrip_tzinfo(self, tz):
p = pickle.dumps(tz)
unpickled_tz = pickle.loads(p)
self.assertTrue(tz is unpickled_tz, '%s did not roundtrip' % tz.zone)
def _roundtrip_datetime(self, dt):
# Ensure that the tzinfo attached to a datetime instance
# is identical to the one returned. This is important for
# DST timezones, as some state is stored in the tzinfo.
tz = dt.tzinfo
p = pickle.dumps(dt)
unpickled_dt = pickle.loads(p)
unpickled_tz = unpickled_dt.tzinfo
self.assertTrue(tz is unpickled_tz, '%s did not roundtrip' % tz.zone)
def testDst(self):
tz = pytz.timezone('Europe/Amsterdam')
dt = datetime(2004, 2, 1, 0, 0, 0)
for localized_tz in list(tz._tzinfos.values()):
self._roundtrip_tzinfo(localized_tz)
self._roundtrip_datetime(dt.replace(tzinfo=localized_tz))
def testRoundtrip(self):
dt = datetime(2004, 2, 1, 0, 0, 0)
for zone in pytz.all_timezones:
tz = pytz.timezone(zone)
self._roundtrip_tzinfo(tz)
def testDatabaseFixes(self):
# Hack the pickle to make it refer to a timezone abbreviation
# that does not match anything. The unpickler should be able
# to repair this case
tz = pytz.timezone('Australia/Melbourne')
p = pickle.dumps(tz)
tzname = tz._tzname
hacked_p = p.replace(_byte_string(tzname), _byte_string('???'))
self.assertNotEqual(p, hacked_p)
unpickled_tz = pickle.loads(hacked_p)
self.assertTrue(tz is unpickled_tz)
# Simulate a database correction. In this case, the incorrect
# data will continue to be used.
p = pickle.dumps(tz)
new_utcoffset = tz._utcoffset.seconds + 42
# Python 3 introduced a new pickle protocol where numbers are stored in
# hexadecimal representation. Here we extract the pickle
# representation of the number for the current Python version.
old_pickle_pattern = pickle.dumps(tz._utcoffset.seconds)[3:-1]
new_pickle_pattern = pickle.dumps(new_utcoffset)[3:-1]
hacked_p = p.replace(old_pickle_pattern, new_pickle_pattern)
self.assertNotEqual(p, hacked_p)
unpickled_tz = pickle.loads(hacked_p)
self.assertEqual(unpickled_tz._utcoffset.seconds, new_utcoffset)
self.assertTrue(tz is not unpickled_tz)
def testOldPickles(self):
# Ensure that applications serializing pytz instances as pickles
# have no troubles upgrading to a new pytz release. These pickles
# where created with pytz2006j
east1 = pickle.loads(_byte_string(
"cpytz\n_p\np1\n(S'US/Eastern'\np2\nI-18000\n"
"I0\nS'EST'\np3\ntRp4\n."
))
east2 = pytz.timezone('US/Eastern')
self.assertTrue(east1 is east2)
# Confirm changes in name munging between 2006j and 2007c cause
# no problems.
pap1 = pickle.loads(_byte_string(
"cpytz\n_p\np1\n(S'America/Port_minus_au_minus_Prince'"
"\np2\nI-17340\nI0\nS'PPMT'\np3\ntRp4\n."))
pap2 = pytz.timezone('America/Port-au-Prince')
self.assertTrue(pap1 is pap2)
gmt1 = pickle.loads(_byte_string(
"cpytz\n_p\np1\n(S'Etc/GMT_plus_10'\np2\ntRp3\n."))
gmt2 = pytz.timezone('Etc/GMT+10')
self.assertTrue(gmt1 is gmt2)
class USEasternDSTStartTestCase(unittest.TestCase):
tzinfo = pytz.timezone('US/Eastern')
# 24 hours before DST changeover
transition_time = datetime(2002, 4, 7, 7, 0, 0, tzinfo=UTC)
# Increase for 'flexible' DST transitions due to 1 minute granularity
# of Python's datetime library
instant = timedelta(seconds=1)
# before transition
before = {
'tzname': 'EST',
'utcoffset': timedelta(hours = -5),
'dst': timedelta(hours = 0),
}
# after transition
after = {
'tzname': 'EDT',
'utcoffset': timedelta(hours = -4),
'dst': timedelta(hours = 1),
}
def _test_tzname(self, utc_dt, wanted):
tzname = wanted['tzname']
dt = utc_dt.astimezone(self.tzinfo)
self.assertEqual(dt.tzname(), tzname,
'Expected %s as tzname for %s. Got %s' % (
tzname, str(utc_dt), dt.tzname()
)
)
def _test_utcoffset(self, utc_dt, wanted):
utcoffset = wanted['utcoffset']
dt = utc_dt.astimezone(self.tzinfo)
self.assertEqual(
dt.utcoffset(), wanted['utcoffset'],
'Expected %s as utcoffset for %s. Got %s' % (
utcoffset, utc_dt, dt.utcoffset()
)
)
def _test_dst(self, utc_dt, wanted):
dst = wanted['dst']
dt = utc_dt.astimezone(self.tzinfo)
self.assertEqual(dt.dst(),dst,
'Expected %s as dst for %s. Got %s' % (
dst, utc_dt, dt.dst()
)
)
def test_arithmetic(self):
utc_dt = self.transition_time
for days in range(-420, 720, 20):
delta = timedelta(days=days)
# Make sure we can get back where we started
dt = utc_dt.astimezone(self.tzinfo)
dt2 = dt + delta
dt2 = dt2 - delta
self.assertEqual(dt, dt2)
# Make sure arithmetic crossing DST boundaries ends
# up in the correct timezone after normalization
utc_plus_delta = (utc_dt + delta).astimezone(self.tzinfo)
local_plus_delta = self.tzinfo.normalize(dt + delta)
self.assertEqual(
prettydt(utc_plus_delta),
prettydt(local_plus_delta),
'Incorrect result for delta==%d days. Wanted %r. Got %r'%(
days,
prettydt(utc_plus_delta),
prettydt(local_plus_delta),
)
)
def _test_all(self, utc_dt, wanted):
self._test_utcoffset(utc_dt, wanted)
self._test_tzname(utc_dt, wanted)
self._test_dst(utc_dt, wanted)
def testDayBefore(self):
self._test_all(
self.transition_time - timedelta(days=1), self.before
)
def testTwoHoursBefore(self):
self._test_all(
self.transition_time - timedelta(hours=2), self.before
)
def testHourBefore(self):
self._test_all(
self.transition_time - timedelta(hours=1), self.before
)
def testInstantBefore(self):
self._test_all(
self.transition_time - self.instant, self.before
)
def testTransition(self):
self._test_all(
self.transition_time, self.after
)
def testInstantAfter(self):
self._test_all(
self.transition_time + self.instant, self.after
)
def testHourAfter(self):
self._test_all(
self.transition_time + timedelta(hours=1), self.after
)
def testTwoHoursAfter(self):
self._test_all(
self.transition_time + timedelta(hours=2), self.after
)
def testDayAfter(self):
self._test_all(
self.transition_time + timedelta(days=1), self.after
)
class USEasternDSTEndTestCase(USEasternDSTStartTestCase):
tzinfo = pytz.timezone('US/Eastern')
transition_time = datetime(2002, 10, 27, 6, 0, 0, tzinfo=UTC)
before = {
'tzname': 'EDT',
'utcoffset': timedelta(hours = -4),
'dst': timedelta(hours = 1),
}
after = {
'tzname': 'EST',
'utcoffset': timedelta(hours = -5),
'dst': timedelta(hours = 0),
}
class USEasternEPTStartTestCase(USEasternDSTStartTestCase):
transition_time = datetime(1945, 8, 14, 23, 0, 0, tzinfo=UTC)
before = {
'tzname': 'EWT',
'utcoffset': timedelta(hours = -4),
'dst': timedelta(hours = 1),
}
after = {
'tzname': 'EPT',
'utcoffset': timedelta(hours = -4),
'dst': timedelta(hours = 1),
}
class USEasternEPTEndTestCase(USEasternDSTStartTestCase):
transition_time = datetime(1945, 9, 30, 6, 0, 0, tzinfo=UTC)
before = {
'tzname': 'EPT',
'utcoffset': timedelta(hours = -4),
'dst': timedelta(hours = 1),
}
after = {
'tzname': 'EST',
'utcoffset': timedelta(hours = -5),
'dst': timedelta(hours = 0),
}
class WarsawWMTEndTestCase(USEasternDSTStartTestCase):
# In 1915, Warsaw changed from Warsaw to Central European time.
# This involved the clocks being set backwards, causing a end-of-DST
# like situation without DST being involved.
tzinfo = pytz.timezone('Europe/Warsaw')
transition_time = datetime(1915, 8, 4, 22, 36, 0, tzinfo=UTC)
before = {
'tzname': 'WMT',
'utcoffset': timedelta(hours=1, minutes=24),
'dst': timedelta(0),
}
after = {
'tzname': 'CET',
'utcoffset': timedelta(hours=1),
'dst': timedelta(0),
}
class VilniusWMTEndTestCase(USEasternDSTStartTestCase):
# At the end of 1916, Vilnius changed timezones putting its clock
# forward by 11 minutes 35 seconds. Neither timezone was in DST mode.
tzinfo = pytz.timezone('Europe/Vilnius')
instant = timedelta(seconds=31)
transition_time = datetime(1916, 12, 31, 22, 36, 00, tzinfo=UTC)
before = {
'tzname': 'WMT',
'utcoffset': timedelta(hours=1, minutes=24),
'dst': timedelta(0),
}
after = {
'tzname': 'KMT',
'utcoffset': timedelta(hours=1, minutes=36), # Really 1:35:36
'dst': timedelta(0),
}
class VilniusCESTStartTestCase(USEasternDSTStartTestCase):
# In 1941, Vilnius changed from MSK to CEST, switching to summer
# time while simultaneously reducing its UTC offset by two hours,
# causing the clocks to go backwards for this summer time
# switchover.
tzinfo = pytz.timezone('Europe/Vilnius')
transition_time = datetime(1941, 6, 23, 21, 00, 00, tzinfo=UTC)
before = {
'tzname': 'MSK',
'utcoffset': timedelta(hours=3),
'dst': timedelta(0),
}
after = {
'tzname': 'CEST',
'utcoffset': timedelta(hours=2),
'dst': timedelta(hours=1),
}
class LondonHistoryStartTestCase(USEasternDSTStartTestCase):
# The first known timezone transition in London was in 1847 when
# clocks were synchronized to GMT. However, we currently only
# understand v1 format tzfile(5) files, which do not handle years
# this far in the past, so our earliest known transition is in
# 1916.
tzinfo = pytz.timezone('Europe/London')
# transition_time = datetime(1847, 12, 1, 1, 15, 00, tzinfo=UTC)
# before = {
# 'tzname': 'LMT',
# 'utcoffset': timedelta(minutes=-75),
# 'dst': timedelta(0),
# }
# after = {
# 'tzname': 'GMT',
# 'utcoffset': timedelta(0),
# 'dst': timedelta(0),
# }
transition_time = datetime(1916, 5, 21, 2, 00, 00, tzinfo=UTC)
before = {
'tzname': 'GMT',
'utcoffset': timedelta(0),
'dst': timedelta(0),
}
after = {
'tzname': 'BST',
'utcoffset': timedelta(hours=1),
'dst': timedelta(hours=1),
}
class LondonHistoryEndTestCase(USEasternDSTStartTestCase):
# Timezone switchovers are projected into the future, even
# though no official statements exist or could be believed even
# if they did exist. We currently only check the last known
# transition in 2037, as we are still using v1 format tzfile(5)
# files.
tzinfo = pytz.timezone('Europe/London')
# transition_time = datetime(2499, 10, 25, 1, 0, 0, tzinfo=UTC)
transition_time = datetime(2037, 10, 25, 1, 0, 0, tzinfo=UTC)
before = {
'tzname': 'BST',
'utcoffset': timedelta(hours=1),
'dst': timedelta(hours=1),
}
after = {
'tzname': 'GMT',
'utcoffset': timedelta(0),
'dst': timedelta(0),
}
class NoumeaHistoryStartTestCase(USEasternDSTStartTestCase):
# Noumea adopted a whole hour offset in 1912. Previously
# it was 11 hours, 5 minutes and 48 seconds off UTC. However,
# due to limitations of the Python datetime library, we need
# to round that to 11 hours 6 minutes.
tzinfo = pytz.timezone('Pacific/Noumea')
transition_time = datetime(1912, 1, 12, 12, 54, 12, tzinfo=UTC)
before = {
'tzname': 'LMT',
'utcoffset': timedelta(hours=11, minutes=6),
'dst': timedelta(0),
}
after = {
'tzname': 'NCT',
'utcoffset': timedelta(hours=11),
'dst': timedelta(0),
}
class NoumeaDSTEndTestCase(USEasternDSTStartTestCase):
# Noumea dropped DST in 1997.
tzinfo = pytz.timezone('Pacific/Noumea')
transition_time = datetime(1997, 3, 1, 15, 00, 00, tzinfo=UTC)
before = {
'tzname': 'NCST',
'utcoffset': timedelta(hours=12),
'dst': timedelta(hours=1),
}
after = {
'tzname': 'NCT',
'utcoffset': timedelta(hours=11),
'dst': timedelta(0),
}
class NoumeaNoMoreDSTTestCase(NoumeaDSTEndTestCase):
# Noumea dropped DST in 1997. Here we test that it stops occurring.
transition_time = (
NoumeaDSTEndTestCase.transition_time + timedelta(days=365*10))
before = NoumeaDSTEndTestCase.after
after = NoumeaDSTEndTestCase.after
class TahitiTestCase(USEasternDSTStartTestCase):
# Tahiti has had a single transition in its history.
tzinfo = pytz.timezone('Pacific/Tahiti')
transition_time = datetime(1912, 10, 1, 9, 58, 16, tzinfo=UTC)
before = {
'tzname': 'LMT',
'utcoffset': timedelta(hours=-9, minutes=-58),
'dst': timedelta(0),
}
after = {
'tzname': 'TAHT',
'utcoffset': timedelta(hours=-10),
'dst': timedelta(0),
}
class SamoaInternationalDateLineChange(USEasternDSTStartTestCase):
# At the end of 2011, Samoa will switch from being east of the
# international dateline to the west. There will be no Dec 30th
# 2011 and it will switch from UTC-10 to UTC+14.
tzinfo = pytz.timezone('Pacific/Apia')
transition_time = datetime(2011, 12, 30, 10, 0, 0, tzinfo=UTC)
before = {
'tzname': 'WSDT',
'utcoffset': timedelta(hours=-10),
'dst': timedelta(hours=1),
}
after = {
'tzname': 'WSDT',
'utcoffset': timedelta(hours=14),
'dst': timedelta(hours=1),
}
class ReferenceUSEasternDSTStartTestCase(USEasternDSTStartTestCase):
tzinfo = reference.Eastern
def test_arithmetic(self):
# Reference implementation cannot handle this
pass
class ReferenceUSEasternDSTEndTestCase(USEasternDSTEndTestCase):
tzinfo = reference.Eastern
def testHourBefore(self):
# Python's datetime library has a bug, where the hour before
# a daylight savings transition is one hour out. For example,
# at the end of US/Eastern daylight savings time, 01:00 EST
# occurs twice (once at 05:00 UTC and once at 06:00 UTC),
# whereas the first should actually be 01:00 EDT.
# Note that this bug is by design - by accepting this ambiguity
# for one hour per year, an is_dst flag on datetime.time
# became unnecessary.
self._test_all(
self.transition_time - timedelta(hours=1), self.after
)
def testInstantBefore(self):
self._test_all(
self.transition_time - timedelta(seconds=1), self.after
)
def test_arithmetic(self):
# Reference implementation cannot handle this
pass
class LocalTestCase(unittest.TestCase):
def testLocalize(self):
loc_tz = pytz.timezone('Europe/Amsterdam')
loc_time = loc_tz.localize(datetime(1930, 5, 10, 0, 0, 0))
# Actually +00:19:32, but Python datetime rounds this
self.assertEqual(loc_time.strftime('%Z%z'), 'AMT+0020')
loc_time = loc_tz.localize(datetime(1930, 5, 20, 0, 0, 0))
# Actually +00:19:32, but Python datetime rounds this
self.assertEqual(loc_time.strftime('%Z%z'), 'NST+0120')
loc_time = loc_tz.localize(datetime(1940, 5, 10, 0, 0, 0))
self.assertEqual(loc_time.strftime('%Z%z'), 'NET+0020')
loc_time = loc_tz.localize(datetime(1940, 5, 20, 0, 0, 0))
self.assertEqual(loc_time.strftime('%Z%z'), 'CEST+0200')
loc_time = loc_tz.localize(datetime(2004, 2, 1, 0, 0, 0))
self.assertEqual(loc_time.strftime('%Z%z'), 'CET+0100')
loc_time = loc_tz.localize(datetime(2004, 4, 1, 0, 0, 0))
self.assertEqual(loc_time.strftime('%Z%z'), 'CEST+0200')
tz = pytz.timezone('Europe/Amsterdam')
loc_time = loc_tz.localize(datetime(1943, 3, 29, 1, 59, 59))
self.assertEqual(loc_time.strftime('%Z%z'), 'CET+0100')
# Switch to US
loc_tz = pytz.timezone('US/Eastern')
# End of DST ambiguity check
loc_time = loc_tz.localize(datetime(1918, 10, 27, 1, 59, 59), is_dst=1)
self.assertEqual(loc_time.strftime('%Z%z'), 'EDT-0400')
loc_time = loc_tz.localize(datetime(1918, 10, 27, 1, 59, 59), is_dst=0)
self.assertEqual(loc_time.strftime('%Z%z'), 'EST-0500')
self.assertRaises(pytz.AmbiguousTimeError,
loc_tz.localize, datetime(1918, 10, 27, 1, 59, 59), is_dst=None
)
# Start of DST non-existent times
loc_time = loc_tz.localize(datetime(1918, 3, 31, 2, 0, 0), is_dst=0)
self.assertEqual(loc_time.strftime('%Z%z'), 'EST-0500')
loc_time = loc_tz.localize(datetime(1918, 3, 31, 2, 0, 0), is_dst=1)
self.assertEqual(loc_time.strftime('%Z%z'), 'EDT-0400')
self.assertRaises(pytz.NonExistentTimeError,
loc_tz.localize, datetime(1918, 3, 31, 2, 0, 0), is_dst=None
)
# Weird changes - war time and peace time both is_dst==True
loc_time = loc_tz.localize(datetime(1942, 2, 9, 3, 0, 0))
self.assertEqual(loc_time.strftime('%Z%z'), 'EWT-0400')
loc_time = loc_tz.localize(datetime(1945, 8, 14, 19, 0, 0))
self.assertEqual(loc_time.strftime('%Z%z'), 'EPT-0400')
loc_time = loc_tz.localize(datetime(1945, 9, 30, 1, 0, 0), is_dst=1)
self.assertEqual(loc_time.strftime('%Z%z'), 'EPT-0400')
loc_time = loc_tz.localize(datetime(1945, 9, 30, 1, 0, 0), is_dst=0)
self.assertEqual(loc_time.strftime('%Z%z'), 'EST-0500')
def testNormalize(self):
tz = pytz.timezone('US/Eastern')
dt = datetime(2004, 4, 4, 7, 0, 0, tzinfo=UTC).astimezone(tz)
dt2 = dt - timedelta(minutes=10)
self.assertEqual(
dt2.strftime('%Y-%m-%d %H:%M:%S %Z%z'),
'2004-04-04 02:50:00 EDT-0400'
)
dt2 = tz.normalize(dt2)
self.assertEqual(
dt2.strftime('%Y-%m-%d %H:%M:%S %Z%z'),
'2004-04-04 01:50:00 EST-0500'
)
def testPartialMinuteOffsets(self):
# utcoffset in Amsterdam was not a whole minute until 1937.
# However, we fudge this by rounding them, as the Python
# datetime library only supports whole-minute UTC offsets.
tz = pytz.timezone('Europe/Amsterdam')
utc_dt = datetime(1914, 1, 1, 13, 40, 28, tzinfo=UTC) # correct
utc_dt = utc_dt.replace(second=0) # But we need to fudge it
loc_dt = utc_dt.astimezone(tz)
self.assertEqual(
loc_dt.strftime('%Y-%m-%d %H:%M:%S %Z%z'),
'1914-01-01 14:00:00 AMT+0020'
)
# And get back...
utc_dt = loc_dt.astimezone(UTC)
self.assertEqual(
utc_dt.strftime('%Y-%m-%d %H:%M:%S %Z%z'),
'1914-01-01 13:40:00 UTC+0000'
)
def no_testCreateLocaltime(self):
# It would be nice if this worked, but it doesn't.
tz = pytz.timezone('Europe/Amsterdam')
dt = datetime(2004, 10, 31, 2, 0, 0, tzinfo=tz)
self.assertEqual(
dt.strftime(fmt),
'2004-10-31 02:00:00 CET+0100'
)
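# Illustrative sketch (not part of the original suite): the supported way to
# build a localized datetime with a DstTzInfo zone is localize(), rather than
# passing tzinfo= to the constructor as no_testCreateLocaltime notes above.
def _example_correct_localize():
    tz = pytz.timezone('Europe/Amsterdam')
    loc_dt = tz.localize(datetime(2004, 10, 31, 2, 0, 0), is_dst=False)
    return loc_dt.strftime(fmt)  # '2004-10-31 02:00:00 CET+0100'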
class CommonTimezonesTestCase(unittest.TestCase):
def test_bratislava(self):
# Bratislava is the default timezone for Slovakia, but our
# heuristics were not adding it to common_timezones. Ideally,
# common_timezones should be populated from zone.tab at runtime,
# but I'm hesitant to pay the startup cost as loading the list
# on demand whilst remaining backwards compatible seems
# difficult.
self.assertTrue('Europe/Bratislava' in pytz.common_timezones)
self.assertTrue('Europe/Bratislava' in pytz.common_timezones_set)
def test_us_eastern(self):
self.assertTrue('US/Eastern' in pytz.common_timezones)
self.assertTrue('US/Eastern' in pytz.common_timezones_set)
def test_belfast(self):
# Belfast uses London time.
self.assertTrue('Europe/Belfast' in pytz.all_timezones_set)
self.assertFalse('Europe/Belfast' in pytz.common_timezones)
self.assertFalse('Europe/Belfast' in pytz.common_timezones_set)
class BaseTzInfoTestCase:
'''Ensure UTC, StaticTzInfo and DstTzInfo work consistently.
These tests are run for each type of tzinfo.
'''
tz = None # override
tz_class = None # override
def test_expectedclass(self):
self.assertTrue(isinstance(self.tz, self.tz_class))
def test_fromutc(self):
# naive datetime.
dt1 = datetime(2011, 10, 31)
# localized datetime, same timezone.
dt2 = self.tz.localize(dt1)
# Both should give the same results. Note that the standard
# Python tzinfo.fromutc() only supports the second.
for dt in [dt1, dt2]:
loc_dt = self.tz.fromutc(dt)
loc_dt2 = pytz.utc.localize(dt1).astimezone(self.tz)
self.assertEqual(loc_dt, loc_dt2)
# localized datetime, different timezone.
new_tz = pytz.timezone('Europe/Paris')
self.assertTrue(self.tz is not new_tz)
dt3 = new_tz.localize(dt1)
self.assertRaises(ValueError, self.tz.fromutc, dt3)
def test_normalize(self):
other_tz = pytz.timezone('Europe/Paris')
self.assertTrue(self.tz is not other_tz)
dt = datetime(2012, 3, 26, 12, 0)
other_dt = other_tz.localize(dt)
local_dt = self.tz.normalize(other_dt)
self.assertTrue(local_dt.tzinfo is not other_dt.tzinfo)
self.assertNotEqual(
local_dt.replace(tzinfo=None), other_dt.replace(tzinfo=None))
def test_astimezone(self):
other_tz = pytz.timezone('Europe/Paris')
self.assertTrue(self.tz is not other_tz)
dt = datetime(2012, 3, 26, 12, 0)
other_dt = other_tz.localize(dt)
local_dt = other_dt.astimezone(self.tz)
self.assertTrue(local_dt.tzinfo is not other_dt.tzinfo)
self.assertNotEqual(
local_dt.replace(tzinfo=None), other_dt.replace(tzinfo=None))
class OptimizedUTCTestCase(unittest.TestCase, BaseTzInfoTestCase):
tz = pytz.utc
tz_class = tz.__class__
class LegacyUTCTestCase(unittest.TestCase, BaseTzInfoTestCase):
# Deprecated timezone, but useful for comparison tests.
tz = pytz.timezone('Etc/UTC')
tz_class = StaticTzInfo
class StaticTzInfoTestCase(unittest.TestCase, BaseTzInfoTestCase):
tz = pytz.timezone('GMT')
tz_class = StaticTzInfo
class DstTzInfoTestCase(unittest.TestCase, BaseTzInfoTestCase):
tz = pytz.timezone('Australia/Melbourne')
tz_class = DstTzInfo
def test_suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite('pytz'))
suite.addTest(doctest.DocTestSuite('pytz.tzinfo'))
import test_tzinfo
suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(test_tzinfo))
return suite
if __name__ == '__main__':
warnings.simplefilter("error") # Warnings should be fatal in tests.
unittest.main(defaultTest='test_suite')
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Emmanuel Blot <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Include a basic SMTP server, based on L. Smithson
# ([email protected]) extensible Python SMTP Server
#
# This file does not contain unit tests, but provides a set of
# classes to run SMTP notification tests
#
import socket
import string
import threading
import re
import base64
import quopri
LF = '\n'
CR = '\r'
email_re = re.compile(r"([\w\d_\.\-])+\@(([\w\d\-])+\.)+([\w\d]{2,4})+")
header_re = re.compile(r'^=\?(?P<charset>[\w\d\-]+)\?(?P<code>[qb])\?(?P<value>.*)\?=$')
class SMTPServerInterface:
"""
A base class for the implementation of an application-specific SMTP
Server. Applications should subclass this and override these
methods, which by default do nothing.
A method is defined for each RFC821 command. For each of these
methods, 'args' is the complete command received from the
client. The 'data' method is called after all of the client DATA
is received.
If a method returns 'None', then a '250 OK' message is
automatically sent to the client. If a subclass returns a non-null
string then it is returned instead.
"""
def helo(self, args):
return None
def mail_from(self, args):
return None
def rcpt_to(self, args):
return None
def data(self, args):
return None
def quit(self, args):
return None
def reset(self, args):
return None
#
# Some helper functions for manipulating from & to addresses etc.
#
def strip_address(address):
"""
Strip the leading & trailing <> from an address. Handy for
getting FROM: addresses.
"""
start = string.index(address, '<') + 1
end = string.index(address, '>')
return address[start:end]
def split_to(address):
"""
Return 'address' as undressed (host, fulladdress) tuple.
Handy for use with TO: addresses.
"""
start = string.index(address, '<') + 1
sep = string.index(address, '@') + 1
end = string.index(address, '>')
return (address[sep:end], address[start:end],)
#
# This drives the state for a single RFC821 message.
#
class SMTPServerEngine:
"""
Server engine that calls methods on the SMTPServerInterface object
passed at construction time. It is constructed with a bound socket
connection to a client. The 'chug' method drives the state,
returning when the client RFC821 transaction is complete.
"""
ST_INIT = 0
ST_HELO = 1
ST_MAIL = 2
ST_RCPT = 3
ST_DATA = 4
ST_QUIT = 5
def __init__(self, socket, impl):
self.impl = impl
self.socket = socket
self.state = SMTPServerEngine.ST_INIT
def chug(self):
"""
Chug the engine, till QUIT is received from the client. As
each RFC821 message is received, calls are made on the
SMTPServerInterface methods on the object passed at
construction time.
"""
self.socket.send("220 Welcome to Trac notification test server\r\n")
while 1:
data = ''
completeLine = 0
# Make sure an entire line is received before handing off
# to the state engine. Thanks to John Hall for pointing
# this out.
while not completeLine:
try:
lump = self.socket.recv(1024)
if len(lump):
data += lump
if (len(data) >= 2) and data[-2:] == '\r\n':
completeLine = 1
if self.state != SMTPServerEngine.ST_DATA:
rsp, keep = self.do_command(data)
else:
rsp = self.do_data(data)
if rsp == None:
continue
self.socket.send(rsp + "\r\n")
if keep == 0:
self.socket.close()
return
else:
# EOF
return
except socket.error:
return
def do_command(self, data):
"""Process a single SMTP Command"""
cmd = data[0:4]
cmd = string.upper(cmd)
keep = 1
rv = None
if cmd == "HELO":
self.state = SMTPServerEngine.ST_HELO
rv = self.impl.helo(data[5:])
elif cmd == "RSET":
rv = self.impl.reset(data[5:])
self.data_accum = ""
self.state = SMTPServerEngine.ST_INIT
elif cmd == "NOOP":
pass
elif cmd == "QUIT":
rv = self.impl.quit(data[5:])
keep = 0
elif cmd == "MAIL":
if self.state != SMTPServerEngine.ST_HELO:
return ("503 Bad command sequence", 1)
self.state = SMTPServerEngine.ST_MAIL
rv = self.impl.mail_from(data[5:])
elif cmd == "RCPT":
if (self.state != SMTPServerEngine.ST_MAIL) and \
(self.state != SMTPServerEngine.ST_RCPT):
return ("503 Bad command sequence", 1)
self.state = SMTPServerEngine.ST_RCPT
rv = self.impl.rcpt_to(data[5:])
elif cmd == "DATA":
if self.state != SMTPServerEngine.ST_RCPT:
return ("503 Bad command sequence", 1)
self.state = SMTPServerEngine.ST_DATA
self.data_accum = ""
return ("354 OK, Enter data, terminated with a \\r\\n.\\r\\n", 1)
else:
return ("505 Eh? WTF was that?", 1)
if rv:
return (rv, keep)
else:
return("250 OK", keep)
def do_data(self, data):
"""
Process SMTP Data. Accumulates client DATA until the
terminator is found.
"""
self.data_accum = self.data_accum + data
if len(self.data_accum) > 4 and self.data_accum[-5:] == '\r\n.\r\n':
self.data_accum = self.data_accum[:-5]
rv = self.impl.data(self.data_accum)
self.state = SMTPServerEngine.ST_HELO
if rv:
return rv
else:
return "250 OK - Data and terminator. found"
else:
return None
class SMTPServer:
"""
A single threaded SMTP Server connection manager. Listens for
incoming SMTP connections on a given port. For each connection,
the SMTPServerEngine is chugged, passing the given instance of
SMTPServerInterface.
"""
def __init__(self, port):
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._socket.bind(("127.0.0.1", port))
self._socket_service = None
def serve(self, impl):
while ( self._resume ):
try:
nsd = self._socket.accept()
except socket.error:
return
self._socket_service = nsd[0]
engine = SMTPServerEngine(self._socket_service, impl)
engine.chug()
self._socket_service = None
def start(self):
self._socket.listen(1)
self._resume = True
def stop(self):
self._resume = False
def terminate(self):
if self._socket_service:
# force the blocking socket to stop waiting for data
try:
#self._socket_service.shutdown(2)
self._socket_service.close()
except AttributeError:
# the SMTP server may also discard the socket
pass
self._socket_service = None
if self._socket:
#self._socket.shutdown(2)
self._socket.close()
self._socket = None
class SMTPServerStore(SMTPServerInterface):
"""
Simple store for SMTP data
"""
def __init__(self):
self.reset(None)
def helo(self, args):
self.reset(None)
def mail_from(self, args):
if args.lower().startswith('from:'):
self.sender = strip_address(args[5:].replace('\r\n','').strip())
def rcpt_to(self, args):
if args.lower().startswith('to:'):
rcpt = args[3:].replace('\r\n','').strip()
self.recipients.append(strip_address(rcpt))
def data(self, args):
self.message = args
def quit(self, args):
pass
def reset(self, args):
self.sender = None
self.recipients = []
self.message = None
class SMTPThreadedServer(threading.Thread):
"""
Run an SMTP server for a single connection, within a dedicated thread
"""
def __init__(self, port):
self.port = port
self.server = SMTPServer(port)
self.store = SMTPServerStore()
threading.Thread.__init__(self)
def run(self):
# run from within the SMTP server thread
self.server.serve(impl = self.store)
def start(self):
# run from the main thread
self.server.start()
threading.Thread.start(self)
def stop(self):
# run from the main thread
self.server.stop()
# send a message to make the SMTP server quit gracefully
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect(('127.0.0.1', self.port))
r = s.send("QUIT\r\n")
except socket.error:
pass
s.close()
# wait for the SMTP server to complete (for up to 2 secs)
self.join(2.0)
# clean up the SMTP server (and force quit if needed)
self.server.terminate()
def get_sender(self):
return self.store.sender
def get_recipients(self):
return self.store.recipients
def get_message(self):
return self.store.message
def cleanup(self):
self.store.reset(None)
def smtp_address(fulladdr):
    mo = email_re.search(fulladdr)
    if mo:
        return mo.group(0)
    # no RFC822 address found: fall back to stripping any angle brackets
    start = fulladdr.find('<')
    if start >= 0:
        return fulladdr[start+1:-1]
    return fulladdr
def decode_header(header):
""" Decode a MIME-encoded header value """
mo = header_re.match(header)
# header does not seem to be MIME-encoded
if not mo:
return header
# attempt to decode the header,
# following the specified MIME encoding and charset
try:
encoding = mo.group('code').lower()
if encoding == 'q':
val = quopri.decodestring(mo.group('value'), header=True)
elif encoding == 'b':
val = base64.decodestring(mo.group('value'))
else:
raise AssertionError, "unsupported encoding: %s" % encoding
header = unicode(val, mo.group('charset'))
except Exception, e:
raise AssertionError, e
return header
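# Illustrative sketch (not part of Trac): decode_header() turns a MIME
# encoded-word into a unicode string, e.g. a quoted-printable UTF-8 value.
def _example_decode_header():
    return decode_header('=?utf-8?q?Caf=C3=A9?=')  # u'Caf\xe9'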
def parse_smtp_message(msg):
""" Split a SMTP message into its headers and body.
Returns a (headers, body) tuple
We do not use the email/MIME Python facilities here
as they may accept invalid RFC822 data, or data we do not
want to support nor generate """
headers = {}
lh = None
body = None
# last line does not contain the final line ending
msg += '\r\n'
for line in msg.splitlines(True):
if body != None:
# append current line to the body
if line[-2] == CR:
body += line[0:-2]
body += '\n'
else:
raise AssertionError, "body misses CRLF: %s (0x%x)" \
% (line, ord(line[-1]))
else:
if line[-2] != CR:
# RFC822 requires CRLF at end of field line
raise AssertionError, "header field misses CRLF: %s (0x%x)" \
% (line, ord(line[-1]))
# discards CR
line = line[0:-2]
if line.strip() == '':
# end of headers, body starts
body = ''
else:
val = None
if line[0] in ' \t':
# continuation of the previous line
if not lh:
# unexpected multiline
raise AssertionError, \
"unexpected folded line: %s" % line
val = decode_header(line.strip(' \t'))
# appends the current line to the previous one
if not isinstance(headers[lh], tuple):
headers[lh] += val
else:
headers[lh][-1] = headers[lh][-1] + val
else:
# splits header name from value
(h, v) = line.split(':', 1)
val = decode_header(v.strip())
if headers.has_key(h):
if isinstance(headers[h], tuple):
headers[h] += val
else:
headers[h] = (headers[h], val)
else:
headers[h] = val
# store the last header (for multiline headers)
lh = h
# returns the headers and the message body
return (headers, body)
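# Illustrative sketch (not part of Trac): typical use of these helpers in a
# notification test, assuming the code under test sends mail to 127.0.0.1:8225.
#
#     smtpd = SMTPThreadedServer(8225)
#     smtpd.start()
#     ...                          # exercise code that sends the notification
#     headers, body = parse_smtp_message(smtpd.get_message())
#     sender = smtp_address(headers['From'])
#     recipients = smtpd.get_recipients()
#     smtpd.stop()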
|
|
import urllib2
import re
import bs4
from django.core.management.base import BaseCommand
from django.template.defaultfilters import slugify
from django.utils import timezone
from django.db import DatabaseError
from main.models import Country, CIAWFBEntry
VERBOSE = True
field_data_codes = {'administrative_divisions': '2051',
'age_structure': '2010',
'agriculture_products': '2052',
'airports': '2053',
'airports_with_paved_runways': '2030',
'airports_with_unpaved_runways': '2031',
'area': '2147',
'area_comparative': '2023',
'background': '2028',
'birth_rate': '2054',
'broadcast_media': '2213',
'budget': '2056',
'budget_surplus_or_deficit': '2222',
'capital': '2057',
'carbon_dioxide_emissions_from_consumption_of_energy': '2254',
'central_bank_discount_rate': '2207',
'children_under_the_age_of_5_years_underweight': '2224',
'climate': '2059',
'coastline': '2060',
'commercial_bank_prime_lending_rate': '2208',
'communications_note': '2138',
'constitution': '2063',
'country_name': '2142',
'crude_oil_exports': '2242',
'crude_oil_imports': '2243',
'crude_oil_production': '2241',
'crude_oil_proved_reserves': '2244',
'current_account_balance': '2187',
'death_rate': '2066',
'debt_external': '2079',
'demographic_profile': '2257',
'dependency_status': '2006',
'dependent_areas': '2068',
'diplomatic_representation_from_the_us': '2007',
'diplomatic_representation_in_the_us': '2149',
'disputes_international': '2070',
'distribution_of_family_income_gini_index': '2172',
'drinking_water_source': '2216',
'economy_overview': '2116',
'education_expenditures': '2206',
'electricity_consumption': '2233',
'electricity_exports': '2234',
'electricity_from_fossil_fuels': '2237',
'electricity_from_hydroelectric_plants': '2238',
'electricity_from_nuclear_fuels': '2239',
'electricity_from_other_renewable_sources': '2240',
'electricity_imports': '2235',
'electricity_installed_generating_capacity': '2236',
'electricity_production': '2232',
'elevation_extremes': '2020',
'environment_current_issues': '2032',
'environment_international_agreements': '2033',
'ethnic_groups': '2075',
'exchange_rates': '2076',
'executive_branch': '2077',
'exports': '2078',
'exports_commodities': '2049',
'exports_partners': '2050',
'fiscal_year': '2080',
'flag_description': '2081',
'freshwater_withdrawal_domesticindustrialagricultural': '2202',
'gdp_official_exchange_rate': '2195',
'gdp_purchasing_power_parity': '2001',
'gdp_composition_by_sector': '2012',
'gdp_per_capita_ppp': '2004',
'gdp_real_growth_rate': '2003',
'geographic_coordinates': '2011',
'geography_note': '2113',
'government_note': '2140',
'government_type': '2128',
'health_expenditures': '2225',
'heliports': '2019',
'hivaids_adult_prevalence_rate': '2155',
'hivaids_deaths': '2157',
'hivaids_people_living_with_hivaids': '2156',
'hospital_bed_density': '2227',
'household_income_or_consumption_by_percentage_share': '2047',
'illicit_drugs': '2086',
'imports': '2087',
'imports_commodities': '2058',
'imports_partners': '2061',
'independence': '2088',
'industrial_production_growth_rate': '2089',
'industries': '2090',
'infant_mortality_rate': '2091',
'inflation_rate_consumer_prices': '2092',
'international_law_organization_participation': '2220',
'international_organization_participation': '2107',
'internet_country_code': '2154',
'internet_hosts': '2184',
'internet_users': '2153',
'investment_gross_fixed': '2185',
'irrigated_land': '2146',
'judicial_branch': '2094',
'labor_force': '2095',
'labor_force_by_occupation': '2048',
'land_boundaries': '2096',
'land_use': '2097',
'languages': '2098',
'legal_system': '2100',
'legislative_branch': '2101',
'life_expectancy_at_birth': '2102',
'literacy': '2103',
'location': '2144',
'major_cities_population': '2219',
'major_infectious_diseases': '2193',
'manpower_available_for_military_service': '2105',
'manpower_fit_for_military_service': '2025',
'manpower_reaching_militarily_significant_age_annually': '2026',
'map_references': '2145',
'maritime_claims': '2106',
'market_value_of_publicly_traded_shares': '2200',
'maternal_mortality_rate': '2223',
'median_age': '2177',
'merchant_marine': '2108',
'military_note': '2137',
'military_branches': '2055',
'military_expenditures': '2034',
'military_service_age_and_obligation': '2024',
'national_anthem': '2218',
'national_holiday': '2109',
'national_symbols': '2230',
'nationality': '2110',
'natural_gas_consumption': '2250',
'natural_gas_exports': '2251',
'natural_gas_imports': '2252',
'natural_gas_production': '2249',
'natural_gas_proved_reserves': '2253',
'natural_hazards': '2021',
'natural_resources': '2111',
'net_migration_rate': '2112',
'obesity_adult_prevalence_rate': '2228',
'people_note': '2022',
'physicians_density': '2226',
'pipelines': '2117',
'political_parties_and_leaders': '2118',
'political_pressure_groups_and_leaders': '2115',
'population': '2119',
'population_below_poverty_line': '2046',
'population_growth_rate': '2002',
'ports_and_terminals': '2120',
'public_debt': '2186',
'railways': '2121',
'refined_petroleum_products_consumption': '2246',
'refined_petroleum_products_exports': '2247',
'refined_petroleum_products_imports': '2248',
'refined_petroleum_products_production': '2245',
'refugees_and_internally_displaced_persons': '2194',
'religions': '2122',
'reserves_of_foreign_exchange_and_gold': '2188',
'roadways': '2085',
'sanitation_facility_access': '2217',
'school_life_expectancy_primary_to_tertiary_education': '2205',
'sex_ratio': '2018',
'stock_of_broad_money': '2215',
'stock_of_direct_foreign_investment_abroad': '2199',
'stock_of_direct_foreign_investment_at_home': '2198',
'stock_of_domestic_credit': '2211',
'stock_of_narrow_money': '2214',
'suffrage': '2123',
'taxes_and_other_revenues': '2221',
'telephone_system': '2124',
'telephones_main_lines_in_use': '2150',
'telephones_mobile_cellular': '2151',
'terrain': '2125',
'total_fertility_rate': '2127',
'total_renewable_water_resources': '2201',
'trafficking_in_persons': '2196',
'transportation_note': '2008',
'unemployment_rate': '2129',
'unemployment_youth_ages_15_24': '2229',
'urbanization': '2212',
'waterways': '2093'}
class Command(BaseCommand):
help = 'Updates Country Data from CIA World Factbook'
def handle(self, *args, **options):
def extract_field_data(field_name, field_url):
""" Note: Requires HTML5 Library: pip intall html5lib
"""
country_attribute_list = {}
rootURL = "https://www.cia.gov/library/publications/the-world-factbook/fields/"
fullURL = rootURL + field_url + '.html'
soup = bs4.BeautifulSoup(urllib2.urlopen(fullURL).read())
tables = soup.find_all('table', width="638")
for table in tables:
try:
country = table.find('a', href=re.compile('geos')).text.strip()
except AttributeError:
continue
try:
field_value = table.find('td', class_="category_data").text.strip()
except AttributeError:
continue
country_attribute_list[country] = field_value
return country_attribute_list
def write_field_data_to_db(field_name, field_data):
for country_name in field_data.keys():
# get country if it exists; create it if it doesn't.
country_slug = slugify(country_name)
try:
country = Country.objects.get(url_name=country_slug)
except Country.DoesNotExist:
country = Country(url_name=country_slug)
country.CIAWFB_name_short = country_name
country.save()
# Get CIA WFB Entry if it exists; create it if it doesn't.
try:
CIAWFB_object = CIAWFBEntry.objects.get(country__id=country.id)
except CIAWFBEntry.DoesNotExist:
CIAWFB_object = CIAWFBEntry(country=country, date_entered=timezone.now())
CIAWFB_object.save()
# Now update the field we've got for that CIAWFB entry
db_name = slugify(field_name).replace('-', '_')
try:
setattr(CIAWFB_object, db_name, field_data[country_name])
CIAWFB_object.save()
except DatabaseError:
print('Unable to write field "%s" (country "%s"). Size to write was %s.' %
(db_name, country_name, len(field_data[country_name])))
longest_field = 0
for cname in field_data.keys():
len_data = len(field_data[cname])
if len_data > longest_field:
longest_field = len_data
print("Field: %s; Max Length: %s" % (field_name, longest_field))
raise DatabaseError
for field_name in sorted(field_data_codes.keys()):
if VERBOSE:
print('Processing field: %s' % field_name)
field_data = extract_field_data(field_name, field_data_codes[field_name])
write_field_data_to_db(field_name, field_data)
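# Usage sketch (assumes this module lives under an app's management/commands/
# directory, e.g. main/management/commands/update_ciawfb.py -- a hypothetical
# path and command name):
#
#     python manage.py update_ciawfb
#
# The command walks every field in field_data_codes, scrapes the corresponding
# Factbook page, and writes the values onto each country's CIAWFBEntry row.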
|
|
#!/usr/bin/env python
'''@package dctopo
Data center network topology creation and drawing.
@author Brandon Heller ([email protected])
This package includes code to create and draw networks with a regular,
repeated structure. The main class is StructuredTopo, which augments the
standard Mininet Topo object with layer metadata plus convenience functions to
enumerate up, down, and layer edges.
'''
from mininet.topo import Topo
PORT_BASE = 1 # starting index for OpenFlow switch ports
class NodeID(object):
'''Topo node identifier.'''
def __init__(self, dpid = None):
'''Init.
@param dpid dpid
'''
# DPID-compatible hashable identifier: opaque 64-bit unsigned int
self.dpid = dpid
def __str__(self):
'''String conversion.
@return str dpid as string
'''
return str(self.dpid)
def name_str(self):
'''Name conversion.
@return name name as string
'''
return str(self.dpid)
def ip_str(self):
'''IP conversion.
@return ip ip as string
'''
hi = (self.dpid & 0xff0000) >> 16
mid = (self.dpid & 0xff00) >> 8
lo = self.dpid & 0xff
return "10.%i.%i.%i" % (hi, mid, lo)
class MyTopo( Topo ):
"Simple topology example."
def __init__( self ):
"Create custom topo."
# Initialize topology
Topo.__init__( self )
# Add hosts and switches
s1 = self.addSwitch( 's1' )
s2 = self.addSwitch( 's2' )
s3 = self.addSwitch( 's3' )
s4 = self.addSwitch( 's4' )
s5 = self.addSwitch( 's5' )
s6 = self.addSwitch( 's6' )
s7 = self.addSwitch( 's7' )
s8 = self.addSwitch( 's8' )
s9 = self.addSwitch( 's9' )
s10 = self.addSwitch( 's10' )
s11 = self.addSwitch( 's11' )
s12 = self.addSwitch( 's12' )
s13 = self.addSwitch( 's13' )
s14 = self.addSwitch( 's14' )
s15 = self.addSwitch( 's15' )
s16 = self.addSwitch( 's16' )
s17 = self.addSwitch( 's17' )
s18 = self.addSwitch( 's18' )
s19 = self.addSwitch( 's19' )
s20 = self.addSwitch( 's20' )
s21 = self.addSwitch( 's21' )
s22 = self.addSwitch( 's22' )
s23 = self.addSwitch( 's23' )
s24 = self.addSwitch( 's24' )
s25 = self.addSwitch( 's25' )
h1 = self.addHost( 'h1' )
h2 = self.addHost( 'h2' )
h3 = self.addHost( 'h3' )
h4 = self.addHost( 'h4' )
h5 = self.addHost( 'h5' )
h6 = self.addHost( 'h6' )
h7 = self.addHost( 'h7' )
h8 = self.addHost( 'h8' )
h9 = self.addHost( 'h9' )
h10 = self.addHost( 'h10' )
h11 = self.addHost( 'h11' )
h12 = self.addHost( 'h12' )
h13 = self.addHost( 'h13' )
h14 = self.addHost( 'h14' )
h15 = self.addHost( 'h15' )
h16 = self.addHost( 'h16' )
h17 = self.addHost( 'h17' )
h18 = self.addHost( 'h18' )
h19 = self.addHost( 'h19' )
h20 = self.addHost( 'h20' )
# Add links
self.addLink( s21, s1 )
self.addLink( s21, s2 )
self.addLink( s21, s3 )
self.addLink( s21, s4 )
self.addLink( s22, s5 )
self.addLink( s22, s6 )
self.addLink( s22, s7 )
self.addLink( s22, s8 )
self.addLink( s23, s9 )
self.addLink( s23, s10 )
self.addLink( s23, s11 )
self.addLink( s23, s12 )
self.addLink( s24, s13 )
self.addLink( s24, s14 )
self.addLink( s24, s15 )
self.addLink( s24, s16 )
self.addLink( s25, s17 )
self.addLink( s25, s18 )
self.addLink( s25, s19 )
self.addLink( s25, s20 )
self.addLink( s1, s5 )
self.addLink( s2, s9 )
self.addLink( s3, s13 )
self.addLink( s4, s17 )
self.addLink( s6, s10 )
self.addLink( s7, s14 )
self.addLink( s8, s18 )
self.addLink( s11, s15 )
self.addLink( s12, s19 )
self.addLink( s16, s20 )
self.addLink( s1, h1 )
self.addLink( s2, h2 )
self.addLink( s3, h3 )
self.addLink( s4, h4 )
self.addLink( s5, h5 )
self.addLink( s6, h6 )
self.addLink( s7, h7 )
self.addLink( s8, h8 )
self.addLink( s9, h9 )
self.addLink( s10, h10 )
self.addLink( s11, h11 )
self.addLink( s12, h12 )
self.addLink( s13, h13 )
self.addLink( s14, h14 )
self.addLink( s15, h15 )
self.addLink( s16, h16 )
self.addLink( s17, h17 )
self.addLink( s18, h18 )
self.addLink( s19, h19 )
self.addLink( s20, h20 )
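# Usage sketch (not part of the original module): Mininet's mn tool can load
# MyTopo from a custom topology file that exposes a 'topos' dict, e.g.
#
#     topos = {'mytopo': (lambda: MyTopo())}
#
# and then:  sudo mn --custom <this file> --topo mytopo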
class StructuredNodeSpec(object):
'''Layer-specific vertex metadata for a StructuredTopo graph.'''
def __init__(self, up_total, down_total, up_speed, down_speed,
type_str = None):
'''Init.
@param up_total number of up links
@param down_total number of down links
@param up_speed speed in Gbps of up links
@param down_speed speed in Gbps of down links
@param type_str string; model of switch or server
'''
self.up_total = up_total
self.down_total = down_total
self.up_speed = up_speed
self.down_speed = down_speed
self.type_str = type_str
class StructuredEdgeSpec(object):
'''Static edge metadata for a StructuredTopo graph.'''
def __init__(self, speed = 1.0):
'''Init.
@param speed bandwidth in Gbps
'''
self.speed = speed
class StructuredTopo(Topo):
'''Data center network representation for structured multi-trees.'''
def __init__(self, node_specs, edge_specs):
'''Create StructuredTopo object.
@param node_specs list of StructuredNodeSpec objects, one per layer
@param edge_specs list of StructuredEdgeSpec objects for down-links,
one per layer
'''
super(StructuredTopo, self).__init__()
self.node_specs = node_specs
self.edge_specs = edge_specs
def def_nopts(self, layer):
'''Return default dict for a structured topo.
@param layer layer of node
@return d dict with layer key/val pair, plus anything else (later)
'''
return {'layer': layer}
def layer(self, name):
'''Return layer of a node
@param name name of switch
@return layer layer of switch
'''
return self.node_info[name]['layer']
def isPortUp(self, port):
''' Returns whether port is facing up or down
@param port port number
@return portUp boolean is port facing up?
'''
return port % 2 == PORT_BASE
def layer_nodes(self, layer):
'''Return nodes at a provided layer.
@param layer layer
@return names list of names
'''
def is_layer(n):
'''Returns true if node is at layer.'''
return self.layer(n) == layer
nodes = [n for n in self.g.nodes() if is_layer(n)]
return nodes
def up_nodes(self, name):
'''Return nodes one layer higher (closer to core).
@param name name
@return names list of names
'''
layer = self.layer(name) - 1
nodes = [n for n in self.g[name] if self.layer(n) == layer]
return nodes
def down_nodes(self, name):
'''Return nodes one layer lower (closer to hosts).
@param name name
@return names list of names
'''
layer = self.layer(name) + 1
nodes = [n for n in self.g[name] if self.layer(n) == layer]
return nodes
def up_edges(self, name):
'''Return edges one layer higher (closer to core).
@param name name
@return up_edges list of name pairs
'''
edges = [(name, n) for n in self.up_nodes(name)]
return edges
def down_edges(self, name):
'''Return edges one layer lower (closer to hosts).
@param name name
@return down_edges list of name pairs
'''
edges = [(name, n) for n in self.down_nodes(name)]
return edges
# def draw(self, filename = None, edge_width = 1, node_size = 1,
# node_color = 'g', edge_color = 'b'):
# '''Generate image of RipL network.
#
# @param filename filename w/ext to write; if None, show topo on screen
# @param edge_width edge width in pixels
# @param node_size node size in pixels
# @param node_color node color (ex 'b' , 'green', or '#0000ff')
# @param edge_color edge color
# '''
# import matplotlib.pyplot as plt
#
# pos = {} # pos[vertex] = (x, y), where x, y in [0, 1]
# for layer in range(len(self.node_specs)):
# v_boxes = len(self.node_specs)
# height = 1 - ((layer + 0.5) / v_boxes)
#
# layer_nodes = sorted(self.layer_nodes(layer, False))
# h_boxes = len(layer_nodes)
# for j, dpid in enumerate(layer_nodes):
# pos[dpid] = ((j + 0.5) / h_boxes, height)
#
# fig = plt.figure(1)
# fig.clf()
# ax = fig.add_axes([0, 0, 1, 1], frameon = False)
#
# draw_networkx_nodes(self.g, pos, ax = ax, node_size = node_size,
# node_color = node_color, with_labels = False)
# # Work around networkx bug; does not handle color arrays properly
# for edge in self.edges(False):
# draw_networkx_edges(self.g, pos, [edge], ax = ax,
# edge_color = edge_color, width = edge_width)
#
# # Work around networkx modifying axis limits
# ax.set_xlim(0, 1.0)
# ax.set_ylim(0, 1.0)
# ax.set_axis_off()
#
# if filename:
# plt.savefig(filename)
# else:
# plt.show()
class FatTreeTopo(StructuredTopo):
'''Three-layer homogeneous Fat Tree.
From "A scalable, commodity data center network architecture, M. Fares et
al. SIGCOMM 2008."
'''
LAYER_CORE = 0
LAYER_AGG = 1
LAYER_EDGE = 2
LAYER_HOST = 3
class FatTreeNodeID(NodeID):
'''Fat Tree-specific node.'''
def __init__(self, pod = 0, sw = 0, host = 0, dpid = None, name = None):
'''Create FatTreeNodeID object from custom params.
Either (pod, sw, host) or dpid must be passed in.
@param pod pod ID
@param sw switch ID
@param host host ID
@param dpid optional dpid
@param name optional name
'''
if dpid:
self.pod = (dpid & 0xff0000) >> 16
self.sw = (dpid & 0xff00) >> 8
self.host = (dpid & 0xff)
self.dpid = dpid
elif name:
pod, sw, host = [int(s) for s in name.split('_')]
self.pod = pod
self.sw = sw
self.host = host
self.dpid = (pod << 16) + (sw << 8) + host
else:
self.pod = pod
self.sw = sw
self.host = host
self.dpid = (pod << 16) + (sw << 8) + host
def __str__(self):
return "(%i, %i, %i)" % (self.pod, self.sw, self.host)
def name_str(self):
'''Return name string'''
return "%i_%i_%i" % (self.pod, self.sw, self.host)
def mac_str(self):
'''Return MAC string'''
return "00:00:00:%02x:%02x:%02x" % (self.pod, self.sw, self.host)
def ip_str(self):
'''Return IP string'''
return "10.%i.%i.%i" % (self.pod, self.sw, self.host)
"""
def _add_port(self, src, dst):
'''Generate port mapping for new edge.
Since Node IDs are assumed hierarchical and unique, we don't need to
maintain a port mapping. Instead, compute port values directly from
node IDs and topology knowledge, statelessly, for calls to self.port.
@param src source switch DPID
@param dst destination switch DPID
'''
pass
"""
def def_nopts(self, layer, name = None):
'''Return default dict for a FatTree topo.
@param layer layer of node
@param name name of node
@return d dict with layer key/val pair, plus anything else (later)
'''
d = {'layer': layer}
if name:
id = self.id_gen(name = name)
# For hosts only, set the IP
if layer == self.LAYER_HOST:
d.update({'ip': id.ip_str()})
d.update({'mac': id.mac_str()})
d.update({'dpid': "%016x" % id.dpid})
return d
def __init__(self, k = 4, speed = 1.0):
'''Init.
@param k switch degree
@param speed bandwidth in Gbps
'''
core = StructuredNodeSpec(0, k, None, speed, type_str = 'core')
agg = StructuredNodeSpec(k / 2, k / 2, speed, speed, type_str = 'agg')
edge = StructuredNodeSpec(k / 2, k / 2, speed, speed,
type_str = 'edge')
host = StructuredNodeSpec(1, 0, speed, None, type_str = 'host')
node_specs = [core, agg, edge, host]
edge_specs = [StructuredEdgeSpec(speed)] * 3
super(FatTreeTopo, self).__init__(node_specs, edge_specs)
self.k = k
self.id_gen = FatTreeTopo.FatTreeNodeID
self.numPods = k
self.aggPerPod = k / 2
pods = range(0, k)
core_sws = range(1, k / 2 + 1)
agg_sws = range(k / 2, k)
edge_sws = range(0, k / 2)
hosts = range(2, k / 2 + 2)
for p in pods:
for e in edge_sws:
edge_id = self.id_gen(p, e, 1).name_str()
edge_opts = self.def_nopts(self.LAYER_EDGE, edge_id)
self.addSwitch(edge_id, **edge_opts)
for h in hosts:
host_id = self.id_gen(p, e, h).name_str()
host_opts = self.def_nopts(self.LAYER_HOST, host_id)
self.addHost(host_id, **host_opts)
self.addLink(host_id, edge_id)
for a in agg_sws:
agg_id = self.id_gen(p, a, 1).name_str()
agg_opts = self.def_nopts(self.LAYER_AGG, agg_id)
self.addSwitch(agg_id, **agg_opts)
self.addLink(edge_id, agg_id)
for a in agg_sws:
agg_id = self.id_gen(p, a, 1).name_str()
c_index = a - k / 2 + 1
for c in core_sws:
core_id = self.id_gen(k, c_index, c).name_str()
core_opts = self.def_nopts(self.LAYER_CORE, core_id)
self.addSwitch(core_id, **core_opts)
self.addLink(core_id, agg_id)
def port(self, src, dst):
'''Get port number (optional)
Note that the topological significance of DPIDs in FatTreeTopo enables
this function to be implemented statelessly.
@param src source switch DPID
@param dst destination switch DPID
@return tuple (src_port, dst_port):
src_port: port on source switch leading to the destination switch
dst_port: port on destination switch leading to the source switch
'''
src_layer = self.layer(src)
dst_layer = self.layer(dst)
src_id = self.id_gen(name = src)
dst_id = self.id_gen(name = dst)
LAYER_CORE = 0
LAYER_AGG = 1
LAYER_EDGE = 2
LAYER_HOST = 3
if src_layer == LAYER_HOST and dst_layer == LAYER_EDGE:
src_port = 0
dst_port = (src_id.host - 2) * 2 + 1
elif src_layer == LAYER_EDGE and dst_layer == LAYER_CORE:
src_port = (dst_id.sw - 2) * 2
dst_port = src_id.pod
elif src_layer == LAYER_EDGE and dst_layer == LAYER_AGG:
src_port = (dst_id.sw - self.k / 2) * 2
dst_port = src_id.sw * 2 + 1
elif src_layer == LAYER_AGG and dst_layer == LAYER_CORE:
src_port = (dst_id.host - 1) * 2
dst_port = src_id.pod
elif src_layer == LAYER_CORE and dst_layer == LAYER_AGG:
src_port = dst_id.pod
dst_port = (src_id.host - 1) * 2
elif src_layer == LAYER_AGG and dst_layer == LAYER_EDGE:
src_port = dst_id.sw * 2 + 1
dst_port = (src_id.sw - self.k / 2) * 2
elif src_layer == LAYER_CORE and dst_layer == LAYER_EDGE:
src_port = dst_id.pod
dst_port = (src_id.sw - 2) * 2
elif src_layer == LAYER_EDGE and dst_layer == LAYER_HOST:
src_port = (dst_id.host - 2) * 2 + 1
dst_port = 0
else:
raise Exception("Could not find port leading to given dst switch")
# Shift by one; as of v0.9, OpenFlow ports are 1-indexed.
if src_layer != LAYER_HOST:
src_port += 1
if dst_layer != LAYER_HOST:
dst_port += 1
return (src_port, dst_port)
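# Illustrative sketch, not part of the original topo: the DPID packing used by
# FatTreeNodeID above -- pod in the bits above 16, switch in bits 8-15, host in
# bits 0-7 -- written out standalone so the bit layout is explicit.
def _pack_dpid(pod, sw, host):
    '''Pack (pod, sw, host) into a single DPID integer.'''
    return (pod << 16) + (sw << 8) + host

def _unpack_dpid(dpid):
    '''Recover (pod, sw, host) from a packed DPID integer.'''
    return (dpid >> 16, (dpid & 0xff00) >> 8, dpid & 0xff)

assert _unpack_dpid(_pack_dpid(3, 1, 2)) == (3, 1, 2)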
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Multiwoz 2.2 Dataset implementation for ParlAI.
"""
from parlai.core.params import ParlaiParser
import copy
import os
import pandas as pd
from parlai.core.opt import Opt
import parlai.core.tod.tod_core as tod
import json
from typing import Optional
from parlai.utils.data import DatatypeHelper
from parlai.utils.io import PathManager
import parlai.tasks.multiwoz_v22.build as build_
import parlai.core.tod.tod_agents as tod_agents
DOMAINS = [
"attraction",
"bus",
"hospital",
"hotel",
"police",
"restaurant",
"taxi",
"train",
]
WELL_FORMATTED_DOMAINS = ["attraction", "bus", "hotel", "restaurant", "train", "taxi"]
class MultiwozV22Parser(tod_agents.TodStructuredDataParser):
"""
Abstract data loader for Multiwoz V2.2 into TOD structured data format.
Multiwoz 2.2 has 'find' and 'book' as the only intents.
    For API calls, we look for USER turns whose state has an `active_intent` that is not 'NONE'. We then check whether the SYSTEM has actually made an API call by looking at the dialogue act of the SYSTEM turn.
    * For 'find' intents, we make an API call if the SYSTEM does an "Inform" or gives a "NoOffer". We look in the corresponding `.db` file to return the relevant information.
    * For 'book' intents, we make an API call if the SYSTEM's dialogue act includes a booking; we then use the slots/values of that act as the API response.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
group = parser.add_argument_group("Multiwoz args")
group.add_argument(
"--well-formatted-domains-only",
type=bool,
default=True,
help="Some of the domains in Multiwoz are not super well formatted. Use only the well formatted ones.",
)
group.add_argument(
"--dialogue-id",
type=str,
default="",
help="If non-empty, filters for a particular dialogue id",
)
return super().add_cmdline_args(parser, partial_opt)
def __init__(self, opt: Opt, shared=None):
self.fold = DatatypeHelper.fold(opt["datatype"])
opt["datafile"] = self.fold
self.dpath = os.path.join(opt["datapath"], "multiwoz_v22")
build_.build(opt)
self.last_call = {}
super().__init__(opt, shared)
def load_schemas(self):
with PathManager.open(os.path.join(self.dpath, "schema.json")) as f:
raw = json.load(f)
result = {}
for service in raw:
domain = service["service_name"]
prefix_end_idx = len(domain) + 1
all_slots = set([x["name"][prefix_end_idx:] for x in service["slots"]])
for intent in service["intents"]:
call_name = intent["name"]
result[call_name] = {tod.STANDARD_API_NAME_SLOT: call_name}
req_slots = set([x[prefix_end_idx:] for x in intent["required_slots"]])
if len(req_slots) > 0:
result[call_name][tod.STANDARD_REQUIRED_KEY] = list(req_slots)
# Not fully trusting the original schema data...
optional_slots = set(
[x[prefix_end_idx:] for x in intent["optional_slots"].keys()]
)
optional_slots = optional_slots | all_slots
optional_slots = optional_slots - req_slots
if len(optional_slots) > 0:
result[call_name][tod.STANDARD_OPTIONAL_KEY] = list(optional_slots)
if domain == "police": # Multiwoz 2.2 only lists "police"
result["find_police"] = {
tod.STANDARD_OPTIONAL_KEY: list(all_slots),
tod.STANDARD_API_NAME_SLOT: "find_police",
}
if (
domain == "taxi"
): # Multiwoz 2.2 has "book taxi" in the schema but it's "find taxi" in the data...
result["find_taxi"] = copy.deepcopy(result["book_taxi"])
result["find_taxi"][tod.STANDARD_API_NAME_SLOT] = "find_taxi"
return result
def load_dbs(self):
dbs = {}
for key in DOMAINS:
if (
key == "hospital"
): # has funky extra format, so we're gonna deal with it manually.
with PathManager.open(
os.path.join(self.dpath, "db", key + "_db.json")
) as f:
file_lines = f.readlines()
hospital_address_lines = file_lines[1:4]
partial = [
x.replace("#", "").strip().lower().split(":")
for x in hospital_address_lines
]
self.hospital_address = {x[0]: x[1] for x in partial}
self.hospital_department_details = json.loads("".join(file_lines[6:]))
continue
if (
key == "taxi"
): # Taxi domain is funky and the db for it is just response slot options.
continue
with PathManager.open(
os.path.join(self.dpath, "db", key + "_db.json")
) as f:
blob = json.load(f)
for i, entry in enumerate(blob):
cased = {}
for slot_name in entry:
cased[slot_name.lower().replace(" ", "")] = entry[slot_name]
blob[i] = cased
dbs[key] = pd.DataFrame.from_dict(blob)
return dbs
def load_chunks(self, fold):
if fold == "valid":
fold = "dev" # change name to match file structure
for path in PathManager.ls(os.path.join(self.dpath, fold)):
with PathManager.open(os.path.join(self.dpath, fold, path)) as f:
blob = json.load(f)
for convo in blob:
yield convo
def _get_find_api_response(self, intent, raw_slots, sys_dialog_act):
"""
Get an API response out of the lookup databases.
"""
domain = ""
for cand in DOMAINS:
if cand in intent:
domain = cand
if domain == "taxi": # handle separately cause funky
for action in sys_dialog_act:
if action == "Taxi-Inform":
return {x[0]: x[1] for x in sys_dialog_act[action]}
return {domain: domain} # too much work to do this right...
if domain == "hospital": # handle separately cause funky
res = self.hospital_address
if "hospital-department" in raw_slots:
for blob in self.hospital_department_details:
if blob["department"] in raw_slots["hospital-department"]:
res[blob["department"]] = blob
return res
slots = {}
for raw_key in raw_slots:
key = raw_key[len(domain + "-") :]
slots[key] = raw_slots[raw_key]
for action in sys_dialog_act:
if "Recommend" in action:
add_slots = {}
for x in sys_dialog_act[action]:
name = x[0]
val = x[1]
if self._slot_in_schema(name, intent):
if name not in add_slots:
add_slots[name] = []
add_slots[name].append(val)
for key in add_slots:
slots[key] = add_slots[key]
find = self.dbs[domain]
for slot, values in slots.items():
if slot == "arriveby":
condition = find[slot] < values[0]
elif slot == "leaveat":
condition = find[slot] > values[0]
else:
condition = find[slot].isin(values)
find = find[condition]
filtered = self.dbs[domain].iloc[find.index]
count = len(filtered.index)
if count == 0:
return {}
blob = filtered.head(1).to_dict('records')
results = {}
results["COUNT"] = count
results["OPTIONS"] = json.dumps(blob)
return results
def _slot_in_schema(self, slot, intent):
return slot in self.schemas[intent].get(
tod.STANDARD_OPTIONAL_KEY, []
) or slot in self.schemas[intent].get(tod.STANDARD_REQUIRED_KEY, [])
def _get_round(self, dialogue_id, raw_episode, turn_id):
"""
Parse to TodStructuredRound.
Assume User turn first.
"""
user_turn = raw_episode[turn_id]
if user_turn["speaker"] != "USER":
raise RuntimeError(
f"Got non-user turn when it should have been in {dialogue_id}; turn id {turn_id}"
)
sys_turn = raw_episode[turn_id + 1]
sys_dialog_act = self.dialog_acts[dialogue_id][str(turn_id + 1)]["dialog_act"]
if sys_turn["speaker"] != "SYSTEM":
raise RuntimeError(
f"Got non-system turn when it should have been in {dialogue_id}; turn id {turn_id}"
)
frames = user_turn.get("frames", [])
call = {}
resp = {}
for frame in frames:
if frame.get("state", {}).get("active_intent", "NONE") != "NONE":
intent = frame["state"]["active_intent"]
domain = frame["service"]
maybe_call_raw = copy.deepcopy(frame["state"]["slot_values"])
maybe_call = {}
truncate_length = len(domain) + 1
for key in maybe_call_raw:
maybe_call[key[truncate_length:]] = maybe_call_raw[key][0]
maybe_call[tod.STANDARD_API_NAME_SLOT] = intent
if "find" in intent:
for key in sys_dialog_act:
if "Inform" in key or "NoOffer" in key:
# Gotta check to make sure if it's inform, that it's about the right topic
if "Inform" in key:
valid = True
slots = [x[0] for x in sys_dialog_act[key]]
for slot in slots:
valid &= self._slot_in_schema(slot, intent) | (
slot == "choice"
)
if not valid:
continue
call = maybe_call
resp = self._get_find_api_response(
intent, frame["state"]["slot_values"], sys_dialog_act
)
elif "book" in intent:
for key in sys_dialog_act:
if "Book" in key: # and "Inform" not in key:
resp = {x[0]: x[1] for x in sys_dialog_act[key]}
call = maybe_call
if call == self.last_call:
call = {}
resp = {}
if len(call) > 0:
self.last_call = call
return (
call,
tod.TodStructuredRound(
user_utt=user_turn["utterance"],
api_call_machine=call,
api_resp_machine=resp,
sys_utt=sys_turn["utterance"],
),
)
def _get_schemas_for_goal_calls(self, goals):
result = []
seen = set()
for goal in goals:
call_name = goal[tod.STANDARD_API_NAME_SLOT]
if call_name not in seen:
result.append(self.schemas[call_name])
seen.add(call_name)
return result
def setup_episodes(self, fold):
"""
Parses into TodStructuredEpisode.
"""
self.dbs = self.load_dbs()
self.schemas = self.load_schemas()
with PathManager.open(os.path.join(self.dpath, "dialog_acts.json")) as f:
self.dialog_acts = json.load(f)
chunks = self.load_chunks(fold)
episodes = []
for raw_episode in chunks:
domains = raw_episode["services"]
if self.opt.get("dialogue_id", "") != "":
if raw_episode["dialogue_id"] != self.opt["dialogue_id"]:
continue
            skip = False  # flag so the outer loop can be skipped from inside the domain loop below
if self.opt.get("well_formatted_domains_only", True):
if len(domains) == 0:
skip = True
for domain in domains:
if domain not in WELL_FORMATTED_DOMAINS:
skip = True
if skip:
continue
turn_id = 0 # matching naming in the `dialogues` files.
turns = raw_episode["turns"]
rounds = []
goal_calls = []
while turn_id < len(turns):
goal, r = self._get_round(raw_episode['dialogue_id'], turns, turn_id)
turn_id += 2
rounds.append(r)
if len(goal) > 0:
goal_calls.append(goal)
episode = tod.TodStructuredEpisode(
domain=tod.SerializationHelpers.inner_list_join(domains),
api_schemas_machine=self._get_schemas_for_goal_calls(goal_calls),
goal_calls_machine=goal_calls,
rounds=rounds,
)
episodes.append(episode)
return episodes
def get_id_task_prefix(self):
return "MultiwozV22"
class UserSimulatorTeacher(MultiwozV22Parser, tod_agents.TodUserSimulatorTeacher):
pass
class SystemTeacher(MultiwozV22Parser, tod_agents.TodSystemTeacher):
pass
class DefaultTeacher(SystemTeacher):
pass
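# Illustrative sketch, not part of ParlAI: the domain-prefix stripping that
# load_schemas and _get_round above rely on, where raw slot names look like
# "<domain>-<slot>" and only the bare slot name is kept.
def _strip_domain_prefix(domain, slot_name):
    return slot_name[len(domain) + 1 :]

assert _strip_domain_prefix("hotel", "hotel-pricerange") == "pricerange"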
|
|
from active_learning import *
def read_labels(fname):
labels = []
fd = open(fname)
for line in fd:
line = line.strip().split(',')
id = line[0]
v = line[1][1:(len(line[1])-1)].strip().split(' ')
v = [int(vv) for vv in v]
labels.append(set(v))
fd.close()
return labels
def read_label_lst(fname):
labels = []
fd = open(fname)
for line in fd:
line = line.strip()
v = float(line)
labels.append(int(v))
fd.close()
return labels
def is_anc_in_set(l, root, parents, label_set):
p = parents[l]
while p != root:
if p not in label_set:
return False
p = parents[p]
return True
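#illustrative worked example (not part of the original script) for is_anc_in_set:
#with a tiny hierarchy 0 -> 1 -> 3, label 3 is only accepted when its ancestor 1
#is already in the predicted label set
assert is_anc_in_set(3, 0, {3: 1, 1: 0}, set([1])) == True
assert is_anc_in_set(3, 0, {3: 1, 1: 0}, set()) == False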
def make_predicted_labels(root, folder, fname_ext, test_labels, parents):
for c in root.children:
#read all examples' predicted labels for this node
predicted_labels = read_label_lst(folder + '/' + str(c.labelIndex) + '.' + fname_ext)
#update each examples' label set
for i in range(len(test_labels)):
if predicted_labels[i] == 1 and is_anc_in_set(c.labelIndex, 0, parents, test_labels[i]):
test_labels[i].add(c.labelIndex)
#go to deeper level
make_predicted_labels(c, folder, fname_ext, test_labels, parents)
#output docs whose per-instance F1 score is below the given threshold
def get_bad_hier_f1_docs(true_labels, predicted_labels, threshold):
ex_size = len(true_labels)
bad_ids = []
good_ids= []
for i in range(ex_size):
t_labels = true_labels[i]
p_labels = predicted_labels[i]
p_n = len(t_labels & p_labels)
p = len(p_labels)
t = len(t_labels)
        #compute per-instance precision, recall and F1
if p != 0:
pre = float(p_n) / p
else:
pre = 0
if t != 0:
rec = float(p_n) / t
else:
rec = 0
if pre != 0 and rec != 0:
f1 = 2* pre * rec / (pre + rec)
else:
f1 = 0
if f1 < threshold:
bad_ids.append(i)
else:
good_ids.append(i)
return bad_ids, good_ids
def compute_hier_f1(true_labels, predicted_labels):
ex_size = len(true_labels)
sum_p = 0
sum_t = 0
sum_p_t = 0
sum_macro_pres = 0
sum_macro_recs = 0
sum_macro_f1s = 0
for i in range(ex_size):
t_labels = true_labels[i]
p_labels = predicted_labels[i]
p_n = len(t_labels & p_labels)
p = len(p_labels)
t = len(t_labels)
#compute micro loss
sum_p_t += p_n
sum_p += p
sum_t += t
#compute macro loss
if p != 0:
pre = float(p_n) / p
else:
pre = 0
if t != 0:
rec = float(p_n) / t
else:
rec = 0
if pre != 0 and rec != 0:
f1 = 2* pre * rec / (pre + rec)
else:
f1 = 0
sum_macro_pres += pre
sum_macro_recs += rec
sum_macro_f1s += f1
#compute micro loss
if sum_p != 0:
micro_prec = float(sum_p_t) / sum_p
else:
micro_prec = 0
if sum_t != 0:
micro_rec = float(sum_p_t) / sum_t
else:
micro_rec = 0
if micro_prec != 0 and micro_rec != 0:
micro_f1 = 2 * micro_prec * micro_rec / (micro_prec + micro_rec)
else:
micro_f1 = 0
#compute macro loss
macro_prec = sum_macro_pres / ex_size
macro_rec = sum_macro_recs / ex_size
macro_f1 = sum_macro_f1s / ex_size
return micro_prec, micro_rec, micro_f1, macro_prec, macro_rec, macro_f1
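#illustrative worked example (not part of the original script): micro vs. macro
#hierarchical F1 on two toy documents, using compute_hier_f1 defined above
def _demo_hier_f1():
    true_labels = [set([1, 2]), set([3])]
    predicted_labels = [set([1]), set([3, 4])]
    #doc 1: pre=1.0, rec=0.5, f1=2/3; doc 2: pre=0.5, rec=1.0, f1=2/3
    #micro: prec=rec=f1=2/3; macro: prec=rec=0.75, f1=2/3
    return compute_hier_f1(true_labels, predicted_labels)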
#compute hier f1 measure for top-k levels
def compute_hier_f1_in_top_k_levels(true_labels, predicted_labels, top_k_label_set):
ex_size = len(true_labels)
sum_p = 0
sum_t = 0
sum_p_t = 0
sum_macro_pres = 0
sum_macro_recs = 0
sum_macro_f1s = 0
for i in range(ex_size):
t_labels = true_labels[i]
p_labels = predicted_labels[i]
#restrict the labels to the only top_k set
t_labels = set([t for t in t_labels if t in top_k_label_set])
p_labels = set([t for t in p_labels if t in top_k_label_set])
p_n = len(t_labels & p_labels)
p = len(p_labels)
t = len(t_labels)
#compute micro loss
sum_p_t += p_n
sum_p += p
sum_t += t
#compute macro loss
if p != 0:
pre = float(p_n) / p
else:
pre = 0
if t != 0:
rec = float(p_n) / t
else:
rec = 0
if pre != 0 and rec != 0:
f1 = 2* pre * rec / (pre + rec)
else:
f1 = 0
sum_macro_pres += pre
sum_macro_recs += rec
sum_macro_f1s += f1
#compute micro loss
if sum_p != 0:
micro_prec = float(sum_p_t) / sum_p
else:
micro_prec = 0
if sum_t != 0:
micro_rec = float(sum_p_t) / sum_t
else:
micro_rec = 0
if micro_prec != 0 and micro_rec != 0:
micro_f1 = 2 * micro_prec * micro_rec / (micro_prec + micro_rec)
else:
micro_f1 = 0
#compute macro loss
macro_prec = sum_macro_pres / ex_size
macro_rec = sum_macro_recs / ex_size
macro_f1 = sum_macro_f1s / ex_size
return micro_prec, micro_rec, micro_f1, macro_prec, macro_rec, macro_f1
def init_test_labels(fname):
    #just make a list of empty label sets, one per test example
test_labels = []
fd = open(fname)
for line in fd:
test_labels.append(set())
fd.close()
return test_labels
def get_doc_length(fname, ids):
lends = []
ids = set(ids)
fd = open(fname)
i = 0
for line in fd:
line = line.strip().split(' ')
if i in ids:
lends.append(len(line) - 1)
i += 1
fd.close()
return lends
def statistics_lends(lens):
import math
len_len = len(lens)
tot_len = sum(lens)
min_len = min(lens)
max_len = max(lens)
avg_len = float(tot_len) / len_len
if len_len == 1:
std_dev_len = 0
else:
std_dev_len = math.sqrt(sum([(v-avg_len)*(v-avg_len) for v in lens]) / (len_len - 1))
    return len_len, min_len, max_len, avg_len, tot_len, std_dev_len
if __name__ == '__main__':
folder = '/home/xiao/datasets/software/data'
svm_folder = '/home/xiao/workspace/software/svm_models'
svm_output_folder = '/home/xiao/workspace/software/svm_output'
hier_fname = folder + '/' + 'sf_topics_nodag_id.txt'
test_feature_fname = folder + '/' + 'sf_stemmed_testing_files_lx.svm'
test_label_fname = folder + '/' + 'sf_stemmed_testing_tags_lx.svm'
train_feature_fname = folder + '/' + 'sf_stemmed_training_files_lx.svm'
train_label_fname = folder + '/' + 'sf_stemmed_training_tags_lx.svm'
root, all_nodes = Node().read_parent_child_pair_tree(hier_fname)
all_labels = all_nodes.keys()
tree_size = root.get_tree_size() - 1
levels = root.get_max_level()
nodes_per_level = [[] for i in range(levels)]
parents = {}
nd_leaves = []
root.get_nodes_per_level(0, nodes_per_level)
root.get_leaves(nd_leaves)
root.get_parents(parents)
leaves = [l.labelIndex for l in nd_leaves]
print tree_size, levels
for i in range(levels):
print i, len(nodes_per_level[i])
print len(leaves)
    #read true labels
true_labels = read_labels(test_label_fname)
#read predicted labels
predicted_labels = init_test_labels(test_label_fname)
    #adjust the labels with the tree constraint
make_predicted_labels(root, svm_output_folder, 'test_labels', predicted_labels, parents)
#output the loss
print compute_hier_f1(true_labels, predicted_labels)
    #check the bad and good testing examples as split by the per-instance f1 threshold
threshold = 0.5
bad_ids, good_ids = get_bad_hier_f1_docs(true_labels, predicted_labels, threshold)
#get doc length for bad and good prediction
bad_lengths = get_doc_length(test_feature_fname, bad_ids)
good_lengths = get_doc_length(test_feature_fname, good_ids)
print 'wrong', statistics_lends(bad_lengths)
print 'correct', statistics_lends(good_lengths)
#check loss for top-k level
top_k_level_nodes = set()
for d in range(levels):
#get top-k level nodes
top_k_level_nodes |= set(nodes_per_level[d])
        print 'top_k_levels', d, compute_hier_f1_in_top_k_levels(true_labels, predicted_labels, top_k_level_nodes)
|
|
# -*- coding: utf-8 -*-
"""
.. module:: djstripe.fields.
:synopsis: dj-stripe Custom Field Definitions
.. moduleauthor:: Bill Huneke (@wahuneke)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import decimal
from django.core.exceptions import FieldError, ImproperlyConfigured
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from .settings import USE_NATIVE_JSONFIELD
from .utils import convert_tstamp, dict_nested_accessor
if USE_NATIVE_JSONFIELD:
from django.contrib.postgres.fields import JSONField
else:
from jsonfield import JSONField
class PaymentMethodForeignKey(models.ForeignKey):
def __init__(self, **kwargs):
kwargs.setdefault("to", "PaymentMethod")
super(PaymentMethodForeignKey, self).__init__(**kwargs)
class StripeFieldMixin(object):
"""
Custom fields for all Stripe data.
This allows keeping track of which database fields are suitable for
sending to or receiving from Stripe. Also, allows a few handy extra parameters.
"""
# Used if the name at stripe is different from the name in our database
# Include a . in name if value is nested in dict in Stripe's object
# (e.g. stripe_name = "data.id" --> obj["data"]["id"])
stripe_name = None
# If stripe_name is None, this can also be used to specify a nested value, but
# the final value is assumed to be the database field name
    # (e.g. nested_name = "data" --> obj["data"][db_field_name])
nested_name = None
# This indicates that this field will always appear in a stripe object. It will be
# an Exception if we try to parse a stripe object that does not include this field
# in the data. If set to False then null=True attribute will be automatically set
stripe_required = True
# If a field was populated in previous API versions but we don't want to drop the old
# data for some reason, mark it as deprecated. This will make sure we never try to send
    # it to Stripe or expect it in Stripe data received.
    # This setting automatically implies null=True.
deprecated = False
def __init__(self, *args, **kwargs):
"""
Assign class instance variables based on kwargs.
Assign extra class instance variables if stripe_required is defined or
if deprecated is defined.
"""
self.stripe_name = kwargs.pop('stripe_name', self.stripe_name)
self.nested_name = kwargs.pop('nested_name', self.nested_name)
self.stripe_required = kwargs.pop('stripe_required', self.stripe_required)
self.deprecated = kwargs.pop('deprecated', self.deprecated)
if not self.stripe_required:
kwargs["null"] = True
if self.deprecated:
kwargs["null"] = True
kwargs["default"] = None
super(StripeFieldMixin, self).__init__(*args, **kwargs)
def stripe_to_db(self, data):
"""Try converting stripe fields to defined database fields."""
if not self.deprecated:
try:
if self.stripe_name:
result = dict_nested_accessor(data, self.stripe_name)
elif self.nested_name:
result = dict_nested_accessor(data, self.nested_name + "." + self.name)
else:
result = data[self.name]
except (KeyError, TypeError):
if self.stripe_required:
model_name = self.model._meta.object_name if hasattr(self, "model") else ""
raise FieldError("Required stripe field '{field_name}' was not"
" provided in {model_name} data object.".format(field_name=self.name,
model_name=model_name))
else:
result = None
return result
class StripePercentField(StripeFieldMixin, models.DecimalField):
"""A field used to define a percent according to djstripe logic."""
def __init__(self, *args, **kwargs):
"""Assign default args to this field."""
defaults = {
'decimal_places': 2,
'max_digits': 5,
'validators': [MinValueValidator(1.00), MaxValueValidator(100.00)]
}
defaults.update(kwargs)
super(StripePercentField, self).__init__(*args, **defaults)
class StripeCurrencyField(StripeFieldMixin, models.DecimalField):
"""
A field used to define currency according to djstripe logic.
Stripe is always in cents. djstripe stores everything in dollars.
"""
def __init__(self, *args, **kwargs):
"""Assign default args to this field."""
defaults = {
'decimal_places': 2,
'max_digits': 8,
}
defaults.update(kwargs)
super(StripeCurrencyField, self).__init__(*args, **defaults)
def stripe_to_db(self, data):
"""Convert the raw value to decimal representation."""
val = super(StripeCurrencyField, self).stripe_to_db(data)
# Note: 0 is a possible return value, which is 'falseish'
if val is not None:
return val / decimal.Decimal("100")
class StripeBooleanField(StripeFieldMixin, models.BooleanField):
"""A field used to define a boolean value according to djstripe logic."""
def __init__(self, *args, **kwargs):
"""Throw an error when a user tries to deprecate."""
if kwargs.get("deprecated", False):
raise ImproperlyConfigured("Boolean field cannot be deprecated. Change field type to "
"StripeNullBooleanField")
super(StripeBooleanField, self).__init__(*args, **kwargs)
class StripeNullBooleanField(StripeFieldMixin, models.NullBooleanField):
"""A field used to define a NullBooleanField value according to djstripe logic."""
pass
class StripeCharField(StripeFieldMixin, models.CharField):
"""A field used to define a CharField value according to djstripe logic."""
pass
class StripeIdField(StripeCharField):
"""A field with enough space to hold any stripe ID."""
def __init__(self, *args, **kwargs):
"""
Assign default args to this field.
As per: https://stripe.com/docs/upgrades
You can safely assume object IDs we generate will never exceed 255
characters, but you should be able to handle IDs of up to that
length.
"""
defaults = {
'max_length': 255,
'blank': False,
'null': False,
}
defaults.update(kwargs)
super(StripeIdField, self).__init__(*args, **defaults)
class StripeTextField(StripeFieldMixin, models.TextField):
"""A field used to define a TextField value according to djstripe logic."""
pass
class StripeDateTimeField(StripeFieldMixin, models.DateTimeField):
"""A field used to define a DateTimeField value according to djstripe logic."""
def stripe_to_db(self, data):
"""Convert the raw timestamp value to a DateTime representation."""
val = super(StripeDateTimeField, self).stripe_to_db(data)
# Note: 0 is a possible return value, which is 'falseish'
if val is not None:
return convert_tstamp(val)
class StripeIntegerField(StripeFieldMixin, models.IntegerField):
"""A field used to define a IntegerField value according to djstripe logic."""
pass
class StripePositiveIntegerField(StripeFieldMixin, models.PositiveIntegerField):
"""A field used to define a PositiveIntegerField value according to djstripe logic."""
pass
class StripeJSONField(StripeFieldMixin, JSONField):
"""A field used to define a JSONField value according to djstripe logic."""
pass
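# Illustrative sketch, not part of dj-stripe: the nested-name lookup described
# in StripeFieldMixin above, where stripe_name = "data.id" resolves to
# obj["data"]["id"]. dict_nested_accessor (imported at the top of this module)
# is assumed to behave like this standalone helper.
def _nested_get(data, dotted_name):
    """Walk a dict along a dotted path, e.g. "data.id" -> data["data"]["id"]."""
    value = data
    for part in dotted_name.split("."):
        value = value[part]
    return value

# Example: _nested_get({"data": {"id": "evt_123"}}, "data.id") == "evt_123"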
|
|
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import subprocess
from lstools_base import MbedLsToolsBase
class MbedLsToolsLinuxGeneric(MbedLsToolsBase):
""" MbedLsToolsLinuxGeneric supports mbed-enabled platforms detection across Linux family
"""
def __init__(self):
"""! ctor
"""
MbedLsToolsBase.__init__(self)
self.os_supported.append('LinuxGeneric')
self.hex_uuid_pattern = "usb-[0-9a-zA-Z_-]*_([0-9a-zA-Z]*)-.*"
        # Since Ubuntu 15, the DAPLink serial port device can have a pci- prefix, not only a usb- one
self.name_link_pattern = '((%s)-[0-9a-zA-Z_-]*_[0-9a-zA-Z]*-.*$)'% ('|'.join(["pci", "usb"]))
self.mount_media_pattern = "^/[a-zA-Z0-9/]* on (/[a-zA-Z0-9/]*) "
self.nlp = re.compile(self.name_link_pattern)
self.hup = re.compile(self.hex_uuid_pattern)
def list_mbeds(self):
"""! Returns detailed list of connected mbeds
@return Returns list of structures with detailed info about each mbed
@details Function returns list of dictionaries with mbed attributes such as mount point, TargetID name etc.
Function returns mbed list with platform names if possible
all_devices =
[
['*not detected', 'sdi', '/media/usb3', '/dev/ttyACM7', 'usb-MBED_microcontroller_066EFF534951775087215736-0:0 -> ../../sdi'],
['*not detected', 'sdg', '/media/usb5', '/dev/ttyACM5', 'usb-MBED_microcontroller_066EFF525257775087141721-0:0 -> ../../sdg'],
['*not detected', 'sdf', '/media/przemek/NUCLEO', '/dev/ttyACM4', 'usb-MBED_microcontroller_0671FF534951775087131543-0:0 -> ../../sdf'],
['*not detected', 'sdd', '/media/usb4', '/dev/ttyACM2', 'usb-MBED_microcontroller_0670FF494951785087152739-0:0 -> ../../sdd'],
['*not detected', 'sdb', '/media/usb0', '/dev/ttyACM0', 'usb-MBED_microcontroller_0674FF484951775087083114-0:0 -> ../../sdb'],
['*not detected', 'sdh', '/media/usb6', '/dev/ttyACM6', 'usb-MBED_microcontroller_066FFF525257775087155144-0:0 -> ../../sdh'],
['*not detected', 'sdc', '/media/usb1', '/dev/ttyACM1', 'usb-MBED_microcontroller_066AFF494956805087155327-0:0 -> ../../sdc'],
['*not detected', 'sde', '/media/usb2', '/dev/ttyACM3', 'usb-MBED_microcontroller_066CFF534951775087112139-0:0 -> ../../sde']
]
MBED format
{
'mount_point' : <>,
'serial_port' : <>,
'target_id' : <>,
'platform_name' : <>,
}
TIDS format
{
"1168": "LPC11U68",
"1549": "LPC1549",
"1070": "NRF51822",
"0200": "KL25Z",
"0220": "KL46Z",
"0230": "K20D50M",
"0240": "K64F"
}
"""
# We harness information about what is mounted and connected to serial ports
disk_ids = self.get_dev_by_id('disk')
serial_ids = self.get_dev_by_id('serial')
mount_ids = self.get_mounts()
# Extra data to identify mbeds by target_id
tids = self.manufacture_ids
# Listing known and undetected / orphan devices
mbeds = self.get_detected(tids, disk_ids, serial_ids, mount_ids)
orphans = self.get_not_detected(tids, disk_ids, serial_ids, mount_ids)
all_devices = mbeds + orphans
self.ERRORLEVEL_FLAG = 0
result = []
tidhex = re.compile(r'_([0-9a-fA-F]+)-\d+:\d+')
for device in all_devices:
tid = None
m = tidhex.search(device[4])
if m and len(m.groups()):
tid = m.group(1)
mbed = {'mount_point' : device[2],
'serial_port' : device[3],
'target_id' : tid,
'platform_name' : device[0]
}
# Deducing mbed-enabled TargetID based on available targetID definition DB.
# If TargetID from USBID is not recognized we will try to check URL in mbed.htm
mbed_htm_target_id = self.get_mbed_htm_target_id(device[2]) # device[2] is a 'mount_point'
if mbed_htm_target_id is not None:
mbed_htm_target_id_prefix = mbed_htm_target_id[0:4]
if mbed_htm_target_id_prefix in tids:
# We need to update platform_name and corresponding TargetID (not USBID, but from mbed.htm)
mbed['platform_name'] = tids[mbed_htm_target_id_prefix]
mbed['target_id'] = mbed_htm_target_id
mbed['target_id_usb_id'] = tid
mbed['target_id_mbed_htm'] = mbed_htm_target_id
result.append(mbed)
            if None in mbed.values():
self.ERRORLEVEL_FLAG = -1
return result
# Private methods
def get_dev_by_id_cmd(self, subdir):
"""! Calls command line 'ls' to get devices by their ids
@details Uses Linux shell command: 'ls -oA /dev/disk/by-id/'
@return tuple(stdout lines, retcode)
"""
cmd = 'ls -oA /dev/' + subdir + '/by-id/'
if self.DEBUG_FLAG:
self.debug(self.get_dev_by_id_cmd.__name__, cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return (p.stdout.readlines(), p.wait())
def get_dev_by_id_process(self, lines, retval):
"""! Remove unnecessary lines from command line output
"""
result = []
if not retval:
for line in lines:
line = line.rstrip()
if not line.lower().startswith('total '): # total 0
result.append(line)
if self.DEBUG_FLAG:
self.debug(self.get_dev_by_id_process.__name__, line)
return result
def get_dev_by_id(self, subdir):
"""! Lists disk devices by id
@return List of strings from 'ls' command executed in shell
"""
lines, retval = self.get_dev_by_id_cmd(subdir)
return self.get_dev_by_id_process(lines, retval)
def get_mounts(self):
"""! Lists mounted devices with vfat file system (potential mbeds)
@result Returns list of all mounted vfat devices
@details Uses Linux shell command: 'mount | grep vfat'
"""
result = []
cmd = 'mount | grep vfat'
if self.DEBUG_FLAG:
self.debug(self.get_mounts.__name__, cmd)
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
retval = p.wait()
if not retval:
for line in p.stdout.readlines():
line = line.rstrip()
result.append(line)
if self.DEBUG_FLAG:
self.debug(self.get_mounts.__name__, line)
return result
def get_disk_hex_ids(self, disk_list):
"""! Get only hexadecimal IDs for mbed disks
@param disk_list List of disks in a system with USBID decoration
@return Returns map of disks and corresponding disks' Hex ids
        @details Uses regular expressions to get Hex strings (TargetIDs) from the list of disks
"""
disk_hex_ids = {}
for dl in disk_list:
m = self.nlp.search(dl)
if m and len(m.groups()):
disk_link = m.group(1)
m = self.hup.search(disk_link)
if m and len(m.groups()):
disk_hex_ids[m.group(1)] = disk_link
return disk_hex_ids
def get_mbed_serial(self, serial_list, dhi):
"""! Get mbed serial by unique hex id (dhi) in disk name
@param serial_list List of all serial ports
@param dhi Unique Hex id of possible mbed device
@return Returns None if corresponding serial device is not found, else returns serial device path
@details Devices are located in Linux '/dev/' directory structure
"""
for sl in serial_list:
if dhi in sl:
m = self.nlp.search(sl)
if m and len(m.groups()):
serial_link = m.group(1)
mbed_dev_serial = "/dev/" + self.get_dev_name(serial_link)
return mbed_dev_serial
return None
def get_detected(self, tids, disk_list, serial_list, mount_list):
"""! Find all known mbed devices and assign name by targetID
@param tids TargetID comprehensive list for detection (manufacturers_ids)
@param disk_list List of disks (mount points in /dev/disk)
@param serial_list List of serial devices (serial ports in /dev/serial)
@param mount_list List of lines from 'mount' command
@return list of lists [mbed_name, mbed_dev_disk, mbed_mount_point, mbed_dev_serial, disk_hex_id]
@details Find for all disk connected all MBED ones we know about from TID list
"""
# Find for all disk connected all MBED ones we know about from TID list
disk_hex_ids = self.get_disk_hex_ids(disk_list)
map_tid_to_mbed = self.get_tid_mbed_name_remap(tids)
result = []
# Search if we have
for dhi in disk_hex_ids.keys():
for mttm in map_tid_to_mbed.keys():
if dhi.startswith(mttm):
mbed_name = map_tid_to_mbed[mttm]
mbed_dev_disk = ""
mbed_dev_serial = ""
disk_link = disk_hex_ids[dhi]
# print "Fount MBED disk: " + disk_link #mbed_name + ": " + mttm + " (" + dhi + ")"
mbed_dev_disk = self.get_dev_name(disk_link) # m.group(1) if m and len(m.groups()) else "unknown"
mbed_dev_serial = self.get_mbed_serial(serial_list, dhi)
# Print detected device
mbed_mount_point = self.get_mount_point(mbed_dev_disk, mount_list)
if mbed_mount_point:
result.append([mbed_name, mbed_dev_disk, mbed_mount_point, mbed_dev_serial, disk_hex_ids[dhi]])
return result
def get_not_detected(self, tids, disk_list, serial_list, mount_list):
"""! Find all unknown mbed-enabled devices (may have 'mbed' string in USBID name)
@param tids TargetID comprehensive list for detection (manufacturers_ids)
@param disk_list List of disks (mount points in /dev/disk)
@param serial_list List of serial devices (serial ports in /dev/serial)
@param mount_list List of lines from 'mount' command
@return list of lists [mbed_name, mbed_dev_disk, mbed_mount_point, mbed_dev_serial, disk_hex_id]
@details Find for all disk connected all MBED ones we know about from TID list
"""
disk_hex_ids = self.get_disk_hex_ids(disk_list)
map_tid_to_mbed = self.get_tid_mbed_name_remap(tids)
orphan_mbeds = {}
for disk in disk_hex_ids:
if "mbed" in disk_hex_ids[disk].lower():
orphan_found = True
for tid in map_tid_to_mbed.keys():
if disk.startswith(tid):
orphan_found = False
break
if orphan_found:
orphan_mbeds[disk] = disk_hex_ids[disk]
# Search for corresponding MBED serial
result = []
# Find orphan serial name
for dhi in orphan_mbeds:
orphan_serial = self.get_mbed_serial(serial_list, dhi)
orphan_dev_disk = self.get_dev_name(disk_hex_ids[dhi])
orphan_dev_serial = '/dev/' + self.get_dev_name(orphan_serial) if orphan_serial else None
orphan_mount_point = self.get_mount_point(orphan_dev_disk, mount_list)
if orphan_mount_point:
result.append([None, orphan_dev_disk, orphan_mount_point, orphan_dev_serial, disk_hex_ids[dhi]])
return result
def get_tid_mbed_name_remap(self, tids):
"""! Remap to get mapping: ID -> mbed name
"""
return tids
def get_dev_name(self, link):
"""! Get device name from symbolic link list
"""
        device_suffix_pattern = ".*/([a-zA-Z0-9]*)$"
        dsp = re.compile(device_suffix_pattern)
m = dsp.search(link)
mbed_dev = m.group(1) if m and len(m.groups()) else "unknown"
return mbed_dev
def get_mount_point(self, dev_name, mount_list):
"""! Find mount points for MBED devices using mount command output
@param dev_name Device name (e.g 'sda')
@param mount_list List of all mounted devices (strings from Linux mount shell command)
@return Returns None if mount point not found. Else returns device mount path
@details We want to scan names of mount points like this:
/media/MBED_xxx
/media/MBED__xxx
/media/MBED-xxx
"""
mount_media_pattern = "^/[a-zA-Z0-9/]*/" + dev_name + " on (/[a-zA-Z0-9_\-/]*) "
mmp = re.compile(mount_media_pattern)
for mount in mount_list:
m = mmp.search(mount)
if m and len(m.groups()):
return m.group(1)
return None
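# Illustrative sketch, not part of mbed-ls: how the two patterns compiled in
# __init__ above pull the TargetID out of a /dev/disk/by-id entry such as the
# examples listed in the list_mbeds docstring.
def _extract_target_id(by_id_line):
    """! Return the hex TargetID embedded in a by-id symlink line, or None"""
    nlp = re.compile('((pci|usb)-[0-9a-zA-Z_-]*_[0-9a-zA-Z]*-.*$)')
    hup = re.compile("usb-[0-9a-zA-Z_-]*_([0-9a-zA-Z]*)-.*")
    m = nlp.search(by_id_line)
    if not m:
        return None
    m = hup.search(m.group(1))
    return m.group(1) if m else None

# _extract_target_id('usb-MBED_microcontroller_066EFF534951775087215736-0:0 -> ../../sdi')
# returns '066EFF534951775087215736'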
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spin-weighted spherical CNN models.
Implements CNNs based on spin-weighted spherical convolutions, and corresponding
baselines, reproducing models from [1]. There are still missing features so
perfect reproduction is not yet achieved.
[1] Spin-Weighted Spherical CNNs, NeurIPS'20.
"""
import functools
import operator
from typing import Any, Optional, Sequence, Type, Union
from flax import linen as nn
import jax.numpy as jnp
import numpy as np
from spin_spherical_cnns import layers
from spin_spherical_cnns import sphere_utils
from spin_spherical_cnns import spin_spherical_harmonics
Array = Union[np.ndarray, jnp.ndarray]
# All classifiers used for spherical MNIST in the original SWSCNN paper [1] have
# the same resolution per layer.
_SMALL_CLASSIFIER_RESOLUTIONS = (64, 64, 64, 32, 32, 16, 16)
class SpinSphericalBlock(nn.Module):
"""Spin-weighted block with pooling, convolution, batch norm and nonlinearity.
Attributes:
num_channels: Number of convolution output channels.
spins_in: A sequence of input spin weights.
spins_out: A sequence of output spin weights.
downsampling_factor: How much to downsample before applying the
convolution. Will not downsample when downsampling_factor==1.
axis_name: Identifier for the mapped axis in parallel training.
after_conv_module: Module to apply after convolution. Usually a
non-linearity or batch norm + non-linearity. Must follow the
interface of `layers.SpinSphericalBatchNormMagnitudeNonlin`.
transformer: SpinSphericalFourierTransformer instance.
num_filter_params: Number of filter parameters in the convolutional layer.
"""
num_channels: int
spins_in: Sequence[int]
spins_out: Sequence[int]
downsampling_factor: int
axis_name: Any
transformer: spin_spherical_harmonics.SpinSphericalFourierTransformer
after_conv_module: Type[
nn.Module] = layers.SpinSphericalBatchNormMagnitudeNonlin
num_filter_params: Optional[int] = None
@nn.compact
def __call__(self,
inputs,
train,
weights = None):
"""Apply block to `inputs`.
Args:
inputs: (batch_size, resolution, resolution, n_spins_in, n_channels_in)
array of spin-weighted spherical functions with equiangular sampling.
train: whether to run in training or inference mode.
weights: Weights per batch element used in batchnorm mean/std
computation.
Returns:
A (batch_size, resolution // downsampling_factor, resolution //
downsampling_factor, n_spins_out, num_channels) complex64 array.
"""
feature_maps = inputs
if self.downsampling_factor != 1:
feature_maps = layers.SphericalPooling(
stride=self.downsampling_factor, name='spherical_pool')(feature_maps)
feature_maps = layers.SpinSphericalConvolution(
features=self.num_channels,
spins_in=self.spins_in,
spins_out=self.spins_out,
num_filter_params=self.num_filter_params,
transformer=self.transformer,
name='spherical_conv')(feature_maps)
return self.after_conv_module(
spins=self.spins_out,
use_running_stats=not train,
axis_name=self.axis_name,
name='batch_norm_nonlin')(feature_maps, weights=weights)
class SpinSphericalClassifier(nn.Module):
"""Construct a spin-weighted spherical CNN for classification.
Attributes:
num_classes: Number of nodes in the final layer.
resolutions: (n_layers,) list of resolutions at each layer. For consecutive
resolutions a, b, we must have either a == b or a == 2*b. The latter
triggers inclusion of a pooling layer.
spins: A (n_layers,) list of (n_spins,) lists of spin weights per layer.
widths: (n_layers,) list of width per layer (number of channels).
axis_name: Identifier for the mapped axis in parallel training.
num_filter_params: (n_layers,) the number of filter parameters per layer.
input_transformer: None, or SpinSphericalFourierTransformer
instance. Will be computed automatically if None.
"""
num_classes: int
resolutions: Sequence[int]
spins: Sequence[Sequence[int]]
widths: Sequence[int]
axis_name: Any
num_filter_params: Optional[Sequence[int]] = None
input_transformer: Optional[
spin_spherical_harmonics.SpinSphericalFourierTransformer] = None
def setup(self):
if self.input_transformer is None:
# Flatten spins.
all_spins = functools.reduce(operator.concat, self.spins)
self.transformer = spin_spherical_harmonics.SpinSphericalFourierTransformer(
resolutions=np.unique(self.resolutions),
spins=np.unique(all_spins))
else:
self.transformer = self.input_transformer
num_layers = len(self.resolutions)
if len(self.spins) != num_layers or len(self.widths) != num_layers:
raise ValueError('resolutions, spins, and widths must be the same size!')
model_layers = []
for layer_id in range(num_layers - 1):
resolution_in = self.resolutions[layer_id]
resolution_out = self.resolutions[layer_id + 1]
spins_in = self.spins[layer_id]
spins_out = self.spins[layer_id + 1]
if self.num_filter_params is None:
num_filter_params = None
else:
num_filter_params = self.num_filter_params[layer_id + 1]
num_channels = self.widths[layer_id + 1]
# We pool before conv to avoid expensive increase of number of channels at
# higher resolution.
if resolution_out == resolution_in // 2:
downsampling_factor = 2
elif resolution_out != resolution_in:
raise ValueError('Consecutive resolutions must be equal or halved.')
else:
downsampling_factor = 1
model_layers.append(
SpinSphericalBlock(num_channels=num_channels,
spins_in=spins_in,
spins_out=spins_out,
downsampling_factor=downsampling_factor,
num_filter_params=num_filter_params,
axis_name=self.axis_name,
transformer=self.transformer,
name=f'spin_block_{layer_id}'))
self.layers = model_layers
self.final_dense = nn.Dense(self.num_classes, name='final_dense')
def __call__(self, inputs, train):
"""Apply the network to `inputs`.
Args:
inputs: (batch_size, resolution, resolution, n_spins, n_channels) array of
spin-weighted spherical functions (SWSF) with equiangular sampling.
train: whether to run in training or inference mode.
Returns:
A (batch_size, num_classes) float32 array with per-class scores (logits).
"""
resolution, num_spins, num_channels = inputs.shape[2:]
if (resolution != self.resolutions[0] or
num_spins != len(self.spins[0]) or
num_channels != self.widths[0]):
raise ValueError('Incorrect input dimensions!')
feature_maps = inputs
for layer in self.layers:
feature_maps = layer(feature_maps, train=train)
# Current feature maps are still spin spherical. Do final processing.
    # Global pooling is not equivariant for spin != 0, so we must take the
    # absolute values first.
mean_abs = sphere_utils.spin_spherical_mean(jnp.abs(feature_maps))
mean = sphere_utils.spin_spherical_mean(feature_maps).real
spins = jnp.expand_dims(jnp.array(self.spins[-1]), [0, 2])
feature_maps = jnp.where(spins == 0, mean, mean_abs)
# Shape is now (batch, spins, channel).
feature_maps = feature_maps.reshape((feature_maps.shape[0], -1))
return self.final_dense(feature_maps)
class CNNClassifier(nn.Module):
"""Construct a conventional CNN for classification.
This serves as a baseline. It takes the same inputs as the spin models and
uses the same format for number of layers, resolutions and channels per layer.
Attributes:
num_classes: Number of nodes in the final layer.
resolutions: (num_layers,) list of resolutions at each layer. For
consecutive resolutions a, b, we must have either a == b or a == 2*b. The
latter triggers inclusion of a pooling layer.
widths: (num_layers,) list of widths per layer (number of channels).
axis_name: Identifier for the mapped axis in parallel training.
"""
num_classes: int
resolutions: Sequence[int]
widths: Sequence[int]
axis_name: Any
@nn.compact
def __call__(self, inputs, train):
"""Applies the network to inputs.
Args:
inputs: (batch_size, resolution, resolution, n_spins, n_channels) array.
train: whether to run in training or inference mode.
Returns:
A (batch_size, num_classes) float32 array with per-class scores (logits).
Raises:
ValueError: If resolutions cannot be enforced with 2x2 pooling.
"""
num_layers = len(self.resolutions)
# Merge spin and channel dimensions.
features = inputs.reshape((*inputs.shape[:3], -1))
for layer_id in range(num_layers - 1):
resolution_in = self.resolutions[layer_id]
resolution_out = self.resolutions[layer_id + 1]
n_channels = self.widths[layer_id + 1]
if resolution_out == resolution_in // 2:
features = nn.avg_pool(features,
window_shape=(2, 2),
strides=(2, 2),
padding='SAME')
elif resolution_out != resolution_in:
raise ValueError('Consecutive resolutions must be equal or halved.')
features = nn.Conv(features=n_channels,
kernel_size=(3, 3),
strides=(1, 1))(features)
features = nn.BatchNorm(use_running_average=not train,
axis_name=self.axis_name)(features)
features = nn.relu(features)
features = jnp.mean(features, axis=(1, 2))
features = nn.Dense(self.num_classes)(features)
return features
def tiny_classifier(num_classes, axis_name=None, input_transformer=None):
"""Wrapper around SpinSphericalClassifier; builds tiny model for testing."""
return SpinSphericalClassifier(num_classes,
resolutions=(8, 4),
spins=((0,), (0, 1)),
widths=(1, 3),
axis_name=axis_name,
input_transformer=input_transformer)
# The hyperparameters for small (six layers) classifiers used for spherical
# MNIST follow the original SWSCNN paper [1].
def spin_classifier_6_layers(num_classes, axis_name):
"""Returns the SpinSphericalClassifier used for spherical MNIST."""
# Input layer has only spin zero. All others have spins zero and one.
num_layers = len(_SMALL_CLASSIFIER_RESOLUTIONS)
spins = tuple([(0,)] + [(0, 1)] * (num_layers - 1))
widths = (1, 16, 16, 20, 24, 28, 32)
num_filter_params_per_layer = (1, 6, 6, 4, 4, 3, 3)
return SpinSphericalClassifier(num_classes,
resolutions=_SMALL_CLASSIFIER_RESOLUTIONS,
spins=spins,
widths=widths,
num_filter_params=num_filter_params_per_layer,
axis_name=axis_name)
def spherical_classifier_6_layers(num_classes, axis_name):
"""Returns the Spherical CNN baseline used for spherical MNIST."""
num_layers = len(_SMALL_CLASSIFIER_RESOLUTIONS)
widths = (1, 16, 16, 32, 32, 58, 58)
num_filter_params_per_layer = tuple([8] * num_layers)
# The difference between spherical and spin-weighted models is that spins are
# zero in every layer for the spherical.
spins = tuple([(0,)] * num_layers)
return SpinSphericalClassifier(num_classes,
resolutions=_SMALL_CLASSIFIER_RESOLUTIONS,
spins=spins,
widths=widths,
num_filter_params=num_filter_params_per_layer,
axis_name=axis_name)
def cnn_classifier_6_layers(num_classes, axis_name):
"""Returns the conventional CNN baseline used for spherical MNIST."""
widths = (1, 16, 16, 32, 32, 54, 54)
return CNNClassifier(num_classes,
resolutions=_SMALL_CLASSIFIER_RESOLUTIONS,
widths=widths,
axis_name=axis_name)
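# Illustrative sketch, not part of the original module: the per-layer resolution
# rule enforced in SpinSphericalClassifier and CNNClassifier above. Consecutive
# resolutions must be equal or halved; halving triggers 2x2 pooling.
def _downsampling_factors(resolutions):
  """Returns the pooling factor between consecutive layer resolutions."""
  factors = []
  for resolution_in, resolution_out in zip(resolutions[:-1], resolutions[1:]):
    if resolution_out == resolution_in // 2:
      factors.append(2)
    elif resolution_out == resolution_in:
      factors.append(1)
    else:
      raise ValueError('Consecutive resolutions must be equal or halved.')
  return factors

# _downsampling_factors(_SMALL_CLASSIFIER_RESOLUTIONS) == [1, 1, 2, 1, 2, 1]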
|
|
# -*- coding: utf-8 -*-
"""
PyLTI decorator implementation for chalice framework
"""
from __future__ import absolute_import
from functools import wraps
import logging
import os
from chalice import Chalice
try:
from urllib.parse import parse_qs
except ImportError:
from urlparse import parse_qs
try:
from urllib.parse import urlunparse
except ImportError:
from urlparse import urlunparse
from .common import (
LTI_SESSION_KEY,
LTI_PROPERTY_LIST,
verify_request_common,
default_error,
LTIException,
LTIBase
)
logging.basicConfig()
log = logging.getLogger('pylti.chalice') # pylint: disable=invalid-name
class LTI(LTIBase):
"""
LTI Object represents abstraction of current LTI session. It provides
callback methods and methods that allow developer to inspect
LTI basic-launch-request.
This object is instantiated by @lti wrapper.
"""
def __init__(self, lti_args, lti_kwargs):
        # Chalice does not support sessions. Yet, we want the experience
# to be the same as Flask. Therefore, use a simple dictionary
# to keep session variables for the length of this request.
self.session = {}
LTIBase.__init__(self, lti_args, lti_kwargs)
def _consumers(self):
"""
Gets consumers from Lambda environment variables prefixed with
CONSUMER_KEY_SECRET_. For example, given a consumer key of foo
and a shared secret of bar, you should have an environment
variable CONSUMER_KEY_SECRET_foo=bar.
:return: consumers map
:raises: LTIException if environment variables are not found
"""
consumers = {}
for env in os.environ:
if env.startswith('CONSUMER_KEY_SECRET_'):
key = env[20:] # Strip off the CONSUMER_KEY_SECRET_ prefix
# TODO: remove below after live test
# consumers[key] = {"secret": os.environ[env], "cert": 'NA'}
consumers[key] = {"secret": os.environ[env], "cert": None}
if not consumers:
raise LTIException("No consumers found. Chalice stores "
"consumers in Lambda environment variables. "
"Have you created the environment variables?")
return consumers
def verify_request(self):
"""
Verify LTI request
:raises: LTIException if request validation failed
"""
request = self.lti_kwargs['app'].current_request
if request.method == 'POST':
            # Chalice expects JSON and does not natively support form data in
            # a POST body. The below is copied from the parsing of query
            # strings as implemented in match_route of Chalice local.py
parsed_url = request.raw_body.decode()
parsed_qs = parse_qs(parsed_url, keep_blank_values=True)
            params = {k: v[0] for k, v in parsed_qs.items()}
else:
params = request.query_params
log.debug(params)
log.debug('verify_request?')
try:
            # Chalice does not have a url property, so build it here.
protocol = request.headers.get('x-forwarded-proto', 'http')
hostname = request.headers['host']
path = request.context['path']
url = urlunparse((protocol, hostname, path, "", "", ""))
verify_request_common(self._consumers(), url,
request.method, request.headers,
params)
log.debug('verify_request success')
# All good to go, store all of the LTI params into a
# session dict for use in views
for prop in LTI_PROPERTY_LIST:
if params.get(prop, None):
log.debug("params %s=%s", prop, params.get(prop, None))
self.session[prop] = params[prop]
# Set logged in session key
self.session[LTI_SESSION_KEY] = True
return True
except LTIException:
log.debug('verify_request failed')
for prop in LTI_PROPERTY_LIST:
if self.session.get(prop, None):
del self.session[prop]
self.session[LTI_SESSION_KEY] = False
raise
@property
def response_url(self):
"""
Returns remapped lis_outcome_service_url
uses PYLTI_URL_FIX map to support edX dev-stack
:return: remapped lis_outcome_service_url
"""
url = ""
url = self.session['lis_outcome_service_url']
# TODO: Remove this section if not needed
# app_config = self.config
# urls = app_config.get('PYLTI_URL_FIX', dict())
# # url remapping is useful for using devstack
# # devstack reports httpS://localhost:8000/ and listens on HTTP
# for prefix, mapping in urls.items():
# if url.startswith(prefix):
# for _from, _to in mapping.items():
# url = url.replace(_from, _to)
return url
def _verify_any(self):
"""
Verify that request is in session or initial request
:raises: LTIException
"""
raise LTIException("The Request Type any is not "
"supported because Chalice does not support "
"session state. Change the Request Type to "
"initial or omit it from the declaration.")
@staticmethod
def _verify_session():
"""
Verify that session was already created
:raises: LTIException
"""
raise LTIException("The Request Type session is not "
"supported because Chalice does not support "
"session state. Change the Request Type to "
"initial or omit it from the declaration.")
@staticmethod
def close_session():
"""
Invalidates session
:raises: LTIException
"""
raise LTIException("Can not close session. Chalice does "
"not support session state.")
def lti(app, request='initial', error=default_error, role='any',
*lti_args, **lti_kwargs):
"""
LTI decorator
:param: app - Chalice App object.
:param: error - Callback if LTI throws exception (optional).
    :param: request - Request type from
        :py:attr:`pylti.common.LTI_REQUEST_TYPE`. (default: initial)
    :param: role - LTI Role (default: any)
:return: wrapper
"""
def _lti(function):
"""
Inner LTI decorator
:param: function:
:return:
"""
@wraps(function)
def wrapper(*args, **kwargs):
"""
Pass LTI reference to function or return error.
"""
try:
the_lti = LTI(lti_args, lti_kwargs)
the_lti.verify()
the_lti._check_role() # pylint: disable=protected-access
kwargs['lti'] = the_lti
return function(*args, **kwargs)
except LTIException as lti_exception:
error = lti_kwargs.get('error')
exception = dict()
exception['exception'] = lti_exception
exception['kwargs'] = kwargs
exception['args'] = args
return error(exception=exception)
return wrapper
lti_kwargs['request'] = request
lti_kwargs['error'] = error
lti_kwargs['role'] = role
if (not app) or isinstance(app, Chalice):
lti_kwargs['app'] = app
return _lti
else:
# We are wrapping without arguments
lti_kwargs['app'] = None
return _lti(app)
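# Illustrative usage sketch, not part of pylti: wiring the decorator above into
# a Chalice route. The route path, app name and the CONSUMER_KEY_SECRET_<key>
# environment variable are illustrative; see LTI._consumers for how consumers
# are resolved.
#
#   app = Chalice(app_name='lti-tool')
#
#   @app.route('/launch', methods=['POST'],
#              content_types=['application/x-www-form-urlencoded'])
#   @lti(app=app, request='initial')
#   def launch(lti=None):
#       # lti.session now holds the verified launch parameters
#       return {'logged_in': lti.session.get(LTI_SESSION_KEY)}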
|
|
#
# lasheight_classify.py
#
# (c) 2012, Martin Isenburg
# LASSO - rapid tools to catch reality
#
# uses lasheight to compute the height of LiDAR points above the ground
# and uses the height information to classify the points.
#
# The LiDAR input can be in LAS/LAZ/BIN/TXT/SHP/... format.
# The LiDAR output can be in LAS/LAZ/BIN/TXT format.
#
# for licensing details see http://rapidlasso.com/download/LICENSE.txt
#
import sys, os, arcgisscripting, subprocess
def return_classification(classification):
if (classification == "created, never classified (0)"):
return "0"
if (classification == "unclassified (1)"):
return "1"
if (classification == "ground (2)"):
return "2"
if (classification == "low vegetation (3)"):
return "3"
if (classification == "medium vegetation (4)"):
return "4"
if (classification == "high vegetation (5)"):
return "5"
if (classification == "building (6)"):
return "6"
if (classification == "low point (7)"):
return "7"
if (classification == "keypoint (8)"):
return "8"
if (classification == "water (9)"):
return "9"
if (classification == "high point (10)"):
return "10"
if (classification == "(11)"):
return "11"
if (classification == "overlap point (12)"):
return "12"
if (classification == "(13)"):
return "13"
if (classification == "(14)"):
return "14"
if (classification == "(15)"):
return "15"
if (classification == "(16)"):
return "16"
if (classification == "(17)"):
return "17"
if (classification == "(18)"):
return "18"
return "unknown"
def check_output(command,console):
if console == True:
process = subprocess.Popen(command)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output,error = process.communicate()
returncode = process.poll()
return returncode,output
### create the geoprocessor object
gp = arcgisscripting.create(9.3)
### report that something is happening
gp.AddMessage("Starting lasheight ...")
### get number of arguments
argc = len(sys.argv)
### report arguments (for debug)
#gp.AddMessage("Arguments:")
#for i in range(0, argc):
# gp.AddMessage("[" + str(i) + "]" + sys.argv[i])
### get the path to the LAStools binaries
lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))+"\\bin"
### check if path exists
if os.path.exists(lastools_path) == False:
gp.AddMessage("Cannot find .\lastools\bin at " + lastools_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastools_path + " ...")
### create the full path to the lasheight executable
lasheight_path = lastools_path+"\\lasheight.exe"
### check if executable exists
if os.path.exists(lasheight_path) == False:
gp.AddMessage("Cannot find lasheight.exe at " + lasheight_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lasheight_path + " ...")
### create the command string for lasheight.exe
command = [lasheight_path]
### maybe use '-verbose' option
if sys.argv[argc-1] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append(sys.argv[1])
### maybe use ground points from external file
if sys.argv[2] != "#":
command.append("-ground_points")
command.append(sys.argv[2])
### else maybe use points with a different classification as ground
elif sys.argv[3] != "#":
command.append("-class")
command.append(return_classification(sys.argv[3]))
### maybe we should ignore/preserve some existing classifications when classifying
if sys.argv[4] != "#":
command.append("-ignore_class")
command.append(return_classification(sys.argv[4]))
### maybe we should ignore/preserve some more existing classifications when classifying
if sys.argv[5] != "#":
command.append("-ignore_class")
command.append(return_classification(sys.argv[5]))
### maybe we classify points below
if sys.argv[6] != "#":
command.append("-classify_below")
command.append(sys.argv[7])
command.append(return_classification(sys.argv[6]))
### maybe we classify points between [interval 1]
if sys.argv[8] != "#":
command.append("-classify_between")
command.append(sys.argv[9])
command.append(sys.argv[10])
command.append(return_classification(sys.argv[8]))
### maybe we classify points between [interval 2]
if sys.argv[11] != "#":
command.append("-classify_between")
command.append(sys.argv[12])
command.append(sys.argv[13])
command.append(return_classification(sys.argv[11]))
### maybe we classify points between [interval 3]
if sys.argv[14] != "#":
command.append("-classify_between")
command.append(sys.argv[15])
command.append(sys.argv[16])
command.append(return_classification(sys.argv[14]))
### maybe we classify points above
if sys.argv[17] != "#":
command.append("-classify_above")
command.append(sys.argv[18])
command.append(return_classification(sys.argv[17]))
### this is where the output arguments start
out = 19
### maybe an output format was selected
if sys.argv[out] != "#":
if sys.argv[out] == "las":
command.append("-olas")
elif sys.argv[out] == "laz":
command.append("-olaz")
elif sys.argv[out] == "bin":
command.append("-obin")
elif sys.argv[out] == "xyzc":
command.append("-otxt")
command.append("-oparse")
command.append("xyzc")
elif sys.argv[out] == "xyzci":
command.append("-otxt")
command.append("-oparse")
command.append("xyzci")
elif sys.argv[out] == "txyzc":
command.append("-otxt")
command.append("-oparse")
command.append("txyzc")
elif sys.argv[out] == "txyzci":
command.append("-otxt")
command.append("-oparse")
command.append("txyzci")
### maybe an output file name was selected
if sys.argv[out+1] != "#":
command.append("-o")
command.append(sys.argv[out+1])
### maybe an output directory was selected
if sys.argv[out+2] != "#":
command.append("-odir")
command.append(sys.argv[out+2])
### maybe an output appendix was selected
if sys.argv[out+3] != "#":
command.append("-odix")
command.append(sys.argv[out+3])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lasheight
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. lasheight failed.")
sys.exit(1)
### report happy end
gp.AddMessage("Success. lasheight done.")
|
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Cisco ASA devicemodel.
This module implements a device interface of base_device.py for
most of the herd of variants of Cisco ASA devices.
"""
import hashlib
import os
import re
import time
import pexpect
from absl import flags as gflags
import logging
import base_device
import pexpect_connection
import push_exceptions as exceptions
FLAGS = gflags.FLAGS
gflags.DEFINE_float('asa_timeout_response', None,
'ASA device response timeout in seconds.')
gflags.DEFINE_float('asa_timeout_connect', None,
'ASA device connect timeout in seconds.')
gflags.DEFINE_float('asa_timeout_idle', None,
'ASA device idle timeout in seconds.')
gflags.DEFINE_float('asa_timeout_disconnect', None,
'ASA device disconnect timeout in seconds.')
gflags.DEFINE_float('asa_timeout_act_user', None,
'ASA device user activation timeout in seconds.')
MD5_RE = re.compile(r'verify /md5 \(\S+\)\s+=\s+([A-Fa-f0-9]+)')
# Used in sleep statements for a minor pause.
MINOR_PAUSE = 0.05
# Some Cisco ways of saying 'access denied' and/or 'invalid command'.
# Due to the way Cisco privilege levels work and since unknown commands
# may be looked up in DNS, any of these could be a response which really
# means 'access denied', or they could mean what they say.
INVALID_1 = "% Invalid input detected at '^' marker.\n\n"
INVALID_2 = ('% Unknown command or computer name, or unable to find computer '
'address\n')
INVALID_3 = 'Command authorization failed.\n\n'
INVALID_4 = '% Authorization failed.\n\n'
INVALID_5 = '% Incomplete command.\n\n'
INVALID_6_PREFIX = '% Ambiguous command:'
class DeleteFileError(Exception):
"""A file was not successfully deleted."""
class AsaDevice(base_device.BaseDevice):
"""A device model for devices with ASA-like interfaces."""
def __init__(self, **kwargs):
self.vendor_name = 'ios'
super(AsaDevice, self).__init__(**kwargs)
# The response regexp indicating connection success.
self._success = r'(?:\r)([]A-Za-z0-9\.\-[]+[>#])'
def _Connect(self, username, password=None, ssh_keys=None,
enable_password=None, ssl_cert_set=None):
_ = ssl_cert_set
self._connection = pexpect_connection.ParamikoSshConnection(
self.loopback_ipv4, username, password, self._success,
timeout=self.timeout_connect, find_prompt=True,
find_prompt_prefix=r'\r', ssh_keys=ssh_keys,
enable_password=enable_password)
try:
self._connection.Connect()
self._DisablePager()
self.connected = True
except pexpect_connection.ConnectionError as e:
self.connected = False
raise exceptions.ConnectError(e)
except pexpect_connection.TimeoutError as e:
self.connected = False
raise exceptions.ConnectError('Timed out connecting to %s(%s) after '
'%s seconds.' %
(self.host, self.loopback_ipv4, str(e)))
def _Cmd(self, command, mode=None):
def SendAndWait(command):
"""Sends a command and waits for a response."""
self._connection.child.send(command+'\r')
self._connection.child.expect('\r\n', timeout=self.timeout_response)
self._connection.child.expect(self._connection.re_prompt,
timeout=self.timeout_response,
searchwindowsize=128)
return self._connection.child.before.replace('\r\n', os.linesep)
# Quieten pylint.
_ = mode
# We strip question-marks ('?') from the input as they upset the
# buffering for minimal gain (they work only on ASA and not on FTOS).
command = command.replace('?', '')
result = ''
try:
result = SendAndWait(command)
except pexpect.TIMEOUT as e:
self.connected = False
raise exceptions.CmdError('%s: %s' % (e.__class__, str(e)))
except pexpect.EOF:
# Retry once on EOF error, in case we have been idle disconnected.
try:
self.connected = False
self._connection.Connect()
self._DisablePager()
self.connected = True
result = SendAndWait(command)
except pexpect.EOF:
raise exceptions.CmdError('Failed with EOF error twice.')
except pexpect_connection.ConnectionError as e:
raise exceptions.CmdError('Auto-reconnect failed: %s' % e)
except pexpect_connection.TimeoutError as e:
raise exceptions.CmdError('Auto-reconnect timed out: %s' % e)
# Fix trailing \r to \n (if \n of last \r\n is captured by prompt).
if result and result[-1] == '\r':
result = result[:-1] + '\n'
if (result.endswith(INVALID_1) or result.endswith(INVALID_2) or
result.endswith(INVALID_3) or result.endswith(INVALID_4) or
result.endswith(INVALID_5) or (
result.endswith('\n') and
result[result[:-1].rfind('\n') + 1:].startswith(
INVALID_6_PREFIX))):
raise exceptions.CmdError('Command failed: %s' % result)
return result
def _SetConfig(self, destination_file, data, canary):
# Canarying is not supported on ASA.
if canary:
raise exceptions.SetConfigCanaryingError('%s devices do not support '
'configuration canarying.' %
self.vendor_name)
# We only support copying to 'running-config' or 'startup-config' on ASA.
if destination_file not in ('running-config', 'startup-config'):
raise exceptions.SetConfigError('destination_file argument must be '
'"running-config" or "startup-config" '
'for %s devices.' % self.vendor_name)
# Result object.
result = base_device.SetConfigResult()
# Get the MD5 sum of the file.
local_digest = hashlib.md5(data).hexdigest()
try:
# Get the working path from the remote device
remote_path = 'nvram:/'
except exceptions.CmdError as e:
msg = 'Error obtaining working directory: %s' % e
logging.error(msg)
raise exceptions.SetConfigError(msg)
# Use a random remote file name
remote_tmpfile = '%s/push.%s' % (
remote_path.rstrip(), os.urandom(8).encode('hex'))
# Upload the file to the device.
scp = pexpect_connection.ScpPutConnection(
self.loopback_ipv4,
username=self._username,
password=self._password)
try:
scp.Copy(data, remote_tmpfile)
except pexpect_connection.Error as e:
raise exceptions.SetConfigError(
'Failed to copy configuration to remote device. %s' % str(e))
# Get the file size on the router.
try:
# Get the MD5 hexdigest of the file on the remote device.
try:
verify_output = self._Cmd('verify /md5 %s' % remote_tmpfile)
match = MD5_RE.search(verify_output)
if match is not None:
remote_digest = match.group(1)
else:
raise exceptions.SetConfigError(
'The "verify /md5 <filename>" command did not produce '
'expected results. It returned: %r' % verify_output)
except exceptions.CmdError as e:
raise exceptions.SetConfigError(
          'The MD5 hash command on the router did not succeed. '
'The device may not support: "verify /md5 <filename>"')
# Verify the local_digest and remote_digest are the same.
if local_digest != remote_digest:
raise exceptions.SetConfigError(
'File transfer to remote host corrupted. Local digest: %r, '
'Remote digest: %r' % (local_digest, remote_digest))
# Copy the file from flash to the
# destination(running-config, startup-config).
# Catch errors that may occur during application, and report
# these to the user.
try:
self._connection.child.send(
'copy %s %s\r' % (remote_tmpfile, destination_file))
pindex = self._connection.child.expect(
[r'Destination filename \[%s\]\?' % destination_file,
r'%\s*\S*.*',
r'%Error.*',
self._connection.re_prompt],
timeout=self.timeout_act_user)
if pindex == 0:
self._connection.child.send('\r')
try:
pindex = self._connection.child.expect(
[r'Invalid input detected',
self._connection.re_prompt,
r'%Warning:There is a file already existing.*'
'Do you want to over write\? \[confirm\]'],
timeout=self.timeout_act_user)
if pindex == 0:
# Search again using findall to get all bad lines.
bad_lines = re.findall(
r'^(.*)$[\s\^]+% Invalid input',
self._connection.child.match.string,
re.MULTILINE)
raise exceptions.SetConfigSyntaxError(
'Configuration loaded, but with bad lines:\n%s' %
'\n'.join(bad_lines))
if pindex == 2:
# Don't over-write.
self._connection.child.send('n')
raise exceptions.SetConfigError(
'Destination file %r already exists, cannot overwrite.'
% destination_file)
except (pexpect.EOF, pexpect.TIMEOUT) as e:
raise exceptions.SetConfigError(
'Copied file to device, but did not '
'receive prompt afterwards. %s %s' %
(self._connection.child.before, self._connection.child.after))
elif pindex == 2:
print "MATCHED 2"
# The expect does a re.search, search again using findall to get all
raise exceptions.SetConfigError('Could not copy temporary '
'file to %s.' % destination_file)
except (pexpect.EOF, pexpect.TIMEOUT) as e:
raise exceptions.SetConfigError(
          'Attempted to copy to %s, but a timeout occurred.' % destination_file)
# We need to 'write memory' if we are doing running-config.
if destination_file == 'running-config':
logging.debug('Attempting to copy running-config to startup-config '
'on %s(%s)', self.host, self.loopback_ipv4)
try:
self._Cmd('wr mem')
except exceptions.CmdError as e:
raise exceptions.SetConfigError('Failed to write startup-config '
'for %s(%s). Changes applied. '
'Error was: %s' %
(self.host, self.loopback_ipv4,
str(e)))
finally:
try:
self._DeleteFile(remote_tmpfile)
except DeleteFileError as e:
result.transcript = 'SetConfig warning: %s' % str(e)
logging.warn(result.transcript)
# And finally, return the result text.
return result
def _DeleteFile(self, file_name):
"""Delete a file.
Args:
file_name: A string, the file name.
Raises:
DeleteFileError, if the deletion failed.
"""
try:
self._connection.child.send('\r')
self._connection.child.expect('\r\n', timeout=self.timeout_act_user)
self._connection.child.expect(self._connection.re_prompt,
timeout=self.timeout_act_user,
searchwindowsize=128)
self._connection.child.send('delete %s\r' % file_name)
except pexpect.ExceptionPexpect:
raise DeleteFileError('DeleteFile operation failed. %s' %
self._connection.child)
try:
pindex = self._connection.child.expect(
[r'Delete filename \[.*\]\?',
r'%.*Error.*'],
timeout=self.timeout_act_user)
if pindex == 0:
self._connection.child.send('\r')
logging.debug('DeleteFile: answering first confirmation.')
self._connection.child.expect([r'Delete .*\[confirm\]'],
timeout=self.timeout_act_user)
logging.debug('DeleteFile: answering second confirmation.')
self._connection.child.send('\r')
elif pindex == 1:
raise DeleteFileError('DeleteFile operation failed. %s' %
self._connection.child.match)
pindex = self._connection.child.expect([self._connection.re_prompt,
r'%.*Error.*'],
timeout=self.timeout_act_user)
if pindex == 1:
raise DeleteFileError('DeleteFile operation failed. %s' %
self._connection.child.match)
logging.debug('DeleteFile: success.')
except pexpect.ExceptionPexpect:
raise DeleteFileError('DeleteFile operation failed. %s' %
self._connection.child)
def _GetConfig(self, source):
try:
if source in ('running-config', 'startup-config'):
result = self._Cmd('show %s' % source)
else:
raise exceptions.GetConfigError('source argument must be '
'"running-config" or '
'"startup-config".')
if not result:
return exceptions.EmptyConfigError('%s has an empty configuration.' %
self.host)
else:
return result
except exceptions.Error as e:
raise exceptions.GetConfigError('Could not fetch config from %s. %s.' %
(self.host, str(e)))
def _Disconnect(self):
if hasattr(self, '_connection'):
try:
self._connection.child.send('exit\r')
self._connection.child.expect(self._connection.exit_list,
timeout=self.timeout_act_user)
self.connected = False
except (pexpect.EOF, pexpect.TIMEOUT) as e:
self.connected = False
raise exceptions.DisconnectError('%s: %s' % (e.__class__, str(e)))
def _DisablePager(self):
"""Disables the pager."""
try:
self._connection.child.send('\r')
self._connection.child.expect(r'\r\n',
timeout=self.timeout_connect)
self._connection.child.expect(self._connection.re_prompt,
timeout=self.timeout_connect,
searchwindowsize=128)
self._connection.child.send('terminal pager 0\r')
pindex = self._connection.child.expect(
[self._connection.re_prompt, r'Command authorization failed\.'],
timeout=self.timeout_connect)
if pindex == 1:
self.connected = False
        raise exceptions.ConnectError('terminal pager 0 command denied.')
# Pause momentarily to avoid a TAC+ packet drop.
time.sleep(0.5)
except (pexpect.EOF, pexpect.TIMEOUT) as e:
self.connected = False
raise exceptions.ConnectError('%s: %s' % (e.__class__, str(e)))
logging.debug('terminal length set to 0')
|
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
#
# Author Yann Bayle
# E-mail [email protected]
# License MIT
# Created 30/03/2017
# Updated 30/03/2017
# Version 1.0.0
#
"""
Description of recisio.py
======================
:Example:
source activate py27
python recisio.py
"""
import os
import re
import sys
import utils
import shutil
import pymysql
from pydub import AudioSegment
def rm_space(string):
"""
remove the first and last space if present in the str
"""
if " " in string[0]:
string = string[1:]
if " " in string[-1]:
string = string[:-1]
return string
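# illustrative example (not in the original script): rm_space(" Artist ") returns "Artist";
# note that only one leading and one trailing space are removed, unlike str.strip()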
def list_files():
folders = ["../features/marsyas/", "../features/yaafe/"]
data = []
for fold in folders:
file_list = os.listdir(fold)
for filen in file_list:
filen = filen.replace("_", " ")
info = filen.split("-")
data.append(rm_space(info[2]) + "," + rm_space(info[0]) + "," + rm_space(info[1]) + "\n")
data = list(set(data))
with open("file_list.csv", "w") as filep:
filep.write("id,artist,track\n")
for line in data:
filep.write(line)
def bextract_features(in_fn, out_fn, verbose=False):
bextract_cmd = "bextract -mfcc -zcrs -ctd -rlf -flx -ws 1024 -as 898 -sv -fe " + in_fn + " -w " + out_fn
if not verbose:
bextract_cmd += " > /dev/null 2>&1"
# print(bextract_cmd)
# sys.exit()
os.system(bextract_cmd)
def validate_arff(filename):
"""Description of validate_arff
Check if filename exists on path and is a file
If file corresponds to valid arff file return absolute path
Otherwise move file to invalid directory and return False
"""
# Check if file exists
if os.path.isfile(filename) and os.path.exists(filename):
filename = os.path.abspath(filename)
else:
return False
# If does not satisfy min size, move to "empty" folder
if os.stat(filename).st_size < 8100:
tmp_path = filename.split("/")
empty_dirname = "/".join(tmp_path[:-1]) + "/empty/"
if not os.path.exists(empty_dirname):
os.makedirs(empty_dirname)
shutil.move(filename, empty_dirname + tmp_path[-1])
return False
# # If filename does not match with feature name, move to "invalid" folder
# name_file = filename.split("/")[-1][:12]
# with open(filename) as filep:
# for i, line in enumerate(filep):
# if i == 70:
# # 71th line
# name_feat = line.split(" ")[2][1:13]
# break
# if name_file != name_feat:
# tmp_path = filename.split("/")
# invalid_dirname = "/".join(tmp_path[:-1]) + "/invalid/"
# if not os.path.exists(invalid_dirname):
# os.makedirs(invalid_dirname)
# shutil.move(filename, invalid_dirname + tmp_path[-1])
# return False
# If everything went well, return filename absolute path
return filename
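# illustrative behaviour (hypothetical path): validate_arff("../features/marsyas/track.arff")
# returns the absolute path when the file exists and is at least 8100 bytes; a missing file
# yields False, and an undersized file is moved to an "empty/" subfolder before False is returned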
def merge_arff(indir, outfilename):
"""Description of merge_arff
bextract program from Marsyas generate one output file per audio file
This function merge them all in one unique file
Check if analysed file are valid i.e. not empty
"""
utils.print_success("Preprocessing ARFFs")
indir = utils.abs_path_dir(indir)
filenames = os.listdir(indir)
outfn = open(outfilename, 'w')
cpt_invalid_fn = 0
# Write first lines of ARFF template file
for filename in filenames:
if os.path.isfile(indir + filename):
new_fn = validate_arff(indir + filename)
if new_fn:
with open(new_fn, 'r') as template:
nb_line = 74
for line in template:
if not nb_line:
break
nb_line -= 1
outfn.write(line)
break
else:
cpt_invalid_fn += 1
# Append all arff file to the output file
cur_file_num = 1
for filename in filenames:
if os.path.isfile(indir + filename):
new_fn = validate_arff(indir + filename)
if new_fn:
cur_file_num = cur_file_num + 1
utils.print_progress_start("Analysing file\t" + str(cur_file_num))
fname = open(new_fn, 'r')
outfn.write("".join(fname.readlines()[74:77]))
fname.close()
else:
cpt_invalid_fn += 1
utils.print_progress_end()
outfn.close()
# os.system("rm " + indir + "*.arff")
if cpt_invalid_fn:
utils.print_warning(str(cpt_invalid_fn) + " ARFF files with errors found")
return outfilename
def marsyas(out_dir, filelist):
"""Definition of marsyas
bextract is the cmd in marsyas that extract the features.
It needs as input a file which contains a list of audio files to compute.
If an audio file is corrupted, bextract crashes.
So, it is necessary to call bextract with only one audio file each time.
bextract then produces one output file for each audio file.
It is neccesary to merge those files into one common file.
"""
dir_feat = utils.create_dir(utils.create_dir(out_dir) + "marsyas/")
tmp = "tmp.mf"
for index, filen in enumerate(filelist):
utils.print_progress_start(str(index+1) + "/" + str(len(filelist)) + " " + filen.split(os.sep)[-1])
dir_audio = filen.split("/")[:-1]
filen = filen.split("/")[-1]
filen = filen.replace(" ", "_")
filen = filen.replace("'", "_")
filen = filen.replace('"', "_")
# tmp = filen + ".mf"
with open(tmp, "a") as filep:
filep.write(os.sep.join(dir_audio) + os.sep + filen + "\n")
outfilename = dir_feat + filen + ".arff"
bextract_features(tmp, outfilename, verbose=True)
os.remove(tmp)
merge_arff(dir_feat, out_dir + "marsyas.arff")
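# usage sketch (hypothetical paths): marsyas("../features/", ["/data/audio/track1.wav"])
# writes one ARFF per track under ../features/marsyas/ and merges them into
# ../features/marsyas.arff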
def run_cmd(cmd_name, verbose=False):
if not verbose:
cmd_name += " > /dev/null 2>&1"
os.system(cmd_name)
def essentia_extract_feat(in_fn, out_fn, verbose=False):
cmd = "/home/yann/MTG/Extractor/essentia-extractors-v2.1_beta2/streaming_extractor_music '" + in_fn + "' '" + out_fn + "'"
run_cmd(cmd)
def essentia(out_dir, filen):
dir_feat = utils.create_dir(utils.create_dir(out_dir) + "essentia/")
output = dir_feat + filen.split("/")[-1] + ".json"
essentia_extract_feat(filen, output)
def extract_features(dir_audio, dir_feat):
dir_audio = utils.abs_path_dir(dir_audio)
dir_feat = utils.abs_path_dir(dir_feat)
filelist = []
for elem in os.listdir(dir_audio):
if os.path.isfile(dir_audio + elem):
filelist.append(dir_audio + elem)
else:
for filename in os.listdir(dir_audio + elem):
if "ld.wav" in filename:
filelist.append(dir_audio + elem + "/" + filename)
# marsyas(dir_feat, filelist)
for index, filen in enumerate(filelist):
utils.print_progress_start(str(index+1) + "/" + str(len(filelist)) + " " + filen.split(os.sep)[-1])
utils.yaafe(filen)
essentia(dir_feat, filen)
utils.print_progress_end()
def request(query, verbose=False):
try:
db = pymysql.connect(host="localhost",user="yann",passwd="yann",db="doctorat")
except Exception:
print("Error in MySQL connexion")
else:
cur = db.cursor()
try:
cur.execute(query)
except Exception:
print("Error with query: " + query)
else:
db.commit()
result = cur.fetchall()
print(result)
db.close()
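# illustrative call (assumes the same local MySQL credentials as above):
#   request("SELECT COUNT(*) FROM recisio")
# prints the fetched rows; for a read-only SELECT the commit is a harmless no-op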
def update_filelist():
"""
@brief Update the database with the boolean indicating if features have
    been extracted by a tool
"""
main_dir = "E:/_These/DataSets/Recisio/features/"
folders = ["marsyas/", "yaafe/", "essentia/"]
for fold in folders:
fold = main_dir + fold
query = "UPDATE recisio SET " + fold.split("/")[-2] + " = 1 WHERE id = "
file_list = os.listdir(fold)
for index, filen in enumerate(file_list):
print(index)
m = re.search(r"\d{2,10}", filen)
request(query + m.group())
def export(outfile):
"""
@brief Export artist and track name from the database
@param outfile The outfile for storing artist and track name
"""
query = "SELECT artist,track FROM recisio "
query += "WHERE feat_marsyas=1 AND feat_yaafe=1 and feat_essentia=1 "
query += "and artist NOT IN ('christmas-carol', 'traditional', 'comptine', "
query += "'nursery-rhyme', 'happy-birthday-songs', 'mexican-traditional') "
query += "ORDER BY artist ASC "
query += "INTO OUTFILE '" + outfile + "' "
query += "FIELDS TERMINATED BY ',' "
request(query)
def remaining(outfile):
"""
@brief Export remaining tracks to listen to
@param outfile The outfile for storing the artist and track name
"""
query = "SELECT artist,track "
query += "FROM recisio "
query += "WHERE feat_marsyas=1 AND feat_yaafe=1 and feat_essentia=1 and tag_gender ='' "
query += "ORDER BY artist ASC "
query += "INTO OUTFILE '" + outfile + "' "
query += "FIELDS TERMINATED BY ',' "
request(query)
def add_info_bv():
"""
@brief Adds an information about the presence of backing vocals.
So update gender to mixed.
TODO: later need to listen to the lead voice without bv
@return No return value
"""
main_dir = "E:/_These/DataSets/Recisio/audio/"
query1 = "UPDATE recisio SET gender = 'mixed' WHERE id = "
query2 = "UPDATE recisio SET tag_back_voc = 1 WHERE id = "
for index, fold in enumerate(os.listdir(main_dir)):
print(index)
if os.path.isdir(main_dir + fold):
filelist = os.listdir(main_dir + fold)
for filen in filelist:
if "-bv-" in filen:
m = re.search(r"\d{2,10}", filen)
request(query1 + m.group())
request(query2 + m.group())
break
def stat():
"""
@brief Display stat about the database
"""
pass
# query = "SELECT artist,track "
# query += "FROM recisio "
# query += "WHERE marsyas=1 AND yaafe=1 and essentia=1 and gender ='' "
# query += "ORDER BY artist ASC "
#intro
query += "INTO OUTFILE '" + outfile + "' "
# query += "FIELDS TERMINATED BY ',' "
# request(query)
def audio2mp3(folder, verbose=True):
"""
@brief Convert any audio files to mp3
@param folder The folder containing audio files to be converted in mp3
"""
folder = utils.abs_path_dir(folder)
filelist = os.listdir(folder)
for index, entire_fn in enumerate(filelist):
if verbose:
print(str(index + 1) + "/" + str(len(filelist)) + " " + entire_fn)
filen = entire_fn.split(".")[0]
extension = entire_fn.split(".")[1]
print(filen)
print(extension)
print(folder + entire_fn)
print(folder + filen)
audio = AudioSegment.from_file(folder + entire_fn, format=extension)
audio.export(folder + filen + ".mp3", format="mp3")
if verbose:
print("Conversion done")
def main():
"""
@brief Main entry point
"""
# audio2mp3("D:/_Doctorat/ISMIR2017/origins/conv")
# list_files()
# update_filelist()
# export(outfile="D:/_Doctorat/ISMIR2017/data/artist_track.csv")
# remaining(outfile="D:/_Doctorat/ISMIR2017/data/remaining.csv")
# add_info_bv()
# dir_feat1 = "/media/sf_SharedFolder/DataSets/Recisio/features/"
# dir_feat2 = "E:/_These/DataSets/Recisio/features/"
dir_audio = "/media/sf_DATA/ISMIR2017/origins/"
dir_feat3 = "/media/sf_DATA/ISMIR2017/features/origins/"
extract_features(dir_audio, dir_feat3)
if __name__ == "__main__":
main()
|
|
"""
@Author Mwaruwa Chaka, JKUAT ECE Final Year Project 2014
The overall face recognition code, using OpenCV FisherFaces
Detects, recognizes then saves the video names in a database
Cleans up after the job"""
import logging
import os , shutil
import time
import MySQLdb
# cv2 and helper:
import cv2
from lib.common import *
from lib.video import *
# add facerec to system path
import sys
sys.path.append("lib")
# facerec imports
from facerec.model import PredictableModel
from facerec.feature import Fisherfaces
from facerec.distance import EuclideanDistance
from facerec.classifier import NearestNeighbor
from facerec.validation import KFoldCrossValidation
from facerec.serialization import save_model, load_model
# for face detection (you can also use OpenCV2 directly):
from facedet.detector import CascadedDetector
import config
#from indexer import indexer
import indexer
class ExtendedPredictableModel(PredictableModel):
""" Subclasses the PredictableModel to store some more
information, so we don't need to pass the dataset
on each program call...
"""
def __init__(self, feature, classifier, image_size, subject_names):
PredictableModel.__init__(self, feature=feature, classifier=classifier)
self.image_size = image_size
self.subject_names = subject_names
def get_model(image_size, subject_names):
""" This method returns the PredictableModel which is used to learn a model
for possible further usage. If you want to define your own model, this
is the method to return it from!
"""
# Define the Fisherfaces Method as Feature Extraction method:
feature = Fisherfaces()
# Define a 1-NN classifier with Euclidean Distance:
classifier = NearestNeighbor(dist_metric=EuclideanDistance(), k=1)
# Return the model as the combination:
return ExtendedPredictableModel(feature=feature, classifier=classifier, image_size=image_size, subject_names=subject_names)
def read_subject_names(path):
"""Reads the folders of a given directory, which are used to display some
meaningful name instead of simply displaying a number.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
Returns:
folder_names: The names of the folder, so you can display it in a prediction.
"""
folder_names = []
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
folder_names.append(subdirname)
return folder_names
def read_images(path, image_size=None):
"""Reads the images in a given folder, resizes images on the fly if size is given.
Args:
path: Path to a folder with subfolders representing the subjects (persons).
        image_size: A tuple (width, height); images are resized to this size on the fly if given.
Returns:
A list [X, y, folder_names]
X: The images, which is a Python list of numpy arrays.
y: The corresponding labels (the unique number of the subject, person) in a Python list.
folder_names: The names of the folder, so you can display it in a prediction.
"""
c = 0
X = []
y = []
folder_names = []
for dirname, dirnames, filenames in os.walk(path):
for subdirname in dirnames:
folder_names.append(subdirname)
subject_path = os.path.join(dirname, subdirname)
for filename in os.listdir(subject_path):
try:
im = cv2.imread(os.path.join(subject_path, filename), cv2.IMREAD_GRAYSCALE)
# resize to given size (if given)
if (image_size is not None):
im = cv2.resize(im, image_size)
X.append(np.asarray(im, dtype=np.uint8))
y.append(c)
except IOError, (errno, strerror):
print "I/O error({0}): {1}".format(errno, strerror)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
c = c+1
return [X,y,folder_names]
def getTime(name):
    """Return the unix timestamp encoded in the file name if it looks valid,
    otherwise fall back to the current time."""
    try:
        n = int(name)
        if 1400000000 <= n <= 1500000000:
            return n
        return int(time.time())
    except:
        return int(time.time())
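# e.g. getTime("1417456366") returns 1417456366 (a plausible upload timestamp), while
# getTime("clip01") falls back to the current time (illustrative names)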
class App(object):
def __init__(self, model, camera_id, cascade_filename):
cascade_filename = config.cascadefile
self.model = model
self.detector = CascadedDetector(cascade_fn=cascade_filename, minNeighbors=5, scaleFactor=1.3)
#self.cam = create_capture(camera_id)
files = indexer.indexer()
#files = "1417456366.mp4"
while(files is None):
print "2> waiting for files to be uploaded..."
time.sleep(30)
files=indexer.indexer()
if files is not None:
break
file_input = config.rel_temp+'facerec/'+files
filename,ext = os.path.splitext(file_input)
self.filename = filename
height=1080
width=1920
fps=25
#convert to mp4
#os.system('ffmpeg -i '+filename+'.h264 '+filename+'.mp4')
test = filename+".mp4" #work with mp4 now
vid=cv2.VideoCapture(test)
height = int(vid.get(4))
width = int(vid.get(3))
fps = int(vid.get(5))
vid.release()
self.ratio = config.working_ratio
self.cam = create_capture(test)
self.threshold = config.fisherThreshold
print "starting face recognition"
self.started_at = int(time.time())
self.writer=cv2.VideoWriter(filename=filename+".avi",fourcc=cv2.cv.CV_FOURCC(*'XVID'), fps=fps,frameSize=(width,height))
def run(self):
while True:
ret, frame = self.cam.read()
if frame is None:
break
frame=cv2.flip(frame,1)
time_now = int(time.time())
time_taken = (time_now-self.started_at)/60.0
cur_frame = int(self.cam.get(1))
final_frame=int(self.cam.get(7))
percentage = (cur_frame*100.0/final_frame)
print "frame: "+str(cur_frame)+" of "+str(final_frame)+" %.2f" % (percentage) +"%"+" %.2f" % (time_taken) +"minutes"
if cur_frame==20:
"save thumbnail"
cv2.imwrite(config.project_root+'video/thumb/'+os.path.basename(self.filename)+'.jpg',frame)
#continue
# Resize the frame by working_ratio the original size for speeding up the detection process:
img = cv2.resize(frame, (frame.shape[1]/self.ratio, frame.shape[0]/self.ratio), interpolation = cv2.INTER_CUBIC)
imgout = img.copy()
full_imgout = frame.copy()
for i,r in enumerate(self.detector.detect(img)):
x0,y0,x1,y1 = r
# (1) Get face, (2) Convert to grayscale & (3) resize to image_size:
face = img[y0:y1, x0:x1]
face = cv2.cvtColor(face,cv2.COLOR_BGR2GRAY)
face = cv2.resize(face, self.model.image_size, interpolation = cv2.INTER_CUBIC)
# Get a prediction from the model:
[prediction,confidence] = self.model.predict(face)
confidence = float(confidence['distances'])
#adjust rectangle for full size
x0 = self.ratio*x0
y0 = self.ratio*y0
x1 = self.ratio*x1
y1 = self.ratio*y1
color1 = (0,255,0)
color2 = (0,0,255)
font=cv2.FONT_HERSHEY_SIMPLEX
if confidence <= self.threshold:
# Draw the face area in image:
cv2.rectangle(full_imgout, (x0,y0),(x1,y1),(0,255,0),2)
# Draw the predicted name (folder name...):
cv2.putText(full_imgout,self.model.subject_names[prediction],(x0-20,y0-10),font,1,color1,2)
cv2.putText(full_imgout,str(confidence),(x0-20,y0-40),font,1,color1,2)
#draw_str(full_imgout, (x0-20,y0-20), self.model.subject_names[prediction])
#draw_str(full_imgout, (x0-20,y0-40), str(confidence))
else:
# Draw the face area in image:
cv2.rectangle(full_imgout, (x0,y0),(x1,y1),(0,0,255),2)
# Draw the predicted name (folder name...):
cv2.putText(full_imgout,"Unknown",(x0-20,y0-20),font,1,color2,2)
#draw_str(full_imgout, (x0-20,y0-20), "Unknown")
self.writer.write(full_imgout)
#cv2.imshow('videofacerec', full_imgout)
# Show image & exit on escape:
ch = cv2.waitKey(10)
if ch == 27:
break
self.cam.release()
self.writer.release()
cv2.destroyAllWindows()
#move to complete and finish up
try:
filename = self.filename
f_name = os.path.basename(filename)
names = f_name+".avi"
shutil.move(config.abs_temp+'facerec/'+names , config.abs_temp+'facerec/complete/'+names)#source mp4
shutil.move(config.abs_temp+'facerec/'+f_name+'.mp4' , config.abs_temp+'facerec/done/'+f_name+'.mp4')#detected avi
except:
"do nothing"
""""convert written avi to mp4"
filename = self.filename
f_name = os.path.basename(filename)
#os.system('ffmpeg -i '+filename+'.avi '+config.rel_temp+'detected/'+f_name+'.mp4')
"add to db"
db=MySQLdb.connect(host=config.db_host, user=config.db_user, passwd = config.db_passwd, db=config.db_name)
names = f_name+".mp4"
n,ext = os.path.splitext(os.path.basename(names))
uploaded = getTime(n)
cur1=db.cursor()
cur1.execute('INSERT INTO uploads (video_name,uploaded_on) VALUES (%s,%s)' ,(names,uploaded))
db.commit()
"move files to respective folders"
try:
shutil.move(config.abs_temp+'detected/'+names , config.project_root+'video/facerec/'+names)#detected mp4
shutil.move(config.abs_temp+names , config.project_root+'video/raw/'+names)#raw mp4
except:
"do nothing"
"clean up -> delete h264 and avi files"
try:
os.remove(config.abs_temp+f_name+'.avi')#remove avi
os.remove(config.abs_temp+f_name+'.h264')#remove original h264 file
except:
"do nothing"
"""
if __name__ == '__main__':
from optparse import OptionParser
# model.pkl is a pickled (hopefully trained) PredictableModel, which is
# used to make predictions. You can learn a model yourself by passing the
# parameter -d (or --dataset) to learn the model from a given dataset.
usage = "usage: %prog [options] model_filename"
# Add options for training, resizing, validation and setting the camera id:
parser = OptionParser(usage=usage)
parser.add_option("-r", "--resize", action="store", type="string", dest="size", default="100x100",
help="Resizes the given dataset to a given size in format [width]x[height] (default: 100x100).")
parser.add_option("-v", "--validate", action="store", dest="numfolds", type="int", default=None,
help="Performs a k-fold cross validation on the dataset, if given (default: None).")
parser.add_option("-t", "--train", action="store", dest="dataset", type="string", default=None,
help="Trains the model on the given dataset.")
parser.add_option("-i", "--id", action="store", dest="camera_id", type="int", default=0,
help="Sets the Camera Id to be used (default: 0).")
parser.add_option("-c", "--cascade", action="store", dest="cascade_filename", default="haarcascade_frontalface_alt2.xml",
help="Sets the path to the Haar Cascade used for the face detection part (default: haarcascade_frontalface_alt2.xml).")
# Show the options to the user:
#parser.print_help()
#print "Press [ESC] to exit the program!"
#print "Script output:"
# Parse arguments:
(options, args) = parser.parse_args()
# Check if a model name was passed:
if len(args) == 0:
" "
#print "No prediction model was given."
#args[0] = "model.tkl"
#sys.exit()
# This model will be used (or created if the training parameter (-t, --train) exists:
#model_filename = args[0]
model_filename = "model.tkl"
# Check if the given model exists, if no dataset was passed:
if (options.dataset is None) and (not os.path.exists(model_filename)):
print "[Error] No prediction model found at '%s'." % model_filename
sys.exit()
# Check if the given (or default) cascade file exists:
if not os.path.exists(options.cascade_filename):
print "[Error] No Cascade File found at '%s'." % options.cascade_filename
sys.exit()
    # We are resizing the images to a fixed size, as this is necessary for some of
# the algorithms, some algorithms like LBPH don't have this requirement. To
# prevent problems from popping up, we resize them with a default value if none
# was given:
try:
image_size = (int(options.size.split("x")[0]), int(options.size.split("x")[1]))
except:
print "[Error] Unable to parse the given image size '%s'. Please pass it in the format [width]x[height]!" % options.size
sys.exit()
# We have got a dataset to learn a new model from:
if options.dataset:
# Check if the given dataset exists:
if not os.path.exists(options.dataset):
print "[Error] No dataset found at '%s'." % dataset_path
sys.exit()
# Reads the images, labels and folder_names from a given dataset. Images
# are resized to given size on the fly:
print "Loading dataset..."
[images, labels, subject_names] = read_images(options.dataset, image_size)
# Zip us a {label, name} dict from the given data:
list_of_labels = list(xrange(max(labels)+1))
subject_dictionary = dict(zip(list_of_labels, subject_names))
# Get the model we want to compute:
model = get_model(image_size=image_size, subject_names=subject_dictionary)
# Sometimes you want to know how good the model may perform on the data
# given, the script allows you to perform a k-fold Cross Validation before
# the Detection & Recognition part starts:
if options.numfolds:
print "Validating model with %s folds..." % options.numfolds
# We want to have some log output, so set up a new logging handler
# and point it to stdout:
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Add a handler to facerec modules, so we see what's going on inside:
logger = logging.getLogger("facerec")
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
# Perform the validation & print results:
crossval = KFoldCrossValidation(model, k=options.numfolds)
crossval.validate(images, labels)
crossval.print_results()
# Compute the model:
print "Computing the model..."
model.compute(images, labels)
# And save the model, which uses Pythons pickle module:
print "Saving the model..."
save_model(model_filename, model)
else:
print "Loading the model..."
model = load_model(model_filename)
# We operate on an ExtendedPredictableModel. Quit the application if this
# isn't what we expect it to be:
if not isinstance(model, ExtendedPredictableModel):
print "[Error] The given model is not of type '%s'." % "ExtendedPredictableModel"
sys.exit()
    # Now it's time to finally start the Application! It simply gets the model
    # and the image size that the incoming webcam or video images are resized to:
print "Starting application..."
App(model=model,
camera_id=options.camera_id,
cascade_filename=options.cascade_filename).run()
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v10.common.types import criteria
from google.ads.googleads.v10.enums.types import campaign_criterion_status
from google.ads.googleads.v10.enums.types import criterion_type
__protobuf__ = proto.module(
package="google.ads.googleads.v10.resources",
marshal="google.ads.googleads.v10",
manifest={"CampaignCriterion",},
)
class CampaignCriterion(proto.Message):
r"""A campaign criterion.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
resource_name (str):
Immutable. The resource name of the campaign criterion.
Campaign criterion resource names have the form:
``customers/{customer_id}/campaignCriteria/{campaign_id}~{criterion_id}``
campaign (str):
Immutable. The campaign to which the
criterion belongs.
This field is a member of `oneof`_ ``_campaign``.
criterion_id (int):
Output only. The ID of the criterion.
This field is ignored during mutate.
This field is a member of `oneof`_ ``_criterion_id``.
display_name (str):
Output only. The display name of the
criterion.
This field is ignored for mutates.
bid_modifier (float):
The modifier for the bids when the criterion
matches. The modifier must be in the range: 0.1
- 10.0. Most targetable criteria types support
modifiers. Use 0 to opt out of a Device type.
This field is a member of `oneof`_ ``_bid_modifier``.
negative (bool):
Immutable. Whether to target (``false``) or exclude
(``true``) the criterion.
This field is a member of `oneof`_ ``_negative``.
type_ (google.ads.googleads.v10.enums.types.CriterionTypeEnum.CriterionType):
Output only. The type of the criterion.
status (google.ads.googleads.v10.enums.types.CampaignCriterionStatusEnum.CampaignCriterionStatus):
The status of the criterion.
keyword (google.ads.googleads.v10.common.types.KeywordInfo):
Immutable. Keyword.
This field is a member of `oneof`_ ``criterion``.
placement (google.ads.googleads.v10.common.types.PlacementInfo):
Immutable. Placement.
This field is a member of `oneof`_ ``criterion``.
mobile_app_category (google.ads.googleads.v10.common.types.MobileAppCategoryInfo):
Immutable. Mobile app category.
This field is a member of `oneof`_ ``criterion``.
mobile_application (google.ads.googleads.v10.common.types.MobileApplicationInfo):
Immutable. Mobile application.
This field is a member of `oneof`_ ``criterion``.
location (google.ads.googleads.v10.common.types.LocationInfo):
Immutable. Location.
This field is a member of `oneof`_ ``criterion``.
device (google.ads.googleads.v10.common.types.DeviceInfo):
Immutable. Device.
This field is a member of `oneof`_ ``criterion``.
ad_schedule (google.ads.googleads.v10.common.types.AdScheduleInfo):
Immutable. Ad Schedule.
This field is a member of `oneof`_ ``criterion``.
age_range (google.ads.googleads.v10.common.types.AgeRangeInfo):
Immutable. Age range.
This field is a member of `oneof`_ ``criterion``.
gender (google.ads.googleads.v10.common.types.GenderInfo):
Immutable. Gender.
This field is a member of `oneof`_ ``criterion``.
income_range (google.ads.googleads.v10.common.types.IncomeRangeInfo):
Immutable. Income range.
This field is a member of `oneof`_ ``criterion``.
parental_status (google.ads.googleads.v10.common.types.ParentalStatusInfo):
Immutable. Parental status.
This field is a member of `oneof`_ ``criterion``.
user_list (google.ads.googleads.v10.common.types.UserListInfo):
Immutable. User List.
This field is a member of `oneof`_ ``criterion``.
youtube_video (google.ads.googleads.v10.common.types.YouTubeVideoInfo):
Immutable. YouTube Video.
This field is a member of `oneof`_ ``criterion``.
youtube_channel (google.ads.googleads.v10.common.types.YouTubeChannelInfo):
Immutable. YouTube Channel.
This field is a member of `oneof`_ ``criterion``.
proximity (google.ads.googleads.v10.common.types.ProximityInfo):
Immutable. Proximity.
This field is a member of `oneof`_ ``criterion``.
topic (google.ads.googleads.v10.common.types.TopicInfo):
Immutable. Topic.
This field is a member of `oneof`_ ``criterion``.
listing_scope (google.ads.googleads.v10.common.types.ListingScopeInfo):
Immutable. Listing scope.
This field is a member of `oneof`_ ``criterion``.
language (google.ads.googleads.v10.common.types.LanguageInfo):
Immutable. Language.
This field is a member of `oneof`_ ``criterion``.
ip_block (google.ads.googleads.v10.common.types.IpBlockInfo):
Immutable. IpBlock.
This field is a member of `oneof`_ ``criterion``.
content_label (google.ads.googleads.v10.common.types.ContentLabelInfo):
Immutable. ContentLabel.
This field is a member of `oneof`_ ``criterion``.
carrier (google.ads.googleads.v10.common.types.CarrierInfo):
Immutable. Carrier.
This field is a member of `oneof`_ ``criterion``.
user_interest (google.ads.googleads.v10.common.types.UserInterestInfo):
Immutable. User Interest.
This field is a member of `oneof`_ ``criterion``.
webpage (google.ads.googleads.v10.common.types.WebpageInfo):
Immutable. Webpage.
This field is a member of `oneof`_ ``criterion``.
operating_system_version (google.ads.googleads.v10.common.types.OperatingSystemVersionInfo):
Immutable. Operating system version.
This field is a member of `oneof`_ ``criterion``.
mobile_device (google.ads.googleads.v10.common.types.MobileDeviceInfo):
Immutable. Mobile Device.
This field is a member of `oneof`_ ``criterion``.
location_group (google.ads.googleads.v10.common.types.LocationGroupInfo):
Immutable. Location Group
This field is a member of `oneof`_ ``criterion``.
custom_affinity (google.ads.googleads.v10.common.types.CustomAffinityInfo):
Immutable. Custom Affinity.
This field is a member of `oneof`_ ``criterion``.
custom_audience (google.ads.googleads.v10.common.types.CustomAudienceInfo):
Immutable. Custom Audience
This field is a member of `oneof`_ ``criterion``.
combined_audience (google.ads.googleads.v10.common.types.CombinedAudienceInfo):
Immutable. Combined Audience.
This field is a member of `oneof`_ ``criterion``.
keyword_theme (google.ads.googleads.v10.common.types.KeywordThemeInfo):
Immutable. Smart Campaign Keyword Theme.
This field is a member of `oneof`_ ``criterion``.
"""
resource_name = proto.Field(proto.STRING, number=1,)
campaign = proto.Field(proto.STRING, number=37, optional=True,)
criterion_id = proto.Field(proto.INT64, number=38, optional=True,)
display_name = proto.Field(proto.STRING, number=43,)
bid_modifier = proto.Field(proto.FLOAT, number=39, optional=True,)
negative = proto.Field(proto.BOOL, number=40, optional=True,)
type_ = proto.Field(
proto.ENUM,
number=6,
enum=criterion_type.CriterionTypeEnum.CriterionType,
)
status = proto.Field(
proto.ENUM,
number=35,
enum=campaign_criterion_status.CampaignCriterionStatusEnum.CampaignCriterionStatus,
)
keyword = proto.Field(
proto.MESSAGE,
number=8,
oneof="criterion",
message=criteria.KeywordInfo,
)
placement = proto.Field(
proto.MESSAGE,
number=9,
oneof="criterion",
message=criteria.PlacementInfo,
)
mobile_app_category = proto.Field(
proto.MESSAGE,
number=10,
oneof="criterion",
message=criteria.MobileAppCategoryInfo,
)
mobile_application = proto.Field(
proto.MESSAGE,
number=11,
oneof="criterion",
message=criteria.MobileApplicationInfo,
)
location = proto.Field(
proto.MESSAGE,
number=12,
oneof="criterion",
message=criteria.LocationInfo,
)
device = proto.Field(
proto.MESSAGE,
number=13,
oneof="criterion",
message=criteria.DeviceInfo,
)
ad_schedule = proto.Field(
proto.MESSAGE,
number=15,
oneof="criterion",
message=criteria.AdScheduleInfo,
)
age_range = proto.Field(
proto.MESSAGE,
number=16,
oneof="criterion",
message=criteria.AgeRangeInfo,
)
gender = proto.Field(
proto.MESSAGE,
number=17,
oneof="criterion",
message=criteria.GenderInfo,
)
income_range = proto.Field(
proto.MESSAGE,
number=18,
oneof="criterion",
message=criteria.IncomeRangeInfo,
)
parental_status = proto.Field(
proto.MESSAGE,
number=19,
oneof="criterion",
message=criteria.ParentalStatusInfo,
)
user_list = proto.Field(
proto.MESSAGE,
number=22,
oneof="criterion",
message=criteria.UserListInfo,
)
youtube_video = proto.Field(
proto.MESSAGE,
number=20,
oneof="criterion",
message=criteria.YouTubeVideoInfo,
)
youtube_channel = proto.Field(
proto.MESSAGE,
number=21,
oneof="criterion",
message=criteria.YouTubeChannelInfo,
)
proximity = proto.Field(
proto.MESSAGE,
number=23,
oneof="criterion",
message=criteria.ProximityInfo,
)
topic = proto.Field(
proto.MESSAGE, number=24, oneof="criterion", message=criteria.TopicInfo,
)
listing_scope = proto.Field(
proto.MESSAGE,
number=25,
oneof="criterion",
message=criteria.ListingScopeInfo,
)
language = proto.Field(
proto.MESSAGE,
number=26,
oneof="criterion",
message=criteria.LanguageInfo,
)
ip_block = proto.Field(
proto.MESSAGE,
number=27,
oneof="criterion",
message=criteria.IpBlockInfo,
)
content_label = proto.Field(
proto.MESSAGE,
number=28,
oneof="criterion",
message=criteria.ContentLabelInfo,
)
carrier = proto.Field(
proto.MESSAGE,
number=29,
oneof="criterion",
message=criteria.CarrierInfo,
)
user_interest = proto.Field(
proto.MESSAGE,
number=30,
oneof="criterion",
message=criteria.UserInterestInfo,
)
webpage = proto.Field(
proto.MESSAGE,
number=31,
oneof="criterion",
message=criteria.WebpageInfo,
)
operating_system_version = proto.Field(
proto.MESSAGE,
number=32,
oneof="criterion",
message=criteria.OperatingSystemVersionInfo,
)
mobile_device = proto.Field(
proto.MESSAGE,
number=33,
oneof="criterion",
message=criteria.MobileDeviceInfo,
)
location_group = proto.Field(
proto.MESSAGE,
number=34,
oneof="criterion",
message=criteria.LocationGroupInfo,
)
custom_affinity = proto.Field(
proto.MESSAGE,
number=36,
oneof="criterion",
message=criteria.CustomAffinityInfo,
)
custom_audience = proto.Field(
proto.MESSAGE,
number=41,
oneof="criterion",
message=criteria.CustomAudienceInfo,
)
combined_audience = proto.Field(
proto.MESSAGE,
number=42,
oneof="criterion",
message=criteria.CombinedAudienceInfo,
)
keyword_theme = proto.Field(
proto.MESSAGE,
number=45,
oneof="criterion",
message=criteria.KeywordThemeInfo,
)
__all__ = tuple(sorted(__protobuf__.manifest))
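# Usage sketch (illustrative only; resource names and keyword text are hypothetical):
#   criterion = CampaignCriterion(
#       campaign="customers/1234567890/campaigns/987654321",
#       negative=True,
#       keyword=criteria.KeywordInfo(text="example query"),
#   )
# Because ``keyword`` belongs to the ``criterion`` oneof, assigning another member
# such as ``placement`` afterwards would clear ``keyword``.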
|
|
"""
Main Lomb-Scargle Implementation
The ``lombscargle`` function here is essentially a sophisticated switch
statement for the various implementations available in this submodule
"""
import warnings
import numpy as np
from astropy import units
from astropy.utils.compat.numpy import broadcast_arrays
from .slow_impl import lombscargle_slow
from .fast_impl import lombscargle_fast
from .scipy_impl import lombscargle_scipy
from .chi2_impl import lombscargle_chi2
from .fastchi2_impl import lombscargle_fastchi2
METHODS = {'slow': lombscargle_slow,
'fast': lombscargle_fast,
'chi2': lombscargle_chi2,
'scipy': lombscargle_scipy,
'fastchi2': lombscargle_fastchi2}
def _validate_inputs(t, y, dy=None, frequency=None, strip_units=True):
"""Validation of input shapes & units
This utility function serves a few purposes:
- it validates that the shapes of t, y, and dy match, and broadcasts
them to a common 1D shape
    - if any of t, y, dy, or frequency are astropy Quantities (i.e. have
units attached), it validates that the units are compatible, and does
any necessary unit conversions
- if ``strip_units == True``, it strips units from all the arrays
before returning them.
- all relevant units are returned in ``unit_dict``
Parameters
----------
t, y : array_like or Quantity
dy, frequency : array_like or Quantity (optional)
strip_units : bool (optional, default=True)
if True, the returned quantities will have units stripped.
Returns
-------
t, y, dy, frequency : ndarray, Quantity, or None
reshaped and/or unit-stripped arrays
unit_dict : dict
dictionary of relevant units
"""
if dy is None:
t, y = broadcast_arrays(t, y, subok=True)
else:
t, y, dy = broadcast_arrays(t, y, dy, subok=True)
if t.ndim != 1:
raise ValueError("Input times & data must be one-dimensional")
has_units = any(isinstance(arr, units.Quantity)
for arr in (t, y, dy, frequency))
if has_units:
power_unit = units.dimensionless_unscaled
t = units.Quantity(t)
y = units.Quantity(y)
if frequency is not None:
frequency = units.Quantity(frequency)
if not t.unit.is_equivalent(1. / frequency.unit):
raise ValueError("Units of frequency not equivalent to "
"units of 1/t")
t = units.Quantity(t, unit=1. / frequency.unit)
if dy is not None:
dy = units.Quantity(dy)
if not y.unit.is_equivalent(dy.unit):
raise ValueError("Units of y not equivalent to units of dy")
dy = units.Quantity(dy, unit=y.unit)
else:
power_unit = 1
t = np.asarray(t)
y = np.asarray(y)
if dy is not None:
dy = np.asarray(dy)
def get_unit(val):
if isinstance(val, units.Quantity):
return val.unit
else:
return 1
unit_dict = {'t': get_unit(t),
'y': get_unit(y),
'dy': get_unit(y),
'frequency': 1. / get_unit(t),
'power': power_unit}
def unit_strip(arr):
if arr is None:
return arr
else:
return np.asarray(arr)
if strip_units:
t, y, dy, frequency = map(unit_strip, (t, y, dy, frequency))
return t, y, dy, frequency, unit_dict
def _get_frequency_grid(frequency, assume_regular_frequency=False):
"""Utility to get grid parameters from a frequency array
Parameters
----------
frequency : array_like or Quantity
input frequency grid
assume_regular_frequency : bool (default = False)
if True, then do not check whether frequency is a regular grid
Returns
-------
f0, df, N : scalars
Parameters such that all(frequency == f0 + df * np.arange(N))
"""
frequency = np.asarray(frequency)
if frequency.ndim != 1:
raise ValueError("frequency grid must be 1 dimensional")
elif len(frequency) == 1:
return frequency[0], frequency[0], 1
elif not assume_regular_frequency:
diff = frequency[1:] - frequency[:-1]
if not np.allclose(diff[0], diff):
raise ValueError("frequency must be a regular grid")
return frequency[0], frequency[1] - frequency[0], len(frequency)
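# e.g. _get_frequency_grid(np.array([0.1, 0.2, 0.3])) returns (f0=0.1, df=0.1, N=3)
# (illustrative values; an irregular grid would raise a ValueError instead)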
def _is_regular(frequency, assume_regular_frequency=False):
if assume_regular_frequency:
return True
frequency = np.asarray(frequency)
if frequency.ndim != 1:
return False
elif len(frequency) == 1:
return True
else:
diff = frequency[1:] - frequency[:-1]
return np.allclose(diff[0], diff)
def _validate_method(method, dy, fit_bias, nterms,
frequency, assume_regular_frequency):
fast_method_ok = hasattr(np.ufunc, 'at')
if not fast_method_ok:
warnings.warn("Fast Lomb-Scargle methods require numpy version 1.8 "
"or newer. Using slower methods instead.")
    # automatically choose the appropriate method
if method == 'auto':
if nterms != 1:
if (fast_method_ok and len(frequency) > 100
and _is_regular(frequency, assume_regular_frequency)):
method = 'fastchi2'
else:
method = 'chi2'
elif (fast_method_ok and len(frequency) > 100
and _is_regular(frequency, assume_regular_frequency)):
method = 'fast'
elif dy is None and not fit_bias:
method = 'scipy'
else:
method = 'slow'
if method not in METHODS:
raise ValueError("invalid method: {0}".format(method))
return method
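# e.g. with dy=None, fit_bias=False, nterms=1 and an irregular 50-point frequency grid,
# method='auto' resolves to 'scipy'; with more than 100 regularly spaced frequencies it
# resolves to 'fast' (illustrative cases, assuming numpy >= 1.8 so np.ufunc.at exists)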
def lombscargle(t, y, dy=None,
frequency=None,
method='auto',
assume_regular_frequency=False,
normalization='normalized',
fit_bias=True, center_data=True,
method_kwds=None, nterms=1):
"""
    Compute the Lomb-Scargle periodogram with a given method.
Parameters
----------
t : array_like
sequence of observation times
y : array_like
sequence of observations associated with times t
dy : float or array_like (optional)
error or sequence of observational errors associated with times t
frequency : array_like
frequencies (not angular frequencies) at which to evaluate the
periodogram. If not specified, optimal frequencies will be chosen using
a heuristic which will attempt to provide sufficient frequency range
and sampling so that peaks will not be missed. Note that in order to
use method='fast', frequencies must be regularly spaced.
method : string (optional)
specify the lomb scargle implementation to use. Options are:
- 'auto': choose the best method based on the input
- 'fast': use the O[N log N] fast method. Note that this requires
evenly-spaced frequencies: by default this will be checked unless
`assume_regular_frequency` is set to True.
- `slow`: use the O[N^2] pure-python implementation
- `chi2`: use the O[N^2] chi2/linear-fitting implementation
- `fastchi2`: use the O[N log N] chi2 implementation. Note that this
requires evenly-spaced frequencies: by default this will be checked
unless `assume_regular_frequency` is set to True.
- `scipy`: use ``scipy.signal.lombscargle``, which is an O[N^2]
implementation written in C. Note that this does not support
heteroskedastic errors.
assume_regular_frequency : bool (optional)
if True, assume that the input frequency is of the form
freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
or 'fast'.
normalization : string (optional, default='normalized')
Normalization to use for the periodogram. Options are 'normalized' or
'unnormalized'.
fit_bias : bool (optional, default=True)
        if True, include a constant offset as part of the model at each
        frequency. This can lead to more accurate results, especially in the
        case of incomplete phase coverage.
center_data : bool (optional, default=True)
if True, pre-center the data by subtracting the weighted mean
of the input data. This is especially important if `fit_bias = False`
method_kwds : dict (optional)
additional keywords to pass to the lomb-scargle method
nterms : int (default=1)
number of Fourier terms to use in the periodogram.
Not supported with every method.
Returns
-------
PLS : array_like
Lomb-Scargle power associated with each frequency omega
"""
if frequency is None:
raise ValueError("Must supply a valid frequency. If you would like "
"an automatic frequency grid, use the "
"LombScargle.autopower() method.")
t, y, dy, frequency, unit_dict = _validate_inputs(t, y, dy, frequency)
output_shape = frequency.shape
frequency = frequency.ravel()
# we'll need to adjust args and kwds for each method
args = (t, y, dy)
kwds = dict(frequency=frequency,
center_data=center_data,
fit_bias=fit_bias,
normalization=normalization,
nterms=nterms,
**(method_kwds or {}))
method = _validate_method(method, dy=dy, fit_bias=fit_bias, nterms=nterms,
frequency=frequency,
assume_regular_frequency=assume_regular_frequency)
# scipy doesn't support dy or fit_bias=True
if method == 'scipy':
if kwds.pop('fit_bias'):
raise ValueError("scipy method does not support fit_bias=True")
if dy is not None:
dy = np.ravel(np.asarray(dy))
if not np.allclose(dy[0], dy):
raise ValueError("scipy method only supports "
"uniform uncertainties dy")
args = (t, y)
# fast methods require frequency expressed as a grid
if method.startswith('fast'):
f0, df, Nf = _get_frequency_grid(kwds.pop('frequency'),
assume_regular_frequency)
kwds.update(f0=f0, df=df, Nf=Nf)
# only chi2 methods support nterms
if not method.endswith('chi2'):
if kwds.pop('nterms') != 1:
raise ValueError("nterms != 1 only supported with 'chi2' "
"or 'fastchi2' methods")
PLS = METHODS[method](*args, **kwds)
return PLS.reshape(output_shape) * unit_dict['power']
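# Usage sketch (illustrative, not part of the original module). The helper
# below is hypothetical and only shows how the keyword arguments documented
# above fit together; it assumes numpy is installed.
def _example_lombscargle_usage():
    import numpy as np
    rng = np.random.RandomState(42)
    t = np.sort(100 * rng.rand(200))                       # irregular times
    y = np.sin(2 * np.pi * 0.3 * t) + 0.1 * rng.randn(200)
    freq = np.linspace(0.01, 1.0, 500)                     # regular grid
    # dy is omitted and fit_bias stays True, so 'auto' will not pick 'scipy';
    # the regularly spaced grid makes the 'fast' implementation eligible.
    return lombscargle(t, y, frequency=freq, method='auto')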
|
|
import hashlib
import os
import re
import time
import uuid
from datetime import datetime
from django.conf import settings
from django.core.cache import cache
from django.db import connection, models
import caching.base as caching
from olympia import amo
from olympia.amo.models import ManagerBase, ModelBase
from olympia.access import acl
from olympia.addons.models import Addon
from olympia.amo.helpers import absolutify, user_media_path, user_media_url
from olympia.amo.urlresolvers import reverse
from olympia.amo.utils import sorted_groupby
from olympia.translations.fields import (
LinkifiedField, save_signal, NoLinksNoMarkupField, TranslatedField)
from olympia.users.models import UserProfile
SPECIAL_SLUGS = amo.COLLECTION_SPECIAL_SLUGS
class TopTags(object):
"""Descriptor to manage a collection's top tags in cache."""
def key(self, obj):
return '%s:top-tags:%s' % (settings.CACHE_PREFIX, obj.id)
def __get__(self, obj, type=None):
if obj is None:
return self
return cache.get(self.key(obj), [])
def __set__(self, obj, value):
two_days = 60 * 60 * 24 * 2
cache.set(self.key(obj), value, two_days)
class CollectionQuerySet(caching.CachingQuerySet):
def with_has_addon(self, addon_id):
"""Add a `has_addon` property to each collection.
`has_addon` will be `True` if `addon_id` exists in that
particular collection.
"""
has_addon = """
select 1 from addons_collections as ac
where ac.addon_id = %s and ac.collection_id = collections.id
limit 1"""
return self.extra(
select={'has_addon': has_addon},
select_params=(addon_id,))
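# Usage sketch (illustrative): Collection.objects.all().with_has_addon(1234)
# tags each collection with a truthy `has_addon` value when add-on 1234 is a
# member, via the correlated subquery above.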
class CollectionManager(ManagerBase):
def get_queryset(self):
qs = super(CollectionManager, self).get_queryset()
qs = qs._clone(klass=CollectionQuerySet)
return qs.transform(Collection.transformer)
def manual(self):
"""Only hand-crafted, favorites, and featured collections should appear
in this filter."""
types = (amo.COLLECTION_NORMAL, amo.COLLECTION_FAVORITES,
amo.COLLECTION_FEATURED, )
return self.filter(type__in=types)
def listed(self):
"""Return public collections only."""
return self.filter(listed=True)
def publishable_by(self, user):
"""Collections that are publishable by a user."""
owned_by = models.Q(author=user.id)
publishable_by = models.Q(users=user.id)
collections = self.filter(owned_by | publishable_by)
return collections.distinct().order_by('name__localized_string')
class Collection(ModelBase):
TYPE_CHOICES = amo.COLLECTION_CHOICES.items()
# TODO: Use models.UUIDField but it uses max_length=32 hex (no hyphen)
# uuids so needs some migration.
uuid = models.CharField(max_length=36, blank=True, unique=True)
name = TranslatedField(require_locale=False)
# nickname is deprecated. Use slug.
nickname = models.CharField(max_length=30, blank=True, unique=True,
null=True)
slug = models.CharField(max_length=30, blank=True, null=True)
description = NoLinksNoMarkupField(require_locale=False)
default_locale = models.CharField(max_length=10, default='en-US',
db_column='defaultlocale')
type = models.PositiveIntegerField(db_column='collection_type',
choices=TYPE_CHOICES, default=0)
icontype = models.CharField(max_length=25, blank=True)
listed = models.BooleanField(
default=True, help_text='Collections are either listed or private.')
subscribers = models.PositiveIntegerField(default=0)
downloads = models.PositiveIntegerField(default=0)
weekly_subscribers = models.PositiveIntegerField(default=0)
monthly_subscribers = models.PositiveIntegerField(default=0)
application = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='application_id',
null=True)
addon_count = models.PositiveIntegerField(default=0,
db_column='addonCount')
upvotes = models.PositiveIntegerField(default=0)
downvotes = models.PositiveIntegerField(default=0)
rating = models.FloatField(default=0)
all_personas = models.BooleanField(
default=False,
help_text='Does this collection only contain Themes?')
addons = models.ManyToManyField(Addon, through='CollectionAddon',
related_name='collections')
author = models.ForeignKey(UserProfile, null=True,
related_name='collections')
users = models.ManyToManyField(UserProfile, through='CollectionUser',
related_name='collections_publishable')
addon_index = models.CharField(
max_length=40, null=True, db_index=True,
help_text='Custom index for the add-ons in this collection')
objects = CollectionManager()
top_tags = TopTags()
class Meta(ModelBase.Meta):
db_table = 'collections'
unique_together = (('author', 'slug'),)
def __unicode__(self):
return u'%s (%s)' % (self.name, self.addon_count)
@classmethod
def make_index(cls, addon_ids):
ids = ':'.join(map(str, sorted(addon_ids)))
return hashlib.md5(ids).hexdigest()
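# e.g. make_index([3, 1, 2]) hashes the string '1:2:3', so the index is
# independent of the order in which the add-on ids are supplied.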
def save(self, **kw):
if not self.uuid:
self.uuid = unicode(uuid.uuid4())
if not self.slug:
self.slug = self.uuid[:30]
self.clean_slug()
# Maintain our index of add-on ids.
if self.id:
ids = self.addons.values_list('id', flat=True)
self.addon_index = self.make_index(ids)
super(Collection, self).save(**kw)
def clean_slug(self):
if self.type in SPECIAL_SLUGS:
self.slug = SPECIAL_SLUGS[self.type]
return
if self.slug in SPECIAL_SLUGS.values():
self.slug += '~'
if not self.author:
return
qs = self.author.collections.using('default')
slugs = dict((slug, id) for slug, id in qs.values_list('slug', 'id'))
if self.slug in slugs and slugs[self.slug] != self.id:
for idx in range(len(slugs)):
new = '%s-%s' % (self.slug, idx + 1)
if new not in slugs:
self.slug = new
return
def get_url_path(self):
return reverse('collections.detail',
args=[self.author_username, self.slug])
def get_abs_url(self):
return absolutify(self.get_url_path())
def get_img_dir(self):
return os.path.join(user_media_path('collection_icons'),
str(self.id / 1000))
def upvote_url(self):
return reverse('collections.vote',
args=[self.author_username, self.slug, 'up'])
def downvote_url(self):
return reverse('collections.vote',
args=[self.author_username, self.slug, 'down'])
def edit_url(self):
return reverse('collections.edit',
args=[self.author_username, self.slug])
def watch_url(self):
return reverse('collections.watch',
args=[self.author_username, self.slug])
def delete_url(self):
return reverse('collections.delete',
args=[self.author_username, self.slug])
def delete_icon_url(self):
return reverse('collections.delete_icon',
args=[self.author_username, self.slug])
def share_url(self):
return reverse('collections.share',
args=[self.author_username, self.slug])
def feed_url(self):
return reverse('collections.detail.rss',
args=[self.author_username, self.slug])
def stats_url(self):
return reverse('collections.stats',
args=[self.author_username, self.slug])
@property
def author_username(self):
return self.author.username if self.author else 'anonymous'
@classmethod
def get_fallback(cls):
return cls._meta.get_field('default_locale')
@property
def url_slug(self):
"""uuid or nickname if chosen"""
return self.nickname or self.uuid
@property
def icon_url(self):
modified = int(time.mktime(self.modified.timetuple()))
if self.icontype:
# [1] is the whole ID, [2] is the directory
split_id = re.match(r'((\d*?)\d{1,3})$', str(self.id))
path = "/".join([
split_id.group(2) or '0',
"%s.png?m=%s" % (self.id, modified)
])
return user_media_url('collection_icons') + path
elif self.type == amo.COLLECTION_FAVORITES:
return settings.STATIC_URL + 'img/icons/heart.png'
else:
return settings.STATIC_URL + 'img/icons/collection.png'
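# e.g. a collection with id 12345 and an icontype resolves to an icon path
# like '12/12345.png?m=<modified>' under the collection_icons media URL.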
def set_addons(self, addon_ids, comments={}):
"""Replace the current add-ons with a new list of add-on ids."""
order = dict((a, idx) for idx, a in enumerate(addon_ids))
# Partition addon_ids into add/update/remove buckets.
existing = set(self.addons.using('default')
.values_list('id', flat=True))
add, update = [], []
for addon in addon_ids:
bucket = update if addon in existing else add
bucket.append((addon, order[addon]))
remove = existing.difference(addon_ids)
cursor = connection.cursor()
now = datetime.now()
if remove:
cursor.execute("DELETE FROM addons_collections "
"WHERE collection_id=%s AND addon_id IN (%s)" %
(self.id, ','.join(map(str, remove))))
if self.listed:
for addon in remove:
amo.log(amo.LOG.REMOVE_FROM_COLLECTION,
(Addon, addon), self)
if add:
insert = '(%s, %s, %s, NOW(), NOW(), 0)'
values = [insert % (a, self.id, idx) for a, idx in add]
cursor.execute("""
INSERT INTO addons_collections
(addon_id, collection_id, ordering, created,
modified, downloads)
VALUES %s""" % ','.join(values))
if self.listed:
for addon_id, idx in add:
amo.log(amo.LOG.ADD_TO_COLLECTION,
(Addon, addon_id), self)
for addon, ordering in update:
(CollectionAddon.objects.filter(collection=self.id, addon=addon)
.update(ordering=ordering, modified=now))
for addon, comment in comments.iteritems():
try:
c = (CollectionAddon.objects.using('default')
.get(collection=self.id, addon=addon))
except CollectionAddon.DoesNotExist:
pass
else:
c.comments = comment
c.save(force_update=True)
self.save()
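# Usage sketch (illustrative): collection.set_addons([3, 1, 2]) makes those
# the only members, with `ordering` following the list position (3 -> 0,
# 1 -> 1, 2 -> 2); pass comments={addon_id: text} to annotate entries.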
def is_subscribed(self, user):
"""Determines if the user is subscribed to this collection."""
return self.following.filter(user=user).exists()
def add_addon(self, addon):
"Adds an addon to the collection."
CollectionAddon.objects.get_or_create(addon=addon, collection=self)
if self.listed:
amo.log(amo.LOG.ADD_TO_COLLECTION, addon, self)
self.save() # To invalidate Collection.
def remove_addon(self, addon):
CollectionAddon.objects.filter(addon=addon, collection=self).delete()
if self.listed:
amo.log(amo.LOG.REMOVE_FROM_COLLECTION, addon, self)
self.save() # To invalidate Collection.
def owned_by(self, user):
return (user.id == self.author_id)
def can_view_stats(self, request):
if request and request.user:
return (self.publishable_by(request.user) or
acl.action_allowed(request, 'CollectionStats', 'View'))
return False
@caching.cached_method
def publishable_by(self, user):
return bool(self.owned_by(user) or self.users.filter(pk=user.id))
@staticmethod
def transformer(collections):
if not collections:
return
author_ids = set(c.author_id for c in collections)
authors = dict((u.id, u) for u in
UserProfile.objects.filter(id__in=author_ids))
for c in collections:
c.author = authors.get(c.author_id)
@staticmethod
def post_save(sender, instance, **kwargs):
from . import tasks
if kwargs.get('raw'):
return
tasks.collection_meta.delay(instance.id, using='default')
tasks.index_collections.delay([instance.id])
@staticmethod
def post_delete(sender, instance, **kwargs):
from . import tasks
if kwargs.get('raw'):
return
tasks.unindex_collections.delay([instance.id])
def check_ownership(self, request, require_owner, require_author,
ignore_disabled, admin):
"""
Used by acl.check_ownership to see if request.user has permissions for
the collection.
"""
from olympia.access import acl
return acl.check_collection_ownership(request, self, require_owner)
models.signals.post_save.connect(Collection.post_save, sender=Collection,
dispatch_uid='coll.post_save')
models.signals.pre_save.connect(save_signal, sender=Collection,
dispatch_uid='coll_translations')
models.signals.post_delete.connect(Collection.post_delete, sender=Collection,
dispatch_uid='coll.post_delete')
class CollectionAddon(ModelBase):
addon = models.ForeignKey(Addon)
collection = models.ForeignKey(Collection)
# category (deprecated: for "Fashion Your Firefox")
comments = LinkifiedField(null=True)
downloads = models.PositiveIntegerField(default=0)
user = models.ForeignKey(UserProfile, null=True)
ordering = models.PositiveIntegerField(
default=0,
help_text='Add-ons are displayed in ascending order '
'based on this field.')
class Meta(ModelBase.Meta):
db_table = 'addons_collections'
unique_together = (('addon', 'collection'),)
@staticmethod
def post_save_or_delete(sender, instance, **kwargs):
"""Update Collection.addon_count."""
from . import tasks
tasks.collection_meta.delay(instance.collection_id, using='default')
models.signals.pre_save.connect(save_signal, sender=CollectionAddon,
dispatch_uid='coll_addon_translations')
# Update Collection.addon_count.
models.signals.post_save.connect(CollectionAddon.post_save_or_delete,
sender=CollectionAddon,
dispatch_uid='coll.post_save')
models.signals.post_delete.connect(CollectionAddon.post_save_or_delete,
sender=CollectionAddon,
dispatch_uid='coll.post_save')
class CollectionFeature(ModelBase):
title = TranslatedField()
tagline = TranslatedField()
class Meta(ModelBase.Meta):
db_table = 'collection_features'
models.signals.pre_save.connect(save_signal, sender=CollectionFeature,
dispatch_uid='collectionfeature_translations')
class CollectionPromo(ModelBase):
collection = models.ForeignKey(Collection, null=True)
locale = models.CharField(max_length=10, null=True)
collection_feature = models.ForeignKey(CollectionFeature)
class Meta(ModelBase.Meta):
db_table = 'collection_promos'
unique_together = ('collection', 'locale', 'collection_feature')
@staticmethod
def transformer(promos):
if not promos:
return
promo_dict = dict((p.id, p) for p in promos)
q = (Collection.objects.no_cache()
.filter(collectionpromo__in=promos)
.extra(select={'promo_id': 'collection_promos.id'}))
for promo_id, collection in (sorted_groupby(q, 'promo_id')):
promo_dict[promo_id].collection = collection.next()
class CollectionWatcher(ModelBase):
collection = models.ForeignKey(Collection, related_name='following')
user = models.ForeignKey(UserProfile)
class Meta(ModelBase.Meta):
db_table = 'collection_subscriptions'
@staticmethod
def post_save_or_delete(sender, instance, **kw):
from . import tasks
tasks.collection_watchers(instance.collection_id, using='default')
models.signals.post_save.connect(CollectionWatcher.post_save_or_delete,
sender=CollectionWatcher)
models.signals.post_delete.connect(CollectionWatcher.post_save_or_delete,
sender=CollectionWatcher)
class CollectionUser(models.Model):
collection = models.ForeignKey(Collection)
user = models.ForeignKey(UserProfile)
role = models.SmallIntegerField(
default=1,
choices=amo.COLLECTION_AUTHOR_CHOICES.items())
class Meta:
db_table = 'collections_users'
class CollectionVote(models.Model):
collection = models.ForeignKey(Collection, related_name='votes')
user = models.ForeignKey(UserProfile, related_name='votes')
vote = models.SmallIntegerField(default=0)
created = models.DateTimeField(null=True, auto_now_add=True)
class Meta:
db_table = 'collections_votes'
@staticmethod
def post_save_or_delete(sender, instance, **kwargs):
# There are some issues with cascade deletes, where the
# collection disappears before the votes. Make sure the
# collection exists before trying to update it in the task.
if Collection.objects.filter(id=instance.collection_id).exists():
from . import tasks
tasks.collection_votes(instance.collection_id, using='default')
models.signals.post_save.connect(CollectionVote.post_save_or_delete,
sender=CollectionVote)
models.signals.post_delete.connect(CollectionVote.post_save_or_delete,
sender=CollectionVote)
class FeaturedCollection(ModelBase):
application = models.PositiveIntegerField(choices=amo.APPS_CHOICES,
db_column='application_id')
collection = models.ForeignKey(Collection)
locale = models.CharField(max_length=10, null=True)
class Meta:
db_table = 'featured_collections'
def __unicode__(self):
return u'%s (%s: %s)' % (self.collection, self.application,
self.locale)
class MonthlyPick(ModelBase):
addon = models.ForeignKey(Addon)
blurb = models.TextField()
image = models.URLField()
locale = models.CharField(max_length=10, unique=True, null=True,
blank=True)
class Meta:
db_table = 'monthly_pick'
|
|
# Copyright 2010 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""GDB Pretty printers and convenience functions for Go's runtime structures.
This script is loaded by GDB when it finds a .debug_gdb_scripts
section in the compiled binary. The [68]l linkers emit this with a
path to this file based on the path to the runtime package.
"""
# Known issues:
# - pretty printing only works for the 'native' strings. E.g. 'type
# foo string' will make foo a plain struct in the eyes of gdb,
# circumventing the pretty print triggering.
import sys, re
print >>sys.stderr, "Loading Go Runtime support."
# allow to manually reload while developing
goobjfile = gdb.current_objfile() or gdb.objfiles()[0]
goobjfile.pretty_printers = []
#
# Pretty Printers
#
class StringTypePrinter:
"Pretty print Go strings."
pattern = re.compile(r'^struct string$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'string'
def to_string(self):
l = int(self.val['len'])
return self.val['str'].string("utf-8", "ignore", l)
class SliceTypePrinter:
"Pretty print slices."
pattern = re.compile(r'^struct \[\]')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'array'
def to_string(self):
return str(self.val.type)[6:] # skip 'struct '
def children(self):
if self.val["len"] > self.val["cap"]:
return
ptr = self.val["array"]
for idx in range(self.val["len"]):
yield ('[%d]' % idx, (ptr + idx).dereference())
class MapTypePrinter:
"""Pretty print map[K]V types.
Map-typed go variables are really pointers. Dereference them in gdb
to inspect their contents with this pretty printer.
"""
pattern = re.compile(r'^struct hash<.*>$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'map'
def to_string(self):
return str(self.val.type)
def children(self):
stab = self.val['st']
i = 0
for v in self.traverse_hash(stab):
yield ("[%d]" % i, v['key'])
yield ("[%d]" % (i + 1), v['val'])
i += 2
def traverse_hash(self, stab):
ptr = stab['entry'].address
last = stab['last']
while ptr <= last:
v = ptr.dereference()
ptr = ptr + 1
if v['hash'] == 0: continue
if v['hash'] & 63 == 63: # subtable
for v in self.traverse_hash(v['key'].cast(self.val['st'].type)):
yield v
else:
yield v
class ChanTypePrinter:
"""Pretty print chan[T] types.
Chan-typed go variables are really pointers. Dereference them in gdb
to inspect their contents with this pretty printer.
"""
pattern = re.compile(r'^struct hchan<.*>$')
def __init__(self, val):
self.val = val
def display_hint(self):
return 'array'
def to_string(self):
return str(self.val.type)
def children(self):
# see chan.c chanbuf(). et is the type stolen from hchan<T>::recvq->first->elem
et = [x.type for x in self.val['recvq']['first'].type.target().fields() if x.name == 'elem'][0]
ptr = (self.val.address + 1).cast(et.pointer())
for i in range(self.val["qcount"]):
j = (self.val["recvx"] + i) % self.val["dataqsiz"]
yield ('[%d]' % i, (ptr + j).dereference())
#
# Register all the *Printer classes above.
#
def makematcher(klass):
def matcher(val):
try:
if klass.pattern.match(str(val.type)):
return klass(val)
except:
pass
return matcher
goobjfile.pretty_printers.extend([makematcher(k) for k in vars().values() if hasattr(k, 'pattern')])
#
# For reference, this is what we're trying to do:
# eface: p *(*(struct 'runtime.commonType'*)'main.e'->type_->data)->string
# iface: p *(*(struct 'runtime.commonType'*)'main.s'->tab->Type->data)->string
#
# interface types can't be recognized by their name, instead we check
# if they have the expected fields. Unfortunately the mapping of
# fields to python attributes in gdb.py isn't complete: you can't test
# for presence other than by trapping.
def is_iface(val):
try:
return str(val['tab'].type) == "struct runtime.itab *" \
and str(val['data'].type) == "void *"
except:
pass
def is_eface(val):
try:
return str(val['_type'].type) == "struct runtime._type *" \
and str(val['data'].type) == "void *"
except:
pass
def lookup_type(name):
try:
return gdb.lookup_type(name)
except:
pass
try:
return gdb.lookup_type('struct ' + name)
except:
pass
try:
return gdb.lookup_type('struct ' + name[1:]).pointer()
except:
pass
_rctp_type = gdb.lookup_type("struct runtime.commonType").pointer()
_rtp_type = gdb.lookup_type("struct runtime._type").pointer()
def iface_commontype(obj):
if is_iface(obj):
go_type_ptr = obj['tab']['_type']
elif is_eface(obj):
go_type_ptr = obj['_type']
else:
return
# sanity check: reflection type description ends in a loop.
tt = go_type_ptr['_type'].cast(_rtp_type).dereference()['_type']
if tt != tt.cast(_rtp_type).dereference()['_type']:
return
return go_type_ptr['ptr'].cast(_rctp_type).dereference()
def iface_dtype(obj):
"Decode type of the data field of an eface or iface struct."
# known issue: dtype_name decoded from runtime.commonType is "nested.Foo"
# but the dwarf table lists it as "full/path/to/nested.Foo"
dynamic_go_type = iface_commontype(obj)
if dynamic_go_type is None:
return
dtype_name = dynamic_go_type['string'].dereference()['str'].string()
dynamic_gdb_type = lookup_type(dtype_name)
if dynamic_gdb_type is None:
return
type_size = int(dynamic_go_type['size'])
uintptr_size = int(dynamic_go_type['size'].type.sizeof) # size is itself an uintptr
if type_size > uintptr_size:
dynamic_gdb_type = dynamic_gdb_type.pointer()
return dynamic_gdb_type
def iface_dtype_name(obj):
"Decode type name of the data field of an eface or iface struct."
dynamic_go_type = iface_commontype(obj)
if dynamic_go_type is None:
return
return dynamic_go_type['string'].dereference()['str'].string()
class IfacePrinter:
"""Pretty print interface values
Casts the data field to the appropriate dynamic type."""
def __init__(self, val):
self.val = val
def display_hint(self):
return 'string'
def to_string(self):
if self.val['data'] == 0:
return 0x0
try:
dtype = iface_dtype(self.val)
except:
return "<bad dynamic type>"
if dtype is None: # trouble looking up, print something reasonable
return "(%s)%s" % (iface_dtype_name(self.val), self.val['data'])
try:
return self.val['data'].cast(dtype).dereference()
except:
pass
return self.val['data'].cast(dtype)
def ifacematcher(val):
if is_iface(val) or is_eface(val):
return IfacePrinter(val)
goobjfile.pretty_printers.append(ifacematcher)
#
# Convenience Functions
#
class GoLenFunc(gdb.Function):
"Length of strings, slices, maps or channels"
how = ((StringTypePrinter, 'len'),
(SliceTypePrinter, 'len'),
(MapTypePrinter, 'count'),
(ChanTypePrinter, 'qcount'))
def __init__(self):
super(GoLenFunc, self).__init__("len")
def invoke(self, obj):
typename = str(obj.type)
for klass, fld in self.how:
if klass.pattern.match(typename):
return obj[fld]
class GoCapFunc(gdb.Function):
"Capacity of slices or channels"
how = ((SliceTypePrinter, 'cap'),
(ChanTypePrinter, 'dataqsiz'))
def __init__(self):
super(GoCapFunc, self).__init__("cap")
def invoke(self, obj):
typename = str(obj.type)
for klass, fld in self.how:
if klass.pattern.match(typename):
return obj[fld]
class DTypeFunc(gdb.Function):
"""Cast Interface values to their dynamic type.
For non-interface types this behaves as the identity operation.
"""
def __init__(self):
super(DTypeFunc, self).__init__("dtype")
def invoke(self, obj):
try:
return obj['data'].cast(iface_dtype(obj))
except:
pass
return obj
#
# Commands
#
sts = ('idle', 'runnable', 'running', 'syscall', 'waiting', 'moribund', 'dead', 'recovery')
def linked_list(ptr, linkfield):
while ptr:
yield ptr
ptr = ptr[linkfield]
class GoroutinesCmd(gdb.Command):
"List all goroutines."
def __init__(self):
super(GoroutinesCmd, self).__init__("info goroutines", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
# args = gdb.string_to_argv(arg)
vp = gdb.lookup_type('void').pointer()
for ptr in linked_list(gdb.parse_and_eval("'runtime.allg'"), 'alllink'):
if ptr['status'] == 6: # 'gdead'
continue
s = ' '
if ptr['m']:
s = '*'
pc = ptr['sched']['pc'].cast(vp)
sp = ptr['sched']['sp'].cast(vp)
blk = gdb.block_for_pc(long((pc)))
print s, ptr['goid'], "%8s" % sts[long((ptr['status']))], blk.function
def find_goroutine(goid):
vp = gdb.lookup_type('void').pointer()
for ptr in linked_list(gdb.parse_and_eval("'runtime.allg'"), 'alllink'):
if ptr['status'] == 6: # 'gdead'
continue
if ptr['goid'] == goid:
return [ptr['sched'][x].cast(vp) for x in 'pc', 'sp']
return None, None
class GoroutineCmd(gdb.Command):
"""Execute gdb command in the context of goroutine <goid>.
Switch PC and SP to the ones in the goroutine's G structure,
execute an arbitrary gdb command, and restore PC and SP.
Usage: (gdb) goroutine <goid> <gdbcmd>
Note that it is ill-defined to modify state in the context of a goroutine.
Restrict yourself to inspecting values.
"""
def __init__(self):
super(GoroutineCmd, self).__init__("goroutine", gdb.COMMAND_STACK, gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
goid, cmd = arg.split(None, 1)
pc, sp = find_goroutine(int(goid))
if not pc:
print "No such goroutine: ", goid
return
save_frame = gdb.selected_frame()
gdb.parse_and_eval('$save_pc = $pc')
gdb.parse_and_eval('$save_sp = $sp')
gdb.parse_and_eval('$pc = 0x%x' % long(pc))
gdb.parse_and_eval('$sp = 0x%x' % long(sp))
try:
gdb.execute(cmd)
finally:
gdb.parse_and_eval('$pc = $save_pc')
gdb.parse_and_eval('$sp = $save_sp')
save_frame.select()
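# Example (illustrative): "(gdb) goroutine 12 bt" runs a backtrace with the
# registers temporarily switched to goroutine 12's saved pc/sp.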
class GoIfaceCmd(gdb.Command):
"Print Static and dynamic interface types"
def __init__(self):
super(GoIfaceCmd, self).__init__("iface", gdb.COMMAND_DATA, gdb.COMPLETE_SYMBOL)
def invoke(self, arg, from_tty):
for obj in gdb.string_to_argv(arg):
try:
#TODO fix quoting for qualified variable names
obj = gdb.parse_and_eval("%s" % obj)
except Exception, e:
print "Can't parse ", obj, ": ", e
continue
if obj['data'] == 0:
dtype = "nil"
else:
dtype = iface_dtype(obj)
if dtype is None:
print "Not an interface: ", obj.type
continue
print "%s: %s" % (obj.type, dtype)
# TODO: print interface's methods and dynamic type's func pointers thereof.
#rsc: "to find the number of entries in the itab's Fn field look at itab.inter->numMethods
#i am sure i have the names wrong but look at the interface type and its method count"
# so Itype will start with a commontype which has kind = interface
#
# Register all convenience functions and CLI commands
#
for k in vars().values():
if hasattr(k, 'invoke'):
k()
|
|
#!/usr/bin/env python
#
# Copyright (c) 2018, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import binascii
import bisect
import cmd
import os
import socket
import struct
import sys
import traceback
import time
import io
import config
import message
import pcap
def dbg_print(*args):
if False:
print(args)
class RealTime:
def __init__(self):
self._sniffer = config.create_default_thread_sniffer()
self._sniffer.start()
def set_lowpan_context(self, cid, prefix):
self._sniffer.set_lowpan_context(cid, prefix)
def get_messages_sent_by(self, nodeid):
return self._sniffer.get_messages_sent_by(nodeid)
def go(self, duration, nodeid=None):
time.sleep(duration)
def stop(self):
pass
class VirtualTime:
OT_SIM_EVENT_ALARM_FIRED = 0
OT_SIM_EVENT_RADIO_RECEIVED = 1
OT_SIM_EVENT_UART_WRITE = 2
OT_SIM_EVENT_RADIO_SPINEL_WRITE = 3
OT_SIM_EVENT_POSTCMD = 4
EVENT_TIME = 0
EVENT_SEQUENCE = 1
EVENT_ADDR = 2
EVENT_TYPE = 3
EVENT_DATA_LENGTH = 4
EVENT_DATA = 5
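# The indices above name the fields of the event tuples kept in
# self.event_queue: (time, sequence, addr, type, datalen) for alarm events
# and (time, sequence, addr, type, datalen, data) for events carrying data.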
BASE_PORT = 9000
MAX_NODES = 34
MAX_MESSAGE = 1024
END_OF_TIME = 0x7fffffff
PORT_OFFSET = int(os.getenv('PORT_OFFSET', '0'))
BLOCK_TIMEOUT = 4
RADIO_ONLY = os.getenv('RADIO_DEVICE') != None
NCP_SIM = os.getenv('NODE_TYPE', 'sim') == 'ncp-sim'
def __init__(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ip = '127.0.0.1'
self.port = self.BASE_PORT + (self.PORT_OFFSET * self.MAX_NODES)
self.sock.bind((ip, self.port))
self.devices = {}
self.event_queue = []
# there could be events scheduled at exactly the same time
self.event_sequence = 0
self.current_time = 0
self.current_event = None
self.awake_devices = set()
self._pcap = pcap.PcapCodec(os.getenv('TEST_NAME', 'current'))
# the addr for spinel-cli sending OT_SIM_EVENT_POSTCMD
self._spinel_cli_addr = (ip, self.BASE_PORT + self.port)
self.current_nodeid = None
self._pause_time = 0
self._message_factory = config.create_default_thread_message_factory()
def __del__(self):
if self.sock:
self.stop()
def stop(self):
self.sock.close()
self.sock = None
def _add_message(self, nodeid, message):
addr = ('127.0.0.1', self.port + nodeid)
# Ignore any exceptions
try:
msg = self._message_factory.create(io.BytesIO(message))
if msg is not None:
self.devices[addr]['msgs'].append(msg)
except Exception as e:
# Just print the exception to the console
print("EXCEPTION: %s" % e)
def set_lowpan_context(self, cid, prefix):
self._message_factory.set_lowpan_context(cid, prefix)
def get_messages_sent_by(self, nodeid):
""" Get sniffed messages.
Note! This method flushes the message queue so calling this method again will return only the newly logged messages.
Args:
nodeid (int): node id
Returns:
MessagesSet: a set with received messages.
"""
addr = ('127.0.0.1', self.port + nodeid)
messages = self.devices[addr]['msgs']
self.devices[addr]['msgs'] = []
return message.MessagesSet(messages)
def _is_radio(self, addr):
return addr[1] < self.BASE_PORT * 2
def _to_core_addr(self, addr):
assert self._is_radio(addr)
return (addr[0], addr[1] + self.BASE_PORT)
def _to_radio_addr(self, addr):
assert not self._is_radio(addr)
return (addr[0], addr[1] - self.BASE_PORT)
def _core_addr_from(self, nodeid):
if self.RADIO_ONLY:
return ('127.0.0.1', self.BASE_PORT + self.port + nodeid)
else:
return ('127.0.0.1', self.port + nodeid)
def _next_event_time(self):
if len(self.event_queue) == 0:
return self.END_OF_TIME
else:
return self.event_queue[0][0]
def receive_events(self):
""" Receive events until all devices are asleep. """
while True:
if self.current_event or len(self.awake_devices) or (self._next_event_time() > self._pause_time and self.current_nodeid):
self.sock.settimeout(self.BLOCK_TIMEOUT)
try:
msg, addr = self.sock.recvfrom(self.MAX_MESSAGE)
except socket.error:
# print debug information on failure
print('Current nodeid:')
print(self.current_nodeid)
print('Current awake:')
print(self.awake_devices)
print('Current time:')
print(self.current_time)
print('Current event:')
print(self.current_event)
print('Events:')
for event in self.event_queue:
print(event)
raise
else:
self.sock.settimeout(0)
try:
msg, addr = self.sock.recvfrom(self.MAX_MESSAGE)
except socket.error:
break
if addr != self._spinel_cli_addr and addr not in self.devices:
self.devices[addr] = {}
self.devices[addr]['alarm'] = None
self.devices[addr]['msgs'] = []
self.devices[addr]['time'] = self.current_time
self.awake_devices.discard(addr)
#print "New device:", addr, self.devices
delay, type, datalen = struct.unpack('=QBH', msg[:11])
data = msg[11:]
event_time = self.current_time + delay
if data:
dbg_print("New event: ", event_time, addr, type, datalen, binascii.hexlify(data))
else:
dbg_print("New event: ", event_time, addr, type, datalen)
if type == self.OT_SIM_EVENT_ALARM_FIRED:
# remove any existing alarm event for device
if self.devices[addr]['alarm']:
self.event_queue.remove(self.devices[addr]['alarm'])
#print "-- Remove\t", self.devices[addr]['alarm']
# add alarm event to event queue
event = (event_time, self.event_sequence, addr, type, datalen)
self.event_sequence += 1
#print "-- Enqueue\t", event, delay, self.current_time
bisect.insort(self.event_queue, event)
self.devices[addr]['alarm'] = event
self.awake_devices.discard(addr)
if self.current_event and self.current_event[self.EVENT_ADDR] == addr:
#print "Done\t", self.current_event
self.current_event = None
elif type == self.OT_SIM_EVENT_RADIO_RECEIVED:
assert self._is_radio(addr)
# add radio receive events event queue
for device in self.devices:
if device != addr and self._is_radio(device):
event = (event_time, self.event_sequence, device, type, datalen, data)
self.event_sequence += 1
#print "-- Enqueue\t", event
bisect.insort(self.event_queue, event)
self._pcap.append(data, (event_time // 1000000, event_time % 1000000))
self._add_message(addr[1] - self.port, data)
# add radio transmit done events to event queue
event = (event_time, self.event_sequence, addr, type, datalen, data)
self.event_sequence += 1
bisect.insort(self.event_queue, event)
self.awake_devices.add(addr)
elif type == self.OT_SIM_EVENT_RADIO_SPINEL_WRITE:
assert not self._is_radio(addr)
radio_addr = self._to_radio_addr(addr)
if not radio_addr in self.devices:
self.awake_devices.add(radio_addr)
event = (event_time, self.event_sequence, radio_addr, self.OT_SIM_EVENT_UART_WRITE, datalen, data)
self.event_sequence += 1
bisect.insort(self.event_queue, event)
self.awake_devices.add(addr)
elif type == self.OT_SIM_EVENT_UART_WRITE:
assert self._is_radio(addr)
core_addr = self._to_core_addr(addr)
if not core_addr in self.devices:
self.awake_devices.add(core_addr)
event = (event_time, self.event_sequence, core_addr, self.OT_SIM_EVENT_RADIO_SPINEL_WRITE, datalen, data)
self.event_sequence += 1
bisect.insort(self.event_queue, event)
self.awake_devices.add(addr)
elif type == self.OT_SIM_EVENT_POSTCMD:
assert self.current_time == self._pause_time
nodeid = struct.unpack('=B', data)[0]
if self.current_nodeid == nodeid:
self.current_nodeid = None
def _send_message(self, message, addr):
while True:
try:
sent = self.sock.sendto(message, addr)
except socket.error:
traceback.print_exc()
time.sleep(0)
else:
break
assert sent == len(message)
def process_next_event(self):
assert self.current_event is None
assert self._next_event_time() < self.END_OF_TIME
# process next event
event = self.event_queue.pop(0)
if len(event) == 5:
event_time, sequence, addr, type, datalen = event
dbg_print("Pop event: ", event_time, addr, type, datalen)
else:
event_time, sequence, addr, type, datalen, data = event
dbg_print("Pop event: ", event_time, addr, type, datalen, binascii.hexlify(data))
self.current_event = event
assert(event_time >= self.current_time)
self.current_time = event_time
elapsed = event_time - self.devices[addr]['time']
self.devices[addr]['time'] = event_time
message = struct.pack('=QBH', elapsed, type, datalen)
if type == self.OT_SIM_EVENT_ALARM_FIRED:
self.devices[addr]['alarm'] = None
self._send_message(message, addr)
elif type == self.OT_SIM_EVENT_RADIO_RECEIVED:
message += data
self._send_message(message, addr)
elif type == self.OT_SIM_EVENT_RADIO_SPINEL_WRITE:
message += data
self._send_message(message, addr)
elif type == self.OT_SIM_EVENT_UART_WRITE:
message += data
self._send_message(message, addr)
def sync_devices(self):
self.current_time = self._pause_time
for addr in self.devices:
elapsed = self.current_time - self.devices[addr]['time']
if elapsed == 0:
continue
dbg_print('syncing', addr, elapsed)
self.devices[addr]['time'] = self.current_time
message = struct.pack('=QBH', elapsed, self.OT_SIM_EVENT_ALARM_FIRED, 0)
self._send_message(message, addr)
self.awake_devices.add(addr)
self.receive_events()
self.awake_devices.clear()
def go(self, duration, nodeid=None):
assert self.current_time == self._pause_time
duration = int(duration) * 1000000
dbg_print('running for %d us' % duration)
self._pause_time += duration
if nodeid:
if self.NCP_SIM:
self.current_nodeid = nodeid
self.awake_devices.add(self._core_addr_from(nodeid))
self.receive_events()
while self._next_event_time() <= self._pause_time:
self.process_next_event()
self.receive_events()
if duration > 0:
self.sync_devices()
dbg_print('current time %d us' % self.current_time)
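# e.g. simulator.go(10) advances virtual time by 10 seconds (10,000,000 us),
# draining queued events and then re-synchronizing every device's clock.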
if __name__ == '__main__':
simulator = VirtualTime()
while True:
simulator.go(0)
|
|
#
#
# =================================================================
# =================================================================
"""An os-hypervisors API extension to report additional hypervisor metrics."""
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from oslo.config import cfg
#from powervc_health import _
# Load the over-commit ratios for CPU and memory.
CONF = cfg.CONF
CONF.import_opt('cpu_allocation_ratio', 'nova.scheduler.filters.core_filter')
CONF.import_opt('ram_allocation_ratio', 'nova.scheduler.filters.ram_filter')
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'hypervisor_metrics')
"""
The key is the name of the field in the database.
In the dictionary for each key:
display_name is the name of the field to be surfaced through this
extension
converter is a function that takes a string value and converts it to
the data type for the field
default_value is the default value to be surfaced for the display_name
key for this metric
"""
HYPERVISOR_TYPE_POWERVM = 'powervm'
HYPERVISOR_TYPE_POWERKVM = 'QEMU'
METRICS = {
# Not intended to be a host-level metric at this time (i.e., use default)
'cpu_allocation_ratio': {
'display_name': 'cpu_allocation_ratio',
'converter': float,
'default_value': CONF.cpu_allocation_ratio,
'supported_platforms': [HYPERVISOR_TYPE_POWERKVM]
},
# Not intended to be a host-level metric at this time (i.e., use default)
'memory_allocation_ratio': {
'display_name': 'memory_allocation_ratio',
'converter': float,
'default_value': CONF.ram_allocation_ratio,
'supported_platforms': [HYPERVISOR_TYPE_POWERKVM]
},
'host_memory_reserved': {
'display_name': 'memory_mb_reserved',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'memory_mb_used': {
'display_name': 'memory_mb_used',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM,
HYPERVISOR_TYPE_POWERKVM]
},
'proc_units_reserved': {
'display_name': 'proc_units_reserved',
'converter': None,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'proc_units_used': {
'display_name': 'proc_units_used',
'converter': None,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'proc_units': {
'display_name': 'proc_units',
'converter': None,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'lmb_size': {
'display_name': 'lmb_size',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'active_lpar_mobility_capable': {
'display_name': 'active_lpar_mobility_capable',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'compatibility_modes': {
'display_name': 'compatibility_modes',
'converter': None,
'default_value': [],
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'active_migrations_supported': {
'display_name': 'active_migrations_supported',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'active_migrations_in_progress': {
'display_name': 'active_migrations_in_progress',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'inactive_migrations_supported': {
'display_name': 'inactive_migrations_supported',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'inactive_migrations_in_progress': {
'display_name': 'inactive_migrations_in_progress',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'max_procs_per_aix_linux_lpar': {
'display_name': 'max_procs_per_aix_linux_lpar',
'converter': None,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'max_vcpus_per_aix_linux_lpar': {
'display_name': 'max_vcpus_per_aix_linux_lpar',
'converter': None,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'disk_available': {
'display_name': 'disk_available',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'disk_total': {
'display_name': 'disk_total',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'disk_used': {
'display_name': 'disk_used',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'usable_local_mb': {
'display_name': 'usable_local_mb',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM]
},
'threads_per_core': {
'display_name': 'threads_per_core',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERKVM]
},
'split_core': {
'display_name': 'split_core',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERKVM]
},
'max_smt_per_guest': {
'display_name': 'max_smt_per_guest',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERKVM]
},
'vcpus': {
'display_name': 'vcpus',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM,
HYPERVISOR_TYPE_POWERKVM]
},
'memory_mb': {
'display_name': 'memory_mb',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM,
HYPERVISOR_TYPE_POWERKVM]
},
'local_gb': {
'display_name': 'local_gb',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERKVM]
},
'vcpus_used': {
'display_name': 'vcpus_used',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM,
HYPERVISOR_TYPE_POWERKVM]
},
'local_gb_used': {
'display_name': 'local_gb_used',
'converter': int,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERKVM]
},
'hypervisor_type': {
'display_name': 'hypervisor_type',
'converter': None,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM,
HYPERVISOR_TYPE_POWERKVM]
},
'hypervisor_version': {
'display_name': 'hypervisor_version',
'converter': float,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM,
HYPERVISOR_TYPE_POWERKVM]
},
'hypervisor_hostname': {
'display_name': 'hypervisor_hostname',
'converter': None,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM,
HYPERVISOR_TYPE_POWERKVM]
},
'cpu_info': {
'display_name': 'cpu_info',
'converter': None,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM,
HYPERVISOR_TYPE_POWERKVM]
},
'disk_available_least': {
'display_name': 'disk_available_least',
'converter': None,
'default_value': None,
'supported_platforms': [HYPERVISOR_TYPE_POWERVM,
HYPERVISOR_TYPE_POWERKVM]
},
}
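# Illustrative sketch (not in the original module) of how one METRICS entry is
# applied by _populate_metric_attrs() below: the raw stats value is converted
# and stored under the metric's display name, e.g.
#
#   metric = METRICS['memory_mb_used']            # converter=int
#   hypervisor[metric['display_name']] = metric['converter']('2048')  # -> 2048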
class HypervisorMetricsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisor', selector='hypervisor')
for key in METRICS.keys():
root.set(METRICS.get(key)['display_name'])
return xmlutil.SlaveTemplate(root, 1)
class HypervisorsMetricsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('hypervisors')
elem = xmlutil.SubTemplateElement(
root, 'hypervisor', selector='hypervisors')
for key in METRICS.keys():
elem.set(METRICS.get(key)['display_name'])
return xmlutil.SlaveTemplate(root, 1)
class HypervisorMetricsControllerExtension(wsgi.Controller):
def __init__(self):
self.host_api = compute.HostAPI()
@wsgi.extends
def show(self, req, resp_obj, id):
self._log("Enter: show")
resp_obj.attach(xml=HypervisorMetricsTemplate())
# Retrieve the hypervisor instance specified in the database and
# populate the state and metrics.
context = req.environ['nova.context']
compute_node = None
try:
compute_node = self.host_api.compute_node_get(context, int(id))
self._populate_metric_attrs(resp_obj.obj['hypervisor'],
compute_node)
except Exception as e:
self._log("ERROR: show(self, req, resp_obj, id) caught an "
"unexpected exception %s retrieving %s" % (str(e), id))
@wsgi.extends
def detail(self, req, resp_obj):
self._log("Enter: detail")
resp_obj.attach(xml=HypervisorsMetricsTemplate())
# Loop through the hypervisors that the primary hypervisor API returns.
context = req.environ['nova.context']
compute_node = None
for hypervisor in list(resp_obj.obj['hypervisors']):
# Retrieve the hypervisor instance specified from the database and
# populate the metrics.
try:
compute_node = self.host_api.compute_node_get(context,
hypervisor['id'])
self._populate_metric_attrs(hypervisor, compute_node)
except Exception as e:
self._log("ERROR: detail(self, req, resp_obj) caught an "
"unexpected exception %s retrieving %s" %
(str(e), hypervisor['id']))
def _populate_metric_attrs(self, hypervisor, compute_node):
self._log("Enter: _populate_metric_attrs")
# Set the hypervisor metrics on the response.
if compute_node:
# Since the Stats are now a JSON string, parse into a Dictionary
compute_stats = compute_node.get('stats')
compute_stats = '{}' if compute_stats is None else compute_stats
compute_stats = jsonutils.loads(compute_stats)
#hypervisor_type = compute_stats.get('hypervisor_type',
# HYPERVISOR_TYPE_POWERVM)
hypervisor_type = compute_node.get('hypervisor_type',
HYPERVISOR_TYPE_POWERVM)
LOG.debug("Platform is " + hypervisor_type)
for key, value in compute_stats.iteritems():
if key in METRICS.keys():
metric = METRICS.get(key)
if hypervisor_type in metric['supported_platforms']:
# Compatibility modes are a special case. In the database
# the value can only be a string, so we convert the
# comma-separated string into a list.
if key == 'compatibility_modes':
value = value.split(',')
converter = metric['converter']
if converter:
hypervisor[metric['display_name']] = \
converter(value)
else:
hypervisor[metric['display_name']] = value
for item in METRICS.items():
metric = item[1]
if hypervisor_type in metric['supported_platforms']:
metric_display_name = metric['display_name']
if metric_display_name not in hypervisor:
compute_node_id = compute_node['id']
self._log("_populate_metric_attrs database "
"for %s does not contain %s" %
(compute_node_id, metric_display_name))
hypervisor[metric_display_name] = \
metric['default_value']
else:
# No compute node record was found; assume the default platform so the
# supported_platforms check below still has a value to compare against.
hypervisor_type = HYPERVISOR_TYPE_POWERVM
for item in METRICS.items():
metric = item[1]
if hypervisor_type in metric['supported_platforms']:
metric_display_name = metric['display_name']
if metric_display_name not in hypervisor:
hypervisor[metric_display_name] = \
metric['default_value']
def _log(self, log_str):
pass
# log_msg = _("powerVC-Hypervisor-Metric-LOG: '%s'") % log_str
# if log_str.startswith('ERROR'):
# LOG.error(log_msg)
# elif log_str.startswith('WARNING'):
# LOG.warning(log_msg)
# else:
# LOG.debug(log_msg)
class Hypervisor_metrics(extensions.ExtensionDescriptor):
"""Extended Metric Information about the Hypervisor."""
name = "PowerVC Hypervisor Metrics"
alias = "powervc-hypervisors-metrics"
namespace = "http://www.ibm.com/openstack/compute/contrib/powervc/v1.0"
updated = "2013-03-13T21:00:00-06:00"
def get_controller_extensions(self):
"""Provide a Controller Extension to the os-hypervisors Resource"""
extension_list = []
extension_list.append(
extensions.ControllerExtension(
self,
'os-hypervisors',
HypervisorMetricsControllerExtension()
)
)
return extension_list
|
|
from threepio import logger
from django.core.exceptions import ValidationError
from core.models.quota import get_quota, has_storage_count_quota,\
has_storage_quota
from core.models.identity import Identity
from core.models.volume import Volume
from core.models.instance_source import InstanceSource
from service.cache import get_cached_driver
from service.driver import _retrieve_source, get_esh_driver
from service.quota import check_over_storage_quota
from service import exceptions
from service.instance import boot_volume_instance
def update_volume_metadata(core_volume, metadata={}):
identity = core_volume.source.created_by_identity
volume_id = core_volume.provider_alias
esh_driver = get_cached_driver(identity=identity)
esh_volume = esh_driver.get_volume(volume_id)
return _update_volume_metadata(esh_driver, esh_volume, metadata)
def _update_volume_metadata(esh_driver, esh_volume,
metadata={}):
"""
NOTE: This will NOT WORK for TAGS until openstack
allows JSONArrays as values for metadata!
NOTE: This will NOT remove metadata keys that are absent from `metadata`;
existing keys are merged with the new values.
ex:
Start: {'a': 'value', 'c': 'value'}
passed: {'c': 5}
End: {'a': 'value', 'c': 5}
"""
if not esh_volume:
return {}
if not hasattr(esh_driver._connection, 'ex_update_volume_metadata'):
logger.warn(
"EshDriver %s does not have function 'ex_update_volume_metadata'" %
esh_driver._connection.__class__)
return {}
data = esh_volume.extra.get('metadata', {})
data.update(metadata)
try:
return esh_driver._connection.ex_update_volume_metadata(
esh_volume,
data)
except Exception as e:
logger.exception("Error updating the metadata")
if 'incapable of performing the request' in e.message:
return {}
else:
raise
def restrict_size_by_image(size, image):
image_bytes = image._image.extra.get('image_size', None)
if not image_bytes:
raise exceptions.VolumeError(
"Cannot determine size of the image %s: "
"Expected rtwo.models.machine.OSMachine to include "
"'image_size' key in the 'extra' fields." % (image.name,))
image_size = int(image_bytes / 1024.0**3)
if size > image_size + 4:
raise exceptions.VolumeError(
"Volumes created from images cannot be more than 4 GB "
"larger than the image; requested size: %s GB"
% size)
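# e.g. a 3 GB image allows volumes of up to 7 GB; requesting 8 GB raises
# VolumeError above.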
def create_volume_or_fail(name, size, user, provider, identity,
description=None, project=None, image_id=None, snapshot_id=None):
snapshot = None
image = None
driver = get_esh_driver(identity, username=user.username)
if snapshot_id:
snapshot = driver._connection.ex_get_snapshot(snapshot_id)
if image_id:
image = driver.get_machine(image_id)
restrict_size_by_image(size, image)
#: Guard against both snapshot and image being present
assert snapshot is None or image is None, (
"A volume can only be constructed from a `snapshot` "
"or an `image` not both.")
#: Create the volume or raise an exception
# NOTE: username can be removed when 'quota' is not linked to IdentityMembership
_, esh_volume = create_esh_volume(driver, user.username, identity.uuid, name, size,
description=description,
snapshot=snapshot, image=image,
raise_exception=True)
identifier = esh_volume.id
start_date = esh_volume.extra.get('created_at')
source = InstanceSource.objects.create(
identifier=identifier,
provider=provider,
created_by=user,
created_by_identity=identity)
kwargs = {
"name": name,
"size": size,
"description": description,
"instance_source": source,
"start_date": start_date
}
volume = Volume.objects.create(**kwargs)
if project:
project.volumes.add(volume)
return volume
def create_snapshot(esh_driver, username, identity_uuid, name,
volume, description=None, raise_exception=False):
if not volume:
raise ValueError("Volume is required to create VolumeSnapshot")
try:
check_over_storage_quota(username, identity_uuid, new_snapshot_size=volume.size)
except ValidationError as over_quota:
raise exceptions.OverQuotaError(
message=over_quota.message)
esh_ss = esh_driver._connection.ex_create_snapshot(
volume_id=volume.id,
display_name=name,
display_description=description)
if not esh_ss and raise_exception:
raise exceptions.VolumeError("The volume failed to be created.")
return esh_ss
def create_esh_volume(esh_driver, username, identity_uuid, name, size,
description=None, metadata=None, snapshot=None, image=None,
raise_exception=False):
quota = get_quota(identity_uuid)
try:
check_over_storage_quota(username, identity_uuid, new_volume_size=size)
except ValidationError as over_quota:
raise exceptions.OverQuotaError(
message=over_quota.message)
if not has_storage_count_quota(esh_driver, quota, 1):
raise exceptions.OverQuotaError(
message="Maximum # of Storage Volumes Exceeded")
# NOTE: Calling non-standard create_volume_obj so we know the ID
# of newly created volume. Libcloud just returns 'True'... --Steve
conn_kwargs = {'max_attempts': 1}
success, esh_volume = esh_driver.create_volume(
size=size,
name=name,
metadata=metadata,
snapshot=snapshot,
image=image,
**conn_kwargs)
if not success and raise_exception:
raise exceptions.VolumeError("The volume failed to be created.")
return success, esh_volume
def destroy_volume_or_fail(volume, user, cascade=False):
"""
Destroy the volume specified
:param cascade: Cascades through and destroy volume snapshots
(defaults is False)
:type cascade: ``bool``
"""
identity = volume.instance_source.created_by_identity
driver = get_esh_driver(identity, username=user.username)
# retrieve volume or fail with not found
esh_volume = driver.get_volume(volume.identifier)
if esh_volume is None:
raise exceptions.NotFound(
"The `%s` could not be found."
% volume.identifier)
# if cascade True and snapshots exist delete all snapshots
if cascade:
snapshots = esh_volume.list_snapshots()
for snapshot in snapshots:
driver.destroy_snapshot(snapshot)
# destroy the volume successfully or raise an exception
if not driver.destroy_volume(esh_volume):
raise Exception("Encountered an error destroying the volume.")
def create_bootable_volume(
user,
provider_uuid,
identity_uuid,
name,
size_alias,
new_source_alias,
source_hint=None,
**kwargs):
"""
**kwargs passed as data to boot_volume_instance
"""
identity = Identity.objects.get(uuid=identity_uuid)
if not identity:
raise Exception("Identity UUID %s does not exist." % identity_uuid)
driver = get_cached_driver(identity=identity)
if not driver:
raise Exception(
"Driver could not be initialized. Invalid Credentials?")
size = driver.get_size(size_alias)
if not size:
raise Exception(
"Size %s could not be located with this driver" % size_alias)
# Return source or raises an Exception
source = _retrieve_source(driver, new_source_alias, source_hint)
core_instance = boot_volume_instance(driver, identity,
source, size, name, **kwargs)
return core_instance
def attach_volume(driver, instance_id, volume_id, device_choice=None):
instance = driver.get_instance(instance_id)
volume = driver.get_volume(volume_id)
if volume.extra.get('status', 'N/A') in 'in-use':
attachments = volume.extra['attachments']
for attach_data in attachments:
if instance_id in attach_data['serverId']:
return volume
# Step 1. Attach the volume
# NOTE: device_choice is only a hint; the hypervisor may assign a
# different device name.
return driver.attach_volume(instance,
volume,
device_choice)
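# Usage sketch (illustrative): attach_volume(driver, instance_id, volume_id)
# returns the volume unchanged when it is already attached to that instance;
# otherwise it delegates to driver.attach_volume(), with device_choice
# treated only as a hint.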
|
|
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Colorizer Code is borrowed from Twisted:
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Unittest runner for Nova.
To run all tests
python run_tests.py
To run a single test:
python run_tests.py test_compute:ComputeTestCase.test_run_terminate
To run a single test module:
python run_tests.py test_compute
or
python run_tests.py api.test_wsgi
"""
import gettext
import os
import sys
import unittest
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nose import config
from nose import core
from nose import result
from smoketests import flags
FLAGS = flags.FLAGS
class _AnsiColorizer(object):
"""
A colorizer is an object that loosely wraps around a stream, allowing
callers to write text to the stream in a particular color.
Colorizer classes must implement C{supported()} and C{write(text, color)}.
"""
_colors = dict(black=30, red=31, green=32, yellow=33,
blue=34, magenta=35, cyan=36, white=37)
def __init__(self, stream):
self.stream = stream
def supported(cls, stream=sys.stdout):
"""
A class method that returns True if the current platform supports
coloring terminal output using this method. Returns False otherwise.
"""
if not stream.isatty():
return False # auto color only on TTYs
try:
import curses
except ImportError:
return False
else:
try:
try:
return curses.tigetnum("colors") > 2
except curses.error:
curses.setupterm()
return curses.tigetnum("colors") > 2
            except Exception:
                # guess False in case of error
                return False
supported = classmethod(supported)
def write(self, text, color):
"""
Write the given text to the stream in the given color.
@param text: Text to be written to the stream.
@param color: A string label for a color. e.g. 'red', 'white'.
"""
color = self._colors[color]
self.stream.write('\x1b[%s;1m%s\x1b[0m' % (color, text))
class _Win32Colorizer(object):
"""
See _AnsiColorizer docstring.
"""
def __init__(self, stream):
from win32console import FOREGROUND_BLUE
from win32console import FOREGROUND_GREEN
from win32console import FOREGROUND_INTENSITY
from win32console import FOREGROUND_RED
from win32console import GetStdHandle
from win32console import STD_OUT_HANDLE
red, green, blue, bold = (FOREGROUND_RED, FOREGROUND_GREEN,
FOREGROUND_BLUE, FOREGROUND_INTENSITY)
self.stream = stream
self.screenBuffer = GetStdHandle(STD_OUT_HANDLE)
self._colors = {
'normal': red | green | blue,
'red': red | bold,
'green': green | bold,
'blue': blue | bold,
'yellow': red | green | bold,
'magenta': red | blue | bold,
'cyan': green | blue | bold,
'white': red | green | blue | bold
}
def supported(cls, stream=sys.stdout):
try:
import win32console
screenBuffer = win32console.GetStdHandle(
win32console.STD_OUT_HANDLE)
except ImportError:
return False
import pywintypes
try:
screenBuffer.SetConsoleTextAttribute(
win32console.FOREGROUND_RED |
win32console.FOREGROUND_GREEN |
win32console.FOREGROUND_BLUE)
except pywintypes.error:
return False
else:
return True
supported = classmethod(supported)
def write(self, text, color):
color = self._colors[color]
self.screenBuffer.SetConsoleTextAttribute(color)
self.stream.write(text)
self.screenBuffer.SetConsoleTextAttribute(self._colors['normal'])
class _NullColorizer(object):
"""
See _AnsiColorizer docstring.
"""
def __init__(self, stream):
self.stream = stream
def supported(cls, stream=sys.stdout):
return True
supported = classmethod(supported)
def write(self, text, color):
self.stream.write(text)
class NovaTestResult(result.TextTestResult):
def __init__(self, *args, **kw):
result.TextTestResult.__init__(self, *args, **kw)
self._last_case = None
self.colorizer = None
# NOTE(vish): reset stdout for the terminal check
stdout = sys.stdout
sys.stdout = sys.__stdout__
for colorizer in [_Win32Colorizer, _AnsiColorizer, _NullColorizer]:
if colorizer.supported():
self.colorizer = colorizer(self.stream)
break
sys.stdout = stdout
def getDescription(self, test):
return str(test)
# NOTE(vish): copied from unittest with edit to add color
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
if self.showAll:
self.colorizer.write("OK", 'green')
self.stream.writeln()
elif self.dots:
self.stream.write('.')
self.stream.flush()
# NOTE(vish): copied from unittest with edit to add color
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
if self.showAll:
self.colorizer.write("FAIL", 'red')
self.stream.writeln()
elif self.dots:
self.stream.write('F')
self.stream.flush()
# NOTE(vish): copied from nose with edit to add color
def addError(self, test, err):
"""Overrides normal addError to add support for
errorClasses. If the exception is a registered class, the
error will be added to the list for that class, not errors.
"""
stream = getattr(self, 'stream', None)
ec, ev, tb = err
try:
exc_info = self._exc_info_to_string(err, test)
except TypeError:
# 2.3 compat
exc_info = self._exc_info_to_string(err)
for cls, (storage, label, isfail) in self.errorClasses.items():
if result.isclass(ec) and issubclass(ec, cls):
if isfail:
test.passed = False
storage.append((test, exc_info))
# Might get patched into a streamless result
if stream is not None:
if self.showAll:
message = [label]
detail = result._exception_detail(err[1])
if detail:
message.append(detail)
stream.writeln(": ".join(message))
elif self.dots:
stream.write(label[:1])
return
self.errors.append((test, exc_info))
test.passed = False
if stream is not None:
if self.showAll:
self.colorizer.write("ERROR", 'red')
self.stream.writeln()
elif self.dots:
stream.write('E')
def startTest(self, test):
unittest.TestResult.startTest(self, test)
current_case = test.test.__class__.__name__
if self.showAll:
if current_case != self._last_case:
self.stream.writeln(current_case)
self._last_case = current_case
self.stream.write(
' %s' % str(test.test._testMethodName).ljust(60))
self.stream.flush()
class NovaTestRunner(core.TextTestRunner):
def _makeResult(self):
return NovaTestResult(self.stream,
self.descriptions,
self.verbosity,
self.config)
if __name__ == '__main__':
if not os.getenv('EC2_ACCESS_KEY'):
print _('Missing EC2 environment variables. Please '
'source the appropriate novarc file before '
'running this test.')
sys.exit(1)
argv = FLAGS(sys.argv)
testdir = os.path.abspath("./")
c = config.Config(stream=sys.stdout,
env=os.environ,
verbosity=3,
workingDir=testdir,
plugins=core.DefaultPluginManager())
runner = NovaTestRunner(stream=c.stream,
verbosity=c.verbosity,
config=c)
sys.exit(not core.run(config=c, testRunner=runner, argv=argv))
|
|
"""
dj-stripe Model Manager Tests.
"""
import datetime
import decimal
from copy import deepcopy
from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils import timezone
from djstripe.models import Charge, Customer, Plan, Subscription, Transfer
from . import (
FAKE_PLAN,
FAKE_PLAN_II,
FAKE_PRODUCT,
FAKE_STANDARD_ACCOUNT,
FAKE_TRANSFER,
IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
class SubscriptionManagerTest(TestCase):
def setUp(self):
# create customers and current subscription records
period_start = datetime.datetime(2013, 4, 1, tzinfo=timezone.utc)
period_end = datetime.datetime(2013, 4, 30, tzinfo=timezone.utc)
start = datetime.datetime(
2013, 1, 1, 0, 0, 1, tzinfo=timezone.utc
) # more realistic start
with patch(
"stripe.Product.retrieve",
return_value=deepcopy(FAKE_PRODUCT),
autospec=True,
):
self.plan = Plan.sync_from_stripe_data(FAKE_PLAN)
self.plan2 = Plan.sync_from_stripe_data(FAKE_PLAN_II)
for i in range(10):
user = get_user_model().objects.create_user(
username="patrick{0}".format(i),
email="patrick{0}@example.com".format(i),
)
customer = Customer.objects.create(
subscriber=user,
id="cus_xxxxxxxxxxxxxx{0}".format(i),
livemode=False,
balance=0,
delinquent=False,
)
Subscription.objects.create(
id="sub_xxxxxxxxxxxxxx{0}".format(i),
customer=customer,
plan=self.plan,
current_period_start=period_start,
current_period_end=period_end,
status="active",
start_date=start,
quantity=1,
)
user = get_user_model().objects.create_user(
username="patrick{0}".format(11), email="patrick{0}@example.com".format(11)
)
customer = Customer.objects.create(
subscriber=user,
id="cus_xxxxxxxxxxxxxx{0}".format(11),
livemode=False,
balance=0,
delinquent=False,
)
Subscription.objects.create(
id="sub_xxxxxxxxxxxxxx{0}".format(11),
customer=customer,
plan=self.plan,
current_period_start=period_start,
current_period_end=period_end,
status="canceled",
canceled_at=period_end,
start_date=start,
quantity=1,
)
user = get_user_model().objects.create_user(
username="patrick{0}".format(12), email="patrick{0}@example.com".format(12)
)
customer = Customer.objects.create(
subscriber=user,
id="cus_xxxxxxxxxxxxxx{0}".format(12),
livemode=False,
balance=0,
delinquent=False,
)
Subscription.objects.create(
id="sub_xxxxxxxxxxxxxx{0}".format(12),
customer=customer,
plan=self.plan2,
current_period_start=period_start,
current_period_end=period_end,
status="active",
start_date=start,
quantity=1,
)
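    # Fixture summary (for the assertions below): 12 subscriptions are started
    # in January 2013 in total -- 10 active on self.plan, 1 canceled on
    # self.plan (canceled_at the end of April), and 1 active on self.plan2 --
    # giving 11 active and 1 canceled subscriptions overall.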
def test_started_during_no_records(self):
self.assertEqual(Subscription.objects.started_during(2013, 4).count(), 0)
def test_started_during_has_records(self):
self.assertEqual(Subscription.objects.started_during(2013, 1).count(), 12)
def test_canceled_during(self):
self.assertEqual(Subscription.objects.canceled_during(2013, 4).count(), 1)
def test_canceled_all(self):
self.assertEqual(Subscription.objects.canceled().count(), 1)
def test_active_all(self):
self.assertEqual(Subscription.objects.active().count(), 11)
def test_started_plan_summary(self):
for plan in Subscription.objects.started_plan_summary_for(2013, 1):
if plan["plan"] == self.plan:
self.assertEqual(plan["count"], 11)
if plan["plan"] == self.plan2:
self.assertEqual(plan["count"], 1)
def test_active_plan_summary(self):
for plan in Subscription.objects.active_plan_summary():
if plan["plan"] == self.plan:
self.assertEqual(plan["count"], 10)
if plan["plan"] == self.plan2:
self.assertEqual(plan["count"], 1)
def test_canceled_plan_summary(self):
for plan in Subscription.objects.canceled_plan_summary_for(2013, 1):
if plan["plan"] == self.plan:
self.assertEqual(plan["count"], 1)
if plan["plan"] == self.plan2:
self.assertEqual(plan["count"], 0)
def test_churn(self):
self.assertEqual(
Subscription.objects.churn(), decimal.Decimal("1") / decimal.Decimal("11")
)
class TransferManagerTest(TestCase):
@patch.object(Transfer, "_attach_objects_post_save_hook")
@patch(
"stripe.Account.retrieve",
return_value=deepcopy(FAKE_STANDARD_ACCOUNT),
autospec=IS_STATICMETHOD_AUTOSPEC_SUPPORTED,
)
def test_transfer_summary(
self, account_retrieve_mock, transfer__attach_object_post_save_hook_mock
):
def FAKE_TRANSFER_III():
data = deepcopy(FAKE_TRANSFER)
data["id"] = "tr_17O4U52eZvKYlo2CmyYbDAEy"
data["amount"] = 19010
data["created"] = 1451560845
return data
def FAKE_TRANSFER_II():
data = deepcopy(FAKE_TRANSFER)
data["id"] = "tr_16hTzv2eZvKYlo2CWuyMmuvV"
data["amount"] = 2000
data["created"] = 1440420000
return data
Transfer.sync_from_stripe_data(deepcopy(FAKE_TRANSFER))
Transfer.sync_from_stripe_data(FAKE_TRANSFER_II())
Transfer.sync_from_stripe_data(FAKE_TRANSFER_III())
self.assertEqual(Transfer.objects.during(2015, 8).count(), 2)
totals = Transfer.objects.paid_totals_for(2015, 12)
self.assertEqual(totals["total_amount"], decimal.Decimal("190.10"))
class ChargeManagerTest(TestCase):
def setUp(self):
customer = Customer.objects.create(
id="cus_XXXXXXX", livemode=False, balance=0, delinquent=False
)
self.march_charge = Charge.objects.create(
id="ch_XXXXMAR1",
customer=customer,
created=datetime.datetime(2015, 3, 31, tzinfo=timezone.utc),
amount=0,
amount_refunded=0,
currency="usd",
status="pending",
)
self.april_charge_1 = Charge.objects.create(
id="ch_XXXXAPR1",
customer=customer,
created=datetime.datetime(2015, 4, 1, tzinfo=timezone.utc),
amount=decimal.Decimal("20.15"),
amount_refunded=0,
currency="usd",
status="succeeded",
paid=True,
)
self.april_charge_2 = Charge.objects.create(
id="ch_XXXXAPR2",
customer=customer,
created=datetime.datetime(2015, 4, 18, tzinfo=timezone.utc),
amount=decimal.Decimal("10.35"),
amount_refunded=decimal.Decimal("5.35"),
currency="usd",
status="succeeded",
paid=True,
)
self.april_charge_3 = Charge.objects.create(
id="ch_XXXXAPR3",
customer=customer,
created=datetime.datetime(2015, 4, 30, tzinfo=timezone.utc),
amount=decimal.Decimal("100.00"),
amount_refunded=decimal.Decimal("80.00"),
currency="usd",
status="pending",
paid=False,
)
self.may_charge = Charge.objects.create(
id="ch_XXXXMAY1",
customer=customer,
created=datetime.datetime(2015, 5, 1, tzinfo=timezone.utc),
amount=0,
amount_refunded=0,
currency="usd",
status="pending",
)
self.november_charge = Charge.objects.create(
id="ch_XXXXNOV1",
customer=customer,
created=datetime.datetime(2015, 11, 16, tzinfo=timezone.utc),
amount=0,
amount_refunded=0,
currency="usd",
status="pending",
)
self.charge_2014 = Charge.objects.create(
id="ch_XXXX20141",
customer=customer,
created=datetime.datetime(2014, 12, 31, tzinfo=timezone.utc),
amount=0,
amount_refunded=0,
currency="usd",
status="pending",
)
self.charge_2016 = Charge.objects.create(
id="ch_XXXX20161",
customer=customer,
created=datetime.datetime(2016, 1, 1, tzinfo=timezone.utc),
amount=0,
amount_refunded=0,
currency="usd",
status="pending",
)
def test_is_during_april_2015(self):
raw_charges = Charge.objects.during(year=2015, month=4)
charges = [charge.id for charge in raw_charges]
self.assertIn(self.april_charge_1.id, charges, "April charge 1 not in charges.")
self.assertIn(self.april_charge_2.id, charges, "April charge 2 not in charges.")
self.assertIn(self.april_charge_3.id, charges, "April charge 3 not in charges.")
self.assertNotIn(
self.march_charge.id, charges, "March charge unexpectedly in charges."
)
self.assertNotIn(
self.may_charge.id, charges, "May charge unexpectedly in charges."
)
self.assertNotIn(
self.november_charge.id, charges, "November charge unexpectedly in charges."
)
self.assertNotIn(
self.charge_2014.id, charges, "2014 charge unexpectedly in charges."
)
self.assertNotIn(
self.charge_2016.id, charges, "2016 charge unexpectedly in charges."
)
def test_get_paid_totals_for_april_2015(self):
paid_totals = Charge.objects.paid_totals_for(year=2015, month=4)
self.assertEqual(
decimal.Decimal("30.50"),
paid_totals["total_amount"],
"Total amount is not correct.",
)
self.assertEqual(
decimal.Decimal("5.35"),
paid_totals["total_refunded"],
"Total amount refunded is not correct.",
)
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains the bindings for command line integration and dynamic loading of tasks
"""
import argparse
import logging
import logging.config
import os
import sys
import tempfile
import signal
from luigi import configuration
from luigi import lock
from luigi import parameter
from luigi import rpc
from luigi import scheduler
from luigi import task
from luigi import worker
from luigi import execution_summary
from luigi.task_register import Register
def setup_interface_logging(conf_file=None):
# use a variable in the function object to determine if it has run before
if getattr(setup_interface_logging, "has_run", False):
return
if conf_file is None:
logger = logging.getLogger('luigi-interface')
logger.setLevel(logging.DEBUG)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s: %(message)s')
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
else:
logging.config.fileConfig(conf_file, disable_existing_loggers=False)
setup_interface_logging.has_run = True
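# Illustrative note (not in the original source): the has_run attribute makes
# repeated calls no-ops, so the 'luigi-interface' logger is only configured
# once even if several entry points call this function:
#
#     setup_interface_logging()           # attaches the default stream handler
#     setup_interface_logging('log.cfg')  # ignored -- already configured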
class core(task.Config):
''' Keeps track of a bunch of environment params.
Uses the internal luigi parameter mechanism.
The nice thing is that we can instantiate this class
and get an object with all the environment variables set.
This is arguably a bit of a hack.
'''
use_cmdline_section = False
local_scheduler = parameter.BoolParameter(
default=False,
description='Use local scheduling')
scheduler_host = parameter.Parameter(
default='localhost',
description='Hostname of machine running remote scheduler',
config_path=dict(section='core', name='default-scheduler-host'))
scheduler_port = parameter.IntParameter(
default=8082,
description='Port of remote scheduler api process',
config_path=dict(section='core', name='default-scheduler-port'))
scheduler_url = parameter.Parameter(
default=None,
description='Full path to remote scheduler',
config_path=dict(section='core', name='default-scheduler-url'),
)
lock_size = parameter.IntParameter(
default=1,
description="Maximum number of workers running the same command")
no_lock = parameter.BoolParameter(
default=False,
description='Ignore if similar process is already running')
lock_pid_dir = parameter.Parameter(
default=os.path.join(tempfile.gettempdir(), 'luigi'),
description='Directory to store the pid file')
take_lock = parameter.BoolParameter(
default=False,
description='Signal other processes to stop getting work if already running')
workers = parameter.IntParameter(
default=1,
description='Maximum number of parallel tasks to run')
logging_conf_file = parameter.Parameter(
default=None,
description='Configuration file for logging')
module = parameter.Parameter(
default=None,
description='Used for dynamic loading of modules') # see _DynamicArgParseInterface
parallel_scheduling = parameter.BoolParameter(
default=False,
description='Use multiprocessing to do scheduling in parallel.')
assistant = parameter.BoolParameter(
default=False,
description='Run any task from the scheduler.')
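# Illustrative note (not part of the original source): because core is a
# task.Config, instantiating it resolves each parameter from the command line,
# the config file and the defaults above, e.g.:
#
#     env_params = core(local_scheduler=True)
#     env_params.scheduler_port   # 8082 unless overridden via config/CLI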
class _WorkerSchedulerFactory(object):
def create_local_scheduler(self):
return scheduler.CentralPlannerScheduler(prune_on_get_work=True)
def create_remote_scheduler(self, url):
return rpc.RemoteScheduler(url)
def create_worker(self, scheduler, worker_processes, assistant=False):
return worker.Worker(
scheduler=scheduler, worker_processes=worker_processes, assistant=assistant)
class _Interface(object):
def parse(self):
raise NotImplementedError
@staticmethod
def run(tasks, worker_scheduler_factory=None, override_defaults=None):
"""
:param tasks:
:param worker_scheduler_factory:
:param override_defaults:
:return: True if all tasks and their dependencies were successfully run (or already completed);
False if any error occurred.
"""
if worker_scheduler_factory is None:
worker_scheduler_factory = _WorkerSchedulerFactory()
if override_defaults is None:
override_defaults = {}
env_params = core(**override_defaults)
# search for logging configuration path first on the command line, then
# in the application config file
logging_conf = env_params.logging_conf_file
if logging_conf is not None and not os.path.exists(logging_conf):
raise Exception(
"Error: Unable to locate specified logging configuration file!"
)
if not configuration.get_config().getboolean(
'core', 'no_configure_logging', False):
setup_interface_logging(logging_conf)
kill_signal = signal.SIGUSR1 if env_params.take_lock else None
if (not env_params.no_lock and
not(lock.acquire_for(env_params.lock_pid_dir, env_params.lock_size, kill_signal))):
sys.exit(1)
if env_params.local_scheduler:
sch = worker_scheduler_factory.create_local_scheduler()
else:
if env_params.scheduler_url is not None:
url = env_params.scheduler_url
else:
url = 'http://{host}:{port:d}/'.format(
host=env_params.scheduler_host,
port=env_params.scheduler_port,
)
sch = worker_scheduler_factory.create_remote_scheduler(url=url)
w = worker_scheduler_factory.create_worker(
scheduler=sch, worker_processes=env_params.workers, assistant=env_params.assistant)
success = True
for t in tasks:
success &= w.add(t, env_params.parallel_scheduling)
logger = logging.getLogger('luigi-interface')
logger.info('Done scheduling tasks')
if env_params.workers != 0:
success &= w.run()
w.stop()
logger.info(execution_summary.summary(w))
return success
def _add_task_parameters(parser, task_cls):
for param_name, param in task_cls.get_params():
param.add_to_cmdline_parser(parser, param_name, task_cls.task_family, glob=False)
def _get_global_parameters():
seen_params = set()
for task_name, is_without_section, param_name, param in Register.get_all_params():
if param in seen_params:
continue
seen_params.add(param)
yield task_name, is_without_section, param_name, param
def _add_global_parameters(parser):
for task_name, is_without_section, param_name, param in _get_global_parameters():
param.add_to_cmdline_parser(parser, param_name, task_name, glob=True, is_without_section=is_without_section)
def _get_task_parameters(task_cls, args):
# Parse a str->str dict to the correct types
params = {}
for param_name, param in task_cls.get_params():
param.parse_from_args(param_name, task_cls.task_family, args, params)
return params
def _set_global_parameters(args):
# Note that this is not side effect free
for task_name, is_without_section, param_name, param in _get_global_parameters():
param.set_global_from_args(param_name, task_name, args, is_without_section=is_without_section)
class _ArgParseInterface(_Interface):
"""
Takes the task as the command, with parameters specific to it.
"""
def parse_task(self, cmdline_args=None):
if cmdline_args is None:
cmdline_args = sys.argv[1:]
parser = argparse.ArgumentParser()
_add_global_parameters(parser)
task_names = Register.task_names()
# Parse global arguments and pull out the task name.
# We used to do this using subparsers+command, but some issues with
# argparse across different versions of Python (2.7.9) made it hard.
args, unknown = parser.parse_known_args(args=[a for a in cmdline_args if a != '--help'])
if len(unknown) == 0:
# In case it included a --help argument, run again
parser.parse_known_args(args=cmdline_args)
raise SystemExit('No task specified')
task_name = unknown[0]
task_cls = Register.get_task_cls(task_name)
# Add a subparser to parse task-specific arguments
subparsers = parser.add_subparsers(dest='command')
subparser = subparsers.add_parser(task_name)
# Add both task and global params here so that we can support both:
# test.py --global-param xyz Test --n 42
# test.py Test --n 42 --global-param xyz
_add_global_parameters(subparser)
_add_task_parameters(subparser, task_cls)
# Workaround for bug in argparse for Python 2.7.9
# See https://mail.python.org/pipermail/python-dev/2015-January/137699.html
subargs = parser.parse_args(args=cmdline_args)
for key, value in vars(subargs).items():
if value: # Either True (for boolean args) or non-None (everything else)
setattr(args, key, value)
# Notice that this is not side effect free because it might set global params
_set_global_parameters(args)
task_params = _get_task_parameters(task_cls, args)
return [task_cls(**task_params)]
def parse(self, cmdline_args=None):
return self.parse_task(cmdline_args)
class _DynamicArgParseInterface(_ArgParseInterface):
"""
Uses --module as a way to load modules dynamically
Usage:
.. code-block:: console
python whatever.py --module foo_module FooTask --blah xyz --x 123
This will dynamically import foo_module and then try to create FooTask from this.
"""
def parse(self, cmdline_args=None):
if cmdline_args is None:
cmdline_args = sys.argv[1:]
parser = argparse.ArgumentParser()
_add_global_parameters(parser)
args, unknown = parser.parse_known_args(args=[a for a in cmdline_args if a != '--help'])
module = args.module
if module:
__import__(module)
return self.parse_task(cmdline_args)
def run(cmdline_args=None, main_task_cls=None,
worker_scheduler_factory=None, use_dynamic_argparse=False, local_scheduler=False):
"""
    Please don't use. Instead use the `luigi` binary.
Run from cmdline using argparse.
:param cmdline_args:
:param main_task_cls:
:param worker_scheduler_factory:
:param use_dynamic_argparse:
:param local_scheduler:
"""
if cmdline_args is None:
cmdline_args = sys.argv[1:]
if use_dynamic_argparse:
interface = _DynamicArgParseInterface()
else:
interface = _ArgParseInterface()
if main_task_cls:
cmdline_args.insert(0, main_task_cls.task_family)
if local_scheduler:
cmdline_args.insert(0, '--local-scheduler')
tasks = interface.parse(cmdline_args)
return interface.run(tasks, worker_scheduler_factory)
def build(tasks, worker_scheduler_factory=None, **env_params):
"""
Run internally, bypassing the cmdline parsing.
Useful if you have some luigi code that you want to run internally.
Example:
.. code-block:: python
luigi.build([MyTask1(), MyTask2()], local_scheduler=True)
One notable difference is that `build` defaults to not using
the identical process lock. Otherwise, `build` would only be
callable once from each process.
:param tasks:
:param worker_scheduler_factory:
:param env_params:
:return: True if there were no scheduling errors, even if tasks may fail.
"""
if "no_lock" not in env_params:
env_params["no_lock"] = True
return _Interface.run(tasks, worker_scheduler_factory, override_defaults=env_params)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import ast
import copy
from oslo_serialization import jsonutils
import six
import six.moves.urllib.parse as urlparse
import six.moves.urllib.request as urlrequest
registered_checks = {}
@six.add_metaclass(abc.ABCMeta)
class BaseCheck(object):
"""Abstract base class for Check classes."""
@abc.abstractmethod
def __str__(self):
"""String representation of the Check tree rooted at this node."""
pass
@abc.abstractmethod
def __call__(self, target, cred, enforcer):
"""Triggers if instance of the class is called.
Performs the check. Returns False to reject the access or a
true value (not necessary True) to accept the access.
"""
pass
class FalseCheck(BaseCheck):
"""A policy check that always returns ``False`` (disallow)."""
def __str__(self):
"""Return a string representation of this check."""
return '!'
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return False
class TrueCheck(BaseCheck):
"""A policy check that always returns ``True`` (allow)."""
def __str__(self):
"""Return a string representation of this check."""
return '@'
def __call__(self, target, cred, enforcer):
"""Check the policy."""
return True
class Check(BaseCheck):
"""A base class to allow for user-defined policy checks.
:param kind: The kind of the check, i.e., the field before the ``:``.
:param match: The match of the check, i.e., the field after the ``:``.
"""
def __init__(self, kind, match):
self.kind = kind
self.match = match
def __str__(self):
"""Return a string representation of this check."""
return '%s:%s' % (self.kind, self.match)
class NotCheck(BaseCheck):
"""Implements the "not" logical operator.
A policy check that inverts the result of another policy check.
:param rule: The rule to negate. Must be a Check.
"""
def __init__(self, rule):
self.rule = rule
def __str__(self):
"""Return a string representation of this check."""
return 'not %s' % self.rule
def __call__(self, target, cred, enforcer):
"""Check the policy.
Returns the logical inverse of the wrapped check.
"""
return not self.rule(target, cred, enforcer)
class AndCheck(BaseCheck):
"""Implements the "and" logical operator.
A policy check that requires that a list of other checks all return True.
:param list rules: rules that will be tested.
"""
def __init__(self, rules):
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return '(%s)' % ' and '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that all rules accept in order to return True.
"""
for rule in self.rules:
if not rule(target, cred, enforcer):
return False
return True
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested.
:returns: self
:rtype: :class:`.AndCheck`
"""
self.rules.append(rule)
return self
class OrCheck(BaseCheck):
"""Implements the "or" operator.
A policy check that requires that at least one of a list of other
checks returns ``True``.
:param rules: A list of rules that will be tested.
"""
def __init__(self, rules):
self.rules = rules
def __str__(self):
"""Return a string representation of this check."""
return '(%s)' % ' or '.join(str(r) for r in self.rules)
def __call__(self, target, cred, enforcer):
"""Check the policy.
Requires that at least one rule accept in order to return True.
"""
for rule in self.rules:
if rule(target, cred, enforcer):
return True
return False
def add_check(self, rule):
"""Adds rule to be tested.
Allows addition of another rule to the list of rules that will
be tested. Returns the OrCheck object for convenience.
"""
self.rules.append(rule)
return self
def register(name, func=None):
"""Register a function or :class:`.Check` class as a policy check.
:param name: Gives the name of the check type, e.g., "rule",
"role", etc. If name is ``None``, a default check type
will be registered.
:param func: If given, provides the function or class to register.
If not given, returns a function taking one argument
to specify the function or class to register,
allowing use as a decorator.
"""
# Perform the actual decoration by registering the function or
# class. Returns the function or class for compliance with the
# decorator interface.
def decorator(func):
registered_checks[name] = func
return func
# If the function or class is given, do the registration
if func:
return decorator(func)
return decorator
@register('rule')
class RuleCheck(Check):
"""Recursively checks credentials based on the defined rules."""
def __call__(self, target, creds, enforcer):
try:
return enforcer.rules[self.match](target, creds, enforcer)
except KeyError:
# We don't have any matching rule; fail closed
return False
@register('role')
class RoleCheck(Check):
"""Check that there is a matching role in the ``creds`` dict."""
def __call__(self, target, creds, enforcer):
return self.match.lower() in [x.lower() for x in creds['roles']]
@register('http')
class HttpCheck(Check):
"""Check ``http:`` rules by calling to a remote server.
This example implementation simply verifies that the response
is exactly ``True``.
"""
def __call__(self, target, creds, enforcer):
url = ('http:' + self.match) % target
# Convert instances of object() in target temporarily to
# empty dict to avoid circular reference detection
# errors in jsonutils.dumps().
temp_target = copy.deepcopy(target)
for key in target.keys():
element = target.get(key)
if type(element) is object:
temp_target[key] = {}
data = {'target': jsonutils.dumps(temp_target),
'credentials': jsonutils.dumps(creds)}
post_data = urlparse.urlencode(data)
f = urlrequest.urlopen(url, post_data)
return f.read() == 'True'
@register(None)
class GenericCheck(Check):
"""Check an individual match.
Matches look like:
- tenant:%(tenant_id)s
- role:compute:admin
- True:%(user.enabled)s
- 'Member':%(role.name)s
"""
def __call__(self, target, creds, enforcer):
try:
match = self.match % target
except KeyError:
# While doing GenericCheck if key not
# present in Target return false
return False
try:
# Try to interpret self.kind as a literal
leftval = ast.literal_eval(self.kind)
except ValueError:
try:
kind_parts = self.kind.split('.')
leftval = creds
for kind_part in kind_parts:
leftval = leftval[kind_part]
except KeyError:
return False
return match == six.text_type(leftval)
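# Illustrative sketch (not part of the original module): a GenericCheck first
# expands the match against the target and then compares it with the
# credential named by kind, roughly:
#
#     check = GenericCheck('tenant', '%(tenant_id)s')
#     check({'tenant_id': 'abc'}, {'tenant': 'abc'}, enforcer=None)    # True
#     check({'tenant_id': 'abc'}, {'tenant': 'other'}, enforcer=None)  # False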
|
|
from steerclear import app
from steerclear.forms import RegisterForm, LoginForm
import unittest, flask
# username string min and max lengths
USERNAME_MIN_LENGTH = 1
USERNAME_MAX_LENGTH = 119
# password string min and max lengths
PASSWORD_MIN_LENGTH = 1
PASSWORD_MAX_LENGTH = 119
"""
RegisterFormTestCase
--------------------
Tests for login module RegisterForm
"""
class RegisterFormTestCase(unittest.TestCase):
"""
submit_form
-----------
helper method to submit a RegisterForm by faking
    a request context. Returns True if the form
validated and False if not.
*payload* is a dictionary of name/value pairs
of the form data that is being submitted
"""
def submit_form(self, form, payload):
with app.test_request_context():
myform = form(data=payload)
return myform.validate()
def setUp(self):
self.payload = {
u"username": u"ryan",
u"password": u"1234",
u'phone': u'+17572214000',
}
"""
test_ride_form_correct_submit
-----------------------------
Tests that a RegisterForm can be validated correctly
"""
def test_ride_form_correct_submit(self):
result = self.submit_form(RegisterForm, self.payload)
self.assertTrue(result)
"""
test_data_required_fields
-------------------------
tests that a RegisterForm is not valid unless
all fields are included in the form data
"""
def test_data_required_fields(self):
payload = self.payload
for key in payload.keys():
bad_payload = payload.copy()
bad_payload.pop(key, None)
result = self.submit_form(RegisterForm, bad_payload)
self.assertFalse(result)
"""
test_username_min_length
------------------------
Tests that a RegisterForm validates the minimum length
for the username field correctly
"""
def test_username_min_length(self):
payload = self.payload.copy()
payload[u'username'] = 'x' * USERNAME_MIN_LENGTH
result = self.submit_form(RegisterForm, payload)
self.assertTrue(result)
payload = self.payload.copy()
payload[u'username'] = 'x' * (USERNAME_MIN_LENGTH-1)
result = self.submit_form(RegisterForm, payload)
self.assertFalse(result)
"""
test_username_max_length
------------------------
Tests that a RegisterForm validates the maximum length
for the username field correctly
"""
def test_username_max_length(self):
payload = self.payload.copy()
payload[u'username'] = 'x' * USERNAME_MAX_LENGTH
result = self.submit_form(RegisterForm, payload)
self.assertTrue(result)
payload = self.payload.copy()
payload[u'username'] = 'x' * (USERNAME_MAX_LENGTH+1)
result = self.submit_form(RegisterForm, payload)
self.assertFalse(result)
"""
test_password_min_length
------------------------
Tests that a RegisterForm validates the minimum length
for the password field correctly
"""
def test_password_min_length(self):
payload = self.payload.copy()
payload[u'password'] = 'x' * PASSWORD_MIN_LENGTH
result = self.submit_form(RegisterForm, payload)
self.assertTrue(result)
payload = self.payload.copy()
payload[u'password'] = 'x' * (PASSWORD_MIN_LENGTH-1)
result = self.submit_form(RegisterForm, payload)
self.assertFalse(result)
"""
test_password_max_length
------------------------
Tests that a RegisterForm validates the maximum length
for the password field correctly
"""
def test_password_max_length(self):
payload = self.payload.copy()
payload[u'password'] = 'x' * PASSWORD_MAX_LENGTH
result = self.submit_form(RegisterForm, payload)
self.assertTrue(result)
payload = self.payload.copy()
payload[u'password'] = 'x' * (PASSWORD_MAX_LENGTH+1)
result = self.submit_form(RegisterForm, payload)
self.assertFalse(result)
"""
test_phone_bad_format_too_few_digits
------------------------------------
Tests that RegisterForm fails to validate if phone number
field has too few digits to be a phone number
"""
def test_phone_bad_format_too_few_digits(self):
payload = self.payload.copy()
payload[u'phone'] = self.payload[u'phone'][:-1]
result = self.submit_form(RegisterForm, payload)
self.assertFalse(result)
"""
test_phone_bad_format_too_many_digits
-------------------------------------
Tests that RegisterForm fails to validate if phone field
has too many digits to be a correct phone number
"""
def test_phone_bad_format_too_many_digits(self):
payload = self.payload.copy()
payload[u'phone'] += '1'
result = self.submit_form(RegisterForm, payload)
self.assertFalse(result)
"""
test_phone_bad_format_invalid_number
------------------------------------
Tests that RegisterForm fails to validate if
phone number is not a valid number
"""
def test_phone_bad_format_invalid_number(self):
payload = self.payload.copy()
payload[u'phone'] += '+12223334444'
result = self.submit_form(RegisterForm, payload)
self.assertFalse(result)
"""
test_phone_weird_formats
------------------------
    Tests that the RegisterForm can handle weirdly
formatted numbers correctly
"""
def test_phone_weird_formats(self):
def test(formats):
for f in formats:
payload = self.payload.copy()
payload[u'phone'] = f
result = self.submit_form(RegisterForm, payload)
self.assertTrue(result)
test([
u'+1(757)2214000',
u'+1(757) 2214000',
u'+1757-2214000',
u'+1757-221-4000',
u'+1757221-4000',
u'+1(757) 221-4000',
u'+1(757)221-4000',
u'+1757 221-4000'
])
"""
LoginFormTestCase
-----------------
Test case for LoginForm
"""
class LoginFormTestCase(unittest.TestCase):
"""
submit_form
-----------
    helper method to submit a LoginForm by faking
    a request context. Returns True if the form
validated and False if not.
*payload* is a dictionary of name/value pairs
of the form data that is being submitted
"""
def submit_form(self, form, payload):
with app.test_request_context():
myform = form(data=payload)
return myform.validate()
def setUp(self):
self.payload = {
u"username": u"ryan",
u"password": u"1234",
}
"""
test_login_form_correct_submit
-----------------------------
Tests that a LoginForm can be validated correctly
"""
def test_login_form_correct_submit(self):
result = self.submit_form(LoginForm, self.payload)
self.assertTrue(result)
"""
test_data_required_fields
-------------------------
tests that a LoginForm is not valid unless
all fields are included in the form data
"""
def test_data_required_fields(self):
payload = self.payload
for key in payload.keys():
bad_payload = payload.copy()
bad_payload.pop(key, None)
result = self.submit_form(LoginForm, bad_payload)
self.assertFalse(result)
|
|
from __future__ import print_function, division
from sympy.core import C, Add, Mul, Pow, S
from sympy.core.compatibility import default_sort_key, string_types
from sympy.core.sympify import _sympify
from sympy.core.mul import _keep_coeff
from sympy.printing.str import StrPrinter
from sympy.printing.precedence import precedence
from symcc.types.ast import Assign
__all__ = ["CodePrinter"]
class CodePrinter(StrPrinter):
"""
The base class for code-printing subclasses.
"""
_operators = {
'and': '&&',
'or': '||',
'not': '!',
}
def doprint(self, expr, assign_to=None):
"""
Print the expression as code.
Parameters
----------
expr : Expression
The expression to be printed.
assign_to : Symbol, MatrixSymbol, or string (optional)
If provided, the printed code will set the expression to a
variable with name ``assign_to``.
"""
if isinstance(assign_to, string_types):
assign_to = C.Symbol(assign_to)
elif not isinstance(assign_to, (C.Basic, type(None))):
raise TypeError("{0} cannot assign to object of type {1}".format(
type(self).__name__, type(assign_to)))
if assign_to:
expr = Assign(assign_to, expr)
else:
expr = _sympify(expr)
# Do the actual printing
lines = self._print(expr).splitlines()
# Format the output
return "\n".join(self._format_code(lines))
def _get_statement(self, codestring):
"""Formats a codestring with the proper line ending."""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _get_comment(self, text):
"""Formats a text string as a comment."""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _declare_number_const(self, name, value):
"""Declare a numeric constant at the top of a function"""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _format_code(self, lines):
"""Take in a list of lines of code, and format them accordingly.
This may include indenting, wrapping long lines, etc..."""
raise NotImplementedError("This function must be implemented by "
"subclass of CodePrinter.")
def _print_Assign(self, expr):
lhs_code = self._print(expr.lhs)
rhs_code = self._print(expr.rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def _print_Function(self, expr):
if expr.func.__name__ in self.known_functions:
cond_func = self.known_functions[expr.func.__name__]
func = None
if isinstance(cond_func, str):
func = cond_func
else:
for cond, func in cond_func:
if cond(*expr.args):
break
if func is not None:
return "%s(%s)" % (func, self.stringify(expr.args, ", "))
elif hasattr(expr, '_imp_') and isinstance(expr._imp_, C.Lambda):
# inlined function
return self._print(expr._imp_(*expr.args))
else:
return self._print_not_supported(expr)
def _print_NumberSymbol(self, expr):
return str(expr)
def _print_Dummy(self, expr):
# dummies must be printed as unique symbols
return "%s_%i" % (expr.name, expr.dummy_index) # Dummy
def _print_And(self, expr):
PREC = precedence(expr)
return (" %s " % self._operators['and']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Or(self, expr):
PREC = precedence(expr)
return (" %s " % self._operators['or']).join(self.parenthesize(a, PREC)
for a in sorted(expr.args, key=default_sort_key))
def _print_Xor(self, expr):
if self._operators.get('xor') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (" %s " % self._operators['xor']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Equivalent(self, expr):
if self._operators.get('equivalent') is None:
return self._print_not_supported(expr)
PREC = precedence(expr)
return (" %s " % self._operators['equivalent']).join(self.parenthesize(a, PREC)
for a in expr.args)
def _print_Not(self, expr):
PREC = precedence(expr)
return self._operators['not'] + self.parenthesize(expr.args[0], PREC)
def _print_Mul(self, expr):
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if item.is_commutative and item.is_Pow and item.exp.is_Rational and item.exp.is_negative:
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
b.append(Pow(item.base, -item.exp))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
if len(b) == 0:
return sign + '*'.join(a_str)
elif len(b) == 1:
if len(a) == 1 and not (a[0].is_Atom or a[0].is_Add):
return sign + "%s/" % a_str[0] + '*'.join(b_str)
else:
return sign + '*'.join(a_str) + "/%s" % b_str[0]
else:
return sign + '*'.join(a_str) + "/(%s)" % '*'.join(b_str)
def _print_not_supported(self, expr):
raise TypeError("{0} not supported in {1}".format(type(expr), self.language))
# Number constants
_print_Catalan = _print_NumberSymbol
_print_EulerGamma = _print_NumberSymbol
_print_GoldenRatio = _print_NumberSymbol
_print_Exp1 = _print_NumberSymbol
_print_Pi = _print_NumberSymbol
# The following can not be simply translated into C or Fortran
_print_Basic = _print_not_supported
_print_ComplexInfinity = _print_not_supported
_print_Derivative = _print_not_supported
_print_dict = _print_not_supported
_print_ExprCondPair = _print_not_supported
_print_GeometryEntity = _print_not_supported
_print_Infinity = _print_not_supported
_print_Integral = _print_not_supported
_print_Interval = _print_not_supported
_print_Limit = _print_not_supported
_print_list = _print_not_supported
_print_Matrix = _print_not_supported
_print_ImmutableMatrix = _print_not_supported
_print_MutableDenseMatrix = _print_not_supported
_print_MatrixBase = _print_not_supported
_print_DeferredVector = _print_not_supported
_print_NaN = _print_not_supported
_print_NegativeInfinity = _print_not_supported
_print_Normal = _print_not_supported
_print_Order = _print_not_supported
_print_PDF = _print_not_supported
_print_RootOf = _print_not_supported
_print_RootsOf = _print_not_supported
_print_RootSum = _print_not_supported
_print_Sample = _print_not_supported
_print_SparseMatrix = _print_not_supported
_print_tuple = _print_not_supported
_print_Uniform = _print_not_supported
_print_Unit = _print_not_supported
_print_Wild = _print_not_supported
_print_WildFunction = _print_not_supported
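# Illustrative sketch (not part of the original module; names are hypothetical):
# a concrete printer only needs to provide the hooks that raise
# NotImplementedError above plus a known_functions mapping, e.g.:
#
#     class SimpleCPrinter(CodePrinter):
#         language = "C"
#         known_functions = {}
#         def _get_statement(self, codestring):
#             return codestring + ";"
#         def _get_comment(self, text):
#             return "/* %s */" % text
#         def _declare_number_const(self, name, value):
#             return "double const %s = %s;" % (name, value)
#         def _format_code(self, lines):
#             return lines
#
#     SimpleCPrinter().doprint(expr, assign_to="y")   # -> "y = ...;"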
|
|
"""This directory is setup with configurations to run the main functional test.
Inspired in bcbio-nextgen code
"""
from __future__ import print_function
import os
import subprocess
import unittest
import shutil
import contextlib
import functools
from nose import SkipTest
from nose.plugins.attrib import attr
@contextlib.contextmanager
def make_workdir():
remove_old_dir = True
dirname = os.path.join(os.path.dirname(__file__), "test_automated_output")
if remove_old_dir:
if os.path.exists(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
orig_dir = os.getcwd()
try:
os.chdir(dirname)
yield dirname
finally:
os.chdir(orig_dir)
def expected_failure(test):
"""Small decorator to mark tests as expected failure.
Useful for tests that are work-in-progress.
"""
@functools.wraps(test)
def inner(*args, **kwargs):
try:
test(*args, **kwargs)
except Exception:
raise SkipTest
else:
raise AssertionError('Failure expected')
return inner
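# Usage sketch (illustrative only): decorate a known-broken test so the suite
# records it as a skip while it fails and raises once it unexpectedly passes:
#
#     @expected_failure
#     def test_not_ready_yet(self):
#         ...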
class AutomatedAnalysisTest(unittest.TestCase):
"""Setup a full automated analysis and run the pipeline.
"""
def setUp(self):
self.data_dir = os.path.join(os.path.dirname(__file__), "data", "automated")
def _install_test_files(self, data_dir):
"""Download required sequence and reference files.
"""
# self._download_to_dir(url, dirname)
def _download_to_dir(self, url, dirname):
print(dirname)
cl = ["wget", url]
subprocess.check_call(cl)
cl = ["tar", "-xzvpf", os.path.basename(url)]
subprocess.check_call(cl)
shutil.move(os.path.basename(dirname), dirname)
os.remove(os.path.basename(url))
@attr(simulate=True)
def test_simulate(self):
"""Check simulated data"""
mirna = "TGAGGTAGTAGGTTGTATAGTT"
correct = 0
n = 0
with open("data/examples/simulation/res/reads.mirna") as inh:
header = inh.readline()
for line in inh:
cols = line.strip().split()
mut, add, t5, t3 = cols[6:10]
seq = cols[0]
if mut!="0":
pos = int(mut[:-2])
nt1 = mut[-2]
nt2 = mut[-1]
seql = list(seq)
seql[pos] = nt2
seq = "".join(seql)
if t5!="0" and t5.islower():
seq = "%s%s" % (t5.upper(), seq)
elif t5!="0" and t5.isupper():
seq = seq[len(t5):]
if add!="0":
seq = seq[:-len(add)]
if t3!="0" and t3.islower():
seq = "%s%s" % (seq, t3.upper())
elif t3!="0" and t3.isupper():
seq = seq[:-len(t3)]
if seq == mirna:
correct += 1
else:
print("\nerror:\n%s\n%s" % (seq, mirna))
n += 1
print("rate %s/%s" % (correct, n))
@attr(complete=True)
@attr(annotate=True)
@attr(bam=True)
@attr(cmd=True)
def test_srnaseq_annotation_bam(self):
"""Run miraligner analysis
"""
with make_workdir():
clcode = ["mirtop",
"gff",
"--sps", "hsa", "--add-extra",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/examples/annotate/hsa.gff3",
"-o", "test_out_mirs",
"../../data/examples/annotate/sim_isomir.sam"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(low_memory=True)
@attr(cmd=True)
def test_srnaseq_annotation_bam_chunk(self):
"""Run miraligner analysis
"""
with make_workdir():
clcode = ["mirtop",
"gff", "--low-memory",
"--sps", "hsa", "--add-extra",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/examples/annotate/hsa.gff3",
"-o", "test_out_mirs",
"../../data/examples/annotate/sim_isomir.sam"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(cmd_bam_genomic=True)
@attr(complete=True)
@attr(cmd=True)
def test_srnaseq_annotation_genomic_bam(self):
"""Run genomic bam analysis
"""
with make_workdir():
clcode = ["mirtop",
"gff",
"--sps", "hsa", "--add-extra", "--genomic",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/db/mirbase/hsa.gff3",
"-o", "test_out_mirs",
"../../data/examples/annotate/hsa-let-7a-nm.sam"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(cmd_bam_genomic_low_memory=True)
@attr(complete=True)
@attr(cmd=True)
def test_srnaseq_annotation_genomic_bam_low_memory(self):
"""Run genomic bam analysis
"""
with make_workdir():
clcode = ["mirtop",
"gff", "--genomic", "--low-memory",
"--sps", "hsa", "--add-extra",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/db/mirbase/hsa.gff3",
"-o", "test_out_mirs",
"../../data/examples/annotate/hsa-let-7a-nm.sam"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_seqbuster=True)
@attr(cmd=True)
def test_srnaseq_annotation_seqbuster(self):
"""Run miraligner analysis
"""
with make_workdir():
clcode = ["mirtop",
"gff",
"--format", "seqbuster",
"--sps", "hsa",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/examples/annotate/hsa.gff3",
"-o", "test_out_mirs",
"../../data/examples/seqbuster/reads.mirna"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_seqbuster_low_memory=True)
@attr(cmd=True)
def test_srnaseq_annotation_seqbuster_low_memory(self):
"""Run miraligner analysis
"""
with make_workdir():
clcode = ["mirtop",
"gff", "--low-memory",
"--format", "seqbuster",
"--sps", "hsa",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/examples/annotate/hsa.gff3",
"-o", "test_out_mirs",
"../../data/examples/seqbuster/reads.mirna"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_isomirsea=True)
@attr(cmd=True)
def test_srnaseq_annotation_isomirsea(self):
"""Run isomirsea analysis
"""
with make_workdir():
clcode = ["mirtop",
"gff",
"--format", "isomirsea",
"--sps", "hsa",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/examples/annotate/hsa.gff3",
"-o", "test_out_mirs",
"../../data/examples/isomir-sea/tagMir-all.gff",
"-d", "-vd"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_srnabench=True)
@attr(cmd=True)
def test_srnaseq_annotation_srnabench(self):
"""Run srnabench analysis
"""
with make_workdir():
clcode = ["mirtop",
"gff",
"--format", "srnabench",
"--sps", "hsa",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/examples/annotate/hsa.gff3",
"-o", "test_out_mirs",
"../../data/examples/srnabench/",
"-d", "-vd"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_optimir=True)
@attr(cmd=True)
def test_srnaseq_annotation_optimir(self):
"""Run optimir analysis
"""
with make_workdir():
clcode = ["mirtop",
"gff",
"--format", "optimir",
"--sps", "hsa",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/examples/annotate/hsa.gff3",
"-o", "test_out_mirs",
"../../data/examples/optimir/synthetic_100_full.gff3",
"-d", "-vd"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_manatee=True)
@attr(cmd=True)
def test_srnaseq_annotation_manatee(self):
"""Run Manatee analysis
"""
with make_workdir():
clcode = ["mirtop",
"gff",
"--format", "manatee",
"--sps", "hsa",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/examples/annotate/hsa.gff3",
"-o", "test_out_mirs",
"../../data/examples/manatee/simulated.sam",
"-d", "-vd"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_stats=True)
@attr(cmd=True)
def test_srnaseq_stats(self):
"""Run stats analysis
"""
with make_workdir():
clcode = ["mirtop",
"stats",
"-o", "test_out_mirs",
"../../data/examples/gff/correct_file.gff"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
if not os.path.exists("test_out_mirs/mirtop_stats.txt"):
raise ValueError("File doesn't exist, something is wrong with stats cmd.")
if sum(1 for line in open('test_out_mirs/mirtop_stats.txt')) == 1:
raise ValueError("File is empty, something is wrong with stats cmd.")
@attr(complete=True)
@attr(cmd_merge=True)
@attr(cmd=True)
def test_merge_bam(self):
"""
Run collapse two samples
"""
with make_workdir():
clcode = ["mirtop",
"gff",
"--sps", "hsa", "--add-extra",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/examples/annotate/hsa.gff3",
"-o", "test_out_mirs",
"../../data/merge/samples1.sam",
"../../data/merge/samples2.sam"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_export_seqbuster=True)
@attr(cmd=True)
def test_export_seqbuster(self):
"""
Run SEQBUSTER export command
"""
with make_workdir():
clcode = ["mirtop",
"export",
"-o", "test_out_mirs",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/examples/annotate/hsa.gff3",
"../../data/examples/gff/correct_file.gff"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_export_vcf=True)
@attr(cmd=True)
def test_export_vcf(self):
"""
Run VCF export command
"""
with make_workdir():
clcode = ["mirtop",
"export",
"-o", "test_out_mirs",
"--format", "vcf",
"-d", "-vd",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/examples/annotate/hsa.gff3",
"../../data/examples/gff/correct_file.gff"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_export_fasta=True)
@attr(cmd=True)
def test_export_fasta(self):
"""
Run FASTA export command
"""
with make_workdir():
clcode = ["mirtop",
"export",
"-o", "test_out_mirs",
"--format", "fasta",
"-d", "-vd",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/examples/annotate/hsa.gff3",
"../../data/examples/gff/correct_file.gff"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_count=True)
@attr(cmd=True)
def test_count(self):
"""
Run count command
"""
with make_workdir():
clcode = ["mirtop",
"counts",
"-o", "test_out_mirs",
"--hairpin", "../../data/examples/annotate/hairpin.fa",
"--gtf", "../../data/examples/annotate/hsa.gff3",
"--gff", "../../data/examples/synthetic/let7a-5p.gff"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_spikeins=True)
@attr(cmd=True)
def test_spikeins_cmd(self):
"""Run spikeins analysis
"""
import platform
with make_workdir():
shutil.copy("../../data/examples/spikeins/spikeins.fa",
"spikeins.fa")
clcode = ["mirtop",
"spikein",
"spikeins.fa",
"-o",
"test_out_spikeins"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
if platform.system() == "Linux":
clcode = ["razers3", "-dr", "0", "-i", "80", "-rr", "90",
"-f", "-o", "spikeins.sam",
"test_out_spikeins/spikeins_pre.fasta",
"../../data/examples/spikeins/test-spikeins.fa"]
print(" ".join(clcode))
subprocess.check_call(clcode)
else:
shutil.copy("../../data/examples/spikeins/spikeins.sam",
"spikeins.sam")
clcode = ["mirtop",
"gff",
"--add-extra",
"--hairpin", "test_out_spikeins/spikeins_pre.fasta",
"--gtf", "test_out_spikeins/spikeins_pre.gff",
"-o", "test_out_mirs",
"spikeins.sam"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_update=True)
@attr(cmd=True)
def test_update_cmd(self):
"""Run update analysis
"""
with make_workdir():
clcode = ["mirtop",
"update",
"-o", "test_out_mirs",
"../../data/examples/versions/version1.0.gff"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_validate_cmd(self):
"""Run update analysis
"""
with make_workdir():
clcode = ["mirtop",
"validate",
"../../data/examples/gff/correct_file.gff"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_create_1_cmd(self):
"""Run sql command to incorporate GFF to SQLite
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-c",
"--gff",
"../../data/examples/annotate/SQL_sample.gff",
"-o",
"../../data/examples/annotate",
"--db",
"SQL_sample.db"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_create_2_cmd(self):
"""Run sql command to incorporate GFF to SQLite
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-c",
"--gff",
"../../data/examples/annotate/SQL_sample.gff",
"-o",
"../../data/examples/annotate"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_query_showTables_cmd(self):
"""Run sql command to query from a database to show tables using SQLite
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-q",
"--db",
"../../data/examples/annotate/query_sample.db",
"-e",
"show-tables"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_query_showSchema_cmd(self):
"""Run sql command to query from a database to show schema using SQLite
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-q",
"--db",
"../../data/examples/annotate/query_sample.db",
"-e",
"show-schema",
"-t",
"summary"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_query_showColumns_cmd(self):
"""Run sql command to query from a database to show columns using SQLite
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-q",
"--db",
"../../data/examples/annotate/query_sample.db",
"-e",
"show-columns"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_query_descSummary_cmd(self):
"""Run sql command to query from a database to display the header of the GFF using SQLite
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-q",
"--db",
"../../data/examples/annotate/query_sample.db",
"-e",
"describe-gff"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_query_statIsomirs_cmd(self):
"""Run sql command to query from a database to summarize isomirs per miRNA
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-q",
"--db",
"../../data/examples/annotate/query_sample.db",
"-e",
"isomirs-per-mirna",
"-miR",
"hsa-let-7a-5p,hsa-let-7d-5p"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_query_statIsomirsFile_cmd(self):
"""Run sql command to query from a database to summarize isomirs per miRNA reading from afile
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-q",
"--db",
"../../data/examples/annotate/query_sample.db",
"-e",
"isomirs-per-mirna",
"-miR",
"../../data/examples/annotate/miRNA_sample_list.txt"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_query_SelectLimit_cmd(self):
"""Run sql command to query from database using limit option
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-q",
"--db",
"../../data/examples/annotate/query_sample.db",
"-e",
"select",
"--limit",
"2"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_query_SelectColumns_cmd(self):
"""Run sql command to query from database using limit option
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-q",
"--db",
"../../data/examples/annotate/query_sample.db",
"-e",
"select",
"-l",
"2",
"-col",
"seqID,UID,Read,iso_5p,iso_3p,start,end"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_query_SelectMirna_cmd(self):
"""Run sql command to query from database for specific miRNAs
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-q",
"--db",
"../../data/examples/annotate/query_sample.db",
"-e",
"select",
"-l",
"4",
"-col",
"seqID,UID,Read,iso_5p,iso_3p,start,end",
"-miR",
"hsa-let-7i-5p"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_query_SelectiVariant_cmd(self):
"""Run sql command to query from database for specific variant types
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-q",
"--db",
"../../data/examples/annotate/query_sample.db",
"-e",
"select",
"-l",
"5",
"-var",
"iso_5p,iso_3p,iso_snv_central_offset"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_query_SelectFilter_cmd(self):
"""Run sql command to query from database using filters
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-q",
"--db",
"../../data/examples/annotate/query_sample.db",
"-e",
"select",
"-l",
"5",
"-var",
"iso_5p,iso_3p,iso_snv_central_offset",
"-f",
"Pass"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_query_SelectCount_cmd(self):
"""Run sql command to query from database to fetch counts of the return values
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-q",
"--db",
"../../data/examples/annotate/query_sample.db",
"-e", "select",
"-var", "iso_5p,iso_3p",
"-miR", "hsa-miR-142-5p,hsa-miR-372-3p",
"-n","T"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
@attr(complete=True)
@attr(cmd_validate=True)
@attr(cmd=True)
def test_sql_query_SelectTextOut_cmd(self):
"""Run sql command to query from database and return the output to a text file
"""
with make_workdir():
clcode = ["mirtop",
"sql",
"-q",
"--db",
"../../data/examples/annotate/query_sample.db",
"-e", "select",
"-var", "iso_5p,iso_3p",
"-miR", "hsa-miR-142-5p,hsa-miR-372-3p",
"-n","T",
"-txto","sample_count.txt"]
print("")
print(" ".join(clcode))
subprocess.check_call(clcode)
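    # Every test above repeats the same build / echo / execute pattern. Below
    # is a minimal sketch of a shared helper (hypothetical, not part of the
    # mirtop test suite) that the methods could delegate to instead of
    # repeating the three lines each time.
    @staticmethod
    def _run_clcode(clcode):
        """Echo a mirtop command line and run it, raising on a non-zero exit."""
        print("")
        print(" ".join(clcode))
        subprocess.check_call(clcode)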
|
|
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for flavor basic functions"""
from nova.compute import flavors
from nova import context
from nova.db import constants as db_const
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova import test
class TestValidateExtraSpecKeys(test.NoDBTestCase):
def test_flavor_validate_extra_spec_keys_invalid_input(self):
for key_name_list in [['', ], ['*', ], ['+', ]]:
self.assertRaises(
exception.InvalidInput,
flavors.validate_extra_spec_keys, key_name_list)
def test_flavor_validate_extra_spec_keys(self):
key_name_list = ['abc', 'ab c', 'a-b-c', 'a_b-c', 'a:bc']
flavors.validate_extra_spec_keys(key_name_list)
class TestGetFlavorByFlavorID(test.TestCase):
"""Test cases for flavor code."""
def test_will_not_get_instance_by_unknown_flavor_id(self):
# Ensure get by flavor raises error with wrong flavorid.
self.assertRaises(exception.FlavorNotFound,
flavors.get_flavor_by_flavor_id,
'unknown_flavor')
def test_will_get_instance_by_flavor_id(self):
default_flavor = objects.Flavor.get_by_name(
context.get_admin_context(), 'm1.small')
flavorid = default_flavor.flavorid
fetched = flavors.get_flavor_by_flavor_id(flavorid)
self.assertIsInstance(fetched, objects.Flavor)
self.assertEqual(default_flavor.flavorid, fetched.flavorid)
class TestExtractFlavor(test.TestCase):
def setUp(self):
super().setUp()
self.context = context.get_admin_context()
def _dict_to_metadata(self, data):
return [{'key': key, 'value': value} for key, value in data.items()]
def _test_extract_flavor(self, prefix):
flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
flavor_p = obj_base.obj_to_primitive(flavor)
metadata = {}
flavors.save_flavor_info(metadata, flavor, prefix)
instance = {'system_metadata': self._dict_to_metadata(metadata)}
_flavor = flavors.extract_flavor(instance, prefix)
_flavor_p = obj_base.obj_to_primitive(_flavor)
props = flavors.system_metadata_flavor_props.keys()
for key in list(flavor_p.keys()):
if key not in props:
del flavor_p[key]
self.assertEqual(flavor_p, _flavor_p)
def test_extract_flavor(self):
self._test_extract_flavor('')
def test_extract_flavor_no_sysmeta(self):
instance = {}
prefix = ''
result = flavors.extract_flavor(instance, prefix)
self.assertIsNone(result)
def test_extract_flavor_prefix(self):
self._test_extract_flavor('foo_')
class TestSaveFlavorInfo(test.TestCase):
def setUp(self):
super().setUp()
self.context = context.get_admin_context()
def test_save_flavor_info(self):
flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
example = {}
example_prefix = {}
for key in flavors.system_metadata_flavor_props.keys():
example['instance_type_%s' % key] = flavor[key]
example_prefix['fooinstance_type_%s' % key] = flavor[key]
metadata = {}
flavors.save_flavor_info(metadata, flavor)
self.assertEqual(example, metadata)
metadata = {}
flavors.save_flavor_info(metadata, flavor, 'foo')
self.assertEqual(example_prefix, metadata)
def test_flavor_numa_extras_are_saved(self):
flavor = objects.Flavor.get_by_name(self.context, 'm1.small')
flavor['extra_specs'] = {
'hw:numa_mem.0': '123',
'hw:numa_cpus.0': '456',
'hw:numa_mem.1': '789',
'hw:numa_cpus.1': 'ABC',
'foo': 'bar',
}
sysmeta = flavors.save_flavor_info({}, flavor)
_flavor = flavors.extract_flavor({'system_metadata': sysmeta})
expected_extra_specs = {
'hw:numa_mem.0': '123',
'hw:numa_cpus.0': '456',
'hw:numa_mem.1': '789',
'hw:numa_cpus.1': 'ABC',
}
self.assertEqual(expected_extra_specs, _flavor['extra_specs'])
class TestCreateFlavor(test.TestCase):
def assertInvalidInput(self, *create_args, **create_kwargs):
self.assertRaises(
exception.InvalidInput, flavors.create,
*create_args, **create_kwargs)
def test_memory_must_be_positive_db_integer(self):
self.assertInvalidInput('flavor1', 'foo', 1, 120)
self.assertInvalidInput('flavor1', -1, 1, 120)
self.assertInvalidInput('flavor1', 0, 1, 120)
self.assertInvalidInput('flavor1', db_const.MAX_INT + 1, 1, 120)
flavors.create('flavor1', 1, 1, 120)
def test_vcpus_must_be_positive_db_integer(self):
        self.assertInvalidInput('flavor1', 64, 'foo', 120)
self.assertInvalidInput('flavor1', 64, -1, 120)
self.assertInvalidInput('flavor1', 64, 0, 120)
self.assertInvalidInput('flavor1', 64, db_const.MAX_INT + 1, 120)
flavors.create('flavor1', 64, 1, 120)
def test_root_gb_must_be_nonnegative_db_integer(self):
self.assertInvalidInput('flavor1', 64, 1, 'foo')
self.assertInvalidInput('flavor1', 64, 1, -1)
self.assertInvalidInput('flavor1', 64, 1, db_const.MAX_INT + 1)
flavors.create('flavor1', 64, 1, 0)
flavors.create('flavor2', 64, 1, 120)
def test_ephemeral_gb_must_be_nonnegative_db_integer(self):
self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb='foo')
self.assertInvalidInput('flavor1', 64, 1, 120, ephemeral_gb=-1)
self.assertInvalidInput(
'flavor1', 64, 1, 120, ephemeral_gb=db_const.MAX_INT + 1)
flavors.create('flavor1', 64, 1, 120, ephemeral_gb=0)
flavors.create('flavor2', 64, 1, 120, ephemeral_gb=120)
def test_swap_must_be_nonnegative_db_integer(self):
self.assertInvalidInput('flavor1', 64, 1, 120, swap='foo')
self.assertInvalidInput('flavor1', 64, 1, 120, swap=-1)
self.assertInvalidInput(
'flavor1', 64, 1, 120, swap=db_const.MAX_INT + 1)
flavors.create('flavor1', 64, 1, 120, swap=0)
flavors.create('flavor2', 64, 1, 120, swap=1)
def test_rxtx_factor_must_be_positive_float(self):
self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor='foo')
self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=-1.0)
self.assertInvalidInput('flavor1', 64, 1, 120, rxtx_factor=0.0)
flavor = flavors.create('flavor1', 64, 1, 120, rxtx_factor=1.0)
self.assertEqual(1.0, flavor.rxtx_factor)
flavor = flavors.create('flavor2', 64, 1, 120, rxtx_factor=1.1)
self.assertEqual(1.1, flavor.rxtx_factor)
def test_rxtx_factor_must_be_within_sql_float_range(self):
# We do * 10 since this is an approximation and we need to make sure
        # the difference is noticeable.
over_rxtx_factor = db_const.SQL_SP_FLOAT_MAX * 10
self.assertInvalidInput('flavor1', 64, 1, 120,
rxtx_factor=over_rxtx_factor)
flavor = flavors.create(
'flavor2', 64, 1, 120, rxtx_factor=db_const.SQL_SP_FLOAT_MAX)
self.assertEqual(db_const.SQL_SP_FLOAT_MAX, flavor.rxtx_factor)
def test_is_public_must_be_valid_bool_string(self):
self.assertInvalidInput('flavor1', 64, 1, 120, is_public='foo')
flavors.create('flavor1', 64, 1, 120, is_public='TRUE')
flavors.create('flavor2', 64, 1, 120, is_public='False')
flavors.create('flavor3', 64, 1, 120, is_public='Yes')
flavors.create('flavor4', 64, 1, 120, is_public='No')
flavors.create('flavor5', 64, 1, 120, is_public='Y')
flavors.create('flavor6', 64, 1, 120, is_public='N')
flavors.create('flavor7', 64, 1, 120, is_public='1')
flavors.create('flavor8', 64, 1, 120, is_public='0')
flavors.create('flavor9', 64, 1, 120, is_public='true')
def test_flavorid_populated(self):
flavor1 = flavors.create('flavor1', 64, 1, 120)
self.assertIsNotNone(flavor1.flavorid)
flavor2 = flavors.create('flavor2', 64, 1, 120, flavorid='')
self.assertIsNotNone(flavor2.flavorid)
flavor3 = flavors.create('flavor3', 64, 1, 120, flavorid='foo')
self.assertEqual('foo', flavor3.flavorid)
def test_default_values(self):
flavor1 = flavors.create('flavor1', 64, 1, 120)
self.assertIsNotNone(flavor1.flavorid)
self.assertEqual(flavor1.ephemeral_gb, 0)
self.assertEqual(flavor1.swap, 0)
self.assertEqual(flavor1.rxtx_factor, 1.0)
def test_basic_create(self):
# Ensure instance types can be created.
ctxt = context.get_admin_context()
original_list = objects.FlavorList.get_all(ctxt)
# Create new type and make sure values stick
flavor = flavors.create('flavor', 64, 1, 120)
self.assertEqual(flavor.name, 'flavor')
self.assertEqual(flavor.memory_mb, 64)
self.assertEqual(flavor.vcpus, 1)
self.assertEqual(flavor.root_gb, 120)
# Ensure new type shows up in list
new_list = objects.FlavorList.get_all(ctxt)
self.assertNotEqual(
len(original_list), len(new_list),
'flavor was not created')
def test_create_then_delete(self):
ctxt = context.get_admin_context()
original_list = objects.FlavorList.get_all(ctxt)
flavor = flavors.create('flavor', 64, 1, 120)
# Ensure new type shows up in list
new_list = objects.FlavorList.get_all(ctxt)
self.assertNotEqual(
len(original_list), len(new_list),
'instance type was not created')
flavor.destroy()
self.assertRaises(
exception.FlavorNotFound,
objects.Flavor.get_by_name, ctxt, flavor.name)
# Deleted instance should not be in list anymore
new_list = objects.FlavorList.get_all(ctxt)
self.assertEqual(len(original_list), len(new_list))
for i, f in enumerate(original_list):
self.assertIsInstance(f, objects.Flavor)
self.assertEqual(f.flavorid, new_list[i].flavorid)
def test_duplicate_names_fail(self):
# Ensures that name duplicates raise FlavorExists
flavors.create('flavor', 256, 1, 120, 200, 'flavor1')
self.assertRaises(
exception.FlavorExists,
flavors.create, 'flavor', 64, 1, 120)
def test_duplicate_flavorids_fail(self):
# Ensures that flavorid duplicates raise FlavorExists
flavors.create('flavor1', 64, 1, 120, flavorid='flavorid')
self.assertRaises(
exception.FlavorIdExists,
flavors.create, 'flavor2', 64, 1, 120, flavorid='flavorid')
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Special Math Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import importlib
import numpy as np
from tensorflow.python.eager import backprop as tfe_backprop
from tensorflow.python.eager import context as tfe_context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops.distributions import special_math
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def try_import(name): # pylint: disable=invalid-name
module = None
try:
module = importlib.import_module(name)
except ImportError as e:
tf_logging.warning("Could not import %s: %s" % (name, str(e)))
return module
special = try_import("scipy.special")
stats = try_import("scipy.stats")
sm = special_math
def _check_strictly_increasing(array_1d):
diff = np.diff(array_1d)
np.testing.assert_array_less(0, diff)
def _make_grid(dtype, grid_spec):
"""Returns a uniform grid + noise, reshaped to shape argument."""
rng = np.random.RandomState(0)
num_points = np.prod(grid_spec.shape)
grid = np.linspace(grid_spec.min, grid_spec.max, num=num_points).astype(dtype)
grid_spacing = (grid_spec.max - grid_spec.min) / num_points
grid += 0.1 * grid_spacing * rng.randn(*grid.shape)
# More useful if it's sorted (e.g. for testing monotonicity, or debugging).
grid = np.sort(grid)
return np.reshape(grid, grid_spec.shape)
def _value_and_gradient(fn, *args):
"""Calls `fn` and computes the gradient of the result wrt `arg`."""
if tfe_context.executing_eagerly():
v, g = tfe_backprop.val_and_grad_function(fn)(args)
else:
v = fn(*args)
g = gradients_impl.gradients(v, args)
return v, g
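# A minimal usage sketch of the helper above (not part of the original tests);
# `x` is assumed to be a floating-point Tensor.
def _example_value_and_gradient(x):
  """Returns ndtr(x) and d/dx ndtr(x), mirroring how the gradient tests below
  call `_value_and_gradient` with a lambda-wrapped op."""
  value, grads = _value_and_gradient(lambda t: sm.ndtr(t), x)  # pylint: disable=unnecessary-lambda
  return value, grads[0]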
GridSpec = collections.namedtuple("GridSpec", ["min", "max", "shape"])
ErrorSpec = collections.namedtuple("ErrorSpec", ["rtol", "atol"])
class NdtriTest(test.TestCase):
def assertAllFinite(self, x):
is_finite = np.isfinite(x)
all_true = np.ones_like(is_finite, dtype=np.bool)
self.assertAllEqual(all_true, is_finite)
@test_util.run_in_graph_and_eager_modes
def testNdtri(self):
"""Verifies that ndtri computation is correct."""
if not special:
return
p = np.linspace(0., 1.0, 50).astype(np.float64)
# Quantile performs piecewise rational approximation so adding some
# special input values to make sure we hit all the pieces.
p = np.hstack((p, np.exp(-32), 1. - np.exp(-32), np.exp(-2),
1. - np.exp(-2)))
expected_x = special.ndtri(p)
x = special_math.ndtri(p)
self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
def testNdtriDynamicShape(self):
"""Verifies that ndtri computation is correct."""
with self.cached_session() as sess:
if not special:
return
p = array_ops.placeholder(np.float32)
p_ = np.linspace(0., 1.0, 50).astype(np.float32)
x = special_math.ndtri(p)
x_ = sess.run(x, feed_dict={p: p_})
expected_x_ = special.ndtri(p_)
self.assertAllClose(expected_x_, x_, atol=0.)
def _baseNdtriFiniteGradientTest(self, dtype):
"""Verifies that ndtri has finite gradients at interesting points."""
# Tests gradients at 0, 1, and piece-wise boundaries.
p = constant_op.constant(
np.array([
0.,
np.exp(-32.),
np.exp(-2.),
1. - np.exp(-2.),
1. - np.exp(-32.),
1.,
]).astype(dtype))
    # Not having the lambda sanitizer means we'd get an `IndexError` whenever
# the user supplied function has default args.
_, grads = _value_and_gradient(
lambda x: special_math.ndtri(x), p) # pylint: disable=unnecessary-lambda
self.assertAllFinite(self.evaluate(grads[0]))
@test_util.run_in_graph_and_eager_modes
def testNdtriFiniteGradientFloat32(self):
self._baseNdtriFiniteGradientTest(np.float32)
@test_util.run_in_graph_and_eager_modes
def testNdtriFiniteGradientFloat64(self):
self._baseNdtriFiniteGradientTest(np.float64)
@test_util.run_all_in_graph_and_eager_modes
class NdtrTest(test.TestCase):
_use_log = False
# Grid min/max chosen to ensure 0 < cdf(x) < 1.
_grid32 = GridSpec(min=-12.9, max=5., shape=[100])
_grid64 = GridSpec(min=-37.5, max=8., shape=[100])
_error32 = ErrorSpec(rtol=1e-4, atol=0.)
_error64 = ErrorSpec(rtol=1e-6, atol=0.)
def _test_grid(self, dtype, grid_spec, error_spec):
if self._use_log:
self._test_grid_log(dtype, grid_spec, error_spec)
else:
self._test_grid_no_log(dtype, grid_spec, error_spec)
def _test_grid_log(self, dtype, grid_spec, error_spec):
if not special:
return
grid = _make_grid(dtype, grid_spec)
actual = self.evaluate(sm.log_ndtr(grid))
# Basic tests.
# isfinite checks for NaN and Inf.
self.assertTrue(np.isfinite(actual).all())
# On the grid, -inf < log_cdf(x) < 0. In this case, we should be able
# to use a huge grid because we have used tricks to escape numerical
# difficulties.
self.assertTrue((actual < 0).all())
_check_strictly_increasing(actual)
# Versus scipy.
expected = special.log_ndtr(grid)
# Scipy prematurely goes to zero at some places that we don't. So don't
# include these in the comparison.
self.assertAllClose(
expected.astype(np.float64)[expected < 0],
actual.astype(np.float64)[expected < 0],
rtol=error_spec.rtol,
atol=error_spec.atol)
def _test_grid_no_log(self, dtype, grid_spec, error_spec):
if not special:
return
grid = _make_grid(dtype, grid_spec)
actual = self.evaluate(sm.ndtr(grid))
# Basic tests.
# isfinite checks for NaN and Inf.
self.assertTrue(np.isfinite(actual).all())
# On the grid, 0 < cdf(x) < 1. The grid cannot contain everything due
# to numerical limitations of cdf.
self.assertTrue((actual > 0).all())
self.assertTrue((actual < 1).all())
_check_strictly_increasing(actual)
# Versus scipy.
expected = special.ndtr(grid)
# Scipy prematurely goes to zero at some places that we don't. So don't
# include these in the comparison.
self.assertAllClose(
expected.astype(np.float64)[expected < 0],
actual.astype(np.float64)[expected < 0],
rtol=error_spec.rtol,
atol=error_spec.atol)
def test_float32(self):
self._test_grid(np.float32, self._grid32, self._error32)
def test_float64(self):
self._test_grid(np.float64, self._grid64, self._error64)
class LogNdtrTestLower(NdtrTest):
_use_log = True
_grid32 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT32_LOWER, shape=[100])
_grid64 = GridSpec(min=-100., max=sm.LOGNDTR_FLOAT64_LOWER, shape=[100])
_error32 = ErrorSpec(rtol=1e-4, atol=0.)
_error64 = ErrorSpec(rtol=1e-4, atol=0.)
# The errors are quite large when the input is > 6 or so. Also,
# scipy.special.log_ndtr becomes zero very early, before 10 (due to ndtr
# becoming 1). We approximate Log[1 + epsilon] as epsilon, and
# avoid this issue.
class LogNdtrTestMid(NdtrTest):
_use_log = True
_grid32 = GridSpec(
min=sm.LOGNDTR_FLOAT32_LOWER, max=sm.LOGNDTR_FLOAT32_UPPER, shape=[100])
_grid64 = GridSpec(
min=sm.LOGNDTR_FLOAT64_LOWER, max=sm.LOGNDTR_FLOAT64_UPPER, shape=[100])
# Differences show up as soon as we're in the tail, so add some atol.
_error32 = ErrorSpec(rtol=0.1, atol=1e-7)
_error64 = ErrorSpec(rtol=0.1, atol=1e-7)
class LogNdtrTestUpper(NdtrTest):
_use_log = True
_grid32 = GridSpec(
min=sm.LOGNDTR_FLOAT32_UPPER,
max=12., # Beyond this, log_cdf(x) may be zero.
shape=[100])
_grid64 = GridSpec(
min=sm.LOGNDTR_FLOAT64_UPPER,
max=35., # Beyond this, log_cdf(x) may be zero.
shape=[100])
_error32 = ErrorSpec(rtol=1e-6, atol=1e-14)
_error64 = ErrorSpec(rtol=1e-6, atol=1e-14)
class NdtrGradientTest(test.TestCase):
_use_log = False
_grid = GridSpec(min=-100., max=100., shape=[1, 2, 3, 8])
_error32 = ErrorSpec(rtol=1e-4, atol=0)
_error64 = ErrorSpec(rtol=1e-7, atol=0)
def assert_all_true(self, v):
self.assertAllEqual(np.ones_like(v, dtype=np.bool), v)
def assert_all_false(self, v):
self.assertAllEqual(np.zeros_like(v, dtype=np.bool), v)
def _test_grad_finite(self, dtype):
x = constant_op.constant([-100., 0., 100.], dtype=dtype)
output = (sm.log_ndtr(x) if self._use_log else sm.ndtr(x))
fn = sm.log_ndtr if self._use_log else sm.ndtr
    # Not having the lambda sanitizer means we'd get an `IndexError` whenever
# the user supplied function has default args.
output, grad_output = _value_and_gradient(
lambda x_: fn(x_), x) # pylint: disable=unnecessary-lambda
# isfinite checks for NaN and Inf.
output_, grad_output_ = self.evaluate([output, grad_output])
self.assert_all_true(np.isfinite(output_))
self.assert_all_true(np.isfinite(grad_output_[0]))
def _test_grad_accuracy(self, dtype, grid_spec, error_spec):
raw_grid = _make_grid(dtype, grid_spec)
grid = ops.convert_to_tensor(raw_grid)
with self.cached_session():
fn = sm.log_ndtr if self._use_log else sm.ndtr
# If there are N points in the grid,
# grad_eval.shape = (N, N), with grad_eval[i, j] the partial derivative of
# the ith output point w.r.t. the jth grid point. We only expect the
# diagonal to be nonzero.
# TODO(b/31131137): Replace tf.test.compute_gradient with our own custom
# gradient evaluation to ensure we correctly handle small function delta.
grad_eval, _ = gradient_checker.compute_gradient(grid, grid_spec.shape,
fn(grid),
grid_spec.shape)
grad_eval = np.diag(grad_eval)
# Check for NaN separately in order to get informative failures.
self.assert_all_false(np.isnan(grad_eval))
self.assert_all_true(grad_eval > 0.)
# isfinite checks for NaN and Inf.
self.assert_all_true(np.isfinite(grad_eval))
# Do the same checks but explicitly compute the gradient.
# (We did this because we're not sure if we trust
# tf.test.compute_gradient.)
grad_eval = gradients_impl.gradients(fn(grid), grid)[0].eval()
self.assert_all_false(np.isnan(grad_eval))
if self._use_log:
g = np.reshape(grad_eval, [-1])
half = np.ceil(len(g) / 2)
self.assert_all_true(g[:int(half)] > 0.)
self.assert_all_true(g[int(half):] >= 0.)
else:
# The ndtr gradient will only be non-zero in the range [-14, 14] for
# float32 and [-38, 38] for float64.
self.assert_all_true(grad_eval >= 0.)
# isfinite checks for NaN and Inf.
self.assert_all_true(np.isfinite(grad_eval))
# Versus scipy.
if not (special and stats):
return
expected = stats.norm.pdf(raw_grid)
if self._use_log:
expected /= special.ndtr(raw_grid)
expected[np.isnan(expected)] = 0.
# Scipy prematurely goes to zero at some places that we don't. So don't
# include these in the comparison.
self.assertAllClose(
expected.astype(np.float64)[expected < 0],
grad_eval.astype(np.float64)[expected < 0],
rtol=error_spec.rtol,
atol=error_spec.atol)
def test_float32(self):
self._test_grad_accuracy(np.float32, self._grid, self._error32)
self._test_grad_finite(np.float32)
def test_float64(self):
self._test_grad_accuracy(np.float64, self._grid, self._error64)
self._test_grad_finite(np.float64)
class LogNdtrGradientTest(NdtrGradientTest):
_use_log = True
class ErfInvTest(test.TestCase):
def testErfInvValues(self):
with self.cached_session():
if not special:
return
x = np.linspace(0., 1.0, 50).astype(np.float64)
expected_x = special.erfinv(x)
x = special_math.erfinv(x)
self.assertAllClose(expected_x, self.evaluate(x), atol=0.)
def testErfInvIntegerInput(self):
with self.cached_session():
with self.assertRaises(TypeError):
x = np.array([1, 2, 3]).astype(np.int32)
special_math.erfinv(x)
with self.assertRaises(TypeError):
x = np.array([1, 2, 3]).astype(np.int64)
special_math.erfinv(x)
class LogCDFLaplaceTest(test.TestCase):
# Note that scipy.stats.laplace does not have a stable Log CDF, so we cannot
# rely on scipy to cross check the extreme values.
# Test will be done differently over different ranges. These are the values
# such that when exceeded by x, produce output that causes the naive (scipy)
# implementation to have numerical issues.
#
# If x = log(1 / (2 * eps)), then 0.5 * exp{-x} = eps.
  # Inserting eps = np.finfo(dtype).eps, we see that log(1 / (2 * eps)) is
# the value of x such that any larger value will result in
# 1 - 0.5 * exp{-x} = 0, which will cause the log_cdf_laplace code to take a
  # log of zero. We therefore choose these as our cutoffs for testing.
CUTOFF_FLOAT64_UPPER = np.log(1. / (2. * np.finfo(np.float64).eps)) - 1.
CUTOFF_FLOAT32_UPPER = np.log(1. / (2. * np.finfo(np.float32).eps)) - 1.
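  # As a worked instance of the derivation above (values approximate): float64
  # has eps ~= 2.22e-16, so log(1 / (2 * eps)) ~= 35.35 and the cutoff used
  # here is ~= 34.35; float32 has eps ~= 1.19e-07, giving
  # log(1 / (2 * eps)) ~= 15.25 and a cutoff of ~= 14.25.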
def assertAllTrue(self, x):
self.assertAllEqual(np.ones_like(x, dtype=np.bool), x)
def _test_grid_log(self, dtype, scipy_dtype, grid_spec, error_spec):
with self.cached_session():
grid = _make_grid(dtype, grid_spec)
actual = sm.log_cdf_laplace(grid).eval()
# Basic tests.
# isfinite checks for NaN and Inf.
self.assertAllTrue(np.isfinite(actual))
self.assertAllTrue((actual < 0))
_check_strictly_increasing(actual)
# Versus scipy.
if not stats:
return
scipy_dist = stats.laplace(loc=0., scale=1.)
expected = scipy_dist.logcdf(grid.astype(scipy_dtype))
self.assertAllClose(
expected.astype(np.float64),
actual.astype(np.float64),
rtol=error_spec.rtol,
atol=error_spec.atol)
def test_float32_lower_and_mid_segment_scipy_float32_ok(self):
# Choose values mild enough that we can use scipy in float32, which will
# allow for a high accuracy match to scipy (since we both use float32).
self._test_grid_log(
np.float32, # dtype
np.float32, # scipy_dtype
GridSpec(min=-10, max=self.CUTOFF_FLOAT32_UPPER - 5, shape=[100]),
ErrorSpec(rtol=5e-4, atol=0))
def test_float32_all_segments_with_scipy_float64_ok(self):
# Choose values outside the range where scipy float32 works.
# Let scipy use float64. This means we
# won't be exactly the same since we are in float32.
self._test_grid_log(
np.float32, # dtype
np.float64, # scipy_dtype
GridSpec(min=-50, max=self.CUTOFF_FLOAT32_UPPER + 5, shape=[100]),
ErrorSpec(rtol=0.05, atol=0))
def test_float32_extreme_values_result_and_gradient_finite_and_nonzero(self):
with self.cached_session() as sess:
# On the lower branch, log_cdf_laplace(x) = x, so we know this will be
# fine, but test to -200 anyways.
grid = _make_grid(
np.float32, GridSpec(min=-200, max=80, shape=[20, 100]))
grid = ops.convert_to_tensor(grid)
actual = sm.log_cdf_laplace(grid)
grad = gradients_impl.gradients(actual, grid)[0]
actual_, grad_ = sess.run([actual, grad])
# isfinite checks for NaN and Inf.
self.assertAllTrue(np.isfinite(actual_))
self.assertAllTrue(np.isfinite(grad_))
self.assertFalse(np.any(actual_ == 0))
self.assertFalse(np.any(grad_ == 0))
def test_float64_extreme_values_result_and_gradient_finite_and_nonzero(self):
with self.cached_session() as sess:
# On the lower branch, log_cdf_laplace(x) = x, so we know this will be
# fine, but test to -200 anyways.
grid = _make_grid(
np.float64, GridSpec(min=-200, max=700, shape=[20, 100]))
grid = ops.convert_to_tensor(grid)
actual = sm.log_cdf_laplace(grid)
grad = gradients_impl.gradients(actual, grid)[0]
actual_, grad_ = sess.run([actual, grad])
# isfinite checks for NaN and Inf.
self.assertAllTrue(np.isfinite(actual_))
self.assertAllTrue(np.isfinite(grad_))
self.assertFalse(np.any(actual_ == 0))
self.assertFalse(np.any(grad_ == 0))
if __name__ == "__main__":
test.main()
|
|
"""
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import sys
import glob
import os
from subprocess import PIPE, Popen
import subprocess
from basic_modules.tool import Tool
from utils import logger
try:
if hasattr(sys, '_run_from_cmdl') is True:
raise ImportError
from pycompss.api.parameter import FILE_IN, FILE_OUT, IN
from pycompss.api.task import task
# from pycompss.api.api import compss_wait_on
# from pycompss.api.constraint import constraint
except ImportError:
logger.info("[Warning] Cannot import \"pycompss\" API packages.")
logger.info(" Using mock decorators.")
from utils.dummy_pycompss import FILE_IN, FILE_OUT, IN # pylint: disable=ungrouped-imports
from utils.dummy_pycompss import task # pylint: disable=ungrouped-imports
# from utils.dummy_pycompss import compss_wait_on # pylint: disable=ungrouped-imports
# from utils.dummy_pycompss import constraint
# ------------------------------------------------------------------------------
class tbNormalizeTool(Tool): # pylint: disable=invalid-name
"""
Tool for normalizing an adjacency matrix
"""
def __init__(self):
"""
Init function
"""
logger.info("TADbit - Normalize")
Tool.__init__(self)
@task(bamin=FILE_IN, normalization=IN, resolution=IN, min_perc=IN,
max_perc=IN, workdir=IN, biases=FILE_OUT, interactions_plot=FILE_OUT,
filtered_bins_plot=FILE_OUT)
def tb_normalize(self, bamin, normalization, resolution, min_perc, # pylint: disable=too-many-locals,too-many-statements,unused-argument,no-self-use,too-many-arguments
max_perc, workdir, ncpus="1", min_count=None, fasta=None,
mappability=None, rest_enzyme=None):
"""
Function to normalize to a given resolution the Hi-C
matrix
Parameters
----------
bamin : str
Location of the tadbit bam paired reads
normalization: str
normalization(s) to apply. Order matters. Choices: [Vanilla, oneD]
resolution : str
Resolution of the Hi-C
min_perc : str
            lower percentile from which to consider bins as good.
max_perc : str
            upper percentile up to which to consider bins as good.
workdir : str
Location of working directory
ncpus : str
Number of cpus to use
min_count : str
minimum number of reads mapped to a bin (recommended value
            could be 2500). If set, this option overrides the perc_zero filtering.
fasta: str
Location of the fasta file with genome sequence, to compute GC content and
number of restriction sites per bin. Required for oneD normalization
mappability: str
Location of the file with mappability, required for oneD normalization
rest_enzyme: str
For oneD normalization. Name of the restriction enzyme used to do the Hi-C experiment
Returns
-------
hic_biases : str
Location of HiC biases pickle file
interactions : str
Location of interaction decay vs genomic distance pdf
filtered_bins : str
Location of filtered_bins png
"""
# chr_hic_data = read_matrix(matrix_file, resolution=int(resolution))
logger.info("TB NORMALIZATION: {0} {1} {2} {3} {4} {5}".format(
bamin, normalization, resolution, min_perc, max_perc, workdir))
_cmd = [
'tadbit', 'normalize',
'--bam', bamin,
'--normalization', normalization,
'--workdir', workdir,
'--resolution', resolution,
'--cpus', str(ncpus)
]
if min_perc:
_cmd.append('--min_perc')
_cmd.append(min_perc)
if max_perc:
_cmd.append('--max_perc')
_cmd.append(max_perc)
if min_count:
_cmd.append('--min_count')
_cmd.append(min_count)
if normalization == 'oneD':
_cmd.append('--fasta')
_cmd.append(fasta)
_cmd.append('--mappability')
_cmd.append(mappability)
_cmd.append('--renz')
_cmd.append(rest_enzyme)
output_metadata = {}
output_files = []
try:
_ = subprocess.check_output(_cmd, stderr=subprocess.STDOUT,
cwd=workdir)
except subprocess.CalledProcessError as subp_err:
logger.info(subp_err.output)
if not min_count:
logger.info("cis/trans ratio failed, trying with min_count. Disabling plot.")
_cmd.append('--min_count')
_cmd.append('10')
_cmd.append('--normalize_only')
try:
_ = subprocess.check_output(_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as subp_err:
logger.fatal(subp_err.output)
os.chdir(workdir+"/04_normalization")
for fl_file in glob.glob("biases_*.pickle"):
output_files.append(os.path.abspath(fl_file))
break
for fl_file in glob.glob("interactions*.png"):
output_files.append(os.path.abspath(fl_file))
break
for fl_file in glob.glob("filtered_bins_*.png"):
output_files.append(os.path.abspath(fl_file))
break
return (output_files, output_metadata)
def run(self, input_files, input_metadata, output_files): # pylint: disable=too-many-locals
"""
The main function for the normalization of the Hi-C matrix to a given resolution
Parameters
----------
input_files : list
bamin : str
Location of the tadbit bam paired reads
metadata : dict
normalization: str
normalization(s) to apply. Order matters. Choices: [Vanilla, oneD]
resolution : str
Resolution of the Hi-C
min_perc : str
                lower percentile from which to consider bins as good.
max_perc : str
                upper percentile up to which to consider bins as good.
workdir : str
Location of working directory
ncpus : str
Number of cpus to use
min_count : str
minimum number of reads mapped to a bin (recommended value
                could be 2500). If set, this option overrides the perc_zero filtering.
fasta: str
Location of the fasta file with genome sequence, to compute GC content and
number of restriction sites per bin. Required for oneD normalization
mappability: str
Location of the file with mappability, required for oneD normalization
rest_enzyme: str
For oneD normalization.
Name of the restriction enzyme used to do the Hi-C experiment
Returns
-------
output_files : list
List of locations for the output files.
output_metadata : list
List of matching metadata dict objects
"""
bamin = input_files[0]
if not os.path.isfile(bamin.replace('.bam', '.bam.bai')):
logger.info('Creating bam index')
_cmd = ['samtools', 'index', bamin]
out, err = Popen(_cmd, stdout=PIPE, stderr=PIPE).communicate()
logger.info(out)
logger.info(err)
resolution = '1000000'
if 'resolution' in input_metadata:
resolution = input_metadata['resolution']
normalization = 'Vanilla'
if 'normalization' in input_metadata:
normalization = input_metadata['normalization']
min_perc = max_perc = min_count = fasta = mappability = rest_enzyme = None
ncpus = 1
if 'ncpus' in input_metadata:
ncpus = input_metadata['ncpus']
if 'min_perc' in input_metadata:
min_perc = input_metadata['min_perc']
if 'max_perc' in input_metadata:
max_perc = input_metadata['max_perc']
if 'min_count' in input_metadata:
min_count = input_metadata['min_count']
if 'fasta' in input_metadata:
fasta = input_metadata['fasta']
if 'mappability' in input_metadata:
mappability = input_metadata['mappability']
if 'rest_enzyme' in input_metadata:
rest_enzyme = input_metadata['rest_enzyme']
root_name = os.path.dirname(os.path.abspath(bamin))
if 'workdir' in input_metadata:
root_name = input_metadata['workdir']
# input and output share most metadata
output_files, output_metadata = self.tb_normalize(bamin, normalization,
resolution, min_perc,
max_perc, root_name, ncpus,
min_count, fasta, mappability,
rest_enzyme)
return (output_files, output_metadata)
# ------------------------------------------------------------------------------
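# A minimal sketch of the command line that tb_normalize() above assembles for
# an oneD normalization run. All paths and values below are hypothetical
# placeholders, shown only to illustrate the flag layout.
def _example_oned_command():  # pragma: no cover
    return [
        'tadbit', 'normalize',
        '--bam', '/path/to/sample.bam',
        '--normalization', 'oneD',
        '--workdir', '/path/to/workdir',
        '--resolution', '100000',
        '--cpus', '4',
        '--min_perc', '10',
        '--max_perc', '95',
        # oneD-specific inputs, matching the branch in tb_normalize:
        '--fasta', '/path/to/genome.fa',
        '--mappability', '/path/to/mappability.bedGraph',
        '--renz', 'MboI'
    ]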
|
|
import re
import sublime
from sublime_plugin import WindowCommand
from ..git_command import GitCommand
from ...common import util
class GsLogBase(WindowCommand, GitCommand):
_limit = 6000
def run(self, file_path=None):
self._skip = 0
self._file_path = file_path
sublime.set_timeout_async(self.run_async)
def run_async(self):
logs = self.log(file_path=self._file_path, limit=self._limit, skip=self._skip)
self._hashes = [l.long_hash for l in logs]
self.display_commits(self.render_commits(logs))
def render_commits(self, logs):
commit_list = []
for l in logs:
commit_list.append([
l.short_hash + " " + l.summary,
l.author + ", " + util.dates.fuzzy(l.datetime)
])
return commit_list
def display_commits(self, commit_list):
if len(commit_list) >= self._limit:
commit_list.append([
">>> NEXT {} COMMITS >>>".format(self._limit),
"Skip this set of commits and choose from the next-oldest batch."
])
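            # This sentinel row is detected in on_commit_selection
            # (index == self._limit): _skip is advanced by _limit and
            # run_async() is re-queued to load the next batch of commits.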
self.window.show_quick_panel(
commit_list,
lambda index: sublime.set_timeout_async(lambda: self.on_commit_selection(index), 10),
flags=sublime.MONOSPACE_FONT | sublime.KEEP_OPEN_ON_FOCUS_LOST,
on_highlight=self.on_commit_highlight
)
def on_commit_highlight(self, index):
sublime.set_timeout_async(lambda: self.on_commit_highlight_async(index))
def on_commit_highlight_async(self, index):
savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
show_more = savvy_settings.get("log_show_more_commit_info")
if not show_more:
return
self.window.run_command("gs_show_commit_info", {"commit_hash": self._hashes[index]})
def on_commit_selection(self, index):
self.window.run_command("hide_panel", {"panel": "output.show_commit_info"})
if index == -1:
return
if index == self._limit:
self._skip += self._limit
sublime.set_timeout_async(self.run_async, 1)
return
self._selected_commit = self._hashes[index]
self.do_action(self._selected_commit)
def do_action(self, commit_hash):
self.window.run_command("gs_log_action", {
"commit_hash": commit_hash,
"file_path": self._file_path
})
class GsLogCurrentBranchCommand(GsLogBase):
pass
class GsLogByAuthorCommand(GsLogBase):
"""
Open a quick panel containing all committers for the active
repository, ordered by most commits, Git name, and email.
Once selected, display a quick panel with all commits made
by the specified author.
"""
def run_async(self):
email = self.git("config", "user.email").strip()
self._entries = []
commiter_str = self.git("shortlog", "-sne", "HEAD")
for line in commiter_str.split('\n'):
            m = re.search(r'\s*(\d*)\s*(.*)\s<(.*)>', line)
if m is None:
continue
commit_count, author_name, author_email = m.groups()
author_text = "{} <{}>".format(author_name, author_email)
self._entries.append((commit_count, author_name, author_email, author_text))
self.window.show_quick_panel(
[entry[3] for entry in self._entries],
self.on_author_selection,
flags=sublime.MONOSPACE_FONT,
selected_index=(list(line[2] for line in self._entries)).index(email)
)
def on_author_selection(self, index):
if index == -1:
return
self._selected_author = self._entries[index][3]
super().run_async()
def log(self, **kwargs):
return super().log(author=self._selected_author, **kwargs)
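# A self-contained sketch of the `git shortlog -sne` parsing performed in
# GsLogByAuthorCommand.run_async above; the sample line in the comment is
# hypothetical.
def _parse_shortlog_line(line):
    match = re.search(r'\s*(\d*)\s*(.*)\s<(.*)>', line)
    return match.groups() if match else None

# _parse_shortlog_line("   42\tJane Doe <[email protected]>")
# -> ('42', 'Jane Doe', '[email protected]')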
class GsLogByBranchCommand(GsLogBase):
def run_async(self):
self.all_branches = [b.name_with_remote for b in self.get_branches()]
if hasattr(self, '_selected_branch') and self._selected_branch in self.all_branches:
pre_selected_index = self.all_branches.index(self._selected_branch)
else:
pre_selected_index = self.all_branches.index(self.get_current_branch_name())
self.window.show_quick_panel(
self.all_branches,
self.on_branch_selection,
flags=sublime.MONOSPACE_FONT,
selected_index=pre_selected_index
)
def on_branch_selection(self, index):
if index == -1:
return
self._selected_branch = self.all_branches[index]
super().run_async()
def log(self, **kwargs):
return super().log(branch=self._selected_branch, **kwargs)
class GsLogCommand(WindowCommand, GitCommand):
def run(self, file_path=None, current_file=False):
self._file_path = self.file_path if current_file else file_path
options_array = [
"For current branch",
"Filtered by author",
"Filtered by branch",
]
self.window.show_quick_panel(
options_array,
self.on_option_selection,
flags=sublime.MONOSPACE_FONT
)
def on_option_selection(self, index):
if index == -1:
return
if index == 0:
self.window.run_command("gs_log_current_branch", {"file_path": self._file_path})
elif index == 1:
self.window.run_command("gs_log_by_author", {"file_path": self._file_path})
elif index == 2:
self.window.run_command("gs_log_by_branch", {"file_path": self._file_path})
class GsLogActionCommand(WindowCommand, GitCommand):
def run(self, commit_hash, file_path=None):
self._commit_hash = commit_hash
self._file_path = file_path
self.actions = [
["show_commit", "Show commit"],
["checkout_commit", "Checkout commit"],
["compare_against", "Compare commit against ..."],
["copy_sha", "Copy the full SHA"],
["diff_commit", "Diff commit"],
["diff_commit_cache", "Diff commit (cached)"]
]
if self._file_path:
self.actions.insert(1, ["show_file_at_commit", "Show file at commit"])
self.window.show_quick_panel(
[a[1] for a in self.actions],
self.on_action_selection,
flags=sublime.MONOSPACE_FONT,
selected_index=self.quick_panel_log_idx
)
def on_action_selection(self, index):
if index == -1:
return
self.quick_panel_log_idx = index
action = self.actions[index][0]
eval("self.{}()".format(action))
def show_commit(self):
self.window.run_command("gs_show_commit", {"commit_hash": self._commit_hash})
def checkout_commit(self):
self.checkout_ref(self._commit_hash)
util.view.refresh_gitsavvy(self.view)
def compare_against(self):
self.window.run_command("gs_compare_against", {
"target_commit": self._commit_hash,
"file_path": self._file_path
})
def copy_sha(self):
sublime.set_clipboard(self.git("rev-parse", self._commit_hash))
def _diff_commit(self, cache=False):
self.window.run_command("gs_diff", {
"in_cached_mode": cache,
"file_path": self._file_path,
"current_file": bool(self._file_path),
"base_commit": self._commit_hash,
"disable_stage": True
})
def diff_commit(self):
self._diff_commit(cache=False)
def diff_commit_cache(self):
self._diff_commit(cache=True)
def show_file_at_commit(self):
lang = self.window.active_view().settings().get('syntax')
self.window.run_command(
"gs_show_file_at_commit",
{"commit_hash": self._commit_hash, "filepath": self._file_path, "lang": lang})
|
|
from django.apps import AppConfig
from django.apps.registry import Apps
from django.db import models
from django.db.models.options import DEFAULT_NAMES, normalize_unique_together
from django.utils import six
from django.utils.module_loading import import_by_path
class InvalidBasesError(ValueError):
pass
class ProjectState(object):
"""
Represents the entire project's overall state.
This is the item that is passed around - we do it here rather than at the
app level so that cross-app FKs/etc. resolve properly.
"""
def __init__(self, models=None):
self.models = models or {}
self.apps = None
def add_model_state(self, model_state):
self.models[(model_state.app_label, model_state.name.lower())] = model_state
def clone(self):
"Returns an exact copy of this ProjectState"
return ProjectState(
models=dict((k, v.clone()) for k, v in self.models.items())
)
def render(self):
"Turns the project state into actual models in a new Apps"
if self.apps is None:
# Populate the app registry with a stub for each application.
app_labels = set(model_state.app_label for model_state in self.models.values())
self.apps = Apps([AppConfigStub(label) for label in sorted(app_labels)])
# We keep trying to render the models in a loop, ignoring invalid
# base errors, until the size of the unrendered models doesn't
# decrease by at least one, meaning there's a base dependency loop/
# missing base.
unrendered_models = list(self.models.values())
while unrendered_models:
new_unrendered_models = []
for model in unrendered_models:
try:
model.render(self.apps)
except InvalidBasesError:
new_unrendered_models.append(model)
if len(new_unrendered_models) == len(unrendered_models):
raise InvalidBasesError("Cannot resolve bases for %r" % new_unrendered_models)
unrendered_models = new_unrendered_models
return self.apps
@classmethod
def from_apps(cls, apps):
"Takes in an Apps and returns a ProjectState matching it"
app_models = {}
for model in apps.get_models():
model_state = ModelState.from_model(model)
app_models[(model_state.app_label, model_state.name.lower())] = model_state
return cls(app_models)
def __eq__(self, other):
if set(self.models.keys()) != set(other.models.keys()):
return False
return all(model == other.models[key] for key, model in self.models.items())
def __ne__(self, other):
return not (self == other)
class AppConfigStub(AppConfig):
"""
Stubs a Django AppConfig. Only provides a label and a dict of models.
"""
def __init__(self, label):
super(AppConfigStub, self).__init__(label, None)
def import_models(self, all_models):
self.models = all_models
class ModelState(object):
"""
Represents a Django Model. We don't use the actual Model class
as it's not designed to have its options changed - instead, we
mutate this one and then render it into a Model as required.
Note that while you are allowed to mutate .fields, you are not allowed
to mutate the Field instances inside there themselves - you must instead
assign new ones, as these are not detached during a clone.
"""
def __init__(self, app_label, name, fields, options=None, bases=None):
self.app_label = app_label
self.name = name
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model, )
# Sanity-check that fields is NOT a dict. It must be ordered.
if isinstance(self.fields, dict):
raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
@classmethod
def from_model(cls, model):
"""
Feed me a model, get a ModelState representing it out.
"""
# Deconstruct the fields
fields = []
for field in model._meta.local_fields:
name, path, args, kwargs = field.deconstruct()
field_class = import_by_path(path)
try:
fields.append((name, field_class(*args, **kwargs)))
except TypeError as e:
raise TypeError("Couldn't reconstruct field %s on %s.%s: %s" % (
name,
model._meta.app_label,
model._meta.object_name,
e,
))
for field in model._meta.local_many_to_many:
name, path, args, kwargs = field.deconstruct()
field_class = import_by_path(path)
try:
fields.append((name, field_class(*args, **kwargs)))
except TypeError as e:
raise TypeError("Couldn't reconstruct m2m field %s on %s: %s" % (
name,
model._meta.object_name,
e,
))
# Extract the options
options = {}
for name in DEFAULT_NAMES:
# Ignore some special options
if name in ["apps", "app_label"]:
continue
elif name in model._meta.original_attrs:
if name == "unique_together":
ut = model._meta.original_attrs["unique_together"]
options[name] = set(normalize_unique_together(ut))
else:
options[name] = model._meta.original_attrs[name]
# Make our record
bases = tuple(
("%s.%s" % (base._meta.app_label, base._meta.model_name) if hasattr(base, "_meta") else base)
for base in model.__bases__
if (not hasattr(base, "_meta") or not base._meta.abstract)
)
if not bases:
bases = (models.Model, )
return cls(
model._meta.app_label,
model._meta.object_name,
fields,
options,
bases,
)
def clone(self):
"Returns an exact copy of this ModelState"
# We deep-clone the fields using deconstruction
fields = []
for name, field in self.fields:
_, path, args, kwargs = field.deconstruct()
field_class = import_by_path(path)
fields.append((name, field_class(*args, **kwargs)))
# Now make a copy
return self.__class__(
app_label=self.app_label,
name=self.name,
fields=fields,
options=dict(self.options),
bases=self.bases,
)
def render(self, apps):
"Creates a Model object from our current state into the given apps"
# First, make a Meta object
meta_contents = {'app_label': self.app_label, "apps": apps}
meta_contents.update(self.options)
if "unique_together" in meta_contents:
meta_contents["unique_together"] = list(meta_contents["unique_together"])
meta = type("Meta", tuple(), meta_contents)
# Then, work out our bases
try:
bases = tuple(
(apps.get_model(*base.split(".", 1)) if isinstance(base, six.string_types) else base)
for base in self.bases
)
except LookupError:
raise InvalidBasesError("Cannot resolve one or more bases from %r" % (self.bases,))
# Turn fields into a dict for the body, add other bits
body = dict(self.fields)
body['Meta'] = meta
body['__module__'] = "__fake__"
# Then, make a Model object
return type(
self.name,
bases,
body,
)
def get_field_by_name(self, name):
for fname, field in self.fields:
if fname == name:
return field
raise ValueError("No field called %s on model %s" % (name, self.name))
def __eq__(self, other):
return (
(self.app_label == other.app_label) and
(self.name == other.name) and
(len(self.fields) == len(other.fields)) and
all((k1 == k2 and (f1.deconstruct()[1:] == f2.deconstruct()[1:])) for (k1, f1), (k2, f2) in zip(self.fields, other.fields)) and
(self.options == other.options) and
(self.bases == other.bases)
)
def __ne__(self, other):
return not (self == other)
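# A minimal usage sketch (not part of this module) of the state machinery
# above: snapshot the running registry, clone it, and render the clone back
# into real model classes. Assumes a fully configured Django project.
def _example_state_round_trip():
    from django.apps import apps
    state = ProjectState.from_apps(apps)   # one ModelState per installed model
    mutable_copy = state.clone()           # deep copy, safe to mutate
    return mutable_copy.render()           # a fresh Apps with rendered models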
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'BillingAccount.entry_point'
db.add_column(u'accounting_billingaccount', 'entry_point', self.gf('django.db.models.fields.CharField')(default='NOT_SET', max_length=25), keep_default=False)
def backwards(self, orm):
# Deleting field 'BillingAccount.entry_point'
db.delete_column(u'accounting_billingaccount', 'entry_point')
models = {
u'accounting.billingaccount': {
'Meta': {'object_name': 'BillingAccount'},
'account_type': ('django.db.models.fields.CharField', [], {'default': "'CONTRACT'", 'max_length': '25'}),
'billing_admins': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.BillingAccountAdmin']", 'null': 'True', 'symmetrical': 'False'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'created_by_domain': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Currency']"}),
'date_confirmed_extra_charges': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dimagi_contact': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'entry_point': ('django.db.models.fields.CharField', [], {'default': "'NOT_SET'", 'max_length': '25'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_auto_invoiceable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'salesforce_account_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'})
},
u'accounting.billingaccountadmin': {
'Meta': {'object_name': 'BillingAccountAdmin'},
'domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'web_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'db_index': 'True'})
},
u'accounting.billingcontactinfo': {
'Meta': {'object_name': 'BillingContactInfo'},
'account': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['accounting.BillingAccount']", 'unique': 'True', 'primary_key': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'emails': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'first_line': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'second_line': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'state_province_region': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'accounting.billingrecord': {
'Meta': {'object_name': 'BillingRecord'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'emailed_to': ('django.db.models.fields.CharField', [], {'max_length': '254', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']"}),
'pdf_data_id': ('django.db.models.fields.CharField', [], {'max_length': '48'}),
'skipped_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'accounting.creditadjustment': {
'Meta': {'object_name': 'CreditAdjustment'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'credit_line': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.CreditLine']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']", 'null': 'True'}),
'line_item': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.LineItem']", 'null': 'True'}),
'note': ('django.db.models.fields.TextField', [], {}),
'payment_record': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.PaymentRecord']", 'null': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'default': "'MANUAL'", 'max_length': '25'}),
'related_credit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'creditadjustment_related'", 'null': 'True', 'to': u"orm['accounting.CreditLine']"}),
'web_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'})
},
u'accounting.creditline': {
'Meta': {'object_name': 'CreditLine'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.BillingAccount']"}),
'balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']", 'null': 'True', 'blank': 'True'})
},
u'accounting.currency': {
'Meta': {'object_name': 'Currency'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'rate_to_default': ('django.db.models.fields.DecimalField', [], {'default': "'1.0'", 'max_digits': '20', 'decimal_places': '9'}),
'symbol': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
u'accounting.defaultproductplan': {
'Meta': {'object_name': 'DefaultProductPlan'},
'edition': ('django.db.models.fields.CharField', [], {'default': "'Community'", 'max_length': '25'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlan']"}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
u'accounting.feature': {
'Meta': {'object_name': 'Feature'},
'feature_type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
u'accounting.featurerate': {
'Meta': {'object_name': 'FeatureRate'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Feature']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'monthly_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'}),
'monthly_limit': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'per_excess_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'})
},
u'accounting.invoice': {
'Meta': {'object_name': 'Invoice'},
'balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_due': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {}),
'date_paid': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_received': ('django.db.models.fields.DateField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']"}),
'tax_rate': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'})
},
u'accounting.lineitem': {
'Meta': {'object_name': 'LineItem'},
'base_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'base_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feature_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.FeatureRate']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']"}),
'product_rate': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwareProductRate']", 'null': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'unit_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'accounting.paymentmethod': {
'Meta': {'object_name': 'PaymentMethod'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.BillingAccount']"}),
'billing_admin': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.BillingAccountAdmin']"}),
'customer_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method_type': ('django.db.models.fields.CharField', [], {'default': "'Stripe'", 'max_length': '50', 'db_index': 'True'})
},
u'accounting.paymentrecord': {
'Meta': {'object_name': 'PaymentRecord'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0000'", 'max_digits': '10', 'decimal_places': '4'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_method': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.PaymentMethod']"}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'accounting.softwareplan': {
'Meta': {'object_name': 'SoftwarePlan'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'edition': ('django.db.models.fields.CharField', [], {'default': "'Enterprise'", 'max_length': '25'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'visibility': ('django.db.models.fields.CharField', [], {'default': "'INTERNAL'", 'max_length': '10'})
},
u'accounting.softwareplanversion': {
'Meta': {'object_name': 'SoftwarePlanVersion'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feature_rates': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.FeatureRate']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlan']"}),
'product_rates': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['accounting.SoftwareProductRate']", 'symmetrical': 'False', 'blank': 'True'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_prbac.Role']"})
},
u'accounting.softwareproduct': {
'Meta': {'object_name': 'SoftwareProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'product_type': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'})
},
u'accounting.softwareproductrate': {
'Meta': {'object_name': 'SoftwareProductRate'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'monthly_fee': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '10', 'decimal_places': '2'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwareProduct']"})
},
u'accounting.subscriber': {
'Meta': {'object_name': 'Subscriber'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_index': 'True'})
},
u'accounting.subscription': {
'Meta': {'object_name': 'Subscription'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.BillingAccount']"}),
'auto_generate_credits': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_delay_invoicing': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateField', [], {}),
'do_not_invoice': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trial': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plan_version': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.SoftwarePlanVersion']"}),
'pro_bono_status': ('django.db.models.fields.CharField', [], {'default': "'NOT_SET'", 'max_length': '25'}),
'salesforce_contract_id': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'service_type': ('django.db.models.fields.CharField', [], {'default': "'NOT_SET'", 'max_length': '25'}),
'subscriber': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscriber']"})
},
u'accounting.subscriptionadjustment': {
'Meta': {'object_name': 'SubscriptionAdjustment'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Invoice']", 'null': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'default': "'INTERNAL'", 'max_length': '50'}),
'new_date_delay_invoicing': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'new_date_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'new_date_start': ('django.db.models.fields.DateField', [], {}),
'new_salesforce_contract_id': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'reason': ('django.db.models.fields.CharField', [], {'default': "'CREATE'", 'max_length': '50'}),
'related_subscription': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptionadjustment_related'", 'null': 'True', 'to': u"orm['accounting.Subscription']"}),
'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounting.Subscription']"}),
'web_user': ('django.db.models.fields.CharField', [], {'max_length': '80', 'null': 'True'})
},
u'django_prbac.role': {
'Meta': {'object_name': 'Role'},
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'parameters': ('django_prbac.fields.StringSetField', [], {'default': '[]', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'})
}
}
complete_apps = ['accounting']
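# For illustration only (assumed usage): the frozen ``models`` dict above is
# what backs the ``orm`` accessor that forwards()/backwards() receive, so a
# data migration written against this state could do, e.g.:
#
#     for account in orm['accounting.BillingAccount'].objects.filter(
#             is_active=True):
#         ...
#
# A plain schema migration normally only uses it as the frozen model state.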
|
|
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common import log as logging
from rally import consts
from rally import exceptions
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.cinder import utils
from rally.plugins.openstack.scenarios.glance import utils as glance_utils
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.task import types
from rally.task import validation
LOG = logging.getLogger(__name__)
class CinderVolumes(utils.CinderScenario,
nova_utils.NovaScenario,
glance_utils.GlanceScenario):
"""Benchmark scenarios for Cinder Volumes."""
@types.set(image=types.ImageResourceType)
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_list_volume(self, size, detailed=True,
image=None, **kwargs):
"""Create a volume and list all volumes.
Measure the "cinder volume-list" command performance.
If you have only 1 user in your context, you will
add 1 volume on every iteration. So you will have more
and more volumes and will be able to measure the
performance of the "cinder volume-list" command depending on
        the number of volumes owned by users.
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param detailed: determines whether the volume listing should contain
detailed information about all of them
:param image: image to be used to create volume
:param kwargs: optional args to create a volume
"""
if image:
kwargs["imageRef"] = image
self._create_volume(size, **kwargs)
self._list_volumes(detailed)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def list_volumes(self, detailed=True):
"""List all volumes.
This simple scenario tests the cinder list command by listing
all the volumes.
:param detailed: True if detailed information about volumes
should be listed
"""
self._list_volumes(detailed)
@types.set(image=types.ImageResourceType)
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_delete_volume(self, size, image=None,
min_sleep=0, max_sleep=0,
**kwargs):
"""Create and then delete a volume.
        Good for testing the maximal bandwidth of the cloud. Optional 'min_sleep'
and 'max_sleep' parameters allow the scenario to simulate a pause
between volume creation and deletion (of random duration from
[min_sleep, max_sleep]).
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: image to be used to create volume
:param min_sleep: minimum sleep time between volume creation and
deletion (in seconds)
:param max_sleep: maximum sleep time between volume creation and
deletion (in seconds)
:param kwargs: optional args to create a volume
"""
if image:
kwargs["imageRef"] = image
volume = self._create_volume(size, **kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_volume(volume)
@types.set(image=types.ImageResourceType)
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_volume(self, size, image=None, **kwargs):
"""Create a volume.
        Good test to check how the number of active volumes influences
        the performance of creating a new one.
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: image to be used to create volume
:param kwargs: optional args to create a volume
"""
if image:
kwargs["imageRef"] = image
self._create_volume(size, **kwargs)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@validation.required_contexts("volumes")
@scenario.configure(context={"cleanup": ["cinder"]})
def modify_volume_metadata(self, sets=10, set_size=3,
deletes=5, delete_size=3):
"""Modify a volume's metadata.
This requires a volume to be created with the volumes
context. Additionally, ``sets * set_size`` must be greater
than or equal to ``deletes * delete_size``.
:param sets: how many set_metadata operations to perform
:param set_size: number of metadata keys to set in each
set_metadata operation
:param deletes: how many delete_metadata operations to perform
:param delete_size: number of metadata keys to delete in each
delete_metadata operation
"""
if sets * set_size < deletes * delete_size:
raise exceptions.InvalidArgumentsException(
"Not enough metadata keys will be created: "
"Setting %(num_keys)s keys, but deleting %(num_deletes)s" %
{"num_keys": sets * set_size,
"num_deletes": deletes * delete_size})
volume = random.choice(self.context["tenant"]["volumes"])
keys = self._set_metadata(volume["id"], sets, set_size)
self._delete_metadata(volume["id"], keys, deletes, delete_size)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_extend_volume(self, size, new_size, min_sleep=0,
max_sleep=0, **kwargs):
"""Create and extend a volume and then delete it.
:param size: volume size (in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
        :param new_size: new volume size (in GB) to extend to, or
                         dictionary, must contain two values:
                         min - minimum size volumes will be created as;
                         max - maximum size volumes will be created as.
                         Note: must be larger than the original volume size
:param min_sleep: minimum sleep time between volume extension and
deletion (in seconds)
:param max_sleep: maximum sleep time between volume extension and
deletion (in seconds)
:param kwargs: optional args to extend the volume
"""
volume = self._create_volume(size, **kwargs)
self._extend_volume(volume, new_size)
self.sleep_between(min_sleep, max_sleep)
self._delete_volume(volume)
@validation.required_services(consts.Service.CINDER)
@validation.required_contexts("volumes")
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_from_volume_and_delete_volume(self, size, min_sleep=0,
max_sleep=0, **kwargs):
"""Create volume from volume and then delete it.
Scenario for testing volume clone.Optional 'min_sleep' and 'max_sleep'
parameters allow the scenario to simulate a pause between volume
creation and deletion (of random duration from [min_sleep, max_sleep]).
:param size: volume size (in GB), or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
                     Must be equal to or larger than the source volume size.
:param min_sleep: minimum sleep time between volume creation and
deletion (in seconds)
:param max_sleep: maximum sleep time between volume creation and
deletion (in seconds)
:param kwargs: optional args to create a volume
"""
source_vol = random.choice(self.context["tenant"]["volumes"])
volume = self._create_volume(size, source_volid=source_vol["id"],
**kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_volume(volume)
@validation.required_services(consts.Service.CINDER)
@validation.required_contexts("volumes")
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_delete_snapshot(self, force=False, min_sleep=0,
max_sleep=0, **kwargs):
"""Create and then delete a volume-snapshot.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between snapshot creation and deletion
(of random duration from [min_sleep, max_sleep]).
:param force: when set to True, allows snapshot of a volume when
the volume is attached to an instance
:param min_sleep: minimum sleep time between snapshot creation and
deletion (in seconds)
:param max_sleep: maximum sleep time between snapshot creation and
deletion (in seconds)
:param kwargs: optional args to create a snapshot
"""
volume = random.choice(self.context["tenant"]["volumes"])
snapshot = self._create_snapshot(volume["id"], force=force, **kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_snapshot(snapshot)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder", "nova"]})
def create_and_attach_volume(self, size, image, flavor, **kwargs):
"""Create a VM and attach a volume to it.
Simple test to create a VM and attach a volume, then
detach the volume and delete volume/VM.
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: Glance image name to use for the VM
:param flavor: VM flavor name
:param kwargs: optional arguments for VM creation
"""
server = self._boot_server(image, flavor, **kwargs)
volume = self._create_volume(size)
self._attach_volume(server, volume)
self._detach_volume(server, volume)
self._delete_volume(volume)
self._delete_server(server)
@validation.volume_type_exists("volume_type")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder", "nova"]})
def create_snapshot_and_attach_volume(self, volume_type=False,
size=None, **kwargs):
"""Create volume, snapshot and attach/detach volume.
        This scenario is based on the standalone qaStressTest.py
(https://github.com/WaltHP/cinder-stress).
:param volume_type: Whether or not to specify volume type when creating
volumes.
:param size: Volume size - dictionary, contains two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
default values: {"min": 1, "max": 5}
:param kwargs: Optional parameters used during volume
snapshot creation.
"""
if size is None:
size = {"min": 1, "max": 5}
selected_type = None
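        # The None entry selects the default volume type; actual type names
        # are appended below when volume_type is True.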
volume_types = [None]
if volume_type:
volume_types_list = self.clients("cinder").volume_types.list()
for s in volume_types_list:
volume_types.append(s.name)
selected_type = random.choice(volume_types)
volume = self._create_volume(size, volume_type=selected_type)
snapshot = self._create_snapshot(volume.id, False, **kwargs)
server = self.get_random_server()
self._attach_volume(server, volume)
self._detach_volume(server, volume)
self._delete_snapshot(snapshot)
self._delete_volume(volume)
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder", "nova"]})
def create_nested_snapshots_and_attach_volume(self,
size=None,
nested_level=None,
**kwargs):
"""Create a volume from snapshot and attach/detach the volume
This scenario create volume, create it's snapshot, attach volume,
then create new volume from existing snapshot and so on,
with defined nested level, after all detach and delete them.
volume->snapshot->volume->snapshot->volume ...
:param size: Volume size - dictionary, contains two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
default values: {"min": 1, "max": 5}
:param nested_level: Nested level - dictionary, contains two values:
min - minimum number of volumes will be created
from snapshot;
max - maximum number of volumes will be created
from snapshot.
default values: {"min": 5, "max": 10}
:param kwargs: Optional parameters used during volume
snapshot creation.
"""
if size is None:
size = {"min": 1, "max": 5}
if nested_level is None:
nested_level = {"min": 5, "max": 10}
        # NOTE: A volume cannot be smaller than the snapshot it is created
        #       from, so pick one concrete size up front; otherwise the
        #       random sizing inside _create_volume could produce mismatched
        #       volume and snapshot sizes.
size = random.randint(size["min"], size["max"])
nested_level = random.randint(nested_level["min"], nested_level["max"])
source_vol = self._create_volume(size)
nes_objs = [(self.get_random_server(), source_vol,
self._create_snapshot(source_vol.id, False, **kwargs))]
self._attach_volume(nes_objs[0][0], nes_objs[0][1])
snapshot = nes_objs[0][2]
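        # Build the volume -> snapshot -> volume chain one level at a time,
        # attaching each new volume to a randomly chosen server.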
for i in range(nested_level - 1):
volume = self._create_volume(size, snapshot_id=snapshot.id)
snapshot = self._create_snapshot(volume.id, False, **kwargs)
server = self.get_random_server()
self._attach_volume(server, volume)
nes_objs.append((server, volume, snapshot))
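        # Tear down deepest-first: a snapshot with dependent volumes, or a
        # volume that still has snapshots, typically cannot be deleted.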
nes_objs.reverse()
for server, volume, snapshot in nes_objs:
self._detach_volume(server, volume)
self._delete_snapshot(snapshot)
self._delete_volume(volume)
@validation.required_services(consts.Service.CINDER)
@validation.required_contexts("volumes")
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_list_snapshots(self, force=False, detailed=True, **kwargs):
"""Create and then list a volume-snapshot.
:param force: when set to True, allows snapshot of a volume when
the volume is attached to an instance
:param detailed: True if detailed information about snapshots
should be listed
:param kwargs: optional args to create a snapshot
"""
volume = random.choice(self.context["tenant"]["volumes"])
self._create_snapshot(volume["id"], force=force, **kwargs)
self._list_snapshots(detailed)
@validation.required_services(consts.Service.CINDER, consts.Service.GLANCE)
@validation.required_openstack(users=True)
@validation.required_parameters("size")
@scenario.configure(context={"cleanup": ["cinder", "glance"]})
def create_and_upload_volume_to_image(self, size, force=False,
container_format="bare",
disk_format="raw",
do_delete=True,
**kwargs):
"""Create and upload a volume to image.
:param size: volume size (integers, in GB), or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param force: when set to True volume that is attached to an instance
could be uploaded to image
:param container_format: image container format
:param disk_format: disk format for image
:param do_delete: deletes image and volume after uploading if True
:param kwargs: optional args to create a volume
"""
volume = self._create_volume(size, **kwargs)
image = self._upload_volume_to_image(volume, force, container_format,
disk_format)
if do_delete:
self._delete_volume(volume)
self._delete_image(image)
@validation.required_cinder_services("cinder-backup")
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_volume_backup(self, size, do_delete=True,
create_volume_kwargs=None,
create_backup_kwargs=None):
"""Create a volume backup.
:param size: volume size in GB
        :param do_delete: if True, the volume and the volume backup will
                          be deleted after creation.
:param create_volume_kwargs: optional args to create a volume
:param create_backup_kwargs: optional args to create a volume backup
"""
create_volume_kwargs = create_volume_kwargs or {}
create_backup_kwargs = create_backup_kwargs or {}
volume = self._create_volume(size, **create_volume_kwargs)
backup = self._create_backup(volume.id, **create_backup_kwargs)
if do_delete:
self._delete_volume(volume)
self._delete_backup(backup)
@validation.required_cinder_services("cinder-backup")
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_restore_volume_backup(self, size, do_delete=True,
create_volume_kwargs=None,
create_backup_kwargs=None):
"""Restore volume backup.
:param size: volume size in GB
:param do_delete: if True, the volume and the volume backup will
be deleted after creation.
:param create_volume_kwargs: optional args to create a volume
:param create_backup_kwargs: optional args to create a volume backup
"""
create_volume_kwargs = create_volume_kwargs or {}
create_backup_kwargs = create_backup_kwargs or {}
volume = self._create_volume(size, **create_volume_kwargs)
backup = self._create_backup(volume.id, **create_backup_kwargs)
self._restore_backup(backup.id)
if do_delete:
self._delete_volume(volume)
self._delete_backup(backup)
@validation.required_cinder_services("cinder-backup")
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["cinder"]})
def create_and_list_volume_backups(self, size, detailed=True,
do_delete=True,
create_volume_kwargs=None,
create_backup_kwargs=None):
"""Create and then list a volume backup.
:param size: volume size in GB
:param detailed: True if detailed information about backup
should be listed
:param do_delete: if True, a volume backup will be deleted
:param create_volume_kwargs: optional args to create a volume
:param create_backup_kwargs: optional args to create a volume backup
"""
create_volume_kwargs = create_volume_kwargs or {}
create_backup_kwargs = create_backup_kwargs or {}
volume = self._create_volume(size, **create_volume_kwargs)
backup = self._create_backup(volume.id, **create_backup_kwargs)
self._list_backups(detailed)
if do_delete:
self._delete_volume(volume)
self._delete_backup(backup)
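# A minimal illustration of how these scenarios are referenced from a Rally
# task file (the runner/context values here are arbitrary example numbers):
#
#     {
#         "CinderVolumes.create_and_list_volume": [{
#             "args": {"size": 1, "detailed": true},
#             "runner": {"type": "constant", "times": 3, "concurrency": 1},
#             "context": {"users": {"tenants": 1, "users_per_tenant": 1}}
#         }]
#     }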
|
|
import os
from fabric.api import (settings, run, sudo, reboot, cd, get, put, prefix,
task, local)
from fabric.contrib.files import sed, exists, append
from crypt import crypt
from config import *
@task
def full_deploy(install_znc="yes", install_tirolbot="yes"):
"""
default is fab full_deploy:install_znc=yes,install_tirolbot=yes
"""
if install_znc == "yes":
prepare(znc="yes")
else:
prepare(znc="no")
maintenance(update="yes",
reboot="yes",
tirolbot_backup="no",
znc_backup="no")
if install_tirolbot == "yes":
tirolbot(run="no")
###############################################################################
@task
def prepare(znc="yes"):
"""
    Adds a user, hardens ssh, secures the machine, installs needed packages \
    and installs znc (use fab prepare:znc=no to skip znc)
"""
prepare_adduser()
prepare_changepassword()
prepare_addusertosudoers()
prepare_ssh()
prepare_remove_backdoors()
prepare_authorized_keys()
prepare_fail2ban()
prepare_iptables()
prepare_activate_firewall_on_startup()
prepare_packages()
if znc == "yes":
prepare_znc()
def prepare_adduser():
"""
adds a new user with a disabled password
"""
user = env.user
with settings(user=initial_user,
password=initial_password,
port=initial_port):
run("adduser --gecos '' --disabled-password {user}".format(user=user))
def prepare_changepassword():
"""
Changes the password for the user
"""
password = env.password
user = env.user
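    # crypt() with the salt from config.py produces a hash in the format
    # that usermod -p expects.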
crypted_password = crypt(password, salt)
with settings(user=initial_user,
password=initial_password,
port=initial_port):
run("usermod -p {crypted_password} {user}".format(
crypted_password=crypted_password,
user=user))
def prepare_addusertosudoers():
"""
adds the user to the sudoers list
"""
user = env.user
with settings(user=initial_user,
password=initial_password,
port=initial_port):
run("usermod -a -G sudo {user}".format(user=user))
def prepare_ssh():
"""
Changes the port, disables root login and restarts the ssh service
"""
port = env.port
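    # Keep connecting on the initial port here; the new Port value only takes
    # effect once sshd is restarted below.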
with settings(port=initial_port):
sshd_config = "/etc/ssh/sshd_config"
sed(sshd_config, "Port 22", "Port {port}".format(port=port),
use_sudo=True)
sed(sshd_config, "PermitRootLogin yes", "PermitRootLogin no",
use_sudo=True)
sudo("service ssh restart")
def prepare_remove_backdoors():
"""
Removes the ovh backdoor /root/.ssh/authorized_keys2
"""
sudo("rm -f /root/.ssh/authorized_keys2")
sudo("ls -la /root/.ssh/")
@task
def prepare_authorized_keys():
"""
    Sets up authorized_keys login; requires the key pair on the local machine
"""
authorized_keys = ".ssh/authorized_keys2"
if not exists(".ssh"):
run("mkdir .ssh")
if not exists(".ssh/authorized_keys2"):
run("touch .ssh/authorized_keys2")
with open("/home/martin/.ssh/id_rsa.pub") as f:
public_key = f.read().strip()
append(authorized_keys, public_key)
sudo("chown -R {user}:{user} .ssh".format(user=env.user))
sudo("chmod 700 .ssh")
sudo("chmod 600 .ssh/authorized_keys2")
def prepare_fail2ban():
"""
Installs and sets up fail2ban
"""
sudo("apt-get install -y fail2ban")
if exists(fail2ban_conf_location, use_sudo=True):
sudo("rm -f {filename}".format(filename=fail2ban_conf_location))
sudo("touch {filename}".format(filename=fail2ban_conf_location))
append(fail2ban_conf_location, fail2ban_conf, use_sudo=True)
sudo("service fail2ban restart")
def prepare_iptables():
"""
Sets up the iptables firewall according to the firewall_rules
"""
sudo("iptables -L -v")
if exists(firewall_rules_location, use_sudo=True):
sudo("rm -f {filename}".format(filename=firewall_rules_location))
sudo("touch {filename}".format(filename=firewall_rules_location))
append(firewall_rules_location, firewall_rules, use_sudo=True)
sudo("iptables-restore < {filename}".format(
filename=firewall_rules_location))
sudo("iptables -L -v")
def prepare_activate_firewall_on_startup():
"""
Activates the firewall on system startup
"""
if exists(startup_script_location, use_sudo=True):
sudo("rm -f {filename}".format(filename=startup_script_location))
sudo("touch {filename}".format(filename=startup_script_location))
append(startup_script_location, startup_script, use_sudo=True)
sudo("chmod +x {filename}".format(filename=startup_script_location))
def prepare_packages():
"""
Installs packages specified in packages_to_install from conf.py
"""
sudo("apt-get install -y {packages}".format(packages=" ".join(
packages_to_install)))
@task
def prepare_znc():
"""
Uploads the znc config and untars it to .znc
"""
put("znc.tar.gz")
run("tar xvfz znc.tar.gz")
run("rm znc.tar.gz")
###############################################################################
@task
def maintenance(update="yes",
reboot="no",
tirolbot_backup="no",
znc_backup="no"):
"""
default is fab maintenance:update=yes,reboot=no,tirolbot_backup=no,\
znc_backup=no
"""
if update == "yes":
maintenance_update()
if reboot == "yes":
maintenance_reboot()
maintenance_check_iptables()
if tirolbot_backup == "yes":
maintenance_tirolbot_backup()
if znc_backup == "yes":
maintenance_znc_backup()
@task
def maintenance_update():
"""
Runs sudo apt-get -y update and apt-get -y upgrade
"""
sudo("apt-get -y update")
sudo("apt-get -y upgrade")
@task
def maintenance_reboot():
"""
Reboots the machine and waits 3 minutes (180s) before reconnecting
"""
reboot(wait=180)
@task
def maintenance_check_iptables():
"""
    Checks that the iptables rules are all in place
"""
sudo("iptables -L -v")
@task
def maintenance_tirolbot_backup():
"""
Copies the cfg, db and log(s) from the host to the local machine
"""
local_dir = "/home/martin/workspace/tirolbot/tirolbot/remote_backup/"
config = "config.cfg"
db = "tirolbot.db"
log = "tirolbot.log"
with cd("workspace/tirolbot/tirolbot/"):
get(config, local_dir)
get(db, local_dir)
get(log, local_dir)
for i in range(1, 11):
backup_log = "{log}.{i}".format(log=log, i=i)
if exists(backup_log):
get(backup_log, local_dir)
else:
break
local("nautilus "
"/home/martin/workspace/tirolbot/tirolbot/remote_backup/")
@task
def maintenance_znc_backup():
"""
Backs up the znc config
"""
run("tar cfz znc.tar.gz .znc")
get("znc.tar.gz", "znc.tar.gz")
run("rm znc.tar.gz")
local("nautilus /home/martin/workspace/fab/fabfile/")
###############################################################################
@task
def tirolbot(run="no"):
"""
Deploys tirolbot, use fab tirolbot:run=yes to deploy and then run the bot
"""
tirolbot_setup_virtualenv()
tirolbot_clone_repo()
tirolbot_pip_install_requirements()
tirolbot_put_files()
tirolbot_change_sh_path()
    tirolbot_setup_crontab()
if run == "yes":
tirolbot_run()
def tirolbot_setup_virtualenv():
"""
Sets up a python3 virtualenv called tirolbot
"""
if not exists("workspace"):
run("mkdir workspace")
sudo("chown -R {user}:{user} workspace".format(user=env.user))
sudo("chmod 700 workspace")
with cd("workspace/"):
run("virtualenv --python=python3.2 tirolbot")
def tirolbot_clone_repo():
"""
Clones the tirolbot repository from github.com/martinfischer/tirolbot
"""
with cd("workspace/tirolbot/"):
run("git clone https://github.com/martinfischer/tirolbot.git")
def tirolbot_pip_install_requirements():
"""
Runs a pip install of the requirements from tirolbot/requirements.txt
"""
with cd("workspace/tirolbot/"):
with prefix("source bin/activate"):
run("pip install -r tirolbot/requirements.txt")
run("deactivate")
def tirolbot_put_files():
"""
Copies the cfg, db and log(s) from the local machine to the host
"""
local_dir = "/home/martin/workspace/tirolbot/tirolbot/"
config = "config.cfg"
local_config = os.path.join(local_dir, config)
db = "tirolbot.db"
local_db = os.path.join(local_dir, db)
log = "tirolbot.log"
local_log = os.path.join(local_dir, log)
with cd("workspace/tirolbot/tirolbot/"):
put(local_config, config)
put(local_db, db)
put(local_log, log)
for i in range(1, 11):
backup_log = "{local_log}.{i}".format(local_log=local_log, i=i)
if os.path.exists(backup_log):
put(backup_log, "{log}.{i}".format(log=log, i=i))
def tirolbot_change_sh_path():
"""
Changes the path in tirolbot.sh to point to the right user directory
"""
before = "cd /home/martin/workspace/tirolbot"
after = "cd /home/{user}/workspace/tirolbot".format(user=env.user)
filename = "/home/{user}/workspace/tirolbot/tirolbot/tirolbot.sh".format(
user=env.user)
sed(filename, before, after)
run("cat {filename}".format(filename=filename))
def tirolbot_setup_crontab():
"""
Sets up a crontab to run tirolbot.sh hourly
"""
tempcron = "/tmp/crondump"
sh = "/home/{user}/workspace/tirolbot/tirolbot/tirolbot.sh".format(
user=env.user)
if exists(tempcron):
run("rm -f {tempcron}".format(tempcron=tempcron))
with settings(warn_only=True):
run("crontab -l > {tempcron}".format(tempcron=tempcron))
append(tempcron, "@hourly {sh}".format(sh=sh))
run("crontab /tmp/crondump")
run("crontab -l")
run("rm -f {tempcron}".format(tempcron=tempcron))
@task
def tirolbot_run():
"""
Runs the tirolbot with shell logging on
"""
sh = "/home/{user}/workspace/tirolbot/tirolbot/tirolbot.sh".format(
user=env.user)
run(sh)
###############################################################################
@task
def remote_run(command):
"""
Usage: fab remote_run:"command"
"""
return run(command)
@task
def remote_sudo(command):
"""
Usage: fab remote_sudo:"command"
"""
return sudo(command)
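# Illustrative invocations (assuming host, user and port come from config.py,
# which is imported above but not shown here):
#
#     fab -H example-host full_deploy:install_znc=yes,install_tirolbot=no
#     fab -H example-host maintenance:update=yes,reboot=no
#     fab -H example-host remote_run:"uptime"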
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorFlow Debugger command parser."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class ParseCommandTest(test_util.TensorFlowTestCase):
def testParseNoBracketsOrQuotes(self):
command = ""
self.assertEqual([], command_parser.parse_command(command))
command = "a"
self.assertEqual(["a"], command_parser.parse_command(command))
command = "foo bar baz qux"
self.assertEqual(["foo", "bar", "baz", "qux"],
command_parser.parse_command(command))
command = "foo bar\tbaz\t qux"
self.assertEqual(["foo", "bar", "baz", "qux"],
command_parser.parse_command(command))
def testParseLeadingTrailingWhitespaces(self):
command = " foo bar baz qux "
self.assertEqual(["foo", "bar", "baz", "qux"],
command_parser.parse_command(command))
command = "\nfoo bar baz qux\n"
self.assertEqual(["foo", "bar", "baz", "qux"],
command_parser.parse_command(command))
def testParseCommandsWithBrackets(self):
command = "pt foo[1, 2, :]"
self.assertEqual(["pt", "foo[1, 2, :]"],
command_parser.parse_command(command))
command = "pt foo[1, 2, :] -a"
self.assertEqual(["pt", "foo[1, 2, :]", "-a"],
command_parser.parse_command(command))
command = "inject_value foo [1, 2,:] 0"
self.assertEqual(["inject_value", "foo", "[1, 2,:]", "0"],
command_parser.parse_command(command))
def testParseCommandWithTwoArgsContainingBrackets(self):
command = "pt foo[1, :] bar[:, 2]"
self.assertEqual(["pt", "foo[1, :]", "bar[:, 2]"],
command_parser.parse_command(command))
command = "pt foo[] bar[:, 2]"
self.assertEqual(["pt", "foo[]", "bar[:, 2]"],
command_parser.parse_command(command))
def testParseCommandWithUnmatchedBracket(self):
command = "pt foo[1, 2, :"
self.assertNotEqual(["pt", "foo[1, 2, :]"],
command_parser.parse_command(command))
def testParseCommandsWithQuotes(self):
command = "inject_value foo \"np.zeros([100, 500])\""
self.assertEqual(["inject_value", "foo", "np.zeros([100, 500])"],
command_parser.parse_command(command))
# The pair of double quotes should have been stripped.
command = "\"command prefix with spaces\" arg1"
self.assertEqual(["command prefix with spaces", "arg1"],
command_parser.parse_command(command))
def testParseCommandWithTwoArgsContainingQuotes(self):
command = "foo \"bar\" \"qux\""
self.assertEqual(["foo", "bar", "qux"],
command_parser.parse_command(command))
command = "foo \"\" \"qux\""
self.assertEqual(["foo", "", "qux"],
command_parser.parse_command(command))
class ExtractOutputFilePathTest(test_util.TensorFlowTestCase):
def testNoOutputFilePathIsReflected(self):
args, output_path = command_parser.extract_output_file_path(["pt", "a:0"])
self.assertEqual(["pt", "a:0"], args)
self.assertIsNone(output_path)
def testHasOutputFilePathInOneArgsIsReflected(self):
args, output_path = command_parser.extract_output_file_path(
["pt", "a:0", ">/tmp/foo.txt"])
self.assertEqual(["pt", "a:0"], args)
self.assertEqual(output_path, "/tmp/foo.txt")
def testHasOutputFilePathInTwoArgsIsReflected(self):
args, output_path = command_parser.extract_output_file_path(
["pt", "a:0", ">", "/tmp/foo.txt"])
self.assertEqual(["pt", "a:0"], args)
self.assertEqual(output_path, "/tmp/foo.txt")
def testHasGreaterThanSignButNoFileNameCausesSyntaxError(self):
with self.assertRaisesRegexp(SyntaxError, "Redirect file path is empty"):
command_parser.extract_output_file_path(
["pt", "a:0", ">"])
def testOutputPathMergedWithLastArgIsHandledCorrectly(self):
args, output_path = command_parser.extract_output_file_path(
["pt", "a:0>/tmp/foo.txt"])
self.assertEqual(["pt", "a:0"], args)
self.assertEqual(output_path, "/tmp/foo.txt")
def testOutputPathInLastArgGreaterThanInSecondLastIsHandledCorrectly(self):
args, output_path = command_parser.extract_output_file_path(
["pt", "a:0>", "/tmp/foo.txt"])
self.assertEqual(["pt", "a:0"], args)
self.assertEqual(output_path, "/tmp/foo.txt")
def testFlagWithEqualGreaterThanShouldIgnoreIntervalFlags(self):
args, output_path = command_parser.extract_output_file_path(
["lp", "--execution_time=>100ms"])
self.assertEqual(["lp", "--execution_time=>100ms"], args)
self.assertIsNone(output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--execution_time", ">1.2s"])
self.assertEqual(["lp", "--execution_time", ">1.2s"], args)
self.assertIsNone(output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "-e", ">1200"])
self.assertEqual(["lp", "-e", ">1200"], args)
self.assertIsNone(output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--foo_value", ">-.2MB"])
self.assertEqual(["lp", "--foo_value", ">-.2MB"], args)
self.assertIsNone(output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--bar_value", ">-42e3GB"])
self.assertEqual(["lp", "--bar_value", ">-42e3GB"], args)
self.assertIsNone(output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--execution_time", ">=100ms"])
self.assertEqual(["lp", "--execution_time", ">=100ms"], args)
self.assertIsNone(output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--execution_time=>=100ms"])
self.assertEqual(["lp", "--execution_time=>=100ms"], args)
self.assertIsNone(output_path)
def testFlagWithEqualGreaterThanShouldRecognizeFilePaths(self):
args, output_path = command_parser.extract_output_file_path(
["lp", ">1.2s"])
self.assertEqual(["lp"], args)
self.assertEqual("1.2s", output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--execution_time", ">x.yms"])
self.assertEqual(["lp", "--execution_time"], args)
self.assertEqual("x.yms", output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--memory", ">a.1kB"])
self.assertEqual(["lp", "--memory"], args)
self.assertEqual("a.1kB", output_path)
args, output_path = command_parser.extract_output_file_path(
["lp", "--memory", ">e002MB"])
self.assertEqual(["lp", "--memory"], args)
self.assertEqual("e002MB", output_path)
def testOneArgumentIsHandledCorrectly(self):
args, output_path = command_parser.extract_output_file_path(["lt"])
self.assertEqual(["lt"], args)
self.assertIsNone(output_path)
def testEmptyArgumentIsHandledCorrectly(self):
args, output_path = command_parser.extract_output_file_path([])
self.assertEqual([], args)
self.assertIsNone(output_path)
class ParseTensorNameTest(test_util.TensorFlowTestCase):
def testParseTensorNameWithoutSlicing(self):
(tensor_name,
tensor_slicing) = command_parser.parse_tensor_name_with_slicing(
"hidden/weights/Variable:0")
self.assertEqual("hidden/weights/Variable:0", tensor_name)
self.assertEqual("", tensor_slicing)
def testParseTensorNameWithSlicing(self):
(tensor_name,
tensor_slicing) = command_parser.parse_tensor_name_with_slicing(
"hidden/weights/Variable:0[:, 1]")
self.assertEqual("hidden/weights/Variable:0", tensor_name)
self.assertEqual("[:, 1]", tensor_slicing)
class ValidateSlicingStringTest(test_util.TensorFlowTestCase):
def testValidateValidSlicingStrings(self):
self.assertTrue(command_parser.validate_slicing_string("[1]"))
self.assertTrue(command_parser.validate_slicing_string("[2,3]"))
self.assertTrue(command_parser.validate_slicing_string("[4, 5, 6]"))
self.assertTrue(command_parser.validate_slicing_string("[7,:, :]"))
def testValidateInvalidSlicingStrings(self):
self.assertFalse(command_parser.validate_slicing_string(""))
self.assertFalse(command_parser.validate_slicing_string("[1,"))
self.assertFalse(command_parser.validate_slicing_string("2,3]"))
self.assertFalse(command_parser.validate_slicing_string("[4, foo()]"))
self.assertFalse(command_parser.validate_slicing_string("[5, bar]"))
class ParseIndicesTest(test_util.TensorFlowTestCase):
def testParseValidIndicesStringsWithBrackets(self):
self.assertEqual([0], command_parser.parse_indices("[0]"))
self.assertEqual([0], command_parser.parse_indices(" [0] "))
self.assertEqual([-1, 2], command_parser.parse_indices("[-1, 2]"))
self.assertEqual([3, 4, -5],
command_parser.parse_indices("[3,4,-5]"))
def testParseValidIndicesStringsWithoutBrackets(self):
self.assertEqual([0], command_parser.parse_indices("0"))
self.assertEqual([0], command_parser.parse_indices(" 0 "))
self.assertEqual([-1, 2], command_parser.parse_indices("-1, 2"))
self.assertEqual([3, 4, -5], command_parser.parse_indices("3,4,-5"))
def testParseInvalidIndicesStringsWithoutBrackets(self):
with self.assertRaisesRegexp(
ValueError, r"invalid literal for int\(\) with base 10: 'a'"):
self.assertEqual([0], command_parser.parse_indices("0,a"))
with self.assertRaisesRegexp(
ValueError, r"invalid literal for int\(\) with base 10: '2\]'"):
self.assertEqual([0], command_parser.parse_indices("1, 2]"))
with self.assertRaisesRegexp(
ValueError, r"invalid literal for int\(\) with base 10: ''"):
self.assertEqual([0], command_parser.parse_indices("3, 4,"))
class ParseRangesTest(test_util.TensorFlowTestCase):
INF_VALUE = sys.float_info.max
def testParseEmptyRangeString(self):
self.assertEqual([], command_parser.parse_ranges(""))
self.assertEqual([], command_parser.parse_ranges(" "))
def testParseSingleRange(self):
self.assertAllClose([[-0.1, 0.2]],
command_parser.parse_ranges("[-0.1, 0.2]"))
self.assertAllClose([[-0.1, self.INF_VALUE]],
command_parser.parse_ranges("[-0.1, inf]"))
self.assertAllClose([[-self.INF_VALUE, self.INF_VALUE]],
command_parser.parse_ranges("[-inf, inf]"))
def testParseSingleListOfRanges(self):
self.assertAllClose([[-0.1, 0.2], [10.0, 12.0]],
command_parser.parse_ranges("[[-0.1, 0.2], [10, 12]]"))
self.assertAllClose(
[[-self.INF_VALUE, -1.0], [1.0, self.INF_VALUE]],
command_parser.parse_ranges("[[-inf, -1.0],[1.0, inf]]"))
def testParseInvalidRangeString(self):
with self.assertRaises(SyntaxError):
command_parser.parse_ranges("[[1,2]")
with self.assertRaisesRegexp(ValueError,
"Incorrect number of elements in range"):
command_parser.parse_ranges("[1,2,3]")
with self.assertRaisesRegexp(ValueError,
"Incorrect number of elements in range"):
command_parser.parse_ranges("[inf]")
with self.assertRaisesRegexp(ValueError,
"Incorrect type in the 1st element of range"):
command_parser.parse_ranges("[1j, 1]")
with self.assertRaisesRegexp(ValueError,
"Incorrect type in the 2nd element of range"):
command_parser.parse_ranges("[1, 1j]")
class ParseReadableSizeStrTest(test_util.TensorFlowTestCase):
def testParseNoUnitWorks(self):
self.assertEqual(0, command_parser.parse_readable_size_str("0"))
self.assertEqual(1024, command_parser.parse_readable_size_str("1024 "))
self.assertEqual(2000, command_parser.parse_readable_size_str(" 2000 "))
def testParseKiloBytesWorks(self):
self.assertEqual(0, command_parser.parse_readable_size_str("0kB"))
self.assertEqual(1024**2, command_parser.parse_readable_size_str("1024 kB"))
self.assertEqual(1024**2 * 2,
command_parser.parse_readable_size_str("2048k"))
self.assertEqual(1024**2 * 2,
command_parser.parse_readable_size_str("2048kB"))
self.assertEqual(1024 / 4, command_parser.parse_readable_size_str("0.25k"))
def testParseMegaBytesWorks(self):
self.assertEqual(0, command_parser.parse_readable_size_str("0MB"))
self.assertEqual(1024**3, command_parser.parse_readable_size_str("1024 MB"))
self.assertEqual(1024**3 * 2,
command_parser.parse_readable_size_str("2048M"))
self.assertEqual(1024**3 * 2,
command_parser.parse_readable_size_str("2048MB"))
self.assertEqual(1024**2 / 4,
command_parser.parse_readable_size_str("0.25M"))
def testParseGigaBytesWorks(self):
self.assertEqual(0, command_parser.parse_readable_size_str("0GB"))
self.assertEqual(1024**4, command_parser.parse_readable_size_str("1024 GB"))
self.assertEqual(1024**4 * 2,
command_parser.parse_readable_size_str("2048G"))
self.assertEqual(1024**4 * 2,
command_parser.parse_readable_size_str("2048GB"))
self.assertEqual(1024**3 / 4,
command_parser.parse_readable_size_str("0.25G"))
def testParseUnsupportedUnitRaisesException(self):
with self.assertRaisesRegexp(
ValueError, "Failed to parsed human-readable byte size str: \"0foo\""):
command_parser.parse_readable_size_str("0foo")
with self.assertRaisesRegexp(
ValueError, "Failed to parsed human-readable byte size str: \"2E\""):
command_parser.parse_readable_size_str("2EB")
class ParseReadableTimeStrTest(test_util.TensorFlowTestCase):
def testParseNoUnitWorks(self):
self.assertEqual(0, command_parser.parse_readable_time_str("0"))
self.assertEqual(100, command_parser.parse_readable_time_str("100 "))
self.assertEqual(25, command_parser.parse_readable_time_str(" 25 "))
def testParseSeconds(self):
self.assertEqual(1e6, command_parser.parse_readable_time_str("1 s"))
self.assertEqual(2e6, command_parser.parse_readable_time_str("2s"))
def testParseMicros(self):
self.assertEqual(2, command_parser.parse_readable_time_str("2us"))
def testParseMillis(self):
self.assertEqual(2e3, command_parser.parse_readable_time_str("2ms"))
def testParseUnsupportedUnitRaisesException(self):
with self.assertRaisesRegexp(
ValueError, r".*float.*2us.*"):
command_parser.parse_readable_time_str("2uss")
with self.assertRaisesRegexp(
ValueError, r".*float.*2m.*"):
command_parser.parse_readable_time_str("2m")
with self.assertRaisesRegexp(
ValueError, r"Invalid time -1. Time value must be positive."):
command_parser.parse_readable_time_str("-1s")
class ParseInterval(test_util.TensorFlowTestCase):
def testParseTimeInterval(self):
self.assertEquals(
command_parser.Interval(10, True, 1e3, True),
command_parser.parse_time_interval("[10us, 1ms]"))
self.assertEquals(
command_parser.Interval(10, False, 1e3, False),
command_parser.parse_time_interval("(10us, 1ms)"))
self.assertEquals(
command_parser.Interval(10, False, 1e3, True),
command_parser.parse_time_interval("(10us, 1ms]"))
self.assertEquals(
command_parser.Interval(10, True, 1e3, False),
command_parser.parse_time_interval("[10us, 1ms)"))
self.assertEquals(command_parser.Interval(0, False, 1e3, True),
command_parser.parse_time_interval("<=1ms"))
self.assertEquals(
command_parser.Interval(1e3, True, float("inf"), False),
command_parser.parse_time_interval(">=1ms"))
self.assertEquals(command_parser.Interval(0, False, 1e3, False),
command_parser.parse_time_interval("<1ms"))
self.assertEquals(
command_parser.Interval(1e3, False, float("inf"), False),
command_parser.parse_time_interval(">1ms"))
def testParseTimeGreaterLessThanWithInvalidValueStrings(self):
with self.assertRaisesRegexp(ValueError, "Invalid value string after >= "):
command_parser.parse_time_interval(">=wms")
with self.assertRaisesRegexp(ValueError, "Invalid value string after > "):
command_parser.parse_time_interval(">Yms")
with self.assertRaisesRegexp(ValueError, "Invalid value string after <= "):
command_parser.parse_time_interval("<= _ms")
with self.assertRaisesRegexp(ValueError, "Invalid value string after < "):
command_parser.parse_time_interval("<-ms")
def testParseTimeIntervalsWithInvalidValueStrings(self):
with self.assertRaisesRegexp(ValueError, "Invalid first item in interval:"):
command_parser.parse_time_interval("[wms, 10ms]")
with self.assertRaisesRegexp(ValueError,
"Invalid second item in interval:"):
command_parser.parse_time_interval("[ 0ms, _ms]")
with self.assertRaisesRegexp(ValueError, "Invalid first item in interval:"):
command_parser.parse_time_interval("(xms, _ms]")
with self.assertRaisesRegexp(ValueError, "Invalid first item in interval:"):
command_parser.parse_time_interval("((3ms, _ms)")
def testInvalidTimeIntervalRaisesException(self):
with self.assertRaisesRegexp(
ValueError,
r"Invalid interval format: \[10us, 1ms. Valid formats are: "
r"\[min, max\], \(min, max\), <max, >min"):
command_parser.parse_time_interval("[10us, 1ms")
with self.assertRaisesRegexp(
ValueError,
r"Incorrect interval format: \[10us, 1ms, 2ms\]. Interval should "
r"specify two values: \[min, max\] or \(min, max\)"):
command_parser.parse_time_interval("[10us, 1ms, 2ms]")
with self.assertRaisesRegexp(
ValueError,
r"Invalid interval \[1s, 1ms\]. Start must be before end of interval."):
command_parser.parse_time_interval("[1s, 1ms]")
def testParseMemoryInterval(self):
self.assertEquals(
command_parser.Interval(1024, True, 2048, True),
command_parser.parse_memory_interval("[1k, 2k]"))
self.assertEquals(
command_parser.Interval(1024, False, 2048, False),
command_parser.parse_memory_interval("(1kB, 2kB)"))
self.assertEquals(
command_parser.Interval(1024, False, 2048, True),
command_parser.parse_memory_interval("(1k, 2k]"))
self.assertEquals(
command_parser.Interval(1024, True, 2048, False),
command_parser.parse_memory_interval("[1k, 2k)"))
self.assertEquals(
command_parser.Interval(0, False, 2048, True),
command_parser.parse_memory_interval("<=2k"))
self.assertEquals(
command_parser.Interval(11, True, float("inf"), False),
command_parser.parse_memory_interval(">=11"))
self.assertEquals(command_parser.Interval(0, False, 2048, False),
command_parser.parse_memory_interval("<2k"))
self.assertEquals(
command_parser.Interval(11, False, float("inf"), False),
command_parser.parse_memory_interval(">11"))
def testParseMemoryIntervalsWithInvalidValueStrings(self):
with self.assertRaisesRegexp(ValueError, "Invalid value string after >= "):
command_parser.parse_time_interval(">=wM")
with self.assertRaisesRegexp(ValueError, "Invalid value string after > "):
command_parser.parse_time_interval(">YM")
with self.assertRaisesRegexp(ValueError, "Invalid value string after <= "):
command_parser.parse_time_interval("<= _MB")
with self.assertRaisesRegexp(ValueError, "Invalid value string after < "):
command_parser.parse_time_interval("<-MB")
def testInvalidMemoryIntervalRaisesException(self):
with self.assertRaisesRegexp(
ValueError,
r"Invalid interval \[5k, 3k\]. Start of interval must be less than or "
"equal to end of interval."):
command_parser.parse_memory_interval("[5k, 3k]")
def testIntervalContains(self):
interval = command_parser.Interval(
start=1, start_included=True, end=10, end_included=True)
self.assertTrue(interval.contains(1))
self.assertTrue(interval.contains(10))
self.assertTrue(interval.contains(5))
interval.start_included = False
self.assertFalse(interval.contains(1))
self.assertTrue(interval.contains(10))
interval.end_included = False
self.assertFalse(interval.contains(1))
self.assertFalse(interval.contains(10))
interval.start_included = True
self.assertTrue(interval.contains(1))
self.assertFalse(interval.contains(10))
if __name__ == "__main__":
googletest.main()
|
|
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils
from taskflow.types import failure
import tenacity
from octavia.api.drivers import utils as provider_utils
from octavia.common import constants
from octavia.common import data_models as o_data_models
from octavia.common import exceptions
from octavia.controller.worker.v2.tasks import network_tasks
from octavia.network import base as net_base
from octavia.network import data_models
from octavia.tests.common import constants as t_constants
import octavia.tests.unit.base as base
AMPHORA_ID = 7
COMPUTE_ID = uuidutils.generate_uuid()
PORT_ID = uuidutils.generate_uuid()
SUBNET_ID = uuidutils.generate_uuid()
NETWORK_ID = uuidutils.generate_uuid()
SG_ID = uuidutils.generate_uuid()
IP_ADDRESS = "172.24.41.1"
VIP = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID,
subnet_id=t_constants.MOCK_SUBNET_ID,
qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1)
VIP2 = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID2,
subnet_id=t_constants.MOCK_SUBNET_ID2,
qos_policy_id=t_constants.MOCK_QOS_POLICY_ID2)
LB = o_data_models.LoadBalancer(vip=VIP)
LB2 = o_data_models.LoadBalancer(vip=VIP2)
FIRST_IP = {"ip_address": IP_ADDRESS, "subnet_id": SUBNET_ID}
FIXED_IPS = [FIRST_IP]
INTERFACE = data_models.Interface(id=uuidutils.generate_uuid(),
compute_id=COMPUTE_ID, fixed_ips=FIXED_IPS,
port_id=PORT_ID)
AMPS_DATA = [o_data_models.Amphora(id=t_constants.MOCK_AMP_ID1,
vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID1,
vrrp_ip=t_constants.MOCK_VRRP_IP1),
o_data_models.Amphora(id=t_constants.MOCK_AMP_ID2,
vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID2,
vrrp_ip=t_constants.MOCK_VRRP_IP2)
]
UPDATE_DICT = {constants.TOPOLOGY: None}
_session_mock = mock.MagicMock()
class TestException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
@mock.patch('octavia.common.utils.get_network_driver')
class TestNetworkTasks(base.TestCase):
def setUp(self):
network_tasks.LOG = mock.MagicMock()
self.db_amphora_mock = mock.MagicMock()
self.db_load_balancer_mock = mock.MagicMock()
self.vip_mock = mock.MagicMock()
self.vip_mock.subnet_id = SUBNET_ID
self.db_load_balancer_mock.vip = self.vip_mock
self.db_load_balancer_mock.amphorae = []
self.db_amphora_mock.id = AMPHORA_ID
self.db_amphora_mock.compute_id = COMPUTE_ID
self.db_amphora_mock.status = constants.AMPHORA_ALLOCATED
self.boot_net_id = NETWORK_ID
conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
conf.config(group="controller_worker",
amp_boot_network_list=[self.boot_net_id])
conf.config(group="networking", max_retries=1)
self.amphora_mock = {constants.ID: AMPHORA_ID,
constants.COMPUTE_ID: COMPUTE_ID,
constants.LB_NETWORK_IP: IP_ADDRESS,
}
self.load_balancer_mock = {
constants.LOADBALANCER_ID: uuidutils.generate_uuid(),
constants.VIP_SUBNET_ID: VIP.subnet_id,
constants.VIP_PORT_ID: VIP.port_id,
constants.VIP_ADDRESS: VIP.ip_address,
constants.VIP_QOS_POLICY_ID: t_constants.MOCK_QOS_POLICY_ID1
}
conf = oslo_fixture.Config(cfg.CONF)
conf.config(group="controller_worker",
amp_boot_network_list=[self.boot_net_id])
super().setUp()
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_calculate_amphora_delta(self, mock_get_session, mock_lb_repo_get,
mock_get_net_driver):
LB_ID = uuidutils.generate_uuid()
DELETE_NETWORK_ID = uuidutils.generate_uuid()
MEMBER_NETWORK_ID = uuidutils.generate_uuid()
MEMBER_SUBNET_ID = uuidutils.generate_uuid()
VRRP_PORT_ID = uuidutils.generate_uuid()
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
member_mock = mock.MagicMock()
member_mock.subnet_id = MEMBER_SUBNET_ID
pool_mock = mock.MagicMock()
pool_mock.members = [member_mock]
lb_mock = mock.MagicMock()
lb_mock.pools = [pool_mock]
lb_dict = {constants.LOADBALANCER_ID: LB_ID}
amphora_dict = {constants.ID: AMPHORA_ID,
constants.COMPUTE_ID: COMPUTE_ID,
constants.VRRP_PORT_ID: VRRP_PORT_ID}
vrrp_port_mock = mock.MagicMock()
vrrp_port_mock.network_id = self.boot_net_id
vrrp_port_dict = {constants.NETWORK_ID: self.boot_net_id}
mock_subnet = mock.MagicMock()
mock_subnet.network_id = MEMBER_NETWORK_ID
nic1_delete_mock = mock.MagicMock()
nic1_delete_mock.network_id = DELETE_NETWORK_ID
nic2_keep_mock = mock.MagicMock()
nic2_keep_mock.network_id = self.boot_net_id
mock_lb_repo_get.return_value = lb_mock
mock_driver.get_port.return_value = vrrp_port_mock
mock_driver.get_subnet.return_value = mock_subnet
mock_driver.get_plugged_networks.return_value = [nic1_delete_mock,
nic2_keep_mock]
calc_amp_delta = network_tasks.CalculateAmphoraDelta()
# Test vrrp_port_id is None
result = calc_amp_delta.execute(lb_dict, amphora_dict, {})
self.assertEqual(AMPHORA_ID, result[constants.AMPHORA_ID])
self.assertEqual(COMPUTE_ID, result[constants.COMPUTE_ID])
self.assertEqual(1, len(result[constants.ADD_NICS]))
self.assertEqual(MEMBER_NETWORK_ID,
result[constants.ADD_NICS][0][constants.NETWORK_ID])
self.assertEqual(1, len(result[constants.DELETE_NICS]))
self.assertEqual(
DELETE_NETWORK_ID,
result[constants.DELETE_NICS][0][constants.NETWORK_ID])
mock_driver.get_port.assert_called_once_with(VRRP_PORT_ID)
mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID)
mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
# Test with vrrp_port_id
mock_driver.reset_mock()
result = calc_amp_delta.execute(lb_dict, amphora_dict, {},
vrrp_port=vrrp_port_dict)
self.assertEqual(AMPHORA_ID, result[constants.AMPHORA_ID])
self.assertEqual(COMPUTE_ID, result[constants.COMPUTE_ID])
self.assertEqual(1, len(result[constants.ADD_NICS]))
self.assertEqual(MEMBER_NETWORK_ID,
result[constants.ADD_NICS][0][constants.NETWORK_ID])
self.assertEqual(1, len(result[constants.DELETE_NICS]))
self.assertEqual(
DELETE_NETWORK_ID,
result[constants.DELETE_NICS][0][constants.NETWORK_ID])
mock_driver.get_port.assert_not_called()
mock_driver.get_subnet.assert_called_once_with(MEMBER_SUBNET_ID)
mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_calculate_delta(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_lb.return_value = self.db_load_balancer_mock
self.db_amphora_mock.to_dict.return_value = {
constants.ID: AMPHORA_ID, constants.COMPUTE_ID: COMPUTE_ID,
constants.VRRP_PORT_ID: PORT_ID}
mock_get_net_driver.return_value = mock_driver
mock_driver.get_plugged_networks.return_value = [
data_models.Interface(network_id=self.boot_net_id)]
mock_driver.get_port.return_value = data_models.Port(
network_id=self.boot_net_id)
EMPTY = {}
empty_deltas = {self.db_amphora_mock.id: data_models.Delta(
amphora_id=AMPHORA_ID,
compute_id=COMPUTE_ID,
add_nics=[],
delete_nics=[]).to_dict(recurse=True)}
calc_delta = network_tasks.CalculateDelta()
self.assertEqual(EMPTY,
calc_delta.execute(self.load_balancer_mock, {}))
# Test with one amp and no pools, nothing plugged
# Delta should be empty
mock_driver.reset_mock()
self.db_amphora_mock.load_balancer = self.db_load_balancer_mock
self.db_load_balancer_mock.amphorae = [self.db_amphora_mock]
self.db_load_balancer_mock.pools = []
self.assertEqual(empty_deltas,
calc_delta.execute(self.load_balancer_mock, {}))
mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
# Pool mock should be configured explicitly for each test
pool_mock = mock.MagicMock()
self.db_load_balancer_mock.pools = [pool_mock]
# Test with one amp and one pool but no members, nothing plugged
# Delta should be empty
pool_mock.members = []
self.assertEqual(empty_deltas,
calc_delta.execute(self.load_balancer_mock, {}))
# Test with one amp and one pool and one member, nothing plugged
# Dummy AZ is provided
# Delta should be one additional subnet to plug
mock_driver.reset_mock()
member_mock = mock.MagicMock()
member_mock.subnet_id = 1
pool_mock.members = [member_mock]
az = {
constants.COMPUTE_ZONE: 'foo'
}
mock_driver.get_subnet.return_value = data_models.Subnet(id=2,
network_id=3)
ndm = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=[
data_models.Interface(network_id=3)],
delete_nics=[]).to_dict(recurse=True)
self.assertEqual({self.db_amphora_mock.id: ndm},
calc_delta.execute(self.load_balancer_mock, az))
vrrp_port_call = mock.call(PORT_ID)
mock_driver.get_port.assert_has_calls([vrrp_port_call])
self.assertEqual(1, mock_driver.get_port.call_count)
member_subnet_call = mock.call(member_mock.subnet_id)
mock_driver.get_subnet.assert_has_calls([member_subnet_call])
self.assertEqual(1, mock_driver.get_subnet.call_count)
# Test with one amp and one pool and one member, already plugged
# Delta should be empty
mock_driver.reset_mock()
member_mock = mock.MagicMock()
member_mock.subnet_id = 1
pool_mock.members = [member_mock]
mock_driver.get_plugged_networks.return_value = [
data_models.Interface(network_id=3),
data_models.Interface(network_id=self.boot_net_id)]
self.assertEqual(empty_deltas,
calc_delta.execute(self.load_balancer_mock, {}))
# Test with one amp and one pool and one member, wrong network plugged
# Delta should be one network to add and one to remove
mock_driver.reset_mock()
member_mock = mock.MagicMock()
member_mock.subnet_id = 1
pool_mock.members = [member_mock]
mock_driver.get_plugged_networks.return_value = [
data_models.Interface(network_id=2),
data_models.Interface(network_id=self.boot_net_id)]
ndm = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=[
data_models.Interface(network_id=3)],
delete_nics=[
data_models.Interface(network_id=2)]
).to_dict(recurse=True)
self.assertEqual({self.db_amphora_mock.id: ndm},
calc_delta.execute(self.load_balancer_mock, {}))
# Test with one amp and one pool and no members, one network plugged
# Delta should be one network to remove
mock_driver.reset_mock()
pool_mock.members = []
mock_driver.get_plugged_networks.return_value = [
data_models.Interface(network_id=2),
data_models.Interface(network_id=self.boot_net_id)
]
ndm = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=[],
delete_nics=[
data_models.Interface(network_id=2)]
).to_dict(recurse=True)
self.assertEqual({self.db_amphora_mock.id: ndm},
calc_delta.execute(self.load_balancer_mock, {}))
def test_get_plumbed_networks(self, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
mock_driver.get_plugged_networks.side_effect = [['blah']]
net = network_tasks.GetPlumbedNetworks()
self.assertEqual(['blah'], net.execute(self.amphora_mock))
mock_driver.get_plugged_networks.assert_called_once_with(
COMPUTE_ID)
def test_plug_networks(self, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
def _interface(network_id):
return [data_models.Interface(network_id=network_id)]
net = network_tasks.PlugNetworks()
net.execute(self.amphora_mock, None)
self.assertFalse(mock_driver.plug_network.called)
delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=[],
delete_nics=[]).to_dict(recurse=True)
net.execute(self.amphora_mock, delta)
self.assertFalse(mock_driver.plug_network.called)
delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=_interface(1),
delete_nics=[]).to_dict(recurse=True)
net.execute(self.amphora_mock, delta)
mock_driver.plug_network.assert_called_once_with(COMPUTE_ID, 1)
# revert
net.revert(self.amphora_mock, None)
self.assertFalse(mock_driver.unplug_network.called)
delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=[],
delete_nics=[]).to_dict(recurse=True)
net.revert(self.amphora_mock, delta)
self.assertFalse(mock_driver.unplug_network.called)
delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=_interface(1),
delete_nics=[]).to_dict(recurse=True)
net.revert(self.amphora_mock, delta)
mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
mock_driver.reset_mock()
mock_driver.unplug_network.side_effect = net_base.NetworkNotFound
net.revert(self.amphora_mock, delta) # No exception
mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
mock_driver.reset_mock()
mock_driver.unplug_network.side_effect = TestException('test')
self.assertRaises(TestException,
net.revert,
self.amphora_mock,
delta)
mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
def test_unplug_networks(self, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
def _interface(network_id):
return [data_models.Interface(network_id=network_id)]
net = network_tasks.UnPlugNetworks()
net.execute(self.db_amphora_mock, None)
self.assertFalse(mock_driver.unplug_network.called)
delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=[],
delete_nics=[]).to_dict(recurse=True)
net.execute(self.amphora_mock, delta)
self.assertFalse(mock_driver.unplug_network.called)
delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=[],
delete_nics=_interface(1)
).to_dict(recurse=True)
net.execute(self.amphora_mock, delta)
mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
mock_driver.reset_mock()
mock_driver.unplug_network.side_effect = net_base.NetworkNotFound
net.execute(self.amphora_mock, delta) # No exception
mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
# Do a test with a general exception in case behavior changes
mock_driver.reset_mock()
mock_driver.unplug_network.side_effect = Exception()
net.execute(self.amphora_mock, delta) # No exception
mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
def test_get_member_ports(self, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
def _interface(port_id):
return [data_models.Interface(port_id=port_id)]
net_task = network_tasks.GetMemberPorts()
net_task.execute(self.load_balancer_mock, self.amphora_mock)
mock_driver.get_port.assert_called_once_with(t_constants.MOCK_PORT_ID)
mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
mock_driver.reset_mock()
net_task = network_tasks.GetMemberPorts()
mock_driver.get_plugged_networks.return_value = _interface(1)
mock_driver.get_port.side_effect = [
data_models.Port(network_id=NETWORK_ID),
data_models.Port(network_id=NETWORK_ID)]
net_task.execute(self.load_balancer_mock, self.amphora_mock)
self.assertEqual(2, mock_driver.get_port.call_count)
self.assertFalse(mock_driver.get_network.called)
mock_driver.reset_mock()
port_mock = mock.MagicMock()
fixed_ip_mock = mock.MagicMock()
fixed_ip_mock.subnet_id = 1
port_mock.fixed_ips = [fixed_ip_mock]
net_task = network_tasks.GetMemberPorts()
mock_driver.get_plugged_networks.return_value = _interface(1)
mock_driver.get_port.side_effect = [
data_models.Port(network_id=NETWORK_ID), port_mock]
ports = net_task.execute(self.load_balancer_mock, self.amphora_mock)
mock_driver.get_subnet.assert_called_once_with(1)
self.assertEqual([port_mock], ports)
def test_handle_network_delta(self, mock_get_net_driver):
mock_net_driver = mock.MagicMock()
self.db_amphora_mock.to_dict.return_value = {
constants.ID: AMPHORA_ID, constants.COMPUTE_ID: COMPUTE_ID}
mock_get_net_driver.return_value = mock_net_driver
nic1 = data_models.Interface()
nic1.network_id = uuidutils.generate_uuid()
nic2 = data_models.Interface()
nic2.network_id = uuidutils.generate_uuid()
interface1 = mock.MagicMock()
interface1.port_id = uuidutils.generate_uuid()
port1 = mock.MagicMock()
port1.network_id = uuidutils.generate_uuid()
fixed_ip = mock.MagicMock()
fixed_ip.subnet_id = uuidutils.generate_uuid()
port1.fixed_ips = [fixed_ip]
subnet = mock.MagicMock()
network = mock.MagicMock()
delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=[nic1],
delete_nics=[nic2, nic2, nic2]
).to_dict(recurse=True)
mock_net_driver.plug_network.return_value = interface1
mock_net_driver.get_port.return_value = port1
mock_net_driver.get_network.return_value = network
mock_net_driver.get_subnet.return_value = subnet
mock_net_driver.unplug_network.side_effect = [
None, net_base.NetworkNotFound, Exception]
handle_net_delta_obj = network_tasks.HandleNetworkDelta()
result = handle_net_delta_obj.execute(self.amphora_mock,
delta)
mock_net_driver.plug_network.assert_called_once_with(
self.db_amphora_mock.compute_id, nic1.network_id)
mock_net_driver.get_port.assert_called_once_with(interface1.port_id)
mock_net_driver.get_network.assert_called_once_with(port1.network_id)
mock_net_driver.get_subnet.assert_called_once_with(fixed_ip.subnet_id)
self.assertEqual({self.db_amphora_mock.id: [port1.to_dict()]}, result)
mock_net_driver.unplug_network.assert_called_with(
self.db_amphora_mock.compute_id, nic2.network_id)
# Revert
delta2 = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=[nic1, nic1],
delete_nics=[nic2, nic2, nic2]
).to_dict(recurse=True)
mock_net_driver.unplug_network.reset_mock()
handle_net_delta_obj.revert(
failure.Failure.from_exception(Exception('boom')), None, None)
mock_net_driver.unplug_network.assert_not_called()
mock_net_driver.unplug_network.reset_mock()
handle_net_delta_obj.revert(None, None, None)
mock_net_driver.unplug_network.assert_not_called()
mock_net_driver.unplug_network.reset_mock()
handle_net_delta_obj.revert(None, None, delta2)
def test_handle_network_deltas(self, mock_get_net_driver):
mock_driver = mock.MagicMock()
self.db_amphora_mock.to_dict.return_value = {
constants.ID: AMPHORA_ID, constants.COMPUTE_ID: COMPUTE_ID}
mock_get_net_driver.return_value = mock_driver
def _interface(network_id):
return [data_models.Interface(network_id=network_id)]
net = network_tasks.HandleNetworkDeltas()
net.execute({})
self.assertFalse(mock_driver.plug_network.called)
delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=[],
delete_nics=[]).to_dict(recurse=True)
net.execute({self.db_amphora_mock.id: delta})
self.assertFalse(mock_driver.plug_network.called)
delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=_interface(1),
delete_nics=[]).to_dict(recurse=True)
net.execute({self.db_amphora_mock.id: delta})
mock_driver.plug_network.assert_called_once_with(COMPUTE_ID, 1)
# revert
net.execute({self.db_amphora_mock.id: delta})
self.assertFalse(mock_driver.unplug_network.called)
delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=[],
delete_nics=[]).to_dict(recurse=True)
net.execute({self.db_amphora_mock.id: delta})
self.assertFalse(mock_driver.unplug_network.called)
delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=_interface(1),
delete_nics=[]).to_dict(recurse=True)
mock_driver.reset_mock()
mock_driver.unplug_network.side_effect = net_base.NetworkNotFound
mock_driver.reset_mock()
mock_driver.unplug_network.side_effect = TestException('test')
self.assertRaises(TestException, net.revert, mock.ANY,
{self.db_amphora_mock.id: delta})
mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
mock_driver.reset_mock()
net.execute({})
self.assertFalse(mock_driver.unplug_network.called)
delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=[],
delete_nics=[]).to_dict(recurse=True)
net.execute({self.db_amphora_mock.id: delta})
self.assertFalse(mock_driver.unplug_network.called)
delta = data_models.Delta(amphora_id=self.db_amphora_mock.id,
compute_id=self.db_amphora_mock.compute_id,
add_nics=[],
delete_nics=_interface(1)
).to_dict(recurse=True)
net.execute({self.db_amphora_mock.id: delta})
mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
mock_driver.reset_mock()
mock_driver.unplug_network.side_effect = net_base.NetworkNotFound
net.execute({self.db_amphora_mock.id: delta})
mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
# Do a test with a general exception in case behavior changes
mock_driver.reset_mock()
mock_driver.unplug_network.side_effect = Exception()
net.execute({self.db_amphora_mock.id: delta})
mock_driver.unplug_network.assert_called_once_with(COMPUTE_ID, 1)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_plug_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
LB.amphorae = AMPS_DATA
mock_get_lb.return_value = LB
LB.amphorae = AMPS_DATA
net = network_tasks.PlugVIP()
amp = mock.MagicMock()
amp.to_dict.return_value = 'vip'
mock_driver.plug_vip.return_value = [amp]
data = net.execute(self.load_balancer_mock)
mock_driver.plug_vip.assert_called_once_with(LB, LB.vip)
self.assertEqual(["vip"], data)
# revert
net.revert([o_data_models.Amphora().to_dict()],
self.load_balancer_mock)
mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip)
# revert with exception
mock_driver.reset_mock()
mock_driver.unplug_vip.side_effect = Exception('UnplugVipException')
net.revert([o_data_models.Amphora().to_dict()],
self.load_balancer_mock)
mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'get_current_loadbalancer_from_db')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_apply_qos_on_creation(self, mock_get_session, mock_get_lb,
mock_get_lb_db, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
net = network_tasks.ApplyQos()
mock_get_lb_db.return_value = LB
mock_get_lb.return_value = LB
# execute
UPDATE_DICT[constants.TOPOLOGY] = constants.TOPOLOGY_SINGLE
update_dict = UPDATE_DICT
net.execute(self.load_balancer_mock, [AMPS_DATA[0]], update_dict)
mock_driver.apply_qos_on_port.assert_called_once_with(
VIP.qos_policy_id, AMPS_DATA[0].vrrp_port_id)
self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
standby_topology = constants.TOPOLOGY_ACTIVE_STANDBY
mock_driver.reset_mock()
update_dict[constants.TOPOLOGY] = standby_topology
net.execute(self.load_balancer_mock, AMPS_DATA, update_dict)
mock_driver.apply_qos_on_port.assert_called_with(
t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
# revert
mock_driver.reset_mock()
update_dict = UPDATE_DICT
net.revert(None, self.load_balancer_mock, [AMPS_DATA[0]], update_dict)
self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
update_dict[constants.TOPOLOGY] = standby_topology
net.revert(None, self.load_balancer_mock, AMPS_DATA, update_dict)
self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'get_current_loadbalancer_from_db')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_apply_qos_on_update(self, mock_get_session, mock_get_lb,
mock_get_lb_db, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
net = network_tasks.ApplyQos()
null_qos_vip = o_data_models.Vip(qos_policy_id=None)
null_qos_lb = o_data_models.LoadBalancer(
vip=null_qos_vip, topology=constants.TOPOLOGY_SINGLE,
amphorae=[AMPS_DATA[0]])
null_qos_lb_dict = (
provider_utils.db_loadbalancer_to_provider_loadbalancer(
null_qos_lb).to_dict())
tmp_vip_object = o_data_models.Vip(
qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1)
tmp_lb = o_data_models.LoadBalancer(
vip=tmp_vip_object, topology=constants.TOPOLOGY_SINGLE,
amphorae=[AMPS_DATA[0]])
pr_tm_dict = provider_utils.db_loadbalancer_to_provider_loadbalancer(
tmp_lb).to_dict()
mock_get_lb.return_value = tmp_lb
# execute
update_dict = {'description': 'fool'}
net.execute(pr_tm_dict, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_once_with(
t_constants.MOCK_QOS_POLICY_ID1, AMPS_DATA[0].vrrp_port_id)
self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
mock_get_lb.reset_mock()
mock_get_lb.return_value = null_qos_lb
update_dict = {'vip': {'qos_policy_id': None}}
net.execute(null_qos_lb_dict, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_once_with(
None, AMPS_DATA[0].vrrp_port_id)
self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
update_dict = {'name': '123'}
net.execute(null_qos_lb_dict, update_dict=update_dict)
self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
mock_get_lb.reset_mock()
update_dict = {'description': 'fool'}
tmp_lb.amphorae = AMPS_DATA
tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
mock_get_lb.return_value = tmp_lb
net.execute(pr_tm_dict, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_with(
t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
update_dict = {'description': 'fool',
'vip': {
'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID1}}
tmp_lb.amphorae = AMPS_DATA
tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
net.execute(pr_tm_dict, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_with(
t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
mock_get_lb.return_value = null_qos_lb
mock_driver.reset_mock()
update_dict = {}
net.execute(null_qos_lb_dict, update_dict=update_dict)
self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
# revert
mock_driver.reset_mock()
mock_get_lb.reset_mock()
tmp_lb.amphorae = [AMPS_DATA[0]]
tmp_lb.topology = constants.TOPOLOGY_SINGLE
update_dict = {'description': 'fool'}
mock_get_lb_db.return_value = tmp_lb
net.revert(None, pr_tm_dict, update_dict=update_dict)
self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
update_dict = {'vip': {'qos_policy_id': None}}
ori_lb_db = LB2
ori_lb_db.amphorae = [AMPS_DATA[0]]
mock_get_lb_db.return_value = ori_lb_db
net.revert(None, null_qos_lb_dict, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_once_with(
t_constants.MOCK_QOS_POLICY_ID2, AMPS_DATA[0].vrrp_port_id)
self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
mock_get_lb.reset_mock()
update_dict = {'vip': {
'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID2}}
tmp_lb.amphorae = AMPS_DATA
tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
ori_lb_db = LB2
ori_lb_db.amphorae = [AMPS_DATA[0]]
mock_get_lb_db.return_value = ori_lb_db
net.revert(None, pr_tm_dict, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_with(
t_constants.MOCK_QOS_POLICY_ID2, mock.ANY)
self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_unplug_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_lb.return_value = LB
mock_get_net_driver.return_value = mock_driver
net = network_tasks.UnplugVIP()
net.execute(self.load_balancer_mock)
mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_allocate_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_lb.return_value = LB
mock_get_net_driver.return_value = mock_driver
net = network_tasks.AllocateVIP()
mock_driver.allocate_vip.return_value = LB.vip
mock_driver.reset_mock()
self.assertEqual(LB.vip.to_dict(),
net.execute(self.load_balancer_mock))
mock_driver.allocate_vip.assert_called_once_with(LB)
# revert
vip_mock = VIP.to_dict()
net.revert(vip_mock, self.load_balancer_mock)
mock_driver.deallocate_vip.assert_called_once_with(
o_data_models.Vip(**vip_mock))
# revert exception
mock_driver.reset_mock()
mock_driver.deallocate_vip.side_effect = Exception('DeallVipException')
vip_mock = VIP.to_dict()
net.revert(vip_mock, self.load_balancer_mock)
mock_driver.deallocate_vip.assert_called_once_with(o_data_models.Vip(
**vip_mock))
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_allocate_vip_for_failover(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_lb.return_value = LB
mock_get_net_driver.return_value = mock_driver
net = network_tasks.AllocateVIPforFailover()
mock_driver.allocate_vip.return_value = LB.vip
mock_driver.reset_mock()
self.assertEqual(LB.vip.to_dict(),
net.execute(self.load_balancer_mock))
mock_driver.allocate_vip.assert_called_once_with(LB)
# revert
vip_mock = VIP.to_dict()
net.revert(vip_mock, self.load_balancer_mock)
mock_driver.deallocate_vip.assert_not_called()
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_deallocate_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
net = network_tasks.DeallocateVIP()
vip = o_data_models.Vip()
lb = o_data_models.LoadBalancer(vip=vip)
mock_get_lb.return_value = lb
net.execute(self.load_balancer_mock)
mock_driver.deallocate_vip.assert_called_once_with(lb.vip)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_update_vip(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
vip = o_data_models.Vip()
lb = o_data_models.LoadBalancer(vip=vip)
mock_get_lb.return_value = lb
listeners = [{constants.LOADBALANCER_ID: lb.id}]
net_task = network_tasks.UpdateVIP()
net_task.execute(listeners)
mock_driver.update_vip.assert_called_once_with(lb)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_update_vip_for_delete(self, mock_get_session, mock_get_lb,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
vip = o_data_models.Vip()
lb = o_data_models.LoadBalancer(vip=vip)
mock_get_lb.return_value = lb
listener = {constants.LOADBALANCER_ID: lb.id}
net_task = network_tasks.UpdateVIPForDelete()
net_task.execute(listener)
mock_driver.update_vip.assert_called_once_with(lb, for_delete=True)
@mock.patch('octavia.db.api.get_session', return_value='TEST')
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_get_amphora_network_configs_by_id(
self, mock_lb_get, mock_amp_get,
mock_get_session, mock_get_net_driver):
LB_ID = uuidutils.generate_uuid()
AMP_ID = uuidutils.generate_uuid()
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
mock_amp_get.return_value = 'mock amphora'
mock_lb_get.return_value = 'mock load balancer'
net_task = network_tasks.GetAmphoraNetworkConfigsByID()
net_task.execute(LB_ID, AMP_ID)
mock_driver.get_network_configs.assert_called_once_with(
'mock load balancer', amphora='mock amphora')
mock_amp_get.assert_called_once_with('TEST', id=AMP_ID)
mock_lb_get.assert_called_once_with('TEST', id=LB_ID)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_get_amphorae_network_configs(self, mock_session, mock_lb_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_lb_get.return_value = LB
mock_get_net_driver.return_value = mock_driver
lb = o_data_models.LoadBalancer()
net_task = network_tasks.GetAmphoraeNetworkConfigs()
net_task.execute(self.load_balancer_mock)
mock_driver.get_network_configs.assert_called_once_with(lb)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
def test_failover_preparation_for_amphora(self, mock_session, mock_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
failover = network_tasks.FailoverPreparationForAmphora()
failover.execute(self.amphora_mock)
mock_driver.failover_preparation.assert_called_once_with(
self.db_amphora_mock)
def test_retrieve_portids_on_amphora_except_lb_network(
self, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
def _interface(port_id):
return [data_models.Interface(port_id=port_id)]
net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork()
mock_driver.get_plugged_networks.return_value = []
net_task.execute(self.amphora_mock)
mock_driver.get_plugged_networks.assert_called_once_with(
compute_id=COMPUTE_ID)
self.assertFalse(mock_driver.get_port.called)
mock_driver.reset_mock()
net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork()
mock_driver.get_plugged_networks.return_value = _interface(1)
net_task.execute(self.amphora_mock)
mock_driver.get_port.assert_called_once_with(port_id=1)
mock_driver.reset_mock()
net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork()
port_mock = mock.MagicMock()
fixed_ip_mock = mock.MagicMock()
fixed_ip_mock.ip_address = IP_ADDRESS
port_mock.fixed_ips = [fixed_ip_mock]
mock_driver.get_plugged_networks.return_value = _interface(1)
mock_driver.get_port.return_value = port_mock
ports = net_task.execute(self.amphora_mock)
self.assertEqual([], ports)
mock_driver.reset_mock()
net_task = network_tasks.RetrievePortIDsOnAmphoraExceptLBNetwork()
port_mock = mock.MagicMock()
fixed_ip_mock = mock.MagicMock()
fixed_ip_mock.ip_address = "172.17.17.17"
port_mock.fixed_ips = [fixed_ip_mock]
mock_driver.get_plugged_networks.return_value = _interface(1)
mock_driver.get_port.return_value = port_mock
ports = net_task.execute(self.amphora_mock)
self.assertEqual(1, len(ports))
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=mock.MagicMock())
def test_plug_ports(self, mock_session, mock_get, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
port1 = mock.MagicMock()
port2 = mock.MagicMock()
amp = {constants.ID: AMPHORA_ID,
constants.COMPUTE_ID: '1234'}
plugports = network_tasks.PlugPorts()
plugports.execute(amp, [port1, port2])
mock_driver.plug_port.assert_any_call(self.db_amphora_mock, port1)
mock_driver.plug_port.assert_any_call(self.db_amphora_mock, port2)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_update_vip_sg(self, mock_session, mock_lb_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_driver.update_vip_sg.return_value = SG_ID
mock_lb_get.return_value = LB
mock_get_net_driver.return_value = mock_driver
net = network_tasks.UpdateVIPSecurityGroup()
sg_id = net.execute(self.load_balancer_mock)
mock_driver.update_vip_sg.assert_called_once_with(LB, LB.vip)
self.assertEqual(sg_id, SG_ID)
def test_get_subnet_from_vip(self, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
net = network_tasks.GetSubnetFromVIP()
net.execute(self.load_balancer_mock)
mock_driver.get_subnet.assert_called_once_with(LB.vip.subnet_id)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_plug_vip_amphora(self, mock_session, mock_lb_get, mock_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
amphora = {constants.ID: AMPHORA_ID,
constants.LB_NETWORK_IP: IP_ADDRESS}
mock_lb_get.return_value = LB
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
net = network_tasks.PlugVIPAmphora()
subnet = {constants.ID: SUBNET_ID}
mockSubnet = mock.MagicMock()
mock_driver.get_subnet.return_value = mockSubnet
net.execute(self.load_balancer_mock, amphora, subnet)
mock_driver.plug_aap_port.assert_called_once_with(
LB, LB.vip, self.db_amphora_mock, mockSubnet)
@mock.patch('octavia.db.repositories.AmphoraRepository.get')
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
@mock.patch('octavia.db.api.get_session', return_value=_session_mock)
def test_revert_plug_vip_amphora(self, mock_session, mock_lb_get, mock_get,
mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_lb_get.return_value = LB
mock_get.return_value = self.db_amphora_mock
mock_get_net_driver.return_value = mock_driver
net = network_tasks.PlugVIPAmphora()
amphora = {constants.ID: AMPHORA_ID,
constants.LB_NETWORK_IP: IP_ADDRESS}
subnet = {constants.ID: SUBNET_ID}
mockSubnet = mock.MagicMock()
mock_driver.get_subnet.return_value = mockSubnet
net.revert(AMPS_DATA[0].to_dict(), self.load_balancer_mock,
amphora, subnet)
mock_driver.unplug_aap_port.assert_called_once_with(
LB.vip, self.db_amphora_mock, mockSubnet)
@mock.patch('octavia.controller.worker.v2.tasks.network_tasks.DeletePort.'
'update_progress')
def test_delete_port(self, mock_update_progress, mock_get_net_driver):
PORT_ID = uuidutils.generate_uuid()
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
mock_driver.delete_port.side_effect = [
mock.DEFAULT, exceptions.OctaviaException('boom'), mock.DEFAULT,
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom')]
mock_driver.admin_down_port.side_effect = [
mock.DEFAULT, exceptions.OctaviaException('boom')]
net_task = network_tasks.DeletePort()
# Limit the retry attempts for the test run to save time
net_task.execute.retry.stop = tenacity.stop_after_attempt(2)
# Test port ID is None (no-op)
net_task.execute(None)
mock_update_progress.assert_not_called()
mock_driver.delete_port.assert_not_called()
# Test successful delete
mock_update_progress.reset_mock()
mock_driver.reset_mock()
net_task.execute(PORT_ID)
mock_update_progress.assert_called_once_with(0.5)
mock_driver.delete_port.assert_called_once_with(PORT_ID)
# Test exception and successful retry
mock_update_progress.reset_mock()
mock_driver.reset_mock()
net_task.execute(PORT_ID)
mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
mock.call(PORT_ID)])
# Test passive failure
mock_update_progress.reset_mock()
mock_driver.reset_mock()
net_task.execute(PORT_ID, passive_failure=True)
mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
mock.call(PORT_ID)])
mock_driver.admin_down_port.assert_called_once_with(PORT_ID)
# Test passive failure admin down failure
mock_update_progress.reset_mock()
mock_driver.reset_mock()
mock_driver.admin_down_port.reset_mock()
net_task.execute(PORT_ID, passive_failure=True)
mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
mock.call(PORT_ID)])
mock_driver.admin_down_port.assert_called_once_with(PORT_ID)
# Test non-passive failure
mock_update_progress.reset_mock()
mock_driver.reset_mock()
mock_driver.admin_down_port.reset_mock()
mock_driver.admin_down_port.side_effect = [
exceptions.OctaviaException('boom')]
self.assertRaises(exceptions.OctaviaException, net_task.execute,
PORT_ID)
mock_update_progress.assert_has_calls([mock.call(0.5), mock.call(1.0)])
mock_driver.delete_port.assert_has_calls([mock.call(PORT_ID),
mock.call(PORT_ID)])
mock_driver.admin_down_port.assert_not_called()
def test_create_vip_base_port(self, mock_get_net_driver):
AMP_ID = uuidutils.generate_uuid()
PORT_ID = uuidutils.generate_uuid()
VIP_NETWORK_ID = uuidutils.generate_uuid()
VIP_QOS_ID = uuidutils.generate_uuid()
VIP_SG_ID = uuidutils.generate_uuid()
VIP_SUBNET_ID = uuidutils.generate_uuid()
VIP_IP_ADDRESS = '203.0.113.81'
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
vip_dict = {constants.IP_ADDRESS: VIP_IP_ADDRESS,
constants.NETWORK_ID: VIP_NETWORK_ID,
constants.QOS_POLICY_ID: VIP_QOS_ID,
constants.SUBNET_ID: VIP_SUBNET_ID}
port_mock = mock.MagicMock()
port_mock.id = PORT_ID
mock_driver.create_port.side_effect = [
port_mock, exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom'),
exceptions.OctaviaException('boom')]
mock_driver.delete_port.side_effect = [mock.DEFAULT, Exception('boom')]
net_task = network_tasks.CreateVIPBasePort()
# Limit the retry attempts for the test run to save time
net_task.execute.retry.stop = tenacity.stop_after_attempt(2)
# Test execute
result = net_task.execute(vip_dict, VIP_SG_ID, AMP_ID)
self.assertEqual(port_mock.to_dict(), result)
mock_driver.create_port.assert_called_once_with(
VIP_NETWORK_ID, name=constants.AMP_BASE_PORT_PREFIX + AMP_ID,
fixed_ips=[{constants.SUBNET_ID: VIP_SUBNET_ID}],
secondary_ips=[VIP_IP_ADDRESS], security_group_ids=[VIP_SG_ID],
qos_policy_id=VIP_QOS_ID)
# Test execute exception
mock_driver.reset_mock()
self.assertRaises(exceptions.OctaviaException, net_task.execute,
vip_dict, None, AMP_ID)
# Test revert when this task failed
mock_driver.reset_mock()
net_task.revert(failure.Failure.from_exception(Exception('boom')),
vip_dict, VIP_SG_ID, AMP_ID)
mock_driver.delete_port.assert_not_called()
# Test revert
mock_driver.reset_mock()
net_task.revert([port_mock], vip_dict, VIP_SG_ID, AMP_ID)
mock_driver.delete_port.assert_called_once_with(PORT_ID)
# Test revert exception
mock_driver.reset_mock()
net_task.revert([port_mock], vip_dict, VIP_SG_ID, AMP_ID)
mock_driver.delete_port.assert_called_once_with(PORT_ID)
@mock.patch('time.sleep')
def test_admin_down_port(self, mock_sleep, mock_get_net_driver):
PORT_ID = uuidutils.generate_uuid()
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
port_down_mock = mock.MagicMock()
port_down_mock.status = constants.DOWN
port_up_mock = mock.MagicMock()
port_up_mock.status = constants.UP
mock_driver.set_port_admin_state_up.side_effect = [
mock.DEFAULT, net_base.PortNotFound, mock.DEFAULT, mock.DEFAULT,
Exception('boom')]
mock_driver.get_port.side_effect = [port_down_mock, port_up_mock]
net_task = network_tasks.AdminDownPort()
# Test execute
net_task.execute(PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
False)
mock_driver.get_port.assert_called_once_with(PORT_ID)
# Test passive fail on port not found
mock_driver.reset_mock()
net_task.execute(PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
False)
mock_driver.get_port.assert_not_called()
# Test passive fail on port stays up
mock_driver.reset_mock()
net_task.execute(PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
False)
mock_driver.get_port.assert_called_once_with(PORT_ID)
# Test revert when this task failed
mock_driver.reset_mock()
net_task.revert(failure.Failure.from_exception(Exception('boom')),
PORT_ID)
mock_driver.set_port_admin_state_up.assert_not_called()
# Test revert
mock_driver.reset_mock()
net_task.revert(None, PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
True)
# Test revert exception passive failure
mock_driver.reset_mock()
net_task.revert(None, PORT_ID)
mock_driver.set_port_admin_state_up.assert_called_once_with(PORT_ID,
True)
@mock.patch('octavia.common.utils.get_vip_security_group_name')
def test_get_vip_security_group_id(self, mock_get_sg_name,
mock_get_net_driver):
LB_ID = uuidutils.generate_uuid()
SG_ID = uuidutils.generate_uuid()
SG_NAME = 'fake_SG_name'
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
mock_get_sg_name.return_value = SG_NAME
sg_mock = mock.MagicMock()
sg_mock.id = SG_ID
mock_driver.get_security_group.side_effect = [
sg_mock, None, net_base.SecurityGroupNotFound,
net_base.SecurityGroupNotFound]
net_task = network_tasks.GetVIPSecurityGroupID()
# Test execute
result = net_task.execute(LB_ID)
mock_driver.get_security_group.assert_called_once_with(SG_NAME)
mock_get_sg_name.assert_called_once_with(LB_ID)
# Test execute with empty get subnet response
mock_driver.reset_mock()
mock_get_sg_name.reset_mock()
result = net_task.execute(LB_ID)
self.assertIsNone(result)
mock_get_sg_name.assert_called_once_with(LB_ID)
# Test execute no security group found, security groups enabled
mock_driver.reset_mock()
mock_get_sg_name.reset_mock()
mock_driver.sec_grp_enabled = True
self.assertRaises(net_base.SecurityGroupNotFound, net_task.execute,
LB_ID)
mock_driver.get_security_group.assert_called_once_with(SG_NAME)
mock_get_sg_name.assert_called_once_with(LB_ID)
# Test execute no security group found, security groups disabled
mock_driver.reset_mock()
mock_get_sg_name.reset_mock()
mock_driver.sec_grp_enabled = False
result = net_task.execute(LB_ID)
self.assertIsNone(result)
mock_driver.get_security_group.assert_called_once_with(SG_NAME)
mock_get_sg_name.assert_called_once_with(LB_ID)
|
|
import itertools
import functools
import six
from flex._compat import Mapping, Sequence
from flex.exceptions import (
ValidationError,
ErrorList,
ErrorDict,
)
from flex.error_messages import MESSAGES
from flex.constants import (
ARRAY,
OBJECT,
)
from flex.decorators import skip_if_not_of_type
from flex.validation.reference import (
LazyReferenceValidator,
)
from flex.validation.common import (
noop,
skip_if_empty,
generate_type_validator,
generate_format_validator,
generate_multiple_of_validator,
generate_minimum_validator,
generate_maximum_validator,
generate_min_length_validator,
generate_max_length_validator,
generate_min_items_validator,
generate_max_items_validator,
generate_unique_items_validator,
generate_pattern_validator,
generate_enum_validator,
validate_object,
generate_object_validator,
generate_allof_validator,
generate_anyof_validator,
)
from flex.datastructures import (
ValidationDict,
)
@skip_if_empty
@skip_if_not_of_type(OBJECT)
def validate_required(value, required_fields, **kwargs):
with ErrorDict() as errors:
for key in required_fields:
if key not in value:
errors.add_error(key, MESSAGES['required']['required'])
def generate_required_validator(required, **kwargs):
if required:
return functools.partial(
validate_required,
required_fields=required,
)
else:
return noop
@skip_if_empty
@skip_if_not_of_type(OBJECT)
def validate_min_properties(value, minimum, **kwargs):
if len(value.keys()) < minimum:
raise ValidationError(
MESSAGES['min_properties']['invalid'].format(
minimum, len(value.keys()),
),
)
def generate_min_properties_validator(minProperties, **kwargs):
return functools.partial(validate_min_properties, minimum=minProperties)
@skip_if_empty
@skip_if_not_of_type(OBJECT)
def validate_max_properties(value, maximum, **kwargs):
if len(value.keys()) > maximum:
raise ValidationError(
MESSAGES['max_properties']['invalid'].format(
maximum, len(value.keys()),
),
)
def generate_max_properties_validator(maxProperties, **kwargs):
return functools.partial(validate_max_properties, maximum=maxProperties)
def construct_items_validators(items, context):
if isinstance(items, Mapping):
items_validators = construct_schema_validators(
schema=items,
context=context,
)
elif isinstance(items, six.string_types):
items_validators = {
'$ref': SchemaReferenceValidator(items, context),
}
else:
        raise AssertionError('Should not be possible')
return items_validators
@skip_if_not_of_type(ARRAY)
@skip_if_empty
def validate_items(objs, field_validators, **kwargs):
errors = ErrorList()
for obj, _field_validators in zip(objs, field_validators):
try:
validate_object(
obj,
field_validators=_field_validators,
**kwargs
)
except ValidationError as e:
errors.add_error(e.detail)
if errors:
raise ValidationError(errors)
def generate_items_validator(items, context, **kwargs):
if isinstance(items, Mapping):
# If items is a reference or a schema, we pass it through as an
# ever repeating list of the same validation dictionary, thus
# validating all of the objects against the same schema.
items_validators = itertools.repeat(construct_items_validators(
items,
context,
))
elif isinstance(items, Sequence):
# We generate a list of validator dictionaries and then chain it
# with an empty schema that repeats forever. This ensures that if
# the array of objects to be validated is longer than the array of
# validators, then the extra elements will always validate since
# they will be validated against an empty schema.
items_validators = itertools.chain(
map(functools.partial(construct_items_validators, context=context), items),
itertools.repeat({}),
)
else:
assert "Should not be possible"
return functools.partial(
validate_items, field_validators=items_validators,
)
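# --- Added illustrative sketch (not part of the original flex module) ---
# The two branches above build an iterable of per-position validator dicts:
# itertools.repeat() for a single schema, itertools.chain(..., repeat({})) for
# a tuple-style schema. The helper below shows, with made-up stand-in
# callables instead of real flex validators, how zip() pairs that iterable
# with the array items the way validate_items does; it relies only on the
# module-level itertools import and is never called by this module.
def _items_pairing_sketch():
    def is_int(value):
        return isinstance(value, int)

    def is_str(value):
        return isinstance(value, str)

    # Single-schema case: every item is checked against the same validator set.
    repeated = itertools.repeat({'type': is_int})
    uniform_ok = all(
        check(obj)
        for obj, validators in zip([1, 2, 3], repeated)
        for check in validators.values()
    )
    # Tuple-schema case: positional validator sets, padded with empty dicts so
    # any extra items always pass.
    chained = itertools.chain(
        [{'type': is_int}, {'type': is_str}],
        itertools.repeat({}),
    )
    per_position_ok = all(
        check(obj)
        for obj, validators in zip([1, 'a', 'anything extra'], chained)
        for check in validators.values()
    )
    return uniform_ok, per_position_ok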
@skip_if_not_of_type(OBJECT)
@skip_if_empty
def validate_additional_properties(obj, additional_properties, properties, **kwargs):
if additional_properties is False:
allowed_properties = set(properties.keys())
actual_properties = set(obj.keys())
extra_properties = actual_properties.difference(allowed_properties)
if extra_properties:
raise ValidationError(
MESSAGES['additional_properties']['extra_properties'].format(
repr(extra_properties),
)
)
def generate_additional_properties_validator(additionalProperties, properties, **kwargs):
return functools.partial(
validate_additional_properties,
additional_properties=additionalProperties,
properties=properties,
)
validator_mapping = {
'type': generate_type_validator,
'multipleOf': generate_multiple_of_validator,
'minimum': generate_minimum_validator,
'maximum': generate_maximum_validator,
'minLength': generate_min_length_validator,
'maxLength': generate_max_length_validator,
'minItems': generate_min_items_validator,
'maxItems': generate_max_items_validator,
'uniqueItems': generate_unique_items_validator,
'enum': generate_enum_validator,
'minProperties': generate_min_properties_validator,
'maxProperties': generate_max_properties_validator,
'pattern': generate_pattern_validator,
'format': generate_format_validator,
'required': generate_required_validator,
'items': generate_items_validator,
'allOf': generate_allof_validator,
'anyOf': generate_anyof_validator,
}
def construct_schema_validators(schema, context):
"""
Given a schema object, construct a dictionary of validators needed to
validate a response matching the given schema.
Special Cases:
- $ref:
These validators need to be Lazily evaluating so that circular
validation dependencies do not result in an infinitely deep
validation chain.
- properties:
These validators are meant to apply to properties of the object
being validated rather than the object itself. In this case, we
need recurse back into this function to generate a dictionary of
validators for the property.
"""
validators = ValidationDict()
if '$ref' in schema:
validators.add_validator(
'$ref', SchemaReferenceValidator(schema['$ref'], context),
)
if 'properties' in schema:
for property_, property_schema in schema['properties'].items():
property_validator = generate_object_validator(
schema=property_schema,
context=context,
)
validators.add_property_validator(property_, property_validator)
if schema.get('additionalProperties') is False:
validators.add_validator(
'additionalProperties',
generate_additional_properties_validator(context=context, **schema),
)
assert 'context' not in schema
for key in schema:
if key in validator_mapping:
validators.add_validator(key, validator_mapping[key](context=context, **schema))
return validators
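# --- Added illustrative sketch (not part of the original flex module) ---
# A minimal, hedged example of construct_schema_validators on an inline
# schema with no '$ref' or 'properties'. The schema content and the empty
# context are made up, and it assumes the returned ValidationDict behaves
# like an ordinary mapping; it is a sketch of the intended usage, not a
# definitive API example, and is never called by this module.
def _construct_validators_sketch():
    schema = {'type': 'string', 'minLength': 2, 'maxLength': 5}
    validators = construct_schema_validators(schema, context={})
    # Expect one validator per recognised keyword in ``validator_mapping``.
    return sorted(validators.keys())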
class SchemaReferenceValidator(LazyReferenceValidator):
"""
This class acts as a lazy validator for references in schemas to prevent an
infinite recursion error when a schema references itself, or there is a
reference loop between more than one schema.
The validator is only constructed if validator is needed.
"""
validators_constructor = construct_schema_validators
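# --- Added illustrative note (not part of the original flex module) ---
# A hypothetical self-referencing schema of the shape below is the case this
# lazy validator exists for: resolving '#/definitions/node' while the
# validators are being constructed would recurse without bound, so the
# reference is only followed once an actual value is validated.
#
#     node_schema = {
#         'type': 'object',
#         'properties': {
#             'children': {'type': 'array',
#                          'items': {'$ref': '#/definitions/node'}},
#         },
#     }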
|
|
import math
import numpy as np
import copy
import random
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import pylab as pl
def cart2pol(x, y):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi)
def cart2pillar(x, y,nbPillar):
rho = np.sqrt(x**2 + y**2)
phi = np.arctan2(y, x)
return(rho, phi / (2*math.pi) * nbPillar)
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return(x, y)
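# Added check (illustrative): cart2pol and pol2cart are inverses, so a point
# survives a round trip up to floating point error.
def _polar_round_trip_example(x=3.0, y=4.0):
    rho, phi = cart2pol(x, y)   # rho == 5.0, phi == atan2(4, 3)
    x2, y2 = pol2cart(rho, phi)
    return abs(x2 - x) < 1e-9 and abs(y2 - y) < 1e-9  # True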
def circleAroundPillar(rc,theta, rp,dir):
out = []
up = 1.0
down = 0.0
initangle = math.pi + theta
nbsteps = 20
dt = 2.0*math.pi / (nbsteps)
for i in range(nbsteps+1):
ang2 = initangle+ dir*dt*i
pc = (rc * math.cos( theta ),rc*math.sin(theta))
        if float(i) / nbsteps > 0.9 or float(i) / nbsteps < 0.1:
z = up
else:
z = down
out.append( np.array([ pc[0]+ rp*math.cos(ang2) , pc[1] + rp* math.sin(ang2),z]) )
return out
def getPillarAngle( pillar, nbPillar ):
return 2.0*math.pi / (nbPillar) * ( pillar + 0.5)
def generateTrajectoryBetweenPillar(pillar,nbPillar,rc,rp,dir):
#we go to the inner circle middle angle
angle = (getPillarAngle(pillar,nbPillar) + getPillarAngle(pillar-1,nbPillar) ) / 2.0
out = []
pin = ( (rc-dir*rp)*math.cos(angle),(rc-dir*rp)*math.sin(angle) )
pullout = ( (rc+dir*1.5*rp)*math.cos(angle),(rc+dir*1.5*rp)*math.sin(angle) )
pout = ( (rc+dir*rp)*math.cos(angle),(rc+dir*rp)*math.sin(angle) )
up = 3.0
mid = 0.0
down = -1.0
if dir > 0.0 :
out.append( np.array( [pin[0],pin[1],up] ) )
out.append(np.array([pin[0], pin[1], mid]))
#out.append(np.array([pin[0], pin[1], up]))
#out.append(np.array([pout[0], pout[1], up]))
out.append(np.array([pout[0], pout[1], down]))
else:
out.append(np.array([pin[0], pin[1], down]))
#out.append(np.array([pin[0], pin[1], up]))
out.append(np.array([pout[0], pout[1], mid]))
out.append(np.array([pout[0], pout[1], down]))
out.append(np.array([pullout[0], pullout[1], down]))
out.append(np.array([pullout[0], pullout[1], up]))
#out.append(np.array([pout[0], pout[1], up]))
return out
def generateOutMoveTrajectory(pillar1, pillar2, nbPillar,rc,rp,dir):
print "out move from " + str(pillar1) + " to " + str(pillar2)
angle = (getPillarAngle(pillar1, nbPillar) + getPillarAngle(pillar1 - 1, nbPillar)) / 2.0
targetangle = (getPillarAngle(pillar2, nbPillar) + getPillarAngle(pillar2 - 1, nbPillar)) / 2.0
while 0 < dir * ( angle - targetangle ):
targetangle = targetangle + dir* 2.0*math.pi
dangle = dir*2.5 * math.pi / 180.0
out = []
while dir * (angle - targetangle) < 0:
p = ((rc + rp) * math.cos(angle), (rc + rp) * math.sin(angle))
out.append( np.array( [ p[0],p[1], -1.0 ] ) )
angle += dangle
p = ((rc + rp) * math.cos(targetangle), (rc + rp) * math.sin(targetangle))
out.append(np.array([p[0], p[1], -1.0]))
return out
def hookPillar( pillar, nbPillar, rc,rp):
out = []
out.extend( generateTrajectoryBetweenPillar( pillar,nbPillar,rc,rp, 1.0) )
out.extend( generateOutMoveTrajectory( pillar, pillar-1, nbPillar,rc,rp,-1.0) )
out.extend( generateTrajectoryBetweenPillar( pillar-1,nbPillar,rc,rp,-1.0) )
return out
def jumpPillar( pillar1, pillar2, nbPillar,rc,rp,dir):
out = []
out.extend( generateTrajectoryBetweenPillar( pillar1,nbPillar,rc,rp, 1.0) )
out.extend( generateOutMoveTrajectory(pillar1, pillar2 , nbPillar, rc, rp, dir))
out.extend( generateTrajectoryBetweenPillar(pillar2, nbPillar, rc, rp, -1.0))
return out
#common rp is 4mm
def generateTrajectory( pillarseq, nbPillar,rc, rp):
out = [ np.array( [rc,0.0,0.0] ), np.array( [rc-rp,0.0,0.0] ) ]
for ind in pillarseq:
out.extend( circleAroundPillar(rc,getPillarAngle( ind, nbPillar),rp,1.0 ))
out.append( np.array( [rc-rp,0.0,1.0] ) )
out.append(np.array( [rc, 0.0, 0.0]) )
return out
def writeGcode(traj,outname):
with open(outname,"w") as f:
f.write("G91\n")
f.write("G21 ; set units to millimeter\n")
#f.write("M204 S900;\n")
f.write("G00;\n")
for i in range(1,len(traj) ):
diff = traj[i]-traj[i-1]
print diff
if math.fabs(diff[2])> 1e-6 :
f.write(str.format( "G00 X{0:.3f} Y{1:.3f} Z{2:.3f}\n", diff[0],diff[1],diff[2]))
else:
f.write(str.format("G00 X{0:.3f} Y{1:.3f}\n", diff[0], diff[1]))
def removeEdgeIfWeighIs0( g, n1,n2):
if (g[n1][n2] == 0):
if (len(g[n1]) == 1):
g.pop(n1)
else:
g[n1].pop(n2)
def decreaseWeightEdge( g, n1,n2):
g[n1][n2] = g[n1][n2] - 1
g[n2][n1] = g[n2][n1] - 1
removeEdgeIfWeighIs0(g,n1,n2)
removeEdgeIfWeighIs0(g,n2,n1)
def addEdge( g, n1,n2, weight):
if n1 in g:
if n2 in g[n1]:
g[n1][n2] = g[n1][n2] + weight
else:
g[n1][n2] = weight
else:
g[n1] = {n2:weight}
def addUndirectedEdge( g, n1,n2, weight):
addEdge(g,n1,n2,weight)
addEdge(g,n2,n1,weight)
def orcircleDist( p1,p2, nbPillars):
if p2 - p1 >= 0:
return p2-p1
else:
return nbPillars+p2-p1
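# Added examples: orcircleDist is the oriented, wrap-around distance measured
# in the increasing-pillar direction, so it is not symmetric. On a 150-pillar
# circle, 140 -> 5 is 15 steps while 5 -> 140 is 135 steps.
def _orcircle_dist_examples():
    return orcircleDist(140, 5, 150), orcircleDist(5, 140, 150)  # (15, 135)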
def generateGraphTrajectory( graph, nbPillar, rc,rp ):
out = [np.array([rc, 0.0, 0.0]), np.array([rc - rp, 0.0, 0.0])]
    # as long as there are edges left in the graph
d2 = copy.deepcopy(graph)
cur = 0
while len(d2) > 0:
prevcur = cur
keys = np.array( d2.keys() )
dist = np.array( [ orcircleDist(prevcur,p,nbPillar) for p in keys ] )
nextind = np.argmin(dist)
cur = keys[nextind]
out.extend( jumpPillar(prevcur,cur,nbPillar,rc,rp,1.0))
print "exterior move to : " + str(cur)
while cur in d2:
print cur
nextcur = d2[cur].iterkeys().next()
out.extend( hookPillar(nextcur,nbPillar,rc,rp))
#print (cur,nextcur)
decreaseWeightEdge(d2,cur,nextcur)
cur = nextcur
print cur
out.extend(hookPillar(nextcur, nbPillar, rc, rp))
out.extend(jumpPillar(cur, 0, nbPillar, rc, rp, 1))
out.append( np.array([rc - rp, 0.0, 0.0]) )
out.append(np.array([rc , 0.0, 0.0]))
return out
def strictOrientedCircleDist( p1, p2, nbPillars, dir, hookPenalty):
maxValue = 10000.0
if( p1 % nbPillars == p2 % nbPillars ):
return maxValue
out = dir*(p2-p1)
if out < 0:
out = out + nbPillars
if out < 0:
print "out negative"
print (out, p1, p2, dir)
if p1%2 == p2%2:
return out +hookPenalty
return out
def nextdir(pos):
if pos % 2 == 0:
return 1.0
else:
return -1.0
def PickNode1( curPosState, d2,nbPillar,hookPenalty ):
keys = d2.keys()
dist = np.array([strictOrientedCircleDist(curPosState, p, 2*nbPillar,nextdir( curPosState ),hookPenalty) for p in keys])
nextind = np.argmin(dist)
print( str.format("next node1 : {0} dist : {1}",keys[nextind],dist[nextind]))
#print keys
#print dist
return keys[nextind]
def generateGraphTrajectory2( graph, nbPillar, rc, rp):
out = [np.array([rc, 0.0, 0.0]), np.array([rc + rp, 0.0, 0.0])]
    # as long as there are edges left in the graph
d2 = copy.deepcopy(graph)
#Be careful of the constraint on nextdir which should
curPosState = 1
while len(d2) > 0:
#pick node 1 -> GO TO OUT 1; GO TO IN 1 FROM current posion state
node1 = PickNode1(curPosState,d2,nbPillar,50)
print "node1 :" + str( node1 )
if( node1 %2 != curPosState %2):
#we don't have to hook
hookpillar = (node1+1) / 2
out.extend( generateOutMoveTrajectory((curPosState+1)/2, hookpillar , nbPillar, rc, rp, nextdir( curPosState )) )
out.extend( generateTrajectoryBetweenPillar(hookpillar,nbPillar,rc,rp, -1.0 ))
else:
#we need to hook the pillar
hookpillar = (node1+1) /2 + int( nextdir(curPosState))
outhookpillar = (node1+1) /2
print "outhookpillar :" + str( outhookpillar )
out.extend(generateOutMoveTrajectory((curPosState+1)/2, outhookpillar, nbPillar, rc, rp, nextdir( curPosState) ))
out.extend( generateTrajectoryBetweenPillar(outhookpillar, nbPillar, rc, rp, -1.0))
out.extend( generateTrajectoryBetweenPillar(hookpillar, nbPillar, rc, rp, 1.0))
out.extend( generateTrajectoryBetweenPillar(outhookpillar, nbPillar, rc, rp, -1.0))
#pick node 2 -> GO TO IN 2 ;GO TO OUT 2
#node2 = random.choice( d2[node1].keys() )
node2 = d2[node1].keys()[0]
print "node2 :" + str(node2)
decreaseWeightEdge(d2,node1,node2)
out.extend( generateTrajectoryBetweenPillar((node2+1) / 2 , nbPillar, rc, rp, 1.0) )
#update current position state: pos = node2 if node2 % 2 == 0 nextdir = 1.0 else nextdir = -1.0
curPosState = node2
out.extend( generateOutMoveTrajectory((curPosState+1)/2, 0, nbPillar, rc, rp, nextdir( curPosState ) ) )
out.append(np.array([rc + rp, 0.0, 0.0]))
out.append(np.array([rc, 0.0, 0.0]))
return np.stack(out)
def lengthOfWireNeeded( traj ):
out = 0
for i in range( 1, len(traj) ):
dist2 = math.pow(traj[i,0] - traj[i-1,0],2.0)+math.pow(traj[i,1] - traj[i-1,1],2.0)
out = out + math.sqrt(dist2)
return out
def testGraph():
g = {}
addUndirectedEdge(g, 0, 30, 3)
addUndirectedEdge(g, 30, 60, 3)
addUndirectedEdge(g, 60, 90, 3)
addUndirectedEdge(g, 0, 60, 1)
return g
def uppillar(p):
return 2*p+1
def downpillar(p):
return 2*p
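# Note (added): the graph routines in this file address each physical pin
# twice -- node 2*p ("downpillar") and node 2*p+1 ("uppillar") are the two
# sides of pin p. This is why PickNode1 works modulo 2*nbPillar and why
# nextdir() flips the travel direction based on node parity.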
def testGraph2():
g = {}
addUndirectedEdge(g, downpillar(0), uppillar(30), 1)
addUndirectedEdge(g, downpillar(30), uppillar(80), 1)
addUndirectedEdge(g, downpillar(80), uppillar(90), 1)
addUndirectedEdge(g, downpillar(90), uppillar(140), 1)
addUndirectedEdge(g, downpillar(140), uppillar(0), 1)
return g
def circleDist( p1,p2,nbPillars):
return min( abs(p1-p2), nbPillars-abs(p1-p2) )
def brokenPins():
return {149:True,1:True,13:True,14:True, 15:True,28:True,131:True,60:True}
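# Note (added, inferred from the parsing in loadGraphFromFile below): the input
# file starts with a line holding the number of pins, followed by that many
# coordinate lines, one separator line, and then one edge per line in the form
# "<node1> <node2> <weight>". Edges touching a broken pin (node/2) or whose
# endpoints are within `apartdist` of each other on the circle are dropped.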
def loadGraphFromFile( fileName ,brokenPin,apartdist, nbEdges ):
with open(fileName)as f:
g = {}
l = int( next(f) )
coords = []
edges = []
for i in range(l):
line = next(f)
coords.append( [ float(x) for x in line.split() ])
next(f)
for line in f:
s = line.split()
spl = [int(s[0]),int(s[1]),float(s[2]) ]
if( spl[0]/2 in brokenPin or spl[1]/2 in brokenPin):
continue
if( circleDist( spl[0],spl[1], l) > apartdist ):
w = math.pow( spl[2],1.0)
#print (spl[0],spl[1], w)
edges.append((spl[0],spl[1],w))
if( nbEdges > 0):
p = np.array( [e[2] for e in edges],dtype='float32' )
p = p / p.sum()
sel = np.random.choice(len(edges),nbEdges,True,p)
sampledEdges = []
for i in sel:
sampledEdges.append(edges[i])
else:
sampledEdges = edges
print "number of edges : " + str(len(sampledEdges))
for e in sampledEdges:
addUndirectedEdge(g, e[0], e[1], 1)
return (l,g)
def displayTraj(traj):
nptraj = np.stack(traj)
plt.plot( nptraj[:,0],nptraj[:,1])
plt.show()
def pillar2tocart( i,n,rc ):
tanangle = math.atan( 1.0/n)
if i%2 == 0:
angle = getPillarAngle(i/2,n) - tanangle
else:
angle = getPillarAngle(i/2,n) + tanangle
return (rc*math.cos(angle),rc*math.sin(angle))
def displayGraph2( nbPillar,g,r, lw):
x1 = []
x2 = []
y1 = []
y2 = []
lines = []
c = []
d2 = copy.deepcopy(g)
while len(d2) > 0:
n1 = d2.keys()[0]
n2 = d2[n1].keys()[0]
c1 = pillar2tocart(n1,nbPillar,r)
c2 = pillar2tocart(n2,nbPillar,r)
#x1.append(c1[0])
#x2.append(c2[0])
#y1.append(c1[1])
#y2.append(c2[1])
lines.append(((c1[0],-c1[1]),(c2[0],-c2[1])))
c.append( (0,0,0,1) )
decreaseWeightEdge(d2,n1,n2)
#lines = plt.plot( np.stack(x1),np.stack(y1),np.stack(x2),np.stack(y2))
#plt.setp(lines, color='white', linewidth=1.0)
#plt.gca().set_axis_bgcolor('black')
lc = mc.LineCollection(lines,colors=np.array(c) ,linewidths=lw)
fig, ax = pl.subplots()
ax.add_collection(lc)
ax.autoscale()
ax.margins(0.1)
fig.show()
#plt.show()
def testSeq(n,dk):
out = []
dict = {}
k = 0
while k not in dict:
out.append( k )
dict[k] = 1
k = (k + dk) % n
out.append(k)
    return out
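# Added example: testSeq walks the indices 0, dk, 2*dk, ... modulo n and stops
# as soon as it revisits an index, appending that index once more to close the
# loop, e.g. testSeq(5, 2) -> [0, 2, 4, 1, 3, 0].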
|
|
#!/usr/bin/env python
'''
left elbow is in upper arm controller
Created March, 2012
@author: Peter Heim
r_shoulder.py - gateway to Arduino based arm controller
Copyright (c) 2011 Peter Heim. All right reserved.
Borrowed heavily from Mike Feguson's ArbotiX base_controller.py code.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Vanadium Labs LLC nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL VANADIUM LABS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import rospy
import tf
import math
from math import sin, cos, pi, radians, degrees
import sys
import time
from std_msgs.msg import String
from std_msgs.msg import Float64, Float32
from dynamixel_msgs.msg import MotorState
from dynamixel_msgs.msg import JointState
#from sensor_msgs.msg import JointState
from SerialDataGateway import SerialDataGateway
class R_shoulder(object):
'''
Helper class for communicating with an R_shoulder board over serial port
'''
def _HandleReceivedLine(self, line):
self._Counter = self._Counter + 1
#rospy.logwarn(str(self._Counter) + " " + line)
#if (self._Counter % 50 == 0):
self._SerialPublisher.publish(String(str(self._Counter) + ", in: " + line))
if (len(line) > 0):
lineParts = line.split('\t')
if (lineParts[0] == 'p5'):
self._BroadcastJointStateinfo_P5(lineParts)
return
if (lineParts[0] == 'p6'):
self._BroadcastJointStateinfo_P6(lineParts)
return
if (lineParts[0] == 'p7'):
self._BroadcastJointStateinfo_P7(lineParts)
return
if (lineParts[0] == 'p8'):
self._BroadcastJointStateinfo_P8(lineParts)
return
def _BroadcastJointStateinfo_P5(self, lineParts):
partsCount = len(lineParts)
#rospy.logwarn(partsCount)
if (partsCount < 7):
            # Malformed/short telemetry line; skip it.
            return
try:
P1 = radians(float(lineParts[1]))
P2 = self.right_rotate #0-((float(lineParts[2])* 0.00174532925)-1.57)
P3 = float(lineParts[3])
P4 = 0
val = [P1, P2, P3, P4]
Motor_State = MotorState()
Motor_State.id = 11
Motor_State.goal = P2
Motor_State.position = P1
Motor_State.speed = P4
Motor_State.load = P3
Motor_State.moving = 0
Motor_State.timestamp = time.time()
self.P5_MotorPublisher.publish(Motor_State)
#rospy.logwarn(Motor_State)
self._right_rotate_Publisher.publish(P1)
Joint_State = JointState()
Joint_State.name = "right_arm_rotate_joint"
Joint_State.goal_pos = P2
Joint_State.current_pos = P1
Joint_State.velocity = P4
Joint_State.load = P3
Joint_State.error = P1 - P2
Joint_State.is_moving = 0
Joint_State.header.stamp = rospy.Time.now()
self._P5_JointPublisher.publish(Joint_State)
#rospy.logwarn(val)
except:
rospy.logwarn("Unexpected error:right_arm_rotate_joint" + str(sys.exc_info()[0]))
def _BroadcastJointStateinfo_P6(self, lineParts):
partsCount = len(lineParts)
#rospy.logwarn(partsCount)
if (partsCount < 7):
            # Malformed/short telemetry line; skip it.
            return
try:
P1 = 0 - (radians(float(lineParts[1])))
P2 = self.left_rotate #0-((float(lineParts[2])* 0.00174532925)-1.57)
P3 = float(lineParts[3])
P4 = 0
val = [P1, P2, P3, P4]
Motor_State = MotorState()
Motor_State.id = 11
Motor_State.goal = P2
Motor_State.position = P1
Motor_State.speed = P4
Motor_State.load = P3
Motor_State.moving = 0
Motor_State.timestamp = time.time()
self.P6_MotorPublisher.publish(Motor_State)
self._left_rotate_Publisher.publish(P1)
Joint_State = JointState()
Joint_State.name = "left_arm_rotate_joint"
Joint_State.goal_pos = P2
Joint_State.current_pos = P1
Joint_State.velocity = P4
Joint_State.load = P3
Joint_State.error = P1 - P2
Joint_State.is_moving = 0
Joint_State.header.stamp = rospy.Time.now()
self._P6_JointPublisher.publish(Joint_State)
#rospy.logwarn(val)
except:
rospy.logwarn("Unexpected error:left_arm_rotate_joint" + str(sys.exc_info()[0]))
def _BroadcastJointStateinfo_P7(self, lineParts):
partsCount = len(lineParts)
#rospy.logwarn(partsCount)
if (partsCount < 5):
            # Malformed/short telemetry line; skip it.
            return
try:
#P1 = 0-(radians(float(lineParts[1])))/10
P1 = radians(float(lineParts[1]))
P2 = self.right_elbow #0-((float(lineParts[2])* 0.00174532925)-0.67)
P3 = float(lineParts[3])
P4 = 0
val = [P1, P2, P3, P4]
Motor_State = MotorState()
Motor_State.id = 11
Motor_State.goal = P2
Motor_State.position = P1
Motor_State.speed = P4
Motor_State.load = P3
Motor_State.moving = 0
Motor_State.timestamp = time.time()
self.P7_MotorPublisher.publish(Motor_State)
self._right_elbow_Publisher.publish(P1)
Joint_State = JointState()
Joint_State.name = "right_arm_elbow_joint"
Joint_State.goal_pos = P2
Joint_State.current_pos = P1
Joint_State.velocity = P4
Joint_State.load = P3
Joint_State.error = P1 - P2
Joint_State.is_moving = 0
Joint_State.header.stamp = rospy.Time.now()
self._P7_JointPublisher.publish(Joint_State)
#rospy.logwarn(val)
except:
rospy.logwarn("Unexpected error:right_arm_elbow_joint" + str(sys.exc_info()[0]))
def _BroadcastJointStateinfo_P8(self, lineParts):
partsCount = len(lineParts)
#rospy.logwarn(lineParts)
if (partsCount < 4):
            # Malformed/short telemetry line; skip it.
            return
try:
P1 = radians(float(lineParts[1]))#P1 = radians(float(lineParts[1]))
P2 = self.pan
P3 = 0#float(lineParts[3])
P4 = 0
val = [P1, P2, P3, P4]
Motor_State = MotorState()
Motor_State.id = 11
Motor_State.goal = P2
Motor_State.position = P1
Motor_State.speed = P4
Motor_State.load = P3
Motor_State.moving = 0
Motor_State.timestamp = time.time()
self.P8_MotorPublisher.publish(Motor_State)
self._pan_Publisher.publish(P1)
Joint_State = JointState()
Joint_State.name = "pan_joint"
Joint_State.goal_pos = P2
Joint_State.current_pos = P1
Joint_State.velocity = P4
Joint_State.load = P3
Joint_State.error = P1 - P2
Joint_State.is_moving = 0
Joint_State.header.stamp = rospy.Time.now()
self._P8_JointPublisher.publish(Joint_State)
#rospy.logwarn(val)
except:
pass#rospy.logwarn("Unexpected error:pan_joint" + str(sys.exc_info()[0]))
def _WriteSerial(self, message):
self._SerialPublisher.publish(String(str(self._Counter) + ", out: " + message))
self._SerialDataGateway.Write(message)
    def __init__(self):
'''
Initializes the receiver class.
port: The serial port to listen to.
baudrate: Baud rate for the serial communication
'''
#port = rospy.get_param("~port", "/dev/ttyACM0")
#baud = int(rospy.get_param("~baud", "115200"))
#self.name = name
self.rate = rospy.get_param("~rate", 100.0)
self.fake = rospy.get_param("~sim", False)
self.cal_pan = rospy.get_param("~cal_pan", 0)
self.cal_tilt = rospy.get_param("~cal_tilt", 0)
self.cal_lift = rospy.get_param("~cal_lift", 0)
self.cal_rotate = rospy.get_param("~cal_rotate", 0)
self.cal_elbow = rospy.get_param("~cal_elbow", 0)
self.right_rotate = 0
self.left_rotate = 0
self.right_elbow = 0
self.pan = 0
#name = rospy.get_param("~name")
self._Counter = 0
rospy.init_node('lower_arms')
port = rospy.get_param("~port", "/dev/ttyACM0")
baudRate = int(rospy.get_param("~baudRate", 115200))
rospy.logwarn("Starting lower arms with serial port: " + port + ", baud rate: " + str(baudRate))
# subscriptions
rospy.Subscriber('right_arm_rotate_joint/command',Float64, self._HandleJoint_5_Command)
rospy.Subscriber('left_arm_rotate_joint/command',Float64, self._HandleJoint_6_Command)
rospy.Subscriber('right_arm_elbow_joint/command',Float64, self._HandleJoint_7_Command)
rospy.Subscriber('pan_joint/command',Float64, self._HandleJoint_8_Command)
self._SerialPublisher = rospy.Publisher('arm_lower', String, queue_size=5)
self.P5_MotorPublisher = rospy.Publisher("/right_arm_rotate/motor_state", MotorState, queue_size=5)
self.P6_MotorPublisher = rospy.Publisher("/left_arm_rotate/motor_state", MotorState, queue_size=5)
self.P7_MotorPublisher = rospy.Publisher("/right_arm_elbow/motor_state", MotorState, queue_size=5)
#self.P8_MotorPublisher = rospy.Publisher("/left_arm_elbow/motor_state", MotorState, queue_size=5)
self.P8_MotorPublisher = rospy.Publisher("/pan/motor_state", MotorState, queue_size=5)
self._P5_JointPublisher = rospy.Publisher("/right_arm_rotate_joint/state", JointState, queue_size=5)
self._P6_JointPublisher = rospy.Publisher("/left_arm_rotate_joint/state", JointState, queue_size=5)
self._P7_JointPublisher = rospy.Publisher("/right_arm_elbow_joint/state", JointState, queue_size=5)
#self._P8_JointPublisher = rospy.Publisher("/left_arm_elbow_joint/state", JointState, queue_size=5)
self._P8_JointPublisher = rospy.Publisher("/pan_joint/state", JointState, queue_size=5)
self._right_rotate_Publisher = rospy.Publisher("right_rotate", Float32, queue_size=5)
self._right_elbow_Publisher = rospy.Publisher("right_elbow", Float32, queue_size=5)
self._left_rotate_Publisher = rospy.Publisher("left_rotate", Float32, queue_size=5)
#self._left_elbow_Publisher = rospy.Publisher("left_elbow", Float32, queue_size=5)
self._pan_Publisher = rospy.Publisher("pan", Float32, queue_size=5)
self._SerialDataGateway = SerialDataGateway(port, baudRate, self._HandleReceivedLine)
def Start(self):
rospy.loginfo("Starting start function")
self._SerialDataGateway.Start()
message = 'r \r'
self._WriteSerial(message)
def Stop(self):
rospy.loginfo("Stopping")
message = 'r \r'
self._WriteSerial(message)
        time.sleep(5)
self._SerialDataGateway.Stop()
def _HandleJoint_5_Command(self, Command):
""" Handle movement requests.
right_arm_rotate_joint
send message in degrees
"""
        v = Command.data  # angle request in radians
self.right_rotate = v
v1 =int(degrees(v))
message = 'j5 %d \r' % (v1)#% self._GetBaseAndExponents((v1)
rospy.logwarn("Sending right_arm_rotate_joint command: " + (message))
self._WriteSerial(message)
def _HandleJoint_6_Command(self, Command):
""" Handle movement requests.
left_arm_rotate_joint
send message in degrees
"""
        v = Command.data  # angle request in radians
self.left_rotate = v
v1 =0 -(int(degrees(v)))
message = 'j7 %d \r' % (v1)#% self._GetBaseAndExponents((v1)
rospy.logwarn("Sending left_arm_rotate_joint command : " + (message))
self._WriteSerial(message)
def _HandleJoint_7_Command(self, Command):
""" Handle movement requests.
right_arm_elbow_joint
send message in degrees
"""
        v = Command.data  # angle request in radians
self.right_elbow = v
v1 =int(degrees(v))
message = 'j6 %d \r' % (v1)#% self._GetBaseAndExponents((v1)
rospy.logwarn("Sending right_arm_elbow_joint command: " + (message))
self._WriteSerial(message)
def _HandleJoint_8_Command(self, Command):
""" Handle movement requests.
pan_joint
send message in degrees
"""
        v = Command.data  # angle request in radians
self.pan = v
v1 =int(degrees(v))
message = 'j8 %d \r' % (v1)#% self._GetBaseAndExponents((v1)
#rospy.logwarn("Sending pan_joint command: " + (message))
self._WriteSerial(message)
def _GetBaseAndExponent(self, floatValue, resolution=4):
'''
Converts a float into a tuple holding two integers:
The base, an integer with the number of digits equaling resolution.
The exponent indicating what the base needs to multiplied with to get
back the original float value with the specified resolution.
'''
if (floatValue == 0.0):
return (0, 0)
else:
exponent = int(1.0 + math.log10(abs(floatValue)))
multiplier = math.pow(10, resolution - exponent)
base = int(floatValue * multiplier)
return(base, exponent - resolution)
def _GetBaseAndExponents(self, floatValues, resolution=4):
'''
Converts a list or tuple of floats into a tuple holding two integers for each float:
The base, an integer with the number of digits equaling resolution.
The exponent indicating what the base needs to multiplied with to get
back the original float value with the specified resolution.
'''
baseAndExponents = []
for floatValue in floatValues:
baseAndExponent = self._GetBaseAndExponent(floatValue)
baseAndExponents.append(baseAndExponent[0])
baseAndExponents.append(baseAndExponent[1])
return tuple(baseAndExponents)
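# Hedged illustration of the base/exponent encoding implemented above, restated
# without the ROS scaffolding: 3.14159 at a resolution of 4 significant digits
# becomes (3141, -3), since 3141 * 10**-3 recovers 3.141.
def _base_and_exponent_example(value=3.14159, resolution=4):
    exponent = int(1.0 + math.log10(abs(value)))
    base = int(value * math.pow(10, resolution - exponent))
    return base, exponent - resolution  # (3141, -3)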
if __name__ == '__main__':
r_shoulder = R_shoulder()
try:
r_shoulder.Start()
rospy.spin()
except rospy.ROSInterruptException:
r_shoulder.Stop()
|
|
import logging
import os
import signal
import re
from tornado import gen
from delivery.models.db_models import StagingStatus
from delivery.exceptions import RunfolderNotFoundException, InvalidStatusException,\
ProjectNotFoundException, TooManyProjectsFound
from delivery.services.file_system_service import FileSystemService
log = logging.getLogger(__name__)
class StagingService(object):
"""
    Staging in this context means copying a directory or file to a separate directory before delivering it.
    This service handles that in an asynchronous way. Copying operations (right now powered by rsync) can be
    started, and their status monitored by querying the underlying database for their status.
"""
# TODO On initiation of a Staging service, restart any ongoing stagings
# since they should all have been killed.
# And if we do so we need to make sure that the Staging service
# acts as a singleton, look at:
# http://python-3-patterns-idioms-test.readthedocs.io/en/latest/Singleton.html
#
# Alternative suggestion from Steinar on how to solve the problem, which is probably better:
# "Do you mean to ensure that only one thread tries doing that at a time? An idea could
# be to take a database lock to ensure this, i.e. fetch all objects in the unfinished
# state, restart them, change the status and then commit, locking the sqlite database
# briefly while doing so (I think row level locking is limited in sqlite.)"
# / JD 20161111
def __init__(self,
staging_dir,
external_program_service,
staging_repo,
runfolder_repo,
project_dir_repo,
project_links_directory,
session_factory,
file_system_service = FileSystemService):
"""
Instantiate a new StagingService
:param staging_dir: the directory to which files/dirs should be staged
        :param external_program_service: an instance of ExternalProgramService
        :param staging_repo: an instance of DatabaseBasedStagingRepository
        :param runfolder_repo: an instance of FileSystemBasedRunfolderRepository
        :param project_dir_repo: an instance of GeneralProjectRepository
:param project_links_directory: a path to a directory where links will be created temporarily
before they are rsynced into staging (for batched deliveries etc)
:param session_factory: a factory method which can produce new sqlalchemy Session instances
"""
self.staging_dir = staging_dir
self.external_program_service = external_program_service
self.staging_repo = staging_repo
self.runfolder_repo = runfolder_repo
self.project_dir_repo = project_dir_repo
self.project_links_directory = project_links_directory
self.session_factory = session_factory
self.file_system_service = file_system_service
@staticmethod
@gen.coroutine
def _copy_dir(staging_order_id, external_program_service, session_factory, staging_repo):
"""
Copies the file or directory indicated by the staging order by calling the external_program_service.
It will attempt the copying and update the database with the status of the StagingOrder depending on the
outcome.
:param staging_order_id: The id of the staging order to execute
        :param external_program_service: An instance of ExternalProgramService
        :param session_factory: A factory method which can produce a new sqlalchemy Session instance
        :param staging_repo: An instance of DatabaseBasedStagingRepository
:return: None, only reports back through side-effects
"""
session = session_factory()
        # This is a somewhat hacky work-around for the fact that objects created in one
        # thread (and thus associated with that thread's session) cannot be accessed from
        # another thread, therefore the staging order is re-materialized here...
staging_order = staging_repo.get_staging_order_by_id(staging_order_id, session)
try:
staging_source_with_trailing_slash = staging_order.source + "/"
cmd = ['rsync', '--stats', '-r', '--copy-links', '--times',
staging_source_with_trailing_slash, staging_order.staging_target]
log.debug("Running rsync with command: {}".format(" ".join(cmd)))
execution = external_program_service.run(cmd)
staging_order.pid = execution.pid
session.commit()
execution_result = yield external_program_service.wait_for_execution(execution)
log.debug("Execution result: {}".format(execution_result))
if execution_result.status_code == 0:
# Parse the file size from the output of rsync stats:
# Total file size: 207,707,566 bytes
                match = re.search(r'Total file size: ([\d,]+) bytes',
execution_result.stdout,
re.MULTILINE)
size_of_transfer = match.group(1)
size_of_transfer = int(size_of_transfer.replace(",", ""))
staging_order.size = size_of_transfer
staging_order.status = StagingStatus.staging_successful
log.info("Successfully staged: {} to: {}".format(staging_order, staging_order.get_staging_path()))
else:
staging_order.status = StagingStatus.staging_failed
log.info("Failed in staging: {} because rsync returned exit code: {}".
format(staging_order, execution_result.status_code))
# TODO Better exception handling here...
except Exception as e:
staging_order.status = StagingStatus.staging_failed
log.info("Failed in staging: {} because this exception was logged: {}".
format(staging_order, e))
finally:
# Always commit the state change to the database
session.commit()
@gen.coroutine
def stage_order(self, stage_order):
"""
        Validate a staging order and hand off the actual staging to a separate thread.
:param stage_order: to stage
:return: None
"""
session = self.session_factory()
try:
if stage_order.status != StagingStatus.pending:
raise InvalidStatusException("Cannot start staging a delivery order with status: {}".
format(stage_order.status))
stage_order.status = StagingStatus.staging_in_progress
session.commit()
args_for_copy_dir = {"staging_order_id": stage_order.id,
"external_program_service": self.external_program_service,
"staging_repo": self.staging_repo,
"session_factory": self.session_factory}
if not self.file_system_service.exists(stage_order.staging_target):
self.file_system_service.makedirs(stage_order.staging_target)
yield StagingService._copy_dir(**args_for_copy_dir)
# TODO Better error handling
except Exception as e:
stage_order.status = StagingStatus.staging_failed
session.commit()
raise e
def create_new_stage_order(self, path, project_name):
staging_order = self.staging_repo.create_staging_order(source=path,
status=StagingStatus.pending,
staging_target_dir=self.staging_dir,
project_name=project_name)
return staging_order
def get_stage_order_by_id(self, stage_order_id):
"""
Get stage order by id
:param stage_order_id: id of StageOrder to get
:return: the StageOrder instance
"""
stage_order = self.staging_repo.get_staging_order_by_id(stage_order_id)
return stage_order
def get_status_of_stage_order(self, stage_order_id):
"""
Get the status of a stage order
:param stage_order_id: id of StageOrder to get
:return: the status of the stage order, or None if not found
"""
stage_order = self.get_stage_order_by_id(stage_order_id)
if stage_order:
return stage_order.status
else:
return None
def kill_process_of_staging_order(self, stage_order_id):
"""
Attempt to kill the process of the stage order.
Will only kill stage orders which have a 'staging_in_progress' status.
:param stage_order_id:
:return: True if the process was killed successfully, otherwise False
"""
session = self.session_factory()
stage_order = self.staging_repo.get_staging_order_by_id(stage_order_id, session)
if not stage_order:
return False
try:
if stage_order.status != StagingStatus.staging_in_progress:
raise InvalidStatusException(
"Can only kill processes where the staging order is 'staging_in_progress'")
os.kill(stage_order.pid, signal.SIGTERM)
except OSError:
log.error("Failed to kill process with pid: {} associated with staging order: {} ".
format(stage_order.id, stage_order.pid))
return False
except InvalidStatusException:
log.warning("Tried to kill process for staging order: {}, but didn't to it because it's status did not make"
"it eligible for killing.".format(stage_order.id))
return False
else:
log.debug("Successfully killed process with pid: {} associated with staging order: {} ".
format(stage_order.id, stage_order.pid))
stage_order.status = StagingStatus.staging_failed
session.commit()
return True
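# Added sketch (hedged) of the rsync summary parsing used in _copy_dir above:
# the size reported by "rsync --stats" contains thousands separators, so the
# matched number is stripped of commas before being stored on the staging order.
def _parse_rsync_total_size(stats_output):
    match = re.search(r'Total file size: ([\d,]+) bytes', stats_output)
    return int(match.group(1).replace(",", "")) if match else None
# _parse_rsync_total_size("Total file size: 207,707,566 bytes") -> 207707566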
|
|
"""Support for Ambiclimate ac."""
import asyncio
import logging
import ambiclimate
import voluptuous as vol
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_ON_OFF, STATE_HEAT)
from homeassistant.const import ATTR_NAME
from homeassistant.const import (ATTR_TEMPERATURE,
STATE_OFF, TEMP_CELSIUS)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (ATTR_VALUE, CONF_CLIENT_ID, CONF_CLIENT_SECRET,
DOMAIN, SERVICE_COMFORT_FEEDBACK, SERVICE_COMFORT_MODE,
SERVICE_TEMPERATURE_MODE, STORAGE_KEY, STORAGE_VERSION)
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = (SUPPORT_TARGET_TEMPERATURE |
SUPPORT_ON_OFF)
SEND_COMFORT_FEEDBACK_SCHEMA = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_VALUE): cv.string,
})
SET_COMFORT_MODE_SCHEMA = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
})
SET_TEMPERATURE_MODE_SCHEMA = vol.Schema({
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_VALUE): cv.string,
})
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Ambicliamte device."""
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the Ambicliamte device from config entry."""
config = entry.data
websession = async_get_clientsession(hass)
store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
token_info = await store.async_load()
oauth = ambiclimate.AmbiclimateOAuth(config[CONF_CLIENT_ID],
config[CONF_CLIENT_SECRET],
config['callback_url'],
websession)
try:
_token_info = await oauth.refresh_access_token(token_info)
except ambiclimate.AmbiclimateOauthError:
_LOGGER.error("Failed to refresh access token")
return
if _token_info:
        token_info = _token_info
        await store.async_save(token_info)
data_connection = ambiclimate.AmbiclimateConnection(oauth,
token_info=token_info,
websession=websession)
if not await data_connection.find_devices():
_LOGGER.error("No devices found")
return
tasks = []
for heater in data_connection.get_devices():
tasks.append(heater.update_device_info())
await asyncio.wait(tasks)
devs = []
for heater in data_connection.get_devices():
devs.append(AmbiclimateEntity(heater, store))
async_add_entities(devs, True)
async def send_comfort_feedback(service):
"""Send comfort feedback."""
device_name = service.data[ATTR_NAME]
device = data_connection.find_device_by_room_name(device_name)
if device:
await device.set_comfort_feedback(service.data[ATTR_VALUE])
hass.services.async_register(DOMAIN,
SERVICE_COMFORT_FEEDBACK,
send_comfort_feedback,
schema=SEND_COMFORT_FEEDBACK_SCHEMA)
async def set_comfort_mode(service):
"""Set comfort mode."""
device_name = service.data[ATTR_NAME]
device = data_connection.find_device_by_room_name(device_name)
if device:
await device.set_comfort_mode()
hass.services.async_register(DOMAIN,
SERVICE_COMFORT_MODE,
set_comfort_mode,
schema=SET_COMFORT_MODE_SCHEMA)
async def set_temperature_mode(service):
"""Set temperature mode."""
device_name = service.data[ATTR_NAME]
device = data_connection.find_device_by_room_name(device_name)
if device:
await device.set_temperature_mode(service.data[ATTR_VALUE])
hass.services.async_register(DOMAIN,
SERVICE_TEMPERATURE_MODE,
set_temperature_mode,
schema=SET_TEMPERATURE_MODE_SCHEMA)
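# Added aside (illustrative only): the service schemas above are plain
# voluptuous validators, so a service payload that omits a required key is
# rejected before the handler coroutines ever run.
def _schema_rejects_missing_value_example():
    try:
        SET_TEMPERATURE_MODE_SCHEMA({ATTR_NAME: 'Bedroom'})
    except vol.Invalid:
        return True   # ATTR_VALUE is required, so the call is rejected
    return False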
class AmbiclimateEntity(ClimateDevice):
"""Representation of a Ambiclimate Thermostat device."""
def __init__(self, heater, store):
"""Initialize the thermostat."""
self._heater = heater
self._store = store
self._data = {}
@property
def unique_id(self):
"""Return a unique ID."""
return self._heater.device_id
@property
def name(self):
"""Return the name of the entity."""
return self._heater.name
@property
def device_info(self):
"""Return the device info."""
return {
'identifiers': {
(DOMAIN, self.unique_id)
},
'name': self.name,
'manufacturer': 'Ambiclimate',
}
@property
def temperature_unit(self):
"""Return the unit of measurement which this thermostat uses."""
return TEMP_CELSIUS
@property
def target_temperature(self):
"""Return the target temperature."""
return self._data.get('target_temperature')
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return 1
@property
def current_temperature(self):
"""Return the current temperature."""
return self._data.get('temperature')
@property
def current_humidity(self):
"""Return the current humidity."""
return self._data.get('humidity')
@property
def is_on(self):
"""Return true if heater is on."""
return self._data.get('power', '').lower() == 'on'
@property
def min_temp(self):
"""Return the minimum temperature."""
return self._heater.get_min_temp()
@property
def max_temp(self):
"""Return the maximum temperature."""
return self._heater.get_max_temp()
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def current_operation(self):
"""Return current operation."""
return STATE_HEAT if self.is_on else STATE_OFF
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
await self._heater.set_target_temperature(temperature)
async def async_turn_on(self):
"""Turn device on."""
await self._heater.turn_on()
async def async_turn_off(self):
"""Turn device off."""
await self._heater.turn_off()
async def async_update(self):
"""Retrieve latest state."""
try:
token_info = await self._heater.control.refresh_access_token()
except ambiclimate.AmbiclimateOauthError:
_LOGGER.error("Failed to refresh access token")
return
if token_info:
await self._store.async_save(token_info)
self._data = await self._heater.update_device()
|
|
# Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack.compute import quota_sets as quotas_v21
from nova.db import api as db
from nova import exception
from nova import quota
from nova import test
from nova.tests.unit.api.openstack import fakes
def quota_set(id, include_server_group_quotas=True):
res = {'quota_set': {'id': id, 'metadata_items': 128,
'ram': 51200, 'floating_ips': -1, 'fixed_ips': -1,
'instances': 10, 'injected_files': 5, 'cores': 20,
'injected_file_content_bytes': 10240,
'security_groups': -1, 'security_group_rules': -1,
'key_pairs': 100, 'injected_file_path_bytes': 255}}
if include_server_group_quotas:
res['quota_set']['server_groups'] = 10
res['quota_set']['server_group_members'] = 10
return res
class BaseQuotaSetsTest(test.TestCase):
def setUp(self):
super(BaseQuotaSetsTest, self).setUp()
# We need to stub out verify_project_id so that it doesn't
# generate an EndpointNotFound exception and result in a
# server error.
self.stub_out('nova.api.openstack.identity.verify_project_id',
lambda ctx, project_id: True)
def get_delete_status_int(self, res):
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
return self.controller.delete.wsgi_code
class QuotaSetsTestV21(BaseQuotaSetsTest):
plugin = quotas_v21
validation_error = exception.ValidationError
include_server_group_quotas = True
def setUp(self):
super(QuotaSetsTestV21, self).setUp()
self._setup_controller()
self.default_quotas = {
'instances': 10,
'cores': 20,
'ram': 51200,
'floating_ips': -1,
'fixed_ips': -1,
'metadata_items': 128,
'injected_files': 5,
'injected_file_path_bytes': 255,
'injected_file_content_bytes': 10240,
'security_groups': -1,
'security_group_rules': -1,
'key_pairs': 100,
}
if self.include_server_group_quotas:
self.default_quotas['server_groups'] = 10
self.default_quotas['server_group_members'] = 10
def _setup_controller(self):
self.controller = self.plugin.QuotaSetsController()
def _get_http_request(self, url=''):
return fakes.HTTPRequest.blank(url)
def test_format_quota_set(self):
quota_set = self.controller._format_quota_set('1234',
self.default_quotas,
[])
qs = quota_set['quota_set']
self.assertEqual(qs['id'], '1234')
self.assertEqual(qs['instances'], 10)
self.assertEqual(qs['cores'], 20)
self.assertEqual(qs['ram'], 51200)
self.assertEqual(qs['floating_ips'], -1)
self.assertEqual(qs['fixed_ips'], -1)
self.assertEqual(qs['metadata_items'], 128)
self.assertEqual(qs['injected_files'], 5)
self.assertEqual(qs['injected_file_path_bytes'], 255)
self.assertEqual(qs['injected_file_content_bytes'], 10240)
self.assertEqual(qs['security_groups'], -1)
self.assertEqual(qs['security_group_rules'], -1)
self.assertEqual(qs['key_pairs'], 100)
if self.include_server_group_quotas:
self.assertEqual(qs['server_groups'], 10)
self.assertEqual(qs['server_group_members'], 10)
def test_validate_quota_limit(self):
resource = 'fake'
# Valid - finite values
self.assertIsNone(self.controller._validate_quota_limit(resource,
50, 10, 100))
# Valid - finite limit and infinite maximum
self.assertIsNone(self.controller._validate_quota_limit(resource,
50, 10, -1))
# Valid - infinite limit and infinite maximum
self.assertIsNone(self.controller._validate_quota_limit(resource,
-1, 10, -1))
# Valid - all infinite
self.assertIsNone(self.controller._validate_quota_limit(resource,
-1, -1, -1))
# Invalid - limit is less than -1
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._validate_quota_limit,
resource, -2, 10, 100)
# Invalid - limit is less than minimum
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._validate_quota_limit,
resource, 5, 10, 100)
# Invalid - limit is greater than maximum
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._validate_quota_limit,
resource, 200, 10, 100)
# Invalid - infinite limit is greater than maximum
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._validate_quota_limit,
resource, -1, 10, 100)
# Invalid - limit is less than infinite minimum
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._validate_quota_limit,
resource, 50, -1, -1)
# Invalid - limit is larger than 0x7FFFFFFF
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._validate_quota_limit,
resource, db.MAX_INT + 1, -1, -1)
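    # Added, illustrative only: a plain restatement of the limit rules the
    # assertions above exercise, derived from these test expectations rather
    # than from the controller implementation. -1 means "unlimited".
    @staticmethod
    def _limit_rules_sketch(limit, minimum, maximum, max_int=0x7FFFFFFF):
        if limit < -1 or limit > max_int:
            return False                 # outside the representable range
        if limit == -1:
            return maximum == -1         # unlimited only if the maximum is too
        if maximum != -1 and limit > maximum:
            return False
        if minimum == -1:
            return False                 # finite limit under an unlimited minimum
        return limit >= minimum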
def test_quotas_defaults(self):
uri = '/v2/%s/os-quota-sets/%s/defaults' % (
fakes.FAKE_PROJECT_ID, fakes.FAKE_PROJECT_ID)
req = fakes.HTTPRequest.blank(uri)
res_dict = self.controller.defaults(req, fakes.FAKE_PROJECT_ID)
self.default_quotas.update({'id': fakes.FAKE_PROJECT_ID})
expected = {'quota_set': self.default_quotas}
self.assertEqual(res_dict, expected)
def test_quotas_show(self):
req = self._get_http_request()
res_dict = self.controller.show(req, 1234)
ref_quota_set = quota_set('1234', self.include_server_group_quotas)
self.assertEqual(res_dict, ref_quota_set)
def test_quotas_update(self):
self.default_quotas.update({
'instances': 50,
'cores': 50
})
body = {'quota_set': self.default_quotas}
req = self._get_http_request()
res_dict = self.controller.update(req, 'update_me', body=body)
self.assertEqual(body, res_dict)
@mock.patch('nova.objects.Quotas.create_limit')
def test_quotas_update_with_good_data(self, mock_createlimit):
self.default_quotas.update({})
body = {'quota_set': self.default_quotas}
req = self._get_http_request()
self.controller.update(req, 'update_me', body=body)
self.assertEqual(len(self.default_quotas),
len(mock_createlimit.mock_calls))
@mock.patch('nova.api.validation.validators._SchemaValidator.validate')
@mock.patch('nova.objects.Quotas.create_limit')
def test_quotas_update_with_bad_data(self, mock_createlimit,
mock_validate):
self.default_quotas.update({
'instances': 50,
'cores': -50
})
body = {'quota_set': self.default_quotas}
req = self._get_http_request()
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 'update_me', body=body)
self.assertEqual(0,
len(mock_createlimit.mock_calls))
def test_quotas_update_zero_value(self):
body = {'quota_set': {'instances': 0, 'cores': 0,
'ram': 0, 'floating_ips': -1,
'metadata_items': 0,
'injected_files': 0,
'injected_file_content_bytes': 0,
'injected_file_path_bytes': 0,
'security_groups': -1,
'security_group_rules': -1,
'key_pairs': 100, 'fixed_ips': -1}}
if self.include_server_group_quotas:
body['quota_set']['server_groups'] = 10
body['quota_set']['server_group_members'] = 10
req = self._get_http_request()
res_dict = self.controller.update(req, 'update_me', body=body)
self.assertEqual(body, res_dict)
def _quotas_update_bad_request_case(self, body):
req = self._get_http_request()
self.assertRaises(self.validation_error, self.controller.update,
req, 'update_me', body=body)
def test_quotas_update_invalid_key(self):
body = {'quota_set': {'instances2': -2, 'cores': -2,
'ram': -2, 'floating_ips': -2,
'metadata_items': -2, 'injected_files': -2,
'injected_file_content_bytes': -2}}
self._quotas_update_bad_request_case(body)
def test_quotas_update_invalid_limit(self):
body = {'quota_set': {'instances': -2, 'cores': -2,
'ram': -2, 'floating_ips': -2, 'fixed_ips': -2,
'metadata_items': -2, 'injected_files': -2,
'injected_file_content_bytes': -2}}
self._quotas_update_bad_request_case(body)
def test_quotas_update_empty_body(self):
body = {}
self._quotas_update_bad_request_case(body)
def test_quotas_update_invalid_value_non_int(self):
# when PUT non integer value
self.default_quotas.update({
'instances': 'test'
})
body = {'quota_set': self.default_quotas}
self._quotas_update_bad_request_case(body)
def test_quotas_update_invalid_value_with_float(self):
# when PUT non integer value
self.default_quotas.update({
'instances': 50.5
})
body = {'quota_set': self.default_quotas}
self._quotas_update_bad_request_case(body)
def test_quotas_update_invalid_value_with_unicode(self):
# when PUT non integer value
self.default_quotas.update({
'instances': u'\u30aa\u30fc\u30d7\u30f3'
})
body = {'quota_set': self.default_quotas}
self._quotas_update_bad_request_case(body)
@mock.patch('nova.objects.Quotas.destroy_all_by_project')
def test_quotas_delete(self, mock_destroy_all_by_project):
req = self._get_http_request()
res = self.controller.delete(req, 1234)
self.assertEqual(202, self.get_delete_status_int(res))
mock_destroy_all_by_project.assert_called_once_with(
req.environ['nova.context'], 1234)
def test_duplicate_quota_filter(self):
query_string = 'user_id=1&user_id=2'
req = fakes.HTTPRequest.blank('', query_string=query_string)
self.controller.show(req, 1234)
self.controller.update(req, 1234, body={'quota_set': {}})
self.controller.detail(req, 1234)
self.controller.delete(req, 1234)
def test_quota_filter_negative_int_as_string(self):
req = fakes.HTTPRequest.blank('', query_string='user_id=-1')
self.controller.show(req, 1234)
self.controller.update(req, 1234, body={'quota_set': {}})
self.controller.detail(req, 1234)
self.controller.delete(req, 1234)
def test_quota_filter_int_as_string(self):
req = fakes.HTTPRequest.blank('', query_string='user_id=123')
self.controller.show(req, 1234)
self.controller.update(req, 1234, body={'quota_set': {}})
self.controller.detail(req, 1234)
self.controller.delete(req, 1234)
def test_unknown_quota_filter(self):
query_string = 'unknown_filter=abc'
req = fakes.HTTPRequest.blank('', query_string=query_string)
self.controller.show(req, 1234)
self.controller.update(req, 1234, body={'quota_set': {}})
self.controller.detail(req, 1234)
self.controller.delete(req, 1234)
def test_quota_additional_filter(self):
query_string = 'user_id=1&additional_filter=2'
req = fakes.HTTPRequest.blank('', query_string=query_string)
self.controller.show(req, 1234)
self.controller.update(req, 1234, body={'quota_set': {}})
self.controller.detail(req, 1234)
self.controller.delete(req, 1234)
class ExtendedQuotasTestV21(BaseQuotaSetsTest):
plugin = quotas_v21
def setUp(self):
super(ExtendedQuotasTestV21, self).setUp()
self._setup_controller()
fake_quotas = {'ram': {'limit': 51200,
'in_use': 12800,
'reserved': 12800},
'cores': {'limit': 20,
'in_use': 10,
'reserved': 5},
'instances': {'limit': 100,
'in_use': 0,
'reserved': 0}}
def _setup_controller(self):
self.controller = self.plugin.QuotaSetsController()
def fake_get_quotas(self, context, id, user_id=None, usages=False):
if usages:
return self.fake_quotas
else:
return {k: v['limit'] for k, v in self.fake_quotas.items()}
def fake_get_settable_quotas(self, context, project_id, user_id=None):
return {
'ram': {'minimum': self.fake_quotas['ram']['in_use'] +
self.fake_quotas['ram']['reserved'],
'maximum': -1},
'cores': {'minimum': self.fake_quotas['cores']['in_use'] +
self.fake_quotas['cores']['reserved'],
'maximum': -1},
'instances': {'minimum': self.fake_quotas['instances']['in_use'] +
self.fake_quotas['instances']['reserved'],
'maximum': -1},
}
def _get_http_request(self, url=''):
return fakes.HTTPRequest.blank(url)
@mock.patch.object(quota.QUOTAS, 'get_settable_quotas')
def test_quotas_update_exceed_in_used(self, get_settable_quotas):
body = {'quota_set': {'cores': 10}}
get_settable_quotas.side_effect = self.fake_get_settable_quotas
req = self._get_http_request()
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 'update_me', body=body)
@mock.patch.object(quota.QUOTAS, 'get_settable_quotas')
def test_quotas_force_update_exceed_in_used(self, get_settable_quotas):
with mock.patch.object(self.plugin.QuotaSetsController,
'_get_quotas') as _get_quotas:
body = {'quota_set': {'cores': 10, 'force': 'True'}}
get_settable_quotas.side_effect = self.fake_get_settable_quotas
_get_quotas.side_effect = self.fake_get_quotas
req = self._get_http_request()
self.controller.update(req, 'update_me', body=body)
@mock.patch('nova.objects.Quotas.create_limit')
def test_quotas_update_good_data(self, mock_createlimit):
body = {'quota_set': {'cores': 1,
'instances': 1}}
req = fakes.HTTPRequest.blank(
'/v2/%s/os-quota-sets/update_me' % fakes.FAKE_PROJECT_ID,
use_admin_context=True)
self.controller.update(req, 'update_me', body=body)
self.assertEqual(2,
len(mock_createlimit.mock_calls))
@mock.patch('nova.objects.Quotas.create_limit')
@mock.patch.object(quota.QUOTAS, 'get_settable_quotas')
def test_quotas_update_bad_data(self, mock_gsq, mock_createlimit):
body = {'quota_set': {'cores': 10,
'instances': 1}}
mock_gsq.side_effect = self.fake_get_settable_quotas
req = fakes.HTTPRequest.blank(
'/v2/%s/os-quota-sets/update_me' % fakes.FAKE_PROJECT_ID,
use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 'update_me', body=body)
self.assertEqual(0,
len(mock_createlimit.mock_calls))
class UserQuotasTestV21(BaseQuotaSetsTest):
plugin = quotas_v21
include_server_group_quotas = True
def setUp(self):
super(UserQuotasTestV21, self).setUp()
self._setup_controller()
def _get_http_request(self, url=''):
return fakes.HTTPRequest.blank(url)
def _setup_controller(self):
self.controller = self.plugin.QuotaSetsController()
def test_user_quotas_show(self):
req = self._get_http_request(
'/v2/%s/os-quota-sets/%s?user_id=1' % (fakes.FAKE_PROJECT_ID,
fakes.FAKE_PROJECT_ID))
res_dict = self.controller.show(req, fakes.FAKE_PROJECT_ID)
ref_quota_set = quota_set(fakes.FAKE_PROJECT_ID,
self.include_server_group_quotas)
self.assertEqual(res_dict, ref_quota_set)
def test_user_quotas_update(self):
body = {'quota_set': {'instances': 10, 'cores': 20,
'ram': 51200, 'floating_ips': -1,
'fixed_ips': -1, 'metadata_items': 128,
'injected_files': 5,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'security_groups': -1,
'security_group_rules': -1,
'key_pairs': 100}}
if self.include_server_group_quotas:
body['quota_set']['server_groups'] = 10
body['quota_set']['server_group_members'] = 10
url = ('/v2/%s/os-quota-sets/update_me?user_id=1' %
fakes.FAKE_PROJECT_ID)
req = self._get_http_request(url)
res_dict = self.controller.update(req, 'update_me', body=body)
self.assertEqual(body, res_dict)
def test_user_quotas_update_exceed_project(self):
body = {'quota_set': {'instances': 20}}
url = ('/v2/%s/os-quota-sets/update_me?user_id=1' %
fakes.FAKE_PROJECT_ID)
req = self._get_http_request(url)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 'update_me', body=body)
@mock.patch('nova.objects.Quotas.destroy_all_by_project_and_user')
def test_user_quotas_delete(self, mock_destroy_all_by_project_and_user):
url = '/v2/%s/os-quota-sets/%s?user_id=1' % (fakes.FAKE_PROJECT_ID,
fakes.FAKE_PROJECT_ID)
req = self._get_http_request(url)
res = self.controller.delete(req, fakes.FAKE_PROJECT_ID)
self.assertEqual(202, self.get_delete_status_int(res))
mock_destroy_all_by_project_and_user.assert_called_once_with(
req.environ['nova.context'], fakes.FAKE_PROJECT_ID, '1'
)
@mock.patch('nova.objects.Quotas.create_limit')
def test_user_quotas_update_good_data(self, mock_createlimit):
body = {'quota_set': {'instances': 1,
'cores': 1}}
url = ('/v2/%s/os-quota-sets/update_me?user_id=1' %
fakes.FAKE_PROJECT_ID)
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
self.controller.update(req, 'update_me', body=body)
self.assertEqual(2,
len(mock_createlimit.mock_calls))
@mock.patch('nova.objects.Quotas.create_limit')
def test_user_quotas_update_bad_data(self, mock_createlimit):
body = {'quota_set': {'instances': 20,
'cores': 1}}
url = ('/v2/%s/os-quota-sets/update_me?user_id=1' %
fakes.FAKE_PROJECT_ID)
req = fakes.HTTPRequest.blank(url, use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
req, 'update_me', body=body)
self.assertEqual(0,
len(mock_createlimit.mock_calls))
class QuotaSetsPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(QuotaSetsPolicyEnforcementV21, self).setUp()
self.controller = quotas_v21.QuotaSetsController()
self.req = fakes.HTTPRequest.blank('')
def test_delete_policy_failed(self):
rule_name = "os_compute_api:os-quota-sets:delete"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_defaults_policy_failed(self):
rule_name = "os_compute_api:os-quota-sets:defaults"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.defaults, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_show_policy_failed(self):
rule_name = "os_compute_api:os-quota-sets:show"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.show, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_detail_policy_failed(self):
rule_name = "os_compute_api:os-quota-sets:detail"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.detail, self.req, fakes.FAKE_UUID)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_update_policy_failed(self):
rule_name = "os_compute_api:os-quota-sets:update"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.update, self.req, fakes.FAKE_UUID,
body={'quota_set': {}})
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
class QuotaSetsTestV236(test.NoDBTestCase):
microversion = '2.36'
def setUp(self):
super(QuotaSetsTestV236, self).setUp()
# We need to stub out verify_project_id so that it doesn't
# generate an EndpointNotFound exception and result in a
# server error.
self.stub_out('nova.api.openstack.identity.verify_project_id',
lambda ctx, project_id: True)
self.old_req = fakes.HTTPRequest.blank('', version='2.1')
self.filtered_quotas = ['fixed_ips', 'floating_ips',
'security_group_rules', 'security_groups']
self.quotas = {
'cores': {'limit': 20},
'fixed_ips': {'limit': -1},
'floating_ips': {'limit': -1},
'injected_file_content_bytes': {'limit': 10240},
'injected_file_path_bytes': {'limit': 255},
'injected_files': {'limit': 5},
'instances': {'limit': 10},
'key_pairs': {'limit': 100},
'metadata_items': {'limit': 128},
'ram': {'limit': 51200},
'security_group_rules': {'limit': -1},
'security_groups': {'limit': -1},
'server_group_members': {'limit': 10},
'server_groups': {'limit': 10}
}
self.defaults = {
'cores': 20,
'fixed_ips': -1,
'floating_ips': -1,
'injected_file_content_bytes': 10240,
'injected_file_path_bytes': 255,
'injected_files': 5,
'instances': 10,
'key_pairs': 100,
'metadata_items': 128,
'ram': 51200,
'security_group_rules': -1,
'security_groups': -1,
'server_group_members': 10,
'server_groups': 10
}
self.controller = quotas_v21.QuotaSetsController()
self.req = fakes.HTTPRequest.blank('', version=self.microversion)
def _ensure_filtered_quotas_existed_in_old_api(self):
res_dict = self.controller.show(self.old_req, 1234)
for filtered in self.filtered_quotas:
self.assertIn(filtered, res_dict['quota_set'])
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
def test_quotas_show_filtered(self, mock_quotas):
mock_quotas.return_value = self.quotas
self._ensure_filtered_quotas_existed_in_old_api()
res_dict = self.controller.show(self.req, 1234)
for filtered in self.filtered_quotas:
self.assertNotIn(filtered, res_dict['quota_set'])
@mock.patch('nova.quota.QUOTAS.get_defaults')
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
def test_quotas_default_filtered(self, mock_quotas, mock_defaults):
mock_quotas.return_value = self.quotas
self._ensure_filtered_quotas_existed_in_old_api()
res_dict = self.controller.defaults(self.req, 1234)
for filtered in self.filtered_quotas:
self.assertNotIn(filtered, res_dict['quota_set'])
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
def test_quotas_detail_filtered(self, mock_quotas):
mock_quotas.return_value = self.quotas
self._ensure_filtered_quotas_existed_in_old_api()
res_dict = self.controller.detail(self.req, 1234)
for filtered in self.filtered_quotas:
self.assertNotIn(filtered, res_dict['quota_set'])
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
def test_quotas_update_input_filtered(self, mock_quotas):
mock_quotas.return_value = self.quotas
self._ensure_filtered_quotas_existed_in_old_api()
for filtered in self.filtered_quotas:
self.assertRaises(exception.ValidationError,
self.controller.update, self.req, 1234,
body={'quota_set': {filtered: 100}})
@mock.patch('nova.objects.Quotas.create_limit')
@mock.patch('nova.quota.QUOTAS.get_settable_quotas')
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
def test_quotas_update_output_filtered(self, mock_quotas, mock_settable,
mock_create_limit):
mock_quotas.return_value = self.quotas
mock_settable.return_value = {'cores': {'maximum': -1, 'minimum': 0}}
self._ensure_filtered_quotas_existed_in_old_api()
res_dict = self.controller.update(self.req, 1234,
body={'quota_set': {'cores': 100}})
for filtered in self.filtered_quotas:
self.assertNotIn(filtered, res_dict['quota_set'])
class QuotaSetsTestV257(QuotaSetsTestV236):
microversion = '2.57'
def setUp(self):
super(QuotaSetsTestV257, self).setUp()
self.filtered_quotas.extend(quotas_v21.FILTERED_QUOTAS_2_57)
class QuotaSetsTestV275(QuotaSetsTestV257):
microversion = '2.75'
@mock.patch('nova.objects.Quotas.destroy_all_by_project')
@mock.patch('nova.objects.Quotas.create_limit')
@mock.patch('nova.quota.QUOTAS.get_settable_quotas')
@mock.patch('nova.quota.QUOTAS.get_project_quotas')
def test_quota_additional_filter_older_version(self, mock_quotas,
mock_settable,
mock_create_limit,
mock_destroy):
mock_quotas.return_value = self.quotas
mock_settable.return_value = {'cores': {'maximum': -1, 'minimum': 0}}
query_string = 'additional_filter=2'
req = fakes.HTTPRequest.blank('', version='2.74',
query_string=query_string)
self.controller.show(req, 1234)
self.controller.update(req, 1234, body={'quota_set': {}})
self.controller.detail(req, 1234)
self.controller.delete(req, 1234)
def test_quota_update_additional_filter(self):
query_string = 'user_id=1&additional_filter=2'
req = fakes.HTTPRequest.blank('', version=self.microversion,
query_string=query_string)
self.assertRaises(exception.ValidationError, self.controller.update,
req, 'update_me', body={'quota_set': {}})
def test_quota_show_additional_filter(self):
query_string = 'user_id=1&additional_filter=2'
req = fakes.HTTPRequest.blank('', version=self.microversion,
query_string=query_string)
self.assertRaises(exception.ValidationError, self.controller.show,
req, 1234)
def test_quota_detail_additional_filter(self):
query_string = 'user_id=1&additional_filter=2'
req = fakes.HTTPRequest.blank('', version=self.microversion,
query_string=query_string)
self.assertRaises(exception.ValidationError, self.controller.detail,
req, 1234)
def test_quota_delete_additional_filter(self):
query_string = 'user_id=1&additional_filter=2'
req = fakes.HTTPRequest.blank('', version=self.microversion,
query_string=query_string)
self.assertRaises(exception.ValidationError, self.controller.delete,
req, 1234)
|
|
#!/usr/bin/env python
"""buildpkg.py -- Build OS X packages for Apple's Installer.app.
This is an experimental command-line tool for building packages to be
installed with the Mac OS X Installer.app application.
Please read the file ReadMe.txt for more information!
Dinu C. Gherman,
[email protected]
September 2002
!! USE AT YOUR OWN RISK !!
"""
__version__ = 0.3
__license__ = "FreeBSD"
import os, sys, glob, fnmatch, shutil, string, copy, getopt
from os.path import basename, dirname, join, islink, isdir, isfile
Error = "buildpkg.Error"
PKG_INFO_FIELDS = """\
Title
Version
Description
DefaultLocation
Diskname
DeleteWarning
NeedsAuthorization
DisableStop
UseUserMask
Application
Relocatable
Required
InstallOnly
RequiresReboot
InstallFat\
"""
######################################################################
# Helpers
######################################################################
# Convenience class, as suggested by /F.
class GlobDirectoryWalker:
"A forward iterator that traverses files in a directory tree."
def __init__(self, directory, pattern="*"):
self.stack = [directory]
self.pattern = pattern
self.files = []
self.index = 0
def __getitem__(self, index):
while 1:
try:
file = self.files[self.index]
self.index = self.index + 1
except IndexError:
# pop next directory from stack
self.directory = self.stack.pop()
self.files = os.listdir(self.directory)
self.index = 0
else:
# got a filename
fullname = join(self.directory, file)
if isdir(fullname) and not islink(fullname):
self.stack.append(fullname)
if fnmatch.fnmatch(file, self.pattern):
return fullname
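# Illustrative sketch (not part of the original script): how the walker is
# typically consumed, iterating lazily over matching files. The directory
# and pattern below are assumptions for the example.
def _exampleWalkPyFiles(directory="."):
    "Collect all *.py files below 'directory' using GlobDirectoryWalker."
    found = []
    for name in GlobDirectoryWalker(directory, "*.py"):
        found.append(name)
    return found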
######################################################################
# The real thing
######################################################################
class PackageMaker:
"""A class to generate packages for Mac OS X.
This is intended to create OS X packages (with extension .pkg)
containing archives of arbitrary files that the Installer.app
(Apple's OS X installer) will be able to handle.
As of now, PackageMaker instances need to be created with the
title, version and description of the package to be built.
The package is built after calling the instance method
build(root, resources, **options). The generated package is
a folder hierarchy with the top-level folder name equal to the
constructor's title argument plus a '.pkg' extension. This final
package is stored in the current folder.
The sources from the root folder will be stored in the package
as a compressed archive, while all files and folders from the
resources folder will be added to the package as they are.
Example:
With /my/space being the current directory, the following will
create /my/space/distutils-1.0.2.pkg/:
PM = PackageMaker
pm = PM("distutils-1.0.2", "1.0.2", "Python distutils.")
pm.build("/my/space/sources/distutils-1.0.2")
After a package is built you can still add further individual
resource files or folders to its Contents/Resources subfolder
by using the addResource(path) method:
pm.addResource("/my/space/metainfo/distutils/")
"""
packageInfoDefaults = {
'Title': None,
'Version': None,
'Description': '',
'DefaultLocation': '/',
'Diskname': '(null)',
'DeleteWarning': '',
'NeedsAuthorization': 'NO',
'DisableStop': 'NO',
'UseUserMask': 'YES',
'Application': 'NO',
'Relocatable': 'YES',
'Required': 'NO',
'InstallOnly': 'NO',
'RequiresReboot': 'NO',
'InstallFat': 'NO'}
def __init__(self, title, version, desc):
"Init. with mandatory title/version/description arguments."
info = {"Title": title, "Version": version, "Description": desc}
self.packageInfo = copy.deepcopy(self.packageInfoDefaults)
self.packageInfo.update(info)
# variables set later
self.packageRootFolder = None
self.packageResourceFolder = None
self.sourceFolder = None
self.resourceFolder = None
def _escapeBlanks(self, s):
"Return a string with escaped blanks."
return s.replace(' ', '\ ')
def build(self, root, resources=None, **options):
"""Create a package for some given root folder.
With no 'resources' argument set it is assumed to be the same
as the root directory. Option items replace the default ones
in the package info.
"""
# set folder attributes
self.sourceFolder = root
        if resources is None:
self.resourceFolder = None
else:
self.resourceFolder = resources
# replace default option settings with user ones if provided
        fields = self.packageInfoDefaults.keys()
for k, v in options.items():
if k in fields:
self.packageInfo[k] = v
elif not k in ["OutputDir"]:
raise Error, "Unknown package option: %s" % k
# Check where we should leave the output. Default is current directory
outputdir = options.get("OutputDir", os.getcwd())
packageName = self.packageInfo["Title"]
self.packageRootFolder = os.path.join(outputdir, packageName + ".pkg")
# do what needs to be done
self._makeFolders()
self._addInfo()
self._addBom()
self._addArchive()
self._addResources()
self._addSizes()
def addResource(self, path):
"Add arbitrary file or folder to the package resource folder."
# Folder basenames become subfolders of Contents/Resources.
        # This method is made public for those who know what they are doing!
prf = self.packageResourceFolder
if isfile(path) and not isdir(path):
shutil.copy(path, prf)
elif isdir(path):
path = self._escapeBlanks(path)
prf = self._escapeBlanks(prf)
os.system("cp -r %s %s" % (path, prf))
def _makeFolders(self):
"Create package folder structure."
# Not sure if the package name should contain the version or not...
# packageName = "%s-%s" % (self.packageInfo["Title"],
# self.packageInfo["Version"]) # ??
contFolder = join(self.packageRootFolder, "Contents")
self.packageResourceFolder = join(contFolder, "Resources")
os.mkdir(self.packageRootFolder)
os.mkdir(contFolder)
os.mkdir(self.packageResourceFolder)
def _addInfo(self):
"Write .info file containing installing options."
# Not sure if options in PKG_INFO_FIELDS are complete...
info = ""
for f in string.split(PKG_INFO_FIELDS, "\n"):
info = info + "%s %%(%s)s\n" % (f, f)
info = info % self.packageInfo
base = self.packageInfo["Title"] + ".info"
path = join(self.packageResourceFolder, base)
        f = open(path, "w")
        f.write(info)
        f.close()
def _addBom(self):
"Write .bom file containing 'Bill of Materials'."
# Currently ignores if the 'mkbom' tool is not available.
try:
base = self.packageInfo["Title"] + ".bom"
bomPath = join(self.packageResourceFolder, base)
bomPath = self._escapeBlanks(bomPath)
sourceFolder = self._escapeBlanks(self.sourceFolder)
cmd = "mkbom %s %s" % (sourceFolder, bomPath)
res = os.system(cmd)
except:
pass
def _addArchive(self):
"Write .pax.gz file, a compressed archive using pax/gzip."
# Currently ignores if the 'pax' tool is not available.
cwd = os.getcwd()
# create archive
os.chdir(self.sourceFolder)
base = basename(self.packageInfo["Title"]) + ".pax"
self.archPath = join(self.packageResourceFolder, base)
archPath = self._escapeBlanks(self.archPath)
cmd = "pax -w -f %s %s" % (archPath, ".")
res = os.system(cmd)
# compress archive
cmd = "gzip %s" % archPath
res = os.system(cmd)
os.chdir(cwd)
def _addResources(self):
"Add all files and folders inside a resources folder to the package."
# This folder normally contains Welcome/ReadMe/License files,
# .lproj folders and scripts.
if not self.resourceFolder:
return
files = glob.glob("%s/*" % self.resourceFolder)
for f in files:
self.addResource(f)
def _addSizes(self):
"Write .sizes file with info about number and size of files."
# Not sure if this is correct, but 'installedSize' and
# 'zippedSize' are now in Bytes. Maybe blocks are needed?
# Well, Installer.app doesn't seem to care anyway, saying
# the installation needs 100+ MB...
numFiles = 0
installedSize = 0
zippedSize = 0
files = GlobDirectoryWalker(self.sourceFolder)
for f in files:
numFiles = numFiles + 1
installedSize = installedSize + os.lstat(f)[6]
try:
            zippedSize = os.stat(self.archPath + ".gz")[6]
except OSError: # ignore error
pass
base = self.packageInfo["Title"] + ".sizes"
        f = open(join(self.packageResourceFolder, base), "w")
        format = "NumFiles %d\nInstalledSize %d\nCompressedSize %d\n"
        f.write(format % (numFiles, installedSize, zippedSize))
        f.close()
# Shortcut function interface
def buildPackage(*args, **options):
"A shortcut function for building a package."
o = options
title, version, desc = o["Title"], o["Version"], o["Description"]
pm = PackageMaker(title, version, desc)
apply(pm.build, list(args), options)
return pm
######################################################################
# Command-line interface
######################################################################
def printUsage():
"Print usage message."
format = "Usage: %s <opts1> [<opts2>] <root> [<resources>]"
print format % basename(sys.argv[0])
print
print " with arguments:"
print " (mandatory) root: the package root folder"
print " (optional) resources: the package resources folder"
print
print " and options:"
print " (mandatory) opts1:"
mandatoryKeys = string.split("Title Version Description", " ")
for k in mandatoryKeys:
print " --%s" % k
print " (optional) opts2: (with default values)"
pmDefaults = PackageMaker.packageInfoDefaults
optionalKeys = pmDefaults.keys()
for k in mandatoryKeys:
optionalKeys.remove(k)
optionalKeys.sort()
maxKeyLen = max(map(len, optionalKeys))
for k in optionalKeys:
format = " --%%s:%s %%s"
format = format % (" " * (maxKeyLen-len(k)))
print format % (k, repr(pmDefaults[k]))
def main():
"Command-line interface."
shortOpts = ""
keys = PackageMaker.packageInfoDefaults.keys()
longOpts = map(lambda k: k+"=", keys)
try:
opts, args = getopt.getopt(sys.argv[1:], shortOpts, longOpts)
except getopt.GetoptError, details:
print details
printUsage()
return
optsDict = {}
for k, v in opts:
optsDict[k[2:]] = v
ok = optsDict.keys()
    if not (1 <= len(args) <= 2):
        print "Wrong number of arguments (expected one or two)!"
elif not ("Title" in ok and \
"Version" in ok and \
"Description" in ok):
print "Missing mandatory option!"
else:
pm = apply(buildPackage, args, optsDict)
return
printUsage()
# sample use:
# buildpkg.py --Title=distutils \
# --Version=1.0.2 \
# --Description="Python distutils package." \
# /Users/dinu/Desktop/distutils
if __name__ == "__main__":
main()
|
|
from cStringIO import StringIO
import calendar
import functools
import logging
import re
import time
from urllib import urlencode
import urllib2
from xml.etree import ElementTree
_log = logging.getLogger('evelink.api')
try:
import requests
_has_requests = True
except ImportError:
_log.info('`requests` not available, falling back to urllib2')
_has_requests = None
def _clean(v):
"""Convert parameters into an acceptable format for the API."""
if isinstance(v, (list, set, tuple)):
return ",".join(str(i) for i in v)
else:
return str(v)
def parse_ts(v):
"""Parse a timestamp from EVE API XML into a unix-ish timestamp."""
if v == '':
return None
ts = calendar.timegm(time.strptime(v, "%Y-%m-%d %H:%M:%S"))
# Deal with EVE's nonexistent 0001-01-01 00:00:00 timestamp
return ts if ts > 0 else None
def get_named_value(elem, field):
"""Returns the string value of the named child element."""
try:
return elem.find(field).text
except AttributeError:
return None
def get_ts_value(elem, field):
"""Returns the timestamp value of the named child element."""
val = get_named_value(elem, field)
if val:
return parse_ts(val)
return None
def get_int_value(elem, field):
"""Returns the integer value of the named child element."""
val = get_named_value(elem, field)
if val:
return int(val)
return val
def get_float_value(elem, field):
"""Returns the float value of the named child element."""
val = get_named_value(elem, field)
if val:
return float(val)
return val
def get_bool_value(elem, field):
"""Returns the boolean value of the named child element."""
val = get_named_value(elem, field)
if val == 'True':
return True
elif val == 'False':
return False
return None
def elem_getters(elem):
"""Returns a tuple of (_str, _int, _float, _bool, _ts) functions.
These are getters closed around the provided element.
"""
_str = lambda key: get_named_value(elem, key)
_int = lambda key: get_int_value(elem, key)
_float = lambda key: get_float_value(elem, key)
_bool = lambda key: get_bool_value(elem, key)
_ts = lambda key: get_ts_value(elem, key)
return _str, _int, _float, _bool, _ts
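# Illustrative sketch (not part of the original module): using elem_getters on a
# small hand-written XML fragment; the element and field names are made up for
# the example.
def _example_elem_getters():
    """Parse a tiny element and read typed values via elem_getters."""
    elem = ElementTree.fromstring(
        "<row><name>Jita</name><count>42</count><ratio>0.5</ratio></row>")
    _str, _int, _float, _bool, _ts = elem_getters(elem)
    return _str('name'), _int('count'), _float('ratio')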
def parse_keyval_data(data_string):
"""Parse 'key: value' lines from a LF-delimited string."""
keyval_pairs = data_string.strip().split('\n')
results = {}
for pair in keyval_pairs:
key, _, val = pair.strip().partition(': ')
if 'Date' in key:
val = parse_ms_date(val)
elif val == 'null':
val = None
elif re.match(r"^-?\d+$", val):
val = int(val)
elif re.match(r"-?\d+\.\d+", val):
val = float(val)
results[key] = val
return results
def parse_ms_date(date_string):
"""Convert MS date format into epoch"""
    return int(date_string)/10000000 - 11644473600
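# Worked example (illustrative): a raw value of 130000000000000000 is a count of
# 100-nanosecond intervals since 1601-01-01 (the Windows FILETIME epoch), so
# 130000000000000000 / 10000000 - 11644473600 = 1355526400, i.e. seconds since
# the Unix epoch (mid-December 2012).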
class APIError(Exception):
"""Exception raised when the EVE API returns an error."""
def __init__(self, code=None, message=None):
self.code = code
self.message = message
def __repr__(self):
return "APIError(%r, %r)" % (self.code, self.message)
def __str__(self):
return "%s (code=%d)" % (self.message, int(self.code))
class APICache(object):
"""Minimal interface for caching API requests.
This very basic implementation simply stores values in
memory, with no other persistence. You can subclass it
to define a more complex/featureful/persistent cache.
"""
def __init__(self):
self.cache = {}
def get(self, key):
"""Return the value referred to by 'key' if it is cached.
key:
a result from the Python hash() function.
"""
result = self.cache.get(key)
if not result:
return None
value, expiration = result
if expiration < time.time():
del self.cache[key]
return None
return value
def put(self, key, value, duration):
"""Cache the provided value, referenced by 'key', for the given duration.
key:
a result from the Python hash() function.
value:
an xml.etree.ElementTree.Element object
duration:
a number of seconds before this cache entry should expire.
"""
expiration = time.time() + duration
self.cache[key] = (value, expiration)
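# Illustrative sketch (not part of the original module): a minimal persistent
# cache built on APICache's get/put interface, using `shelve` (the string cache
# keys produced by API._cache_key below make this workable). The path handling
# is an assumption for the example.
class _ExampleShelveCache(APICache):
    """Store cached API responses in a shelve file instead of a plain dict."""
    def __init__(self, path):
        APICache.__init__(self)
        import shelve
        self.cache = shelve.open(path)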
class API(object):
"""A wrapper around the EVE API."""
def __init__(self, base_url="api.eveonline.com", cache=None, api_key=None):
self.base_url = base_url
cache = cache or APICache()
if not isinstance(cache, APICache):
raise ValueError("The provided cache must subclass from APICache.")
self.cache = cache
self.CACHE_VERSION = '1'
if api_key and len(api_key) != 2:
raise ValueError("The provided API key must be a tuple of (keyID, vCode).")
self.api_key = api_key
self._set_last_timestamps()
def _set_last_timestamps(self, current_time=0, cached_until=0):
self.last_timestamps = {
'current_time': current_time,
'cached_until': cached_until,
}
def _cache_key(self, path, params):
sorted_params = sorted(params.iteritems())
# Paradoxically, Shelve doesn't like integer keys.
return '%s-%s' % (self.CACHE_VERSION, hash((path, tuple(sorted_params))))
def get(self, path, params=None):
"""Request a specific path from the EVE API.
The supplied path should be a slash-separated path
        fragment, e.g. "corp/AssetList". (Basically, the portion
of the API url in between the root / and the .xml bit.)
"""
params = params or {}
params = dict((k, _clean(v)) for k,v in params.iteritems())
_log.debug("Calling %s with params=%r", path, params)
if self.api_key:
_log.debug("keyID and vCode added")
params['keyID'] = self.api_key[0]
params['vCode'] = self.api_key[1]
key = self._cache_key(path, params)
response = self.cache.get(key)
cached = response is not None
if not cached:
# no cached response body found, call the API for one.
params = urlencode(params)
full_path = "https://%s/%s.xml.aspx" % (self.base_url, path)
response = self.send_request(full_path, params)
else:
_log.debug("Cache hit, returning cached payload")
tree = ElementTree.parse(StringIO(response))
current_time = get_ts_value(tree, 'currentTime')
expires_time = get_ts_value(tree, 'cachedUntil')
self._set_last_timestamps(current_time, expires_time)
if not cached:
# Have to split this up from above as timestamps have to be
# extracted.
self.cache.put(key, response, expires_time - current_time)
error = tree.find('error')
if error is not None:
code = error.attrib['code']
message = error.text.strip()
exc = APIError(code, message)
_log.error("Raising API error: %r" % exc)
raise exc
result = tree.find('result')
return result
def send_request(self, full_path, params):
if _has_requests:
return self.requests_request(full_path, params)
else:
return self.urllib2_request(full_path, params)
def urllib2_request(self, full_path, params):
try:
if params:
# POST request
_log.debug("POSTing request")
r = urllib2.urlopen(full_path, params)
else:
# GET request
_log.debug("GETting request")
r = urllib2.urlopen(full_path)
result = r.read()
r.close()
return result
except urllib2.URLError as e:
# TODO: Handle this better?
raise e
def requests_request(self, full_path, params):
session = getattr(self, 'session', None)
if not session:
session = requests.Session()
self.session = session
try:
if params:
# POST request
_log.debug("POSTing request")
r = session.post(full_path, params=params)
else:
# GET request
_log.debug("GETting request")
r = session.get(full_path)
return r.content
except requests.exceptions.RequestException as e:
# TODO: Handle this better?
raise e
def auto_api(func):
"""A decorator to automatically provide an API instance.
Functions decorated with this will have the api= kwarg
automatically supplied with a default-initialized API()
object if no other API object is supplied.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if 'api' not in kwargs:
kwargs['api'] = API()
return func(*args, **kwargs)
return wrapper
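# Illustrative sketch (not part of the original module): a function taking an
# optional api= kwarg that auto_api fills in with a default API() instance. The
# function name and the API path used here are assumptions for the example.
@auto_api
def _example_server_status(api=None):
    """Return the <result> element of a ServerStatus call (illustrative only)."""
    return api.get('server/ServerStatus')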
# vim: set ts=4 sts=4 sw=4 et:
|
|
"""HTML utilities suitable for global use."""
from __future__ import unicode_literals
import re
from django.utils.safestring import SafeData, mark_safe
from django.utils.encoding import force_text, force_str
from django.utils.functional import allow_lazy
from django.utils import six
from django.utils.six.moves.urllib.parse import quote, unquote, urlsplit, urlunsplit
from django.utils.text import normalize_newlines
from .html_parser import HTMLParser, HTMLParseError
# Configuration for urlize() function.
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)']
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;')]
# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')
def escape(text):
"""
Returns the given text with ampersands, quotes and angle brackets encoded for use in HTML.
"""
    return mark_safe(force_text(text).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))
escape = allow_lazy(escape, six.text_type)
_js_escapes = {
ord('\\'): '\\u005C',
ord('\''): '\\u0027',
ord('"'): '\\u0022',
ord('>'): '\\u003E',
ord('<'): '\\u003C',
ord('&'): '\\u0026',
ord('='): '\\u003D',
ord('-'): '\\u002D',
ord(';'): '\\u003B',
ord('\u2028'): '\\u2028',
ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
return mark_safe(force_text(value).translate(_js_escapes))
escapejs = allow_lazy(escapejs, six.text_type)
def conditional_escape(text):
"""
Similar to escape(), except that it doesn't operate on pre-escaped strings.
"""
if isinstance(text, SafeData):
return text
else:
return escape(text)
def format_html(format_string, *args, **kwargs):
"""
Similar to str.format, but passes all arguments through conditional_escape,
and calls 'mark_safe' on the result. This function should be used instead
of str.format or % interpolation to build up small HTML fragments.
"""
args_safe = map(conditional_escape, args)
kwargs_safe = dict([(k, conditional_escape(v)) for (k, v) in
six.iteritems(kwargs)])
return mark_safe(format_string.format(*args_safe, **kwargs_safe))
def format_html_join(sep, format_string, args_generator):
"""
A wrapper of format_html, for the common case of a group of arguments that
need to be formatted using the same format string, and then joined using
'sep'. 'sep' is also passed through conditional_escape.
'args_generator' should be an iterator that returns the sequence of 'args'
that will be passed to format_html.
Example:
format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
for u in users))
"""
return mark_safe(conditional_escape(sep).join(
format_html(format_string, *tuple(args))
for args in args_generator))
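# Illustrative sketch (not part of this module): combining format_html and
# format_html_join to build an escaped fragment; the helper name and the
# (name, email) pairs are made up for the example.
def _example_user_list_html(users):
    """Render an iterable of (name, email) pairs as an escaped <ul> fragment."""
    items = format_html_join('\n', '<li>{0} ({1})</li>',
                             ((name, email) for name, email in users))
    return format_html('<ul>\n{0}\n</ul>', items)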
def linebreaks(value, autoescape=False):
"""Converts newlines into <p> and <br />s."""
value = normalize_newlines(value)
paras = re.split('\n{2,}', value)
if autoescape:
paras = ['<p>%s</p>' % escape(p).replace('\n', '<br />') for p in paras]
else:
paras = ['<p>%s</p>' % p.replace('\n', '<br />') for p in paras]
return '\n\n'.join(paras)
linebreaks = allow_lazy(linebreaks, six.text_type)
class MLStripper(HTMLParser):
def __init__(self):
if six.PY2:
HTMLParser.__init__(self)
else:
HTMLParser.__init__(self, strict=False)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def handle_entityref(self, name):
self.fed.append('&%s;' % name)
def handle_charref(self, name):
self.fed.append('&#%s;' % name)
def get_data(self):
return ''.join(self.fed)
def _strip_once(value):
"""
Internal tag stripping utility used by strip_tags.
"""
s = MLStripper()
try:
s.feed(value)
except HTMLParseError:
return value
try:
s.close()
except (HTMLParseError, UnboundLocalError) as err:
# UnboundLocalError because of http://bugs.python.org/issue17802
# on Python 3.2, triggered by strict=False mode of HTMLParser
return s.get_data() + s.rawdata
else:
return s.get_data()
def strip_tags(value):
"""Returns the given HTML with all tags stripped."""
# Note: in typical case this loop executes _strip_once once. Loop condition
# is redundant, but helps to reduce number of executions of _strip_once.
while '<' in value and '>' in value:
new_value = _strip_once(value)
if len(new_value) >= len(value):
# _strip_once was not able to detect more tags or length increased
# due to http://bugs.python.org/issue20288
# (affects Python 2 < 2.7.7 and Python 3 < 3.3.5)
break
value = new_value
return value
strip_tags = allow_lazy(strip_tags)
def remove_tags(html, tags):
"""Returns the given HTML with given tags removed."""
tags = [re.escape(tag) for tag in tags.split()]
tags_re = '(%s)' % '|'.join(tags)
starttag_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % tags_re, re.U)
endtag_re = re.compile('</%s>' % tags_re)
html = starttag_re.sub('', html)
html = endtag_re.sub('', html)
return html
remove_tags = allow_lazy(remove_tags, six.text_type)
def strip_spaces_between_tags(value):
"""Returns the given HTML with spaces between tags removed."""
return re.sub(r'>\s+<', '><', force_text(value))
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
def strip_entities(value):
"""Returns the given HTML with all entities (&something;) stripped."""
return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
strip_entities = allow_lazy(strip_entities, six.text_type)
def fix_ampersands(value):
"""Returns the given HTML with all unencoded ampersands encoded correctly."""
    return unencoded_ampersands_re.sub('&amp;', force_text(value))
fix_ampersands = allow_lazy(fix_ampersands, six.text_type)
def smart_urlquote(url):
"Quotes a URL if it isn't already quoted."
# Handle IDN before quoting.
try:
scheme, netloc, path, query, fragment = urlsplit(url)
try:
netloc = netloc.encode('idna').decode('ascii') # IDN -> ACE
except UnicodeError: # invalid domain part
pass
else:
url = urlunsplit((scheme, netloc, path, query, fragment))
except ValueError:
# invalid IPv6 URL (normally square brackets in hostname part).
pass
url = unquote(force_str(url))
# See http://bugs.python.org/issue2637
url = quote(url, safe=b'!*\'();:@&=+$,/?#[]~')
return force_text(url)
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
"""
Converts any URLs in text into clickable links.
Works on http://, https://, www. links, and also on links ending in one of
the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
Links can have trailing punctuation (periods, commas, close-parens) and
leading punctuation (opening parens) and it'll still do the right thing.
If trim_url_limit is not None, the URLs in link text longer than this limit
    will be truncated to trim_url_limit-3 characters and appended with an ellipsis.
If nofollow is True, the URLs in link text will get a rel="nofollow"
attribute.
If autoescape is True, the link text and URLs will get autoescaped.
"""
def trim_url(x, limit=trim_url_limit):
if limit is None or len(x) <= limit:
return x
return '%s...' % x[:max(0, limit - 3)]
safe_input = isinstance(text, SafeData)
words = word_split_re.split(force_text(text))
for i, word in enumerate(words):
match = None
if '.' in word or '@' in word or ':' in word:
# Deal with punctuation.
lead, middle, trail = '', word, ''
for punctuation in TRAILING_PUNCTUATION:
if middle.endswith(punctuation):
middle = middle[:-len(punctuation)]
trail = punctuation + trail
for opening, closing in WRAPPING_PUNCTUATION:
if middle.startswith(opening):
middle = middle[len(opening):]
lead = lead + opening
# Keep parentheses at the end only if they're balanced.
if (middle.endswith(closing)
and middle.count(closing) == middle.count(opening) + 1):
middle = middle[:-len(closing)]
trail = closing + trail
# Make URL we want to point to.
url = None
nofollow_attr = ' rel="nofollow"' if nofollow else ''
if simple_url_re.match(middle):
url = smart_urlquote(middle)
elif simple_url_2_re.match(middle):
url = smart_urlquote('http://%s' % middle)
elif not ':' in middle and simple_email_re.match(middle):
local, domain = middle.rsplit('@', 1)
try:
domain = domain.encode('idna').decode('ascii')
except UnicodeError:
continue
url = 'mailto:%s@%s' % (local, domain)
nofollow_attr = ''
# Make link.
if url:
trimmed = trim_url(middle)
if autoescape and not safe_input:
lead, trail = escape(lead), escape(trail)
url, trimmed = escape(url), escape(trimmed)
middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
else:
if safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
elif safe_input:
words[i] = mark_safe(word)
elif autoescape:
words[i] = escape(word)
return ''.join(words)
urlize = allow_lazy(urlize, six.text_type)
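# Illustrative sketch (not part of this module): urlize wraps bare URLs and
# e-mail addresses found in plain text in <a> tags; the sample text below is
# made up for the example.
def _example_urlize_note():
    """Return a short urlized string with rel="nofollow" links."""
    return urlize('See www.example.com or mail [email protected].', nofollow=True)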
def clean_html(text):
"""
Clean the given HTML. Specifically, do the following:
* Convert <b> and <i> to <strong> and <em>.
* Encode all ampersands correctly.
* Remove all "target" attributes from <a> tags.
* Remove extraneous HTML, such as presentational tags that open and
immediately close and <br clear="all">.
* Convert hard-coded bullets into HTML unordered lists.
* Remove stuff like "<p> </p>", but only if it's at the
bottom of the text.
"""
from django.utils.text import normalize_newlines
text = normalize_newlines(force_text(text))
text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
text = fix_ampersands(text)
# Remove all target="" attributes from <a> tags.
text = link_target_attribute_re.sub('\\1', text)
# Trim stupid HTML such as <br clear="all">.
text = html_gunk_re.sub('', text)
# Convert hard-coded bullets into HTML unordered lists.
def replace_p_tags(match):
s = match.group().replace('</p>', '</li>')
for d in DOTS:
s = s.replace('<p>%s' % d, '<li>')
return '<ul>\n%s\n</ul>' % s
text = hard_coded_bullets_re.sub(replace_p_tags, text)
# Remove stuff like "<p> </p>", but only if it's at the bottom
# of the text.
text = trailing_empty_content_re.sub('', text)
return text
clean_html = allow_lazy(clean_html, six.text_type)
def avoid_wrapping(value):
"""
Avoid text wrapping in the middle of a phrase by adding non-breaking
spaces where there previously were normal spaces.
"""
return value.replace(" ", "\xa0")
|
|
# This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import operator
import threading
from mapproxy.grid import bbox_intersects, bbox_contains
from mapproxy.util.py import cached_property
from mapproxy.util.geom import (
require_geom_support,
load_polygon_lines,
transform_geometry,
bbox_polygon,
)
from mapproxy.srs import SRS
import logging
from functools import reduce
log_config = logging.getLogger('mapproxy.config.coverage')
try:
import shapely.geometry
import shapely.prepared
except ImportError:
# missing Shapely is handled by require_geom_support
pass
def coverage(geom, srs):
if isinstance(geom, (list, tuple)):
return BBOXCoverage(geom, srs)
else:
return GeomCoverage(geom, srs)
def load_limited_to(limited_to):
require_geom_support()
srs = SRS(limited_to['srs'])
geom = limited_to['geometry']
if not hasattr(geom, 'type'): # not a Shapely geometry
if isinstance(geom, (list, tuple)):
geom = bbox_polygon(geom)
else:
polygons = load_polygon_lines(geom.split('\n'))
if len(polygons) == 1:
geom = polygons[0]
else:
geom = shapely.geometry.MultiPolygon(polygons)
return GeomCoverage(geom, srs, clip=True)
class MultiCoverage(object):
    """Aggregates multiple coverages"""
    clip = False
def __init__(self, coverages):
self.coverages = coverages
self.bbox = self.extent.bbox
@cached_property
def extent(self):
return reduce(operator.add, [c.extent for c in self.coverages])
def intersects(self, bbox, srs):
return any(c.intersects(bbox, srs) for c in self.coverages)
def contains(self, bbox, srs):
return any(c.contains(bbox, srs) for c in self.coverages)
def transform_to(self, srs):
return MultiCoverage([c.transform_to(srs) for c in self.coverages])
def __eq__(self, other):
if not isinstance(other, MultiCoverage):
return NotImplemented
if self.bbox != other.bbox:
return False
if len(self.coverages) != len(other.coverages):
return False
for a, b in zip(self.coverages, other.coverages):
if a != b:
return False
return True
def __ne__(self, other):
if not isinstance(other, MultiCoverage):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return '<MultiCoverage %r: %r>' % (self.extent.llbbox, self.coverages)
class BBOXCoverage(object):
clip = False
def __init__(self, bbox, srs):
self.bbox = bbox
self.srs = srs
self.geom = None
@property
def extent(self):
from mapproxy.layer import MapExtent
return MapExtent(self.bbox, self.srs)
def _bbox_in_coverage_srs(self, bbox, srs):
if srs != self.srs:
bbox = srs.transform_bbox_to(self.srs, bbox)
return bbox
def intersects(self, bbox, srs):
bbox = self._bbox_in_coverage_srs(bbox, srs)
return bbox_intersects(self.bbox, bbox)
def intersection(self, bbox, srs):
bbox = self._bbox_in_coverage_srs(bbox, srs)
intersection = (
max(self.bbox[0], bbox[0]),
max(self.bbox[1], bbox[1]),
min(self.bbox[2], bbox[2]),
min(self.bbox[3], bbox[3]),
)
if intersection[0] >= intersection[2] or intersection[1] >= intersection[3]:
return None
return BBOXCoverage(intersection, self.srs)
def contains(self, bbox, srs):
bbox = self._bbox_in_coverage_srs(bbox, srs)
return bbox_contains(self.bbox, bbox)
def transform_to(self, srs):
if srs == self.srs:
return self
bbox = self.srs.transform_bbox_to(srs, self.bbox)
return BBOXCoverage(bbox, srs)
def __eq__(self, other):
if not isinstance(other, BBOXCoverage):
return NotImplemented
if self.srs != other.srs:
return False
if self.bbox != other.bbox:
return False
return True
def __ne__(self, other):
if not isinstance(other, BBOXCoverage):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return '<BBOXCoverage %r/%r>' % (self.extent.llbbox, self.bbox)
class GeomCoverage(object):
def __init__(self, geom, srs, clip=False):
self.geom = geom
self.bbox = geom.bounds
self.srs = srs
self.clip = clip
self._prep_lock = threading.Lock()
self._prepared_geom = None
self._prepared_counter = 0
self._prepared_max = 10000
@property
def extent(self):
from mapproxy.layer import MapExtent
return MapExtent(self.bbox, self.srs)
@property
def prepared_geom(self):
# GEOS internal data structure for prepared geometries grows over time,
# recreate to limit memory consumption
if not self._prepared_geom or self._prepared_counter > self._prepared_max:
self._prepared_geom = shapely.prepared.prep(self.geom)
self._prepared_counter = 0
self._prepared_counter += 1
return self._prepared_geom
def _geom_in_coverage_srs(self, geom, srs):
if isinstance(geom, shapely.geometry.base.BaseGeometry):
if srs != self.srs:
geom = transform_geometry(srs, self.srs, geom)
elif len(geom) == 2:
if srs != self.srs:
geom = srs.transform_to(self.srs, geom)
geom = shapely.geometry.Point(geom)
else:
if srs != self.srs:
geom = srs.transform_bbox_to(self.srs, geom)
geom = bbox_polygon(geom)
return geom
def transform_to(self, srs):
if srs == self.srs:
return self
geom = transform_geometry(self.srs, srs, self.geom)
return GeomCoverage(geom, srs)
def intersects(self, bbox, srs):
bbox = self._geom_in_coverage_srs(bbox, srs)
with self._prep_lock:
return self.prepared_geom.intersects(bbox)
def intersection(self, bbox, srs):
bbox = self._geom_in_coverage_srs(bbox, srs)
return GeomCoverage(self.geom.intersection(bbox), self.srs)
def contains(self, bbox, srs):
bbox = self._geom_in_coverage_srs(bbox, srs)
with self._prep_lock:
return self.prepared_geom.contains(bbox)
def __eq__(self, other):
if not isinstance(other, GeomCoverage):
return NotImplemented
if self.srs != other.srs:
return False
if self.bbox != other.bbox:
return False
if not self.geom.equals(other.geom):
return False
return True
def __ne__(self, other):
if not isinstance(other, GeomCoverage):
return NotImplemented
return not self.__eq__(other)
def __repr__(self):
return '<GeomCoverage %r: %r>' % (self.extent.llbbox, self.geom)
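# Illustrative sketch (not part of MapProxy): building a simple BBOX coverage
# through the coverage() factory and testing a tile bbox against it; the
# coordinates are made up for the example.
def _example_coverage_check():
    """Return True if the sample tile bbox intersects the sample coverage."""
    cov = coverage([5.0, 50.0, 10.0, 55.0], SRS(4326))
    return cov.intersects((6.0, 51.0, 7.0, 52.0), SRS(4326))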
|
|
from __future__ import unicode_literals
import base64
import logging
import string
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sessions.exceptions import SuspiciousSession
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.crypto import (
constant_time_compare, get_random_string, salted_hmac,
)
from django.utils.encoding import force_bytes, force_text
from django.utils.module_loading import import_string
# session_key should not be case sensitive because some backends can store it
# on case insensitive file systems.
VALID_KEY_CHARS = string.ascii_lowercase + string.digits
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class UpdateError(Exception):
"""
Occurs if Django tries to update a session that was deleted.
"""
pass
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
__not_given = object()
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
self.serializer = import_string(settings.SESSION_SERIALIZER)
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, default=__not_given):
self.modified = self.modified or key in self._session
args = () if default is self.__not_given else (default,)
return self._session.pop(key, *args)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def _hash(self, value):
key_salt = "django.contrib.sessions" + self.__class__.__name__
return salted_hmac(key_salt, value).hexdigest()
def encode(self, session_dict):
"Returns the given session dictionary serialized and encoded as a string."
serialized = self.serializer().dumps(session_dict)
hash = self._hash(serialized)
return base64.b64encode(hash.encode() + b":" + serialized).decode('ascii')
def decode(self, session_data):
encoded_data = base64.b64decode(force_bytes(session_data))
try:
# could produce ValueError if there is no ':'
hash, serialized = encoded_data.split(b':', 1)
expected_hash = self._hash(serialized)
if not constant_time_compare(hash.decode(), expected_hash):
raise SuspiciousSession("Session data corrupted")
else:
return self.serializer().loads(serialized)
except Exception as e:
# ValueError, SuspiciousOperation, unpickling exceptions. If any of
# these happen, just return an empty dictionary (an empty session).
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' % e.__class__.__name__)
logger.warning(force_text(e))
return {}
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return key in self._session
def keys(self):
return self._session.keys()
def values(self):
return self._session.values()
def items(self):
return self._session.items()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def is_empty(self):
"Returns True when there is no session_key and the session is empty"
try:
return not bool(self._session_key) and not self._session_cache
except AttributeError:
return True
def _get_new_session_key(self):
"Returns session key that isn't being used."
while True:
session_key = get_random_string(32, VALID_KEY_CHARS)
if not self.exists(session_key):
break
return session_key
def _get_or_create_session_key(self):
if self._session_key is None:
self._session_key = self._get_new_session_key()
return self._session_key
def _validate_session_key(self, key):
"""
Key must be truthy and at least 8 characters long. 8 characters is an
arbitrary lower bound for some minimal key security.
"""
return key and len(key) >= 8
def _get_session_key(self):
return self.__session_key
def _set_session_key(self, value):
"""
        Validate session key on assignment. Invalid values get set to None.
"""
if self._validate_session_key(value):
self.__session_key = value
else:
self.__session_key = None
session_key = property(_get_session_key)
_session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self.session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self, **kwargs):
"""Get the number of seconds until the session expires.
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Make the difference between "expiry=None passed in kwargs" and
# "expiry not passed in kwargs", in order to guarantee not to trigger
# self.load() when expiry is provided.
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - modification
return delta.days * 86400 + delta.seconds
def get_expiry_date(self, **kwargs):
"""Get session the expiry date (as a datetime object).
Optionally, this function accepts `modification` and `expiry` keyword
arguments specifying the modification and expiry of the session.
"""
try:
modification = kwargs['modification']
except KeyError:
modification = timezone.now()
# Same comment as in get_expiry_age
try:
expiry = kwargs['expiry']
except KeyError:
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return modification + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = timezone.now() + value
self['_session_expiry'] = value
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self._session_key = None
def cycle_key(self):
"""
Creates a new session key, while retaining the current session data.
"""
try:
data = self._session_cache
except AttributeError:
data = {}
key = self.session_key
self.create()
self._session_cache = data
if key:
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError('subclasses of SessionBase must provide an exists() method')
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError('subclasses of SessionBase must provide a create() method')
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() only updates an existing object and does not create one
(an UpdateError is raised).
"""
raise NotImplementedError('subclasses of SessionBase must provide a save() method')
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError('subclasses of SessionBase must provide a delete() method')
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError('subclasses of SessionBase must provide a load() method')
@classmethod
def clear_expired(cls):
"""
Remove expired sessions from the session store.
If this operation isn't possible on a given backend, it should raise
NotImplementedError. If it isn't necessary, because the backend has
a built-in expiration mechanism, it should be a no-op.
"""
raise NotImplementedError('This backend does not support clear_expired().')
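# Illustrative sketch (not part of this module): typical use of the expiry
# helpers defined above with a concrete backend. The database backend import is
# an assumption for the example, and Django settings must be configured first.
def _example_session_expiry():
    """Create a DB-backed session that expires after one hour of inactivity."""
    from django.contrib.sessions.backends.db import SessionStore
    session = SessionStore()
    session['cart_id'] = 42
    session.set_expiry(3600)
    session.save()
    return session.get_expiry_age()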
|
|
from abpytools.features.regions import ChainDomains
from collections import Counter
import numpy as np
from matplotlib import pyplot as plt
from abpytools.utils.data_loader import DataLoader
import os
from abpytools.utils import PythonConfig
amino_acid_index = {"R": 0,
"N": 1,
"D": 2,
"E": 3,
"Q": 4,
"K": 5,
"S": 6,
"T": 7,
"C": 8,
"H": 9,
"M": 10,
"A": 11,
"V": 12,
"G": 13,
"I": 14,
"L": 15,
"F": 16,
"P": 17,
"W": 18,
"Y": 19}
class AminoAcidFreq(ChainDomains):
def __init__(self, antibody_objects=None, path=None, region='CDR3', load=False):
super(AminoAcidFreq, self).__init__(antibody_objects=antibody_objects, path=path, load=load)
regions = ['all', 'CDRs', 'FRs', 'FR1', 'FR2', 'FR3', 'FR4', 'CDR1', 'CDR2', 'CDR3']
if region in regions:
# get the sequence for the specified region
self.region = region
if self.region.startswith('CDR'):
self._sequences = [self.cdr_sequences()[name][self.region] for name in self.names]
data_loader = DataLoader(data_type='CDR_positions', data=['chothia', self.chain])
self._numbering = data_loader.get_data()[self.region]
elif self.region.startswith('FR'):
self._sequences = [self.framework_sequences()[name][self.region] for name in self.names]
# TODO: implement 'all'
elif self.region == 'all':
raise NotImplementedError("This is not the code you're looking for.")
else:
            raise ValueError('Parameter region must be either: {}. Not {}'.format(', '.join(regions), region))
self._sequence_count = len(max(self._sequences, key=len))
self._aa_freq = np.zeros((20, self._sequence_count))
self._aa_hyd_freq = np.zeros((3, self._sequence_count))
self._aa_chg_freq = np.zeros((3, self._sequence_count))
self._aa_count = np.zeros((20, self._sequence_count))
self._aa_hyd_count = np.zeros((3, self._sequence_count))
self._aa_chg_count = np.zeros((3, self._sequence_count))
def _amino_acid_freq(self, normalize):
# if the sum of self._aa_count is zero then the count has not been performed at this point
if self._aa_count.sum() == 0:
for position in range(len(max(self._sequences, key=len))):
position_sequence = [x[position] for x in self._sequences if len(x) > position]
count_i = Counter(position_sequence)
total_i = len(position_sequence)
for amino_acid_i in count_i.keys():
self._aa_count[amino_acid_index[amino_acid_i], position] = count_i[amino_acid_i]
# _aa_hyd_freq: row1 -> hydrophilic
# row2 -> moderate
# row3 -> hydrophobic
if amino_acid_i in ['R', 'N', 'D', 'E', 'Q', 'K', 'S', 'T']:
self._aa_hyd_count[0, position] += count_i[amino_acid_i]
elif amino_acid_i in ['C', 'H', 'M']:
self._aa_hyd_count[1, position] += count_i[amino_acid_i]
else:
self._aa_hyd_count[2, position] += count_i[amino_acid_i]
# _aa_chg_freq: row1 -> negative
# row2 -> positive
# row3 -> neutral
if amino_acid_i in ['D', 'E']:
self._aa_chg_count[0, position] += count_i[amino_acid_i]
elif amino_acid_i in ['R', 'K', 'H']:
self._aa_chg_count[1, position] += count_i[amino_acid_i]
else:
self._aa_chg_count[2, position] += count_i[amino_acid_i]
# normalize values
# doing it even when it is not required comes at a small computational cost
# it would take longer if the user had to recalculate everything to have a count plot and then a
# frequency plot
self._aa_freq[:, position] = self._aa_count[:, position] / total_i
self._aa_chg_freq[:, position] = self._aa_chg_count[:, position] / total_i
self._aa_hyd_freq[:, position] = self._aa_hyd_count[:, position] / total_i
if normalize:
return self._aa_freq, self._aa_chg_freq, self._aa_hyd_freq
else:
return self._aa_count, self._aa_chg_count, self._aa_hyd_count
def plot(self, sort_by='name', normalize=True, display_count=True, plot_path='./',
plot_name='AminoAcidFrequency.png', notebook_plot=True):
ipython_config = PythonConfig()
if ipython_config.matplotlib_interactive is False and ipython_config.ipython_info == 'notebook':
plt.ion()
if sort_by not in ['name', 'hydropathy', 'charge']:
raise ValueError("Argument for sort_by not valid. Valid arguments are name, hydrophobicity and charge")
# get count/ freq matrices
# to avoid writing more code than necessary the count and freq are stored in the same variable
# since they will always be plotted independently
aa, chg, hyd = self._amino_acid_freq(normalize=normalize)
fig = plt.figure(1, figsize=(8, 8))
ax = fig.add_subplot(111)
for position in range(self._aa_freq.shape[1]):
previous = 0
# 20 distinct colors
colors = ["#023fa5", "#7d87b9", "#bec1d4", "#d6bcc0", "#bb7784", "#8e063b", "#4a6fe3", "#8595e1",
"#b5bbe3", "#e6afb9", "#e07b91", "#d33f6a", "#11c638", "#8dd593", "#c6dec7", "#ead3c6",
"#f0b98d", "#ef9708", "#0fcfc0", "#9cded6"]
if sort_by == 'name':
# previous, lgd = self.plot_helper(ax=ax, colors=colors,
# title='amino acids',
# keys=sorted(amino_acid_index.keys()),
# position=position, data=aa,
# previous=previous)
ax.set_title(self.region + ' amino acids', size=20)
for i, amino_acid in enumerate(sorted(amino_acid_index.keys())):
c = colors[i]
ax.bar(position, aa[amino_acid_index[amino_acid], position], bottom=previous,
label=amino_acid, color=c, align='center')
previous += aa[amino_acid_index[amino_acid], position]
lgd = ax.legend(sorted(amino_acid_index.keys()), loc='center left', bbox_to_anchor=(1, 0.5),
prop={"size": 16})
elif sort_by == 'hydropathy':
previous, lgd = self.plot_helper(ax=ax, colors=['b', 'r', 'k'],
title=sort_by,
keys=['Hydrophilic', 'Moderate',
'Hydrophobic'],
position=position, data=hyd,
previous=previous)
else:
previous, lgd = self.plot_helper(ax=ax, colors=['b', 'r', 'k'],
title='amino acid charge',
keys=['Negative', 'Positive', 'Neutral'],
position=position, data=chg,
previous=previous)
if display_count:
for position in range(aa.shape[1]):
ax.text(x=position, y=aa[:, position].sum(),
s=str(int(self._aa_count[:, position].sum())),
rotation=45, ha='center', va='bottom')
if normalize:
ax.set_ylabel('Frequency', size=16)
else:
ax.set_ylabel('Count', size=16)
ax.set_xticks(np.arange(len(self._numbering)))
ax.set_xticklabels(self._numbering, rotation=60)
ax.set_xlabel('Position', size=16)
ax.set_ylim([0, aa.sum(0).max()*1.1])
ax.margins(0.02)
ax.grid(axis='y')
if ipython_config.ipython_info == 'notebook' and notebook_plot:
ax.plot()
else:
fig.savefig(os.path.join(plot_path, plot_name), bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def plot_helper(self, ax, colors, title, keys, position, data, previous):
ax.set_title('{} {}'.format(self.region, title), size=20)
for i, prop_i in enumerate(keys):
c = colors[i]
ax.bar(position, data[i, position], bottom=previous, label=prop_i, color=c, align='center')
previous += data[i, position]
lgd = ax.legend(keys, loc='center left', bbox_to_anchor=(1, 0.5),
prop={"size": 16})
return previous, lgd
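# Illustrative sketch (not part of abpytools): computing and plotting CDR3 amino
# acid frequencies. The file path, its format and the load flag are assumptions
# for the example, not a documented abpytools workflow.
def _example_cdr3_frequencies():
    """Plot CDR3 amino acid frequencies sorted by hydropathy."""
    freq = AminoAcidFreq(path='antibodies.json', region='CDR3', load=True)
    freq.plot(sort_by='hydropathy', normalize=True, plot_name='cdr3_freq.png')
    return freq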
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for http://oss.sgi.com/projects/ogl-sample/ABI/glxext.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import ctypes
from ctypes import *
from pyglet.gl.lib import link_GLX as _link_function
from pyglet.gl.lib import c_ptrdiff_t
if not hasattr(ctypes, 'c_int64'):
# XXX TODO completely wrong, but at least can import.
# Can c_longlong still be used?
c_int64 = c_long
c_uint64 = c_ulong
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for http://www.opengl.org/registry/api/glxext.h
import pyglet.libs.x11.xlib
import pyglet.gl.glx
# VERSION_1_3 (/usr/include/GL/glx.h:73)
# VERSION_1_4 (/usr/include/GL/glx.h:132)
# ARB_get_proc_address (/usr/include/GL/glx.h:137)
# VERSION_1_1 (/usr/include/GL/glx.h:208)
# VERSION_1_2 (/usr/include/GL/glx.h:221)
# VERSION_1_3 (/usr/include/GL/glx.h:229)
# VERSION_1_4 (/usr/include/GL/glx.h:301)
# ARB_get_proc_address (/usr/include/GL/glx.h:317)
# GLXEXT_LEGACY (/usr/include/GL/glx.h:349)
GLX_GLXEXT_VERSION = 21 # GL/glxext.h:51
# VERSION_1_3 (GL/glxext.h:53)
# VERSION_1_4 (GL/glxext.h:112)
# ARB_get_proc_address (GL/glxext.h:117)
# ARB_multisample (GL/glxext.h:120)
GLX_SAMPLE_BUFFERS_ARB = 100000 # GL/glxext.h:121
GLX_SAMPLES_ARB = 100001 # GL/glxext.h:122
# ARB_fbconfig_float (GL/glxext.h:125)
GLX_RGBA_FLOAT_TYPE_ARB = 8377 # GL/glxext.h:126
GLX_RGBA_FLOAT_BIT_ARB = 4 # GL/glxext.h:127
# ARB_create_context (GL/glxext.h:130)
GLX_CONTEXT_DEBUG_BIT_ARB = 1 # GL/glxext.h:131
GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB = 2 # GL/glxext.h:132
GLX_CONTEXT_MAJOR_VERSION_ARB = 8337 # GL/glxext.h:133
GLX_CONTEXT_MINOR_VERSION_ARB = 8338 # GL/glxext.h:134
GLX_CONTEXT_FLAGS_ARB = 8340 # GL/glxext.h:135
# SGIS_multisample (GL/glxext.h:138)
GLX_SAMPLE_BUFFERS_SGIS = 100000 # GL/glxext.h:139
GLX_SAMPLES_SGIS = 100001 # GL/glxext.h:140
# EXT_visual_info (GL/glxext.h:143)
GLX_X_VISUAL_TYPE_EXT = 34 # GL/glxext.h:144
GLX_TRANSPARENT_TYPE_EXT = 35 # GL/glxext.h:145
GLX_TRANSPARENT_INDEX_VALUE_EXT = 36 # GL/glxext.h:146
GLX_TRANSPARENT_RED_VALUE_EXT = 37 # GL/glxext.h:147
GLX_TRANSPARENT_GREEN_VALUE_EXT = 38 # GL/glxext.h:148
GLX_TRANSPARENT_BLUE_VALUE_EXT = 39 # GL/glxext.h:149
GLX_TRANSPARENT_ALPHA_VALUE_EXT = 40 # GL/glxext.h:150
GLX_NONE_EXT = 32768 # GL/glxext.h:151
GLX_TRUE_COLOR_EXT = 32770 # GL/glxext.h:152
GLX_DIRECT_COLOR_EXT = 32771 # GL/glxext.h:153
GLX_PSEUDO_COLOR_EXT = 32772 # GL/glxext.h:154
GLX_STATIC_COLOR_EXT = 32773 # GL/glxext.h:155
GLX_GRAY_SCALE_EXT = 32774 # GL/glxext.h:156
GLX_STATIC_GRAY_EXT = 32775 # GL/glxext.h:157
GLX_TRANSPARENT_RGB_EXT = 32776 # GL/glxext.h:158
GLX_TRANSPARENT_INDEX_EXT = 32777 # GL/glxext.h:159
# SGI_swap_control (GL/glxext.h:162)
# SGI_video_sync (GL/glxext.h:165)
# SGI_make_current_read (GL/glxext.h:168)
# SGIX_video_source (GL/glxext.h:171)
# EXT_visual_rating (GL/glxext.h:174)
GLX_VISUAL_CAVEAT_EXT = 32 # GL/glxext.h:175
GLX_SLOW_VISUAL_EXT = 32769 # GL/glxext.h:176
GLX_NON_CONFORMANT_VISUAL_EXT = 32781 # GL/glxext.h:177
# EXT_import_context (GL/glxext.h:181)
GLX_SHARE_CONTEXT_EXT = 32778 # GL/glxext.h:182
GLX_VISUAL_ID_EXT = 32779 # GL/glxext.h:183
GLX_SCREEN_EXT = 32780 # GL/glxext.h:184
# SGIX_fbconfig (GL/glxext.h:187)
GLX_WINDOW_BIT_SGIX = 1 # GL/glxext.h:188
GLX_PIXMAP_BIT_SGIX = 2 # GL/glxext.h:189
GLX_RGBA_BIT_SGIX = 1 # GL/glxext.h:190
GLX_COLOR_INDEX_BIT_SGIX = 2 # GL/glxext.h:191
GLX_DRAWABLE_TYPE_SGIX = 32784 # GL/glxext.h:192
GLX_RENDER_TYPE_SGIX = 32785 # GL/glxext.h:193
GLX_X_RENDERABLE_SGIX = 32786 # GL/glxext.h:194
GLX_FBCONFIG_ID_SGIX = 32787 # GL/glxext.h:195
GLX_RGBA_TYPE_SGIX = 32788 # GL/glxext.h:196
GLX_COLOR_INDEX_TYPE_SGIX = 32789 # GL/glxext.h:197
# SGIX_pbuffer (GL/glxext.h:201)
GLX_PBUFFER_BIT_SGIX = 4 # GL/glxext.h:202
GLX_BUFFER_CLOBBER_MASK_SGIX = 134217728 # GL/glxext.h:203
GLX_FRONT_LEFT_BUFFER_BIT_SGIX = 1 # GL/glxext.h:204
GLX_FRONT_RIGHT_BUFFER_BIT_SGIX = 2 # GL/glxext.h:205
GLX_BACK_LEFT_BUFFER_BIT_SGIX = 4 # GL/glxext.h:206
GLX_BACK_RIGHT_BUFFER_BIT_SGIX = 8 # GL/glxext.h:207
GLX_AUX_BUFFERS_BIT_SGIX = 16 # GL/glxext.h:208
GLX_DEPTH_BUFFER_BIT_SGIX = 32 # GL/glxext.h:209
GLX_STENCIL_BUFFER_BIT_SGIX = 64 # GL/glxext.h:210
GLX_ACCUM_BUFFER_BIT_SGIX = 128 # GL/glxext.h:211
GLX_SAMPLE_BUFFERS_BIT_SGIX = 256 # GL/glxext.h:212
GLX_MAX_PBUFFER_WIDTH_SGIX = 32790 # GL/glxext.h:213
GLX_MAX_PBUFFER_HEIGHT_SGIX = 32791 # GL/glxext.h:214
GLX_MAX_PBUFFER_PIXELS_SGIX = 32792 # GL/glxext.h:215
GLX_OPTIMAL_PBUFFER_WIDTH_SGIX = 32793 # GL/glxext.h:216
GLX_OPTIMAL_PBUFFER_HEIGHT_SGIX = 32794 # GL/glxext.h:217
GLX_PRESERVED_CONTENTS_SGIX = 32795 # GL/glxext.h:218
GLX_LARGEST_PBUFFER_SGIX = 32796 # GL/glxext.h:219
GLX_WIDTH_SGIX = 32797 # GL/glxext.h:220
GLX_HEIGHT_SGIX = 32798 # GL/glxext.h:221
GLX_EVENT_MASK_SGIX = 32799 # GL/glxext.h:222
GLX_DAMAGED_SGIX = 32800 # GL/glxext.h:223
GLX_SAVED_SGIX = 32801 # GL/glxext.h:224
GLX_WINDOW_SGIX = 32802 # GL/glxext.h:225
GLX_PBUFFER_SGIX = 32803 # GL/glxext.h:226
# SGI_cushion (GL/glxext.h:229)
# SGIX_video_resize (GL/glxext.h:232)
GLX_SYNC_FRAME_SGIX = 0 # GL/glxext.h:233
GLX_SYNC_SWAP_SGIX = 1 # GL/glxext.h:234
# SGIX_dmbuffer (GL/glxext.h:237)
GLX_DIGITAL_MEDIA_PBUFFER_SGIX = 32804 # GL/glxext.h:238
# SGIX_swap_group (GL/glxext.h:241)
# SGIX_swap_barrier (GL/glxext.h:244)
# SGIS_blended_overlay (GL/glxext.h:247)
GLX_BLENDED_RGBA_SGIS = 32805 # GL/glxext.h:248
# SGIS_shared_multisample (GL/glxext.h:251)
GLX_MULTISAMPLE_SUB_RECT_WIDTH_SGIS = 32806 # GL/glxext.h:252
GLX_MULTISAMPLE_SUB_RECT_HEIGHT_SGIS = 32807 # GL/glxext.h:253
# SUN_get_transparent_index (GL/glxext.h:256)
# 3DFX_multisample (GL/glxext.h:259)
GLX_SAMPLE_BUFFERS_3DFX = 32848 # GL/glxext.h:260
GLX_SAMPLES_3DFX = 32849 # GL/glxext.h:261
# MESA_copy_sub_buffer (GL/glxext.h:264)
# MESA_pixmap_colormap (GL/glxext.h:267)
# MESA_release_buffers (GL/glxext.h:270)
# MESA_set_3dfx_mode (GL/glxext.h:273)
GLX_3DFX_WINDOW_MODE_MESA = 1 # GL/glxext.h:274
GLX_3DFX_FULLSCREEN_MODE_MESA = 2 # GL/glxext.h:275
# SGIX_visual_select_group (GL/glxext.h:278)
GLX_VISUAL_SELECT_GROUP_SGIX = 32808 # GL/glxext.h:279
# OML_swap_method (GL/glxext.h:282)
GLX_SWAP_METHOD_OML = 32864 # GL/glxext.h:283
GLX_SWAP_EXCHANGE_OML = 32865 # GL/glxext.h:284
GLX_SWAP_COPY_OML = 32866 # GL/glxext.h:285
GLX_SWAP_UNDEFINED_OML = 32867 # GL/glxext.h:286
# OML_sync_control (GL/glxext.h:289)
# NV_float_buffer (GL/glxext.h:292)
GLX_FLOAT_COMPONENTS_NV = 8368 # GL/glxext.h:293
# SGIX_hyperpipe (GL/glxext.h:296)
GLX_HYPERPIPE_PIPE_NAME_LENGTH_SGIX = 80 # GL/glxext.h:297
GLX_BAD_HYPERPIPE_CONFIG_SGIX = 91 # GL/glxext.h:298
GLX_BAD_HYPERPIPE_SGIX = 92 # GL/glxext.h:299
GLX_HYPERPIPE_DISPLAY_PIPE_SGIX = 1 # GL/glxext.h:300
GLX_HYPERPIPE_RENDER_PIPE_SGIX = 2 # GL/glxext.h:301
GLX_PIPE_RECT_SGIX = 1 # GL/glxext.h:302
GLX_PIPE_RECT_LIMITS_SGIX = 2 # GL/glxext.h:303
GLX_HYPERPIPE_STEREO_SGIX = 3 # GL/glxext.h:304
GLX_HYPERPIPE_PIXEL_AVERAGE_SGIX = 4 # GL/glxext.h:305
GLX_HYPERPIPE_ID_SGIX = 32816 # GL/glxext.h:306
# MESA_agp_offset (GL/glxext.h:309)
# EXT_fbconfig_packed_float (GL/glxext.h:312)
GLX_RGBA_UNSIGNED_FLOAT_TYPE_EXT = 8369 # GL/glxext.h:313
GLX_RGBA_UNSIGNED_FLOAT_BIT_EXT = 8 # GL/glxext.h:314
# EXT_framebuffer_sRGB (GL/glxext.h:317)
GLX_FRAMEBUFFER_SRGB_CAPABLE_EXT = 8370 # GL/glxext.h:318
# EXT_texture_from_pixmap (GL/glxext.h:321)
GLX_TEXTURE_1D_BIT_EXT = 1 # GL/glxext.h:322
GLX_TEXTURE_2D_BIT_EXT = 2 # GL/glxext.h:323
GLX_TEXTURE_RECTANGLE_BIT_EXT = 4 # GL/glxext.h:324
GLX_BIND_TO_TEXTURE_RGB_EXT = 8400 # GL/glxext.h:325
GLX_BIND_TO_TEXTURE_RGBA_EXT = 8401 # GL/glxext.h:326
GLX_BIND_TO_MIPMAP_TEXTURE_EXT = 8402 # GL/glxext.h:327
GLX_BIND_TO_TEXTURE_TARGETS_EXT = 8403 # GL/glxext.h:328
GLX_Y_INVERTED_EXT = 8404 # GL/glxext.h:329
GLX_TEXTURE_FORMAT_EXT = 8405 # GL/glxext.h:330
GLX_TEXTURE_TARGET_EXT = 8406 # GL/glxext.h:331
GLX_MIPMAP_TEXTURE_EXT = 8407 # GL/glxext.h:332
GLX_TEXTURE_FORMAT_NONE_EXT = 8408 # GL/glxext.h:333
GLX_TEXTURE_FORMAT_RGB_EXT = 8409 # GL/glxext.h:334
GLX_TEXTURE_FORMAT_RGBA_EXT = 8410 # GL/glxext.h:335
GLX_TEXTURE_1D_EXT = 8411 # GL/glxext.h:336
GLX_TEXTURE_2D_EXT = 8412 # GL/glxext.h:337
GLX_TEXTURE_RECTANGLE_EXT = 8413 # GL/glxext.h:338
GLX_FRONT_LEFT_EXT = 8414 # GL/glxext.h:339
GLX_FRONT_RIGHT_EXT = 8415 # GL/glxext.h:340
GLX_BACK_LEFT_EXT = 8416 # GL/glxext.h:341
GLX_BACK_RIGHT_EXT = 8417 # GL/glxext.h:342
GLX_FRONT_EXT = 8414 # GL/glxext.h:343
GLX_BACK_EXT = 8416 # GL/glxext.h:344
GLX_AUX0_EXT = 8418 # GL/glxext.h:345
GLX_AUX1_EXT = 8419 # GL/glxext.h:346
GLX_AUX2_EXT = 8420 # GL/glxext.h:347
GLX_AUX3_EXT = 8421 # GL/glxext.h:348
GLX_AUX4_EXT = 8422 # GL/glxext.h:349
GLX_AUX5_EXT = 8423 # GL/glxext.h:350
GLX_AUX6_EXT = 8424 # GL/glxext.h:351
GLX_AUX7_EXT = 8425 # GL/glxext.h:352
GLX_AUX8_EXT = 8426 # GL/glxext.h:353
GLX_AUX9_EXT = 8427 # GL/glxext.h:354
# NV_present_video (GL/glxext.h:357)
GLX_NUM_VIDEO_SLOTS_NV = 8432 # GL/glxext.h:358
# NV_video_out (GL/glxext.h:361)
GLX_VIDEO_OUT_COLOR_NV = 8387 # GL/glxext.h:362
GLX_VIDEO_OUT_ALPHA_NV = 8388 # GL/glxext.h:363
GLX_VIDEO_OUT_DEPTH_NV = 8389 # GL/glxext.h:364
GLX_VIDEO_OUT_COLOR_AND_ALPHA_NV = 8390 # GL/glxext.h:365
GLX_VIDEO_OUT_COLOR_AND_DEPTH_NV = 8391 # GL/glxext.h:366
GLX_VIDEO_OUT_FRAME_NV = 8392 # GL/glxext.h:367
GLX_VIDEO_OUT_FIELD_1_NV = 8393 # GL/glxext.h:368
GLX_VIDEO_OUT_FIELD_2_NV = 8394 # GL/glxext.h:369
GLX_VIDEO_OUT_STACKED_FIELDS_1_2_NV = 8395 # GL/glxext.h:370
GLX_VIDEO_OUT_STACKED_FIELDS_2_1_NV = 8396 # GL/glxext.h:371
# NV_swap_group (GL/glxext.h:374)
# ARB_get_proc_address (GL/glxext.h:380)
# SGIX_video_source (GL/glxext.h:384)
XID = pyglet.libs.x11.xlib.XID
GLXVideoSourceSGIX = XID # GL/glxext.h:385
# SGIX_fbconfig (GL/glxext.h:388)
GLXFBConfigIDSGIX = XID # GL/glxext.h:389
class struct___GLXFBConfigRec(Structure):
__slots__ = [
]
struct___GLXFBConfigRec._fields_ = [
('_opaque_struct', c_int)
]
GLXFBConfigSGIX = POINTER(struct___GLXFBConfigRec) # GL/glxext.h:390
# SGIX_pbuffer (GL/glxext.h:393)
GLXPbufferSGIX = XID # GL/glxext.h:394
class struct_anon_103(Structure):
__slots__ = [
'type',
'serial',
'send_event',
'display',
'drawable',
'event_type',
'draw_type',
'mask',
'x',
'y',
'width',
'height',
'count',
]
Display = pyglet.libs.x11.xlib.Display
GLXDrawable = pyglet.gl.glx.GLXDrawable
struct_anon_103._fields_ = [
('type', c_int),
('serial', c_ulong),
('send_event', c_int),
('display', POINTER(Display)),
('drawable', GLXDrawable),
('event_type', c_int),
('draw_type', c_int),
('mask', c_uint),
('x', c_int),
('y', c_int),
('width', c_int),
('height', c_int),
('count', c_int),
]
GLXBufferClobberEventSGIX = struct_anon_103 # GL/glxext.h:407
# VERSION_1_3 (GL/glxext.h:447)
# VERSION_1_4 (GL/glxext.h:489)
# ARB_get_proc_address (GL/glxext.h:497)
# ARB_multisample (GL/glxext.h:505)
GLX_ARB_multisample = 1 # GL/glxext.h:506
# ARB_fbconfig_float (GL/glxext.h:509)
GLX_ARB_fbconfig_float = 1 # GL/glxext.h:510
# ARB_create_context (GL/glxext.h:513)
GLX_ARB_create_context = 1 # GL/glxext.h:514
GLXContext = pyglet.gl.glx.GLXContext
GLXFBConfig = pyglet.gl.glx.GLXFBConfig
# GL/glxext.h:516
glXCreateContextAttribsARB = _link_function('glXCreateContextAttribsARB', GLXContext, [POINTER(Display), GLXFBConfig, GLXContext, c_int, POINTER(c_int)], 'ARB_create_context')
PFNGLXCREATECONTEXTATTRIBSARBPROC = CFUNCTYPE(GLXContext, POINTER(Display), GLXFBConfig, GLXContext, c_int, POINTER(c_int)) # GL/glxext.h:518
# SGIS_multisample (GL/glxext.h:521)
GLX_SGIS_multisample = 1 # GL/glxext.h:522
# EXT_visual_info (GL/glxext.h:525)
GLX_EXT_visual_info = 1 # GL/glxext.h:526
# SGI_swap_control (GL/glxext.h:529)
GLX_SGI_swap_control = 1 # GL/glxext.h:530
# GL/glxext.h:532
glXSwapIntervalSGI = _link_function('glXSwapIntervalSGI', c_int, [c_int], 'SGI_swap_control')
PFNGLXSWAPINTERVALSGIPROC = CFUNCTYPE(c_int, c_int) # GL/glxext.h:534
# SGI_video_sync (GL/glxext.h:537)
GLX_SGI_video_sync = 1 # GL/glxext.h:538
# GL/glxext.h:540
glXGetVideoSyncSGI = _link_function('glXGetVideoSyncSGI', c_int, [POINTER(c_uint)], 'SGI_video_sync')
# GL/glxext.h:541
glXWaitVideoSyncSGI = _link_function('glXWaitVideoSyncSGI', c_int, [c_int, c_int, POINTER(c_uint)], 'SGI_video_sync')
PFNGLXGETVIDEOSYNCSGIPROC = CFUNCTYPE(c_int, POINTER(c_uint)) # GL/glxext.h:543
PFNGLXWAITVIDEOSYNCSGIPROC = CFUNCTYPE(c_int, c_int, c_int, POINTER(c_uint)) # GL/glxext.h:544
# SGI_make_current_read (GL/glxext.h:547)
GLX_SGI_make_current_read = 1 # GL/glxext.h:548
# GL/glxext.h:550
glXMakeCurrentReadSGI = _link_function('glXMakeCurrentReadSGI', c_int, [POINTER(Display), GLXDrawable, GLXDrawable, GLXContext], 'SGI_make_current_read')
# GL/glxext.h:551
glXGetCurrentReadDrawableSGI = _link_function('glXGetCurrentReadDrawableSGI', GLXDrawable, [], 'SGI_make_current_read')
PFNGLXMAKECURRENTREADSGIPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, GLXDrawable, GLXContext) # GL/glxext.h:553
PFNGLXGETCURRENTREADDRAWABLESGIPROC = CFUNCTYPE(GLXDrawable) # GL/glxext.h:554
# SGIX_video_source (GL/glxext.h:557)
GLX_SGIX_video_source = 1 # GL/glxext.h:558
# EXT_visual_rating (GL/glxext.h:569)
GLX_EXT_visual_rating = 1 # GL/glxext.h:570
# EXT_import_context (GL/glxext.h:573)
GLX_EXT_import_context = 1 # GL/glxext.h:574
# GL/glxext.h:576
glXGetCurrentDisplayEXT = _link_function('glXGetCurrentDisplayEXT', POINTER(Display), [], 'EXT_import_context')
# GL/glxext.h:577
glXQueryContextInfoEXT = _link_function('glXQueryContextInfoEXT', c_int, [POINTER(Display), GLXContext, c_int, POINTER(c_int)], 'EXT_import_context')
GLXContextID = pyglet.gl.glx.GLXContextID
# GL/glxext.h:578
glXGetContextIDEXT = _link_function('glXGetContextIDEXT', GLXContextID, [GLXContext], 'EXT_import_context')
# GL/glxext.h:579
glXImportContextEXT = _link_function('glXImportContextEXT', GLXContext, [POINTER(Display), GLXContextID], 'EXT_import_context')
# GL/glxext.h:580
glXFreeContextEXT = _link_function('glXFreeContextEXT', None, [POINTER(Display), GLXContext], 'EXT_import_context')
PFNGLXGETCURRENTDISPLAYEXTPROC = CFUNCTYPE(POINTER(Display)) # GL/glxext.h:582
PFNGLXQUERYCONTEXTINFOEXTPROC = CFUNCTYPE(c_int, POINTER(Display), GLXContext, c_int, POINTER(c_int)) # GL/glxext.h:583
PFNGLXGETCONTEXTIDEXTPROC = CFUNCTYPE(GLXContextID, GLXContext) # GL/glxext.h:584
PFNGLXIMPORTCONTEXTEXTPROC = CFUNCTYPE(GLXContext, POINTER(Display), GLXContextID) # GL/glxext.h:585
PFNGLXFREECONTEXTEXTPROC = CFUNCTYPE(None, POINTER(Display), GLXContext) # GL/glxext.h:586
# SGIX_fbconfig (GL/glxext.h:589)
GLX_SGIX_fbconfig = 1 # GL/glxext.h:590
# GL/glxext.h:592
glXGetFBConfigAttribSGIX = _link_function('glXGetFBConfigAttribSGIX', c_int, [POINTER(Display), GLXFBConfigSGIX, c_int, POINTER(c_int)], 'SGIX_fbconfig')
# GL/glxext.h:593
glXChooseFBConfigSGIX = _link_function('glXChooseFBConfigSGIX', POINTER(GLXFBConfigSGIX), [POINTER(Display), c_int, POINTER(c_int), POINTER(c_int)], 'SGIX_fbconfig')
GLXPixmap = pyglet.gl.glx.GLXPixmap
Pixmap = pyglet.libs.x11.xlib.Pixmap
# GL/glxext.h:594
glXCreateGLXPixmapWithConfigSGIX = _link_function('glXCreateGLXPixmapWithConfigSGIX', GLXPixmap, [POINTER(Display), GLXFBConfigSGIX, Pixmap], 'SGIX_fbconfig')
# GL/glxext.h:595
glXCreateContextWithConfigSGIX = _link_function('glXCreateContextWithConfigSGIX', GLXContext, [POINTER(Display), GLXFBConfigSGIX, c_int, GLXContext, c_int], 'SGIX_fbconfig')
XVisualInfo = pyglet.libs.x11.xlib.XVisualInfo
# GL/glxext.h:596
glXGetVisualFromFBConfigSGIX = _link_function('glXGetVisualFromFBConfigSGIX', POINTER(XVisualInfo), [POINTER(Display), GLXFBConfigSGIX], 'SGIX_fbconfig')
# GL/glxext.h:597
glXGetFBConfigFromVisualSGIX = _link_function('glXGetFBConfigFromVisualSGIX', GLXFBConfigSGIX, [POINTER(Display), POINTER(XVisualInfo)], 'SGIX_fbconfig')
PFNGLXGETFBCONFIGATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), GLXFBConfigSGIX, c_int, POINTER(c_int)) # GL/glxext.h:599
PFNGLXCHOOSEFBCONFIGSGIXPROC = CFUNCTYPE(POINTER(GLXFBConfigSGIX), POINTER(Display), c_int, POINTER(c_int), POINTER(c_int)) # GL/glxext.h:600
PFNGLXCREATEGLXPIXMAPWITHCONFIGSGIXPROC = CFUNCTYPE(GLXPixmap, POINTER(Display), GLXFBConfigSGIX, Pixmap) # GL/glxext.h:601
PFNGLXCREATECONTEXTWITHCONFIGSGIXPROC = CFUNCTYPE(GLXContext, POINTER(Display), GLXFBConfigSGIX, c_int, GLXContext, c_int) # GL/glxext.h:602
PFNGLXGETVISUALFROMFBCONFIGSGIXPROC = CFUNCTYPE(POINTER(XVisualInfo), POINTER(Display), GLXFBConfigSGIX) # GL/glxext.h:603
PFNGLXGETFBCONFIGFROMVISUALSGIXPROC = CFUNCTYPE(GLXFBConfigSGIX, POINTER(Display), POINTER(XVisualInfo)) # GL/glxext.h:604
# SGIX_pbuffer (GL/glxext.h:607)
GLX_SGIX_pbuffer = 1 # GL/glxext.h:608
# GL/glxext.h:610
glXCreateGLXPbufferSGIX = _link_function('glXCreateGLXPbufferSGIX', GLXPbufferSGIX, [POINTER(Display), GLXFBConfigSGIX, c_uint, c_uint, POINTER(c_int)], 'SGIX_pbuffer')
# GL/glxext.h:611
glXDestroyGLXPbufferSGIX = _link_function('glXDestroyGLXPbufferSGIX', None, [POINTER(Display), GLXPbufferSGIX], 'SGIX_pbuffer')
# GL/glxext.h:612
glXQueryGLXPbufferSGIX = _link_function('glXQueryGLXPbufferSGIX', c_int, [POINTER(Display), GLXPbufferSGIX, c_int, POINTER(c_uint)], 'SGIX_pbuffer')
# GL/glxext.h:613
glXSelectEventSGIX = _link_function('glXSelectEventSGIX', None, [POINTER(Display), GLXDrawable, c_ulong], 'SGIX_pbuffer')
# GL/glxext.h:614
glXGetSelectedEventSGIX = _link_function('glXGetSelectedEventSGIX', None, [POINTER(Display), GLXDrawable, POINTER(c_ulong)], 'SGIX_pbuffer')
PFNGLXCREATEGLXPBUFFERSGIXPROC = CFUNCTYPE(GLXPbufferSGIX, POINTER(Display), GLXFBConfigSGIX, c_uint, c_uint, POINTER(c_int)) # GL/glxext.h:616
PFNGLXDESTROYGLXPBUFFERSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXPbufferSGIX) # GL/glxext.h:617
PFNGLXQUERYGLXPBUFFERSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), GLXPbufferSGIX, c_int, POINTER(c_uint)) # GL/glxext.h:618
PFNGLXSELECTEVENTSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_ulong) # GL/glxext.h:619
PFNGLXGETSELECTEDEVENTSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, POINTER(c_ulong)) # GL/glxext.h:620
# SGI_cushion (GL/glxext.h:623)
GLX_SGI_cushion = 1 # GL/glxext.h:624
Window = pyglet.libs.x11.xlib.Window
# GL/glxext.h:626
glXCushionSGI = _link_function('glXCushionSGI', None, [POINTER(Display), Window, c_float], 'SGI_cushion')
PFNGLXCUSHIONSGIPROC = CFUNCTYPE(None, POINTER(Display), Window, c_float) # GL/glxext.h:628
# SGIX_video_resize (GL/glxext.h:631)
GLX_SGIX_video_resize = 1 # GL/glxext.h:632
# GL/glxext.h:634
glXBindChannelToWindowSGIX = _link_function('glXBindChannelToWindowSGIX', c_int, [POINTER(Display), c_int, c_int, Window], 'SGIX_video_resize')
# GL/glxext.h:635
glXChannelRectSGIX = _link_function('glXChannelRectSGIX', c_int, [POINTER(Display), c_int, c_int, c_int, c_int, c_int, c_int], 'SGIX_video_resize')
# GL/glxext.h:636
glXQueryChannelRectSGIX = _link_function('glXQueryChannelRectSGIX', c_int, [POINTER(Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)], 'SGIX_video_resize')
# GL/glxext.h:637
glXQueryChannelDeltasSGIX = _link_function('glXQueryChannelDeltasSGIX', c_int, [POINTER(Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)], 'SGIX_video_resize')
GLenum = c_uint # /usr/include/GL/gl.h:53
# GL/glxext.h:638
glXChannelRectSyncSGIX = _link_function('glXChannelRectSyncSGIX', c_int, [POINTER(Display), c_int, c_int, GLenum], 'SGIX_video_resize')
PFNGLXBINDCHANNELTOWINDOWSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, Window) # GL/glxext.h:640
PFNGLXCHANNELRECTSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, c_int, c_int, c_int, c_int) # GL/glxext.h:641
PFNGLXQUERYCHANNELRECTSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)) # GL/glxext.h:642
PFNGLXQUERYCHANNELDELTASSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(c_int), POINTER(c_int), POINTER(c_int), POINTER(c_int)) # GL/glxext.h:643
PFNGLXCHANNELRECTSYNCSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, GLenum) # GL/glxext.h:644
# SGIX_dmbuffer (GL/glxext.h:647)
GLX_SGIX_dmbuffer = 1 # GL/glxext.h:648
# SGIX_swap_group (GL/glxext.h:657)
GLX_SGIX_swap_group = 1 # GL/glxext.h:658
# GL/glxext.h:660
glXJoinSwapGroupSGIX = _link_function('glXJoinSwapGroupSGIX', None, [POINTER(Display), GLXDrawable, GLXDrawable], 'SGIX_swap_group')
PFNGLXJOINSWAPGROUPSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, GLXDrawable) # GL/glxext.h:662
# SGIX_swap_barrier (GL/glxext.h:665)
GLX_SGIX_swap_barrier = 1 # GL/glxext.h:666
# GL/glxext.h:668
glXBindSwapBarrierSGIX = _link_function('glXBindSwapBarrierSGIX', None, [POINTER(Display), GLXDrawable, c_int], 'SGIX_swap_barrier')
# GL/glxext.h:669
glXQueryMaxSwapBarriersSGIX = _link_function('glXQueryMaxSwapBarriersSGIX', c_int, [POINTER(Display), c_int, POINTER(c_int)], 'SGIX_swap_barrier')
PFNGLXBINDSWAPBARRIERSGIXPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_int) # GL/glxext.h:671
PFNGLXQUERYMAXSWAPBARRIERSSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, POINTER(c_int)) # GL/glxext.h:672
# SUN_get_transparent_index (GL/glxext.h:675)
GLX_SUN_get_transparent_index = 1 # GL/glxext.h:676
# GL/glxext.h:678
glXGetTransparentIndexSUN = _link_function('glXGetTransparentIndexSUN', c_int, [POINTER(Display), Window, Window, POINTER(c_long)], 'SUN_get_transparent_index')
PFNGLXGETTRANSPARENTINDEXSUNPROC = CFUNCTYPE(c_int, POINTER(Display), Window, Window, POINTER(c_long)) # GL/glxext.h:680
# MESA_copy_sub_buffer (GL/glxext.h:683)
GLX_MESA_copy_sub_buffer = 1 # GL/glxext.h:684
# GL/glxext.h:686
glXCopySubBufferMESA = _link_function('glXCopySubBufferMESA', None, [POINTER(Display), GLXDrawable, c_int, c_int, c_int, c_int], 'MESA_copy_sub_buffer')
PFNGLXCOPYSUBBUFFERMESAPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_int, c_int, c_int, c_int) # GL/glxext.h:688
# MESA_pixmap_colormap (GL/glxext.h:691)
GLX_MESA_pixmap_colormap = 1 # GL/glxext.h:692
Colormap = pyglet.libs.x11.xlib.Colormap
# GL/glxext.h:694
glXCreateGLXPixmapMESA = _link_function('glXCreateGLXPixmapMESA', GLXPixmap, [POINTER(Display), POINTER(XVisualInfo), Pixmap, Colormap], 'MESA_pixmap_colormap')
PFNGLXCREATEGLXPIXMAPMESAPROC = CFUNCTYPE(GLXPixmap, POINTER(Display), POINTER(XVisualInfo), Pixmap, Colormap) # GL/glxext.h:696
# MESA_release_buffers (GL/glxext.h:699)
GLX_MESA_release_buffers = 1 # GL/glxext.h:700
# GL/glxext.h:702
glXReleaseBuffersMESA = _link_function('glXReleaseBuffersMESA', c_int, [POINTER(Display), GLXDrawable], 'MESA_release_buffers')
PFNGLXRELEASEBUFFERSMESAPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable) # GL/glxext.h:704
# MESA_set_3dfx_mode (GL/glxext.h:707)
GLX_MESA_set_3dfx_mode = 1 # GL/glxext.h:708
# GL/glxext.h:710
glXSet3DfxModeMESA = _link_function('glXSet3DfxModeMESA', c_int, [c_int], 'MESA_set_3dfx_mode')
PFNGLXSET3DFXMODEMESAPROC = CFUNCTYPE(c_int, c_int) # GL/glxext.h:712
# SGIX_visual_select_group (GL/glxext.h:715)
GLX_SGIX_visual_select_group = 1 # GL/glxext.h:716
# OML_swap_method (GL/glxext.h:719)
GLX_OML_swap_method = 1 # GL/glxext.h:720
# OML_sync_control (GL/glxext.h:723)
GLX_OML_sync_control = 1 # GL/glxext.h:724
# GL/glxext.h:726
glXGetSyncValuesOML = _link_function('glXGetSyncValuesOML', c_int, [POINTER(Display), GLXDrawable, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)], 'OML_sync_control')
# GL/glxext.h:727
glXGetMscRateOML = _link_function('glXGetMscRateOML', c_int, [POINTER(Display), GLXDrawable, POINTER(c_int32), POINTER(c_int32)], 'OML_sync_control')
# GL/glxext.h:728
glXSwapBuffersMscOML = _link_function('glXSwapBuffersMscOML', c_int64, [POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64], 'OML_sync_control')
# GL/glxext.h:729
glXWaitForMscOML = _link_function('glXWaitForMscOML', c_int, [POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)], 'OML_sync_control')
# GL/glxext.h:730
glXWaitForSbcOML = _link_function('glXWaitForSbcOML', c_int, [POINTER(Display), GLXDrawable, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)], 'OML_sync_control')
PFNGLXGETSYNCVALUESOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)) # GL/glxext.h:732
PFNGLXGETMSCRATEOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, POINTER(c_int32), POINTER(c_int32)) # GL/glxext.h:733
PFNGLXSWAPBUFFERSMSCOMLPROC = CFUNCTYPE(c_int64, POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64) # GL/glxext.h:734
PFNGLXWAITFORMSCOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, c_int64, c_int64, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)) # GL/glxext.h:735
PFNGLXWAITFORSBCOMLPROC = CFUNCTYPE(c_int, POINTER(Display), GLXDrawable, c_int64, POINTER(c_int64), POINTER(c_int64), POINTER(c_int64)) # GL/glxext.h:736
# NV_float_buffer (GL/glxext.h:739)
GLX_NV_float_buffer = 1 # GL/glxext.h:740
# SGIX_hyperpipe (GL/glxext.h:743)
GLX_SGIX_hyperpipe = 1 # GL/glxext.h:744
class struct_anon_105(Structure):
__slots__ = [
'pipeName',
'networkId',
]
struct_anon_105._fields_ = [
('pipeName', c_char * 80),
('networkId', c_int),
]
GLXHyperpipeNetworkSGIX = struct_anon_105 # GL/glxext.h:749
class struct_anon_106(Structure):
__slots__ = [
'pipeName',
'channel',
'participationType',
'timeSlice',
]
struct_anon_106._fields_ = [
('pipeName', c_char * 80),
('channel', c_int),
('participationType', c_uint),
('timeSlice', c_int),
]
GLXHyperpipeConfigSGIX = struct_anon_106 # GL/glxext.h:757
class struct_anon_107(Structure):
__slots__ = [
'pipeName',
'srcXOrigin',
'srcYOrigin',
'srcWidth',
'srcHeight',
'destXOrigin',
'destYOrigin',
'destWidth',
'destHeight',
]
struct_anon_107._fields_ = [
('pipeName', c_char * 80),
('srcXOrigin', c_int),
('srcYOrigin', c_int),
('srcWidth', c_int),
('srcHeight', c_int),
('destXOrigin', c_int),
('destYOrigin', c_int),
('destWidth', c_int),
('destHeight', c_int),
]
GLXPipeRect = struct_anon_107 # GL/glxext.h:763
class struct_anon_108(Structure):
__slots__ = [
'pipeName',
'XOrigin',
'YOrigin',
'maxHeight',
'maxWidth',
]
struct_anon_108._fields_ = [
('pipeName', c_char * 80),
('XOrigin', c_int),
('YOrigin', c_int),
('maxHeight', c_int),
('maxWidth', c_int),
]
GLXPipeRectLimits = struct_anon_108 # GL/glxext.h:768
# GL/glxext.h:771
glXQueryHyperpipeNetworkSGIX = _link_function('glXQueryHyperpipeNetworkSGIX', POINTER(GLXHyperpipeNetworkSGIX), [POINTER(Display), POINTER(c_int)], 'SGIX_hyperpipe')
# GL/glxext.h:772
glXHyperpipeConfigSGIX = _link_function('glXHyperpipeConfigSGIX', c_int, [POINTER(Display), c_int, c_int, POINTER(GLXHyperpipeConfigSGIX), POINTER(c_int)], 'SGIX_hyperpipe')
# GL/glxext.h:773
glXQueryHyperpipeConfigSGIX = _link_function('glXQueryHyperpipeConfigSGIX', POINTER(GLXHyperpipeConfigSGIX), [POINTER(Display), c_int, POINTER(c_int)], 'SGIX_hyperpipe')
# GL/glxext.h:774
glXDestroyHyperpipeConfigSGIX = _link_function('glXDestroyHyperpipeConfigSGIX', c_int, [POINTER(Display), c_int], 'SGIX_hyperpipe')
# GL/glxext.h:775
glXBindHyperpipeSGIX = _link_function('glXBindHyperpipeSGIX', c_int, [POINTER(Display), c_int], 'SGIX_hyperpipe')
# GL/glxext.h:776
glXQueryHyperpipeBestAttribSGIX = _link_function('glXQueryHyperpipeBestAttribSGIX', c_int, [POINTER(Display), c_int, c_int, c_int, POINTER(None), POINTER(None)], 'SGIX_hyperpipe')
# GL/glxext.h:777
glXHyperpipeAttribSGIX = _link_function('glXHyperpipeAttribSGIX', c_int, [POINTER(Display), c_int, c_int, c_int, POINTER(None)], 'SGIX_hyperpipe')
# GL/glxext.h:778
glXQueryHyperpipeAttribSGIX = _link_function('glXQueryHyperpipeAttribSGIX', c_int, [POINTER(Display), c_int, c_int, c_int, POINTER(None)], 'SGIX_hyperpipe')
PFNGLXQUERYHYPERPIPENETWORKSGIXPROC = CFUNCTYPE(POINTER(GLXHyperpipeNetworkSGIX), POINTER(Display), POINTER(c_int)) # GL/glxext.h:780
PFNGLXHYPERPIPECONFIGSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, POINTER(GLXHyperpipeConfigSGIX), POINTER(c_int)) # GL/glxext.h:781
PFNGLXQUERYHYPERPIPECONFIGSGIXPROC = CFUNCTYPE(POINTER(GLXHyperpipeConfigSGIX), POINTER(Display), c_int, POINTER(c_int)) # GL/glxext.h:782
PFNGLXDESTROYHYPERPIPECONFIGSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int) # GL/glxext.h:783
PFNGLXBINDHYPERPIPESGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int) # GL/glxext.h:784
PFNGLXQUERYHYPERPIPEBESTATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, c_int, POINTER(None), POINTER(None)) # GL/glxext.h:785
PFNGLXHYPERPIPEATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, c_int, POINTER(None)) # GL/glxext.h:786
PFNGLXQUERYHYPERPIPEATTRIBSGIXPROC = CFUNCTYPE(c_int, POINTER(Display), c_int, c_int, c_int, POINTER(None)) # GL/glxext.h:787
# MESA_agp_offset (GL/glxext.h:790)
GLX_MESA_agp_offset = 1 # GL/glxext.h:791
# GL/glxext.h:793
glXGetAGPOffsetMESA = _link_function('glXGetAGPOffsetMESA', c_uint, [POINTER(None)], 'MESA_agp_offset')
PFNGLXGETAGPOFFSETMESAPROC = CFUNCTYPE(c_uint, POINTER(None)) # GL/glxext.h:795
# EXT_fbconfig_packed_float (GL/glxext.h:798)
GLX_EXT_fbconfig_packed_float = 1 # GL/glxext.h:799
# EXT_framebuffer_sRGB (GL/glxext.h:802)
GLX_EXT_framebuffer_sRGB = 1 # GL/glxext.h:803
# EXT_texture_from_pixmap (GL/glxext.h:806)
GLX_EXT_texture_from_pixmap = 1 # GL/glxext.h:807
# GL/glxext.h:809
glXBindTexImageEXT = _link_function('glXBindTexImageEXT', None, [POINTER(Display), GLXDrawable, c_int, POINTER(c_int)], 'EXT_texture_from_pixmap')
# GL/glxext.h:810
glXReleaseTexImageEXT = _link_function('glXReleaseTexImageEXT', None, [POINTER(Display), GLXDrawable, c_int], 'EXT_texture_from_pixmap')
PFNGLXBINDTEXIMAGEEXTPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_int, POINTER(c_int)) # GL/glxext.h:812
PFNGLXRELEASETEXIMAGEEXTPROC = CFUNCTYPE(None, POINTER(Display), GLXDrawable, c_int) # GL/glxext.h:813
# NV_present_video (GL/glxext.h:816)
GLX_NV_present_video = 1 # GL/glxext.h:817
# NV_video_out (GL/glxext.h:820)
GLX_NV_video_out = 1 # GL/glxext.h:821
# NV_swap_group (GL/glxext.h:824)
GLX_NV_swap_group = 1 # GL/glxext.h:825
__all__ = ['GLX_GLXEXT_VERSION', 'GLX_SAMPLE_BUFFERS_ARB', 'GLX_SAMPLES_ARB',
'GLX_RGBA_FLOAT_TYPE_ARB', 'GLX_RGBA_FLOAT_BIT_ARB',
'GLX_CONTEXT_DEBUG_BIT_ARB', 'GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB',
'GLX_CONTEXT_MAJOR_VERSION_ARB', 'GLX_CONTEXT_MINOR_VERSION_ARB',
'GLX_CONTEXT_FLAGS_ARB', 'GLX_SAMPLE_BUFFERS_SGIS', 'GLX_SAMPLES_SGIS',
'GLX_X_VISUAL_TYPE_EXT', 'GLX_TRANSPARENT_TYPE_EXT',
'GLX_TRANSPARENT_INDEX_VALUE_EXT', 'GLX_TRANSPARENT_RED_VALUE_EXT',
'GLX_TRANSPARENT_GREEN_VALUE_EXT', 'GLX_TRANSPARENT_BLUE_VALUE_EXT',
'GLX_TRANSPARENT_ALPHA_VALUE_EXT', 'GLX_NONE_EXT', 'GLX_TRUE_COLOR_EXT',
'GLX_DIRECT_COLOR_EXT', 'GLX_PSEUDO_COLOR_EXT', 'GLX_STATIC_COLOR_EXT',
'GLX_GRAY_SCALE_EXT', 'GLX_STATIC_GRAY_EXT', 'GLX_TRANSPARENT_RGB_EXT',
'GLX_TRANSPARENT_INDEX_EXT', 'GLX_VISUAL_CAVEAT_EXT', 'GLX_SLOW_VISUAL_EXT',
'GLX_NON_CONFORMANT_VISUAL_EXT', 'GLX_SHARE_CONTEXT_EXT', 'GLX_VISUAL_ID_EXT',
'GLX_SCREEN_EXT', 'GLX_WINDOW_BIT_SGIX', 'GLX_PIXMAP_BIT_SGIX',
'GLX_RGBA_BIT_SGIX', 'GLX_COLOR_INDEX_BIT_SGIX', 'GLX_DRAWABLE_TYPE_SGIX',
'GLX_RENDER_TYPE_SGIX', 'GLX_X_RENDERABLE_SGIX', 'GLX_FBCONFIG_ID_SGIX',
'GLX_RGBA_TYPE_SGIX', 'GLX_COLOR_INDEX_TYPE_SGIX', 'GLX_PBUFFER_BIT_SGIX',
'GLX_BUFFER_CLOBBER_MASK_SGIX', 'GLX_FRONT_LEFT_BUFFER_BIT_SGIX',
'GLX_FRONT_RIGHT_BUFFER_BIT_SGIX', 'GLX_BACK_LEFT_BUFFER_BIT_SGIX',
'GLX_BACK_RIGHT_BUFFER_BIT_SGIX', 'GLX_AUX_BUFFERS_BIT_SGIX',
'GLX_DEPTH_BUFFER_BIT_SGIX', 'GLX_STENCIL_BUFFER_BIT_SGIX',
'GLX_ACCUM_BUFFER_BIT_SGIX', 'GLX_SAMPLE_BUFFERS_BIT_SGIX',
'GLX_MAX_PBUFFER_WIDTH_SGIX', 'GLX_MAX_PBUFFER_HEIGHT_SGIX',
'GLX_MAX_PBUFFER_PIXELS_SGIX', 'GLX_OPTIMAL_PBUFFER_WIDTH_SGIX',
'GLX_OPTIMAL_PBUFFER_HEIGHT_SGIX', 'GLX_PRESERVED_CONTENTS_SGIX',
'GLX_LARGEST_PBUFFER_SGIX', 'GLX_WIDTH_SGIX', 'GLX_HEIGHT_SGIX',
'GLX_EVENT_MASK_SGIX', 'GLX_DAMAGED_SGIX', 'GLX_SAVED_SGIX',
'GLX_WINDOW_SGIX', 'GLX_PBUFFER_SGIX', 'GLX_SYNC_FRAME_SGIX',
'GLX_SYNC_SWAP_SGIX', 'GLX_DIGITAL_MEDIA_PBUFFER_SGIX',
'GLX_BLENDED_RGBA_SGIS', 'GLX_MULTISAMPLE_SUB_RECT_WIDTH_SGIS',
'GLX_MULTISAMPLE_SUB_RECT_HEIGHT_SGIS', 'GLX_SAMPLE_BUFFERS_3DFX',
'GLX_SAMPLES_3DFX', 'GLX_3DFX_WINDOW_MODE_MESA',
'GLX_3DFX_FULLSCREEN_MODE_MESA', 'GLX_VISUAL_SELECT_GROUP_SGIX',
'GLX_SWAP_METHOD_OML', 'GLX_SWAP_EXCHANGE_OML', 'GLX_SWAP_COPY_OML',
'GLX_SWAP_UNDEFINED_OML', 'GLX_FLOAT_COMPONENTS_NV',
'GLX_HYPERPIPE_PIPE_NAME_LENGTH_SGIX', 'GLX_BAD_HYPERPIPE_CONFIG_SGIX',
'GLX_BAD_HYPERPIPE_SGIX', 'GLX_HYPERPIPE_DISPLAY_PIPE_SGIX',
'GLX_HYPERPIPE_RENDER_PIPE_SGIX', 'GLX_PIPE_RECT_SGIX',
'GLX_PIPE_RECT_LIMITS_SGIX', 'GLX_HYPERPIPE_STEREO_SGIX',
'GLX_HYPERPIPE_PIXEL_AVERAGE_SGIX', 'GLX_HYPERPIPE_ID_SGIX',
'GLX_RGBA_UNSIGNED_FLOAT_TYPE_EXT', 'GLX_RGBA_UNSIGNED_FLOAT_BIT_EXT',
'GLX_FRAMEBUFFER_SRGB_CAPABLE_EXT', 'GLX_TEXTURE_1D_BIT_EXT',
'GLX_TEXTURE_2D_BIT_EXT', 'GLX_TEXTURE_RECTANGLE_BIT_EXT',
'GLX_BIND_TO_TEXTURE_RGB_EXT', 'GLX_BIND_TO_TEXTURE_RGBA_EXT',
'GLX_BIND_TO_MIPMAP_TEXTURE_EXT', 'GLX_BIND_TO_TEXTURE_TARGETS_EXT',
'GLX_Y_INVERTED_EXT', 'GLX_TEXTURE_FORMAT_EXT', 'GLX_TEXTURE_TARGET_EXT',
'GLX_MIPMAP_TEXTURE_EXT', 'GLX_TEXTURE_FORMAT_NONE_EXT',
'GLX_TEXTURE_FORMAT_RGB_EXT', 'GLX_TEXTURE_FORMAT_RGBA_EXT',
'GLX_TEXTURE_1D_EXT', 'GLX_TEXTURE_2D_EXT', 'GLX_TEXTURE_RECTANGLE_EXT',
'GLX_FRONT_LEFT_EXT', 'GLX_FRONT_RIGHT_EXT', 'GLX_BACK_LEFT_EXT',
'GLX_BACK_RIGHT_EXT', 'GLX_FRONT_EXT', 'GLX_BACK_EXT', 'GLX_AUX0_EXT',
'GLX_AUX1_EXT', 'GLX_AUX2_EXT', 'GLX_AUX3_EXT', 'GLX_AUX4_EXT',
'GLX_AUX5_EXT', 'GLX_AUX6_EXT', 'GLX_AUX7_EXT', 'GLX_AUX8_EXT',
'GLX_AUX9_EXT', 'GLX_NUM_VIDEO_SLOTS_NV', 'GLX_VIDEO_OUT_COLOR_NV',
'GLX_VIDEO_OUT_ALPHA_NV', 'GLX_VIDEO_OUT_DEPTH_NV',
'GLX_VIDEO_OUT_COLOR_AND_ALPHA_NV', 'GLX_VIDEO_OUT_COLOR_AND_DEPTH_NV',
'GLX_VIDEO_OUT_FRAME_NV', 'GLX_VIDEO_OUT_FIELD_1_NV',
'GLX_VIDEO_OUT_FIELD_2_NV', 'GLX_VIDEO_OUT_STACKED_FIELDS_1_2_NV',
'GLX_VIDEO_OUT_STACKED_FIELDS_2_1_NV', 'GLXVideoSourceSGIX',
'GLXFBConfigIDSGIX', 'GLXFBConfigSGIX', 'GLXPbufferSGIX',
'GLXBufferClobberEventSGIX', 'GLX_ARB_multisample', 'GLX_ARB_fbconfig_float',
'GLX_ARB_create_context', 'glXCreateContextAttribsARB',
'PFNGLXCREATECONTEXTATTRIBSARBPROC', 'GLX_SGIS_multisample',
'GLX_EXT_visual_info', 'GLX_SGI_swap_control', 'glXSwapIntervalSGI',
'PFNGLXSWAPINTERVALSGIPROC', 'GLX_SGI_video_sync', 'glXGetVideoSyncSGI',
'glXWaitVideoSyncSGI', 'PFNGLXGETVIDEOSYNCSGIPROC',
'PFNGLXWAITVIDEOSYNCSGIPROC', 'GLX_SGI_make_current_read',
'glXMakeCurrentReadSGI', 'glXGetCurrentReadDrawableSGI',
'PFNGLXMAKECURRENTREADSGIPROC', 'PFNGLXGETCURRENTREADDRAWABLESGIPROC',
'GLX_SGIX_video_source', 'GLX_EXT_visual_rating', 'GLX_EXT_import_context',
'glXGetCurrentDisplayEXT', 'glXQueryContextInfoEXT', 'glXGetContextIDEXT',
'glXImportContextEXT', 'glXFreeContextEXT', 'PFNGLXGETCURRENTDISPLAYEXTPROC',
'PFNGLXQUERYCONTEXTINFOEXTPROC', 'PFNGLXGETCONTEXTIDEXTPROC',
'PFNGLXIMPORTCONTEXTEXTPROC', 'PFNGLXFREECONTEXTEXTPROC', 'GLX_SGIX_fbconfig',
'glXGetFBConfigAttribSGIX', 'glXChooseFBConfigSGIX',
'glXCreateGLXPixmapWithConfigSGIX', 'glXCreateContextWithConfigSGIX',
'glXGetVisualFromFBConfigSGIX', 'glXGetFBConfigFromVisualSGIX',
'PFNGLXGETFBCONFIGATTRIBSGIXPROC', 'PFNGLXCHOOSEFBCONFIGSGIXPROC',
'PFNGLXCREATEGLXPIXMAPWITHCONFIGSGIXPROC',
'PFNGLXCREATECONTEXTWITHCONFIGSGIXPROC',
'PFNGLXGETVISUALFROMFBCONFIGSGIXPROC', 'PFNGLXGETFBCONFIGFROMVISUALSGIXPROC',
'GLX_SGIX_pbuffer', 'glXCreateGLXPbufferSGIX', 'glXDestroyGLXPbufferSGIX',
'glXQueryGLXPbufferSGIX', 'glXSelectEventSGIX', 'glXGetSelectedEventSGIX',
'PFNGLXCREATEGLXPBUFFERSGIXPROC', 'PFNGLXDESTROYGLXPBUFFERSGIXPROC',
'PFNGLXQUERYGLXPBUFFERSGIXPROC', 'PFNGLXSELECTEVENTSGIXPROC',
'PFNGLXGETSELECTEDEVENTSGIXPROC', 'GLX_SGI_cushion', 'glXCushionSGI',
'PFNGLXCUSHIONSGIPROC', 'GLX_SGIX_video_resize', 'glXBindChannelToWindowSGIX',
'glXChannelRectSGIX', 'glXQueryChannelRectSGIX', 'glXQueryChannelDeltasSGIX',
'glXChannelRectSyncSGIX', 'PFNGLXBINDCHANNELTOWINDOWSGIXPROC',
'PFNGLXCHANNELRECTSGIXPROC', 'PFNGLXQUERYCHANNELRECTSGIXPROC',
'PFNGLXQUERYCHANNELDELTASSGIXPROC', 'PFNGLXCHANNELRECTSYNCSGIXPROC',
'GLX_SGIX_dmbuffer', 'GLX_SGIX_swap_group', 'glXJoinSwapGroupSGIX',
'PFNGLXJOINSWAPGROUPSGIXPROC', 'GLX_SGIX_swap_barrier',
'glXBindSwapBarrierSGIX', 'glXQueryMaxSwapBarriersSGIX',
'PFNGLXBINDSWAPBARRIERSGIXPROC', 'PFNGLXQUERYMAXSWAPBARRIERSSGIXPROC',
'GLX_SUN_get_transparent_index', 'glXGetTransparentIndexSUN',
'PFNGLXGETTRANSPARENTINDEXSUNPROC', 'GLX_MESA_copy_sub_buffer',
'glXCopySubBufferMESA', 'PFNGLXCOPYSUBBUFFERMESAPROC',
'GLX_MESA_pixmap_colormap', 'glXCreateGLXPixmapMESA',
'PFNGLXCREATEGLXPIXMAPMESAPROC', 'GLX_MESA_release_buffers',
'glXReleaseBuffersMESA', 'PFNGLXRELEASEBUFFERSMESAPROC',
'GLX_MESA_set_3dfx_mode', 'glXSet3DfxModeMESA', 'PFNGLXSET3DFXMODEMESAPROC',
'GLX_SGIX_visual_select_group', 'GLX_OML_swap_method', 'GLX_OML_sync_control',
'glXGetSyncValuesOML', 'glXGetMscRateOML', 'glXSwapBuffersMscOML',
'glXWaitForMscOML', 'glXWaitForSbcOML', 'PFNGLXGETSYNCVALUESOMLPROC',
'PFNGLXGETMSCRATEOMLPROC', 'PFNGLXSWAPBUFFERSMSCOMLPROC',
'PFNGLXWAITFORMSCOMLPROC', 'PFNGLXWAITFORSBCOMLPROC', 'GLX_NV_float_buffer',
'GLX_SGIX_hyperpipe', 'GLXHyperpipeNetworkSGIX', 'GLXHyperpipeConfigSGIX',
'GLXPipeRect', 'GLXPipeRectLimits', 'glXQueryHyperpipeNetworkSGIX',
'glXHyperpipeConfigSGIX', 'glXQueryHyperpipeConfigSGIX',
'glXDestroyHyperpipeConfigSGIX', 'glXBindHyperpipeSGIX',
'glXQueryHyperpipeBestAttribSGIX', 'glXHyperpipeAttribSGIX',
'glXQueryHyperpipeAttribSGIX', 'PFNGLXQUERYHYPERPIPENETWORKSGIXPROC',
'PFNGLXHYPERPIPECONFIGSGIXPROC', 'PFNGLXQUERYHYPERPIPECONFIGSGIXPROC',
'PFNGLXDESTROYHYPERPIPECONFIGSGIXPROC', 'PFNGLXBINDHYPERPIPESGIXPROC',
'PFNGLXQUERYHYPERPIPEBESTATTRIBSGIXPROC', 'PFNGLXHYPERPIPEATTRIBSGIXPROC',
'PFNGLXQUERYHYPERPIPEATTRIBSGIXPROC', 'GLX_MESA_agp_offset',
'glXGetAGPOffsetMESA', 'PFNGLXGETAGPOFFSETMESAPROC',
'GLX_EXT_fbconfig_packed_float', 'GLX_EXT_framebuffer_sRGB',
'GLX_EXT_texture_from_pixmap', 'glXBindTexImageEXT', 'glXReleaseTexImageEXT',
'PFNGLXBINDTEXIMAGEEXTPROC', 'PFNGLXRELEASETEXIMAGEEXTPROC',
'GLX_NV_present_video', 'GLX_NV_video_out', 'GLX_NV_swap_group']
# END GENERATED CONTENT (do not edit above this line)
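# Illustrative sketch, appended after the generated block rather than inside it:
# how the zero-terminated attribute list expected by glXCreateContextAttribsARB
# is typically laid out. The Display/GLXFBConfig/GLXContext handles (dpy, config,
# share) are placeholders that would come from pyglet.gl.glx in real use, so the
# actual call is left commented out.
if __name__ == '__main__':
    attribs = (c_int * 5)(
        GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
        GLX_CONTEXT_MINOR_VERSION_ARB, 3,
        0,  # attribute lists are zero-terminated
    )
    # ctx = glXCreateContextAttribsARB(dpy, config, share, True, attribs)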
|
|
"""
The Geod class can perform forward and inverse geodetic, or
Great Circle, computations. The forward computation involves
determining latitude, longitude and back azimuth of a terminus
point given the latitude and longitude of an initial point, plus
azimuth and distance. The inverse computation involves
determining the forward and back azimuths and distance given the
latitudes and longitudes of an initial and terminus point.
"""
__all__ = ["Geod", "pj_ellps", "geodesic_version_str"]
import math
from typing import Any, Dict, List, Optional, Tuple, Union
from pyproj._geod import Geod as _Geod
from pyproj._geod import geodesic_version_str
from pyproj._list import get_ellps_map
from pyproj.exceptions import GeodError
from pyproj.utils import _convertback, _copytobuffer
pj_ellps = get_ellps_map()
class Geod(_Geod):
"""
performs forward and inverse geodetic, or Great Circle,
computations. The forward computation (using the 'fwd' method)
involves determining latitude, longitude and back azimuth of a
terminus point given the latitude and longitude of an initial
point, plus azimuth and distance. The inverse computation (using
the 'inv' method) involves determining the forward and back
azimuths and distance given the latitudes and longitudes of an
initial and terminus point.
Attributes
----------
initstring: str
The string form of the user input used to create the Geod.
sphere: bool
If True, it is a sphere.
a: float
The ellipsoid equatorial radius, or semi-major axis.
b: float
The ellipsoid polar radius, or semi-minor axis.
es: float
The 'eccentricity' of the ellipse, squared (1 - b**2/a**2).
f: float
The ellipsoid 'flattening' parameter ( (a-b)/a ).
"""
def __init__(self, initstring: Optional[str] = None, **kwargs) -> None:
"""
initialize a Geod class instance.
Geodetic parameters for specifying the ellipsoid
can be given as keyword arguments, or as a
proj geod initialization string (the 'initstring' argument).
You can get a dictionary of ellipsoids using :func:`pyproj.get_ellps_map`
or with the variable `pyproj.pj_ellps`.
The parameters of the ellipsoid may also be set directly using
the 'a' (semi-major or equatorial axis radius) keyword, and
any one of the following keywords: 'b' (semi-minor,
or polar axis radius), 'e' (eccentricity), 'es' (eccentricity
squared), 'f' (flattening), or 'rf' (reciprocal flattening).
See the proj documentation (https://proj.org) for more
information about specifying ellipsoid parameters.
Example usage:
>>> from pyproj import Geod
>>> g = Geod(ellps='clrk66') # Use Clarke 1866 ellipsoid.
>>> # specify the lat/lons of some cities.
>>> boston_lat = 42.+(15./60.); boston_lon = -71.-(7./60.)
>>> portland_lat = 45.+(31./60.); portland_lon = -123.-(41./60.)
>>> newyork_lat = 40.+(47./60.); newyork_lon = -73.-(58./60.)
>>> london_lat = 51.+(32./60.); london_lon = -(5./60.)
>>> # compute forward and back azimuths, plus distance
>>> # between Boston and Portland.
>>> az12,az21,dist = g.inv(boston_lon,boston_lat,portland_lon,portland_lat)
>>> f"{az12:.3f} {az21:.3f} {dist:.3f}"
'-66.531 75.654 4164192.708'
>>> # compute latitude, longitude and back azimuth of Portland,
>>> # given Boston lat/lon, forward azimuth and distance to Portland.
>>> endlon, endlat, backaz = g.fwd(boston_lon, boston_lat, az12, dist)
>>> f"{endlat:.3f} {endlon:.3f} {backaz:.3f}"
'45.517 -123.683 75.654'
>>> # compute the azimuths, distances from New York to several
>>> # cities (pass a list)
>>> lons1 = 3*[newyork_lon]; lats1 = 3*[newyork_lat]
>>> lons2 = [boston_lon, portland_lon, london_lon]
>>> lats2 = [boston_lat, portland_lat, london_lat]
>>> az12,az21,dist = g.inv(lons1,lats1,lons2,lats2)
>>> for faz, baz, d in list(zip(az12,az21,dist)):
... f"{faz:7.3f} {baz:8.3f} {d:12.3f}"
' 54.663 -123.448 288303.720'
'-65.463 79.342 4013037.318'
' 51.254 -71.576 5579916.651'
>>> g2 = Geod('+ellps=clrk66') # use proj4 style initialization string
>>> az12,az21,dist = g2.inv(boston_lon,boston_lat,portland_lon,portland_lat)
>>> f"{az12:.3f} {az21:.3f} {dist:.3f}"
'-66.531 75.654 4164192.708'
"""
# if initstring is a proj-type init string,
# convert it to a dict.
ellpsd = {} # type: Dict[str, Union[str, float]]
if initstring is not None:
for kvpair in initstring.split():
# Actually only +a and +b are needed
# We can ignore safely any parameter that doesn't have a value
if kvpair.find("=") == -1:
continue
k, v = kvpair.split("=")
k = k.lstrip("+")
if k in ["a", "b", "rf", "f", "es", "e"]:
ellpsd[k] = float(v)
else:
ellpsd[k] = v
# merge this dict with kwargs dict.
kwargs = dict(list(kwargs.items()) + list(ellpsd.items()))
sphere = False
if "ellps" in kwargs:
# ellipse name given, look up in pj_ellps dict
ellps_dict = pj_ellps[kwargs["ellps"]]
a = ellps_dict["a"] # type: float
if ellps_dict["description"] == "Normal Sphere":
sphere = True
if "b" in ellps_dict:
b = ellps_dict["b"] # type: float
es = 1.0 - (b * b) / (a * a) # type: float
f = (a - b) / a # type: float
elif "rf" in ellps_dict:
f = 1.0 / ellps_dict["rf"]
b = a * (1.0 - f)
es = 1.0 - (b * b) / (a * a)
else:
# a (semi-major axis) and one of
# b the semi-minor axis
# rf the reciprocal flattening
# f flattening
# es eccentricity squared
# must be given.
a = kwargs["a"]
if "b" in kwargs:
b = kwargs["b"]
es = 1.0 - (b * b) / (a * a)
f = (a - b) / a
elif "rf" in kwargs:
f = 1.0 / kwargs["rf"]
b = a * (1.0 - f)
es = 1.0 - (b * b) / (a * a)
elif "f" in kwargs:
f = kwargs["f"]
b = a * (1.0 - f)
es = 1.0 - (b / a) ** 2
elif "es" in kwargs:
es = kwargs["es"]
b = math.sqrt(a ** 2 - es * a ** 2)
f = (a - b) / a
elif "e" in kwargs:
es = kwargs["e"] ** 2
b = math.sqrt(a ** 2 - es * a ** 2)
f = (a - b) / a
else:
b = a
f = 0.0
es = 0.0
# msg='ellipse name or a, plus one of f,es,b must be given'
# raise ValueError(msg)
if math.fabs(f) < 1.0e-8:
sphere = True
super().__init__(a, f, sphere, b, es)
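# Worked illustration (standard WGS84 constants, shown here for clarity only):
# with a=6378137.0 and rf=298.257223563 the 'rf' branch above yields
#     f  = 1 / rf              ~ 0.0033528106647474805
#     b  = a * (1 - f)         ~ 6356752.314245179
#     es = 1 - (b * b)/(a * a) ~ 0.0066943799901413165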
def fwd(
self, lons: Any, lats: Any, az: Any, dist: Any, radians=False
) -> Tuple[Any, Any, Any]:
"""
Forward transformation
Determine longitudes, latitudes and back azimuths of terminus
points given longitudes and latitudes of initial points,
plus forward azimuths and distances.
Parameters
----------
lons: array, :class:`numpy.ndarray`, list, tuple, or scalar
Longitude(s) of initial point(s)
lats: array, :class:`numpy.ndarray`, list, tuple, or scalar
Latitude(s) of initial point(s)
az: array, :class:`numpy.ndarray`, list, tuple, or scalar
Forward azimuth(s)
dist: array, :class:`numpy.ndarray`, list, tuple, or scalar
Distance(s) between initial and terminus point(s)
in meters
radians: bool, optional
If True, the input data is assumed to be in radians.
Returns
-------
array, :class:`numpy.ndarray`, list, tuple, or scalar:
Longitude(s) of terminus point(s)
array, :class:`numpy.ndarray`, list, tuple, or scalar:
Latitude(s) of terminus point(s)
array, :class:`numpy.ndarray`, list, tuple, or scalar:
Back azimuth(s)
"""
# process inputs, making copies that support buffer API.
inx, xisfloat, xislist, xistuple = _copytobuffer(lons)
iny, yisfloat, yislist, yistuple = _copytobuffer(lats)
inz, zisfloat, zislist, zistuple = _copytobuffer(az)
ind, disfloat, dislist, distuple = _copytobuffer(dist)
self._fwd(inx, iny, inz, ind, radians=radians)
# if inputs were lists, tuples or floats, convert back.
outx = _convertback(xisfloat, xislist, xistuple, inx)
outy = _convertback(yisfloat, yislist, yistuple, iny)
outz = _convertback(zisfloat, zislist, zistuple, inz)
return outx, outy, outz
def inv(
self, lons1: Any, lats1: Any, lons2: Any, lats2: Any, radians=False
) -> Tuple[Any, Any, Any]:
"""
Inverse transformation
Determine forward and back azimuths, plus distances
between initial points and terminus points.
Parameters
----------
lons1: array, :class:`numpy.ndarray`, list, tuple, or scalar
Longitude(s) of initial point(s)
lats1: array, :class:`numpy.ndarray`, list, tuple, or scalar
Latitude(s) of initial point(s)
lons2: array, :class:`numpy.ndarray`, list, tuple, or scalar
Longitude(s) of terminus point(s)
lats2: array, :class:`numpy.ndarray`, list, tuple, or scalar
Latitude(s) of terminus point(s)
radians: bool, optional
If True, the input data is assumed to be in radians.
Returns
-------
array, :class:`numpy.ndarray`, list, tuple, or scalar:
Forward azimuth(s)
array, :class:`numpy.ndarray`, list, tuple, or scalar:
Back azimuth(s)
array, :class:`numpy.ndarray`, list, tuple, or scalar:
Distance(s) between initial and terminus point(s)
in meters
"""
# process inputs, making copies that support buffer API.
inx, xisfloat, xislist, xistuple = _copytobuffer(lons1)
iny, yisfloat, yislist, yistuple = _copytobuffer(lats1)
inz, zisfloat, zislist, zistuple = _copytobuffer(lons2)
ind, disfloat, dislist, distuple = _copytobuffer(lats2)
self._inv(inx, iny, inz, ind, radians=radians)
# if inputs were lists, tuples or floats, convert back.
outx = _convertback(xisfloat, xislist, xistuple, inx)
outy = _convertback(yisfloat, yislist, yistuple, iny)
outz = _convertback(zisfloat, zislist, zistuple, inz)
return outx, outy, outz
def npts(
self,
lon1: float,
lat1: float,
lon2: float,
lat2: float,
npts: int,
radians: bool = False,
) -> List:
"""
Given a single initial point and terminus point, returns
a list of longitude/latitude pairs describing npts equally
spaced intermediate points along the geodesic between the
initial and terminus points.
Example usage:
>>> from pyproj import Geod
>>> g = Geod(ellps='clrk66') # Use Clarke 1866 ellipsoid.
>>> # specify the lat/lons of Boston and Portland.
>>> boston_lat = 42.+(15./60.); boston_lon = -71.-(7./60.)
>>> portland_lat = 45.+(31./60.); portland_lon = -123.-(41./60.)
>>> # find ten equally spaced points between Boston and Portland.
>>> lonlats = g.npts(boston_lon,boston_lat,portland_lon,portland_lat,10)
>>> for lon,lat in lonlats: f'{lat:.3f} {lon:.3f}'
'43.528 -75.414'
'44.637 -79.883'
'45.565 -84.512'
'46.299 -89.279'
'46.830 -94.156'
'47.149 -99.112'
'47.251 -104.106'
'47.136 -109.100'
'46.805 -114.051'
'46.262 -118.924'
>>> # test with radians=True (inputs/outputs in radians, not degrees)
>>> import math
>>> dg2rad = math.radians(1.)
>>> rad2dg = math.degrees(1.)
>>> lonlats = g.npts(
... dg2rad*boston_lon,
... dg2rad*boston_lat,
... dg2rad*portland_lon,
... dg2rad*portland_lat,
... 10,
... radians=True
... )
>>> for lon,lat in lonlats: f'{rad2dg*lat:.3f} {rad2dg*lon:.3f}'
'43.528 -75.414'
'44.637 -79.883'
'45.565 -84.512'
'46.299 -89.279'
'46.830 -94.156'
'47.149 -99.112'
'47.251 -104.106'
'47.136 -109.100'
'46.805 -114.051'
'46.262 -118.924'
Parameters
----------
lon1: float
Longitude of the initial point
lat1: float
Latitude of the initial point
lon2: float
Longitude of the terminus point
lat2: float
Latitude of the terminus point
npts: int
Number of points to be returned
radians: bool, optional
If True, the input data is assumed to be in radians.
Returns
-------
list of tuples:
list of (lon, lat) points along the geodesic
between the initial and terminus points.
"""
lons, lats = super()._npts(lon1, lat1, lon2, lat2, npts, radians=radians)
return list(zip(lons, lats))
def line_length(self, lons: Any, lats: Any, radians: bool = False) -> float:
"""
.. versionadded:: 2.3.0
Calculate the total distance between points along a line.
>>> from pyproj import Geod
>>> geod = Geod('+a=6378137 +f=0.0033528106647475126')
>>> lats = [-72.9, -71.9, -74.9, -74.3, -77.5, -77.4, -71.7, -65.9, -65.7,
... -66.6, -66.9, -69.8, -70.0, -71.0, -77.3, -77.9, -74.7]
>>> lons = [-74, -102, -102, -131, -163, 163, 172, 140, 113,
... 88, 59, 25, -4, -14, -33, -46, -61]
>>> total_length = geod.line_length(lons, lats)
>>> f"{total_length:.3f}"
'14259605.611'
Parameters
----------
lons: array, :class:`numpy.ndarray`, list, tuple, or scalar
The longitude points along a line.
lats: array, :class:`numpy.ndarray`, list, tuple, or scalar
The latitude points along a line.
radians: bool, optional
If True, the input data is assumed to be in radians.
Returns
-------
float:
The total length of the line.
"""
# process inputs, making copies that support buffer API.
inx, xisfloat, xislist, xistuple = _copytobuffer(lons)
iny, yisfloat, yislist, yistuple = _copytobuffer(lats)
return self._line_length(inx, iny, radians=radians)
def line_lengths(self, lons: Any, lats: Any, radians: bool = False) -> Any:
"""
.. versionadded:: 2.3.0
Calculate the distances between points along a line.
>>> from pyproj import Geod
>>> geod = Geod(ellps="WGS84")
>>> lats = [-72.9, -71.9, -74.9]
>>> lons = [-74, -102, -102]
>>> for line_length in geod.line_lengths(lons, lats):
... f"{line_length:.3f}"
'943065.744'
'334805.010'
Parameters
----------
lons: array, :class:`numpy.ndarray`, list, tuple, or scalar
The longitude points along a line.
lats: array, :class:`numpy.ndarray`, list, tuple, or scalar
The latitude points along a line.
radians: bool, optional
If True, the input data is assumed to be in radians.
Returns
-------
array, :class:`numpy.ndarray`, list, tuple, or scalar:
The length of each line segment.
"""
# process inputs, making copies that support buffer API.
inx, xisfloat, xislist, xistuple = _copytobuffer(lons)
iny, yisfloat, yislist, yistuple = _copytobuffer(lats)
# _line_length writes the per-segment distances back into ``inx``.
self._line_length(inx, iny, radians=radians)
line_lengths = _convertback(xisfloat, xislist, xistuple, inx)
# n points only define n - 1 segments, so drop the trailing element.
return line_lengths if xisfloat else line_lengths[:-1]
def polygon_area_perimeter(
self, lons: Any, lats: Any, radians: bool = False
) -> Tuple[float, float]:
"""
.. versionadded:: 2.3.0
A simple interface for computing the area (meters^2) and perimeter (meters)
of a geodesic polygon.
Arbitrarily complex polygons are allowed. In the case of self-intersecting
polygons the area is accumulated "algebraically", e.g., the areas of
the 2 loops in a figure-8 polygon will partially cancel. There's no need
to "close" the polygon by repeating the first vertex. The area returned
is signed, with counter-clockwise traversal treated as positive.
.. note:: lats should be in the range [-90 deg, 90 deg].
Example usage:
>>> from pyproj import Geod
>>> geod = Geod('+a=6378137 +f=0.0033528106647475126')
>>> lats = [-72.9, -71.9, -74.9, -74.3, -77.5, -77.4, -71.7, -65.9, -65.7,
... -66.6, -66.9, -69.8, -70.0, -71.0, -77.3, -77.9, -74.7]
>>> lons = [-74, -102, -102, -131, -163, 163, 172, 140, 113,
... 88, 59, 25, -4, -14, -33, -46, -61]
>>> poly_area, poly_perimeter = geod.polygon_area_perimeter(lons, lats)
>>> f"{poly_area:.1f} {poly_perimeter:.1f}"
'13376856682207.4 14710425.4'
Parameters
----------
lons: array, :class:`numpy.ndarray`, list, tuple, or scalar
An array of longitude values.
lats: array, :class:`numpy.ndarray`, list, tuple, or scalar
An array of latitude values.
radians: bool, optional
If True, the input data is assumed to be in radians.
Returns
-------
(float, float):
The geodesic area (meters^2) and perimeter (meters) of the polygon.
"""
return self._polygon_area_perimeter(
_copytobuffer(lons)[0], _copytobuffer(lats)[0], radians=radians
)
def geometry_length(self, geometry, radians: bool = False) -> float:
"""
.. versionadded:: 2.3.0
Returns the geodesic length (meters) of the shapely geometry.
If it is a Polygon, it will return the sum of the
lengths along the perimeter.
If it is a MultiPolygon or MultiLineString, it will return
the sum of the lengths.
Example usage:
>>> from pyproj import Geod
>>> from shapely.geometry import Point, LineString
>>> line_string = LineString([Point(1, 2), Point(3, 4)])
>>> geod = Geod(ellps="WGS84")
>>> f"{geod.geometry_length(line_string):.3f}"
'313588.397'
Parameters
----------
geometry: :class:`shapely.geometry.BaseGeometry`
The geometry to calculate the length from.
radians: bool, optional
If True, the input data is assumed to be in radians.
Returns
-------
float:
The total geodesic length of the geometry (meters).
"""
try:
return self.line_length(*geometry.xy, radians=radians) # type: ignore
except (AttributeError, NotImplementedError):
pass
if hasattr(geometry, "exterior"):
return self.geometry_length(geometry.exterior, radians=radians)
elif hasattr(geometry, "geoms"):
total_length = 0.0
for geom in geometry.geoms:
total_length += self.geometry_length(geom, radians=radians)
return total_length
raise GeodError("Invalid geometry provided.")
def geometry_area_perimeter(
self, geometry, radians: bool = False
) -> Tuple[float, float]:
"""
.. versionadded:: 2.3.0
A simple interface for computing the area (meters^2) and perimeter (meters)
of a geodesic polygon as a shapely geometry.
Arbitrarily complex polygons are allowed. In the case of self-intersecting
polygons the area is accumulated "algebraically", e.g., the areas of
the 2 loops in a figure-8 polygon will partially cancel. There's no need
to "close" the polygon by repeating the first vertex.
.. note:: lats should be in the range [-90 deg, 90 deg].
.. warning:: The area returned is signed with counter-clockwise (CCW) traversal
being treated as positive. For polygons, holes should use the
opposite traversal to the exterior (if the exterior is CCW, the
holes/interiors should be CW). You can use `shapely.ops.orient` to
modify the orientation.
If it is a Polygon, it will return the area and exterior perimeter.
It will subtract the area of the interior holes.
If it is a MultiPolygon or MultiLineString, it will return
the sum of the areas and perimeters of all geometries.
Example usage:
>>> from pyproj import Geod
>>> from shapely.geometry import LineString, Point, Polygon
>>> geod = Geod(ellps="WGS84")
>>> poly_area, poly_perimeter = geod.geometry_area_perimeter(
... Polygon(
... LineString([
... Point(1, 1), Point(10, 1), Point(10, 10), Point(1, 10)
... ]),
... holes=[LineString([Point(1, 2), Point(3, 4), Point(5, 2)])],
... )
... )
>>> f"{poly_area:.3f} {poly_perimeter:.3f}"
'944373881400.339 3979008.036'
Parameters
----------
geometry: :class:`shapely.geometry.BaseGeometry`
The geometry to calculate the area and perimeter from.
radians: bool, optional
If True, the input data is assumed to be in radians. Default is degrees.
Returns
-------
(float, float):
The geodesic area (meters^2) and perimeter (meters) of the polygon.
"""
try:
return self.polygon_area_perimeter( # type: ignore
*geometry.xy, radians=radians,
)
except (AttributeError, NotImplementedError):
pass
# polygon
if hasattr(geometry, "exterior"):
total_area, total_perimeter = self.geometry_area_perimeter(
geometry.exterior, radians=radians
)
# subtract area of holes
for hole in geometry.interiors:
area, _ = self.geometry_area_perimeter(hole, radians=radians)
total_area += area
return total_area, total_perimeter
# multi geometries
elif hasattr(geometry, "geoms"):
total_area = 0.0
total_perimeter = 0.0
for geom in geometry.geoms:
area, perimeter = self.geometry_area_perimeter(geom, radians=radians)
total_area += area
total_perimeter += perimeter
return total_area, total_perimeter
raise GeodError("Invalid geometry provided.")
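# A minimal sketch of the orientation warning above (assuming shapely is
# installed): a clockwise exterior ring yields a negative signed area, and
# shapely.ops.orient() can normalize it to counter-clockwise first, e.g.
#     from shapely.geometry import Polygon
#     from shapely.ops import orient
#     geod = Geod(ellps="WGS84")
#     cw_poly = Polygon([(1, 1), (1, 10), (10, 10), (10, 1)])  # CW ring
#     area, perim = geod.geometry_area_perimeter(orient(cw_poly, sign=1.0))
#     # area is now positive; without orient() it would come back negative.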
def __repr__(self) -> str:
# search for ellipse name
for (ellps, vals) in pj_ellps.items():
if self.a == vals["a"]:
b = vals.get("b", None)
rf = vals.get("rf", None)
# self.sphere is True when self.f is zero or very close to zero,
# so guard against dividing by zero.
if self.b == b or (not self.sphere and (1.0 / self.f) == rf):
return f"{self.__class__.__name__}(ellps={ellps!r})"
# no ellipse name found, call super class
return super().__repr__()
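# Illustrative behaviour of the name lookup above (a sketch, not an
# exhaustive contract): when the parameters match a named ellipsoid the
# short form is used, otherwise the base-class repr is returned.
#     repr(Geod(ellps="clrk66"))            # "Geod(ellps='clrk66')"
#     repr(Geod(a=6378206.4, b=6356583.8))  # also "Geod(ellps='clrk66')"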
def __eq__(self, other: Any) -> bool:
"""
equality operator == for Geod objects
Example usage:
>>> from pyproj import Geod
>>> # Use Clarke 1866 ellipsoid.
>>> gclrk1 = Geod(ellps='clrk66')
>>> # Define Clarke 1866 using parameters
>>> gclrk2 = Geod(a=6378206.4, b=6356583.8)
>>> gclrk1 == gclrk2
True
>>> # WGS 66 ellipsoid, PROJ style
>>> gwgs66 = Geod('+ellps=WGS66')
>>> # Naval Weapons Lab., 1965 ellipsoid
>>> gnwl9d = Geod('+ellps=NWL9D')
>>> # these ellipsoids are the same
>>> gnwl9d == gwgs66
True
>>> gclrk1 != gnwl9d # Clarke 1866 is unlike NWL9D
True
"""
if not isinstance(other, _Geod):
return False
return self.__repr__() == other.__repr__()
|
|
# Copyright 2020 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import ipaddress
import math
import random
import time
import uuid
import eventlet
from os_brick import exception as brick_exception
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from oslo_utils import importutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _
from cinder.image import image_utils
from cinder import utils
import cinder.volume.drivers.datera.datera_common as datc
from cinder.volume import utils as volutils
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
dexceptions = importutils.try_import('dfs_sdk.exceptions')
API_VERSION = "2.2"
# The DateraAPI classes (2.1, 2.2) are enhanced by datera_common's lookup()
# decorator, which generates members at run time. Therefore we disable
# pylint's no-member check on these classes.
# pylint: disable=no-member
class DateraApi(object):
# =================
# = Create Volume =
# =================
def _create_volume_2_2(self, volume):
policies = self._get_policies_for_resource(volume)
num_replicas = int(policies['replica_count'])
storage_name = 'storage-1'
volume_name = 'volume-1'
template = policies['template']
placement = policies['placement_mode']
ppolicy = policies['placement_policy']
ip_pool = datc.get_ip_pool(policies)
name = datc.get_name(volume)
if template:
app_params = (
{
'create_mode': 'openstack',
# 'uuid': str(volume['id']),
'name': name,
'app_template': {'path': '/app_templates/{}'.format(
template)}
})
if self._support_template_override_2_2():
app_params['template_override'] = {
'storage_instances': {
storage_name: {
'volumes': {
volume_name: {
'size': str(volume['size'])}}}}}
else:
app_params = (
{
'create_mode': 'openstack',
'uuid': str(volume['id']),
'name': name,
'access_control_mode': 'deny_all',
'storage_instances': [
{
'name': storage_name,
'ip_pool': {'path': ('/access_network_ip_pools/'
'{}'.format(ip_pool))},
'volumes': [
{
'name': volume_name,
'size': volume['size'],
'replica_count': num_replicas,
'snapshot_policies': [
]
}
]
}
]
})
create_vol = app_params['storage_instances'][0]['volumes'][0]
if datc.dat_version_gte(self.datera_version, '3.3.0.0'):
create_vol['placement_policy'] = {
'path': '/placement_policies/{}'.format(ppolicy)}
else:
create_vol['placement_mode'] = placement
tenant = self.create_tenant(volume['project_id'])
self.api.app_instances.create(tenant=tenant, **app_params)
self._update_qos_2_2(volume, policies)
self._add_vol_meta_2_2(volume)
# =================
# = Extend Volume =
# =================
def _extend_volume_2_2(self, volume, new_size):
if volume['size'] >= new_size:
LOG.warning("Volume size not extended due to original size being "
"greater or equal to new size. Original: "
"%(original)s, New: %(new)s",
{'original': volume['size'],
'new': new_size})
return
policies = self._get_policies_for_resource(volume)
template = policies['template']
if template and not self._support_template_override_2_2():
LOG.warning("Volume size not extended due to template binding. "
"Template override is supported in product versions "
"3.3.X+: volume: %(volume)s, template: %(template)s",
{'volume': volume, 'template': template})
return
with self._offline_flip_2_2(volume):
# Change Volume Size
tenant = self.get_tenant(volume['project_id'])
dvol = self.cvol_to_dvol(volume, tenant)
dvol.set(tenant=tenant, size=new_size)
# =================
# = Cloned Volume =
# =================
def _create_cloned_volume_2_2(self, volume, src_vref):
tenant = self.get_tenant(volume['project_id'])
sdvol = self.cvol_to_dvol(src_vref, tenant=tenant)
src = sdvol.path
data = {
'create_mode': 'openstack',
'name': datc.get_name(volume),
'uuid': str(volume['id']),
'clone_volume_src': {'path': src},
}
tenant = self.get_tenant(volume['project_id'])
self.api.app_instances.create(tenant=tenant, **data)
if volume['size'] > src_vref['size']:
self._extend_volume_2_2(volume, volume['size'])
self._add_vol_meta_2_2(volume)
# =================
# = Delete Volume =
# =================
def _delete_volume_2_2(self, volume):
try:
tenant = self.get_tenant(volume['project_id'])
ai = self.cvol_to_ai(volume, tenant=tenant)
si = ai.storage_instances.list(tenant=tenant)[0]
# Clear out ACL
acl = si.acl_policy.get(tenant=tenant)
acl.set(tenant=tenant, initiators=[])
# Bring volume offline
data = {
'admin_state': 'offline',
'force': True
}
ai.set(tenant=tenant, **data)
ai.delete(tenant=tenant, force=True)
except exception.NotFound:
msg = ("Tried to delete volume %s, but it was not found in the "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc.get_name(volume))
# =================
# = Ensure Export =
# =================
def _ensure_export_2_2(self, context, volume, connector=None):
pass
# =========================
# = Initialize Connection =
# =========================
def _initialize_connection_2_2(self, volume, connector):
# Now online the app_instance (which will online all storage_instances)
multipath = connector.get('multipath', False)
tenant = self.get_tenant(volume['project_id'])
ai = self.cvol_to_ai(volume, tenant=tenant)
data = {
'admin_state': 'online'
}
ai.set(tenant=tenant, **data)
si = ai.storage_instances.list(tenant=tenant)[0]
# randomize portal chosen
choice = 0
policies = self._get_policies_for_resource(volume)
if policies["round_robin"]:
choice = random.randint(0, 1)
portal = si.access['ips'][choice] + ':3260'
iqn = si.access['iqn']
if multipath:
portals = [p + ':3260' for p in si.access['ips']]
iqns = [iqn for _ in si.access['ips']]
lunids = [self._get_lunid() for _ in si.access['ips']]
result = {
'driver_volume_type': 'iscsi',
'data': {
'target_discovered': False,
'target_iqn': iqn,
'target_iqns': iqns,
'target_portal': portal,
'target_portals': portals,
'target_lun': self._get_lunid(),
'target_luns': lunids,
'volume_id': volume['id'],
'discard': False}}
else:
result = {
'driver_volume_type': 'iscsi',
'data': {
'target_discovered': False,
'target_iqn': iqn,
'target_portal': portal,
'target_lun': self._get_lunid(),
'volume_id': volume['id'],
'discard': False}}
if self.use_chap_auth:
result['data'].update(
auth_method="CHAP",
auth_username=self.chap_username,
auth_password=self.chap_password)
return result
# =================
# = Create Export =
# =================
def _create_export_2_2(self, context, volume, connector):
tenant = self.get_tenant(volume['project_id'])
ai = self.cvol_to_ai(volume, tenant=tenant)
data = {
'admin_state': 'offline',
'force': True
}
ai.set(tenant=tenant, **data)
si = ai.storage_instances.list(tenant=tenant)[0]
policies = self._get_policies_for_resource(volume)
if connector and connector.get('ip'):
# Case where volume_type has non default IP Pool info
ip_pool = datc.get_ip_pool(policies)
if ip_pool != 'default':
initiator_ip_pool_path = self.api.access_network_ip_pools.get(
ip_pool).path
# Fall back to a reasonable guess based on the connector's IP
else:
initiator_ip_pool_path = self._get_ip_pool_for_string_ip_2_2(
connector['ip'], tenant)
ip_pool_data = {'ip_pool': {'path': initiator_ip_pool_path}}
if not ai.app_template["path"]:
si.set(tenant=tenant, **ip_pool_data)
data = {
'admin_state': 'online'
}
ai.set(tenant=tenant, **data)
# Check if we've already set up everything for this volume
storage_instances = ai.storage_instances.list(tenant=tenant)
# Handle adding initiator to product if necessary
# Then add initiator to ACL
if connector and connector.get('initiator'):
initiator_name = "OpenStack-{}".format(str(uuid.uuid4())[:8])
initiator = connector['initiator']
dinit = None
try:
# We want to make sure the initiator is created under the
# current tenant rather than using the /root one
dinit = self.api.initiators.get(initiator, tenant=tenant)
if dinit.tenant != tenant:
raise dexceptions.ApiNotFoundError(
"Initiator {} was not found under tenant {} "
"[{} != {}]".format(
initiator, tenant, dinit.tenant, tenant))
except dexceptions.ApiNotFoundError:
# TODO(_alastor_): Take out the 'force' flag when we fix
# DAT-15931
data = {'id': initiator, 'name': initiator_name, 'force': True}
# Try and create the initiator
# If we get a conflict, ignore it
try:
dinit = self.api.initiators.create(tenant=tenant, **data)
except dexceptions.ApiConflictError:
pass
initiator_path = dinit['path']
# Create ACL with initiator group as reference for each
# storage_instance in app_instance
# TODO(_alastor_): We need to avoid changing the ACLs if the
# template already specifies an ACL policy.
for si in storage_instances:
existing_acl = si.acl_policy.get(tenant=tenant)
data = {}
# Grabbing only the 'path' key from each existing initiator
# within the existing acl. eacli --> existing acl initiator
eacli = []
for acl in existing_acl['initiators']:
nacl = {}
nacl['path'] = acl['path']
eacli.append(nacl)
data['initiators'] = eacli
data['initiators'].append({"path": initiator_path})
# Grabbing only the 'path' key from each existing initiator
# group within the existing acl. eaclig --> existing
# acl initiator group
eaclig = []
for acl in existing_acl['initiator_groups']:
nacl = {}
nacl['path'] = acl['path']
eaclig.append(nacl)
data['initiator_groups'] = eaclig
si.acl_policy.set(tenant=tenant, **data)
if self.use_chap_auth:
for si in storage_instances:
data = {'type': 'chap',
'target_user_name': self.chap_username,
'target_pswd': self.chap_password}
si.auth.set(tenant=tenant, **data)
# Check to ensure we're ready for go-time
self._si_poll_2_2(volume, si, tenant)
self._add_vol_meta_2_2(volume, connector=connector)
# =================
# = Detach Volume =
# =================
def _detach_volume_2_2(self, context, volume, attachment=None):
try:
tenant = self.get_tenant(volume['project_id'])
ai = self.cvol_to_ai(volume, tenant=tenant)
# Clear out ACL for this specific attachment
si = ai.storage_instances.list(tenant=tenant)[0]
existing_acl = si.acl_policy.get(tenant=tenant)
data = {}
# Grabbing only the 'path' key from each existing initiator
# within the existing acl. eacli --> existing acl initiator
eacli = []
for acl in existing_acl['initiators']:
if (
attachment is not None
and attachment.connector is not None
and acl['path'].split('/')[-1]
== attachment.connector['initiator']
):
continue
nacl = {}
nacl['path'] = acl['path']
eacli.append(nacl)
data['initiators'] = eacli
data['initiator_groups'] = existing_acl['initiator_groups']
si.acl_policy.set(tenant=tenant, **data)
if not eacli:
# bring the application instance offline if there
# are no initiators left.
data = {
'admin_state': 'offline',
'force': True
}
ai.set(tenant=tenant, **data)
except exception.NotFound:
msg = ("Tried to detach volume %s, but it was not found in the "
"Datera cluster. Continuing with detach.")
LOG.info(msg, volume['id'])
# ===================
# = Create Snapshot =
# ===================
def _create_snapshot_2_2(self, snapshot):
dummy_vol = {'id': snapshot['volume_id'],
'project_id': snapshot['project_id']}
tenant = self.get_tenant(dummy_vol['project_id'])
dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant)
snap_params = {
'uuid': snapshot['id'],
}
snap = dvol.snapshots.create(tenant=tenant, **snap_params)
self._snap_poll_2_2(snap, tenant)
# ===================
# = Delete Snapshot =
# ===================
def _delete_snapshot_2_2(self, snapshot):
# Handle case where snapshot is "managed"
dummy_vol = {'id': snapshot['volume_id'],
'project_id': snapshot['project_id']}
tenant = self.get_tenant(dummy_vol['project_id'])
dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant)
snapshots = None
# Shortcut if this is a managed snapshot
provider_location = snapshot.get('provider_location')
if provider_location:
snap = dvol.snapshots.get(provider_location, tenant=tenant)
snap.delete(tenant=tenant)
return
# Long-way. UUID identification
try:
snapshots = dvol.snapshots.list(tenant=tenant)
except exception.NotFound:
msg = ("Tried to delete snapshot %s, but parent volume %s was "
"not found in Datera cluster. Continuing with delete.")
LOG.info(msg,
datc.get_name(snapshot),
datc.get_name({'id': snapshot['volume_id']}))
return
try:
for snap in snapshots:
if snap.uuid == snapshot['id']:
snap.delete(tenant=tenant)
break
else:
raise exception.NotFound
except exception.NotFound:
msg = ("Tried to delete snapshot %s, but was not found in "
"Datera cluster. Continuing with delete.")
LOG.info(msg, datc.get_name(snapshot))
# ========================
# = Volume From Snapshot =
# ========================
def _create_volume_from_snapshot_2_2(self, volume, snapshot):
# Handle case where snapshot is "managed"
dummy_vol = {'id': snapshot['volume_id'],
'project_id': snapshot['project_id']}
tenant = self.get_tenant(dummy_vol['project_id'])
dvol = self.cvol_to_dvol(dummy_vol, tenant=tenant)
found_snap = None
provider_location = snapshot.get('provider_location')
if provider_location:
found_snap = dvol.snapshots.get(provider_location, tenant=tenant)
else:
snapshots = dvol.snapshots.list(tenant=tenant)
for snap in snapshots:
if snap.uuid == snapshot['id']:
found_snap = snap
break
else:
raise exception.SnapshotNotFound(snapshot_id=snapshot['id'])
self._snap_poll_2_2(found_snap, tenant)
src = found_snap.path
app_params = (
{
'create_mode': 'openstack',
'uuid': str(volume['id']),
'name': datc.get_name(volume),
'clone_snapshot_src': {'path': src},
})
self.api.app_instances.create(tenant=tenant, **app_params)
if (volume['size'] > snapshot['volume_size']):
self._extend_volume_2_2(volume, volume['size'])
self._add_vol_meta_2_2(volume)
# ==========
# = Retype =
# ==========
def _retype_2_2(self, ctxt, volume, new_type, diff, host):
LOG.debug("Retype called\n"
"Volume: %(volume)s\n"
"NewType: %(new_type)s\n"
"Diff: %(diff)s\n"
"Host: %(host)s\n", {'volume': volume, 'new_type': new_type,
'diff': diff, 'host': host})
# We'll take the fast route only if the types share the same backend
# And that backend matches this driver
old_pol = self._get_policies_for_resource(volume)
new_pol = self._get_policies_for_volume_type(new_type)
if (host['capabilities']['volume_backend_name'].lower() ==
self.backend_name.lower()):
LOG.debug("Starting fast volume retype")
if old_pol.get('template') or new_pol.get('template'):
LOG.warning(
"Fast retyping between template-backed volume-types "
"unsupported. Type1: %s, Type2: %s",
volume['volume_type_id'], new_type)
self._update_qos_2_2(volume, new_pol, clear_old=True)
tenant = self.get_tenant(volume['project_id'])
dvol = self.cvol_to_dvol(volume, tenant=tenant)
# Only replica_count and ip_pool changes require offlining the app_instance
if (new_pol['replica_count'] != old_pol['replica_count'] or
new_pol['ip_pool'] != old_pol['ip_pool']):
with self._offline_flip_2_2(volume):
# ip_pool is Storage Instance level
ai = self.cvol_to_ai(volume, tenant=tenant)
si = ai.storage_instances.list(tenant=tenant)[0]
ip_pool = datc.get_ip_pool(new_pol)
si_params = (
{
'ip_pool': {'path': ('/access_network_ip_pools/'
'{}'.format(ip_pool))},
})
si.set(tenant=tenant, **si_params)
# placement_mode and replica_count are Volume level
vol_params = (
{
'placement_mode': new_pol['placement_mode'],
'replica_count': new_pol['replica_count'],
})
if datc.dat_version_gte(self.datera_version, '3.3.0.0'):
ppolicy = {'path': '/placement_policies/{}'.format(
new_pol.get('placement_policy'))}
vol_params['placement_policy'] = ppolicy
dvol.set(tenant=tenant, **vol_params)
elif (new_pol['placement_mode'] != old_pol[
'placement_mode'] or new_pol[
'placement_policy'] != old_pol['placement_policy']):
vol_params = (
{
'placement_mode': new_pol['placement_mode'],
})
if datc.dat_version_gte(self.datera_version, '3.3.0.0'):
ppolicy = {'path': '/placement_policies/{}'.format(
new_pol.get('placement_policy'))}
vol_params['placement_policy'] = ppolicy
dvol.set(tenant=tenant, **vol_params)
self._add_vol_meta_2_2(volume)
return True
else:
LOG.debug("Couldn't fast-retype volume between specified types")
return False
# ==========
# = Manage =
# ==========
def _manage_existing_2_2(self, volume, existing_ref):
# Only volumes created under the requesting tenant can be managed in
# the v2.1+ API. Eg. If tenant A is the tenant for the volume to be
# managed, it must also be tenant A that makes this request.
# This will be fixed in a later API update
existing_ref = existing_ref['source-name']
app_inst_name, __, __, __ = datc._parse_vol_ref(existing_ref)
LOG.debug("Managing existing Datera volume %s "
"Changing name to %s",
datc.get_name(volume), existing_ref)
# Rename AppInstance
dummy_vol = {'id': app_inst_name,
'project_id': volume['project_id']}
tenant = self.get_tenant(volume['project_id'])
ai = self.cvol_to_ai(dummy_vol, tenant=tenant)
data = {'name': datc.get_name(volume)}
ai.set(tenant=tenant, **data)
self._add_vol_meta_2_2(volume)
# ===================
# = Manage Get Size =
# ===================
def _manage_existing_get_size_2_2(self, volume, existing_ref):
existing_ref = existing_ref['source-name']
app_inst_name, storage_inst_name, vol_name, __ = datc._parse_vol_ref(
existing_ref)
dummy_vol = {'id': app_inst_name,
'project_id': volume['project_id']}
dvol = self.cvol_to_dvol(dummy_vol)
return dvol.size
# =========================
# = Get Manageable Volume =
# =========================
def _list_manageable_2_2(self, cinder_volumes):
# Use the first volume to determine the tenant we're working under
if cinder_volumes:
tenant = self.get_tenant(cinder_volumes[0]['project_id'])
else:
tenant = None
app_instances = self.api.app_instances.list(tenant=tenant)
results = []
if cinder_volumes and 'volume_id' in cinder_volumes[0]:
cinder_volume_ids = [vol['volume_id'] for vol in cinder_volumes]
else:
cinder_volume_ids = [vol['id'] for vol in cinder_volumes]
for ai in app_instances:
ai_name = ai['name']
reference = None
size = None
safe_to_manage = False
reason_not_safe = ""
cinder_id = None
extra_info = {}
(safe_to_manage, reason_not_safe,
cinder_id) = self._is_manageable_2_2(
ai, cinder_volume_ids, tenant)
si = ai.storage_instances.list(tenant=tenant)[0]
si_name = si.name
vol = si.volumes.list(tenant=tenant)[0]
vol_name = vol.name
size = vol.size
snaps = [(snap.utc_ts, snap.uuid)
for snap in vol.snapshots.list(tenant=tenant)]
extra_info["snapshots"] = json.dumps(snaps)
reference = {"source-name": "{}:{}:{}".format(
ai_name, si_name, vol_name)}
results.append({
'reference': reference,
'size': size,
'safe_to_manage': safe_to_manage,
'reason_not_safe': reason_not_safe,
'cinder_id': cinder_id,
'extra_info': extra_info})
return results
def _get_manageable_volumes_2_2(self, cinder_volumes, marker, limit,
offset, sort_keys, sort_dirs):
LOG.debug("Listing manageable Datera volumes")
results = self._list_manageable_2_2(cinder_volumes)
page_results = volutils.paginate_entries_list(
results, marker, limit, offset, sort_keys, sort_dirs)
return page_results
def _is_manageable_2_2(self, ai, cinder_volume_ids, tenant):
cinder_id = None
ai_name = ai.name
match = datc.UUID4_RE.match(ai_name)
if match:
cinder_id = match.group(1)
if cinder_id and cinder_id in cinder_volume_ids:
return (False,
"App Instance already managed by Cinder",
cinder_id)
if len(ai.storage_instances.list(tenant=tenant)) == 1:
si = ai.storage_instances.list(tenant=tenant)[0]
if len(si['volumes']) == 1:
return (True, "", cinder_id)
return (False,
"App Instance has more than one storage instance or volume",
cinder_id)
# ============
# = Unmanage =
# ============
def _unmanage_2_2(self, volume):
LOG.debug("Unmanaging Cinder volume %s. Changing name to %s",
volume['id'], datc.get_unmanaged(volume['id']))
data = {'name': datc.get_unmanaged(volume['id'])}
tenant = self.get_tenant(volume['project_id'])
ai = self.cvol_to_ai(volume, tenant=tenant)
ai.set(tenant=tenant, **data)
# ===================
# = Manage Snapshot =
# ===================
def _manage_existing_snapshot_2_2(self, snapshot, existing_ref):
existing_ref = existing_ref['source-name']
datc._check_snap_ref(existing_ref)
LOG.debug("Managing existing Datera volume snapshot %s for volume %s",
existing_ref, datc.get_name({'id': snapshot['volume_id']}))
return {'provider_location': existing_ref}
def _manage_existing_snapshot_get_size_2_2(self, snapshot, existing_ref):
existing_ref = existing_ref['source-name']
datc._check_snap_ref(existing_ref)
dummy_vol = {'id': snapshot['volume_id'],
'project_id': snapshot['project_id']}
dvol = self.cvol_to_dvol(dummy_vol)
return dvol.size
def _get_manageable_snapshots_2_2(self, cinder_snapshots, marker, limit,
offset, sort_keys, sort_dirs):
LOG.debug("Listing manageable Datera snapshots")
results = self._list_manageable_2_2(cinder_snapshots)
snap_results = []
snapids = set((snap['id'] for snap in cinder_snapshots))
snaprefs = set((snap.get('provider_location')
for snap in cinder_snapshots))
for volume in results:
snaps = json.loads(volume["extra_info"]["snapshots"])
for snapshot in snaps:
reference = snapshot[0]
uuid = snapshot[1]
size = volume["size"]
safe_to_manage = True
reason_not_safe = ""
cinder_id = ""
extra_info = {}
source_reference = volume["reference"]
if uuid in snapids or reference in snaprefs:
safe_to_manage = False
reason_not_safe = _("already managed by Cinder")
elif not volume['safe_to_manage'] and not volume['cinder_id']:
safe_to_manage = False
reason_not_safe = _("parent volume not safe to manage")
snap_results.append({
'reference': {'source-name': reference},
'size': size,
'safe_to_manage': safe_to_manage,
'reason_not_safe': reason_not_safe,
'cinder_id': cinder_id,
'extra_info': extra_info,
'source_reference': source_reference})
page_results = volutils.paginate_entries_list(
snap_results, marker, limit, offset, sort_keys, sort_dirs)
return page_results
def _unmanage_snapshot_2_2(self, snapshot):
return {'provider_location': None}
# ====================
# = Fast Image Clone =
# ====================
def _clone_image_2_2(self, context, volume, image_location, image_meta,
image_service):
# We're not going to fast image clone if the feature is not enabled
# and/or we can't reach the image being requested
if (not self.image_cache or
not self._image_accessible(context, volume, image_meta)):
return None, False
# Check to make sure we're working with a valid volume type
try:
found = volume_types.get_volume_type(context, self.image_type)
except (exception.VolumeTypeNotFound, exception.InvalidVolumeType):
found = None
if not found:
msg = "Invalid volume type: %s"
LOG.error(msg, self.image_type)
raise ValueError(_("Option datera_image_cache_volume_type_id must"
" be set to a valid volume_type id"))
# Check image format
fmt = image_meta.get('disk_format', '')
if fmt.lower() != 'raw':
LOG.debug("Image format is not RAW, image requires conversion "
"before clone. Image format: [%s]", fmt)
return None, False
LOG.debug("Starting fast image clone")
# TODO(_alastor_): determine if Datera is already an image backend
# for this request and direct clone instead of caching
# Dummy volume, untracked by Cinder
src_vol = {'id': image_meta['id'],
'volume_type_id': self.image_type,
'size': volume['size'],
'project_id': volume['project_id']}
# Determine if we have a cached version of the image
cached = self._vol_exists_2_2(src_vol)
if cached:
tenant = self.get_tenant(src_vol['project_id'])
ai = self.cvol_to_ai(src_vol, tenant=tenant)
metadata = ai.metadata.get(tenant=tenant)
# Check to see if the master image has changed since we created
# The cached version
ts = self._get_vol_timestamp_2_2(src_vol)
mts = time.mktime(image_meta['updated_at'].timetuple())
LOG.debug("Original image timestamp: %s, cache timestamp %s",
mts, ts)
# If the image is created by Glance, we'll trust that even if the
# timestamps don't match up, the data is ok to clone as it's not
# managed by this driver
if metadata.get('type') == 'image':
LOG.debug("Found Glance volume-backed image for %s",
src_vol['id'])
# If the master image time is greater than the volume creation
# time, we invalidate the cache and delete the volume. The
# exception is if the cached volume was created by Glance. We
# NEVER want to delete this volume. It's annotated with
# 'type': 'image' in the metadata, so we'll check for that
elif mts > ts and metadata.get('type') != 'image':
LOG.debug("Cache is older than original image, deleting cache")
cached = False
self._delete_volume_2_2(src_vol)
# If we don't have the image, we'll cache it
if not cached:
LOG.debug("No image cache found for: %s, caching image",
image_meta['id'])
self._cache_vol_2_2(context, src_vol, image_meta, image_service)
# Now perform the clone of the found image or newly cached image
self._create_cloned_volume_2_2(volume, src_vol)
# Force volume resize
vol_size = volume['size']
volume['size'] = 0
self._extend_volume_2_2(volume, vol_size)
volume['size'] = vol_size
# Determine if we need to retype the newly created volume
vtype_id = volume.get('volume_type_id')
if vtype_id and self.image_type and vtype_id != self.image_type:
vtype = volume_types.get_volume_type(context, vtype_id)
LOG.debug("Retyping newly cloned volume from type: %s to type: %s",
self.image_type, vtype_id)
diff, discard = volume_types.volume_types_diff(
context, self.image_type, vtype_id)
host = {'capabilities': {'vendor_name': self.backend_name}}
self._retype_2_2(context, volume, vtype, diff, host)
return None, True
def _cache_vol_2_2(self, context, vol, image_meta, image_service):
image_id = image_meta['id']
# Pull down image and determine if valid
with image_utils.TemporaryImages.fetch(image_service,
context,
image_id) as tmp_image:
data = image_utils.qemu_img_info(tmp_image)
fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_id)
backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
% {'fmt': fmt, 'backing_file': backing_file, })
vsize = int(
math.ceil(float(data.virtual_size) / units.Gi))
vol['size'] = vsize
vtype = vol['volume_type_id']
LOG.info("Creating cached image with volume type: %(vtype)s and "
"size %(size)s", {'vtype': vtype, 'size': vsize})
self._create_volume_2_2(vol)
with self._connect_vol(context, vol) as device:
LOG.debug("Moving image %s to volume %s",
image_meta['id'], datc.get_name(vol))
image_utils.convert_image(tmp_image,
device,
'raw',
run_as_root=True)
LOG.debug("Finished moving image %s to volume %s",
image_meta['id'], datc.get_name(vol))
data = image_utils.qemu_img_info(device, run_as_root=True)
if data.file_format != 'raw':
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_(
"Converted to %(vol_format)s, but format is "
"now %(file_format)s") % {
'vol_format': 'raw',
'file_format': data.file_format})
# TODO(_alastor_): Remove this snapshot creation when we fix
# "created_at" attribute in the frontend
# We don't actually care about the snapshot uuid, we just want
# a single snapshot
snapshot = {'id': str(uuid.uuid4()),
'volume_id': vol['id'],
'project_id': vol['project_id']}
self._create_snapshot_2_2(snapshot)
metadata = {'type': 'cached_image'}
tenant = self.get_tenant(vol['project_id'])
ai = self.cvol_to_ai(vol, tenant=tenant)
ai.metadata.set(tenant=tenant, **metadata)
# Cloning offline AI is ~4 seconds faster than cloning online AI
self._detach_volume_2_2(None, vol)
def _get_vol_timestamp_2_2(self, volume):
tenant = self.get_tenant(volume['project_id'])
dvol = self.cvol_to_dvol(volume, tenant=tenant)
snapshots = dvol.snapshots.list(tenant=tenant)
if len(snapshots) == 1:
return float(snapshots[0].utc_ts)
else:
# We'll return 0 if we find no snapshots (or the incorrect number)
# to ensure the timestamp comparison with the master copy fails
# since the master copy will always have a timestamp > 0.
LOG.debug("Number of snapshots found: %s", len(snapshots))
return 0
def _vol_exists_2_2(self, volume):
LOG.debug("Checking if volume %s exists", volume['id'])
try:
ai = self.cvol_to_ai(volume)
LOG.debug("Volume %s exists", volume['id'])
return ai
except exception.NotFound:
LOG.debug("Volume %s not found", volume['id'])
return None
@contextlib.contextmanager
def _connect_vol(self, context, vol):
connector = None
try:
# Start connection, get the connector object and create the
# export (ACL, IP-Pools, etc)
conn = self._initialize_connection_2_2(
vol, {'multipath': False})
connector = utils.brick_get_connector(
conn['driver_volume_type'],
use_multipath=False,
device_scan_attempts=10,
conn=conn)
connector_info = {'initiator': connector.get_initiator()}
self._create_export_2_2(None, vol, connector_info)
retries = 10
attach_info = conn['data']
while True:
try:
attach_info.update(
connector.connect_volume(conn['data']))
break
except brick_exception.FailedISCSITargetPortalLogin:
retries -= 1
if not retries:
LOG.error("Could not log into portal before end of "
"polling period")
raise
LOG.debug("Failed to login to portal, retrying")
eventlet.sleep(2)
device_path = attach_info['path']
yield device_path
finally:
# Close target connection
if connector:
# Best effort disconnection
try:
connector.disconnect_volume(attach_info, attach_info)
except Exception:
pass
# ===========
# = Polling =
# ===========
def _snap_poll_2_2(self, snap, tenant):
eventlet.sleep(datc.DEFAULT_SNAP_SLEEP)
TIMEOUT = 20
retry = 0
poll = True
while poll and not retry >= TIMEOUT:
retry += 1
snap = snap.reload(tenant=tenant)
if snap.op_state == 'available':
poll = False
else:
eventlet.sleep(1)
if retry >= TIMEOUT:
raise exception.VolumeDriverException(
message=_('Snapshot not ready.'))
def _si_poll_2_2(self, volume, si, tenant):
# Initial 4 second sleep required for some Datera versions
eventlet.sleep(datc.DEFAULT_SI_SLEEP)
TIMEOUT = 10
retry = 0
poll = True
while poll and not retry >= TIMEOUT:
retry += 1
si = si.reload(tenant=tenant)
if si.op_state == 'available':
poll = False
else:
eventlet.sleep(1)
if retry >= TIMEOUT:
raise exception.VolumeDriverException(
message=_('Resource not ready.'))
# ================
# = Volume Stats =
# ================
def _get_volume_stats_2_2(self, refresh=False):
# cluster_stats is defined by datera_iscsi
# pylint: disable=access-member-before-definition
if refresh or not self.cluster_stats:
try:
LOG.debug("Updating cluster stats info.")
results = self.api.system.get()
self.datera_version = results.sw_version
if 'uuid' not in results:
LOG.error(
'Failed to get updated stats from Datera Cluster.')
stats = {
'volume_backend_name': self.backend_name,
'vendor_name': 'Datera',
'driver_version': self.VERSION,
'storage_protocol': 'iSCSI',
'total_capacity_gb': (
int(results.total_capacity) / units.Gi),
'free_capacity_gb': (
int(results.available_capacity) / units.Gi),
'total_flash_capacity_gb': (
int(results.all_flash_total_capacity) / units.Gi),
'total_hybrid_capacity_gb': (
int(results.hybrid_total_capacity) / units.Gi),
'free_flash_capacity_gb': (
int(results.all_flash_available_capacity) / units.Gi),
'free_hybrid_capacity_gb': (
int(results.hybrid_available_capacity) / units.Gi),
'reserved_percentage': 0,
'QoS_support': True,
'compression': results.get('compression_enabled', False),
'compression_ratio': results.get('compression_ratio', '0'),
'l3_enabled': results.get('l3_enabled', False),
'filter_function': self.filterf,
'goodness_function': self.goodnessf
}
self.cluster_stats = stats
except exception.DateraAPIException:
LOG.error('Failed to get updated stats from Datera cluster.')
return self.cluster_stats
# =======
# = QoS =
# =======
def _update_qos_2_2(self, volume, policies, clear_old=False):
tenant = self.get_tenant(volume['project_id'])
dvol = self.cvol_to_dvol(volume, tenant=tenant)
type_id = volume.get('volume_type_id', None)
if type_id is not None:
iops_per_gb = int(policies.get('iops_per_gb', 0))
bandwidth_per_gb = int(policies.get('bandwidth_per_gb', 0))
# Filter for just QOS policies in result. All of their keys
# should end with "max"
fpolicies = {k: int(v) for k, v in
policies.items() if k.endswith("max")}
# Filter all 0 values from being passed
fpolicies = {k: int(v) for k, v in
fpolicies.items() if v > 0}
# Calculate and set iops/gb and bw/gb, capping them at total_iops_max
# and total_bandwidth_max respectively when those are also set, since
# the explicit maximums take priority
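# Worked example (illustrative numbers): iops_per_gb=100 on a 10 GiB
# volume gives ipg = 100 * 10 = 1000; if the volume type also sets
# total_iops_max=500, the smaller value (500) is kept as the final
# total_iops_max below.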
if iops_per_gb:
ipg = iops_per_gb * volume['size']
# Not using zero, because zero means unlimited
im = fpolicies.get('total_iops_max', 1)
r = ipg
if ipg > im:
r = im
fpolicies['total_iops_max'] = r
if bandwidth_per_gb:
bpg = bandwidth_per_gb * volume['size']
# Not using zero, because zero means unlimited
bm = fpolicies.get('total_bandwidth_max', 1)
r = bpg
if bpg > bm:
r = bm
fpolicies['total_bandwidth_max'] = r
if fpolicies or clear_old:
try:
pp = dvol.performance_policy.get(tenant=tenant)
pp.delete(tenant=tenant)
except dexceptions.ApiNotFoundError:
LOG.debug("No existing performance policy found")
if fpolicies:
dvol.performance_policy.create(tenant=tenant, **fpolicies)
# ============
# = IP Pools =
# ============
def _get_ip_pool_for_string_ip_2_2(self, ip, tenant):
"""Takes a string ipaddress and return the ip_pool API object dict """
pool = 'default'
ip_obj = ipaddress.ip_address(six.text_type(ip))
ip_pools = self.api.access_network_ip_pools.list(tenant=tenant)
for ipdata in ip_pools:
for adata in ipdata['network_paths']:
if not adata.get('start_ip'):
continue
pool_if = ipaddress.ip_interface(
"/".join((adata['start_ip'], str(adata['netmask']))))
if ip_obj in pool_if.network:
pool = ipdata.name
return self.api.access_network_ip_pools.get(pool, tenant=tenant).path
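# A small sketch of the matching above (addresses are made up):
#     import ipaddress
#     pool_if = ipaddress.ip_interface("172.28.41.9/255.255.224.0")
#     ipaddress.ip_address("172.28.41.93") in pool_if.network  # True
# i.e. the connector IP is matched against each pool's start_ip/netmask
# network, and the 'default' pool path is returned when nothing matches.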
# ====================
# = Volume Migration =
# ====================
def _update_migrated_volume_2_2(self, context, volume, new_volume,
volume_status):
"""Rename the newly created volume to the original volume.
So we can find it correctly.
"""
tenant = self.get_tenant(new_volume['project_id'])
ai = self.cvol_to_ai(new_volume, tenant=tenant)
data = {'name': datc.get_name(volume)}
ai.set(tenant=tenant, **data)
return {'_name_id': None}
@contextlib.contextmanager
def _offline_flip_2_2(self, volume):
reonline = False
tenant = self.get_tenant(volume['project_id'])
ai = self.cvol_to_ai(volume, tenant=tenant)
if ai.admin_state == 'online':
reonline = True
ai.set(tenant=tenant, admin_state='offline')
yield
if reonline:
ai.set(tenant=tenant, admin_state='online')
def _add_vol_meta_2_2(self, volume, connector=None):
if not self.do_metadata:
return
metadata = {'host': volume.get('host', ''),
'display_name': datc.filter_chars(
volume.get('display_name', '')),
'bootable': str(volume.get('bootable', False)),
'availability_zone': volume.get('availability_zone', '')}
if connector:
metadata.update(connector)
LOG.debug("Adding volume metadata: %s", metadata)
tenant = self.get_tenant(volume['project_id'])
ai = self.cvol_to_ai(volume, tenant=tenant)
ai.metadata.set(tenant=tenant, **metadata)
def _support_template_override_2_2(self):
# Getting the whole api schema is expensive
# so we only want to do this once per driver
# instantiation.
if not self.template_override:
return False
if not hasattr(self, '_to_22'):
api = self.api.api.get()
prop = api['/app_instances']['create']['bodyParamSchema'][
'properties']
self._to_22 = 'template_override' in prop
return self._to_22
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..framework import convert_np_dtype_to_dtype_
from ..framework import Variable
from ..initializer import Constant, force_init_on_cpu
from ..core import VarDesc
from .layer_function_generator import templatedoc
import numpy
__all__ = [
'create_tensor',
'create_parameter',
'create_global_var',
'cast',
'concat',
'sums',
'assign',
'fill_constant_batch_size_like',
'fill_constant',
'argmin',
'argmax',
'argsort',
'ones',
'zeros',
'reverse',
]
def create_tensor(dtype, name=None, persistable=False):
"""
Create a variable, which will hold a LoDTensor with data type dtype.
Args:
dtype(string): 'float32'|'int32'|..., the data type of the
created tensor.
name(string): The name of the created tensor, if not set,
the name will be a random unique one.
persistable(bool): Set the persistable flag of the create tensor.
Returns:
Variable: The tensor variable storing the created tensor.
Examples:
.. code-block:: python
tensor = fluid.layers.create_tensor(dtype='float32')
"""
helper = LayerHelper("create_tensor", **locals())
return helper.create_variable(
name=helper.name, dtype=dtype, persistable=persistable)
def create_parameter(shape,
dtype,
name=None,
attr=None,
is_bias=False,
default_initializer=None):
"""
Create a parameter. The parameter is a learnable variable, which can have
gradient, and can be optimized.
NOTE: this is a very low-level API. It is useful when you create an
operator yourself, instead of using layers.
Args:
shape(list[int]): shape of the parameter
dtype(string): element type of the parameter
attr(ParamAttr): attributes of the parameter
is_bias(bool): This can affect which default initializer is chosen
when default_initializer is None. If is_bias,
initializer.Constant(0.0) will be used. Otherwise,
Xavier() will be used.
default_initializer(Initializer): initializer for the parameter
Returns:
the created parameter.
Examples:
>>> W = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
>>> data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
>>> hidden = fluid.layers.matmul(x=data, y=W)
"""
helper = LayerHelper("create_parameter", **locals())
if attr is None:
attr = ParamAttr(name=name)
return helper.create_parameter(attr, shape, dtype, is_bias,
default_initializer)
def create_global_var(shape,
value,
dtype,
persistable=False,
force_cpu=False,
name=None):
"""
Create a new variable in the global block(block 0).
Args:
shape(list[int]): shape of the variable
value(float): the value of the variable. The new created
variable will be filled with it.
dtype(string): data type of the variable
persistable(bool): if this variable is persistable.
Default: False
force_cpu(bool): force this variable to be on CPU.
Default: False
name(str|None): The name of the variable. If set to None the variable
name will be generated automatically.
Default: None
Returns:
Variable: the created Variable
Examples:
.. code-block:: python
var = fluid.create_global_var(shape=[2,3], value=1.0, dtype='float32',
persistable=True, force_cpu=True, name='new_var')
"""
helper = LayerHelper("global_var", **locals())
var = helper.create_global_variable(
dtype=dtype, shape=shape, persistable=persistable, name=name)
helper.set_variable_initializer(
var, initializer=Constant(
value=float(value), force_cpu=force_cpu))
return var
def cast(x, dtype):
"""
This layer takes in the Variable :attr:`x` with :attr:`x.dtype` and casts
it to the output with :attr:`dtype`.
Args:
x (Variable): The input Variable for casting.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output Variable.
Returns:
Variable: The output Variable after casting.
Examples:
.. code-block:: python
data = fluid.layers.data(name='x', shape=[13], dtype='float32')
result = fluid.layers.cast(x=data, dtype='float64')
"""
helper = LayerHelper('cast', **locals())
out = helper.create_tmp_variable(dtype=dtype)
helper.append_op(
type='cast',
inputs={'X': [x]},
outputs={'Out': [out]},
attrs={'in_dtype': x.dtype,
'out_dtype': out.dtype})
return out
def concat(input, axis=0, name=None):
"""
**Concat**
This function concatenates the input along the axis mentioned
and returns that as the output.
Args:
input(list): List of tensors to be concatenated
axis(int): Integer axis along which the tensors will be concatenated
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Variable: Output variable of the concatenation
Examples:
.. code-block:: python
out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth])
"""
helper = LayerHelper('concat', **locals())
out = helper.create_tmp_variable(dtype=helper.input_dtype())
helper.append_op(
type='concat',
inputs={'X': input},
outputs={'Out': [out]},
attrs={'axis': axis})
return out
def sums(input, out=None):
"""
This function performs the sum operation on the input and returns the
result as the output.
Args:
input (Variable|list): The input tensor that has the elements
that need to be summed up.
out (Variable|None): Output parameter. The sum result.
Default: None
Returns:
Variable: the sum of input. The same as the argument 'out'
Examples:
.. code-block:: python
tmp = fluid.layers.zeros(shape=[10], dtype='int32')
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
a0 = layers.array_read(array=tmp, i=i)
i = layers.increment(x=i)
a1 = layers.array_read(array=tmp, i=i)
mean_a0 = layers.mean(a0)
mean_a1 = layers.mean(a1)
a_sum = layers.sums(input=[mean_a0, mean_a1])
"""
helper = LayerHelper('sum', **locals())
if out is None:
out = helper.create_tmp_variable(dtype=helper.input_dtype())
helper.append_op(
type='sum',
inputs={'X': input},
outputs={'Out': out},
attrs={'use_mkldnn': False})
return out
def assign(input, output=None):
"""
**Assign**
This function copies the *input* Variable to the *output* Variable.
Args:
input(Variable|numpy.ndarray): The source variable
output(Variable|None): The destination variable
Returns:
Variable: The destination variable that was supplied as the *output*.
Examples:
.. code-block:: python
out = fluid.layers.create_tensor(dtype='float32')
hidden = fluid.layers.fc(input=data, size=10)
fluid.layers.assign(hidden, out)
"""
helper = LayerHelper('assign', **locals())
if output is None:
output = helper.create_tmp_variable(dtype=input.dtype)
if isinstance(input, Variable):
helper.append_op(
type='assign', inputs={'X': [input]}, outputs={'Out': [output]})
elif isinstance(input, numpy.ndarray):
dtype = convert_np_dtype_to_dtype_(input.dtype)
if dtype == VarDesc.VarType.FP32:
value_name = "fp32_values"
values = [float(v) for v in input.flat]
elif dtype == VarDesc.VarType.INT32:
value_name = "int32_values"
values = [int(v) for v in input.flat]
else:
raise ValueError("Unsupported dtype %s" % input.dtype)
if input.size > 1024 * 1024:
raise ValueError("The size of input is too big. Please consider "
"saving it to file and 'load_op' to load it")
helper.append_op(
type='assign_value',
outputs={'Out': [output]},
attrs={
'dtype': dtype,
'shape': list(input.shape),
value_name: values
})
else:
raise ValueError("Wrong type for assign input: %s" % type(input))
return output
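# A minimal usage sketch (variable names are illustrative): ``assign`` also
# accepts a numpy.ndarray directly, subject to the float32/int32 dtype and
# 1024*1024 element limits enforced above, e.g.
#     result = fluid.layers.create_tensor(dtype='float32')
#     fluid.layers.assign(numpy.array([[1.0, 2.0, 3.0]], dtype='float32'),
#                         result)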
def fill_constant(shape, dtype, value, force_cpu=False, out=None):
"""
**fill_constant**
This function creates a tensor with specified `shape` and `dtype`, and
initializes it with a constant specified by `value`.
The attribute `stop_gradient` of the created tensor is set to True.
Args:
shape(tuple|list|None): Shape of the output tensor.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor.
value(float): The constant value used to initialize the output tensor.
out(Variable): The output tensor.
force_cpu(bool): Whether the output data should be placed on CPU. Default: False.
Returns:
Variable: The tensor variable storing the output.
Examples:
.. code-block:: python
data = fluid.layers.fill_constant(shape=[1], value=0, dtype='int64')
"""
helper = LayerHelper("fill_constant", **locals())
if out is None:
out = helper.create_tmp_variable(dtype=dtype)
helper.append_op(
type='fill_constant',
inputs={},
outputs={'Out': [out]},
attrs={
'shape': shape,
'dtype': out.dtype,
'value': float(value),
'force_cpu': force_cpu or force_init_on_cpu()
})
out.stop_gradient = True
return out
@templatedoc()
def fill_constant_batch_size_like(input,
shape,
dtype,
value,
input_dim_idx=0,
output_dim_idx=0):
"""
${comment}
It also sets *stop_gradient* to True.
>>> data = fluid.layers.fill_constant_batch_size_like(
>>> input=like, shape=[1], value=0, dtype='int64')
Args:
input(${input_type}): ${input_comment}.
shape(${shape_type}): ${shape_comment}.
dtype(${dtype_type}): ${dtype_comment}.
value(${value_type}): ${value_comment}.
input_dim_idx(${input_dim_idx_type}): ${input_dim_idx_comment}.
output_dim_idx(${output_dim_idx_type}): ${output_dim_idx_comment}.
Returns:
${out_comment}.
"""
helper = LayerHelper("fill_constant_batch_size_like", **locals())
out = helper.create_tmp_variable(dtype=dtype)
helper.append_op(
type='fill_constant_batch_size_like',
inputs={'Input': input},
outputs={'Out': [out]},
attrs={
'shape': shape,
'dtype': out.dtype,
'value': float(value),
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx
})
out.stop_gradient = True
return out
def argmin(x, axis=0):
"""
**argmin**
This function computes the indices of the min elements
of the input tensor along the provided axis.
Args:
x(Variable): The input to compute the indices of
the min elements.
axis(int): Axis to compute indices along.
Returns:
Variable: The tensor variable storing the output
Examples:
.. code-block:: python
out = fluid.layers.argmin(x=in, axis=0)
out = fluid.layers.argmin(x=in, axis=-1)
"""
helper = LayerHelper("arg_min", **locals())
out = helper.create_tmp_variable(VarDesc.VarType.INT64)
helper.append_op(
type='arg_min',
inputs={'X': x},
outputs={'Out': [out]},
attrs={'axis': axis})
return out
def argmax(x, axis=0):
"""
**argmax**
This function computes the indices of the max elements
of the input tensor along the provided axis.
Args:
x(Variable): The input to compute the indices of
the max elements.
axis(int): Axis to compute indices along.
Returns:
Variable: The tensor variable storing the output
Examples:
.. code-block:: python
out = fluid.layers.argmax(x=in, axis=0)
out = fluid.layers.argmax(x=in, axis=-1)
"""
helper = LayerHelper("arg_max", **locals())
out = helper.create_tmp_variable(VarDesc.VarType.INT64)
helper.append_op(
type='arg_max',
inputs={'X': x},
outputs={'Out': [out]},
attrs={'axis': axis})
return out
def argsort(input, axis=-1, name=None):
"""
Performs sorting on the input Variable along the given axis, and outputs
the sorted data Variable and its corresponding index Variable with the same
shape as :attr:`input`.
.. code-block:: text
For example, the given axis is -1 and the input Variable
input = [[0.15849551, 0.45865775, 0.8563702 ],
[0.12070083, 0.28766365, 0.18776911]],
after argsort, the sorted Variable becomes
out = [[0.15849551, 0.45865775, 0.8563702 ],
[0.12070083, 0.18776911, 0.28766365]],
and the sorted indices along the given axis turn out to be
indices = [[0, 1, 2],
[0, 2, 1]]
Args:
input(Variable): The input Variable for sorting.
axis(int): The axis along which to sort the input Variable. When
:attr:`axis` < 0, the actual axis will be :attr:`axis` +
rank(:attr:`input`). Default -1, the last dimension.
name(str|None): (optional) A name for this layer. If set None, the
layer will be named automatically.
Returns:
tuple: A tuple of sorted data Variable and the sorted indices.
Examples:
.. code-block:: python
input = fluid.layers.data(name="input", shape=[2, 3])
out, indices = fluid.layers.argsort(input, axis=0)
"""
helper = LayerHelper("argsort", **locals())
out = helper.create_tmp_variable(dtype=input.dtype, stop_gradient=True)
ids = helper.create_tmp_variable(VarDesc.VarType.INT64, stop_gradient=True)
helper.append_op(
type='argsort',
inputs={'X': input},
outputs={'Out': out,
'Indices': ids},
attrs={'axis': axis})
return out, ids
def ones(shape, dtype, force_cpu=False):
"""
**ones**
This function creates a tensor of specified *shape* and
*dtype*, and initializes it with 1.
It also sets *stop_gradient* to True.
Args:
shape(tuple|list|None): Shape of output tensor
dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor
Returns:
Variable: The tensor variable storing the output
Examples:
.. code-block:: python
data = fluid.layers.ones(shape=[1], dtype='int64')
"""
return fill_constant(value=1.0, **locals())
def zeros(shape, dtype, force_cpu=False):
"""
**zeros**
This function creates a tensor of specified *shape* and
*dtype*, and initializes it with 0.
It also sets *stop_gradient* to True.
Args:
shape(tuple|list|None): Shape of output tensor.
dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor.
force_cpu(bool, default False): Whether to make output stay on CPU.
Returns:
Variable: The tensor variable storing the output.
Examples:
.. code-block:: python
data = fluid.layers.zeros(shape=[1], dtype='int64')
"""
return fill_constant(value=0.0, **locals())
def reverse(x, axis):
"""
**reverse**
This function reverses the input 'x' along the given axes.
Args:
x(Variable): the input to be reversed.
axis(int|tuple|list): Axis along which the order of elements
is reversed. If it is a tuple or a list, reversing
will be applied on each axis in the tuple or list.
Returns:
Variable: The reversed tensor.
Examples:
.. code-block:: python
out = fluid.layers.reverse(x=in, axis=0)
# or:
out = fluid.layers.reverse(x=in, axis=[0,1])
"""
if isinstance(axis, int):
axis = [axis]
helper = LayerHelper("reverse", **locals())
out = helper.create_tmp_variable(dtype=x.dtype)
helper.append_op(
type='reverse',
inputs={'Input': x},
outputs={'Out': [out]},
attrs={'axis': axis})
return out
def save(x, file_path, overwrite=True):
"""
Saves a variable as a file.
Args:
x(variable): The Tensor/LoDTensor to be saved.
file_path(str): The file path where the variable will be saved.
overwrite(bool): Whether or not to overwrite the given file if it already
exists. If set to 'False' and the file exists, a runtime
error will be thrown.
"""
helper = LayerHelper("save", **locals())
helper.append_op(
type="save",
inputs={"input": x},
outputs={},
args={"file_path": file_path,
"overwrite": overwrite})
def save_combine(x, file_path, overwrite=True):
"""
Saves a list of variables into a single file.
Args:
x(list): A list of Tensor/LoDTensor variables to be saved together in
a single file.
file_path(str): The file path where variables will be saved.
overwrite(bool): Whether or not to overwrite the given file if it already
exists. If set to 'False' and the file exists, a runtime
error will be thrown.
Returns:
There is no return value.
Examples:
.. code-block:: python
v1 = fluid.layers.data(name="data_1",
shape=(4, 6),
dtype="float32")
v2 = fluid.layers.data(name="data_2",
shape=(6, 8, 4),
dtype="float32")
fluid.layers.save_combine([v1, v2], file_path="output")
"""
helper = LayerHelper("save_combine", **locals())
helper.append_op(
type="save_combine",
inputs={"input": x},
outputs={},
args={"file_path": file_path,
"overwrite": overwrite})
def load_combine(out, file_path):
"""
Loads a list of variables from a single file.
Args:
out(list): The list of variables to be read from the disk file.
file_path(str): The path of the disk file.
"""
helper = LayerHelper("load_combine", **locals())
helper.append_op(
type="load_combine",
inputs={},
output={"Out": out},
args={"file_path": file_path})
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Tests for spline models and fitters"""
# pylint: disable=invalid-name
from astropy.utils.exceptions import AstropyUserWarning
import pytest
import unittest.mock as mk
import numpy as np
from numpy.testing import assert_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY # noqa
from astropy.modeling.core import (FittableModel, ModelDefinitionError)
from astropy.modeling.spline import (_Spline, Spline1D, _SplineFitter)
from astropy.modeling.fitting import (SplineInterpolateFitter, SplineSmoothingFitter,
SplineExactKnotsFitter, SplineSplrepFitter)
from astropy.modeling.parameters import Parameter
npts = 50
nknots = 10
np.random.seed(42)
test_w = np.random.rand(npts)
test_t = [-1, 0, 1]
noise = np.random.randn(npts)
degree_tests = [1, 2, 3, 4, 5]
wieght_tests = [None, test_w]
smoothing_tests = [None, 0.01]
class TestSpline:
def setup_class(self):
self.num_opt = 3
self.optional_inputs = {f'test{i}': mk.MagicMock() for i in range(self.num_opt)}
self.extra_kwargs = {f'new{i}': mk.MagicMock() for i in range(self.num_opt)}
class Spline(_Spline):
optional_inputs = {'test': 'test'}
def _init_parameters(self):
super()._init_parameters()
def _init_data(self, knots, coeffs, bounds=None):
super()._init_data(knots, coeffs, bounds=bounds)
self.Spline = Spline
def test___init__(self):
# empty spline
spl = self.Spline()
assert spl._t is None
assert spl._c is None
assert spl._user_knots is False
assert spl._degree is None
assert spl._test is None
assert not hasattr(spl, 'degree')
# Call _init_spline
with mk.patch.object(_Spline, '_init_spline',
autospec=True) as mkInit:
# No call (knots=None)
spl = self.Spline()
assert mkInit.call_args_list == []
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
spl = self.Spline(knots=knots, coeffs=coeffs, bounds=bounds)
assert mkInit.call_args_list == \
[mk.call(spl, knots, coeffs, bounds)]
assert spl._t is None
assert spl._c is None
assert spl._user_knots is False
assert spl._degree is None
assert spl._test is None
# Coeffs but no knots
with pytest.raises(ValueError) as err:
self.Spline(coeffs=mk.MagicMock())
assert str(err.value) == \
"If one passes a coeffs vector one needs to also pass knots!"
def test_param_names(self):
# no parameters
spl = self.Spline()
assert spl.param_names == ()
knot_names = tuple([mk.MagicMock() for _ in range(3)])
spl._knot_names = knot_names
assert spl.param_names == knot_names
coeff_names = tuple([mk.MagicMock() for _ in range(3)])
spl._coeff_names = coeff_names
assert spl.param_names == knot_names + coeff_names
def test__optional_arg(self):
spl = self.Spline()
assert spl._optional_arg('test') == '_test'
def test__create_optional_inputs(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
def __init__(self):
self._create_optional_inputs()
spl = Spline()
for arg in self.optional_inputs:
attribute = spl._optional_arg(arg)
assert hasattr(spl, attribute)
assert getattr(spl, attribute) is None
with pytest.raises(ValueError,
match=r"Optional argument .* already exists in this class!"):
spl._create_optional_inputs()
def test__intercept_optional_inputs(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
def __init__(self):
self._create_optional_inputs()
spl = Spline()
new_kwargs = spl._intercept_optional_inputs(**self.extra_kwargs)
for arg, value in self.optional_inputs.items():
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is None
assert new_kwargs == self.extra_kwargs
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
new_kwargs = spl._intercept_optional_inputs(**kwargs)
for arg, value in self.optional_inputs.items():
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is not None
assert getattr(spl, attribute) == kwargs[arg]
assert getattr(spl, attribute) != value
assert arg not in new_kwargs
assert new_kwargs == self.extra_kwargs
assert kwargs != self.extra_kwargs
with pytest.raises(RuntimeError,
match=r".* has already been set, something has gone wrong!"):
spl._intercept_optional_inputs(**kwargs)
def test_evaluate(self):
class Spline(self.Spline):
optional_inputs = self.optional_inputs
spl = Spline()
# No options passed in and No options set
new_kwargs = spl.evaluate(**self.extra_kwargs)
for arg, value in self.optional_inputs.items():
assert new_kwargs[arg] == value
for arg, value in self.extra_kwargs.items():
assert new_kwargs[arg] == value
assert len(new_kwargs) == (len(self.optional_inputs) + len(self.extra_kwargs))
# No options passed in and Options set
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
spl._intercept_optional_inputs(**kwargs)
new_kwargs = spl.evaluate(**self.extra_kwargs)
assert new_kwargs == kwargs
for arg in self.optional_inputs:
attribute = spl._optional_arg(arg)
assert getattr(spl, attribute) is None
# Options passed in
set_kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
spl._intercept_optional_inputs(**set_kwargs)
kwargs = self.extra_kwargs.copy()
for arg in self.optional_inputs:
kwargs[arg] = mk.MagicMock()
assert set_kwargs != kwargs
new_kwargs = spl.evaluate(**kwargs)
assert new_kwargs == kwargs
def test___call__(self):
spl = self.Spline()
args = tuple([mk.MagicMock() for _ in range(3)])
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
with mk.patch.object(_Spline, "_intercept_optional_inputs",
autospec=True, return_value=new_kwargs) as mkIntercept:
with mk.patch.object(FittableModel, "__call__",
autospec=True) as mkCall:
assert mkCall.return_value == spl(*args, **kwargs)
assert mkCall.call_args_list == \
[mk.call(spl, *args, **new_kwargs)]
assert mkIntercept.call_args_list == \
[mk.call(spl, **kwargs)]
def test__create_parameter(self):
np.random.seed(37)
base_vec = np.random.random(20)
test = base_vec.copy()
fixed_test = base_vec.copy()
class Spline(self.Spline):
@property
def test(self):
return test
@property
def fixed_test(self):
return fixed_test
spl = Spline()
assert (spl.test == test).all()
assert (spl.fixed_test == fixed_test).all()
for index in range(20):
name = f"test_name{index}"
spl._create_parameter(name, index, 'test')
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.model == spl
assert param.fixed is False
assert param.value == test[index] == spl.test[index] == base_vec[index]
new_set = np.random.random()
param.value = new_set
assert spl.test[index] == new_set
assert spl.test[index] != base_vec[index]
new_get = np.random.random()
spl.test[index] = new_get
assert param.value == new_get
assert param.value != new_set
for index in range(20):
name = f"fixed_test_name{index}"
spl._create_parameter(name, index, 'fixed_test', True)
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.model == spl
assert param.fixed is True
assert param.value == fixed_test[index] == spl.fixed_test[index] == base_vec[index]
new_set = np.random.random()
param.value = new_set
assert spl.fixed_test[index] == new_set
assert spl.fixed_test[index] != base_vec[index]
new_get = np.random.random()
spl.fixed_test[index] = new_get
assert param.value == new_get
assert param.value != new_set
def test__create_parameters(self):
np.random.seed(37)
test = np.random.random(20)
class Spline(self.Spline):
@property
def test(self):
return test
spl = Spline()
fixed = mk.MagicMock()
with mk.patch.object(_Spline, '_create_parameter',
autospec=True) as mkCreate:
params = spl._create_parameters("test_param", "test", fixed)
assert params == tuple([f"test_param{idx}" for idx in range(20)])
assert mkCreate.call_args_list == \
[mk.call(spl, f"test_param{idx}", idx, 'test', fixed) for idx in range(20)]
def test__init_parameters(self):
spl = self.Spline()
with pytest.raises(NotImplementedError) as err:
spl._init_parameters()
assert str(err.value) == \
"This needs to be implemented"
def test__init_data(self):
spl = self.Spline()
with pytest.raises(NotImplementedError) as err:
spl._init_data(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) == \
"This needs to be implemented"
with pytest.raises(NotImplementedError) as err:
spl._init_data(mk.MagicMock(), mk.MagicMock())
assert str(err.value) == \
"This needs to be implemented"
def test__init_spline(self):
spl = self.Spline()
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
with mk.patch.object(_Spline, "_init_parameters",
autospec=True) as mkParameters:
with mk.patch.object(_Spline, "_init_data",
autospec=True) as mkData:
main = mk.MagicMock()
main.attach_mock(mkParameters, 'parameters')
main.attach_mock(mkData, 'data')
spl._init_spline(knots, coeffs, bounds)
assert main.mock_calls == [
mk.call.data(spl, knots, coeffs, bounds=bounds),
mk.call.parameters(spl)
]
def test__init_tck(self):
spl = self.Spline()
assert spl._c is None
assert spl._t is None
assert spl._degree is None
spl = self.Spline(degree=4)
assert spl._c is None
assert spl._t is None
assert spl._degree == 4
@pytest.mark.skipif('not HAS_SCIPY')
class TestSpline1D:
def setup_class(self):
def func(x, noise=0):
return np.exp(-x**2) + 0.1*noise
self.x = np.linspace(-3, 3, npts)
self.y = func(self.x, noise)
self.truth = func(self.x)
arg_sort = np.argsort(self.x)
np.random.shuffle(arg_sort)
self.x_s = self.x[arg_sort]
self.y_s = func(self.x_s, noise[arg_sort])
self.npts_out = 1000
self.xs = np.linspace(-3, 3, self.npts_out)
self.t = np.linspace(-3, 3, nknots)[1:-1]
def check_parameter(self, spl, base_name, name, index, value, fixed):
assert base_name in name
assert index == int(name.split(base_name)[-1])
knot_name = f"{base_name}{index}"
assert knot_name == name
assert hasattr(spl, name)
param = getattr(spl, name)
assert isinstance(param, Parameter)
assert param.name == name
assert param.value == value(index)
assert param.model == spl
assert param.fixed is fixed
def check_parameters(self, spl, params, base_name, value, fixed):
for idx, name in enumerate(params):
self.check_parameter(spl, base_name, name, idx, value, fixed)
def update_parameters(self, spl, knots, value):
for name in knots:
param = getattr(spl, name)
param.value = value
assert param.value == value
def test___init__with_no_knot_information(self):
spl = Spline1D()
assert spl._degree == 3
assert spl._user_knots is False
assert spl._t is None
assert spl._c is None
assert spl._nu is None
# Check no parameters created
assert len(spl._knot_names) == 0
assert len(spl._coeff_names) == 0
def test___init__with_number_of_knots(self):
spl = Spline1D(knots=10)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is False
assert spl._nu is None
# Check vector data
assert len(spl._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert len(spl._c) == 18
assert (spl._c == np.zeros(18)).all()
# Check all parameter names created:
assert len(spl._knot_names) == 18
assert len(spl._coeff_names) == 18
# Check knot values:
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values:
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_full_custom_knots(self):
t = 17*np.arange(20) - 32
spl = Spline1D(knots=t)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == np.zeros(20)).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_interior_custom_knots(self):
t = np.arange(1, 20)
spl = Spline1D(knots=t, bounds=[0, 20])
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert len(spl._t) == 27
assert (spl._t[4:-4] == t).all()
assert (spl._t[:4] == 0).all()
assert (spl._t[-4:] == 20).all()
assert len(spl._c) == 27
assert (spl._c == np.zeros(27)).all()
# Check knot values:
def value0(idx):
if idx < 4:
return 0
elif idx >= 19 + 4:
return 20
else:
return t[idx-4]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__with_user_knots_and_coefficients(self):
t = 17*np.arange(20) - 32
c = np.linspace(-1, 1, 20)
spl = Spline1D(knots=t, coeffs=c)
# Check baseline data
assert spl._degree == 3
assert spl._user_knots is True
assert spl._nu is None
# Check vector data
assert (spl._t == t).all()
assert len(spl._c) == 20
assert (spl._c == c).all()
# Check all parameter names created
assert len(spl._knot_names) == 20
assert len(spl._coeff_names) == 20
# Check knot values:
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check coeff values
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
def test___init__errors(self):
# Bad knot type
knots = 3.5
with pytest.raises(ValueError) as err:
Spline1D(knots=knots)
assert str(err.value) ==\
f"Knots: {knots} must be iterable or value"
# Not enough knots
for idx in range(8):
with pytest.raises(ValueError) as err:
Spline1D(knots=np.arange(idx))
assert str(err.value) ==\
"Must have at least 8 knots."
# Bad scipy spline
t = np.arange(20)[::-1]
with pytest.raises(ValueError):
Spline1D(knots=t)
def test_parameter_array_link(self):
spl = Spline1D(10)
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
# Check knot vector -> knot parameter link
t = np.arange(18)
spl._t = t.copy()
def value1(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value1, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl, spl._knot_names, 3)
assert (spl._t[:] == 3).all()
# Check coeff base values
def value2(idx):
return 0
self.check_parameters(spl, spl._coeff_names, "coeff", value2, False)
# Check coeff vector -> coeff parameter link
c = 5 * np.arange(18) + 18
spl._c = c.copy()
def value3(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value3, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl, spl._coeff_names, 4)
assert (spl._c[:] == 4).all()
def test_two_splines(self):
spl0 = Spline1D(knots=10)
spl1 = Spline1D(knots=15, degree=2)
assert spl0._degree == 3
assert len(spl0._t) == 18
t = np.zeros(18)
t[-4:] = 1
assert (spl0._t == t).all()
assert len(spl0._c) == 18
assert (spl0._c == np.zeros(18)).all()
assert spl1._degree == 2
assert len(spl1._t) == 21
t = np.zeros(21)
t[-3:] = 1
assert (spl1._t == t).all()
assert len(spl1._c) == 21
assert (spl1._c == np.zeros(21)).all()
# Check all knot names created
assert len(spl0._knot_names) == 18
assert len(spl1._knot_names) == 21
# Check knot base values
def value0(idx):
if idx < 18 - 4:
return 0
else:
return 1
self.check_parameters(spl0, spl0._knot_names, "knot", value0, True)
def value1(idx):
if idx < 21 - 3:
return 0
else:
return 1
self.check_parameters(spl1, spl1._knot_names, "knot", value1, True)
# Check knot vector -> knot parameter link
t0 = 7 * np.arange(18) + 27
t1 = 11 * np.arange(21) + 19
spl0._t[:] = t0.copy()
spl1._t[:] = t1.copy()
def value2(idx):
return t0[idx]
self.check_parameters(spl0, spl0._knot_names, "knot", value2, True)
def value3(idx):
return t1[idx]
self.check_parameters(spl1, spl1._knot_names, "knot", value3, True)
# Check knot parameter -> knot vector link
self.update_parameters(spl0, spl0._knot_names, 3)
self.update_parameters(spl1, spl1._knot_names, 4)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
# Check all coeff names created
assert len(spl0._coeff_names) == 18
assert len(spl1._coeff_names) == 21
# Check coeff base values
def value4(idx):
return 0
self.check_parameters(spl0, spl0._coeff_names, "coeff", value4, False)
self.check_parameters(spl1, spl1._coeff_names, "coeff", value4, False)
# Check coeff vector -> coeff parameter link
c0 = 17 * np.arange(18) + 14
c1 = 37 * np.arange(21) + 47
spl0._c[:] = c0.copy()
spl1._c[:] = c1.copy()
def value5(idx):
return c0[idx]
self.check_parameters(spl0, spl0._coeff_names, "coeff", value5, False)
def value6(idx):
return c1[idx]
self.check_parameters(spl1, spl1._coeff_names, "coeff", value6, False)
# Check coeff parameter -> coeff vector link
self.update_parameters(spl0, spl0._coeff_names, 5)
self.update_parameters(spl1, spl1._coeff_names, 6)
assert (spl0._t[:] == 3).all()
assert (spl1._t[:] == 4).all()
assert (spl0._c[:] == 5).all()
assert (spl1._c[:] == 6).all()
def test__knot_names(self):
# no parameters
spl = Spline1D()
assert spl._knot_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._knot_names == tuple(knot_names)
def test__coeff_names(self):
# no parameters
spl = Spline1D()
assert spl._coeff_names == ()
# some parameters
coeff_names = [f"coeff{idx}" for idx in range(18)]
spl = Spline1D(10)
assert spl._coeff_names == tuple(coeff_names)
def test_param_names(self):
# no parameters
spl = Spline1D()
assert spl.param_names == ()
# some parameters
knot_names = [f"knot{idx}" for idx in range(18)]
coeff_names = [f"coeff{idx}" for idx in range(18)]
param_names = knot_names + coeff_names
spl = Spline1D(10)
assert spl.param_names == tuple(param_names)
def test_t(self):
# no parameters
spl = Spline1D()
# test get
assert spl._t is None
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
# test set
with pytest.raises(ValueError) as err:
spl.t = mk.MagicMock()
assert str(err.value) ==\
"The model parameters must be initialized before setting knots."
# with parameters
spl = Spline1D(10)
# test get
t = np.zeros(18)
t[-4:] = 1
assert (spl._t == t).all()
assert (spl.t == t).all()
# test set
spl.t = (np.arange(18) + 15)
assert (spl._t == (np.arange(18) + 15)).all()
assert (spl.t == (np.arange(18) + 15)).all()
assert (spl.t != t).all()
# set error
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError) as err:
spl.t = np.arange(idx)
assert str(err.value) == \
"There must be exactly as many knots as previously defined."
def test_c(self):
# no parameters
spl = Spline1D()
# test get
assert spl._c is None
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
# test set
with pytest.raises(ValueError) as err:
spl.c = mk.MagicMock()
assert str(err.value) ==\
"The model parameters must be initialized before setting coeffs."
# with parameters
spl = Spline1D(10)
# test get
assert (spl._c == np.zeros(18)).all()
assert (spl.c == np.zeros(18)).all()
# test set
spl.c = (np.arange(18) + 15)
assert (spl._c == (np.arange(18) + 15)).all()
assert (spl.c == (np.arange(18) + 15)).all()
assert (spl.c != np.zeros(18)).all()
# set error
for idx in range(30):
if idx == 18:
continue
with pytest.raises(ValueError) as err:
spl.c = np.arange(idx)
assert str(err.value) == \
"There must be exactly as many coeffs as previously defined."
def test_degree(self):
# default degree
spl = Spline1D()
# test get
assert spl._degree == 3
assert spl.degree == 3
# test set
# non-default degree
spl = Spline1D(degree=2)
# test get
assert spl._degree == 2
assert spl.degree == 2
def test__initialized(self):
# no parameters
spl = Spline1D()
assert spl._initialized is False
# with parameters
spl = Spline1D(knots=10, degree=2)
assert spl._initialized is True
def test_tck(self):
# no parameters
spl = Spline1D()
# test get
assert (spl.t == [0, 0, 0, 0, 1, 1, 1, 1]).all()
assert (spl.c == [0, 0, 0, 0, 0, 0, 0, 0]).all()
assert spl.degree == 3
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
spl.tck = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
def value0(idx):
return t[idx]
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
def value1(idx):
return c[idx]
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
# test get
t = np.zeros(16)
t[-3:] = 1
assert (spl.t == t).all()
assert (spl.c == np.zeros(16)).all()
assert spl.degree == 2
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# test set
t = 5*np.arange(16) + 11
c = 7*np.arange(16) + 13
k = 2
spl.tck = (t, c, k)
assert (spl.t == t).all()
assert (spl.c == c).all()
assert spl.degree == k
tck = spl.tck
assert (tck[0] == spl.t).all()
assert (tck[1] == spl.c).all()
assert tck[2] == spl.degree
# Error
with pytest.raises(ValueError) as err:
spl.tck = (t, c, 4)
assert str(err.value) ==\
"tck has incompatible degree!"
def test_bspline(self):
from scipy.interpolate import BSpline
# no parameters
spl = Spline1D()
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
t = np.array([0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5])
np.random.seed(619)
c = np.random.random(12)
k = 3
def value0(idx):
return t[idx]
def value1(idx):
return c[idx]
# set (bspline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
bspline = BSpline(t, c, k)
spl.bspline = bspline
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# set (tuple spline)
spl = Spline1D()
assert spl._t is None
assert spl._c is None
assert spl._knot_names == ()
assert spl._coeff_names == ()
spl.bspline = (t, c, k)
assert (spl._t == t).all()
assert (spl._c == c).all()
assert spl.degree == k
self.check_parameters(spl, spl._knot_names, "knot", value0, True)
self.check_parameters(spl, spl._coeff_names, "coeff", value1, False)
# with parameters
spl = Spline1D(knots=10, degree=2)
bspline = spl.bspline
assert isinstance(bspline, BSpline)
assert (bspline.tck[0] == spl.tck[0]).all()
assert (bspline.tck[1] == spl.tck[1]).all()
assert bspline.tck[2] == spl.tck[2]
def test_knots(self):
# no parameters
spl = Spline1D()
assert spl.knots == []
# with parameters
spl = Spline1D(10)
knots = spl.knots
assert len(knots) == 18
for knot in knots:
assert isinstance(knot, Parameter)
assert hasattr(spl, knot.name)
assert getattr(spl, knot.name) == knot
def test_coeffs(self):
# no parameters
spl = Spline1D()
assert spl.coeffs == []
# with parameters
spl = Spline1D(10)
coeffs = spl.coeffs
assert len(coeffs) == 18
for coeff in coeffs:
assert isinstance(coeff, Parameter)
assert hasattr(spl, coeff.name)
assert getattr(spl, coeff.name) == coeff
def test__init_parameters(self):
spl = Spline1D()
with mk.patch.object(Spline1D, '_create_parameters',
autospec=True) as mkCreate:
spl._init_parameters()
assert mkCreate.call_args_list == [
mk.call(spl, "knot", "t", fixed=True),
mk.call(spl, "coeff", "c")
]
def test__init_bounds(self):
spl = Spline1D()
has_bounds, lower, upper = spl._init_bounds()
assert has_bounds is False
assert (lower == [0, 0, 0, 0]).all()
assert (upper == [1, 1, 1, 1]).all()
assert spl._user_bounding_box is None
has_bounds, lower, upper = spl._init_bounds((-5, 5))
assert has_bounds is True
assert (lower == [-5, -5, -5, -5]).all()
assert (upper == [5, 5, 5, 5]).all()
assert spl._user_bounding_box == (-5, 5)
def test__init_knots(self):
np.random.seed(19)
lower = np.random.random(4)
upper = np.random.random(4)
# Integer
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
spl = Spline1D()
assert spl._t is None
spl._init_knots(10, mk.MagicMock(), lower, upper)
t = np.concatenate((lower, np.zeros(10), upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with bounds
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, True, lower, upper)
t = np.concatenate((lower, knots, upper))
assert (spl._t == t).all()
assert mkBspline.call_args_list == [mk.call()]
# vector with no bounds
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
knots = np.random.random(10)
spl = Spline1D()
assert spl._t is None
spl._init_knots(knots, False, lower, upper)
assert (spl._t == knots).all()
assert mkBspline.call_args_list == [mk.call()]
# error
for num in range(8):
knots = np.random.random(num)
spl = Spline1D()
assert spl._t is None
with pytest.raises(ValueError) as err:
spl._init_knots(knots, False, lower, upper)
assert str(err.value) == \
"Must have at least 8 knots."
# Error
spl = Spline1D()
assert spl._t is None
with pytest.raises(ValueError) as err:
spl._init_knots(0.5, False, lower, upper)
assert str(err.value) ==\
"Knots: 0.5 must be iterable or value"
def test__init_coeffs(self):
np.random.seed(492)
# No coeffs
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
spl = Spline1D()
assert spl._c is None
spl._t = [1, 2, 3, 4]
spl._init_coeffs()
assert (spl._c == [0, 0, 0, 0]).all()
assert mkBspline.call_args_list == [mk.call()]
# Some coeffs
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
coeffs = np.random.random(10)
spl = Spline1D()
assert spl._c is None
spl._init_coeffs(coeffs)
assert (spl._c == coeffs).all()
assert mkBspline.call_args_list == [mk.call()]
def test__init_data(self):
spl = Spline1D()
knots = mk.MagicMock()
coeffs = mk.MagicMock()
bounds = mk.MagicMock()
has_bounds = mk.MagicMock()
lower = mk.MagicMock()
upper = mk.MagicMock()
with mk.patch.object(Spline1D, '_init_bounds', autospec=True,
return_value=(has_bounds, lower, upper)) as mkBounds:
with mk.patch.object(Spline1D, '_init_knots',
autospec=True) as mkKnots:
with mk.patch.object(Spline1D, '_init_coeffs',
autospec=True) as mkCoeffs:
main = mk.MagicMock()
main.attach_mock(mkBounds, 'bounds')
main.attach_mock(mkKnots, 'knots')
main.attach_mock(mkCoeffs, 'coeffs')
spl._init_data(knots, coeffs, bounds)
assert main.mock_calls == [
mk.call.bounds(spl, bounds),
mk.call.knots(spl, knots, has_bounds, lower, upper),
mk.call.coeffs(spl, coeffs)
]
def test_evaluate(self):
spl = Spline1D()
args = tuple([mk.MagicMock() for _ in range(3)])
kwargs = {f"test{idx}": mk.MagicMock() for idx in range(3)}
new_kwargs = {f"new_test{idx}": mk.MagicMock() for idx in range(3)}
with mk.patch.object(_Spline, 'evaluate', autospec=True,
return_value=new_kwargs) as mkEval:
with mk.patch.object(Spline1D, "bspline",
new_callable=mk.PropertyMock) as mkBspline:
assert mkBspline.return_value.return_value == spl.evaluate(*args, **kwargs)
assert mkBspline.return_value.call_args_list == \
[mk.call(args[0], **new_kwargs)]
assert mkBspline.call_args_list == [mk.call()]
assert mkEval.call_args_list == \
[mk.call(spl, *args, **kwargs)]
# Error
for idx in range(5, 8):
with mk.patch.object(_Spline, 'evaluate', autospec=True,
return_value={'nu': idx}):
with pytest.raises(RuntimeError) as err:
spl.evaluate(*args, **kwargs)
assert str(err.value) == \
"Cannot evaluate a derivative of order higher than 4"
def check_knots_created(self, spl, k):
def value0(idx):
return self.x[0]
def value1(idx):
return self.x[-1]
for idx in range(k + 1):
name = f"knot{idx}"
self.check_parameter(spl, "knot", name, idx, value0, True)
index = len(spl.t) - (k + 1) + idx
name = f"knot{index}"
self.check_parameter(spl, "knot", name, index, value1, True)
def value3(idx):
return spl.t[idx]
assert len(spl._knot_names) == len(spl.t)
for idx, name in enumerate(spl._knot_names):
assert name == f"knot{idx}"
self.check_parameter(spl, "knot", name, idx, value3, True)
def check_coeffs_created(self, spl):
def value(idx):
return spl.c[idx]
assert len(spl._coeff_names) == len(spl.c)
for idx, name in enumerate(spl._coeff_names):
assert name == f"coeff{idx}"
self.check_parameter(spl, "coeff", name, idx, value, False)
@staticmethod
def check_base_spline(spl, t, c, k):
"""Check the base spline form"""
if t is None:
assert spl._t is None
else:
assert_allclose(spl._t, t)
if c is None:
assert spl._c is None
else:
assert_allclose(spl._c, c)
assert spl.degree == k
assert spl._bounding_box is None
def check_spline_fit(self, fit_spl, spline, fitter, atol_fit, atol_truth):
"""Check the spline fit"""
assert_allclose(fit_spl.t, spline._eval_args[0])
assert_allclose(fit_spl.c, spline._eval_args[1])
assert_allclose(fitter.fit_info['spline']._eval_args[0], spline._eval_args[0])
assert_allclose(fitter.fit_info['spline']._eval_args[1], spline._eval_args[1])
# check that _parameters are correct
assert len(fit_spl._parameters) == len(fit_spl.t) + len(fit_spl.c)
assert_allclose(fit_spl._parameters[:len(fit_spl.t)], fit_spl.t)
assert_allclose(fit_spl._parameters[len(fit_spl.t):], fit_spl.c)
# check that parameters are correct
assert len(fit_spl.parameters) == len(fit_spl.t) + len(fit_spl.c)
assert_allclose(fit_spl.parameters[:len(fit_spl.t)], fit_spl.t)
assert_allclose(fit_spl.parameters[len(fit_spl.t):], fit_spl.c)
assert_allclose(spline.get_residual(), fitter.fit_info['resid'])
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), fitter.fit_info['spline'](self.x))
assert_allclose(fit_spl(self.x), self.y, atol=atol_fit)
assert_allclose(fit_spl(self.x), self.truth, atol=atol_truth)
def check_bbox(self, spl, fit_spl, fitter, w, **kwargs):
"""Check the spline fit with bbox option"""
bbox = [self.x[0], self.x[-1]]
bbox_spl = fitter(spl, self.x, self.y, weights=w, bbox=bbox, **kwargs)
assert bbox_spl.bounding_box == tuple(bbox)
assert_allclose(fit_spl.t, bbox_spl.t)
assert_allclose(fit_spl.c, bbox_spl.c)
def check_knots_warning(self, fitter, knots, k, w, **kwargs):
"""Check that the knots warning is raised"""
spl = Spline1D(knots=knots, degree=k)
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, weights=w, **kwargs)
    @pytest.mark.parametrize('w', weight_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_interpolate_fitter(self, w, k):
fitter = SplineInterpolateFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, None, None, k)
assert len(fit_spl.t) == (len(self.x) + k + 1) == len(fit_spl._knot_names)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import InterpolatedUnivariateSpline, UnivariateSpline
spline = InterpolatedUnivariateSpline(self.x, self.y, w=w, k=k)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
assert spline.get_residual() == 0
self.check_spline_fit(fit_spl, spline, fitter, 0, 1)
self.check_bbox(spl, fit_spl, fitter, w)
knots = np.linspace(self.x[0], self.x[-1], len(self.x) + k + 1)
self.check_knots_warning(fitter, knots, k, w)
    @pytest.mark.parametrize('w', weight_tests)
@pytest.mark.parametrize('k', degree_tests)
@pytest.mark.parametrize('s', smoothing_tests)
def test_smoothing_fitter(self, w, k, s):
fitter = SplineSmoothingFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import UnivariateSpline
spline = UnivariateSpline(self.x, self.y, w=w, k=k, s=s)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
self.check_bbox(spl, fit_spl, fitter, w, s=s)
# test warning
knots = fit_spl.t.copy()
self.check_knots_warning(fitter, knots, k, w, s=s)
    @pytest.mark.parametrize('w', weight_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_exact_knots_fitter(self, w, k):
fitter = SplineExactKnotsFitter()
assert fitter.fit_info == {'resid': None, 'spline': None}
knots = [-1, 0, 1]
t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))
c = np.zeros(len(t))
# With knots preset
spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
assert len(fit_spl.t) == len(t) == len(fit_spl._knot_names)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import LSQUnivariateSpline, UnivariateSpline
spline = LSQUnivariateSpline(self.x, self.y, knots, w=w, k=k)
assert isinstance(fitter.fit_info['spline'], UnivariateSpline)
assert_allclose(spline.get_residual(), 0.1, atol=1)
assert_allclose(fitter.fit_info['spline'].get_residual(), 0.1, atol=1)
self.check_spline_fit(fit_spl, spline, fitter, 1, 1)
self.check_bbox(spl, fit_spl, fitter, w)
# Pass knots via fitter function
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, t=knots, weights=w)
# pass no knots
spl = Spline1D(degree=k)
with pytest.raises(RuntimeError) as err:
fitter(spl, self.x, self.y, weights=w)
assert str(err.value) ==\
"No knots have been provided"
    @pytest.mark.parametrize('w', weight_tests)
@pytest.mark.parametrize('k', degree_tests)
@pytest.mark.parametrize('s', smoothing_tests)
def test_splrep_fitter_no_knots(self, w, k, s):
fitter = SplineSplrepFitter()
assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, s=s, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import splrep, BSpline
tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,
w=w, k=k, s=s, full_output=1)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
assert fitter.fit_info['fp'] == spline_fp
assert fitter.fit_info['ier'] == spline_ier
assert fitter.fit_info['msg'] == spline_msg
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w, s=s)
    @pytest.mark.parametrize('w', weight_tests)
@pytest.mark.parametrize('k', degree_tests)
def test_splrep_fitter_with_knots(self, w, k):
fitter = SplineSplrepFitter()
assert fitter.fit_info == {'fp': None, 'ier': None, 'msg': None}
knots = [-1, 0, 1]
t = np.concatenate(([self.x[0]]*(k + 1), knots, [self.x[-1]]*(k + 1)))
c = np.zeros(len(t))
# With knots preset
spl = Spline1D(knots=knots, degree=k, bounds=[self.x[0], self.x[-1]])
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
fit_spl = fitter(spl, self.x, self.y, weights=w)
self.check_base_spline(spl, t, c, k)
assert (spl.t_interior == knots).all()
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import splrep, BSpline
tck, spline_fp, spline_ier, spline_msg = splrep(self.x, self.y,
w=w, k=k, t=knots, full_output=1)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
assert fitter.fit_info['fp'] == spline_fp
assert fitter.fit_info['ier'] == spline_ier
assert fitter.fit_info['msg'] == spline_msg
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w)
# test warning
with pytest.warns(AstropyUserWarning):
fitter(spl, self.x, self.y, t=knots, weights=w)
# With no knots present
spl = Spline1D(degree=k)
self.check_base_spline(spl, None, None, k)
fit_spl = fitter(spl, self.x, self.y, t=knots, weights=w)
self.check_base_spline(spl, None, None, k)
self.check_knots_created(fit_spl, k)
self.check_coeffs_created(fit_spl)
assert fit_spl._bounding_box is None
from scipy.interpolate import splrep, BSpline
tck = splrep(self.x, self.y, w=w, k=k, t=knots)
assert_allclose(fit_spl.t, tck[0])
assert_allclose(fit_spl.c, tck[1])
spline = BSpline(*tck)
assert_allclose(fit_spl(self.x), spline(self.x))
assert_allclose(fit_spl(self.x), self.y, atol=1)
assert_allclose(fit_spl(self.x), self.truth, atol=1)
self.check_bbox(spl, fit_spl, fitter, w, t=knots)
def generate_spline(self, w=None, bbox=[None]*2, k=None, s=None, t=None):
if k is None:
k = 3
from scipy.interpolate import splrep, BSpline
tck = splrep(self.x, self.y, w=w, xb=bbox[0], xe=bbox[1],
k=k, s=s, t=t)
return BSpline(*tck)
def test_derivative(self):
bspline = self.generate_spline()
spl = Spline1D()
spl.bspline = bspline
assert_allclose(spl.t, bspline.t)
assert_allclose(spl.c, bspline.c)
assert spl.degree == bspline.k
# 1st derivative
d_bspline = bspline.derivative(nu=1)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=1))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=2))
assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=3), bspline(self.xs, nu=4))
der = spl.derivative()
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 2
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=1))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=2))
assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=3), spl.evaluate(self.xs, nu=4))
# 2nd derivative
d_bspline = bspline.derivative(nu=2)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=2))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=2), bspline(self.xs, nu=4))
der = spl.derivative(nu=2)
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 1
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=2))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=2), spl.evaluate(self.xs, nu=4))
# 3rd derivative
d_bspline = bspline.derivative(nu=3)
assert_allclose(d_bspline(self.xs), bspline(self.xs, nu=3))
assert_allclose(d_bspline(self.xs, nu=1), bspline(self.xs, nu=4))
der = spl.derivative(nu=3)
assert_allclose(der.t, d_bspline.t)
assert_allclose(der.c, d_bspline.c)
assert der.degree == d_bspline.k == 0
assert_allclose(der.evaluate(self.xs), spl.evaluate(self.xs, nu=3))
assert_allclose(der.evaluate(self.xs, nu=1), spl.evaluate(self.xs, nu=4))
# Too many derivatives
for nu in range(4, 9):
with pytest.raises(ValueError) as err:
spl.derivative(nu=nu)
assert str(err.value) == \
"Must have nu <= 3"
def test_antiderivative(self):
bspline = self.generate_spline()
spl = Spline1D()
spl.bspline = bspline
# 1st antiderivative
a_bspline = bspline.antiderivative(nu=1)
assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=1))
assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=2))
assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=3))
assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=4))
assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=5))
anti = spl.antiderivative()
assert_allclose(anti.t, a_bspline.t)
assert_allclose(anti.c, a_bspline.c)
assert anti.degree == a_bspline.k == 4
assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=1))
assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=2))
assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=3))
assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=4))
assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=5))
# 2nd antiderivative
a_bspline = bspline.antiderivative(nu=2)
assert_allclose(bspline(self.xs), a_bspline(self.xs, nu=2))
assert_allclose(bspline(self.xs, nu=1), a_bspline(self.xs, nu=3))
assert_allclose(bspline(self.xs, nu=2), a_bspline(self.xs, nu=4))
assert_allclose(bspline(self.xs, nu=3), a_bspline(self.xs, nu=5))
assert_allclose(bspline(self.xs, nu=4), a_bspline(self.xs, nu=6))
anti = spl.antiderivative(nu=2)
assert_allclose(anti.t, a_bspline.t)
assert_allclose(anti.c, a_bspline.c)
assert anti.degree == a_bspline.k == 5
assert_allclose(spl.evaluate(self.xs), anti.evaluate(self.xs, nu=2))
assert_allclose(spl.evaluate(self.xs, nu=1), anti.evaluate(self.xs, nu=3))
assert_allclose(spl.evaluate(self.xs, nu=2), anti.evaluate(self.xs, nu=4))
assert_allclose(spl.evaluate(self.xs, nu=3), anti.evaluate(self.xs, nu=5))
assert_allclose(spl.evaluate(self.xs, nu=4), anti.evaluate(self.xs, nu=6))
# Too many anti derivatives
for nu in range(3, 9):
with pytest.raises(ValueError) as err:
spl.antiderivative(nu=nu)
assert str(err.value) == \
f"Supported splines can have max degree 5, antiderivative degree will be {nu + 3}"
def test__SplineFitter_error(self):
spl = Spline1D()
class SplineFitter(_SplineFitter):
def _fit_method(self, model, x, y, **kwargs):
super()._fit_method(model, x, y, **kwargs)
fitter = SplineFitter()
with pytest.raises(ValueError) as err:
fitter(spl, mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) ==\
"1D model can only have 2 data points."
with pytest.raises(ModelDefinitionError) as err:
fitter(mk.MagicMock(), mk.MagicMock(), mk.MagicMock())
assert str(err.value) ==\
"Only spline models are compatible with this fitter."
with pytest.raises(NotImplementedError) as err:
fitter(spl, mk.MagicMock(), mk.MagicMock())
assert str(err.value) ==\
"This has not been implemented for _SplineFitter."
|
|
"""
An LSTM with Recurrent Dropout and the option to use highway
connections between layers.
"""
from typing import Optional, Tuple
import torch
from torch.autograd import Variable
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence, PackedSequence
from allennlp.common.checks import ConfigurationError
from allennlp.nn.util import get_dropout_mask
from allennlp.nn.initializers import block_orthogonal
class AugmentedLstm(torch.nn.Module):
"""
An LSTM with Recurrent Dropout and the option to use highway
connections between layers. Note: this implementation is slower
than the native Pytorch LSTM because it cannot make use of CUDNN
optimizations for stacked RNNs due to the highway layers and
variational dropout.
Parameters
----------
input_size : int, required.
The dimension of the inputs to the LSTM.
hidden_size : int, required.
The dimension of the outputs of the LSTM.
go_forward: bool, optional (default = True)
The direction in which the LSTM is applied to the sequence.
Forwards by default, or backwards if False.
recurrent_dropout_probability: float, optional (default = 0.0)
The dropout probability to be used in a dropout scheme as stated in
`A Theoretically Grounded Application of Dropout in Recurrent Neural Networks
<https://arxiv.org/abs/1512.05287>`_ . Implementation wise, this simply
applies a fixed dropout mask per sequence to the recurrent connection of the
LSTM.
use_highway: bool, optional (default = True)
Whether or not to use highway connections between layers. This effectively involves
reparameterising the normal output of an LSTM as::
gate = sigmoid(W_x1 * x_t + W_h * h_t)
output = gate * h_t + (1 - gate) * (W_x2 * x_t)
use_input_projection_bias : bool, optional (default = True)
Whether or not to use a bias on the input projection layer. This is mainly here
for backwards compatibility reasons and will be removed (and set to False)
in future releases.
Returns
-------
output_accumulator : PackedSequence
The outputs of the LSTM for each timestep. A tensor of shape
(batch_size, max_timesteps, hidden_size) where for a given batch
element, all outputs past the sequence length for that batch are
zero tensors.
"""
def __init__(self,
input_size: int,
hidden_size: int,
go_forward: bool = True,
recurrent_dropout_probability: float = 0.0,
use_highway: bool = True,
use_input_projection_bias: bool = True) -> None:
super(AugmentedLstm, self).__init__()
# Required to be wrapped with a :class:`PytorchSeq2SeqWrapper`.
self.input_size = input_size
self.hidden_size = hidden_size
self.go_forward = go_forward
self.use_highway = use_highway
self.recurrent_dropout_probability = recurrent_dropout_probability
# We do the projections for all the gates all at once, so if we are
# using highway layers, we need some extra projections, which is
# why the sizes of the Linear layers change here depending on this flag.
if use_highway:
self.input_linearity = torch.nn.Linear(input_size, 6 * hidden_size, bias=use_input_projection_bias)
self.state_linearity = torch.nn.Linear(hidden_size, 5 * hidden_size, bias=True)
else:
self.input_linearity = torch.nn.Linear(input_size, 4 * hidden_size, bias=use_input_projection_bias)
self.state_linearity = torch.nn.Linear(hidden_size, 4 * hidden_size, bias=True)
self.reset_parameters()
def reset_parameters(self):
# Use sensible default initializations for parameters.
block_orthogonal(self.input_linearity.weight.data, [self.hidden_size, self.input_size])
block_orthogonal(self.state_linearity.weight.data, [self.hidden_size, self.hidden_size])
self.state_linearity.bias.data.fill_(0.0)
# Initialize forget gate biases to 1.0 as per An Empirical
# Exploration of Recurrent Network Architectures, (Jozefowicz, 2015).
self.state_linearity.bias.data[self.hidden_size:2 * self.hidden_size].fill_(1.0)
def forward(self, # pylint: disable=arguments-differ
inputs: PackedSequence,
initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None):
"""
Parameters
----------
inputs : PackedSequence, required.
A tensor of shape (batch_size, num_timesteps, input_size)
to apply the LSTM over.
initial_state : Tuple[torch.Tensor, torch.Tensor], optional, (default = None)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. Each tensor has shape (1, batch_size, output_dimension).
Returns
-------
A PackedSequence containing a torch.FloatTensor of shape
(batch_size, num_timesteps, output_dimension) representing
the outputs of the LSTM per timestep and a tuple containing
the LSTM state, with shape (1, batch_size, hidden_size) to
match the Pytorch API.
"""
if not isinstance(inputs, PackedSequence):
raise ConfigurationError('inputs must be PackedSequence but got %s' % (type(inputs)))
sequence_tensor, batch_lengths = pad_packed_sequence(inputs, batch_first=True)
batch_size = sequence_tensor.size()[0]
total_timesteps = sequence_tensor.size()[1]
# We have to use this '.data.new().resize_.fill_' pattern to create tensors with the correct
# type - forward has no knowledge of whether these are torch.Tensors or torch.cuda.Tensors.
output_accumulator = Variable(sequence_tensor.data.new()
.resize_(batch_size, total_timesteps, self.hidden_size).fill_(0))
if initial_state is None:
full_batch_previous_memory = Variable(sequence_tensor.data.new()
.resize_(batch_size, self.hidden_size).fill_(0))
full_batch_previous_state = Variable(sequence_tensor.data.new()
.resize_(batch_size, self.hidden_size).fill_(0))
else:
full_batch_previous_state = initial_state[0].squeeze(0)
full_batch_previous_memory = initial_state[1].squeeze(0)
current_length_index = batch_size - 1 if self.go_forward else 0
if self.recurrent_dropout_probability > 0.0:
dropout_mask = get_dropout_mask(self.recurrent_dropout_probability, full_batch_previous_memory)
else:
dropout_mask = None
for timestep in range(total_timesteps):
# The index depends on which end we start.
index = timestep if self.go_forward else total_timesteps - timestep - 1
# What we are doing here is finding the index into the batch dimension
# which we need to use for this timestep, because the sequences have
# variable length, so once the index is greater than the length of this
# particular batch sequence, we no longer need to do the computation for
# this sequence. The key thing to recognise here is that the batch inputs
# must be _ordered_ by length from longest (first in batch) to shortest
# (last) so initially, we are going forwards with every sequence and as we
# pass the index at which the shortest elements of the batch finish,
# we stop picking them up for the computation.
if self.go_forward:
while batch_lengths[current_length_index] <= index:
current_length_index -= 1
# If we're going backwards, we are _picking up_ more indices.
else:
# First conditional: Are we already at the maximum number of elements in the batch?
# Second conditional: Does the next shortest sequence beyond the current batch
                # index require computation at this timestep?
while current_length_index < (len(batch_lengths) - 1) and \
batch_lengths[current_length_index + 1] > index:
current_length_index += 1
# Actually get the slices of the batch which we need for the computation at this timestep.
previous_memory = full_batch_previous_memory[0: current_length_index + 1].clone()
previous_state = full_batch_previous_state[0: current_length_index + 1].clone()
timestep_input = sequence_tensor[0: current_length_index + 1, index]
# Do the projections for all the gates all at once.
projected_input = self.input_linearity(timestep_input)
projected_state = self.state_linearity(previous_state)
# Main LSTM equations using relevant chunks of the big linear
# projections of the hidden state and inputs.
input_gate = torch.sigmoid(projected_input[:, 0 * self.hidden_size:1 * self.hidden_size] +
projected_state[:, 0 * self.hidden_size:1 * self.hidden_size])
forget_gate = torch.sigmoid(projected_input[:, 1 * self.hidden_size:2 * self.hidden_size] +
projected_state[:, 1 * self.hidden_size:2 * self.hidden_size])
memory_init = torch.tanh(projected_input[:, 2 * self.hidden_size:3 * self.hidden_size] +
projected_state[:, 2 * self.hidden_size:3 * self.hidden_size])
output_gate = torch.sigmoid(projected_input[:, 3 * self.hidden_size:4 * self.hidden_size] +
projected_state[:, 3 * self.hidden_size:4 * self.hidden_size])
memory = input_gate * memory_init + forget_gate * previous_memory
timestep_output = output_gate * torch.tanh(memory)
if self.use_highway:
highway_gate = torch.sigmoid(projected_input[:, 4 * self.hidden_size:5 * self.hidden_size] +
projected_state[:, 4 * self.hidden_size:5 * self.hidden_size])
highway_input_projection = projected_input[:, 5 * self.hidden_size:6 * self.hidden_size]
timestep_output = highway_gate * timestep_output + (1 - highway_gate) * highway_input_projection
# Only do dropout if the dropout prob is > 0.0 and we are in training mode.
if dropout_mask is not None and self.training:
timestep_output = timestep_output * dropout_mask[0: current_length_index + 1]
# We've been doing computation with less than the full batch, so here we create a new
            # variable for the whole batch at this timestep and insert the result for the
# relevant elements of the batch into it.
full_batch_previous_memory = Variable(full_batch_previous_memory.data.clone())
full_batch_previous_state = Variable(full_batch_previous_state.data.clone())
full_batch_previous_memory[0:current_length_index + 1] = memory
full_batch_previous_state[0:current_length_index + 1] = timestep_output
output_accumulator[0:current_length_index + 1, index] = timestep_output
output_accumulator = pack_padded_sequence(output_accumulator, batch_lengths, batch_first=True)
# Mimic the pytorch API by returning state in the following shape:
# (num_layers * num_directions, batch_size, hidden_size). As this
# LSTM cannot be stacked, the first dimension here is just 1.
final_state = (full_batch_previous_state.unsqueeze(0),
full_batch_previous_memory.unsqueeze(0))
return output_accumulator, final_state
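# Illustrative usage sketch (not part of the module): the LSTM consumes a
# PackedSequence, so variable-length batches must be sorted by decreasing
# length and packed first.  The sizes and lengths below are arbitrary example
# values chosen for demonstration only.
def _example_augmented_lstm_usage():
    lstm = AugmentedLstm(input_size=10, hidden_size=20,
                         recurrent_dropout_probability=0.1)
    inputs = Variable(torch.randn(3, 7, 10))   # (batch_size, max_timesteps, input_size)
    lengths = [7, 5, 2]                        # already sorted longest -> shortest
    packed = pack_padded_sequence(inputs, lengths, batch_first=True)
    packed_output, (state, memory) = lstm(packed)
    output, _ = pad_packed_sequence(packed_output, batch_first=True)
    # output: (3, 7, 20); state and memory: (1, 3, 20), matching the Pytorch API.
    return output, state, memory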
|
|
from luminoso_api.v5_client import LuminosoClient, get_root_url
from luminoso_api.errors import (
LuminosoClientError, LuminosoServerError, LuminosoError,
LuminosoTimeoutError
)
import pytest
import requests
BASE_URL = 'http://mock-api.localhost/api/v5/'
def test_paths():
"""
Test creating a client and navigating to various paths with sub-clients.
"""
client = LuminosoClient.connect(BASE_URL, token='fake')
client_copy = client.client_for_path('first_path')
assert client.url == BASE_URL
assert client_copy.url == BASE_URL + 'first_path/'
# Paths are relative to the client's URL; paths with slashes in front are
# absolute.
assert (
client_copy.client_for_path('subpath').url
== BASE_URL + 'first_path/subpath/'
)
assert (
client_copy.client_for_path('/second_path').url
== BASE_URL + 'second_path/'
)
# Similarly, test get_root_url
with pytest.raises(ValueError):
get_root_url('not.good.enough/api/v5')
assert (
get_root_url('https://daylight.luminoso.com/', warn=False)
== 'https://daylight.luminoso.com/api/v5'
)
assert (
get_root_url('http://daylight.luminoso.com/api/v5/who/cares?blaah')
== 'http://daylight.luminoso.com/api/v5'
)
# The test cases that mock HTTP responses depend on the 'requests-mock' pytest
# plugin, which can be installed with 'pip install requests-mock', or by using
# a Python packaging mechanism for installing the test dependencies of a package.
# (No such mechanism is standardized as of November 2018.)
#
# The fixture provided by the plugin is passed in as an argument to the test
# function, and pytest selects it by the name of that argument.
def test_mock_requests(requests_mock):
"""
Test the way that we make GET, POST, PUT, and DELETE requests using the
correspondingly-named methods of the client.
"""
project_list = [{'name': 'Example project'}]
# Set up the mock URLs that should respond
requests_mock.get(BASE_URL + 'projects/', json=project_list)
requests_mock.post(BASE_URL + 'projects/', json={})
requests_mock.put(BASE_URL + 'projects/projid/', json={})
requests_mock.delete(BASE_URL + 'projects/projid/', json={})
client = LuminosoClient.connect(BASE_URL, token='fake')
response = client.get('projects')
assert response == project_list
# Check that we sent the auth token in the request headers
assert requests_mock.last_request.headers['Authorization'] == 'Token fake'
client2 = client.client_for_path('projects')
response = client2.get()
assert response == project_list
assert requests_mock.last_request.headers['Authorization'] == 'Token fake'
# Okay, that's enough testing of the auth header
# Test different kinds of requests with parameters
response = client2.get(param='value')
assert response == project_list
assert requests_mock.last_request.qs == {'param': ['value']}
client2.post(param='value')
assert requests_mock.last_request.method == 'POST'
assert requests_mock.last_request.json() == {'param': 'value'}
client2.put('projid', param='value')
assert requests_mock.last_request.method == 'PUT'
assert requests_mock.last_request.json() == {'param': 'value'}
client2.delete('projid')
assert requests_mock.last_request.method == 'DELETE'
def test_failing_requests(requests_mock):
requests_mock.get(BASE_URL + 'bad/', status_code=404)
requests_mock.get(BASE_URL + 'fail/', status_code=500)
client = LuminosoClient.connect(BASE_URL, token='fake')
with pytest.raises(LuminosoClientError):
client.get('bad')
with pytest.raises(LuminosoServerError):
client.get('fail')
# Test that passing the timeout value has no impact on a normal request
def test_timeout_not_timing_out(requests_mock):
requests_mock.post(BASE_URL + 'projects/', json={})
client = LuminosoClient.connect(BASE_URL, token='fake', timeout=2)
client = client.client_for_path('projects')
client.post(param='value')
assert requests_mock.last_request.method == 'POST'
assert requests_mock.last_request.json() == {'param': 'value'}
# Test that passing the timeout and it timing out raises the right error
def test_timeout_actually_timing_out(requests_mock):
requests_mock.post(BASE_URL + 'projects/',
exc=requests.exceptions.ConnectTimeout)
client = LuminosoClient.connect(BASE_URL, token='fake', timeout=2)
client = client.client_for_path('projects')
    with pytest.raises(LuminosoTimeoutError):
        client.post(param='value')
# The logic in wait_for_build() and wait_for_sentiment_build() gets a little
# complex, so we test that logic more thoroughly here.
def _last_build_infos_to_mock_returns(last_builds):
"""
Helper function for testing waiting for a build. Turns a series of
last_build_info dictionaries into a list suitable for returning
sequentially from requests_mock.
"""
return [{'json': {'last_build_info': build_info}}
for build_info in last_builds]
def test_wait_for_build(requests_mock):
project_url = BASE_URL + 'projects/pr123456/'
client = LuminosoClient.connect(project_url, token='fake')
# A somewhat pared-down representation of what a project record's
# `last_build_info` field looks like in various states
build_running = {'start_time': 1590000000.0, 'stop_time': None,
'sentiment': {}}
build_failed = {'start_time': 1590000000.0, 'stop_time': 1590000001.0,
'sentiment': {}, 'success': False}
build_succeeded = {'start_time': 1590000000.0, 'stop_time': 1590000001.0,
'sentiment': {}, 'success': True}
# If there is no build: error
requests_mock.get(project_url, json={'last_build_info': {}})
with pytest.raises(ValueError, match='not building'):
client.wait_for_build()
# If the build succeeds: the project's last build info
requests_mock.get(
project_url,
_last_build_infos_to_mock_returns(
[build_running, build_running, build_succeeded]
)
)
result = client.wait_for_build(interval=.0001)
assert result == build_succeeded
# If the build fails: error with the project's last build info
requests_mock.get(
project_url,
_last_build_infos_to_mock_returns([build_running, build_failed])
)
with pytest.raises(LuminosoError) as e:
client.wait_for_build(interval=.0001)
assert e.value.args == (build_failed,)
def test_wait_for_sentiment_build(requests_mock):
project_url = BASE_URL + 'projects/pr123456/'
client = LuminosoClient.connect(project_url, token='fake')
# A somewhat pared-down representation of what a project record's
# `last_build_info` field looks like in various states, including
# the sentiment build
build_running = {'start_time': 1590000000.0, 'stop_time': None,
'sentiment': {'start_time': None, 'stop_time': None}}
build_failed = {'start_time': 1590000000.0, 'stop_time': 1590000001.0,
'sentiment': {'start_time': None, 'stop_time': None},
'success': False}
build_succeeded = {'start_time': 1590000000.0,
'stop_time': 1590000001.0,
'sentiment': {'start_time': None, 'stop_time': None},
'success': True}
sentiment_running = {'start_time': 1590000000.0, 'stop_time': 1590000001.0,
'sentiment': {'start_time': 1590000002.0,
'stop_time': None},
'success': True}
sentiment_failed = {'start_time': 1590000000.0, 'stop_time': 1590000001.0,
'sentiment': {'start_time': 1590000002.0,
'stop_time': 1590000003.0,
'success': False},
'success': True}
sentiment_succeeded = {'start_time': 1590000000.0,
'stop_time': 1590000001.0,
'sentiment': {'start_time': 1590000002.0,
'stop_time': 1590000003.0,
'success': True},
'success': True}
# If the base build doesn't exist, or fails: same errors as for regular
# wait_for_build
requests_mock.get(project_url, json={'last_build_info': {}})
with pytest.raises(ValueError, match='not building'):
client.wait_for_sentiment_build()
requests_mock.get(
project_url,
_last_build_infos_to_mock_returns([build_running, build_failed])
)
with pytest.raises(LuminosoError) as e:
client.wait_for_sentiment_build(interval=.0001)
assert e.value.args == (build_failed,)
# If the base build exists but sentiment is not building: error
requests_mock.get(
project_url,
json={'last_build_info': {
'start_time': 1590000000.0, 'stop_time': None, 'sentiment': {}
}}
)
with pytest.raises(ValueError, match='not building sentiment'):
client.wait_for_sentiment_build()
# If the sentiment build succeeds: the project's last build info
requests_mock.get(
project_url,
_last_build_infos_to_mock_returns(
[build_running, build_running, build_succeeded,
sentiment_running, sentiment_running, sentiment_succeeded]
)
)
result = client.wait_for_sentiment_build(interval=.0001)
assert result == sentiment_succeeded
# If the sentiment build fails: error with the project's last build info
requests_mock.get(
project_url,
_last_build_infos_to_mock_returns(
[build_running, build_running, build_succeeded,
sentiment_running, sentiment_running, sentiment_failed]
)
)
with pytest.raises(LuminosoError) as e:
client.wait_for_sentiment_build(interval=.0001)
assert e.value.args == (sentiment_failed,)
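# For reference, a minimal sketch of the polling pattern these tests exercise
# (an illustration under assumptions, not the client library's actual
# implementation). It assumes the client exposes a get() method that returns
# the project record as a dict, as the mocked GETs above suggest.
import time


def _wait_for_build_sketch(client, interval=5):
    """Poll the project until its last build finishes, fails, or was never started."""
    while True:
        info = client.get().get('last_build_info') or {}
        if not info:
            raise ValueError('This project is not building')
        if info.get('stop_time') is not None:
            if info.get('success'):
                return info
            raise LuminosoError(info)  # failed build: raise with the build info
        time.sleep(interval)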
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MonitorsOperations:
"""MonitorsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~workload_monitor_api.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
subscription_id: str,
resource_group_name: str,
resource_namespace: str,
resource_type: str,
resource_name: str,
filter: Optional[str] = None,
expand: Optional[str] = None,
**kwargs
) -> AsyncIterable["models.MonitorList"]:
"""Get list of a monitors of a resource (with optional filter).
Get list of a monitors of a resource (with optional filter).
:param subscription_id: The subscriptionId of the resource.
:type subscription_id: str
:param resource_group_name: The resourceGroupName of the resource.
:type resource_group_name: str
:param resource_namespace: The resourceNamespace of the resource.
:type resource_namespace: str
:param resource_type: The resourceType of the resource.
:type resource_type: str
:param resource_name: The resourceName of the resource.
:type resource_name: str
:param filter: list example: $filter=monitorName eq 'logical-disks|C:|disk-free-space-mb';
history example: $filter=isHeartbeat eq false.
:type filter: str
:param expand: ex: $expand=evidence,configuration.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MonitorList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~workload_monitor_api.models.MonitorList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MonitorList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-01-13-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceNamespace': self._serialize.url("resource_namespace", resource_namespace, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('MonitorList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.DefaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.WorkloadMonitor/monitors'} # type: ignore
async def get(
self,
subscription_id: str,
resource_group_name: str,
resource_namespace: str,
resource_type: str,
resource_name: str,
monitor_id: str,
expand: Optional[str] = None,
**kwargs
) -> "models.Monitor":
"""Get the current status of a monitor of a resource.
Get the current status of a monitor of a resource.
:param subscription_id: The subscriptionId of the resource.
:type subscription_id: str
:param resource_group_name: The resourceGroupName of the resource.
:type resource_group_name: str
:param resource_namespace: The resourceNamespace of the resource.
:type resource_namespace: str
:param resource_type: The resourceType of the resource.
:type resource_type: str
:param resource_name: The resourceName of the resource.
:type resource_name: str
:param monitor_id: The monitorId of the resource (url encoded).
:type monitor_id: str
:param expand: ex: $expand=evidence,configuration.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Monitor, or the result of cls(response)
:rtype: ~workload_monitor_api.models.Monitor
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.Monitor"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-01-13-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceNamespace': self._serialize.url("resource_namespace", resource_namespace, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'monitorId': self._serialize.url("monitor_id", monitor_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.DefaultError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Monitor', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.WorkloadMonitor/monitors/{monitorId}'} # type: ignore
def list_state_changes(
self,
subscription_id: str,
resource_group_name: str,
resource_namespace: str,
resource_type: str,
resource_name: str,
monitor_id: str,
filter: Optional[str] = None,
expand: Optional[str] = None,
start_timestamp_utc: Optional[datetime.datetime] = None,
end_timestamp_utc: Optional[datetime.datetime] = None,
**kwargs
) -> AsyncIterable["models.MonitorStateChangeList"]:
"""Get history of a monitor of a resource (with optional filter).
Get history of a monitor of a resource (with optional filter).
:param subscription_id: The subscriptionId of the resource.
:type subscription_id: str
:param resource_group_name: The resourceGroupName of the resource.
:type resource_group_name: str
:param resource_namespace: The resourceNamespace of the resource.
:type resource_namespace: str
:param resource_type: The resourceType of the resource.
:type resource_type: str
:param resource_name: The resourceName of the resource.
:type resource_name: str
:param monitor_id: The monitorId of the resource (url encoded).
:type monitor_id: str
:param filter: list example: $filter=monitorName eq 'logical-disks|C:|disk-free-space-mb';
history example: $filter=isHeartbeat eq false.
:type filter: str
:param expand: ex: $expand=evidence,configuration.
:type expand: str
:param start_timestamp_utc: The start Timestamp for the desired history.
:type start_timestamp_utc: ~datetime.datetime
:param end_timestamp_utc: The end Timestamp for the desired history.
:type end_timestamp_utc: ~datetime.datetime
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MonitorStateChangeList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~workload_monitor_api.models.MonitorStateChangeList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MonitorStateChangeList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-01-13-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_state_changes.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceNamespace': self._serialize.url("resource_namespace", resource_namespace, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'monitorId': self._serialize.url("monitor_id", monitor_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if start_timestamp_utc is not None:
query_parameters['startTimestampUtc'] = self._serialize.query("start_timestamp_utc", start_timestamp_utc, 'iso-8601')
if end_timestamp_utc is not None:
query_parameters['endTimestampUtc'] = self._serialize.query("end_timestamp_utc", end_timestamp_utc, 'iso-8601')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('MonitorStateChangeList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.DefaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_state_changes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.WorkloadMonitor/monitors/{monitorId}/history'} # type: ignore
async def get_state_change(
self,
subscription_id: str,
resource_group_name: str,
resource_namespace: str,
resource_type: str,
resource_name: str,
monitor_id: str,
timestamp_unix: str,
expand: Optional[str] = None,
**kwargs
) -> "models.MonitorStateChange":
"""Get the status of a monitor at a specific timestamp in history.
Get the status of a monitor at a specific timestamp in history.
:param subscription_id: The subscriptionId of the resource.
:type subscription_id: str
:param resource_group_name: The resourceGroupName of the resource.
:type resource_group_name: str
:param resource_namespace: The resourceNamespace of the resource.
:type resource_namespace: str
:param resource_type: The resourceType of the resource.
:type resource_type: str
:param resource_name: The resourceName of the resource.
:type resource_name: str
:param monitor_id: The monitorId of the resource (url encoded).
:type monitor_id: str
:param timestamp_unix: The timestamp of the state change (Unix format).
:type timestamp_unix: str
:param expand: ex: $expand=evidence,configuration.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MonitorStateChange, or the result of cls(response)
:rtype: ~workload_monitor_api.models.MonitorStateChange
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MonitorStateChange"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-01-13-preview"
accept = "application/json"
# Construct URL
url = self.get_state_change.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceNamespace': self._serialize.url("resource_namespace", resource_namespace, 'str'),
'resourceType': self._serialize.url("resource_type", resource_type, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'monitorId': self._serialize.url("monitor_id", monitor_id, 'str'),
'timestampUnix': self._serialize.url("timestamp_unix", timestamp_unix, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.DefaultError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MonitorStateChange', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_state_change.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceNamespace}/{resourceType}/{resourceName}/providers/Microsoft.WorkloadMonitor/monitors/{monitorId}/history/{timestampUnix}'} # type: ignore
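# Illustrative usage sketch (not generated by AutoRest and not part of this
# operation group). It assumes a service-client instance, here called `client`,
# that exposes this operation group as a `monitors` attribute -- the usual
# wiring for generated clients -- and that it is awaited inside an event loop.
async def _example_list_monitors(client, subscription_id, resource_group_name, resource_name):
    """Print every monitor of a hypothetical virtual machine resource."""
    async for monitor in client.monitors.list(
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        resource_namespace="Microsoft.Compute",   # example values; adjust to the
        resource_type="virtualMachines",          # resource actually being monitored
        resource_name=resource_name,
        filter="monitorName eq 'logical-disks|C:|disk-free-space-mb'",
    ):
        print(monitor)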
|
|
#!/usr/bin/env python3
# LEMP MANAGER v1.1
# Copyright 2017 Matteo Mattei <[email protected]>
import sys
import os
import shutil
import subprocess
import getopt
import crypt
import pwd
from tld import get_tld
######### CONFIGURATION ############
BASE_ROOT='/home'
START_USER_NUM=5001
BASE_USER_NAME='web'
PHP_FPM_TEMPLATE='/etc/php/7.0/fpm/pool.d/www.conf'
USER_PASSWORD='qwertyuioplkjhgfdsazxcvbnm'
####################################
############ FUNCTIONS #############
######### Do not edit below ########
def usage():
"""This function simply returns the usage"""
sys.stdout.write('Usage:\n')
sys.stdout.write('%s -a|--action=<action> [-d|--domain=<domain>] [-A|--alias=<alias>] [options]\n' % sys.argv[0])
sys.stdout.write('\nParameters:\n')
sys.stdout.write('\t-a|--action=ACTION\n\t\tit is mandatory\n')
sys.stdout.write('\t-d|--domain=domain.tld\n\t\tcan be used only with [add_domain, remove_domain, add_alias, get_certs, get_info]\n')
sys.stdout.write('\t-A|--alias=alias.domain.tld\n\t\tcan be used only with [add_alias, remove_alias, get_info]\n')
sys.stdout.write('\nActions:\n')
sys.stdout.write('\tadd_domain\tAdd a new domain\n')
sys.stdout.write('\tadd_alias\tAdd a new domain alias to an existent domain\n')
sys.stdout.write('\tremove_domain\tRemove an existent domain\n')
sys.stdout.write('\tremove_alias\tRemove an existent domain alias\n')
sys.stdout.write('\tget_certs\tObtain SSL certificate and deploy it\n')
sys.stdout.write('\tget_info\tGet information of a domain or a domain alias (username)\n')
sys.stdout.write('\nOptions:\n')
sys.stdout.write('\t-f|--fakessl\tUse self signed certificate (only usable with [add_domain, add_alias])\n')
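# Example invocations (illustrative only; the script name and domains are
# placeholders, and the script must be run as root):
#   ./lemp_manager.py --action=add_domain --domain=example.com
#   ./lemp_manager.py --action=add_alias --domain=example.com --alias=shop.example.com
#   ./lemp_manager.py --action=get_certs --domain=example.com
#   ./lemp_manager.py --action=remove_alias --alias=shop.example.com
#   ./lemp_manager.py --action=remove_domain --domain=example.com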
def valid_domain(domain):
"""This function return True if the passed domain is valid, false otherwise"""
try:
get_tld(domain,fix_protocol=True)
return True
except:
return False
def tld_and_sub(domain):
"""This function returns a dictionary with tld (top level domain) and
the related subdomain, www in case no subdomain is passed"""
tld = get_tld(domain,fix_protocol=True)
if domain==tld:
return {'tld':domain,'name':'www'}
index = domain.find(tld)
return {'tld':tld,'name':domain[0:(index-1)]}
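# For example (assuming the tld library resolves 'example.com' as the
# registered domain):
#   tld_and_sub('example.com')      -> {'tld': 'example.com', 'name': 'www'}
#   tld_and_sub('blog.example.com') -> {'tld': 'example.com', 'name': 'blog'}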
def get_next_user():
"""This function returns a dictionary with the next available username and its uid"""
buf = []
with open('/etc/passwd','r') as f:
buf = f.readlines()
idx = str(START_USER_NUM)
while True:
user = BASE_USER_NAME+idx+':'
found = False
for line in buf:
if line.startswith(user):
found = True
break
if found == True:
idx = str(int(idx)+1)
else:
return {'username':user.strip(':'),'uid':int(idx)}
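# For example, if /etc/passwd already contains web5001 and web5002, this
# returns {'username': 'web5003', 'uid': 5003}.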
def add_new_user(username,uid,homedir):
"""This function adds a new system user with specified parameters"""
res = subprocess.run([
'useradd',
'--comment="WEB_USER_'+str(uid)+',,,"',
'--home-dir='+homedir,
'--no-log-init',
'--create-home',
'--shell=/bin/bash',
'--uid='+str(uid),
username], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
sys.stdout.write('Error adding user %s with uid %d: %s\n' % (username,uid,res.stderr))
sys.exit(1)
enc_password = crypt.crypt(USER_PASSWORD,crypt.mksalt(crypt.METHOD_SHA512))
res = subprocess.run([
'usermod',
'-p',
enc_password,
username], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
sys.stdout.write('Error setting password for user %s: %s\n' % (username,res.stderr))
sys.exit(1)
def remove_user(homedir):
"""This function removes the user which domain belongs to"""
buf = []
with open('/etc/passwd','r') as f:
buf = f.readlines()
username = ''
for line in buf:
if ':'+homedir+':' in line:
username = line.split(':')[0]
break
if username != '':
res = subprocess.run([
'userdel',
username], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
sys.stdout.write('Error removing user %s: %s\n' % (username,res.stderr))
sys.exit(1)
def remove_domain_folder(homedir):
"""This function removes the home directory of the domain"""
if os.path.isdir(homedir):
res = subprocess.run([
'rm',
'-rf',
homedir], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
sys.stdout.write('Error removing domain folder %s\n' % homedir)
sys.exit(1)
def lock_password(username):
"""This function lock the password for the user"""
res = subprocess.run([
'passwd',
'-l',
username], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
sys.stdout.write('Error locking password to user %s: %s\n' % (username,res.stderr))
sys.exit(1)
def create_subfolders(username,homedir):
"""This function creates subfolders of domain directory"""
dirname = os.path.join(homedir,'public_html')
if not os.path.isdir(dirname):
os.mkdir(dirname)
shutil.chown(dirname,username,username)
dirname = os.path.join(homedir,'tmp')
if not os.path.isdir(dirname):
os.mkdir(dirname)
shutil.chown(dirname,username,username)
dirname = os.path.join(homedir,'logs')
if not os.path.isdir(dirname):
os.mkdir(dirname)
shutil.chown(dirname,'root','root')
def create_php_pool(username, domain, homedir):
"""This function creates a php pool configuration file"""
if not os.path.isfile(PHP_FPM_TEMPLATE):
sys.stdout.write('No php fpm template found (%s)!\n' % PHP_FPM_TEMPLATE)
sys.exit(1)
filename = os.path.join('/etc/php/7.0/fpm/pool.d/',domain+'.conf')
if os.path.isfile(filename):
sys.stdout.write('PHP configuration file already exists: %s\n' % filename)
sys.exit(1)
lines = []
with open(PHP_FPM_TEMPLATE,'r') as f:
lines = f.readlines()
with open(filename,'w') as f:
for l in lines:
if l.startswith('user = www-data'):
f.write(l.replace('www-data',username))
continue
if l.startswith('group = www-data'):
f.write(l.replace('www-data',username))
continue
if l.startswith('[www]'):
f.write(l.replace('www',domain))
continue
if l.startswith('listen = '):
f.write('listen = /var/run/php/php7.0-fpm_'+domain+'.sock\n')
continue
if l.startswith(';env[TMP]'):
f.write('env[TMP] = '+os.path.join(homedir,'tmp')+'\n')
continue
if l.startswith(';env[TMPDIR]'):
f.write('env[TMPDIR] = '+os.path.join(homedir,'tmp')+'\n')
continue
if l.startswith(';env[TEMP]'):
f.write('env[TEMP] = '+os.path.join(homedir,'tmp')+'\n')
continue
f.write(l)
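# For a domain such as www.example.com handled by user web5001 (illustrative
# values), the generated pool file differs from the template roughly as:
#   [www.example.com]
#   user = web5001
#   group = web5001
#   listen = /var/run/php/php7.0-fpm_www.example.com.sock
#   env[TMP] = /home/example.com/www.example.com/tmp   (same for TMPDIR and TEMP)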
def remove_php_pool(domain):
"""This function removes the php pool of the domain"""
filename = '/etc/php/7.0/fpm/pool.d/'+domain+'.conf'
if os.path.isfile(filename):
os.unlink(filename)
def domains_in_virtualhost(domain):
"""This function returns the list of domains configured in the virtualhost"""
buf = []
with open('/etc/nginx/sites-available/'+domain,'r') as f:
buf = f.readlines()
domains = []
for line in buf:
if ' server_name ' in line:
domains = line.strip().strip(';').split()[1:]
break
return domains
def check_update_ssl_certs(domains):
"""This function get ssl certificates for all domains in virtualhost and adjust it"""
if len(domains)==0:
sys.stdout.write('No domain provided to certbot!\n')
return
domains_list = []
for d in domains:
domains_list.append('-d')
domains_list.append(d.strip())
res = subprocess.run([
'certbot',
'certonly',
'--keep-until-expiring',
'--expand',
'--webroot',
'--webroot-path',
'/var/www/html']+domains_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if not os.path.islink('/etc/letsencrypt/live/'+domains[0].strip()+'/fullchain.pem'):
sys.stdout.write('Missing SSL certificate %s\n' % '/etc/letsencrypt/live/'+domains[0].strip()+'/fullchain.pem')
sys.stdout.write('Look at %s for more information about\n' % '/var/log/letsencrypt/letsencrypt.log')
return
buf = []
with open('/etc/letsencrypt/renewal/'+domains[0].strip()+'.conf','r') as f:
buf = f.readlines()
# Make sure every domain covered by this certificate is listed in the
# renewal configuration of the primary (first) domain.
for d in domains:
    found = False
    for line in buf:
        if line.startswith(d.strip()+' ='):
            found = True
            break
    if not found:
        with open('/etc/letsencrypt/renewal/'+domains[0].strip()+'.conf','a') as f:
            f.write(d.strip()+' = /var/www/html\n')
domain_parts = tld_and_sub(domains[0].strip())
buf = []
with open('/etc/nginx/sites-available/'+domain_parts['name']+'.'+domain_parts['tld'],'r') as f:
buf = f.readlines()
with open('/etc/nginx/sites-available/'+domain_parts['name']+'.'+domain_parts['tld'],'w') as f:
for line in buf:
if 'ssl_certificate ' in line:
f.write(' ssl_certificate /etc/letsencrypt/live/'+domains[0].strip()+'/fullchain.pem;\n')
continue
if 'ssl_certificate_key ' in line:
f.write(' ssl_certificate_key /etc/letsencrypt/live/'+domains[0].strip()+'/privkey.pem;\n')
continue
f.write(line)
def remove_ssl_certs(domain):
"""This function removes all SSL certificates of a domain"""
if os.path.isdir('/etc/letsencrypt/live/'+domain):
shutil.rmtree('/etc/letsencrypt/live/'+domain)
if os.path.isdir('/etc/letsencrypt/archive/'+domain):
shutil.rmtree('/etc/letsencrypt/archive/'+domain)
if os.path.isfile('/etc/letsencrypt/renewal/'+domain+'.conf'):
os.unlink('/etc/letsencrypt/renewal/'+domain+'.conf')
def create_nginx_virtualhost(domain,homedir):
"""This function creates the NGINX virtualhost"""
filename = '/etc/nginx/sites-available/'+domain
dst_filename = '/etc/nginx/sites-enabled/'+domain
if os.path.isfile(filename):
sys.stdout.write('Virtualhost configuration already exists: %s\n' % filename)
sys.exit(1)
domain_parts = tld_and_sub(domain)
with open(filename,'w') as f:
f.write('server {\n')
f.write(' listen 80;\n')
if domain_parts['name'] == 'www':
f.write(' server_name '+domain_parts['tld']+' '+domain_parts['name']+'.'+domain_parts['tld']+';\n')
else:
f.write(' server_name '+domain_parts['name']+'.'+domain_parts['tld']+';\n')
f.write(' return 301 https://'+domain_parts['name']+'.'+domain_parts['tld']+'$request_uri;\n')
f.write('}\n')
f.write('server {\n')
f.write(' server_name '+domain_parts['name']+'.'+domain_parts['tld']+';\n')
f.write(' listen 443 ssl http2;\n')
f.write(' access_log '+os.path.join(homedir,'logs','nginx.access.log')+';\n')
f.write(' error_log '+os.path.join(homedir,'logs','nginx.error.log')+';\n')
f.write(' root '+os.path.join(homedir,'public_html')+';\n')
f.write(' set $php_sock_name '+domain_parts['name']+'.'+domain_parts['tld']+';\n')
f.write(' include /etc/nginx/global/common.conf;\n')
f.write(' include /etc/nginx/global/wordpress.conf;\n')
f.write(' ssl_certificate /etc/nginx/certs/server.crt;\n')
f.write(' ssl_certificate_key /etc/nginx/certs/server.key;\n')
f.write(' include /etc/nginx/global/ssl.conf;\n')
f.write('}\n')
os.symlink(filename,dst_filename)
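# Note: the generated virtualhost redirects HTTP to HTTPS and initially points
# at the self-signed certificate in /etc/nginx/certs; check_update_ssl_certs()
# later rewrites the ssl_certificate/ssl_certificate_key lines to the
# Let's Encrypt paths once real certificates have been obtained.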
def remove_nginx_virtualhost(domain):
"""This function removes nginx virtualhost of a domain"""
if os.path.islink('/etc/nginx/sites-enabled/'+domain):
os.unlink('/etc/nginx/sites-enabled/'+domain)
if os.path.isfile('/etc/nginx/sites-available/'+domain):
os.unlink('/etc/nginx/sites-available/'+domain)
def reload_services():
"""This function reloads configurations of PHP-FPM and NGINX services"""
res = subprocess.run([
'/etc/init.d/php7.0-fpm',
'reload'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
sys.stdout.write('Unable to reload PHP: %s\n' % res.stderr)
sys.exit(1)
res = subprocess.run([
'/usr/sbin/nginx',
'-s',
'reload'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
sys.stdout.write('Unable to reload NGINX: %s\n' % res.stderr)
sys.exit(1)
def create_symlink(alias_domain_dir,domain_dir):
"""This function creates symlink for the alias domain"""
os.symlink(domain_dir,alias_domain_dir)
def remove_symlink(alias_domain_dir):
"""This function removes symlink for the alias domain"""
os.unlink(alias_domain_dir)
def add_nginx_virtualhost_alias(domain, alias_domain):
"""This function adds a new alias to NGINX virtualhost"""
buf = []
with open('/etc/nginx/sites-available/'+domain,'r') as f:
buf = f.readlines()
with open('/etc/nginx/sites-available/'+domain,'w') as f:
for line in buf:
if ' server_name ' in line:
chunks = line.strip().strip(';').split()[1:]
if alias_domain not in chunks:
chunks.append(alias_domain)
line = ' server_name '+' '.join(chunks)+';\n'
f.write(line)
def remove_nginx_virtualhost_alias(domain, alias_domain):
"""This function removes an alias from NGINX virtualhost"""
buf = []
with open('/etc/nginx/sites-available/'+domain,'r') as f:
buf = f.readlines()
with open('/etc/nginx/sites-available/'+domain,'w') as f:
for line in buf:
if ' server_name ' in line:
chunks = line.strip().strip(';').split()[1:]
if alias_domain in chunks:
chunks.remove(alias_domain)
line = ' server_name '+' '.join(chunks)+';\n'
f.write(line)
def get_alias_parent(alias_domain_dir):
"""This function returns the parent domain of an alias domain"""
domain_dir = os.readlink(alias_domain_dir)
domain = os.path.basename(domain_dir)
return domain
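# For example, if /home/example.com/shop.example.com is a symlink to
# /home/example.com/www.example.com, this returns 'www.example.com'.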
def remove_alias_ssl_certs(domain, alias_domain):
"""This function removes the alias_domain from the letsencrypt renew process"""
buf = []
with open('/etc/letsencrypt/renewal/'+domain+'.conf', 'r') as f:
buf = f.readlines()
with open('/etc/letsencrypt/renewal/'+domain+'.conf', 'w') as f:
for line in buf:
if line.startswith(alias_domain+' ='):
continue
f.write(line)
####################################
######### MAIN STARTS HERE #########
def main():
if os.getuid() != 0:
sys.stdout.write('This program must be executed as root\n')
sys.exit(1)
try:
opts, args = getopt.getopt(sys.argv[1:], "ha:d:A:f", ["help", "action=", "domain=", "alias=", "fakessl"])
except getopt.GetoptError as err:
usage()
sys.exit(2)
domain = None
alias_domain = None
action = None
ssl_fake = False
show_info = False
if len(opts) == 0:
usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-a", "--action"):
action = a
if action not in ('add_domain','add_alias','remove_domain','remove_alias','get_certs','get_info'):
sys.stdout.write("Unknown action %s\n" % action)
usage()
sys.exit(1)
elif o in ("-d", "--domain"):
domain = a
elif o in ("-A", "--alias"):
alias_domain = a
elif o in ("-f", "--fakessl"):
ssl_fake = True
else:
sys.stdout.write('Unknown option %s\n' % o)
usage()
sys.exit(1)
if action == 'get_info':
if domain == None and alias_domain == None:
sys.stdout.write('Missing domain or alias domain\n')
sys.exit(1)
if domain != None and alias_domain != None:
sys.stdout.write('Please specify only a domain or an alias domain\n')
sys.exit(1)
# check if domain already exists
if domain != None:
domain_parts = tld_and_sub(domain)
base_domain_dir = os.path.join(BASE_ROOT,domain_parts['tld'])
child_domain_dir = os.path.join(base_domain_dir,domain_parts['name']+'.'+domain_parts['tld'])
domain = domain_parts['name']+'.'+domain_parts['tld']
if not os.path.isdir(child_domain_dir):
sys.stdout.write('Domain %s does not exist at %s\n' % (domain,child_domain_dir))
sys.exit(1)
# check if alias domain already exists
if alias_domain != None:
alias_domain_parts = tld_and_sub(alias_domain)
base_alias_domain_dir = os.path.join(BASE_ROOT,alias_domain_parts['tld'])
child_alias_domain_dir = os.path.join(base_alias_domain_dir,alias_domain_parts['name']+'.'+alias_domain_parts['tld'])
alias_domain = alias_domain_parts['name']+'.'+alias_domain_parts['tld']
if not (os.path.isdir(child_alias_domain_dir) or os.path.islink(child_alias_domain_dir)):
sys.stdout.write('Alias domain %s does not exist at %s\n' % (alias_domain,child_alias_domain_dir))
sys.exit(1)
if domain != None:
sys.stdout.write(pwd.getpwuid(os.stat(child_domain_dir).st_uid).pw_name+'\n')
sys.exit(0)
elif alias_domain != None:
sys.stdout.write(pwd.getpwuid(os.stat(child_alias_domain_dir).st_uid).pw_name+'\n')
sys.exit(0)
elif action == 'add_domain':
if domain == None:
sys.stdout.write('Missing domain\n')
sys.exit(1)
# check if domain already exists
domain_parts = tld_and_sub(domain)
base_domain_dir = os.path.join(BASE_ROOT,domain_parts['tld'])
child_domain_dir = os.path.join(base_domain_dir,domain_parts['name']+'.'+domain_parts['tld'])
domain = domain_parts['name']+'.'+domain_parts['tld']
if os.path.isdir(child_domain_dir):
sys.stdout.write('Domain %s already exists at %s\n' % (domain,child_domain_dir))
sys.exit(1)
# add new user
if not os.path.isdir(base_domain_dir):
os.mkdir(base_domain_dir)
user = get_next_user()
add_new_user(user['username'],user['uid'],child_domain_dir)
# lock user password
#lock_password(user['username'])
# create additional folders
create_subfolders(user['username'],child_domain_dir)
# create PHP pool
create_php_pool(user['username'],domain,child_domain_dir)
# create NGINX virtualhost
create_nginx_virtualhost(domain,child_domain_dir)
# obtain SSL certificates from letsencrypt
if not ssl_fake:
domains = domains_in_virtualhost(domain)
check_update_ssl_certs(domains)
# reload services (nginx + php-fpm)
reload_services()
elif action == 'add_alias':
if domain == None:
sys.stdout.write('Missing domain\n')
sys.exit(1)
if alias_domain == None:
sys.stdout.write('Missing domain alias\n')
sys.exit(1)
# check if domain already exists
domain_parts = tld_and_sub(domain)
base_domain_dir = os.path.join(BASE_ROOT,domain_parts['tld'])
child_domain_dir = os.path.join(base_domain_dir,domain_parts['name']+'.'+domain_parts['tld'])
domain = domain_parts['name']+'.'+domain_parts['tld']
if not os.path.isdir(child_domain_dir):
sys.stdout.write('Domain %s does not exist at %s\n' % (domain,child_domain_dir))
sys.exit(1)
# check if alias domain already exists
alias_domain_parts = tld_and_sub(alias_domain)
base_alias_domain_dir = os.path.join(BASE_ROOT,alias_domain_parts['tld'])
child_alias_domain_dir = os.path.join(base_alias_domain_dir,alias_domain_parts['name']+'.'+alias_domain_parts['tld'])
alias_domain = alias_domain_parts['name']+'.'+alias_domain_parts['tld']
if os.path.isdir(child_alias_domain_dir) or os.path.islink(child_alias_domain_dir):
sys.stdout.write('Alias domain %s already exists at %s\n' % (alias_domain,child_alias_domain_dir))
sys.exit(1)
# add base folder if not exists
if not os.path.isdir(base_domain_dir):
os.mkdir(base_domain_dir)
# create symlink
create_symlink(child_alias_domain_dir,child_domain_dir)
# add NGINX virtualhost alias
add_nginx_virtualhost_alias(domain, alias_domain)
# obtain SSL certificates from letsencrypt
if not ssl_fake:
domains = domains_in_virtualhost(domain)
check_update_ssl_certs(domains)
# reload services (nginx + php-fpm)
reload_services()
elif action == 'remove_domain':
if domain == None:
sys.stdout.write('Missing domain\n')
sys.exit(1)
# check if domain already exists
domain_parts = tld_and_sub(domain)
base_domain_dir = os.path.join(BASE_ROOT,domain_parts['tld'])
child_domain_dir = os.path.join(base_domain_dir,domain_parts['name']+'.'+domain_parts['tld'])
domain = domain_parts['name']+'.'+domain_parts['tld']
if not os.path.isdir(child_domain_dir):
sys.stdout.write('Domain %s does not exist at %s\n' % (domain,child_domain_dir))
sys.exit(1)
# remove php pool
remove_php_pool(domain)
# remove ssl certificates
remove_ssl_certs(domain)
# remove nginx virtualhost
remove_nginx_virtualhost(domain)
# reload services (nginx + php-fpm)
reload_services()
# remove domain folder
remove_domain_folder(child_domain_dir)
# remove user if present
remove_user(child_domain_dir)
elif action == 'remove_alias':
if alias_domain == None:
sys.stdout.write('Missing domain alias\n')
sys.exit(1)
# check if alias domain already exists
alias_domain_parts = tld_and_sub(alias_domain)
base_alias_domain_dir = os.path.join(BASE_ROOT,alias_domain_parts['tld'])
child_alias_domain_dir = os.path.join(base_alias_domain_dir,alias_domain_parts['name']+'.'+alias_domain_parts['tld'])
alias_domain = alias_domain_parts['name']+'.'+alias_domain_parts['tld']
if not os.path.islink(child_alias_domain_dir):
sys.stdout.write('Alias domain %s does not exist at %s\n' % (alias_domain,child_alias_domain_dir))
sys.exit(1)
# get alias parent
domain = get_alias_parent(child_alias_domain_dir)
# remove domain folder
remove_symlink(child_alias_domain_dir)
# remove ssl certificates
remove_alias_ssl_certs(domain, alias_domain)
# remove nginx virtualhost
remove_nginx_virtualhost_alias(domain, alias_domain)
# reload services (nginx + php-fpm)
reload_services()
elif action == 'get_certs':
if domain == None:
sys.stdout.write('Missing domain\n')
sys.exit(1)
# check if domain already exists
domain_parts = tld_and_sub(domain)
base_domain_dir = os.path.join(BASE_ROOT,domain_parts['tld'])
child_domain_dir = os.path.join(base_domain_dir,domain_parts['name']+'.'+domain_parts['tld'])
domain = domain_parts['name']+'.'+domain_parts['tld']
if not os.path.isdir(child_domain_dir):
sys.stdout.write('Domain %s does not exist at %s\n' % (domain,child_domain_dir))
sys.exit(1)
domains = domains_in_virtualhost(domain)
check_update_ssl_certs(domains)
# reload services (nginx + php-fpm)
reload_services()
if __name__ == "__main__":
main()
|
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occurred. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import re
import types
import unittest
import stubout
exec(open('google/protobuf/internal/utils.py').read())
class Error(AssertionError):
"""Base exception for this module."""
pass
class ExpectedMethodCallsError(Error):
"""Raised when Verify() is called before all expected methods have been called
"""
def __init__(self, expected_methods):
"""Init exception.
Args:
# expected_methods: A sequence of MockMethod objects that should have been
# called.
expected_methods: [MockMethod]
Raises:
ValueError: if expected_methods contains no methods.
"""
if not expected_methods:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_methods = expected_methods
def __str__(self):
calls = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_methods)])
return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
"""Raised when an unexpected method is called.
This can occur if a method is called with incorrect parameters, or out of the
specified order.
"""
def __init__(self, unexpected_method, expected):
"""Init exception.
Args:
# unexpected_method: MockMethod that was called but was not at the head of
# the expected_method queue.
# expected: MockMethod or UnorderedGroup the method should have
# been in.
unexpected_method: MockMethod
expected: MockMethod or UnorderedGroup
"""
Error.__init__(self)
self._unexpected_method = unexpected_method
self._expected = expected
def __str__(self):
return "Unexpected method call: %s. Expecting: %s" % \
(self._unexpected_method, self._expected)
class UnknownMethodCallError(Error):
"""Raised if an unknown method is requested of the mock object."""
def __init__(self, unknown_method_name):
"""Init exception.
Args:
# unknown_method_name: Method call that is not part of the mocked class's
# public interface.
unknown_method_name: str
"""
Error.__init__(self)
self._unknown_method_name = unknown_method_name
def __str__(self):
return "Method called is not a member of the object: %s" % \
self._unknown_method_name
class Mox(object):
"""Mox: a factory for creating mock objects."""
# A list of types that should be stubbed out with MockObjects (as
# opposed to MockAnythings).
_USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType,
types.ObjectType, types.TypeType]
def __init__(self):
"""Initialize a new Mox."""
self._mock_objects = []
self.stubs = stubout.StubOutForTesting()
def CreateMock(self, class_to_mock):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
"""
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock
def CreateMockAnything(self):
"""Create a mock that will accept any method calls.
This does not enforce an interface.
"""
new_mock = MockAnything()
self._mock_objects.append(new_mock)
return new_mock
def ReplayAll(self):
"""Set all mock objects to replay mode."""
for mock_obj in self._mock_objects:
mock_obj._Replay()
def VerifyAll(self):
"""Call verify on all mock objects created."""
for mock_obj in self._mock_objects:
mock_obj._Verify()
def ResetAll(self):
"""Call reset on all mock objects. This does not unset stubs."""
for mock_obj in self._mock_objects:
mock_obj._Reset()
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub)
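# Typical record/replay usage of a stub created this way (illustrative):
#   m = Mox()
#   m.StubOutWithMock(os.path, 'exists')
#   os.path.exists('/tmp').AndReturn(True)
#   m.ReplayAll()
#   ...run the code under test...
#   m.VerifyAll()
#   m.UnsetStubs()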
def UnsetStubs(self):
"""Restore stubs to their original state."""
self.stubs.UnsetAll()
def Replay(*args):
"""Put mocks into Replay mode.
Args:
# args is any number of mocks to put into replay mode.
"""
for mock in args:
mock._Replay()
def Verify(*args):
"""Verify mocks.
Args:
# args is any number of mocks to be verified.
"""
for mock in args:
mock._Verify()
def Reset(*args):
"""Reset mocks.
Args:
# args is any number of mocks to be reset.
"""
for mock in args:
mock._Reset()
class MockAnything:
"""A mock that can be used to mock anything.
This is helpful for mocking classes that do not provide a public interface.
"""
def __init__(self):
""" """
self._Reset()
def __getattr__(self, method_name):
"""Intercept method calls on this object.
A new MockMethod is returned that is aware of the MockAnything's
state (record or replay). The call will be recorded or replayed
by the MockMethod's __call__.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return self._CreateMockMethod(method_name)
def _CreateMockMethod(self, method_name):
"""Create a new mock method call and return it.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return MockMethod(method_name, self._expected_calls_queue,
self._replay_mode)
def __nonzero__(self):
"""Return 1 for nonzero so the mock can be used as a conditional."""
return 1
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __ne__(self, rhs):
"""Provide custom logic to compare objects."""
return not self == rhs
def _Replay(self):
"""Start replaying expected method calls."""
self._replay_mode = True
def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue)
def _Reset(self):
"""Reset the state of this mock to record mode with an empty queue."""
# Maintain a list of method calls we are expecting
self._expected_calls_queue = deque()
# Make sure we are in setup mode, not replay mode
self._replay_mode = False
class MockObject(MockAnything, object):
"""A mock object that simulates the public/protected interface of a class."""
def __init__(self, class_to_mock):
"""Initialize a mock object.
This determines the methods and properties of the class and stores them.
Args:
# class_to_mock: class to be mocked
class_to_mock: class
"""
# This is used to hack around the mixin/inheritance of MockAnything, which
# is not a proper object (it can be anything. :-)
MockAnything.__dict__['__init__'](self)
# Get a list of all the public and special methods we should mock.
self._known_methods = set()
self._known_vars = set()
self._class_to_mock = class_to_mock
for method in dir(class_to_mock):
if callable(getattr(class_to_mock, method)):
self._known_methods.add(method)
else:
self._known_vars.add(method)
def __getattr__(self, name):
"""Intercept attribute request on this object.
If the attribute is a public class variable, it will be returned and not
recorded as a call.
If the attribute is not a variable, it is handled like a method
call. The method name is checked against the set of mockable
methods, and a new MockMethod is returned that is aware of the
MockObject's state (record or replay). The call will be recorded
or replayed by the MockMethod's __call__.
Args:
# name: the name of the attribute being requested.
name: str
Returns:
Either a class variable or a new MockMethod that is aware of the state
of the mock (record or replay).
Raises:
UnknownMethodCallError if the MockObject does not mock the requested
method.
"""
if name in self._known_vars:
return getattr(self._class_to_mock, name)
if name in self._known_methods:
return self._CreateMockMethod(name)
raise UnknownMethodCallError(name)
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockObject) and
self._class_to_mock == rhs._class_to_mock and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __setitem__(self, key, value):
"""Provide custom logic for mocking classes that support item assignment.
Args:
key: Key to set the value for.
value: Value to set.
Returns:
Expected return value in replay mode. A MockMethod object for the
__setitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not support item assignment.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
setitem = self._class_to_mock.__dict__.get('__setitem__', None)
# Verify the class supports item assignment.
if setitem is None:
raise TypeError('object does not support item assignment')
# If we are in replay mode then simply call the mock __setitem__ method.
if self._replay_mode:
return MockMethod('__setitem__', self._expected_calls_queue,
self._replay_mode)(key, value)
# Otherwise, create a mock method __setitem__.
return self._CreateMockMethod('__setitem__')(key, value)
def __getitem__(self, key):
"""Provide custom logic for mocking classes that are subscriptable.
Args:
key: Key to return the value for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__getitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not subscriptable.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
getitem = self._class_to_mock.__dict__.get('__getitem__', None)
# Verify the class supports item assignment.
if getitem is None:
raise TypeError('unsubscriptable object')
# If we are in replay mode then simply call the mock __getitem__ method.
if self._replay_mode:
return MockMethod('__getitem__', self._expected_calls_queue,
self._replay_mode)(key)
# Otherwise, create a mock method __getitem__.
return self._CreateMockMethod('__getitem__')(key)
def __call__(self, *params, **named_params):
"""Provide custom logic for mocking classes that are callable."""
# Verify the class we are mocking is callable
callable = self._class_to_mock.__dict__.get('__call__', None)
if callable is None:
raise TypeError('Not callable')
# Because the call is happening directly on this object instead of a method,
# the call on the mock method is made right here
mock_method = self._CreateMockMethod('__call__')
return mock_method(*params, **named_params)
@property
def __class__(self):
"""Return the class that is being mocked."""
return self._class_to_mock
class MockMethod(object):
"""Callable mock method.
A MockMethod should act exactly like the method it mocks, accepting parameters
and returning a value, or throwing an exception (as specified). When this
method is called, it can optionally verify whether the called method (name and
signature) matches the expected method.
"""
def __init__(self, method_name, call_queue, replay_mode):
"""Construct a new mock method.
Args:
# method_name: the name of the method
# call_queue: deque of calls, verify this call against the head, or add
# this call to the queue.
# replay_mode: False if we are recording, True if we are verifying calls
# against the call queue.
method_name: str
call_queue: list or deque
replay_mode: bool
"""
self._name = method_name
self._call_queue = call_queue
if not isinstance(call_queue, deque):
self._call_queue = deque(self._call_queue)
self._replay_mode = replay_mode
self._params = None
self._named_params = None
self._return_value = None
self._exception = None
self._side_effects = None
def __call__(self, *params, **named_params):
"""Log parameters and return the specified return value.
If the Mock(Anything/Object) associated with this call is in record mode,
this MockMethod will be pushed onto the expected call queue. If the mock
is in replay mode, this will pop a MockMethod off the top of the queue and
verify this call is equal to the expected call.
Raises:
UnexpectedMethodCall if this call is supposed to match an expected method
call and it does not.
"""
self._params = params
self._named_params = named_params
if not self._replay_mode:
self._call_queue.append(self)
return self
expected_method = self._VerifyMethodCall()
if expected_method._side_effects:
expected_method._side_effects(*params, **named_params)
if expected_method._exception:
raise expected_method._exception
return expected_method._return_value
def __getattr__(self, name):
"""Raise an AttributeError with a helpful message."""
raise AttributeError('MockMethod has no attribute "%s". '
'Did you remember to put your mocks in replay mode?' % name)
def _PopNextMethod(self):
"""Pop the next method from our call queue."""
try:
return self._call_queue.popleft()
except IndexError:
raise UnexpectedMethodCallError(self, None)
def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected
def __str__(self):
params = ', '.join(
[repr(p) for p in self._params or []] +
['%s=%r' % x for x in sorted((self._named_params or {}).items())])
desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
return desc
def __eq__(self, rhs):
"""Test whether this MockMethod is equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return (isinstance(rhs, MockMethod) and
self._name == rhs._name and
self._params == rhs._params and
self._named_params == rhs._named_params)
def __ne__(self, rhs):
"""Test whether this MockMethod is not equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return not self == rhs
def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are on the stack.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self
def InAnyOrder(self, group_name="default"):
"""Move this method into a group of unordered calls.
A group of unordered calls must be defined together, and must be executed
in full before the next expected method can be called. There can be
multiple groups that are expected serially, if they are given
different group names. The same group name can be reused if there is a
standard method call, or a group with a different name, spliced between
usages.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
def MultipleTimes(self, group_name="default"):
"""Move this method into group of calls which may be called multiple times.
A group of repeating calls must be defined together, and must be executed in
full before the next expected method can be called.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
def AndReturn(self, return_value):
"""Set the value to return when this method is called.
Args:
# return_value can be anything.
"""
self._return_value = return_value
return return_value
def AndRaise(self, exception):
"""Set the exception to raise when this method is called.
Args:
# exception: the exception to raise when this method is called.
exception: Exception
"""
self._exception = exception
def WithSideEffects(self, side_effects):
"""Set the side effects that are simulated when this method is called.
Args:
side_effects: A callable which modifies the parameters or other relevant
state which a given test case depends on.
Returns:
Self for chaining with AndReturn and AndRaise.
"""
self._side_effects = side_effects
return self
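# --- Illustrative sketch (not part of the original mox source) ---
# The modifier methods above (AndReturn, InAnyOrder, MultipleTimes, etc.)
# are chained onto calls recorded while a mock is in record mode. A minimal,
# hedged example of how they are typically combined; the mocked DAO and its
# method names are hypothetical. The helper below is never called here.
def _example_mock_method_modifiers():
    m = Mox()
    dao = m.CreateMockAnything()
    # Record phase: expected calls with return values and groupings.
    dao.Connect('db').AndReturn(True)
    dao.Warm().InAnyOrder()
    dao.Ping().InAnyOrder()
    dao.Query('SELECT 1').MultipleTimes().AndReturn([1])
    m.ReplayAll()
    # Replay phase: the code under test would make these calls.
    dao.Connect('db')
    dao.Ping()
    dao.Warm()
    dao.Query('SELECT 1')
    m.VerifyAll()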
class Comparator:
"""Base class for all Mox comparators.
A Comparator can be used as a parameter to a mocked method when the exact
value is not known. For example, the code you are testing might build up a
long SQL string that is passed to your mock DAO. You're only interested that
the IN clause contains the proper primary keys, so you can set your mock
up as follows:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
A Comparator may replace one or more parameters, for example:
# return at most 10 rows
mock_dao.RunQuery(StrContains('SELECT'), 10)
or
# Return some non-deterministic number of rows
mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
"""
def equals(self, rhs):
"""Special equals method that all comparators must implement.
Args:
rhs: any python object
"""
raise NotImplementedError('method must be implemented by a subclass.')
def __eq__(self, rhs):
return self.equals(rhs)
def __ne__(self, rhs):
return not self.equals(rhs)
class IsA(Comparator):
"""This class wraps a basic Python type or class. It is used to verify
that a parameter is of the given type or class.
Example:
mock_dao.Connect(IsA(DbConnectInfo))
"""
def __init__(self, class_name):
"""Initialize IsA
Args:
class_name: basic python type or a class
"""
self._class_name = class_name
def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name)
def __repr__(self):
return str(self._class_name)
class IsAlmost(Comparator):
"""Comparison class used to check whether a parameter is nearly equal
to a given value. Generally useful for floating point numbers.
Example: mock_dao.SetTimeout(IsAlmost(3.9))
"""
def __init__(self, float_value, places=7):
"""Initialize IsAlmost.
Args:
float_value: The value for making the comparison.
places: The number of decimal places to round to.
"""
self._float_value = float_value
self._places = places
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False
def __repr__(self):
return str(self._float_value)
class StrContains(Comparator):
"""Comparison class used to check whether a substring exists in a
string parameter. This can be useful in mocking a database with SQL
passed in as a string parameter, for example.
Example:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
"""
def __init__(self, search_string):
"""Initialize.
Args:
# search_string: the string you are searching for
search_string: str
"""
self._search_string = search_string
def equals(self, rhs):
"""Check to see if the search_string is contained in the rhs string.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return rhs.find(self._search_string) > -1
except Exception:
return False
def __repr__(self):
return '<str containing \'%s\'>' % self._search_string
class Regex(Comparator):
"""Checks if a string matches a regular expression.
This uses a given regular expression to determine equality.
"""
def __init__(self, pattern, flags=0):
"""Initialize.
Args:
# pattern is the regular expression to search for
pattern: str
# flags passed to re.compile function as the second argument
flags: int
"""
self.regex = re.compile(pattern, flags=flags)
def equals(self, rhs):
"""Check to see if rhs matches regular expression pattern.
Returns:
bool
"""
return self.regex.search(rhs) is not None
def __repr__(self):
s = '<regular expression \'%s\'' % self.regex.pattern
if self.regex.flags:
s += ', flags=%d' % self.regex.flags
s += '>'
return s
class In(Comparator):
"""Checks whether an item (or key) is in a list (or dict) parameter.
Example:
mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
"""
def __init__(self, key):
"""Initialize.
Args:
# key is anything that could be in a list or a key in a dict
"""
self._key = key
def equals(self, rhs):
"""Check to see whether key is in rhs.
Args:
rhs: dict
Returns:
bool
"""
return self._key in rhs
def __repr__(self):
return '<sequence or map containing \'%s\'>' % self._key
class ContainsKeyValue(Comparator):
"""Checks whether a key/value pair is in a dict parameter.
Example:
mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: a key in a dict
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False
def __repr__(self):
return '<map containing the entry \'%s: %s\'>' % (self._key, self._value)
class SameElementsAs(Comparator):
"""Checks whether iterables contain the same elements (ignoring order).
Example:
mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki'))
"""
def __init__(self, expected_seq):
"""Initialize.
Args:
expected_seq: a sequence
"""
self._expected_seq = expected_seq
def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual
def __repr__(self):
return '<sequence with same elements as \'%s\'>' % self._expected_seq
class And(Comparator):
"""Evaluates one or more Comparators on RHS and returns an AND of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Comparator
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if not comparator.equals(rhs):
return False
return True
def __repr__(self):
return '<AND %s>' % str(self._comparators)
class Or(Comparator):
"""Evaluates one or more Comparators on RHS and returns an OR of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Mox comparators
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False
def __repr__(self):
return '<OR %s>' % str(self._comparators)
class Func(Comparator):
"""Call a function that should verify the parameter passed in is correct.
You may need the ability to perform more advanced operations on the parameter
in order to validate it. You can use this to have a callable validate any
parameter. The callable should return either True or False.
Example:
def myParamValidator(param):
# Advanced logic here
return True
mock_dao.DoSomething(Func(myParamValidator), True)
"""
def __init__(self, func):
"""Initialize.
Args:
func: callable that takes one parameter and returns a bool
"""
self._func = func
def equals(self, rhs):
"""Test whether rhs passes the function test.
rhs is passed into func.
Args:
rhs: any python object
Returns:
the result of func(rhs)
"""
return self._func(rhs)
def __repr__(self):
return str(self._func)
class IgnoreArg(Comparator):
"""Ignore an argument.
This can be used when we don't care about an argument of a method call.
Example:
# Check if CastMagic is called with 3 as first arg and 'disappear' as third.
mymock.CastMagic(3, IgnoreArg(), 'disappear')
"""
def equals(self, unused_rhs):
"""Ignores arguments and returns True.
Args:
unused_rhs: any python object
Returns:
always returns True
"""
return True
def __repr__(self):
return '<IgnoreArg>'
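# --- Illustrative sketch (not part of the original mox source) ---
# Comparators stand in for exact argument values when recording expectations,
# and are evaluated during replay via MockMethod.__eq__. A hedged example
# combining several of them; the mocked object and methods are hypothetical.
# The helper below is never called here.
def _example_comparators():
    m = Mox()
    dao = m.CreateMockAnything()
    dao.RunQuery(And(IsA(str), StrContains('SELECT')), IgnoreArg()).AndReturn([])
    dao.SaveUser(ContainsKeyValue('name', 'alice'), Or(IsA(int), IsA(float)))
    m.ReplayAll()
    dao.RunQuery('SELECT * FROM users WHERE id IN (1, 2)', 10)
    dao.SaveUser({'name': 'alice', 'age': 30}, 2.5)
    m.VerifyAll()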
class MethodGroup(object):
"""Base class containing common behaviour for MethodGroups."""
def __init__(self, group_name):
self._group_name = group_name
def group_name(self):
return self._group_name
def __str__(self):
return '<%s "%s">' % (self.__class__.__name__, self._group_name)
def AddMethod(self, mock_method):
raise NotImplementedError
def MethodCalled(self, mock_method):
raise NotImplementedError
def IsSatisfied(self):
raise NotImplementedError
class UnorderedGroup(MethodGroup):
"""UnorderedGroup holds a set of method calls that may occur in any order.
This construct is helpful for non-deterministic events, such as iterating
over the keys of a dict.
"""
def __init__(self, group_name):
super(UnorderedGroup, self).__init__(group_name)
self._methods = []
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.append(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if there are not any methods in this group."""
return len(self._methods) == 0
class MultipleTimesGroup(MethodGroup):
"""MultipleTimesGroup holds methods that may be called any number of times.
Note: Each method must be called at least once.
This is helpful if you don't know or care how many times a method is called.
"""
def __init__(self, group_name):
super(MultipleTimesGroup, self).__init__(group_name)
self._methods = set()
self._methods_called = set()
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.add(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_called.add(mock_method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
next_method = mock_method._PopNextMethod()
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False
class MoxMetaTestBase(type):
"""Metaclass to add mox cleanup and verification to every test.
As the mox unit testing class is being constructed (MoxTestBase or a
subclass), this metaclass will modify all test functions to call the
CleanUpMox method of the test class after they finish. This means that
unstubbing and verifying will happen for every test with no additional code,
and any failures will result in test failures as opposed to errors.
"""
def __init__(cls, name, bases, d):
type.__init__(cls, name, bases, d)
# also get all the attributes from the base classes to account
# for a case when test class is not the immediate child of MoxTestBase
for base in bases:
for attr_name in dir(base):
d[attr_name] = getattr(base, attr_name)
for func_name, func in d.items():
if func_name.startswith('test') and callable(func):
setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
@staticmethod
def CleanUpTest(cls, func):
"""Adds Mox cleanup code to any MoxTestBase method.
Always unsets stubs after a test. Will verify all mocks for tests that
otherwise pass.
Args:
cls: MoxTestBase or subclass; the class whose test method we are altering.
func: method; the method of the MoxTestBase test class we wish to alter.
Returns:
The modified method.
"""
def new_method(self, *args, **kwargs):
mox_obj = getattr(self, 'mox', None)
cleanup_mox = False
if mox_obj and isinstance(mox_obj, Mox):
cleanup_mox = True
try:
func(self, *args, **kwargs)
finally:
if cleanup_mox:
mox_obj.UnsetStubs()
if cleanup_mox:
mox_obj.VerifyAll()
new_method.__name__ = func.__name__
new_method.__doc__ = func.__doc__
new_method.__module__ = func.__module__
return new_method
class MoxTestBase(unittest.TestCase):
"""Convenience test class to make stubbing easier.
Sets up a "mox" attribute which is an instance of Mox - any mox tests will
want this. Also automatically unsets any stubs and verifies that all mock
methods have been called at the end of each test, eliminating boilerplate
code.
"""
__metaclass__ = MoxMetaTestBase
def setUp(self):
self.mox = Mox()
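# --- Illustrative sketch (not part of the original mox source) ---
# MoxTestBase plus MoxMetaTestBase give every test method automatic
# UnsetStubs()/VerifyAll() on completion. A hedged usage example, kept as a
# comment so it is not picked up by test discovery; the stubbed function is
# just an illustration.
#
#   import os
#
#   class ExampleTest(MoxTestBase):
#       def test_exists_is_stubbed(self):
#           self.mox.StubOutWithMock(os.path, 'exists')
#           os.path.exists('/tmp/example').AndReturn(True)
#           self.mox.ReplayAll()
#           assert os.path.exists('/tmp/example')
#           # UnsetStubs()/VerifyAll() run automatically after the test.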
|
|
from django.db import models
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from common.models import Project
from celery import states
import logging
import json
from django.conf import settings
import requests
log = logging.getLogger(__name__)
TEST_STATES = (PASSED, FAILED, SKIPPED, BLOCKED) = (0, 1, 2, 3)
LAUNCH_STATES = (INITIALIZED, IN_PROGRESS, FINISHED, STOPPED) = (0, 1, 2, 3)
LAUNCH_TYPES = (ASYNC_CALL, INIT_SCRIPT, CONCLUSIVE) = (0, 1, 2)
CELERY_FINISHED_STATES = (states.SUCCESS, states.FAILURE)
RESULT_PREVIEW_CHOICES = (
('head', 'Show test result head'),
('tail', 'Show test result tail')
)
class ExtUser(models.Model):
user = models.OneToOneField(User, related_name='settings')
default_project = models.IntegerField(_('User default project'),
blank=True, null=True, default=None)
launches_on_page = models.IntegerField(_('Launches on page'), default=10)
testresults_on_page = models.IntegerField(
_('Testresults on page'), default=25)
dashboards = models.TextField(_('Dashboards'), default='[]')
result_preview = models.CharField(_('Result preview'), max_length=128,
choices=RESULT_PREVIEW_CHOICES,
blank=True, null=True, default=None)
def get_dashboards(self):
if self.dashboards == '""' or self.dashboards is None:
self.dashboards = '[]'
return json.loads(self.dashboards)
def set_dashboards(self, dashboards):
self.dashboards = json.dumps(dashboards)
class TestPlan(models.Model):
name = models.CharField(_('Name'), max_length=256)
project = models.ForeignKey(Project)
main = models.BooleanField(_('Show in short statistic'),
blank=True, null=False, default=False)
hidden = models.BooleanField(blank=False, null=False, default=True)
owner = models.ForeignKey(User, default=1)
filter = models.TextField(_('Started by filter'), default='',
blank=True, null=False, max_length=128)
description = models.TextField(_('Description'), default='',
blank=True, null=False)
variable_name = models.TextField(_('Environment variable name'),
default='', blank=True,
null=False, max_length=128)
variable_value_regexp = models.CharField(_('Regexp for variable value'),
max_length=255, default='',
blank=True)
show_in_summary = models.BooleanField(
_('Consider in summary calculation'),
blank=True, null=False, default=False)
show_in_twodays = models.BooleanField(
_('Consider in statistic for last two days'),
blank=True, null=False, default=False)
def __str__(self):
return '{0} -> TestPlan: {1}'.format(self.project, self.name)
class Launch(models.Model):
test_plan = models.ForeignKey(TestPlan)
counts_cache = models.TextField(blank=True, null=True, default=None)
started_by = models.URLField(_('Started by'), blank=True, null=True,
default=None)
created = models.DateTimeField(_('Created'), auto_now_add=True)
finished = models.DateTimeField(_('Finished'), default=None, blank=True,
null=True)
state = models.IntegerField(_('State'), default=FINISHED)
tasks = models.TextField(_('Tasks'), default='')
parameters = models.TextField(_('Parameters'), default='{}')
duration = models.FloatField(_('Duration time'), null=True, default=None)
def is_finished(self):
return self.state == FINISHED
@property
def counts(self):
if self.counts_cache is None or self.state == INITIALIZED:
self.calculate_counts()
return json.loads(self.counts_cache)
def calculate_counts(self):
data = {
'passed': len(self.passed),
'failed': len(self.failed),
'skipped': len(self.skipped),
'blocked': len(self.blocked),
'total': 0
}
for name, count in data.items():
if name != 'total':
data['total'] += count
self.counts_cache = json.dumps(data)
self.save()
@property
def failed(self):
return self.testresult_set.filter(state=FAILED)
@property
def skipped(self):
return self.testresult_set.filter(state=SKIPPED)
@property
def passed(self):
return self.testresult_set.filter(state=PASSED)
@property
def blocked(self):
return self.testresult_set.filter(state=BLOCKED)
def get_tasks(self):
if self.tasks == '' or self.tasks is None:
self.tasks = '{}'
return json.loads(self.tasks)
def set_tasks(self, tasks):
self.tasks = json.dumps(tasks)
def get_parameters(self):
if self.parameters == '' or self.parameters is None:
self.parameters = '{}'
return json.loads(self.parameters)
def set_parameters(self, parameters):
self.parameters = json.dumps(parameters)
def __str__(self):
return '{0} -> Launch: {1}'.format(self.test_plan, self.pk)
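# --- Illustrative sketch (not part of the original models) ---
# The `counts` property above computes per-state totals once and caches them
# as JSON in `counts_cache` (it recomputes while the launch is INITIALIZED).
# A hedged example; the pk and the numbers are hypothetical.
#
#   launch = Launch.objects.get(pk=42)
#   launch.counts
#   # -> {'passed': 40, 'failed': 2, 'skipped': 1, 'blocked': 0, 'total': 43}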
class Build(models.Model):
launch = models.OneToOneField(Launch, related_name='build')
version = models.CharField(
max_length=16, default=None, null=True, blank=True)
hash = models.CharField(
max_length=64, default=None, null=True, blank=True)
branch = models.CharField(
max_length=128, default=None, null=True, blank=True)
commit_author = models.CharField(
max_length=16, default=None, null=True, blank=True)
commit_message = models.CharField(
max_length=128, default=None, null=True, blank=True)
last_commits = models.TextField(default=None, null=True, blank=True)
def get_last_commits(self):
if self.last_commits == '""' or self.last_commits is None:
self.last_commits = '[]'
return json.loads(self.last_commits)
def set_last_commits(self, last_commits):
self.last_commits = json.dumps(last_commits)
def __str__(self):
return '{0} -> LaunchBuild: {1}/{2}/{3}'.format(
self.launch, self.version, self.hash, self.branch)
class TestResult(models.Model):
launch = models.ForeignKey(Launch)
name = models.CharField(_('Name'), max_length=128, db_index=True)
suite = models.CharField(_('TestSuite'), max_length=256)
state = models.IntegerField(_('State'), default=BLOCKED)
failure_reason = models.TextField(_('Failure Reason'), default=None,
blank=True, null=True)
duration = models.FloatField(_('Duration time'), default=0.0)
launch_item_id = models.IntegerField(blank=True, default=None, null=True)
def __str__(self):
return '{0} -> TestResult: {1}/{2}'.format(
self.launch, self.suite, self.name)
class LaunchItem(models.Model):
test_plan = models.ForeignKey(TestPlan)
name = models.CharField(
max_length=128, default=None, null=True, blank=True)
command = models.TextField()
timeout = models.IntegerField(default=300)
type = models.IntegerField(default=ASYNC_CALL)
def __str__(self):
return '{0} -> {1}'.format(self.test_plan.name, self.name)
class Bug(models.Model):
externalId = models.CharField(max_length=255, blank=False)
name = models.CharField(max_length=255, default='', blank=True)
regexp = models.CharField(max_length=255, default='', blank=False)
state = models.CharField(max_length=32, default='', blank=True)
updated = models.DateTimeField(auto_now=True)
def get_state(self):
return self.state
def __str__(self):
return ':'.join((self.externalId, self.name))
def get_issue_fields_from_bts(externalId):
log.debug('Get fields for bug {}'.format(externalId))
res = _get_bug(externalId)
if 'fields' in res:
return res['fields']
return res
def _get_bug(bug_id):
response = requests.get(
'https://{}{}'.format(
settings.BUG_TRACKING_SYSTEM_HOST,
settings.BUG_TRACKING_SYSTEM_BUG_PATH.format(issue_id=bug_id)),
auth=(settings.BUG_TRACKING_SYSTEM_LOGIN,
settings.BUG_TRACKING_SYSTEM_PASSWORD),
headers={'Content-Type': 'application/json'})
data = response.json()
log.debug(data)
return data
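# --- Illustrative sketch (not part of the original models) ---
# `get_issue_fields_from_bts` above wraps a basic-auth GET against the
# configured bug tracker (a JIRA-style REST endpoint is assumed). A hedged
# usage example; the issue id and the settings values are hypothetical.
#
#   # settings.py (hypothetical values)
#   # BUG_TRACKING_SYSTEM_HOST = 'jira.example.com'
#   # BUG_TRACKING_SYSTEM_BUG_PATH = '/rest/api/2/issue/{issue_id}'
#   # BUG_TRACKING_SYSTEM_LOGIN = 'bot'
#   # BUG_TRACKING_SYSTEM_PASSWORD = 'secret'
#
#   fields = get_issue_fields_from_bts('PROJ-123')
#   summary = fields.get('summary', '')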
|
|
'''
Astro.IQ - Trajectory
Christopher Iliffe Sprague
[email protected]
https://cisprague.github.io/Astro.IQ
'''
''' --------
Dependencies
-------- '''
from numpy import *
from scipy.integrate import odeint
import matplotlib.pyplot as plt
set_printoptions(suppress=True)
from ML import MLP
import os
from numba import jit
import tensorflow
''' -----------
Dynamical Model
----------- '''
class Dynamical_Model(object):
def __init__(self, si, st, slb, sub, clb, cub, tlb, tub, silb, siub):
# Initial and target state
self.si , self.st = array(si , float), array(st , float)
# Lower and upper state bounds
self.slb , self.sub = array(slb, float), array(sub, float)
# Lower and upper control bounds
self.clb , self.cub = array(clb, float), array(cub, float)
# Lower and upper time bounds
self.tlb , self.tub = float(tlb), float(tub)
# State and control space dimensions
self.sdim, self.cdim = len(slb) , len(clb)
# Lower and upper initial state bounds
self.silb, self.siub = array(silb, float), array(siub, float)
# Numerical integration
self.Propagate = Propagate(self)
def __repr__(self):
n, t = "\n", "\t"
lb, ub, dim = "Lower Bound: ", "Upper Bound: ", "Dimensions: "
s = "State" + n
s += t + dim + str(self.sdim) + n
s += t + "Initial: " + str(self.si) + n
s += t + "Target: " + str(self.st) + n
s += t + lb + str(self.slb) + n
s += t + ub + str(self.sub) + n
s += "Control" + n
s += t + dim + str(self.cdim) + n
s += t + lb + str(self.clb) + n
s += t + ub + str(self.cub) + n
s += "Time" + n
s += t + lb + str(self.tlb) + n
s += t + ub + str(self.tub) + n
return s
def EOM_State(self, state, control):
return None
def EOM_State_Jac(self, state, control):
return None
def EOM_Fullstate(self, fullstate, control):
return None
def EOM_Fullstate_Jac(self, fullstate, control):
return None
def Hamiltonian(self, fullstate, control):
return None
def Pontryagin(self, fullstate):
return None
def Safe(self, state):
if any(state < self.slb) or any(state > self.sub):
print(str(state) + ' is not within:')
print('Lower bound: ' + str(self.slb))
print('Upper bound: ' + str(self.sub))
return False
else:
return True
''' ---
Landers
--- '''
class Point_Lander(Dynamical_Model):
def __init__(
self,
si = [10, 1000, 20, -5, 9500],
st = [0, 0, 0, 0, 8000],
Isp = 311,
g = 1.6229,
T = 44000,
a = 0
):
# Problem parameters
self.Isp = float(Isp) # Specific impulse [s]
self.g = float(g) # Environment's gravity [m/s^2]
self.T = float(T) # Maximum thrust [N]
self.g0 = float(9.81) # Earth's sea-level gravity [m/s^2]
self.a = float(a) # Homotopy parameter
# For optimisation
Dynamical_Model.__init__(
self,
si,
st,
[-10000, 0, -500, -500, 10],
[10000, 2000, 500, 500, 10000],
[0, -1, -1],
[1, 1, 1],
1,
200,
[-400, 500, -150, -200, 8000],
[400, 1000, 150, 2, 9800]
)
def EOM_State(self, state, control):
x, y, vx, vy, m = state
u, ux, uy = control
x0 = self.T*u/m
return array([
vx,
vy,
ux*x0,
uy*x0 - self.g,
-self.T*u/(self.Isp*self.g0)
], float)
def EOM_State_Jac(self, state, control):
x, y, vx, vy, m = state
u, ux, uy = control
x0 = self.T*u/m**2
return array([
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, -ux*x0],  # d(vx_dot)/dm = -ux*T*u/m**2
[0, 0, 0, 0, -uy*x0],  # d(vy_dot)/dm = -uy*T*u/m**2
[0, 0, 0, 0, 0]
], float)
def EOM_Fullstate(self, fullstate, control):
x, y, vx, vy, m, lx, ly, lvx, lvy, lm = fullstate
u, ux, uy = control
T, Isp, g0, g = self.T, self.Isp, self.g0, self.g
x0 = T*u/m
x1 = T*u/m**2
return array([
vx,
vy,
ux*x0,
uy*x0 - g,
-T*u/(Isp*g0),
0,
0,
-lx,
-ly,
uy*lvy*x1 + lvx*ux*x1
], float)
def EOM_Fullstate_Jac(self, fullstate, control):
x, y, vx, vy, m, lx, ly, lvx, lvy, lm = fullstate
u, ux, uy = control
T = self.T
x0 = T*u/m**2
x1 = ux*x0
x2 = uy*x0
x3 = 2*T*u/m**3
return array([
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -x1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -x2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1, 0, 0, 0],
[0, 0, 0, 0, -uy*lvy*x3 - lvx*ux*x3, 0, 0, x1, x2, 0]
], float)
def Hamiltonian(self, fullstate, control):
x, y, vx, vy, m, lx, ly, lvx, lvy, lm = fullstate
u, ux, uy = control
T, Isp, g0, g, a = self.T, self.Isp, self.g0, self.g, self.a
x0 = T*u/m
x1 = 1/(Isp*g0)
H = -T*lm*u*x1 + lvx*ux*x0 + lvy*(uy*x0 - g) + lx*vx + ly*vy
H += x1*(T**2*u**2*(-a + 1) + a*(T*u)) # The Lagrangian
return H
def Pontryagin(self, fullstate):
x, y, vx, vy, m, lx, ly, lvx, lvy, lm = fullstate
lv = sqrt(abs(lvx)**2 + abs(lvy)**2)
ux = -lvx/lv
uy = -lvy/lv
# Switching function
S = self.a - lv*self.Isp*self.g0/m - lm
if self.a == 1.:
if S >= 0.: u = 0.
elif S < 0.: u = 1.
else:
u = -S/(2*self.T*(1-self.a))
u = min(u, 1.)
u = max(u, 0.)
return u, ux, uy
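''' Illustrative sketch (not part of the original Astro.IQ source):
Point_Lander couples the lander's state dynamics with the costate equations
and Pontryagin's minimum principle. A hedged example of a zero-control
(ballistic) propagation from the default initial state; the final time and
node count are arbitrary choices.

    model = Point_Lander()
    states = model.Propagate.Ballistic(tf=20., nnodes=50)
    print(states[-1])  # state after 20 s of free fall, no thrust
'''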
class Point_Lander_Drag(Dynamical_Model):
# Specs taken from SpaceX Dragon 2
def __init__(
self,
si = [0, 5000, 150, -10, 8165],
st = [0, 0, 0, 0, 5000], # Free final mass
T = 68.17e3, # SuperDraco [N]
Isp = 243., # SuperDraco [s]
CD = 1.4, # Dragon2 [ND]
rho = 0.020, # Mars [kg/m^3]
A = 10.52, # Dragon 2 [m^2]
g = 3.711): # Mars [m/s^2]
# Problem parameters
self.c1 = float(T) # Max Thrust
self.c2 = float(Isp*9.81) # Effective Velocity
self.c3 = float(0.5*rho*CD*A) # Aerodynamics
self.g = float(g) # Gravity
# Initialise the model
Dynamical_Model.__init__(
self,
si,
st,
[-20000, 0, -200, -200, 10],
[20000, 5000, 200, 0, 10000],
[0, 0], # NOTE: theta measured from the horizontal
[1, pi], # and restricted to only point upward
1,
1000,
[-200, 2500, -100, -100, 7000],
[200, 3500, 100, -20, 8000])
def EOM_State(self, state, control):
x, y, vx, vy, m = state
u, theta = control
c1, c2, c3, g = self.c1, self.c2, self.c3, self.g
x0 = 1/m
x1 = c1*u*x0
x2 = c3*x0*sqrt(abs(vx)**2 + abs(vy)**2)
return array([
vx,
vy,
-vx*x2 + x1*cos(theta),
-g - vy*x2 + x1*sin(theta),
-c1*u/c2
], float)
def Plot(self, state, control):
# Plot methods are specific to model
x, y, vx, vy, m = hsplit(state, self.sdim)
u, theta = hsplit(control, self.cdim)
plt.figure(1)
# Trajectory
plt.subplot(131)
plt.plot(x, y, 'k.-')
plt.xlabel('Cross-Range [m]')
plt.ylabel('Altitude [m]')
# Velocities
plt.subplot(232)
plt.plot(vx, 'k.-')
plt.plot(vy, 'k.--')
plt.legend(['$v_x$', '$v_y$'], loc='best')
plt.xlabel('Node Index')
plt.ylabel('Velocity [m/s]')
# Mass
plt.subplot(233)
plt.plot(m, 'k.-')
plt.xlabel('Node Index')
plt.ylabel('Mass [kg]')
# Throttle
plt.subplot(235)
plt.plot(u, 'k.-')
plt.xlabel('Node Index')
plt.ylabel('Throttle')
# Thrust angle
plt.subplot(236)
plt.plot(theta, 'k.-')
plt.xlabel('Node Index')
plt.ylabel('Thrust Angle [rad]')
plt.tight_layout()
plt.show()
''' ------
Propagator
------ '''
class Propagate(object):
def __init__(self, model):
self.model = model
def Neural(self, si, tf, nnodes, bangbang=False):
t = linspace(0, tf, nnodes)
state = zeros((nnodes, self.model.sdim))
control = zeros((nnodes, self.model.cdim))
state[0,:] = si
sess = tensorflow.Session()
control[0,:] = self.model.controller(state[0], bangbang, sess, True)
for i in range(nnodes)[1:]:
s1 = state[i-1]
if s1[1] <= 1.:
break
else:
dt = t[[i-1, i]]
state[i,:] = odeint(
self.EOM_State_Neural,
s1,
dt,
rtol=1e-8,
atol=1e-8,
args=(bangbang,sess,False)
)[1,:]
control[i,:] = self.model.controller(state[i,:], bangbang, sess, False)
return state, control
def Ballistic(self, si=None, tf=None, nnodes=None):
if si is None: si = self.model.si
if tf is None: tf = self.model.tub
if nnodes is None: nnodes = 20
return odeint(
self.EOM,
si,
linspace(0, tf, nnodes),
rtol = 1e-12,
atol = 1e-12,
args = (zeros(self.model.cdim),) # No control
)
def Indirect(self, fsi, tf, nnodes):
nsegs = nnodes - 1
t = linspace(0, tf, nnodes)
fs = array(fsi, ndmin=2)
c = array(self.model.Pontryagin(fsi), ndmin=2)
# Must integrate incrementally to track control
for k in range(nsegs):
fskp1 = odeint(
self.EOM_Indirect,
fs[k],
[t[k], t[k+1]],
Dfun = self.EOM_Indirect_Jac,
rtol = 1e-13,
atol = 1e-13
)
fskp1 = fskp1[1]
fs = vstack((fs, fskp1))
c = vstack((c, self.model.Pontryagin(fskp1)))
return t, fs, c
def EOM(self, state, t, control=None):
return self.model.EOM_State(state, control)
def EOM_Jac(self, state, t, control):
return self.model.EOM_State_Jac(state, control)
def EOM_Indirect(self, fullstate, t):
control = self.model.Pontryagin(fullstate)
return self.model.EOM_Fullstate(fullstate, control)
def EOM_Indirect_Jac(self, fullstate, t):
control = self.model.Pontryagin(fullstate)
return self.model.EOM_Fullstate_Jac(fullstate, control)
def EOM_State_Neural(self, state, t, bangbang, sess, restore):
control = self.model.controller(state, bangbang, sess, restore)
return self.model.EOM_State(state, control)
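''' Illustrative sketch (not part of the original Astro.IQ source):
Propagate.Indirect integrates the full state/costate system, re-evaluating
the Pontryagin control at every node. A hedged example with an arbitrary
(not optimised) costate guess appended to the initial state; a shooting
method would normally supply these costates.

    model = Point_Lander()
    fsi = hstack((model.si, [0.1, 0.1, 0.1, 0.1, 0.1]))  # hypothetical costates
    t, fullstates, controls = model.Propagate.Indirect(fsi, tf=20., nnodes=50)
'''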
''' -------
Controllers
------- '''
class Neural(object):
def __init__(self, model, context, dim):
self.model = model
path = os.path.dirname(os.path.abspath(__file__))
# Retrieve the data. NOTE: scaling should be included in the TensorFlow graph rather than here
i = range(model.sdim + model.cdim)
data = load(path + '/Data/ML/' + str(context) + '.npy')
j, k = 0, model.sdim
iin = i[j:k]
self.xdat = data[:, iin]
j, k = k, k + model.cdim
iout = i[j:k]
self.ydat = data[:, iout]
# Build the brain
net = path + '/Data/ML/Nets/' + str(context) + '_'
net += str(dim[0]) + 'x' + str(dim[1])
self.net = MLP(net)
layers = [dim[0]]*dim[1]
self.net.build(data, iin, iout, layers, 0.90)
def Control(self, state, bangbang, sess, restore):
control = self.net.predict(state, sess, restore)
# Squash the control to the model limitations
squash = control > self.model.cub
control[squash] = self.model.cub[squash]
squash = control < self.model.clb
control[squash] = self.model.clb[squash]
# If we force bang-bang control:
if bangbang:
cmid = 0.5*(self.model.cub - self.model.clb) + self.model.clb
if control[0] < cmid[0]:
control[0] = self.model.clb[0]
if control[0] >= cmid[0]:
control[0] = self.model.cub[0]
#print("Modified: " + str(control))
return control
def main():
# We instantiate the model
model = Point_Lander_Drag()
# We state some parameters for the neural net
context = 'Mars_Far'
dim = (10, 3)
model.controller = Neural(model, context, dim).Control
# Random state within net training boundaries
si = random.random(model.sdim)*(model.siub - model.silb) + model.silb
# Time should not matter much
tf = 5
# The resolution of the integration
nnodes = 5
# We now propagate the model with the trained neural network
states, controls = model.Propagate.Neural(si, tf, nnodes)
print(states)
if __name__ == "__main__":
main()
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.network_management_v1.types import connectivity_test
from google.cloud.network_management_v1.types import reachability
from google.longrunning import operations_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-network-management",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class ReachabilityServiceTransport(abc.ABC):
"""Abstract transport class for ReachabilityService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "networkmanagement.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_connectivity_tests: gapic_v1.method.wrap_method(
self.list_connectivity_tests,
default_timeout=None,
client_info=client_info,
),
self.get_connectivity_test: gapic_v1.method.wrap_method(
self.get_connectivity_test,
default_timeout=None,
client_info=client_info,
),
self.create_connectivity_test: gapic_v1.method.wrap_method(
self.create_connectivity_test,
default_timeout=None,
client_info=client_info,
),
self.update_connectivity_test: gapic_v1.method.wrap_method(
self.update_connectivity_test,
default_timeout=None,
client_info=client_info,
),
self.rerun_connectivity_test: gapic_v1.method.wrap_method(
self.rerun_connectivity_test,
default_timeout=None,
client_info=client_info,
),
self.delete_connectivity_test: gapic_v1.method.wrap_method(
self.delete_connectivity_test,
default_timeout=None,
client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def list_connectivity_tests(
self,
) -> Callable[
[reachability.ListConnectivityTestsRequest],
Union[
reachability.ListConnectivityTestsResponse,
Awaitable[reachability.ListConnectivityTestsResponse],
],
]:
raise NotImplementedError()
@property
def get_connectivity_test(
self,
) -> Callable[
[reachability.GetConnectivityTestRequest],
Union[
connectivity_test.ConnectivityTest,
Awaitable[connectivity_test.ConnectivityTest],
],
]:
raise NotImplementedError()
@property
def create_connectivity_test(
self,
) -> Callable[
[reachability.CreateConnectivityTestRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def update_connectivity_test(
self,
) -> Callable[
[reachability.UpdateConnectivityTestRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def rerun_connectivity_test(
self,
) -> Callable[
[reachability.RerunConnectivityTestRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def delete_connectivity_test(
self,
) -> Callable[
[reachability.DeleteConnectivityTestRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
__all__ = ("ReachabilityServiceTransport",)
|
|
# -*- coding: utf-8 -*-
# pylint: disable=no-self-use, line-too-long
from __future__ import absolute_import, print_function, with_statement
import sys
import unittest
from mock import Mock, patch
from nose.tools import * # pylint: disable=wildcard-import, unused-wildcard-import
import six
from six.moves import range # pylint: disable=redefined-builtin
from six.moves import zip # pylint: disable=redefined-builtin
from behave.model_core import FileLocation
from behave.model import Feature, Scenario, ScenarioOutline, Step
from behave.model import Table, Row
from behave.matchers import NoMatch
from behave.configuration import Configuration
from behave.compat.collections import OrderedDict
from behave import step_registry
# -- CONVENIENCE-ALIAS:
_text = six.text_type
class TestFeatureRun(unittest.TestCase):
# pylint: disable=invalid-name
def setUp(self):
self.runner = Mock()
self.runner.feature.tags = []
self.config = self.runner.config = Mock()
self.context = self.runner.context = Mock()
self.formatters = self.runner.formatters = [Mock()]
self.run_hook = self.runner.run_hook = Mock()
def test_formatter_feature_called(self):
feature = Feature('foo.feature', 1, u'Feature', u'foo',
background=Mock())
feature.run(self.runner)
self.formatters[0].feature.assert_called_with(feature)
def test_formatter_background_called_when_feature_has_background(self):
feature = Feature('foo.feature', 1, u'Feature', u'foo',
background=Mock())
feature.run(self.runner)
self.formatters[0].background.assert_called_with(feature.background)
def test_formatter_background_not_called_when_feature_has_no_background(self):
feature = Feature('foo.feature', 1, u'Feature', u'foo')
feature.run(self.runner)
assert not self.formatters[0].background.called
def test_run_runs_scenarios(self):
scenarios = [Mock(), Mock()]
for scenario in scenarios:
scenario.tags = []
scenario.run.return_value = False
self.config.tags.check.return_value = True # pylint: disable=no-member
self.config.name = []
feature = Feature('foo.feature', 1, u'Feature', u'foo',
scenarios=scenarios)
feature.run(self.runner)
for scenario in scenarios:
scenario.run.assert_called_with(self.runner)
def test_run_runs_named_scenarios(self):
scenarios = [Mock(Scenario), Mock(Scenario)]
scenarios[0].name = 'first scenario'
scenarios[1].name = 'second scenario'
scenarios[0].tags = []
scenarios[1].tags = []
# -- FAKE-CHECK:
scenarios[0].should_run_with_name_select.return_value = True
scenarios[1].should_run_with_name_select.return_value = False
for scenario in scenarios:
scenario.run.return_value = False
self.config.tags.check.return_value = True # pylint: disable=no-member
self.config.name = ['first', 'third']
self.config.name_re = Configuration.build_name_re(self.config.name)
feature = Feature('foo.feature', 1, u'Feature', u'foo',
scenarios=scenarios)
feature.run(self.runner)
scenarios[0].run.assert_called_with(self.runner)
assert not scenarios[1].run.called
scenarios[0].should_run_with_name_select.assert_called_with(self.config)
scenarios[1].should_run_with_name_select.assert_called_with(self.config)
def test_run_runs_named_scenarios_with_regexp(self):
scenarios = [Mock(), Mock()]
scenarios[0].name = 'first scenario'
scenarios[1].name = 'second scenario'
scenarios[0].tags = []
scenarios[1].tags = []
# -- FAKE-CHECK:
scenarios[0].should_run_with_name_select.return_value = False
scenarios[1].should_run_with_name_select.return_value = True
for scenario in scenarios:
scenario.run.return_value = False
self.config.tags.check.return_value = True # pylint: disable=no-member
self.config.name = ['third .*', 'second .*']
self.config.name_re = Configuration.build_name_re(self.config.name)
feature = Feature('foo.feature', 1, u'Feature', u'foo',
scenarios=scenarios)
feature.run(self.runner)
assert not scenarios[0].run.called
scenarios[1].run.assert_called_with(self.runner)
scenarios[0].should_run_with_name_select.assert_called_with(self.config)
scenarios[1].should_run_with_name_select.assert_called_with(self.config)
def test_feature_hooks_not_run_if_feature_not_being_run(self):
self.config.tags.check.return_value = False # pylint: disable=no-member
feature = Feature('foo.feature', 1, u'Feature', u'foo')
feature.run(self.runner)
assert not self.run_hook.called
class TestScenarioRun(unittest.TestCase):
# pylint: disable=invalid-name
def setUp(self):
self.runner = Mock()
self.runner.feature.tags = []
self.config = self.runner.config = Mock()
self.config.dry_run = False
self.context = self.runner.context = Mock()
self.formatters = self.runner.formatters = [Mock()]
self.run_hook = self.runner.run_hook = Mock()
def test_run_invokes_formatter_scenario_and_steps_correctly(self):
self.config.stdout_capture = False
self.config.log_capture = False
self.config.tags.check.return_value = True # pylint: disable=no-member
steps = [Mock(), Mock()]
scenario = Scenario('foo.feature', 17, u'Scenario', u'foo',
steps=steps)
scenario.run(self.runner)
self.formatters[0].scenario.assert_called_with(scenario)
for step in steps:
step.run.assert_called_with(self.runner)
if sys.version_info[0] == 3:
stringio_target = 'io.StringIO'
else:
stringio_target = 'StringIO.StringIO'
def test_handles_stdout_and_log_capture(self):
self.config.stdout_capture = True
self.config.log_capture = True
self.config.tags.check.return_value = True # pylint: disable=no-member
steps = [Mock(), Mock()]
scenario = Scenario('foo.feature', 17, u'Scenario', u'foo',
steps=steps)
scenario.run(self.runner)
self.runner.setup_capture.assert_called_with()
self.runner.teardown_capture.assert_called_with()
def test_failed_step_causes_remaining_steps_to_be_skipped(self):
self.config.stdout_capture = False
self.config.log_capture = False
self.config.tags.check.return_value = True # pylint: disable=no-member
steps = [Mock(), Mock()]
scenario = Scenario('foo.feature', 17, u'Scenario', u'foo',
steps=steps)
steps[0].run.return_value = False
steps[1].step_type = "when"
steps[1].name = "step1"
def step1_function(context): # pylint: disable=unused-argument
pass
my_step_registry = step_registry.StepRegistry()
my_step_registry.add_step_definition("when", "step1", step1_function)
with patch("behave.step_registry.registry", my_step_registry):
assert scenario.run(self.runner)
eq_(steps[1].status, 'skipped')
def test_failed_step_causes_context_failure_to_be_set(self):
self.config.stdout_capture = False
self.config.log_capture = False
self.config.tags.check.return_value = True # pylint: disable=no-member
steps = [
Mock(step_type="given", name="step0"),
Mock(step_type="then", name="step1"),
]
scenario = Scenario('foo.feature', 17, u'Scenario', u'foo',
steps=steps)
steps[0].run.return_value = False
assert scenario.run(self.runner)
# pylint: disable=protected-access
self.context._set_root_attribute.assert_called_with('failed', True)
def test_undefined_step_causes_failed_scenario_status(self):
self.config.stdout_capture = False
self.config.log_capture = False
self.config.tags.check.return_value = True # pylint: disable=no-member
passed_step = Mock()
undefined_step = Mock()
steps = [passed_step, undefined_step]
scenario = Scenario('foo.feature', 17, u'Scenario', u'foo',
steps=steps)
passed_step.run.return_value = True
passed_step.status = 'passed'
undefined_step.run.return_value = False
undefined_step.status = 'undefined'
assert scenario.run(self.runner)
eq_(undefined_step.status, 'undefined')
eq_(scenario.status, 'failed')
# pylint: disable=protected-access
self.context._set_root_attribute.assert_called_with('failed', True)
def test_skipped_steps_set_step_status_and_scenario_status_if_not_set(self):
self.config.stdout_capture = False
self.config.log_capture = False
self.config.tags.check.return_value = False # pylint: disable=no-member
steps = [Mock(), Mock()]
scenario = Scenario('foo.feature', 17, u'Scenario', u'foo',
steps=steps)
scenario.run(self.runner)
assert False not in [s.status == 'skipped' for s in steps]
eq_(scenario.status, 'skipped')
def test_scenario_hooks_not_run_if_scenario_not_being_run(self):
self.config.tags.check.return_value = False # pylint: disable=no-member
scenario = Scenario('foo.feature', 17, u'Scenario', u'foo')
scenario.run(self.runner)
assert not self.run_hook.called
def test_should_run_with_name_select(self):
scenario_name = u"first scenario"
scenario = Scenario("foo.feature", 17, u"Scenario", scenario_name)
self.config.name = ['first .*', 'second .*']
self.config.name_re = Configuration.build_name_re(self.config.name)
assert scenario.should_run_with_name_select(self.config)
class TestScenarioOutline(unittest.TestCase):
# pylint: disable=invalid-name
def test_run_calls_run_on_each_generated_scenario(self):
# pylint: disable=protected-access
outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
u'foo')
outline._scenarios = [Mock(), Mock()]
for scenario in outline._scenarios:
scenario.run.return_value = False
runner = Mock()
runner.context = Mock()
outline.run(runner)
for s in outline._scenarios:
s.run.assert_called_with(runner)
def test_run_stops_on_first_failure_if_requested(self):
# pylint: disable=protected-access
outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
u'foo')
outline._scenarios = [Mock(), Mock()]
outline._scenarios[0].run.return_value = True
runner = Mock()
runner.context = Mock()
config = runner.config = Mock()
config.stop = True
outline.run(runner)
outline._scenarios[0].run.assert_called_with(runner)
assert not outline._scenarios[1].run.called
def test_run_sets_context_variable_for_outline(self):
# pylint: disable=protected-access
outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
u'foo')
outline._scenarios = [Mock(), Mock(), Mock()]
for scenario in outline._scenarios:
scenario.run.return_value = False
runner = Mock()
context = runner.context = Mock()
config = runner.config = Mock()
config.stop = True
outline.run(runner)
eq_(context._set_root_attribute.call_args_list, [
(('active_outline', outline._scenarios[0]._row), {}),
(('active_outline', outline._scenarios[1]._row), {}),
(('active_outline', outline._scenarios[2]._row), {}),
(('active_outline', None), {}),
])
def test_run_should_pass_when_all_examples_pass(self):
# pylint: disable=protected-access
outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
u'foo')
outline._scenarios = [Mock(), Mock(), Mock()]
for scenario in outline._scenarios:
scenario.run.return_value = False
runner = Mock()
context = runner.context = Mock()
config = runner.config = Mock()
config.stop = True
resultFailed = outline.run(runner)
eq_(resultFailed, False)
def test_run_should_fail_when_first_examples_fails(self):
outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
u'foo')
failed = True
# pylint: disable=protected-access
outline._scenarios = [Mock(), Mock()]
outline._scenarios[0].run.return_value = failed
outline._scenarios[1].run.return_value = not failed
runner = Mock()
context = runner.context = Mock()
config = runner.config = Mock()
config.stop = True
resultFailed = outline.run(runner)
eq_(resultFailed, True)
def test_run_should_fail_when_last_examples_fails(self):
outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
u'foo')
failed = True
# pylint: disable=protected-access
outline._scenarios = [Mock(), Mock()]
outline._scenarios[0].run.return_value = not failed
outline._scenarios[1].run.return_value = failed
runner = Mock()
context = runner.context = Mock()
config = runner.config = Mock()
config.stop = True
resultFailed = outline.run(runner)
eq_(resultFailed, True)
def test_run_should_fail_when_middle_examples_fails(self):
outline = ScenarioOutline('foo.feature', 17, u'Scenario Outline',
u'foo')
failed = True
# pylint: disable=protected-access
outline._scenarios = [Mock(), Mock(), Mock()]
outline._scenarios[0].run.return_value = not failed
outline._scenarios[1].run.return_value = failed
outline._scenarios[2].run.return_value = not failed
runner = Mock()
context = runner.context = Mock()
config = runner.config = Mock()
config.stop = True
resultFailed = outline.run(runner)
eq_(resultFailed, True)
def raiser(exception):
def func(*args, **kwargs): # pylint: disable=unused-argument
raise exception
return func
class TestStepRun(unittest.TestCase):
# pylint: disable=invalid-name
def setUp(self):
self.step_registry = Mock()
self.runner = Mock()
self.runner.step_registry = self.step_registry
self.config = self.runner.config = Mock()
self.config.outputs = [None]
self.context = self.runner.context = Mock()
print('context is %s' % self.context)
self.formatters = self.runner.formatters = [Mock()]
self.stdout_capture = self.runner.stdout_capture = Mock()
self.stdout_capture.getvalue.return_value = ''
self.stderr_capture = self.runner.stderr_capture = Mock()
self.stderr_capture.getvalue.return_value = ''
self.log_capture = self.runner.log_capture = Mock()
self.log_capture.getvalue.return_value = ''
self.run_hook = self.runner.run_hook = Mock()
def test_run_appends_step_to_undefined_when_no_match_found(self):
step = Step('foo.feature', 17, u'Given', 'given', u'foo')
self.runner.step_registry.find_match.return_value = None
self.runner.undefined_steps = []
assert not step.run(self.runner)
assert step in self.runner.undefined_steps
eq_(step.status, 'undefined')
def test_run_reports_undefined_step_via_formatter_when_not_quiet(self):
step = Step('foo.feature', 17, u'Given', 'given', u'foo')
self.runner.step_registry.find_match.return_value = None
assert not step.run(self.runner)
self.formatters[0].match.assert_called_with(NoMatch())
self.formatters[0].result.assert_called_with(step)
def test_run_with_no_match_does_not_touch_formatter_when_quiet(self):
step = Step('foo.feature', 17, u'Given', 'given', u'foo')
self.runner.step_registry.find_match.return_value = None
assert not step.run(self.runner, quiet=True)
assert not self.formatters[0].match.called
assert not self.formatters[0].result.called
def test_run_when_not_quiet_reports_match_and_result(self):
step = Step('foo.feature', 17, u'Given', 'given', u'foo')
match = Mock()
self.runner.step_registry.find_match.return_value = match
side_effects = (None, raiser(AssertionError('whee')),
raiser(Exception('whee')))
for side_effect in side_effects:
match.run.side_effect = side_effect
step.run(self.runner)
self.formatters[0].match.assert_called_with(match)
self.formatters[0].result.assert_called_with(step)
def test_run_when_quiet_reports_nothing(self):
step = Step('foo.feature', 17, u'Given', 'given', u'foo')
match = Mock()
self.runner.step_registry.find_match.return_value = match
side_effects = (None, raiser(AssertionError('whee')),
raiser(Exception('whee')))
for side_effect in side_effects:
match.run.side_effect = side_effect
step.run(self.runner, quiet=True)
assert not self.formatters[0].match.called
assert not self.formatters[0].result.called
def test_run_runs_before_hook_then_match_then_after_hook(self):
step = Step('foo.feature', 17, u'Given', 'given', u'foo')
match = Mock()
self.runner.step_registry.find_match.return_value = match
side_effects = (None, AssertionError('whee'), Exception('whee'))
for side_effect in side_effects:
# Make match.run() and runner.run_hook() the same mock so
# we can make sure things happen in the right order.
self.runner.run_hook = match.run = Mock()
def effect(thing):
# pylint: disable=unused-argument
def raiser_(*args, **kwargs):
match.run.side_effect = None
if thing:
raise thing
def nonraiser(*args, **kwargs):
match.run.side_effect = raiser_
return nonraiser
match.run.side_effect = effect(side_effect)
step.run(self.runner)
eq_(match.run.call_args_list, [
(('before_step', self.context, step), {}),
((self.context,), {}),
(('after_step', self.context, step), {}),
])
def test_run_sets_table_if_present(self):
step = Step('foo.feature', 17, u'Given', 'given', u'foo',
table=Mock())
self.runner.step_registry.find_match.return_value = Mock()
step.run(self.runner)
eq_(self.context.table, step.table)
def test_run_sets_text_if_present(self):
step = Step('foo.feature', 17, u'Given', 'given', u'foo',
text=Mock(name='text'))
self.runner.step_registry.find_match.return_value = Mock()
step.run(self.runner)
eq_(self.context.text, step.text)
def test_run_sets_status_to_passed_if_nothing_goes_wrong(self):
step = Step('foo.feature', 17, u'Given', 'given', u'foo')
step.error_message = None
self.runner.step_registry.find_match.return_value = Mock()
step.run(self.runner)
eq_(step.status, 'passed')
eq_(step.error_message, None)
def test_run_sets_status_to_failed_on_assertion_error(self):
step = Step('foo.feature', 17, u'Given', 'given', u'foo')
step.error_message = None
match = Mock()
match.run.side_effect = raiser(AssertionError('whee'))
self.runner.step_registry.find_match.return_value = match
step.run(self.runner)
eq_(step.status, 'failed')
assert step.error_message.startswith('Assertion Failed')
@patch('traceback.format_exc')
def test_run_sets_status_to_failed_on_exception(self, format_exc):
step = Step('foo.feature', 17, u'Given', 'given', u'foo')
step.error_message = None
match = Mock()
match.run.side_effect = raiser(Exception('whee'))
self.runner.step_registry.find_match.return_value = match
format_exc.return_value = 'something to do with an exception'
step.run(self.runner)
eq_(step.status, 'failed')
eq_(step.error_message, format_exc.return_value)
@patch('time.time')
def test_run_calculates_duration(self, time_time):
step = Step('foo.feature', 17, u'Given', 'given', u'foo')
match = Mock()
self.runner.step_registry.find_match.return_value = match
def time_time_1():
def time_time_2():
return 23
time_time.side_effect = time_time_2
return 17
side_effects = (None, raiser(AssertionError('whee')),
raiser(Exception('whee')))
for side_effect in side_effects:
match.run.side_effect = side_effect
time_time.side_effect = time_time_1
step.run(self.runner)
eq_(step.duration, 23 - 17)
def test_run_captures_stdout_and_logging(self):
step = Step('foo.feature', 17, u'Given', 'given', u'foo')
match = Mock()
self.runner.step_registry.find_match.return_value = match
assert step.run(self.runner)
self.runner.start_capture.assert_called_with()
self.runner.stop_capture.assert_called_with()
def test_run_appends_any_captured_stdout_on_failure(self):
step = Step('foo.feature', 17, u'Given', 'given', u'foo')
match = Mock()
self.runner.step_registry.find_match.return_value = match
self.stdout_capture.getvalue.return_value = 'frogs'
match.run.side_effect = raiser(Exception('halibut'))
assert not step.run(self.runner)
assert 'Captured stdout:' in step.error_message
assert 'frogs' in step.error_message
def test_run_appends_any_captured_logging_on_failure(self):
step = Step('foo.feature', 17, u'Given', 'given', u'foo')
match = Mock()
self.runner.step_registry.find_match.return_value = match
self.log_capture.getvalue.return_value = 'toads'
match.run.side_effect = raiser(AssertionError('kipper'))
assert not step.run(self.runner)
assert 'Captured logging:' in step.error_message
assert 'toads' in step.error_message
class TestTableModel(unittest.TestCase):
# pylint: disable=invalid-name
HEAD = [u'type of stuff', u'awesomeness', u'ridiculousness']
DATA = [
[u'fluffy', u'large', u'frequent'],
[u'lint', u'low', u'high'],
[u'green', u'variable', u'awkward'],
]
def setUp(self):
self.table = Table(self.HEAD, 0, self.DATA)
def test_equivalence(self):
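        # Re-running setUp() builds a second Table from the same data; two
        # independently constructed tables must compare equal.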
t1 = self.table
self.setUp()
eq_(t1, self.table)
def test_table_iteration(self):
for i, row in enumerate(self.table):
for j, cell in enumerate(row):
eq_(cell, self.DATA[i][j])
def test_table_row_by_index(self):
for i in range(3):
eq_(self.table[i], Row(self.HEAD, self.DATA[i], 0))
def test_table_row_name(self):
eq_(self.table[0]['type of stuff'], 'fluffy')
eq_(self.table[1]['awesomeness'], 'low')
eq_(self.table[2]['ridiculousness'], 'awkward')
def test_table_row_index(self):
eq_(self.table[0][0], 'fluffy')
eq_(self.table[1][1], 'low')
eq_(self.table[2][2], 'awkward')
@raises(KeyError)
def test_table_row_keyerror(self):
self.table[0]['spam'] # pylint: disable=pointless-statement
def test_table_row_items(self):
eq_(list(self.table[0].items()), list(zip(self.HEAD, self.DATA[0])))
class TestModelRow(unittest.TestCase):
# pylint: disable=invalid-name, bad-whitespace
HEAD = [u'name', u'sex', u'age']
DATA = [u'Alice', u'female', u'12']
def setUp(self):
self.row = Row(self.HEAD, self.DATA, 0)
def test_len(self):
eq_(len(self.row), 3)
def test_getitem_with_valid_colname(self):
# pylint: disable=bad-whitespace
eq_(self.row['name'], u'Alice')
eq_(self.row['sex'], u'female')
eq_(self.row['age'], u'12')
@raises(KeyError)
def test_getitem_with_unknown_colname(self):
self.row['__UNKNOWN_COLUMN__'] # pylint: disable=pointless-statement
def test_getitem_with_valid_index(self):
eq_(self.row[0], u'Alice')
eq_(self.row[1], u'female')
eq_(self.row[2], u'12')
@raises(IndexError)
def test_getitem_with_invalid_index(self):
colsize = len(self.row)
eq_(colsize, 3)
self.row[colsize] # pylint: disable=pointless-statement
def test_get_with_valid_colname(self):
# pylint: disable=bad-whitespace
eq_(self.row.get('name'), u'Alice')
eq_(self.row.get('sex'), u'female')
eq_(self.row.get('age'), u'12')
def test_getitem_with_unknown_colname_should_return_default(self):
eq_(self.row.get('__UNKNOWN_COLUMN__', 'XXX'), u'XXX')
def test_as_dict(self):
data1 = self.row.as_dict()
data2 = dict(self.row.as_dict())
assert isinstance(data1, dict)
assert isinstance(data2, dict)
assert isinstance(data1, OrderedDict)
# -- REQUIRES: Python2.7 or ordereddict installed.
# assert not isinstance(data2, OrderedDict)
eq_(data1, data2)
# pylint: disable=bad-whitespace
eq_(data1['name'], u'Alice')
eq_(data1['sex'], u'female')
eq_(data1['age'], u'12')
class TestFileLocation(unittest.TestCase):
# pylint: disable=invalid-name
ordered_locations1 = [
FileLocation("features/alice.feature", 1),
FileLocation("features/alice.feature", 5),
FileLocation("features/alice.feature", 10),
FileLocation("features/alice.feature", 11),
FileLocation("features/alice.feature", 100),
]
ordered_locations2 = [
FileLocation("features/alice.feature", 1),
FileLocation("features/alice.feature", 10),
FileLocation("features/bob.feature", 5),
FileLocation("features/charly.feature", None),
FileLocation("features/charly.feature", 0),
FileLocation("features/charly.feature", 100),
]
same_locations = [
(FileLocation("alice.feature"),
FileLocation("alice.feature", None),
),
(FileLocation("alice.feature", 10),
FileLocation("alice.feature", 10),
),
(FileLocation("features/bob.feature", 11),
FileLocation("features/bob.feature", 11),
),
]
def test_compare_equal(self):
for value1, value2 in self.same_locations:
eq_(value1, value2)
def test_compare_equal_with_string(self):
for location in self.ordered_locations2:
eq_(location, location.filename)
eq_(location.filename, location)
def test_compare_not_equal(self):
for value1, value2 in self.same_locations:
assert not(value1 != value2) # pylint: disable=unneeded-not, superfluous-parens
for locations in [self.ordered_locations1, self.ordered_locations2]:
for value1, value2 in zip(locations, locations[1:]):
assert value1 != value2
def test_compare_less_than(self):
for locations in [self.ordered_locations1, self.ordered_locations2]:
for value1, value2 in zip(locations, locations[1:]):
assert value1 < value2, "FAILED: %s < %s" % (_text(value1), _text(value2))
assert value1 != value2
def test_compare_less_than_with_string(self):
locations = self.ordered_locations2
for value1, value2 in zip(locations, locations[1:]):
if value1.filename == value2.filename:
continue
assert value1 < value2.filename, \
"FAILED: %s < %s" % (_text(value1), _text(value2.filename))
assert value1.filename < value2, \
"FAILED: %s < %s" % (_text(value1.filename), _text(value2))
def test_compare_greater_than(self):
for locations in [self.ordered_locations1, self.ordered_locations2]:
for value1, value2 in zip(locations, locations[1:]):
assert value2 > value1, "FAILED: %s > %s" % (_text(value2), _text(value1))
assert value2 != value1
def test_compare_less_or_equal(self):
for value1, value2 in self.same_locations:
assert value1 <= value2, "FAILED: %s <= %s" % (_text(value1), _text(value2))
assert value1 == value2
for locations in [self.ordered_locations1, self.ordered_locations2]:
for value1, value2 in zip(locations, locations[1:]):
assert value1 <= value2, "FAILED: %s <= %s" % (_text(value1), _text(value2))
assert value1 != value2
def test_compare_greater_or_equal(self):
for value1, value2 in self.same_locations:
assert value2 >= value1, "FAILED: %s >= %s" % (_text(value2), _text(value1))
assert value2 == value1
for locations in [self.ordered_locations1, self.ordered_locations2]:
for value1, value2 in zip(locations, locations[1:]):
assert value2 >= value1, "FAILED: %s >= %s" % (_text(value2), _text(value1))
assert value2 != value1
def test_filename_should_be_same_as_self(self):
for location in self.ordered_locations2:
assert location == location.filename
assert location.filename == location
def test_string_conversion(self):
for location in self.ordered_locations2:
expected = u"%s:%s" % (location.filename, location.line)
if location.line is None:
expected = location.filename
assert six.text_type(location) == expected
def test_repr_conversion(self):
for location in self.ordered_locations2:
expected = u'<FileLocation: filename="%s", line=%s>' % \
(location.filename, location.line)
actual = repr(location)
assert actual == expected, "FAILED: %s == %s" % (actual, expected)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Cudnn RNN models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import functools
import itertools
import os
import sys
import unittest
import numpy as np
from tensorflow.contrib.cudnn_rnn.python.layers import cudnn_rnn
from tensorflow.contrib.cudnn_rnn.python.ops import cudnn_rnn_ops
from tensorflow.contrib.rnn.python.ops import rnn as contrib_rnn_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradients_impl as gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import rnn as rnn_lib
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import momentum
from tensorflow.python.training import rmsprop
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.checkpointable import util as checkpointable_utils
CUDNN_LSTM = cudnn_rnn_ops.CUDNN_LSTM
CUDNN_GRU = cudnn_rnn_ops.CUDNN_GRU
CUDNN_RNN_RELU = cudnn_rnn_ops.CUDNN_RNN_RELU
CUDNN_RNN_TANH = cudnn_rnn_ops.CUDNN_RNN_TANH
CUDNN_RNN_UNIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_UNIDIRECTION
CUDNN_RNN_BIDIRECTION = cudnn_rnn_ops.CUDNN_RNN_BIDIRECTION
CUDNN_LSTM_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_LSTM_PARAMS_PER_LAYER
CUDNN_GRU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_GRU_PARAMS_PER_LAYER
CUDNN_RNN_TANH_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_TANH_PARAMS_PER_LAYER
CUDNN_RNN_RELU_PARAMS_PER_LAYER = cudnn_rnn_ops.CUDNN_RNN_RELU_PARAMS_PER_LAYER
class CudnnTestModel(object):
"""Model with convenient APIs for easier building and running test graph.
The graph built is used by all tests below to avoid repeatedly building
similar test graphs.
"""
def __init__(self,
rnn_mode,
num_layers,
num_units,
input_size,
direction=CUDNN_RNN_UNIDIRECTION,
dropout=0.,
dtype=dtypes.float32,
training=False,
seed=None,
kernel_initializer=None,
bias_initializer=None):
if dtype not in (dtypes.float16, dtypes.float32, dtypes.float64):
raise ValueError("Invalid dtype: %s" % dtype)
self._dtype = dtype
self._inputs = array_ops.placeholder(
dtype=dtype, shape=[None, None, input_size], name="inputs")
h = array_ops.placeholder(
dtype=dtype, shape=[None, None, num_units], name="h")
c = array_ops.placeholder(
dtype=dtype, shape=[None, None, num_units], name="c")
if rnn_mode == CUDNN_LSTM:
model_fn = cudnn_rnn.CudnnLSTM
self._initial_state = (h, c)
elif rnn_mode == CUDNN_GRU:
model_fn = cudnn_rnn.CudnnGRU
self._initial_state = (h,)
elif rnn_mode == CUDNN_RNN_TANH:
model_fn = cudnn_rnn.CudnnRNNTanh
self._initial_state = (h,)
elif rnn_mode == CUDNN_RNN_RELU:
model_fn = cudnn_rnn.CudnnRNNRelu
self._initial_state = (h,)
else:
raise ValueError("Invalid rnn_mode: %s" % rnn_mode)
self._rnn = model_fn(
num_layers,
num_units,
direction=direction,
dropout=dropout,
dtype=dtype,
seed=seed,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer)
self._rnn.build([None, None, input_size])
self._outputs, self._output_state = self._rnn(
self._inputs, initial_state=self._initial_state, training=training)
def _AddUp(self, outputs, output_state):
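    """Sums the outputs and every state tensor into a single scalar."""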
total = math_ops.reduce_sum(outputs)
for s in output_state:
total += math_ops.reduce_sum(s)
return total
@property
def inputs(self):
return self._inputs
@property
def initial_state(self):
return self._initial_state
@property
def outputs(self):
return self._outputs
@property
def output_state(self):
return self._output_state
@property
def rnn(self):
return self._rnn
@property
def total_sum(self):
return self._AddUp(self.outputs, self.output_state)
def SynthesizeInput(self, seq_length, batch_size, seed=1234):
"""Synthesizes input and initial state values for testing."""
np.random.seed(seed)
num_layers = self._rnn.num_layers
dir_count = self._rnn.num_dirs
num_units = self._rnn.num_units
input_size = self._rnn.input_size
np_dtype = np.float32 if self._dtype == dtypes.float32 else np.float64
inputs = np.random.randn(seq_length, batch_size,
input_size).astype(np_dtype)
input_h = np.random.randn(num_layers * dir_count, batch_size,
num_units).astype(np_dtype)
if self._rnn.rnn_mode == CUDNN_LSTM:
input_c = np.random.randn(num_layers * dir_count, batch_size,
num_units).astype(np_dtype)
initial_state = (input_h, input_c)
else:
initial_state = (input_h,)
return inputs, initial_state
def ZeroState(self, batch_size):
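    """Returns an all-zeros initial state matching the model's state layout."""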
num_layers = self._rnn.num_layers
dir_count = self._rnn.num_dirs
num_units = self._rnn.num_units
np_dtype = np.float32 if self._dtype == dtypes.float32 else np.float64
input_h = np.zeros((num_layers * dir_count, batch_size,
num_units)).astype(np_dtype)
if self._rnn.rnn_mode == CUDNN_LSTM:
input_c = np.zeros((num_layers * dir_count, batch_size,
num_units)).astype(np_dtype)
initial_state = (input_h, input_c)
else:
initial_state = (input_h,)
return initial_state
def FProp(self, inputs_t, initial_state_t, training):
"""Builds additional subgraph with given inputs and state.
Args:
inputs_t: a tensor.
initial_state_t: a tensor.
training: boolean, true if training mode.
Returns:
A tensor of the forward pass output of the model.
"""
outputs, output_state = self._rnn(
inputs_t, initial_state=initial_state_t, training=training)
return self._AddUp(outputs, output_state)
def Feed(self, sess, inputs, initial_state=None, return_sum=True):
"""Runs graph with given inputs and initial state."""
batch_size = inputs.shape[1]
if initial_state is None:
initial_state = self.ZeroState(batch_size)
if return_sum:
return sess.run(
self.total_sum,
feed_dict={self.inputs: inputs,
self.initial_state: initial_state})
else:
return sess.run(
[self.outputs, self.output_state],
feed_dict={self.inputs: inputs,
self.initial_state: initial_state})
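# A minimal usage sketch of CudnnTestModel (hypothetical, not part of the test
# suite): assumes a CUDA-enabled TensorFlow build and a session opened against
# the graph in which the model was constructed.
#
#   model = CudnnTestModel(CUDNN_LSTM, num_layers=1, num_units=4, input_size=3)
#   with some_session_for_that_graph as sess:  # e.g. tf.test's test_session()
#     sess.run(variables.global_variables_initializer())
#     inputs, initial_state = model.SynthesizeInput(seq_length=5, batch_size=2)
#     total_sum_v = model.Feed(sess, inputs, initial_state)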
def _CreateCudnnCompatibleCanonicalRNN(rnn, inputs, is_bidi=False, scope=None):
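  """Builds a canonical (non-cudnn) RNN out of cudnn-compatible cells.
  The resulting graph can restore cuDNN-trained weights, so its outputs are
  used to cross-check the outputs of the Cudnn graph in the tests below.
  """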
mode = rnn.rnn_mode
num_units = rnn.num_units
num_layers = rnn.num_layers
# To reuse cuDNN-trained models, must use cudnn compatible rnn cells.
if mode == CUDNN_LSTM:
single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleLSTMCell(num_units)
elif mode == CUDNN_GRU:
single_cell = lambda: cudnn_rnn_ops.CudnnCompatibleGRUCell(num_units)
elif mode == CUDNN_RNN_TANH:
single_cell = (lambda: rnn_cell_impl.BasicRNNCell(num_units, math_ops.tanh))
elif mode == CUDNN_RNN_RELU:
single_cell = (
lambda: rnn_cell_impl.BasicRNNCell(num_units, gen_nn_ops.relu))
else:
raise ValueError("%s is not supported!" % mode)
if not is_bidi:
cell = rnn_cell_impl.MultiRNNCell(
[single_cell() for _ in range(num_layers)])
return rnn_lib.dynamic_rnn(
cell, inputs, dtype=dtypes.float32, time_major=True, scope=scope)
else:
cells_fw = [single_cell() for _ in range(num_layers)]
cells_bw = [single_cell() for _ in range(num_layers)]
(outputs, output_state_fw,
output_state_bw) = contrib_rnn_lib.stack_bidirectional_dynamic_rnn(
cells_fw,
cells_bw,
inputs,
dtype=dtypes.float32,
time_major=True,
scope=scope)
return outputs, (output_state_fw, output_state_bw)
class CudnnRNNTestBasic(test_util.TensorFlowTestCase):
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testLayerBasic(self):
num_layers = 4
num_units = 2
batch_size = 8
direction = CUDNN_RNN_UNIDIRECTION
dir_count = 1
with vs.variable_scope("main"):
kernel_initializer = init_ops.constant_initializer(0.)
bias_initializer = init_ops.constant_initializer(0.)
inputs = random_ops.random_uniform([
num_layers * dir_count, batch_size, num_units], dtype=dtypes.float32)
lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
direction=direction,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name="awesome_lstm")
# Build the layer
outputs1, _ = lstm(inputs)
# Reuse the layer
outputs2, _ = lstm(inputs)
total_sum1 = math_ops.reduce_sum(outputs1)
total_sum2 = math_ops.reduce_sum(outputs2)
with vs.variable_scope("main", reuse=True):
lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
direction=direction,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name="awesome_lstm")
# Reuse the layer
outputs3, _ = lstm(inputs)
total_sum3 = math_ops.reduce_sum(outputs3)
self.assertEqual(1, len(variables.trainable_variables()))
self.assertEqual(1, len(ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS)))
self.assertEqual("main/awesome_lstm/opaque_kernel",
variables.trainable_variables()[0].op.name)
with self.test_session(use_gpu=True) as sess:
sess.run(variables.global_variables_initializer())
(total_sum1_v, total_sum2_v, total_sum3_v) = sess.run(
[total_sum1, total_sum2, total_sum3])
self.assertEqual(0, total_sum1_v)
self.assertEqual(0, total_sum2_v)
self.assertEqual(0, total_sum3_v)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testOptimizersSupport(self):
for opt in ("adagrad", "adam", "rmsprop", "momentum", "sgd"):
self._TestOptimizerSupportHelper(opt)
def _GetOptimizer(self, opt):
if opt == "adagrad":
return adagrad.AdagradOptimizer(learning_rate=1e-2)
elif opt == "adam":
return adam.AdamOptimizer(learning_rate=1e-2)
elif opt == "rmsprop":
return rmsprop.RMSPropOptimizer(learning_rate=1e-2)
elif opt == "momentum":
return momentum.MomentumOptimizer(learning_rate=1e-2, momentum=0.9)
elif opt == "sgd":
return gradient_descent.GradientDescentOptimizer(learning_rate=1e-2)
else:
raise ValueError("Unsupported optimizer: %s" % opt)
def _TestOptimizerSupportHelper(self, opt):
num_layers = 4
num_units = 2
batch_size = 8
direction = CUDNN_RNN_UNIDIRECTION
dir_count = 1
with ops.Graph().as_default() as g:
kernel_initializer = init_ops.constant_initializer(0.)
bias_initializer = init_ops.constant_initializer(0.)
inputs = random_ops.random_uniform([
num_layers * dir_count, batch_size, num_units], dtype=dtypes.float32)
lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
direction=direction,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name="awesome_lstm")
outputs, _ = lstm(inputs)
loss = math_ops.reduce_sum(outputs)
optimizer = self._GetOptimizer(opt)
train_op = optimizer.minimize(loss)
with self.test_session(use_gpu=True, graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(train_op)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSaveableGraphDeviceAssignment(self):
num_layers = 4
num_units = 2
batch_size = 8
direction = CUDNN_RNN_UNIDIRECTION
dir_count = 1
def DeviceFn(op):
if op.type in ("Variable", "VariableV2"):
return "/cpu:0"
else:
return "/gpu:0"
with ops.Graph().as_default() as g:
with ops.device(DeviceFn):
with vs.variable_scope("main"):
kernel_initializer = init_ops.constant_initializer(3.14)
bias_initializer = init_ops.constant_initializer(1.59)
inputs = random_ops.random_uniform(
[num_layers * dir_count, batch_size, num_units],
dtype=dtypes.float32)
lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
direction=direction,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
name="awesome_lstm")
outputs = lstm(inputs)
# saver is created in the scope of DeviceFn.
saver = saver_lib.Saver()
with self.test_session(use_gpu=True, graph=g) as sess:
save_path = os.path.join(self.get_temp_dir(),
"test-saveable-device-assignment")
sess.run(variables.global_variables_initializer())
saver.save(sess, save_path)
saver.restore(sess, save_path)
sess.run(outputs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testDifferentShapesEager(self):
# Checks that kernel caching does not cause sharing of temporary storage
# across different input shapes when executing eagerly.
with context.eager_mode():
with ops.device("gpu:0"):
first_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
array_ops.zeros([28, 100, 28]))
second_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
array_ops.zeros([28, 100, 100]))
self.assertAllEqual([28, 100, 100], first_output.shape)
self.assertAllEqual([28, 100, 100], second_output.shape)
def _LossFunc():
first_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
array_ops.zeros([28, 100, 28]))
second_output, _ = cudnn_rnn.CudnnGRU(1, 100)(
array_ops.zeros([28, 100, 100]))
return (math_ops.reduce_sum(first_output) +
math_ops.reduce_sum(second_output))
backprop.implicit_grad(_LossFunc)()
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testDifferentShapesGraph(self):
# Tests that a single kernel instance presented with multiple input shapes
# does not crash with graph execution.
with ops.device("gpu:0"):
layer = cudnn_rnn.CudnnGRU(1, 100)
layer(array_ops.zeros([28, 100, 100]))
def _Cond(index, accumulation):
del accumulation # unused
return math_ops.less(index, 4)
def _Body(index, accumulation):
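      # Alternate between 90- and 80-feature inputs so the same layer instance
      # sees different input shapes across while_loop iterations.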
layer_input = accumulation[:, :, 10 * (1 + index % 2):]
output, _ = layer(layer_input)
return index + 1, accumulation + output
original_input = array_ops.zeros([28, 100, 100])
_, accumulation = control_flow_ops.while_loop(_Cond, _Body,
[0, original_input])
grad, = gradients.gradients(
math_ops.reduce_sum(accumulation), (original_input,))
init_op = variables.global_variables_initializer()
with self.cached_session() as sess:
sess.run(init_op)
accumulation_eval, grad_eval = sess.run((accumulation, grad))
self.assertAllEqual([28, 100, 100], accumulation_eval.shape)
self.assertAllEqual([28, 100, 100], grad_eval.shape)
# TODO(jamesqin): Transform to parameterized test after it is included in the
# TF open source codebase.
class CudnnRNNTestSaveRestore(test_util.TensorFlowTestCase):
def _CompareWeights(self, lhs, rhs):
self.assertEqual(len(lhs), len(rhs))
for lw, rw in zip(lhs, rhs):
self.assertAllEqual(lw, rw)
def _CompareBiases(self, lhs, rhs, rnn_mode, num_layers, direction):
self.assertEqual(len(lhs), len(rhs))
if rnn_mode == CUDNN_LSTM:
num_params_per_layer = CUDNN_LSTM_PARAMS_PER_LAYER
elif rnn_mode == CUDNN_GRU:
num_params_per_layer = CUDNN_GRU_PARAMS_PER_LAYER
elif rnn_mode == CUDNN_RNN_TANH:
num_params_per_layer = CUDNN_RNN_TANH_PARAMS_PER_LAYER
else:
num_params_per_layer = CUDNN_RNN_RELU_PARAMS_PER_LAYER
num_dirs = 1 if direction == CUDNN_RNN_UNIDIRECTION else 2
num_params_per_layer *= num_dirs
self.assertEqual(num_params_per_layer * num_layers, len(lhs))
for i in range(num_layers):
layer_lhs = lhs[i * num_params_per_layer: (i+1) * num_params_per_layer]
layer_rhs = rhs[i * num_params_per_layer: (i+1) * num_params_per_layer]
if direction == CUDNN_RNN_UNIDIRECTION:
self._CompareSingleLayerBiases(layer_lhs, layer_rhs)
else:
size = len(layer_lhs)
fw_lhs, bw_lhs = layer_lhs[:size//2], layer_lhs[size//2:]
fw_rhs, bw_rhs = layer_rhs[:size//2], layer_rhs[size//2:]
self._CompareSingleLayerBiases(fw_lhs, fw_rhs)
self._CompareSingleLayerBiases(bw_lhs, bw_rhs)
def _CompareSingleLayerBiases(self, lhs, rhs):
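    # cuDNN keeps two bias vectors per gate (input and recurrent); their split
    # is not uniquely determined, so only the elementwise sums are compared.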
self.assertEqual(len(lhs), len(rhs))
lf_lhs, rt_lhs = lhs[:len(lhs)//2], lhs[len(lhs)//2:]
lf_rhs, rt_rhs = rhs[:len(rhs)//2], rhs[len(rhs)//2:]
self.assertEqual(len(lf_lhs), len(rt_lhs))
self.assertEqual(len(lf_rhs), len(rt_rhs))
sum_lhs, sum_rhs = [], []
for lf, rt in zip(lf_lhs, rt_lhs):
sum_lhs.append(lf + rt)
for lf, rt in zip(lf_rhs, rt_rhs):
sum_rhs.append(lf + rt)
self.assertEqual(len(sum_lhs), len(sum_rhs))
for lf, rt in zip(sum_lhs, sum_rhs):
self.assertAllEqual(lf, rt)
def _TestSaveRestoreVariable(self, rnn_mode, direction, dtype):
input_size = 3
num_layers = 2
num_units = 7
with ops.Graph().as_default() as g:
random_seed.set_random_seed(1234)
model = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dtype=dtype)
rnn = model.rnn
save_path = os.path.join(self.get_temp_dir(),
"save-restore-variable-test")
saver = saver_lib.Saver()
weights, biases = (
model.rnn.saveable.format_converter._opaque_to_cu_canonical(
model.rnn.saveable._variables))
opaque_params = rnn.trainable_variables[0]
      # CudnnTestModel() creates a CudnnOpaqueParamsSaveable that lets the
      # saver save/restore the Cudnn variables in canonical format.
reset_op = state_ops.assign(
opaque_params,
array_ops.zeros(array_ops.shape(opaque_params), dtype=dtype))
# Passing graph explicitly, otherwise an old sess would be reused.
with self.test_session(use_gpu=True, graph=g) as sess:
sess.run(variables.global_variables_initializer())
val = saver.save(sess, save_path)
self.assertEqual(save_path, val)
weights_v, biases_v = sess.run([weights, biases])
# Reset opaque param
sess.run(reset_op)
saver.restore(sess, save_path)
weights_v_restored, biases_v_restored = sess.run([weights, biases])
self._CompareWeights(weights_v, weights_v_restored)
self._CompareBiases(biases_v, biases_v_restored, rnn_mode, num_layers,
direction)
def _TestSaveRestoreTwoVariables(self, rnn_mode, direction, dtype):
input_size = 3
num_layers = 2
num_units = 7
with ops.Graph().as_default() as g:
random_seed.set_random_seed(1234)
with vs.variable_scope("m1"):
model1 = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dtype=dtype)
with vs.variable_scope("m2"):
model2 = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dtype=dtype)
opaque_params = (model1.rnn.trainable_variables[0],
model2.rnn.trainable_variables[0])
saveable1 = model1.rnn.saveable
weights1, biases1 = saveable1.format_converter._opaque_to_cu_canonical(
saveable1._variables)
      saveable2 = model2.rnn.saveable
weights2, biases2 = saveable2.format_converter._opaque_to_cu_canonical(
saveable2._variables)
reset_params = [
state_ops.assign(params,
array_ops.zeros_like(params, dtype=dtype))
for params in opaque_params
]
reset_op = control_flow_ops.group(*reset_params)
save_path = os.path.join(self.get_temp_dir(),
"save-restore-variable-test2")
saver = saver_lib.Saver()
# Passing graph explicitly, otherwise an old sess would be reused.
with self.test_session(use_gpu=True, graph=g) as sess:
sess.run(variables.global_variables_initializer())
val = saver.save(sess, save_path)
self.assertEqual(save_path, val)
weights1_v, biases1_v = sess.run([weights1, biases1])
weights2_v, biases2_v = sess.run([weights2, biases2])
sess.run(reset_op)
saver.restore(sess, save_path)
weights1_v_restored, biases1_v_restored = sess.run([weights1, biases1])
weights2_v_restored, biases2_v_restored = sess.run([weights2, biases2])
self._CompareWeights(weights1_v, weights1_v_restored)
self._CompareWeights(weights2_v, weights2_v_restored)
self._CompareBiases(biases1_v, biases1_v_restored, rnn_mode, num_layers,
direction)
self._CompareBiases(biases2_v, biases2_v_restored, rnn_mode, num_layers,
direction)
def _TestSaveRestoreOutput(self, rnn_mode, direction, dtype):
with ops.Graph().as_default() as g:
num_layers = 2
num_units = 7
input_size = 7
seq_length = 8
batch_size = 4
model = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dtype=dtype,
training=False)
rnn = model.rnn
save_path = os.path.join(self.get_temp_dir(), "save-restore-output-test")
saver = saver_lib.Saver()
# Only one opaque var in a cudnn layer.
assert len(rnn.trainable_variables) == 1
reset_params = state_ops.assign(
rnn.trainable_variables[0],
array_ops.zeros(
array_ops.shape(rnn.trainable_variables[0]), dtype=dtype))
# Passing graph explicitly, otherwise an old sess would be reused.
with self.test_session(use_gpu=True, graph=g) as sess:
sess.run(variables.global_variables_initializer())
inputs, initial_state = model.SynthesizeInput(seq_length, batch_size)
total_sum_v = model.Feed(sess, inputs, initial_state)
val = saver.save(sess, save_path)
self.assertEqual(save_path, val)
sess.run(reset_params)
saver.restore(sess, save_path)
total_sum_v_restored = model.Feed(sess, inputs, initial_state)
self.assertAllClose(total_sum_v, total_sum_v_restored, atol=1e-5)
def _TestSaveRestoreHelper(self, rnn_mode):
directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]
dtype_list = [dtypes.float16, dtypes.float32, dtypes.float64]
for direction, dtype in itertools.product(directions, dtype_list):
self._TestSaveRestoreVariable(rnn_mode, direction, dtype)
self._TestSaveRestoreTwoVariables(rnn_mode, direction, dtype)
self._TestSaveRestoreOutput(rnn_mode, direction, dtype)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSaveRestoreRepeatedlyCreateCustomSaveable(self):
input_size = 3
num_layers = 2
num_units = 7
with ops.Graph().as_default():
random_seed.set_random_seed(1234)
model = CudnnTestModel(
CUDNN_LSTM,
num_layers,
num_units,
input_size,
direction=CUDNN_RNN_UNIDIRECTION,
dtype=dtypes.float32)
with self.assertRaisesRegexp(RuntimeError,
"Cudnn saveable already created"):
model.rnn._create_saveable()
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSaveRestoreLSTM(self):
self._TestSaveRestoreHelper(CUDNN_LSTM)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSaveRestoreGRU(self):
self._TestSaveRestoreHelper(CUDNN_GRU)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSaveRestoreRNNTanh(self):
self._TestSaveRestoreHelper(CUDNN_RNN_TANH)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSaveRestoreRNNRelu(self):
self._TestSaveRestoreHelper(CUDNN_RNN_RELU)
class CudnnRNNTestSaveRestoreCheckpointable(test_util.TensorFlowTestCase):
def _VerifyCheckpoint(
self, checkpoint_path, compatible_cell_fn, cudnn_cell_fn,
num_layers, input_size, expected_variable_values, num_applications=3):
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
with ops.device("gpu:0"):
cudnn_layer = cudnn_cell_fn()
cudnn_checkpoint = checkpointable_utils.Checkpoint(cell=cudnn_layer)
status = cudnn_checkpoint.restore(checkpoint_path)
inputs = 3. * array_ops.ones([num_applications, num_layers, input_size],
dtype=dtypes.float32)
cudnn_output, _ = cudnn_layer(inputs)
status.run_restore_ops()
second_save_path = cudnn_checkpoint.save(checkpoint_prefix)
restore_layer = compatible_cell_fn()
restore_layer_checkpoint = checkpointable_utils.Checkpoint(
cell=restore_layer)
status = restore_layer_checkpoint.restore(second_save_path)
current_state = restore_layer.zero_state(1, dtypes.float32)
for _ in range(num_applications):
restore_layer_output, current_state = restore_layer(
inputs=3. * array_ops.ones([1, input_size]),
state=current_state)
status.run_restore_ops()
self.assertTrue(restore_layer.variables)
for variable, expected_value in zip(
restore_layer.variables, expected_variable_values):
self.assertAllClose(expected_value, self.evaluate(variable))
self.assertAllClose(self.evaluate(restore_layer_output),
self.evaluate(cudnn_output)[-1, -1:, ...])
def _CheckpointableSingleCellUnidirectionalTestTemplate(
self, single_cell_fn, cudnn_cell_fn):
    # Single-layer cuDNN cells with object-based checkpointing should be
    # checkpoint-compatible with either a single CudnnCompatible cell or a
    # MultiRNNCell containing one cell.
input_size = 3
save_cell_layer = single_cell_fn()
save_cell_layer(
inputs=array_ops.ones([1, input_size]),
state=save_cell_layer.zero_state(1, dtypes.float32))
self.assertTrue(save_cell_layer.variables)
expected_values = []
np.random.seed(10)
for variable in save_cell_layer.variables:
value = np.random.normal(size=variable.shape)
expected_values.append(value)
self.evaluate(variable.assign(value))
save_checkpoint = checkpointable_utils.Checkpoint(cell=save_cell_layer)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
first_save_path = save_checkpoint.save(checkpoint_prefix)
self._VerifyCheckpoint(
checkpoint_path=first_save_path,
compatible_cell_fn=
lambda: rnn_cell_impl.MultiRNNCell([single_cell_fn()]),
cudnn_cell_fn=cudnn_cell_fn,
num_layers=1,
expected_variable_values=expected_values,
input_size=input_size)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
@test_util.run_in_graph_and_eager_modes
def testLSTMCheckpointableSingleLayer(self):
num_units = 2
direction = CUDNN_RNN_UNIDIRECTION
self._CheckpointableSingleCellUnidirectionalTestTemplate(
single_cell_fn=functools.partial(
cudnn_rnn_ops.CudnnCompatibleLSTMCell, num_units=num_units),
cudnn_cell_fn=functools.partial(
cudnn_rnn.CudnnLSTM, num_layers=1, num_units=num_units,
direction=direction, name="awesome_lstm"))
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
@test_util.run_in_graph_and_eager_modes
def testGRUCheckpointableSingleLayer(self):
num_units = 2
direction = CUDNN_RNN_UNIDIRECTION
with self.assertRaises(NotImplementedError):
# TODO(allenl): Implement object-based saving for GRUs and other cells.
self._CheckpointableSingleCellUnidirectionalTestTemplate(
single_cell_fn=functools.partial(
cudnn_rnn_ops.CudnnCompatibleGRUCell, num_units=num_units),
cudnn_cell_fn=functools.partial(
cudnn_rnn.CudnnGRU, num_layers=1, num_units=num_units,
direction=direction, name="awesome_gru"))
def _CheckpointableMultiLayerTestTemplate(
self, single_cell_fn, cudnn_cell_fn, num_layers):
def _MultiCellFn():
return rnn_cell_impl.MultiRNNCell(
[single_cell_fn() for _ in range(num_layers)])
input_size = 3
save_graph = ops.Graph()
with save_graph.as_default(), self.session(graph=save_graph):
save_layer = _MultiCellFn()
save_layer(inputs=array_ops.ones([1, input_size]),
state=save_layer.zero_state(1, dtypes.float32))
self.assertTrue(save_layer.variables)
expected_values = []
np.random.seed(10)
for variable in save_layer.variables:
value = np.random.normal(size=variable.shape)
expected_values.append(value)
self.evaluate(variable.assign(value))
save_checkpoint = checkpointable_utils.Checkpoint(cell=save_layer)
checkpoint_directory = self.get_temp_dir()
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
first_save_path = save_checkpoint.save(checkpoint_prefix)
self._VerifyCheckpoint(
checkpoint_path=first_save_path,
compatible_cell_fn=_MultiCellFn, cudnn_cell_fn=cudnn_cell_fn,
num_layers=num_layers,
expected_variable_values=expected_values,
input_size=input_size)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
@test_util.run_in_graph_and_eager_modes
  def testCudnnCompatibleLSTMCheckpointableMultiLayer(self):
num_units = 2
num_layers = 3
direction = CUDNN_RNN_UNIDIRECTION
self._CheckpointableMultiLayerTestTemplate(
single_cell_fn=functools.partial(
cudnn_rnn_ops.CudnnCompatibleLSTMCell, num_units=num_units),
cudnn_cell_fn=functools.partial(
cudnn_rnn.CudnnLSTM, num_layers=num_layers, num_units=num_units,
direction=direction, name="awesome_lstm"),
num_layers=num_layers)
# TODO(jamesqin): Transform to parameterized test after it is included in the
# TF open source codebase.
class CudnnRNNTestCompatibleRNNCells(test_util.TensorFlowTestCase):
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testCudnnCompatibleLSTM(self):
self._TestCudnnCompatibleRnnCellsHelper(CUDNN_LSTM)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testCudnnCompatibleGRU(self):
self._TestCudnnCompatibleRnnCellsHelper(CUDNN_GRU)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testCudnnCompatibleRNNTanh(self):
self._TestCudnnCompatibleRnnCellsHelper(CUDNN_RNN_TANH)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testCudnnCompatibleRNNRelu(self):
self._TestCudnnCompatibleRnnCellsHelper(CUDNN_RNN_RELU)
def _TestCudnnCompatibleRnnCellsHelper(self, rnn_mode):
configs = [
{
"num_layers": 1,
"seq_length": 3,
"num_units": 4,
"input_size": 5,
"batch_size": 6,
},
{
"num_layers": 2,
"seq_length": 8,
"num_units": 4,
"input_size": 8,
"batch_size": 16,
},
{
"num_layers": 2,
"seq_length": 3,
"num_units": 4,
"input_size": 5,
"batch_size": 6,
},
{
"num_layers": 1,
"seq_length": 2,
"num_units": 2,
"input_size": 4,
"batch_size": 1,
},
]
directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]
for cfg, direction in zip(configs, directions):
self._TestCudnnCompatibleRnnCells(cfg["num_layers"], cfg["seq_length"],
cfg["num_units"], cfg["input_size"],
cfg["batch_size"], rnn_mode, direction)
def _TestCudnnCompatibleRnnCells(self, num_layers, seq_length, num_units,
input_size, batch_size, rnn_mode, direction):
dtype = dtypes.float32
# Train graph
with ops.Graph().as_default() as g:
model = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dtype=dtype,
training=True)
target_output = array_ops.placeholder(dtype=dtype)
loss_op = losses.log_loss(
labels=target_output, predictions=model.total_sum)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1e-2)
train_op = optimizer.minimize(loss_op)
saver = saver_lib.Saver()
# Train Cudnn model
seed = 0
with self.test_session(use_gpu=True, graph=g) as sess:
sess.run(variables.global_variables_initializer())
# Train 128 steps
num_steps = 128
for _ in range(num_steps):
inputs, _ = model.SynthesizeInput(seq_length, batch_size, seed)
targets = np.random.rand()
sess.run(
train_op,
feed_dict={
model.inputs: inputs,
model.initial_state: model.ZeroState(batch_size),
target_output: targets
})
seed += 1
save_path = os.path.join(self.get_temp_dir(),
("cudnn-rnn-%s-test" % rnn_mode))
save_v = saver.save(sess, save_path)
self.assertEqual(save_path, save_v)
# Cudnn inference graph
with ops.Graph().as_default() as g:
model = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dtype=dtype,
training=False)
rnn = model.rnn
saver = saver_lib.Saver()
inference_input = np.random.rand(seq_length, batch_size,
input_size).astype(np.float32)
with self.test_session(use_gpu=True, graph=g) as sess:
sess.run(variables.global_variables_initializer())
saver.restore(sess, save_path)
# Cudnn inference
cudnn_outputs_v, cudnn_output_states_v = model.Feed(
sess, inference_input, return_sum=False)
# Canonical RNN inference graph
with ops.Graph().as_default() as g:
cell_inputs = array_ops.placeholder(
dtype, shape=[seq_length, batch_size, input_size])
if direction == CUDNN_RNN_UNIDIRECTION:
# outputs is one tensor, states are num_layer tuples, each 2 tensors
(outputs, states) = _CreateCudnnCompatibleCanonicalRNN(rnn, cell_inputs)
if rnn_mode == CUDNN_LSTM:
output_h = array_ops.stack([s.h for s in states])
output_c = array_ops.stack([s.c for s in states])
else:
output_state = array_ops.stack([s for s in states])
else:
# outputs is one tensor.
# states is a tuple of 2 tuples:
# each sub tuple is num_layer tuples, each with 2 tensors.
(outputs, states) = _CreateCudnnCompatibleCanonicalRNN(
rnn, cell_inputs, is_bidi=True)
output_state_fw, output_state_bw = states
if rnn_mode == CUDNN_LSTM:
output_h, output_c = [], []
for s_fw, s_bw in zip(output_state_fw, output_state_bw):
output_h.append(array_ops.stack([s_fw.h, s_bw.h]))
output_c.append(array_ops.stack([s_fw.c, s_bw.c]))
output_h = array_ops.concat(output_h, axis=0)
output_c = array_ops.concat(output_c, axis=0)
else:
output_state = []
for s_fw, s_bw in zip(output_state_fw, output_state_bw):
output_state.append(array_ops.stack([s_fw, s_bw]))
output_state = array_ops.concat(output_state, axis=0)
saver = saver_lib.Saver()
with self.test_session(use_gpu=True, graph=g) as sess:
saver.restore(sess, save_path)
        # Canonical (non-cudnn) cell inference
if rnn_mode == CUDNN_LSTM:
outputs_v, output_h_v, output_c_v = sess.run(
[outputs, output_h, output_c],
feed_dict={cell_inputs: inference_input})
self.assertAllClose(cudnn_outputs_v, outputs_v)
cudnn_output_h_v, cudnn_output_c_v = cudnn_output_states_v
self.assertAllClose(cudnn_output_h_v, output_h_v)
self.assertAllClose(cudnn_output_c_v, output_c_v)
else:
outputs_v, output_state_v = sess.run(
[outputs, output_state],
feed_dict={cell_inputs: inference_input})
self.assertAllClose(cudnn_outputs_v, outputs_v, atol=2e-5, rtol=2e-5)
(cudnn_output_h_v,) = cudnn_output_states_v
self.assertAllClose(cudnn_output_h_v, output_state_v, atol=2e-5,
rtol=2e-5)
class CudnnRNNTestParamsSize(test_util.TensorFlowTestCase):
def _TestOpaqueParamsSize(self, rnn_mode, num_layers, num_units, input_size,
dtype, direction):
logging.info("Testing one lstm param size with config: %s", locals())
model = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
dtype=dtype,
direction=direction)
rnn = model.rnn
# Min param size estimate = sum(weights.size) + sum(biases.size)
min_params_size = (
np.sum(list(map(np.prod, rnn.canonical_weight_shapes))) +
np.sum([sp[0] for sp in rnn.canonical_bias_shapes]))
opaque_params = rnn.trainable_variables[0]
with self.test_session(use_gpu=True, graph=ops.get_default_graph()):
variables.global_variables_initializer().run()
opaque_params_size_v = opaque_params.eval().size
self.assertLessEqual(min_params_size, opaque_params_size_v)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testOpaqueParamsSize(self):
test_configs = [
[4, 200, 200],
[4, 200, 300],
[4, 200, 100],
[1, 100, 200],
[2, 200, 100],
[3, 200, 400],
]
directions = [CUDNN_RNN_UNIDIRECTION, CUDNN_RNN_BIDIRECTION]
dtype_list = [dtypes.float16, dtypes.float32, dtypes.float64]
rnns = [CUDNN_LSTM, CUDNN_GRU, CUDNN_RNN_RELU, CUDNN_RNN_TANH]
for (rnn, config, dtype, direction) in itertools.product(
rnns, test_configs, dtype_list, directions):
num_layers, num_units, input_size = config
with ops.Graph().as_default():
self._TestOpaqueParamsSize(rnn, num_layers, num_units, input_size,
dtype, direction)
class CudnnRNNTestTraining(test_util.TensorFlowTestCase):
def setUp(self):
super(CudnnRNNTestTraining, self).setUp()
self._reset_rnd_gen_state = os.environ.get("TF_CUDNN_RESET_RND_GEN_STATE",
str(False))
self._rnn_use_v2 = os.environ.get("TF_CUDNN_RNN_USE_V2", "0")
def tearDown(self):
super(CudnnRNNTestTraining, self).tearDown()
os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = self._reset_rnd_gen_state
os.environ["TF_CUDNN_RNN_USE_V2"] = self._rnn_use_v2
def _ComputeNumericGrad(self, sess, y, x, delta=1e-4, step=1):
"""Compute the numeric gradient of y wrt to x.
Args:
sess: The TF session constructed with a graph containing x and y.
y: A scalar TF Tensor in the graph constructed in sess.
x: A TF Tensor in the graph constructed in sess.
delta: Gradient checker's small perturbation of x[i].
step: Only compute numerical gradients for a subset of x values.
I.e. dy/dx[i] is computed if i % step == 0.
Returns:
A Tensor of the same shape and dtype as x. If x[i] is not chosen
to compute the numerical gradient dy/x[i], the corresponding
value is set to 0.
"""
x_data = sess.run(x)
x_size = x_data.size
x_shape = x_data.shape
numeric_grad = np.zeros(x_size, dtype=x_data.dtype)
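    # Central difference: dy/dx[i] ~= (y(x[i] + delta) - y(x[i] - delta)) / (2 * delta).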
for i in range(0, x_size, step):
x_pos = x_data.copy()
if x_size == 1:
x_pos += delta
else:
x_pos.flat[i] += delta
y_pos_feed_dict = dict([(x.name, x_pos)])
y_pos = sess.run(y, feed_dict=y_pos_feed_dict)
x_neg = x_data.copy()
if x_size == 1:
x_neg -= delta
else:
x_neg.flat[i] -= delta
y_neg_feed_dict = dict([(x.name, x_neg)])
y_neg = sess.run(y, feed_dict=y_neg_feed_dict)
numeric_grad[i] = (y_pos - y_neg) / (2 * delta)
return numeric_grad.reshape(x_shape)
def _GetShape(self, sess, inputs):
if not isinstance(inputs, collections.Iterable):
return sess.run(array_ops.shape(inputs))
else:
return sess.run([array_ops.shape(x) for x in inputs])
def _GradientCheckFp16(self, sess, y, xs, num_samples,
tolerance=1e-6, delta=1e-4):
"""Gradient check for Fp16.
    Numerical gradients computed in fp16 underflow to zero, so a first-order
    check is used instead:
    Given a multivariate function:
      y = f(x1, x2, ..., xn)
    a joint perturbation of every input changes the output by
      delta_y = f(x1 + delta_x1, x2 + delta_x2, ..., xn + delta_xn) -
                f(x1, x2, ..., xn)
             ~= f'(x1) * delta_x1 + f'(x2) * delta_x2 + ... + f'(xn) * delta_xn
    where:
      delta_xi are very small perturbations.
      f'(xi) is the gradient of y w.r.t. xi.
The gradient check verifies the expected delta_y calculated by the above
equation is close to the actual delta_y.
Args:
sess: tf.Session object.
y: output tensor.
xs: a tensor or a list of input tensors.
num_samples: number of test samples to run.
tolerance: error tolerance.
      delta: the order of magnitude of the input perturbation applied to
        estimate the output change w.r.t. the inputs.
"""
sym_grads = self._ComputeSymGrads(sess, y, xs)
xs_shapes = self._GetShape(sess, xs)
x_vals = [sess.run(x) for x in xs]
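    # First-order check: perturb all inputs jointly and compare the measured
    # change in y against the prediction sum_i(sym_grad_i . delta_x_i).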
for _ in range(num_samples):
delta_xs = [delta * np.random.rand(*shape.tolist())
for shape in xs_shapes]
feed_dict = {}
for x, x_val, delta_x in zip(xs, x_vals, delta_xs):
feed_dict[x] = x_val + delta_x
actual_delta_y = (float(sess.run(y, feed_dict=feed_dict)) -
float(sess.run(y)))
expected_delta_y = 0.
for sym_grad, delta_x in zip(sym_grads, delta_xs):
expected_delta_y += np.dot(
sym_grad.astype(np.float32).flatten(),
delta_x.astype(np.float32).flatten())
self.assertAllClose(expected_delta_y, actual_delta_y,
atol=tolerance, rtol=tolerance)
def _GradientCheck(self, sess, y, xs, tolerance=1e-6, delta=1e-4):
sym_grads = self._ComputeSymGrads(sess, y, xs)
num_grads = [self._ComputeNumericGrad(sess, y, x, delta) for x in xs]
self.assertEqual(len(sym_grads), len(num_grads))
for x, sym, num in zip(xs, sym_grads, num_grads):
logging.info("Comparing gradients for input: %s", x.name)
self.assertFalse(np.any(np.isnan(sym)))
self.assertFalse(np.any(np.isnan(num)))
self.assertAllClose(sym, num, atol=tolerance, rtol=tolerance)
def _ComputeSymGrads(self, sess, y, xs):
sym_grads_t = gradients.gradients(y, xs)
return sess.run(sym_grads_t)
def _TestOneSimpleTraining(self, rnn_mode, num_layers, num_units, input_size,
batch_size, seq_length, dir_count, dropout, dtype,
use_v2, delta, tolerance):
    # Gradient checking runs two forward ops with almost the same input, so the
    # dropout patterns across the two runs must be identical.
logging.info("Training test with config: %s", locals())
os.environ["TF_CUDNN_RESET_RND_GEN_STATE"] = str(True)
np.random.seed(1234)
random_seed.set_random_seed(5678)
has_input_c = (rnn_mode == CUDNN_LSTM)
direction = (CUDNN_RNN_UNIDIRECTION
if dir_count == 1 else CUDNN_RNN_BIDIRECTION)
if use_v2:
os.environ["TF_CUDNN_RNN_USE_V2"] = "1"
else:
os.environ["TF_CUDNN_RNN_USE_V2"] = "0"
model = CudnnTestModel(
rnn_mode,
num_layers,
num_units,
input_size,
direction=direction,
dropout=dropout,
dtype=dtype,
training=True,
bias_initializer=init_ops.random_normal_initializer(
mean=1., dtype=dtype))
rnn = model.rnn
params = rnn.trainable_variables[0]
inputs = variables.Variable(
random_ops.random_uniform([seq_length, batch_size, input_size],
dtype=dtype),
dtype=dtype).read_value()
input_h = variables.Variable(
random_ops.random_uniform(
[num_layers * dir_count, batch_size, num_units], dtype=dtype),
dtype=dtype).read_value()
if has_input_c:
input_c = variables.Variable(
random_ops.random_uniform(
[num_layers * dir_count, batch_size, num_units], dtype=dtype),
dtype=dtype).read_value()
initial_state = (input_h, input_c)
else:
initial_state = (input_h,)
total_sum = model.FProp(inputs, initial_state, training=True)
with self.test_session(use_gpu=True, graph=ops.get_default_graph()) as sess:
sess.run(variables.global_variables_initializer())
all_inputs = [inputs, params]
for s in initial_state:
all_inputs.append(s)
if dtype == dtypes.float16:
self._GradientCheckFp16(
sess, total_sum, all_inputs,
num_samples=FLAGS.grad_check_num_samples,
tolerance=tolerance, delta=delta)
else:
for _ in range(FLAGS.grad_check_num_samples):
# Each time choose a different set of inputs.
sess.run(variables.global_variables_initializer())
self._GradientCheck(
sess, total_sum, all_inputs,
tolerance=tolerance, delta=delta)
def _TestSimpleTrainingHelper(self, rnn_mode, test_configs):
dropouts = [0, 0.5, 1.]
v2_options = [False, True]
for config, dropout, use_v2 in itertools.product(test_configs, dropouts,
v2_options):
dtype = config.get("dtype", dtypes.float32)
delta = config.get("delta", 1e-4)
tolerance = config.get("tolerance", 1e-6)
dir_count = config.get("dir_count", 1)
shape = config["shape"]
if dtype == dtypes.float64:
# TODO(jamesqin): b/117848763
use_v2 = False
with ops.Graph().as_default():
self._TestOneSimpleTraining(
rnn_mode, shape["num_layers"], shape["num_units"],
shape["input_size"], shape["batch_size"], shape["seq_length"],
dir_count, dropout, dtype, use_v2, delta, tolerance)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingLSTMFp64(self):
test_configs = [
{
"dtype": dtypes.float64,
"tolerance": 5e-6,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingLSTMFp32(self):
test_configs = [
{
"dtype": dtypes.float32,
"delta": 1e-4,
"tolerance": 9e-2,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingLSTMFp16(self):
test_configs = [
{
"dtype": dtypes.float16,
"delta": 1e-3,
"tolerance": 9e-2,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
{
"dtype": dtypes.float16,
"delta": 1e-2,
"tolerance": 9e-2,
"shape": {
"num_layers": 2,
"num_units": 6,
"input_size": 8,
"batch_size": 6,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_LSTM, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingGRUFp64(self):
test_configs = [
{
"dtype": dtypes.float64,
"tolerance": 5e-6,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
}
},
]
self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingGRUFp32(self):
test_configs = [
{
"dtype": dtypes.float32,
"delta": 1e-3,
"tolerance": 4e-3,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingGRUFp16(self):
test_configs = [
{
"dtype": dtypes.float16,
"delta": 2e-3,
"tolerance": 6e-2,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_GRU, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingRNNTanhFp64(self):
test_configs = [
{
"dtype": dtypes.float64,
"tolerance": 5e-6,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_RNN_TANH, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingRNNTanhFp32(self):
test_configs = [
{
"dtype": dtypes.float32,
"delta": 1e-3,
"tolerance": 5e-3,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_RNN_TANH, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingRNNTanhFp16(self):
test_configs = [
{
"dtype": dtypes.float16,
"delta": 1e-3,
"tolerance": 5e-2,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_RNN_TANH, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingRNNReluFp64(self):
test_configs = [
{
"dtype": dtypes.float64,
"tolerance": 5e-6,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_RNN_RELU, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingRNNReluFp32(self):
test_configs = [
{
"dtype": dtypes.float32,
"delta": 1e-4,
"tolerance": 3e-1,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_RNN_RELU, test_configs)
@unittest.skipUnless(test.is_built_with_cuda(),
"Test only applicable when running on GPUs")
def testSimpleTrainingRNNReluFp16(self):
test_configs = [
{
"dtype": dtypes.float16,
"delta": 1e-3,
"tolerance": 7e-2,
"shape": {
"num_layers": 2,
"num_units": 3,
"input_size": 4,
"batch_size": 3,
"seq_length": 4,
},
},
]
self._TestSimpleTrainingHelper(CUDNN_RNN_RELU, test_configs)
if __name__ == "__main__":
argv0 = sys.argv[0]
parser = argparse.ArgumentParser()
parser.add_argument(
"--grad_check_num_samples",
type=int,
default=1,
help="Number of samples to run for gradient check.")
FLAGS, unparsed = parser.parse_known_args()
sys.argv = [argv0] + unparsed
googletest.main()
|